├── .gitallowed
├── .github
│   └── workflows
│       ├── release.yml
│       └── update-images.yaml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── SECURITY.md
├── dist
│   ├── activate
│   ├── bin
│   │   ├── kctf-challenge
│   │   ├── kctf-cluster
│   │   ├── kctf-completion
│   │   └── kctf-log
│   ├── challenge-templates
│   │   ├── pwn
│   │   │   ├── README.md
│   │   │   ├── challenge.yaml
│   │   │   ├── challenge
│   │   │   │   ├── Dockerfile
│   │   │   │   ├── Makefile
│   │   │   │   ├── chal.c
│   │   │   │   ├── flag
│   │   │   │   └── nsjail.cfg
│   │   │   └── healthcheck
│   │   │       ├── Dockerfile
│   │   │       ├── README.md
│   │   │       ├── healthcheck.py
│   │   │       ├── healthcheck_loop.sh
│   │   │       └── healthz_webserver.py
│   │   ├── web
│   │   │   ├── README.md
│   │   │   ├── challenge.yaml
│   │   │   ├── challenge
│   │   │   │   ├── Dockerfile
│   │   │   │   ├── apache2-kctf-nsjail.conf
│   │   │   │   ├── cgi-bin.nsjail.cfg
│   │   │   │   ├── cgi-bin
│   │   │   │   │   └── nsjail-php-cgi
│   │   │   │   ├── flag
│   │   │   │   ├── web-apps
│   │   │   │   │   ├── nodejs
│   │   │   │   │   │   └── app.js
│   │   │   │   │   └── php
│   │   │   │   │       └── index.php
│   │   │   │   ├── web-servers.nsjail.cfg
│   │   │   │   └── web-servers
│   │   │   │       └── nodejs.sh
│   │   │   └── healthcheck
│   │   │       ├── Dockerfile
│   │   │       ├── README.md
│   │   │       ├── healthcheck.py
│   │   │       ├── healthcheck_loop.sh
│   │   │       └── healthz_webserver.py
│   │   └── xss-bot
│   │       ├── README.md
│   │       ├── challenge.yaml
│   │       ├── challenge
│   │       │   ├── .puppeteerrc.cjs
│   │       │   ├── Dockerfile
│   │       │   ├── bot.js
│   │       │   └── cookie
│   │       └── healthcheck
│   │           ├── Dockerfile
│   │           ├── README.md
│   │           ├── healthcheck.py
│   │           ├── healthcheck_loop.sh
│   │           └── healthz_webserver.py
│   └── resources
│       └── install.yaml
├── docker-images
│   ├── certbot
│   │   ├── Dockerfile
│   │   └── certbot.sh
│   ├── challenge
│   │   ├── Dockerfile
│   │   ├── kctf_drop_privs
│   │   ├── kctf_pow
│   │   ├── kctf_setup
│   │   └── pow.py
│   ├── gcsfuse
│   │   └── Dockerfile
│   └── healthcheck
│       ├── Dockerfile
│       ├── kctf_bypass_pow
│       └── kctf_drop_privs
├── docs
│   ├── _config.yml
│   ├── _layouts
│   │   └── default.html
│   ├── ctf-playbook.md
│   ├── custom-domains.md
│   ├── google-cloud.md
│   ├── images
│   │   ├── flag-locations.png
│   │   ├── introduction-k8s.png
│   │   ├── php_sample.png
│   │   └── threat-model-graph.png
│   ├── index.md
│   ├── introduction.md
│   ├── kctf-exploits.html
│   ├── kctf-operator.md
│   ├── local-testing.md
│   ├── security-threat-model.md
│   ├── troubleshooting.md
│   └── vrp.md
└── kctf-operator
    ├── .dockerignore
    ├── .gitignore
    ├── Dockerfile
    ├── Makefile
    ├── PROJECT
    ├── api
    │   └── v1
    │       ├── challenge_types.go
    │       ├── groupversion_info.go
    │       └── zz_generated.deepcopy.go
    ├── bin
    │   └── .gitignore
    ├── build-and-deploy-operator.sh
    ├── bundle.Dockerfile
    ├── bundle
    │   └── .gitignore
    ├── config
    │   ├── crd
    │   │   ├── bases
    │   │   │   └── kctf.dev_challenges.yaml
    │   │   ├── kustomization.yaml
    │   │   ├── kustomizeconfig.yaml
    │   │   └── patches
    │   │       ├── cainjection_in_challenges.yaml
    │   │       └── webhook_in_challenges.yaml
    │   ├── default
    │   │   ├── kustomization.yaml
    │   │   ├── manager_auth_proxy_patch.yaml
    │   │   └── manager_config_patch.yaml
    │   ├── manager
    │   │   ├── controller_manager_config.yaml
    │   │   ├── kustomization.yaml
    │   │   └── manager.yaml
    │   ├── manifests
    │   │   ├── bases
    │   │   │   └── kctf-operator.clusterserviceversion.yaml
    │   │   └── kustomization.yaml
    │   ├── prometheus
    │   │   ├── kustomization.yaml
    │   │   └── monitor.yaml
    │   ├── rbac
    │   │   ├── auth_proxy_client_clusterrole.yaml
    │   │   ├── auth_proxy_role.yaml
    │   │   ├── auth_proxy_role_binding.yaml
    │   │   ├── auth_proxy_service.yaml
    │   │   ├── challenge_editor_role.yaml
    │   │   ├── challenge_viewer_role.yaml
    │   │   ├── kustomization.yaml
    │   │   ├── leader_election_role.yaml
    │   │   ├── leader_election_role_binding.yaml
    │   │   ├── role.yaml
    │   │   ├── role_binding.yaml
    │   │   └── service_account.yaml
    │   ├── samples
    │   │   ├── kctf_v1_challenge.yaml
    │   │   ├── kustomization.yaml
    │   │   ├── mychal.yaml
    │   │   ├── mychal2.yaml
    │   │   ├── mychal3.yaml
    │   │   └── simple-challenge.yaml
    │   └── scorecard
    │       ├── bases
    │       │   └── config.yaml
    │       ├── kustomization.yaml
    │       └── patches
    │           ├── basic.config.yaml
    │           └── olm.config.yaml
    ├── controllers
    │   ├── autoscaling
    │   │   ├── functions.go
    │   │   └── horizontal-pod-autoscaler.go
    │   ├── challenge_controller.go
    │   ├── deployment
    │   │   ├── deployment-with-healthcheck.go
    │   │   ├── deployment.go
    │   │   ├── functions.go
    │   │   ├── image.go
    │   │   └── replicas.go
    │   ├── network-policy
    │   │   ├── functions.go
    │   │   └── network-policy.go
    │   ├── pow
    │   │   ├── configmap.go
    │   │   └── functions.go
    │   ├── secrets
    │   │   ├── functions.go
    │   │   └── secrets.go
    │   ├── service
    │   │   ├── functions.go
    │   │   └── service.go
    │   ├── set
    │   │   └── default.go
    │   ├── status
    │   │   └── functions.go
    │   ├── suite_test.go
    │   ├── utils
    │   │   └── utils.go
    │   └── volumes
    │       ├── functions.go
    │       ├── persistentvolume.go
    │       └── persistentvolumeclaim.go
    ├── go.mod
    ├── go.sum
    ├── hack
    │   └── boilerplate.go.txt
    ├── main.go
    └── resources
        ├── allow-dns.go
        ├── constants.go
        ├── daemon-gcsfuse.go
        ├── external-dns.go
        ├── initializer.go
        ├── network-policy.go
        └── secret-pow.go
/.gitallowed:
--------------------------------------------------------------------------------
1 | KCTF_CLOUD_API_KEY.*
2 | 
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Create Release
2 | 
3 | on:
4 |   workflow_dispatch:
5 |     inputs:
6 |       version:
7 |         description: 'Version'
8 |         required: true
9 |         default: 'MAJOR.MINOR.PATCH'
10 |       release_notes:
11 |         description: 'Release Notes'
12 |         required: true
13 | 
14 | jobs:
15 |   create_draft_release:
16 |     runs-on: ubuntu-latest
17 |     permissions:
18 |       contents: write
19 |     steps:
20 |     - uses: actions/checkout@v2
21 | 
22 |     - name: Check version has the right format
23 |       run: |
24 |         [[ "${{ github.event.inputs.version }}" =~ ^[0-9]+[.][0-9]+[.][0-9]+$ ]]
25 | 
26 |     - name: Create archive
27 |       run: |
28 |         mv dist kctf
29 |         echo ${{ github.event.inputs.version }} > kctf/VERSION
30 |         tar -cz kctf > kctf.tgz
31 |         git config user.name ${{ github.actor }}
32 |         git config user.email action@github.com
33 |         git tag v${{ github.event.inputs.version }}
34 |         git push origin v${{ github.event.inputs.version }}
35 | 
36 |     - name: Create Release
37 |       id: create_release
38 |       uses: actions/create-release@v1
39 |       env:
40 |         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
41 |       with:
42 |         tag_name: v${{ github.event.inputs.version }}
43 |         release_name: Release ${{ github.event.inputs.version }}
44 |         body: ${{ github.event.inputs.release_notes }}
45 |         draft: true
46 |         prerelease: false
47 | 
48 |     - name: Upload Release Asset
49 |       id: upload-release-asset
50 |       uses: actions/upload-release-asset@v1
51 |       env:
52 |         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
53 |       with:
54 |         upload_url: ${{ steps.create_release.outputs.upload_url }}
55 |         asset_path: kctf.tgz
56 |         asset_name: kctf.tgz
57 |         asset_content_type: application/gzip
58 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist/bin/kind
2 | dist/bin/kubectl
3 | dist/bin/yq
4 | dist/config/*
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 | 
3 | We'd love to accept your patches and contributions to this project. There are
4 | just a few small guidelines you need to follow.
5 | 
6 | ## Contributor License Agreement
7 | 
8 | Contributions to this project must be accompanied by a Contributor License
9 | Agreement. You (or your employer) retain the copyright to your contribution;
10 | this simply gives us permission to use and redistribute your contributions as
11 | part of the project. Head over to <https://cla.developers.google.com/> to see
12 | your current agreements on file or to sign a new one.
13 | 
14 | You generally only need to submit a CLA once, so if you've already submitted one
15 | (even if it was for a different project), you probably don't need to do it
16 | again.
17 | 
18 | ## Code reviews
19 | 
20 | All submissions, including submissions by project members, require review. We
21 | use GitHub pull requests for this purpose. Consult
22 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
23 | information on using pull requests.
24 | 
25 | ## Community Guidelines
26 | 
27 | This project follows [Google's Open Source Community
28 | Guidelines](https://opensource.google.com/conduct/).
29 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # kCTF
2 | [![GKE Deployment](https://github.com/google/kctf/workflows/GKE%20Deployment/badge.svg?branch=master)](https://github.com/google/kctf/actions?query=workflow%3A%22GKE+Deployment%22)
3 | 
4 | kCTF is a Kubernetes-based infrastructure for CTF competitions.
5 | 
6 | ## Prerequisites
7 | 
8 | * [gcloud](https://cloud.google.com/sdk/install)
9 | * [docker](https://docs.docker.com/install/)
10 | 
11 | ## Getting Started / Documentation
12 | 
13 | For an introduction to what kCTF is and how it interacts with Kubernetes, see [kCTF in 8 Minutes](https://google.github.io/kctf/introduction.html).
14 | 
15 | Additional documentation resources are:
16 | 
17 | * **[Local Testing Walkthrough](https://google.github.io/kctf/local-testing.html) – A quick start guide showing you how to build and test challenges locally.**
18 | * [Google Cloud Walkthrough](https://google.github.io/kctf/google-cloud.html) – Once you have everything up and running, try deploying to Google Cloud.
19 | * [Troubleshooting](https://google.github.io/kctf/troubleshooting.html) – Help with fixing broken challenges.
20 | * [Security Threat Model](https://google.github.io/kctf/security-threat-model.html) – Security considerations regarding kCTF including information on assets, risks, and potential attackers.
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 | 
3 | To report a vulnerability in this repository or in Google Cloud, please contact the Google Security Team at https://g.co/vulnz. Read more about reporting vulnerabilities in kCTF [here](docs/vrp.md). You can read the threat model [here](docs/security-threat-model.md).
4 | 
5 | To report a vulnerability in a dependency, please contact the upstream maintainer directly.
6 | 
7 | - [Kubernetes](https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability)
8 | - [Docker](https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-security-issues)
9 | - [Ubuntu](https://wiki.ubuntu.com/SecurityTeam/FAQ?_ga=2.254550412.542177495.1583355140-2013298171.1583355140#Contact)
10 | - [Linux kernel](https://www.kernel.org/doc/html/v4.11/admin-guide/security-bugs.html)
11 | 
--------------------------------------------------------------------------------
/dist/bin/kctf-log:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2020 Google LLC
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     https://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | _KCTF_COLOR_RED=$'\e[0;31m'
17 | _KCTF_COLOR_GREEN=$'\e[0;32m'
18 | _KCTF_COLOR_YELLOW=$'\e[0;33m'
19 | _KCTF_COLOR_END=$'\e[0m'
20 | 
21 | function _kctf_log {
22 |   echo -n "${_KCTF_COLOR_GREEN}[*]${_KCTF_COLOR_END} " >&2
23 |   echo "$@" >&2
24 | }
25 | 
26 | function _kctf_log_warn {
27 |   echo -n "${_KCTF_COLOR_YELLOW}[W]${_KCTF_COLOR_END} " >&2
28 |   echo "$@" >&2
29 | }
30 | 
31 | function _kctf_log_err {
32 |   echo -n "${_KCTF_COLOR_RED}[E]${_KCTF_COLOR_END} " >&2
33 |   echo "$@" >&2
34 | }
35 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/README.md:
--------------------------------------------------------------------------------
1 | # Quickstart guide to writing a challenge
2 | 
3 | The basic steps when preparing a challenge are:
4 | 
5 | * A Docker image is built from the `challenge` directory. For the simplest challenges, replacing `challenge/chal.c` is sufficient.
6 | * Edit `challenge/Dockerfile` to change the commandline or the files you want to include.
7 | * To try the challenge locally, you will need to
8 |   * create a local cluster with `kctf cluster create --type kind --start $configname`
9 |   * build the challenge binary with `make -C challenge`
10 |   * and then deploy the challenge with `kctf chal start`
11 | * To access the challenge, create a port forward with `kctf chal debug port-forward` and connect to it via `nc localhost PORT` using the printed port.
12 | * Check out `kctf chal ` for more commands.
13 | 
14 | ## Directory layout
15 | 
16 | The following files/directories are available:
17 | 
18 | ### /challenge.yaml
19 | 
20 | `challenge.yaml` is the main configuration file. You can use it to change
21 | settings like the name and namespace of the challenge, the exposed ports, the
22 | proof-of-work difficulty etc.
23 | For documentation on the available fields, you can run `kubectl explain challenge` and
24 | `kubectl explain challenge.spec`.
25 | 
26 | ### /challenge
27 | 
28 | The `challenge` directory contains a Dockerfile that describes the challenge and
29 | any challenge files. This template comes with a Makefile to build the challenge,
30 | which is the recommended way for pwnables if the deployed binary matters, e.g.
31 | if you hand it out as an attachment for ROP gadgets.
32 | If the binary layout doesn't matter, you can build it using an intermediate
33 | container as part of the Dockerfile, similar to how the chroot is created.
34 | 
35 | ### /healthcheck
36 | 
37 | The `healthcheck` directory is optional. If you don't want to write a healthcheck, feel free to delete it. However, we strongly recommend that you implement a healthcheck :).
38 | 
39 | We provide a basic healthcheck skeleton that uses pwntools to implement the
40 | healthcheck code. The only requirement is that the healthcheck replies to GET
41 | requests to http://$host:45281/healthz with either a success or an error status
42 | code.
43 | 
44 | In most cases, you will only have to modify `healthcheck/healthcheck.py`.
45 | 
46 | ## API contract
47 | 
48 | Make sure your setup fulfills the following requirements so that it works with kCTF:
49 | 
50 | * Verify `kctf_setup` is used as the first command in the CMD instruction of your `challenge/Dockerfile`.
51 | * You can do pretty much whatever you want in the `challenge` directory but:
52 |   * We strongly recommend using nsjail in all challenges. While nsjail is already installed, you need to configure it in `challenge/nsjail.cfg`. For more information on nsjail, see the [official website](https://nsjail.dev/).
53 |   * Your challenge receives connections on port 1337. The port can be changed in `challenge.yaml`.
54 | * The healthcheck directory is optional.
55 |   * If it exists, the image should run a webserver on port 45281 and respond to `/healthz` requests.
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/challenge.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kctf.dev/v1
2 | kind: Challenge
3 | metadata:
4 |   name: pwn
5 | spec:
6 |   deployed: true
7 |   powDifficultySeconds: 0
8 |   network:
9 |     public: false
10 |   healthcheck:
11 |     # TIP: disable the healthcheck during development
12 |     enabled: true
13 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/challenge/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
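# The image is built in two stages: the "chroot" stage below prepares the
# challenge filesystem (an unprivileged user, the flag, and the chal binary);
# the final stage starts from the kCTF challenge base image, copies the first
# stage to /chroot, and lets socat plus nsjail pivot every incoming
# connection into that chroot (see nsjail.cfg).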
14 | FROM ubuntu:24.04 as chroot
15 | 
16 | # ubuntu24 includes the ubuntu user by default
17 | RUN /usr/sbin/userdel -r ubuntu && /usr/sbin/useradd --no-create-home -u 1000 user
18 | 
19 | COPY flag /
20 | COPY chal /home/user/
21 | 
22 | FROM gcr.io/kctf-docker/challenge@sha256:9f15314c26bd681a043557c9f136e7823414e9e662c08dde54d14a6bfd0b619f
23 | 
24 | COPY --from=chroot / /chroot
25 | 
26 | COPY nsjail.cfg /home/user/
27 | 
28 | CMD kctf_setup && \
29 |     kctf_drop_privs \
30 |     socat \
31 |       TCP-LISTEN:1337,reuseaddr,fork \
32 |       EXEC:"kctf_pow nsjail --config /home/user/nsjail.cfg -- /home/user/chal"
33 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/challenge/Makefile:
--------------------------------------------------------------------------------
1 | LDFLAGS=-static
2 | 
3 | chal: chal.c
4 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/challenge/chal.c:
--------------------------------------------------------------------------------
1 | #include <stdlib.h>
2 | 
3 | int main(int argc, char *argv[]) {
4 |   system("cat /flag");
5 |   return 0;
6 | }
7 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/challenge/flag:
--------------------------------------------------------------------------------
1 | CTF{TestFlag}
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/challenge/nsjail.cfg:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | # See options available at https://github.com/google/nsjail/blob/master/config.proto
16 | 
17 | name: "default-nsjail-configuration"
18 | description: "Default nsjail configuration for pwnable-style CTF task."
19 | 
20 | mode: ONCE
21 | uidmap {inside_id: "1000"}
22 | gidmap {inside_id: "1000"}
23 | rlimit_as_type: HARD
24 | rlimit_cpu_type: HARD
25 | rlimit_nofile_type: HARD
26 | rlimit_nproc_type: HARD
27 | 
28 | cwd: "/home/user"
29 | 
30 | mount: [
31 |   {
32 |     src: "/chroot"
33 |     dst: "/"
34 |     is_bind: true
35 |   },
36 |   {
37 |     dst: "/tmp"
38 |     fstype: "tmpfs"
39 |     rw: true
40 |   },
41 |   {
42 |     dst: "/proc"
43 |     fstype: "proc"
44 |     rw: true
45 |   },
46 |   {
47 |     src: "/etc/resolv.conf"
48 |     dst: "/etc/resolv.conf"
49 |     is_bind: true
50 |   }
51 | ]
52 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/pwn/healthcheck/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | FROM gcr.io/kctf-docker/healthcheck@sha256:66b34a47e7bbb832012905e229da0bbed80c5c3cddd4703127ca4026ba528cfc 15 | 16 | COPY healthcheck_loop.sh healthcheck.py healthz_webserver.py /home/user/ 17 | 18 | CMD kctf_drop_privs /home/user/healthcheck_loop.sh & /home/user/healthz_webserver.py 19 | -------------------------------------------------------------------------------- /dist/challenge-templates/pwn/healthcheck/README.md: -------------------------------------------------------------------------------- 1 | # Healthcheck 2 | 3 | kCTF checks the health of challenges by accessing the healthcheck via 4 | http://host:45281/healthz which needs to return either 200 ok or an error 5 | depending on the status of the challenge. 6 | 7 | The default healthcheck consists of: 8 | * a loop that repeatedly calls a python script and writes the status to a file 9 | * a webserver that checks the file and serves /healthz 10 | * the actual healthcheck code using pwntools for convenience 11 | 12 | To modify it, you will likely only have to change the script in healthcheck.py. 13 | You can test if the challenge replies as expected or better add a full example 14 | solution that will try to get the flag from the challenge. 15 | -------------------------------------------------------------------------------- /dist/challenge-templates/pwn/healthcheck/healthcheck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright 2020 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
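# The check below connects to the challenge on port 1337, solves the
# proof-of-work via kctf_bypass_pow if the banner announces it as enabled,
# and then expects the flag (CTF{...}) in the output. A non-zero exit status
# makes healthcheck_loop.sh record the challenge as unhealthy.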
16 | 17 | import pwnlib.tubes 18 | 19 | def handle_pow(r): 20 | print(r.recvuntil(b'python3 ')) 21 | print(r.recvuntil(b' solve ')) 22 | challenge = r.recvline().decode('ascii').strip() 23 | p = pwnlib.tubes.process.process(['kctf_bypass_pow', challenge]) 24 | solution = p.readall().strip() 25 | r.sendline(solution) 26 | print(r.recvuntil(b'Correct\n')) 27 | 28 | r = pwnlib.tubes.remote.remote('127.0.0.1', 1337) 29 | print(r.recvuntil('== proof-of-work: ')) 30 | if r.recvline().startswith(b'enabled'): 31 | handle_pow(r) 32 | 33 | print(r.recvuntil(b'CTF{')) 34 | print(r.recvuntil(b'}')) 35 | 36 | exit(0) 37 | -------------------------------------------------------------------------------- /dist/challenge-templates/pwn/healthcheck/healthcheck_loop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # https://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | set -Eeuo pipefail 16 | 17 | TIMEOUT=20 18 | PERIOD=30 19 | 20 | export TERM=linux 21 | export TERMINFO=/etc/terminfo 22 | 23 | while true; do 24 | echo -n "[$(date)] " 25 | if timeout "${TIMEOUT}" /home/user/healthcheck.py; then 26 | echo 'ok' | tee /tmp/healthz 27 | else 28 | echo -n "$? " 29 | echo 'err' | tee /tmp/healthz 30 | fi 31 | sleep "${PERIOD}" 32 | done 33 | -------------------------------------------------------------------------------- /dist/challenge-templates/pwn/healthcheck/healthz_webserver.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright 2020 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
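# Minimal status endpoint: replies to GET /healthz on port 45281 with the
# last result that healthcheck_loop.sh wrote to /tmp/healthz, using HTTP 200
# for 'ok' and 400 otherwise.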
16 | import http.server
17 | 
18 | class HealthzHandler(http.server.BaseHTTPRequestHandler):
19 |     def do_GET(self):
20 |         if self.path != '/healthz':
21 |             self.send_response(404)
22 |             self.send_header("Content-length", "0")
23 |             self.end_headers()
24 |             return
25 | 
26 |         content = b'err'
27 |         try:
28 |             with open('/tmp/healthz', 'rb') as fd:
29 |                 content = fd.read().strip()
30 |         except:
31 |             pass
32 |         self.send_response(200 if content == b'ok' else 400)
33 |         self.send_header("Content-type", "text/plain")
34 |         self.send_header("Content-length", str(len(content)))
35 |         self.end_headers()
36 |         self.wfile.write(content)
37 | 
38 | httpd = http.server.HTTPServer(('', 45281), HealthzHandler)
39 | httpd.serve_forever()
40 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/README.md:
--------------------------------------------------------------------------------
1 | # Quickstart guide to writing a challenge
2 | 
3 | The basic steps when preparing a challenge are:
4 | 
5 | * A Docker image is built from the `challenge` directory. For the simplest challenges, replacing `challenge/chal.c` is sufficient.
6 | * Edit `challenge/Dockerfile` to change the commandline or the files you want to include.
7 | * To try the challenge locally, you will need to
8 |   * create a local cluster with `kctf cluster create --type kind --start $configname`
9 |   * and then deploy the challenge with `kctf chal start`
10 | * To access the challenge, create a port forward with `kctf chal debug port-forward` and connect to it via `nc localhost PORT` using the printed port.
11 | * Check out `kctf chal ` for more commands.
12 | 
13 | ## Sandboxing
14 | 
15 | Sandboxing is only necessary for challenges that give players RCE-type access. If a challenge does not provide such access, then it is reasonable to just use a normal HTTP server out of the box listening on port 1337, without any additional sandboxing.
16 | 
17 | For challenges that give users RCE-level access, it is necessary to sandbox every player. To make that possible, kCTF provides two ways to sandbox a web server:
18 | 1. **CGI sandbox**: You can configure PHP (or any other CGI) to be sandboxed.
19 | 2. **Proxy sandbox**: You can configure an HTTP server that sandboxes every HTTP request.
20 | 
21 | A proxy sandbox is more expensive: it starts an HTTP server on every TCP connection, so it is a bit slow. A CGI sandbox is cheaper: it just calls the normal CGI endpoint, but wrapped in nsjail.
22 | 
23 | The template challenge has an example of both (NodeJS running as a proxy, and PHP running as CGI). It is recommended that static resources are served by Apache alone, so as to save CPU and RAM. This can be accomplished by configuring Apache to redirect certain sub-paths to the sandboxed web server, but to serve all other paths directly.
24 | 
25 | ## Directory layout
26 | 
27 | The following files/directories are available:
28 | 
29 | ### /challenge.yaml
30 | 
31 | `challenge.yaml` is the main configuration file. You can use it to change
32 | settings like the name and namespace of the challenge, the exposed ports, the
33 | proof-of-work difficulty etc.
34 | For documentation on the available fields, you can run `kubectl explain challenge` and
35 | `kubectl explain challenge.spec`.
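For example, the exposed ports and the proof-of-work difficulty mentioned above are configured like this in this template's own `challenge.yaml` (a value of `0` for `powDifficultySeconds` disables the proof of work):

```yaml
spec:
  powDifficultySeconds: 0
  network:
    public: false
    ports:
      - protocol: "HTTPS"
        targetPort: 1337
```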
36 | 
37 | If you would like to have a shared directory (for sessions or uploads), you can mount it using:
38 | 
39 | 
40 | ```yaml
41 | spec:
42 |   persistentVolumeClaims:
43 |     - $PUT_THE_NAME_OF_THE_CHALLENGE_HERE
44 |   podTemplate:
45 |     template:
46 |       spec:
47 |         containers:
48 |           - name: challenge
49 |             volumeMounts:
50 |               - name: gcsfuse
51 |                 subPath: sessions # this is a folder inside the volume
52 |                 mountPath: /mnt/disks/sessions
53 |               - name: gcsfuse
54 |                 subPath: uploads
55 |                 mountPath: /mnt/disks/uploads
56 |         volumes:
57 |           - name: gcsfuse
58 |             persistentVolumeClaim:
59 |               claimName: $PUT_THE_NAME_OF_THE_CHALLENGE_HERE
60 | ```
61 | 
62 | This will mount the same volume across all instances of the challenge, so files written to those directories are shared. You can test this setup on a remote cluster using the PHP/CGI sandbox.
63 | 
64 | ### /challenge
65 | 
66 | The `challenge` directory contains a Dockerfile that describes the challenge and
67 | any challenge files. You can use the Dockerfile to build your challenge as well
68 | if required.
69 | 
70 | ### /healthcheck
71 | 
72 | The `healthcheck` directory is optional. If you don't want to write a healthcheck, feel free to delete it. However, we strongly recommend that you implement a healthcheck :).
73 | 
74 | We provide a basic healthcheck skeleton that uses pwntools to implement the
75 | healthcheck code. The only requirement is that the healthcheck replies to GET
76 | requests to http://$host:45281/healthz with either a success or an error status
77 | code.
78 | 
79 | In most cases, you will only have to modify `healthcheck/healthcheck.py`.
80 | 
81 | ## API contract
82 | 
83 | Make sure your setup fulfills the following requirements so that it works with kCTF:
84 | 
85 | * Verify `kctf_setup` is used as the first command in the CMD instruction of your `challenge/Dockerfile`.
86 | * You can do pretty much whatever you want in the `challenge` directory but:
87 |   * We strongly recommend using nsjail in all challenges. While nsjail is already installed, you need to configure it in `challenge/nsjail.cfg`. For more information on nsjail, see the [official website](https://nsjail.dev/).
88 |   * Your challenge receives connections on port 1337. The port can be changed in `challenge.yaml`.
89 | * The healthcheck directory is optional.
90 |   * If it exists, the image should run a webserver on port 45281 and respond to `/healthz` requests.
91 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kctf.dev/v1
2 | kind: Challenge
3 | metadata:
4 |   name: apache-others
5 | spec:
6 |   deployed: true
7 |   powDifficultySeconds: 0
8 |   network:
9 |     public: false
10 |     ports:
11 |     - protocol: "HTTPS"
12 |       targetPort: 1337
13 |   healthcheck:
14 |     # TIP: disable the healthcheck during development
15 |     enabled: true
16 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | FROM ubuntu:24.04 as chroot
15 | 
16 | # ubuntu24 includes the ubuntu user by default
17 | RUN /usr/sbin/userdel -r ubuntu && /usr/sbin/useradd --no-create-home -u 1000 user
18 | 
19 | RUN apt-get update \
20 |     && apt-get install -yq --no-install-recommends \
21 |     curl ca-certificates socat gnupg lsb-release software-properties-common php-cgi \
22 |     && rm -rf /var/lib/apt/lists/*
23 | 
24 | RUN curl -fsSL https://deb.nodesource.com/setup_20.x -o nodesource_setup.sh \
25 |     && bash nodesource_setup.sh \
26 |     && add-apt-repository universe \
27 |     && apt-get update \
28 |     && apt-get install -yq --no-install-recommends nodejs socat \
29 |     && rm -rf /var/lib/apt/lists/*
30 | 
31 | RUN mkdir -p /mnt/disks/sessions
32 | RUN mkdir -p /mnt/disks/uploads
33 | 
34 | VOLUME /mnt/disks/sessions
35 | VOLUME /mnt/disks/uploads
36 | 
37 | COPY web-apps /web-apps
38 | COPY web-servers /web-servers
39 | 
40 | COPY flag /
41 | 
42 | FROM gcr.io/kctf-docker/challenge@sha256:9f15314c26bd681a043557c9f136e7823414e9e662c08dde54d14a6bfd0b619f
43 | 
44 | RUN apt-get update \
45 |     && DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends tzdata apache2 \
46 |     && ln -fs /usr/share/zoneinfo/Europe/Berlin /etc/localtime \
47 |     && dpkg-reconfigure --frontend noninteractive tzdata \
48 |     && rm -rf /var/lib/apt/lists/*
49 | 
50 | RUN service apache2 start
51 | 
52 | COPY --from=chroot / /chroot
53 | 
54 | # For Proxy
55 | RUN ln -s /etc/apache2/mods-available/proxy.load /etc/apache2/mods-enabled/
56 | RUN ln -s /etc/apache2/mods-available/proxy_http.load /etc/apache2/mods-enabled/
57 | 
58 | # For CGI sandboxing
59 | RUN ln -s /etc/apache2/mods-available/cgi.load /etc/apache2/mods-enabled/cgi.load
60 | RUN ln -s /etc/apache2/mods-available/actions.load /etc/apache2/mods-enabled/actions.load
61 | RUN ln -s /chroot/web-apps /web-apps
62 | COPY cgi-bin /usr/lib/cgi-bin
63 | 
64 | COPY apache2-kctf-nsjail.conf /etc/apache2/conf-enabled/
65 | 
66 | COPY web-servers.nsjail.cfg /home/user/web-servers.nsjail.cfg
67 | COPY cgi-bin.nsjail.cfg /home/user/cgi-bin.nsjail.cfg
68 | 
69 | VOLUME /var/log/apache2
70 | VOLUME /var/run/apache2
71 | 
72 | CMD kctf_setup \
73 |     && (kctf_drop_privs nsjail --config /home/user/web-servers.nsjail.cfg --port 8081 -- /web-servers/nodejs.sh &) \
74 |     && bash -c 'source /etc/apache2/envvars && APACHE_RUN_USER=user APACHE_RUN_GROUP=user /usr/sbin/apache2 -D FOREGROUND'
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/apache2-kctf-nsjail.conf:
--------------------------------------------------------------------------------
1 | ServerName kctf-nsjail
2 | Listen 1337
3 | User user
4 | 
5 | # This is only necessary for CGI sandboxing
6 | <Directory /web-apps>
7 |   Options +ExecCGI
8 |   Options +FollowSymLinks
9 |   Action application/x-nsjail-httpd-php /cgi-bin/nsjail-php-cgi
10 |   AddHandler application/x-nsjail-httpd-php php
11 |   Require all granted
12 | </Directory>
13 | 
14 | <VirtualHost *:1337>
15 |   # For proxy sandboxing use the two lines below
16 |   ProxyPreserveHost On
17 |   ProxyPass "/nodejs" "http://localhost:8081/"
18 |   # For CGI sandboxing use the line below
19 |   DocumentRoot "/web-apps/php"
20 | </VirtualHost>
21 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/cgi-bin.nsjail.cfg:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | # See options available at https://github.com/google/nsjail/blob/master/config.proto
16 | 
17 | name: "apache2-proxy-nsjail"
18 | description: "Example nsjail configuration for containing a web server."
19 | 
20 | mode: ONCE
21 | uidmap {inside_id: "1000"}
22 | gidmap {inside_id: "1000"}
23 | mount_proc: true
24 | keep_env: true
25 | rlimit_as_type: HARD
26 | rlimit_cpu_type: HARD
27 | rlimit_nofile_type: HARD
28 | rlimit_nproc_type: HARD
29 | 
30 | mount: [
31 |   {
32 |     src: "/chroot"
33 |     dst: "/"
34 |     is_bind: true
35 |   },
36 |   {
37 |     src: "/dev"
38 |     dst: "/dev"
39 |     is_bind: true
40 |   },
41 |   {
42 |     src: "/dev/null"
43 |     dst: "/dev/null"
44 |     is_bind: true
45 |   },
46 |   {
47 |     src: "/etc/resolv.conf"
48 |     dst: "/etc/resolv.conf"
49 |     is_bind: true
50 |   },
51 |   {
52 |     dst: "/mnt/disks/sessions"
53 |     fstype: "tmpfs"
54 |     rw: true
55 |   },
56 |   {
57 |     src: "/mnt/disks/sessions"
58 |     dst: "/mnt/disks/sessions"
59 |     is_bind: true
60 |     rw: true
61 |     mandatory: false
62 |   },
63 |   {
64 |     dst: "/mnt/disks/uploads"
65 |     fstype: "tmpfs"
66 |     rw: true
67 |   },
68 |   {
69 |     src: "/mnt/disks/uploads"
70 |     dst: "/mnt/disks/uploads"
71 |     is_bind: true
72 |     rw: true
73 |     mandatory: false
74 |   },
75 |   {
76 |     dst: "/tmp"
77 |     fstype: "tmpfs"
78 |     rw: true
79 |   }
80 | ]
81 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/cgi-bin/nsjail-php-cgi:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | /usr/bin/nsjail --config /home/user/cgi-bin.nsjail.cfg -- /usr/lib/cgi-bin/php $@
4 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/flag:
--------------------------------------------------------------------------------
1 | CTF{TestFlag}
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/web-apps/nodejs/app.js:
--------------------------------------------------------------------------------
1 | const http = require('http');
2 | 
3 | const hostname = '127.0.0.1';
4 | const port = 8080;
5 | 
6 | const server = http.createServer((req, res) => {
7 |   res.statusCode = 200;
8 |   res.setHeader('Content-Type', 'text/plain');
9 |   res.end(req.url.split('').reverse().join(''));
10 | });
11 | 
12 | server.listen(port, hostname, () => {
13 |   console.log(`Server running at http://${hostname}:${port}/`);
14 | });
15 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/web-apps/php/index.php:
--------------------------------------------------------------------------------
[The source of index.php did not survive extraction: its PHP/HTML markup was stripped, leaving only empty numbered lines. This is the PHP demo app that Apache serves through the CGI sandbox (DocumentRoot "/web-apps/php").]
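As an illustration only, not the original source, a minimal index.php that exercises the shared sessions mount configured by this template could look like:

<?php
// Hypothetical stand-in, NOT the original template file.
// Stores the PHP session on the shared /mnt/disks/sessions mount that the
// nsjail config binds into the sandbox, and counts visits per session.
session_save_path('/mnt/disks/sessions');
session_start();
$_SESSION['visits'] = ($_SESSION['visits'] ?? 0) + 1;
echo 'You have visited this page ' . $_SESSION['visits'] . ' time(s).';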
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/web-servers.nsjail.cfg:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | # See options available at https://github.com/google/nsjail/blob/master/config.proto
16 | 
17 | name: "apache2-proxy-nsjail"
18 | description: "Example nsjail configuration for containing a web server."
19 | 
20 | mode: LISTEN
21 | uidmap {inside_id: "1000"}
22 | gidmap {inside_id: "1000"}
23 | mount_proc: true
24 | rlimit_as_type: HARD
25 | rlimit_cpu_type: HARD
26 | rlimit_nofile_type: HARD
27 | rlimit_nproc_type: HARD
28 | 
29 | mount: [
30 |   {
31 |     src: "/chroot"
32 |     dst: "/"
33 |     is_bind: true
34 |   },
35 |   {
36 |     src: "/dev"
37 |     dst: "/dev"
38 |     is_bind: true
39 |   },
40 |   {
41 |     src: "/dev/null"
42 |     dst: "/dev/null"
43 |     is_bind: true
44 |     rw: true
45 |   },
46 |   {
47 |     src: "/etc/resolv.conf"
48 |     dst: "/etc/resolv.conf"
49 |     is_bind: true
50 |   },
51 |   {
52 |     dst: "/tmp"
53 |     fstype: "tmpfs"
54 |     rw: true
55 |   }
56 | ]
57 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/challenge/web-servers/nodejs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Start node web server
4 | (&>/dev/null node /web-apps/nodejs/app.js)&
5 | 
6 | # Proxy stdin/stdout to web server
7 | socat - TCP:127.0.0.1:8080,forever
8 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/web/healthcheck/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
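# The CMD below runs two processes side by side: healthcheck_loop.sh, which
# periodically probes the challenge and records the result, and
# healthz_webserver.py, which serves that recorded status on port 45281.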
14 | FROM gcr.io/kctf-docker/healthcheck@sha256:66b34a47e7bbb832012905e229da0bbed80c5c3cddd4703127ca4026ba528cfc 15 | 16 | COPY healthcheck_loop.sh healthcheck.py healthz_webserver.py /home/user/ 17 | 18 | CMD kctf_drop_privs /home/user/healthcheck_loop.sh & /home/user/healthz_webserver.py 19 | -------------------------------------------------------------------------------- /dist/challenge-templates/web/healthcheck/README.md: -------------------------------------------------------------------------------- 1 | # Healthcheck 2 | 3 | kCTF checks the health of challenges by accessing the healthcheck via 4 | http://host:45281/healthz which needs to return either 200 ok or an error 5 | depending on the status of the challenge. 6 | 7 | The default healthcheck consists of: 8 | * a loop that repeatedly calls a python script and writes the status to a file 9 | * a webserver that checks the file and serves /healthz 10 | * the actual healthcheck code using pwntools for convenience 11 | 12 | To modify it, you will likely only have to change the script in healthcheck.py. 13 | You can test if the challenge replies as expected or better add a full example 14 | solution that will try to get the flag from the challenge. 15 | -------------------------------------------------------------------------------- /dist/challenge-templates/web/healthcheck/healthcheck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright 2020 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import pwnlib.util.web 18 | 19 | if b"imanode" in pwnlib.util.web.wget("http://localhost:1337/nodejs?edonami"): 20 | exit(0) 21 | 22 | exit(1) 23 | -------------------------------------------------------------------------------- /dist/challenge-templates/web/healthcheck/healthcheck_loop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # https://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | set -Eeuo pipefail 16 | 17 | TIMEOUT=20 18 | PERIOD=30 19 | 20 | export TERM=linux 21 | export TERMINFO=/etc/terminfo 22 | 23 | while true; do 24 | echo -n "[$(date)] " 25 | if timeout "${TIMEOUT}" /home/user/healthcheck.py; then 26 | echo 'ok' | tee /tmp/healthz 27 | else 28 | echo -n "$? 
" 29 | echo 'err' | tee /tmp/healthz 30 | fi 31 | sleep "${PERIOD}" 32 | done 33 | -------------------------------------------------------------------------------- /dist/challenge-templates/web/healthcheck/healthz_webserver.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright 2020 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | import http.server 17 | 18 | class HealthzHandler(http.server.BaseHTTPRequestHandler): 19 | def do_GET(self): 20 | if self.path != '/healthz': 21 | self.send_response(404) 22 | self.send_header("Content-length", "0") 23 | self.end_headers() 24 | return 25 | 26 | content = b'err' 27 | try: 28 | with open('/tmp/healthz', 'rb') as fd: 29 | content = fd.read().strip() 30 | except: 31 | pass 32 | self.send_response(200 if content == b'ok' else 400) 33 | self.send_header("Content-type", "text/plain") 34 | self.send_header("Content-length", str(len(content))) 35 | self.end_headers() 36 | self.wfile.write(content) 37 | 38 | httpd = http.server.HTTPServer(('', 45281), HealthzHandler) 39 | httpd.serve_forever() 40 | -------------------------------------------------------------------------------- /dist/challenge-templates/xss-bot/README.md: -------------------------------------------------------------------------------- 1 | = Example XSS Bot = 2 | 3 | This bot will read a url from the user and then connect to it using chrome (puppeteer). 4 | For the simplest setup, it should be enough to modify the `challenge/cookie` 5 | file and deploy. 6 | -------------------------------------------------------------------------------- /dist/challenge-templates/xss-bot/challenge.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kctf.dev/v1 2 | kind: Challenge 3 | metadata: 4 | name: xss-bot 5 | spec: 6 | deployed: true 7 | powDifficultySeconds: 0 8 | network: 9 | public: false 10 | healthcheck: 11 | # TIP: disable the healthcheck during development 12 | enabled: true 13 | # You can allow the bot to connect to other challenges internally. 14 | # This can be useful during testing so that you don't have to make your 15 | # challenge public. 16 | # The challenge will be reachable at $name.default.svc.cluster.local or 17 | # simply at $name with the default k8s search list. 18 | #allowConnectTo: 19 | # - otherchallenge 20 | -------------------------------------------------------------------------------- /dist/challenge-templates/xss-bot/challenge/.puppeteerrc.cjs: -------------------------------------------------------------------------------- 1 | 2 | const {join} = require('path'); 3 | 4 | 5 | /** 6 | * @type {import("puppeteer").Configuration} 7 | */ 8 | module.exports = { 9 | // Changes the cache location for Puppeteer. 
10 | cacheDirectory: join(__dirname, ".cache", "puppeteer"), 11 | }; 12 | -------------------------------------------------------------------------------- /dist/challenge-templates/xss-bot/challenge/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | FROM gcr.io/kctf-docker/challenge@sha256:9f15314c26bd681a043557c9f136e7823414e9e662c08dde54d14a6bfd0b619f 15 | 16 | RUN apt-get update && apt-get install -y gnupg2 wget 17 | 18 | # Install latest chrome dev package and fonts to support major charsets (Chinese, Japanese, Arabic, Hebrew, Thai and a few others) 19 | # Note: this installs the necessary libs to make the bundled version of Chromium that Puppeteer installs, work. 20 | # Deps from https://github.com/puppeteer/puppeteer/blob/main/docs/troubleshooting.md#chrome-headless-doesnt-launch-on-unix 21 | # plus libxshmfence1 which seems to be missing 22 | RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ 23 | && sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list' \ 24 | && wget -q -O - https://deb.nodesource.com/setup_20.x | bash - \ 25 | && apt-get update \ 26 | && DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends \ 27 | ca-certificates \ 28 | fonts-liberation \ 29 | libappindicator3-1 \ 30 | libasound2t64 \ 31 | libatk-bridge2.0-0 \ 32 | libatk1.0-0 \ 33 | libc6 \ 34 | libcairo2 \ 35 | libcups2 \ 36 | libdbus-1-3 \ 37 | libexpat1 \ 38 | libfontconfig1 \ 39 | libgbm1 \ 40 | libgcc1 \ 41 | libglib2.0-0 \ 42 | libgtk-3-0 \ 43 | libnspr4 \ 44 | libnss3 \ 45 | libpango-1.0-0 \ 46 | libpangocairo-1.0-0 \ 47 | libstdc++6 \ 48 | libx11-6 \ 49 | libx11-xcb1 \ 50 | libxcb1 \ 51 | libxcomposite1 \ 52 | libxcursor1 \ 53 | libxdamage1 \ 54 | libxext6 \ 55 | libxfixes3 \ 56 | libxi6 \ 57 | libxrandr2 \ 58 | libxrender1 \ 59 | libxshmfence1 \ 60 | libxss1 \ 61 | libxtst6 \ 62 | lsb-release \ 63 | wget \ 64 | xdg-utils \ 65 | nodejs \ 66 | && rm -rf /var/lib/apt/lists/* 67 | 68 | COPY bot.js /home/user/ 69 | COPY cookie /home/user/ 70 | COPY .puppeteerrc.cjs /home/user/ 71 | RUN cd /home/user && npm install puppeteer 72 | 73 | ENV DOMAIN="www.example.com" 74 | # Hosting multiple web challenges same-site to each other can lead to 75 | # unintended solutions. E.g. an xss on a.foo.com will be able to overwrite 76 | # cookies on b.foo.com. 77 | # To prevent this, we can block chrome from accessing any subdomains under 78 | # foo.com except for the real challenge domain using a PAC script. 79 | # Unfortunately, PAC will not work in chrome headless mode, so this will use 80 | # more resources. 
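# With the defaults here (DOMAIN=www.example.com, REGISTERED_DOMAIN=example.com),
# the PAC script generated in bot.js lets www.example.com through (DIRECT) and
# sends every other example.com host, including example.com itself, to a dead
# proxy at 127.0.0.1:1, so sibling suborigins are unreachable from the bot.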
81 | ENV BLOCK_SUBORIGINS="1"
82 | ENV REGISTERED_DOMAIN="example.com"
83 | 
84 | RUN if [ "${BLOCK_SUBORIGINS}" = "1" ]; then \
85 |       apt-get update \
86 |       && apt-get install -yq --no-install-recommends xvfb \
87 |       && rm -rf /var/lib/apt/lists/*; \
88 |     fi
89 | RUN sed -i -e "s/DOMAIN_SET_IN_DOCKERFILE/${DOMAIN}/" /home/user/cookie
90 | 
91 | CMD kctf_setup && \
92 |     mount -t tmpfs none /tmp && \
93 |     mkdir /tmp/chrome-userdata && chmod o+rwx /tmp/chrome-userdata && \
94 |     while true; do \
95 |       if [ "${BLOCK_SUBORIGINS}" = "1" ]; then \
96 |         kctf_drop_privs env BLOCK_SUBORIGINS="${BLOCK_SUBORIGINS}" DOMAIN="${DOMAIN}" REGISTERED_DOMAIN="${REGISTERED_DOMAIN}" xvfb-run /usr/bin/node /home/user/bot.js; \
97 |       else \
98 |         kctf_drop_privs env BLOCK_SUBORIGINS="${BLOCK_SUBORIGINS}" DOMAIN="${DOMAIN}" REGISTERED_DOMAIN="${REGISTERED_DOMAIN}" /usr/bin/node /home/user/bot.js; \
99 |       fi; \
100 |     done & \
101 |     kctf_drop_privs \
102 |     socat \
103 |       TCP-LISTEN:1337,reuseaddr,fork \
104 |       EXEC:"kctf_pow socat STDIN TCP\:localhost\:1338"
105 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/xss-bot/challenge/bot.js:
--------------------------------------------------------------------------------
1 | const puppeteer = require('puppeteer');
2 | const fs = require('fs');
3 | const net = require('net');
4 | 
5 | const DOMAIN = process.env.DOMAIN;
6 | if (DOMAIN == undefined) throw 'domain undefined'
7 | const REGISTERED_DOMAIN = process.env.REGISTERED_DOMAIN;
8 | const BLOCK_SUBORIGINS = process.env.BLOCK_SUBORIGINS == "1";
9 | const BOT_TIMEOUT = process.env.BOT_TIMEOUT || 60*1000;
10 | 
11 | // will only be used if BLOCK_SUBORIGINS is enabled
12 | const PAC_B64 = Buffer.from(`
13 | function FindProxyForURL (url, host) {
14 |   if (host == "${DOMAIN}") {
15 |     return 'DIRECT';
16 |   }
17 |   if (host == "${REGISTERED_DOMAIN}" || dnsDomainIs(host, ".${REGISTERED_DOMAIN}")) {
18 |     return 'PROXY 127.0.0.1:1';
19 |   }
20 |   return 'DIRECT';
21 | }
22 | `).toString('base64');
23 | const puppeter_args = {args: []};
24 | if (BLOCK_SUBORIGINS) {
25 |   puppeter_args.headless = false;
26 |   puppeter_args.args = [
27 |     '--user-data-dir=/tmp/chrome-userdata',
28 |     '--breakpad-dump-location=/tmp/chrome-crashes',
29 |     '--proxy-pac-url=data:application/x-ns-proxy-autoconfig;base64,'+PAC_B64,
30 |   ];
31 | }
32 | puppeter_args.args.push('--incognito');
33 | 
34 | (async function(){
35 |   const browser = await puppeteer.launch(puppeter_args);
36 | 
37 |   function ask_for_url(socket) {
38 |     socket.state = 'URL';
39 |     socket.write('Please send me a URL to open.\n');
40 |   }
41 | 
42 |   async function load_url(socket, data) {
43 |     let url = data.toString().trim();
44 |     console.log(`checking url: ${url}`);
45 |     if (!url.startsWith('http://') && !url.startsWith('https://')) {
46 |       socket.state = 'ERROR';
47 |       socket.write('Invalid scheme (http/https only).\n');
48 |       socket.destroy();
49 |       return;
50 |     }
51 |     socket.state = 'LOADED';
52 |     let cookie = JSON.parse(fs.readFileSync('/home/user/cookie'));
53 | 
54 |     const context = await browser.createBrowserContext();
55 |     const page = await context.newPage();
56 |     await page.setCookie(cookie);
57 |     socket.write(`Loading page ${url}.\n`);
58 |     setTimeout(()=>{
59 |       try {
60 |         context.close();
61 |         socket.write('timeout\n');
62 |         socket.destroy();
63 |       } catch (err) {
64 |         console.log(`err: ${err}`);
65 |       }
66 |     }, BOT_TIMEOUT);
67 |     await page.goto(url);
68 |   }
69 | 
70 |   var server = net.createServer();
71 |   server.listen(1338);
72 |   console.log('listening on port 1338');
73 | 
74 |   server.on('connection', socket=>{
75 |     socket.on('data', data=>{
76 |       try {
77 |         if (socket.state == 'URL') {
78 |           load_url(socket, data);
79 |         }
80 |       } catch (err) {
81 |         console.log(`err: ${err}`);
82 |       }
83 |     });
84 | 
85 |     try {
86 |       ask_for_url(socket);
87 |     } catch (err) {
88 |       console.log(`err: ${err}`);
89 |     }
90 |   });
91 | })();
92 | 
93 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/xss-bot/challenge/cookie:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "session",
3 |   "value": "aiy3Uushcha4Zuzu",
4 |   "domain": "DOMAIN_SET_IN_DOCKERFILE",
5 |   "url": "https://DOMAIN_SET_IN_DOCKERFILE/",
6 |   "path": "/",
7 |   "httpOnly": true,
8 |   "secure": true
9 | }
10 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/xss-bot/healthcheck/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | FROM gcr.io/kctf-docker/healthcheck@sha256:66b34a47e7bbb832012905e229da0bbed80c5c3cddd4703127ca4026ba528cfc
15 | 
16 | COPY healthcheck_loop.sh healthcheck.py healthz_webserver.py /home/user/
17 | 
18 | CMD kctf_drop_privs /home/user/healthcheck_loop.sh & /home/user/healthz_webserver.py
19 | 
--------------------------------------------------------------------------------
/dist/challenge-templates/xss-bot/healthcheck/README.md:
--------------------------------------------------------------------------------
1 | # Healthcheck
2 | 
3 | kCTF checks the health of challenges by accessing the healthcheck via
4 | http://host:45281/healthz which needs to return either 200 ok or an error
5 | depending on the status of the challenge.
6 | 
7 | The default healthcheck consists of:
8 | * a loop that repeatedly calls a python script and writes the status to a file
9 | * a webserver that checks the file and serves /healthz
10 | * the actual healthcheck code using pwntools for convenience
11 | 
12 | To modify it, you will likely only have to change the script in healthcheck.py.
13 | You can test if the challenge replies as expected or better add a full example
14 | solution that will try to get the flag from the challenge.
15 | 
-------------------------------------------------------------------------------- /dist/challenge-templates/xss-bot/healthcheck/healthcheck.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | 
4 | from pwn import *
5 | 
6 | r = remote('127.0.0.1', 1337)
7 | l = listen()
8 | 
9 | r.readuntil(b'URL to open.', timeout=10)
10 | r.send(bytes('http://localhost:{}/ok'.format(l.lport), 'ascii'))
11 | 
12 | _ = l.wait_for_connection()
13 | l.readuntil(b'GET /ok HTTP/1.1')
14 | l.send(b'HTTP/1.1 200 OK\nContent-Length: 0\n\n')
15 | 
16 | exit(0)
-------------------------------------------------------------------------------- /dist/challenge-templates/xss-bot/healthcheck/healthcheck_loop.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2020 Google LLC
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     https://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | set -Eeuo pipefail
16 | 
17 | TIMEOUT=20
18 | PERIOD=30
19 | 
20 | export TERM=linux
21 | export TERMINFO=/etc/terminfo
22 | 
23 | while true; do
24 |   echo -n "[$(date)] "
25 |   if timeout "${TIMEOUT}" /home/user/healthcheck.py; then
26 |     echo 'ok' | tee /tmp/healthz
27 |   else
28 |     echo -n "$? "
29 |     echo 'err' | tee /tmp/healthz
30 |   fi
31 |   sleep "${PERIOD}"
32 | done
-------------------------------------------------------------------------------- /dist/challenge-templates/xss-bot/healthcheck/healthz_webserver.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # Copyright 2020 Google LLC
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | #     https://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
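# Tiny status server: healthcheck_loop.sh above writes "ok" or "err" to
# /tmp/healthz, and this process reports that verdict on
# http://0.0.0.0:45281/healthz for Kubernetes to poll.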
16 | import http.server
17 | 
18 | class HealthzHandler(http.server.BaseHTTPRequestHandler):
19 |     def do_GET(self):
20 |         if self.path != '/healthz':
21 |             self.send_response(404)
22 |             self.send_header("Content-length", "0")
23 |             self.end_headers()
24 |             return
25 | 
26 |         content = b'err'
27 |         try:
28 |             with open('/tmp/healthz', 'rb') as fd:
29 |                 content = fd.read().strip()
30 |         except OSError:
31 |             pass
32 |         self.send_response(200 if content == b'ok' else 400)
33 |         self.send_header("Content-type", "text/plain")
34 |         self.send_header("Content-length", str(len(content)))
35 |         self.end_headers()
36 |         self.wfile.write(content)
37 | 
38 | httpd = http.server.HTTPServer(('', 45281), HealthzHandler)
39 | httpd.serve_forever()
-------------------------------------------------------------------------------- /docker-images/certbot/Dockerfile: --------------------------------------------------------------------------------
1 | FROM ubuntu:24.04
2 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y certbot python3-certbot-dns-google curl jq
3 | RUN curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl
4 | COPY certbot.sh certbot.sh
5 | RUN chmod +x certbot.sh
6 | CMD ./certbot.sh
-------------------------------------------------------------------------------- /docker-images/certbot/certbot.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | if [ -z "${DOMAIN}" ]; then
4 |   echo "Make sure the DOMAIN environment variable points to the domain."
5 |   exit 1
6 | fi
7 | 
8 | if [ -z "${SECRET}" ]; then
9 |   echo "Make sure the SECRET environment variable points to the k8s secret."
10 |   exit 1
11 | fi
12 | 
13 | if [ -z "${PROD}" ]; then
14 |   echo "Making a TEST certificate because PROD environment variable is NOT set."
15 |   TEST="--test-cert"
16 | else
17 |   echo "Making a REAL certificate because PROD environment variable is set."
18 |   TEST=""
19 | fi
20 | 
21 | if [ -z "${EMAIL}" ]; then
22 |   echo "Registering certificate unsafely without email. Pass an EMAIL to register an account with an email address."
23 | EMAIL_FLAG="--register-unsafely-without-email" 24 | else 25 | EMAIL_FLAG="-m ${EMAIL}" 26 | fi 27 | 28 | function request_certificate() { 29 | certbot certonly ${TEST} --non-interactive --agree-tos ${EMAIL_FLAG} --dns-google -d '*.'"${DOMAIN}" --dns-google-propagation-seconds 120 30 | } 31 | 32 | function update_tls_secret() { 33 | ./kubectl create secret tls "${SECRET}" --cert /etc/letsencrypt/live/"${DOMAIN}"/fullchain.pem --key /etc/letsencrypt/live/"${DOMAIN}"/privkey.pem --namespace kctf-system --dry-run=client --save-config -o yaml | ./kubectl apply -f - 34 | } 35 | 36 | function check_tls_validity() { 37 | ./kubectl get secret "${SECRET}" --namespace kctf-system -o 'jsonpath={.data}' | jq -r '.["tls.crt"]' | base64 -d | openssl x509 -checkend 2592000 -noout -in - 38 | } 39 | 40 | while true; do 41 | echo "Waiting 2 minutes to avoid hitting rate limits" 42 | sleep 2m 43 | if check_tls_validity; then 44 | echo "Certificate is valid for at least 30 days" 45 | else 46 | request_certificate && update_tls_secret && echo "TLS cert updated" 47 | fi 48 | done 49 | -------------------------------------------------------------------------------- /docker-images/challenge/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 
15 | # build nsjail first
16 | FROM ubuntu:24.04 AS nsjail
17 | 
18 | ENV BUILD_PACKAGES build-essential git protobuf-compiler libprotobuf-dev bison flex pkg-config libnl-route-3-dev ca-certificates
19 | ENV NSJAIL_COMMIT 3677ccbe45b184bd4600415cbfb48762a2735674
20 | 
21 | RUN apt-get update \
22 |     && apt-get install -yq --no-install-recommends $BUILD_PACKAGES \
23 |     && rm -rf /var/lib/apt/lists/* \
24 |     && git clone https://github.com/google/nsjail.git \
25 |     && cd /nsjail && git checkout $NSJAIL_COMMIT && make -j && cp nsjail /usr/bin/ \
26 |     && rm -R /nsjail
27 | 
28 | # challenge image
29 | FROM ubuntu:24.04
30 | 
31 | RUN apt-get update \
32 |     && apt-get install -yq --no-install-recommends build-essential python3-dev python3 python3-pip libgmp3-dev libmpc-dev uidmap libprotobuf32t64 libnl-route-3-200 wget netcat-traditional ca-certificates socat \
33 |     && rm -rf /var/lib/apt/lists/*
34 | 
35 | # ubuntu24 includes the ubuntu user by default
36 | RUN /usr/sbin/userdel -r ubuntu && /usr/sbin/useradd --no-create-home -u 1000 user
37 | 
38 | COPY --from=nsjail /usr/bin/nsjail /usr/bin/nsjail
39 | 
40 | # gmpy2 and ecdsa used by the proof of work
41 | RUN python3 -m pip install --break-system-packages ecdsa gmpy2
42 | 
43 | # we need a clean proc to allow nsjail to remount it in the user namespace
44 | RUN mkdir /kctf
45 | RUN mkdir -p /kctf/.fullproc/proc
46 | RUN chmod 0700 /kctf/.fullproc
47 | 
48 | COPY kctf_setup /usr/bin/
49 | COPY kctf_drop_privs /usr/bin/
50 | COPY kctf_pow /usr/bin/
51 | COPY pow.py /kctf/
-------------------------------------------------------------------------------- /docker-images/challenge/kctf_drop_privs: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # There are two copies of this file in the nsjail and healthcheck base images.
4 | 
5 | all_caps="-cap_0"
6 | for i in $(seq 1 $(cat /proc/sys/kernel/cap_last_cap)); do
7 |   all_caps+=",-cap_${i}"
8 | done
9 | 
10 | exec setpriv --init-groups --reset-env --reuid user --regid user --inh-caps=${all_caps} -- "$@"
-------------------------------------------------------------------------------- /docker-images/challenge/kctf_pow: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2020 Google LLC
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     https://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | POW_FILE="/kctf/pow/pow.conf"
17 | 
18 | if [ -f "${POW_FILE}" ]; then
19 |   POW="$(cat "${POW_FILE}")"
20 |   if ! /kctf/pow.py ask "${POW}"; then
21 |     echo 'pow fail'
22 |     exit 1
23 |   fi
24 | fi
25 | 
26 | exec "$@"
-------------------------------------------------------------------------------- /docker-images/challenge/kctf_setup: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2020 Google LLC
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at 7 | # 8 | # https://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | set -Eeuxo pipefail 16 | 17 | # We need a clean proc for user namespaces 18 | mount -t proc none /kctf/.fullproc/proc 19 | -------------------------------------------------------------------------------- /docker-images/challenge/pow.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright 2020 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import base64 18 | import os 19 | import secrets 20 | import socket 21 | import sys 22 | import hashlib 23 | 24 | try: 25 | import gmpy2 26 | HAVE_GMP = True 27 | except ImportError: 28 | HAVE_GMP = False 29 | sys.stderr.write("[NOTICE] Running 10x slower, gotta go fast? pip3 install gmpy2\n") 30 | 31 | VERSION = 's' 32 | MODULUS = 2**1279-1 33 | CHALSIZE = 2**128 34 | 35 | SOLVER_URL = 'https://goo.gle/kctf-pow' 36 | 37 | def python_sloth_root(x, diff, p): 38 | exponent = (p + 1) // 4 39 | for i in range(diff): 40 | x = pow(x, exponent, p) ^ 1 41 | return x 42 | 43 | def python_sloth_square(y, diff, p): 44 | for i in range(diff): 45 | y = pow(y ^ 1, 2, p) 46 | return y 47 | 48 | def gmpy_sloth_root(x, diff, p): 49 | exponent = (p + 1) // 4 50 | for i in range(diff): 51 | x = gmpy2.powmod(x, exponent, p).bit_flip(0) 52 | return int(x) 53 | 54 | def gmpy_sloth_square(y, diff, p): 55 | y = gmpy2.mpz(y) 56 | for i in range(diff): 57 | y = gmpy2.powmod(y.bit_flip(0), 2, p) 58 | return int(y) 59 | 60 | def sloth_root(x, diff, p): 61 | if HAVE_GMP: 62 | return gmpy_sloth_root(x, diff, p) 63 | else: 64 | return python_sloth_root(x, diff, p) 65 | 66 | def sloth_square(x, diff, p): 67 | if HAVE_GMP: 68 | return gmpy_sloth_square(x, diff, p) 69 | else: 70 | return python_sloth_square(x, diff, p) 71 | 72 | def encode_number(num): 73 | size = (num.bit_length() // 24) * 3 + 3 74 | return str(base64.b64encode(num.to_bytes(size, 'big')), 'utf-8') 75 | 76 | def decode_number(enc): 77 | return int.from_bytes(base64.b64decode(bytes(enc, 'utf-8')), 'big') 78 | 79 | def decode_challenge(enc): 80 | dec = enc.split('.') 81 | if dec[0] != VERSION: 82 | raise Exception('Unknown challenge version') 83 | return list(map(decode_number, dec[1:])) 84 | 85 | def encode_challenge(arr): 86 | return '.'.join([VERSION] + list(map(encode_number, arr))) 87 | 88 | def get_challenge(diff): 89 | x = secrets.randbelow(CHALSIZE) 90 | return encode_challenge([diff, x]) 91 | 92 | def solve_challenge(chal): 93 | [diff, x] = decode_challenge(chal) 94 | y = sloth_root(x, diff, MODULUS) 95 | return 
encode_challenge([y])
96 | 
97 | def can_bypass(chal, sol):
98 |     from ecdsa import VerifyingKey
99 |     from ecdsa.util import sigdecode_der
100 |     if not sol.startswith('b.'):
101 |         return False
102 |     sig = bytes.fromhex(sol[2:])
103 |     with open("/kctf/pow-bypass/pow-bypass-key-pub.pem", "r") as fd:
104 |         vk = VerifyingKey.from_pem(fd.read())
105 |     return vk.verify(signature=sig, data=bytes(chal, 'ascii'), hashfunc=hashlib.sha256, sigdecode=sigdecode_der)
106 | 
107 | def verify_challenge(chal, sol, allow_bypass=True):
108 |     if allow_bypass and can_bypass(chal, sol):
109 |         return True
110 |     [diff, x] = decode_challenge(chal)
111 |     [y] = decode_challenge(sol)
112 |     res = sloth_square(y, diff, MODULUS)
113 |     return (x == res) or (MODULUS - x == res)
114 | 
115 | def usage():
116 |     sys.stdout.write('Usage:\n')
117 |     sys.stdout.write('Solve pow: {} solve $challenge\n'.format(sys.argv[0]))
118 |     sys.stdout.write('Check pow: {} ask $difficulty\n'.format(sys.argv[0]))
119 |     sys.stdout.write('  $difficulty examples (for 1.6GHz CPU) in fast mode:\n')
120 |     sys.stdout.write('      1337:   1 sec\n')
121 |     sys.stdout.write('     31337:  30 secs\n')
122 |     sys.stdout.write('    313373:   5 mins\n')
123 |     sys.stdout.flush()
124 |     sys.exit(1)
125 | 
126 | def main():
127 |     if len(sys.argv) != 3:
128 |         usage()
129 | 
130 |     cmd = sys.argv[1]
131 | 
132 |     if cmd == 'ask':
133 |         difficulty = int(sys.argv[2])
134 | 
135 |         if difficulty == 0:
136 |             sys.stdout.write("== proof-of-work: disabled ==\n")
137 |             sys.exit(0)
138 | 
139 |         challenge = get_challenge(difficulty)
140 | 
141 |         sys.stdout.write("== proof-of-work: enabled ==\n")
142 |         sys.stdout.write("please solve a pow first\n")
143 |         sys.stdout.write("You can run the solver with:\n")
144 |         sys.stdout.write("    python3 <(curl -sSL {}) solve {}\n".format(SOLVER_URL, challenge))
145 |         sys.stdout.write("===================\n")
146 |         sys.stdout.write("\n")
147 |         sys.stdout.write("Solution? ")
148 |         sys.stdout.flush()
149 |         solution = ''
150 |         with os.fdopen(0, "rb", 0) as f:
151 |             while not solution:
152 |                 line = f.readline().decode("utf-8")
153 |                 if not line:
154 |                     sys.stdout.write("EOF\n")
155 |                     sys.stdout.flush()
156 |                     sys.exit(1)
157 |                 solution = line.strip()
158 | 
159 |         if verify_challenge(challenge, solution):
160 |             sys.stdout.write("Correct\n")
161 |             sys.stdout.flush()
162 |             sys.exit(0)
163 |         else:
164 |             sys.stdout.write("Proof-of-work fail\n")
165 |             sys.stdout.flush()
166 | 
167 |     elif cmd == 'solve':
168 |         challenge = sys.argv[2]
169 |         solution = solve_challenge(challenge)
170 | 
171 |         if verify_challenge(challenge, solution, False):
172 |             sys.stderr.write("Solution:\n")
173 |             sys.stderr.flush()
174 |             sys.stdout.write(solution)
175 |             sys.stdout.flush()
176 |             sys.stderr.write("\n")
177 |             sys.stderr.flush()
178 |             sys.exit(0)
179 |     else:
180 |         usage()
181 | 
182 |     sys.exit(1)
183 | 
184 | if __name__ == "__main__":
185 |     main()
-------------------------------------------------------------------------------- /docker-images/gcsfuse/Dockerfile: --------------------------------------------------------------------------------
1 | FROM ubuntu:24.04
2 | 
3 | RUN apt-get update && apt-get install -y wget fuse
4 | RUN wget -q https://github.com/GoogleCloudPlatform/gcsfuse/releases/download/v1.4.2/gcsfuse_1.4.2_amd64.deb && dpkg -i gcsfuse_1.4.2_amd64.deb
5 | RUN mkdir -p /mnt/disks/gcs
6 | 
7 | CMD test -f /config/gcs_bucket && \
8 |     gcsfuse --foreground --debug_fuse --debug_gcs --stat-cache-ttl 0 -o allow_other --file-mode 0777 --dir-mode 0777 --uid 1000 --gid 1000 "$(cat /config/gcs_bucket)" /mnt/disks/gcs
-------------------------------------------------------------------------------- /docker-images/healthcheck/Dockerfile: --------------------------------------------------------------------------------
1 | # Copyright 2025 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | FROM ubuntu:24.04
15 | 
16 | ENV BUILD_PACKAGES python3-pip build-essential python3-dev
17 | 
18 | RUN apt-get update \
19 |     && apt-get -yq --no-install-recommends install $BUILD_PACKAGES \
20 |     && rm -rf /var/lib/apt/lists/* \
21 |     && python3 -m pip install --break-system-packages pwntools \
22 |     && apt-get remove --purge -y $BUILD_PACKAGES && apt-get autoremove -y
23 | 
24 | RUN apt-get update && apt-get -yq --no-install-recommends install cpio openssl python3 && rm -rf /var/lib/apt/lists/*
25 | 
26 | # ubuntu24 includes the ubuntu user by default
27 | RUN /usr/sbin/userdel -r ubuntu && /usr/sbin/useradd --no-create-home -u 1000 user
28 | 
29 | RUN mkdir -p /home/user/.pwntools-cache && echo never > /home/user/.pwntools-cache/update
30 | 
31 | COPY kctf_drop_privs /usr/bin/
32 | COPY kctf_bypass_pow /usr/bin/
33 | 
-------------------------------------------------------------------------------- /docker-images/healthcheck/kctf_bypass_pow: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | CHALLENGE="$1"
4 | 
5 | KEY="/pow-bypass/pow-bypass-key.pem"
6 | 
7 | SIG=$(echo -n "${CHALLENGE}" | openssl dgst -SHA256 -hex -sign "${KEY}" - | awk '{print $2}')
8 | 
9 | echo -n "b.${SIG}"
-------------------------------------------------------------------------------- /docker-images/healthcheck/kctf_drop_privs: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # There are two copies of this file in the nsjail and healthcheck base images.
4 | 
5 | all_caps="-cap_0"
6 | for i in $(seq 1 $(cat /proc/sys/kernel/cap_last_cap)); do
7 |   all_caps+=",-cap_${i}"
8 | done
9 | 
10 | exec setpriv --init-groups --reset-env --reuid user --regid user --inh-caps=${all_caps} -- "$@"
-------------------------------------------------------------------------------- /docs/_config.yml: --------------------------------------------------------------------------------
1 | theme: jekyll-theme-leap-day
2 | title: kCTF
3 | description: kCTF is a Kubernetes-based infrastructure for CTF competitions
-------------------------------------------------------------------------------- /docs/_layouts/default.html: --------------------------------------------------------------------------------
[Jekyll layout; the HTML markup was mangled in extraction. Recoverable structure: the template loads {% seo %} and the theme assets, renders a header with {{ page.title | default: site.title | default: site.github.repository_name }} and {{ page.description | default: site.description | default: site.github.project_tagline }}, injects {{ content }} into the main section, and appends a Google Analytics snippet when site.google_analytics is set.]
-------------------------------------------------------------------------------- /docs/ctf-playbook.md: --------------------------------------------------------------------------------
1 | # CTF Playbook
2 | 
3 | Now that your challenges are ready, let's talk about how to set up your cluster to ensure that everything runs smoothly during the CTF event itself.
4 | 
5 | If you haven't set up a cluster yet, please follow the [Google Cloud walkthrough](google-cloud.md) to do so.
6 | 
7 | Also, make sure that every challenge has a working healthcheck. This will allow Kubernetes to automatically restart challenges that stop working.
8 | 
9 | ## Scaling
10 | 
11 | You want to make sure that all the challenges scale with the amount of traffic they receive.
12 | 
13 | First of all, we need to make sure that the cluster has enough nodes (VMs) to run the challenges on.
14 | You can use `kctf cluster resize` to add a new node pool to your cluster and delete the old one.
15 | The parameters allow you to configure the minimum and maximum number of nodes for automatic scaling as well as what kind of machines to use. For example:
16 | 
17 | ```sh
18 | kctf cluster resize --min-nodes 4 --max-nodes 16 --num-nodes 5 --machine-type n2-standard-4
19 | ```
20 | 
21 | | :warning: the maximum number of nodes may be limited by the [cloud project quotas](https://cloud.google.com/compute/quotas) |
22 | | --- |
23 | 
24 | After enabling scaling for the number of nodes, we also want to enable scaling for the challenges. You can do this by adding a `horizontalPodAutoscalerSpec` and a resource request to the `challenge.yaml`.
25 | For available fields, see:
26 | 
27 | ```sh
28 | kubectl explain challenge.spec.horizontalPodAutoscalerSpec
29 | ```
30 | 
31 | An example spec can look like this:
32 | 
33 | ```yaml
34 | apiVersion: kctf.dev/v1
35 | kind: Challenge
36 | metadata:
37 |   name: mychallenge
38 | spec:
39 |   # [...]
40 |   horizontalPodAutoscalerSpec:
41 |     maxReplicas: 8
42 |     minReplicas: 2
43 |     targetCPUUtilizationPercentage: 60
44 |   podTemplate:
45 |     template:
46 |       spec:
47 |         containers:
48 |           - name: 'challenge'
49 |             resources:
50 |               requests:
51 |                 memory: "1000Mi"
52 |                 cpu: "500m"
53 | ```
54 | 
55 | Start the challenge:
56 | 
57 | ```sh
58 | kctf chal start
59 | ```
60 | 
61 | And you can confirm that the autoscaler was created with `kubectl`:
62 | 
63 | ```sh
64 | kubectl get horizontalPodAutoscaler
65 | ```
66 | 
67 | ### Proof of Work
68 | 
69 | If you notice that one of the challenges uses a large amount of resources, you can enable a proof of work on every connection.
70 | Simply set the `powDifficultySeconds` parameter and restart the challenge:
71 | 
72 | ```yaml
73 | apiVersion: kctf.dev/v1
74 | kind: Challenge
75 | metadata:
76 |   name: mychallenge
77 | spec:
78 |   powDifficultySeconds: 60
79 | ```
80 | 
81 | Note that the proof of work is not supported for web challenges. For those, you can include a captcha on your web endpoints, for example [reCAPTCHA](https://www.google.com/recaptcha/about/).
82 | 
83 | ## Monitoring
84 | 
85 | We all know IRC is the best way to get alerts about broken challenges :). But if you want to keep an eye on the challenges and catch potential issues early, there are a few ways to check their health.
86 | 
87 | First of all, you can list all challenges with `kubectl`; this will show you which ones the healthcheck deems healthy and which it does not. Remember, healthchecks are important!
88 | 
89 | ```sh
90 | $ kubectl get challenges
91 | NAME          HEALTH    STATUS    DEPLOYED   PUBLIC
92 | mychallenge   healthy   Running   true       true
93 | $ cd mychallenge
94 | $ kctf chal status
95 | ```
96 | 
97 | If any of the challenges are broken, you can check out our [troubleshooting docs](troubleshooting.md) for some debugging tips.
98 | 
99 | Another option is to use the [Google Cloud web UI](https://console.cloud.google.com), which shows you various information about your cluster. For example:
100 | * [Clusters](https://console.cloud.google.com/kubernetes/list) includes how many nodes are currently running.
101 | * [Workloads](https://console.cloud.google.com/kubernetes/workload) has data on CPU/Memory/Disk usage of every challenge.
102 | * [Monitoring](https://console.cloud.google.com/monitoring) allows you to create dashboards and set up alerts.
-------------------------------------------------------------------------------- /docs/custom-domains.md: --------------------------------------------------------------------------------
1 | # Custom Domains
2 | 
3 | When creating your cluster, you can specify a domain with the `--domain-name` flag.
4 | kCTF will then automatically create domain names for challenges of the form:
5 | * $chal\_name.$kctf\_domain for TCP-based challenges
6 | * $chal\_name-web.$kctf\_domain for HTTPS-based challenges
7 | 
8 | You might want to use custom domains for some of your challenges, for example:
9 | * if you need to have a challenge available on multiple host names
10 | * to protect web challenges against same-site attacks
11 | * or simply if you want to have a fancy domain name
12 | 
13 | For TCP-based challenges, all you need to do is create a CNAME DNS entry from $cooldomain to $chal\_name.$kctf\_domain.
14 | 
15 | For HTTPS-based challenges, you also need to add a CNAME entry (pay attention to the -web suffix) and, in addition, list the domain in the port configuration of the challenge:
16 | ```yaml
17 | apiVersion: kctf.dev/v1
18 | kind: Challenge
19 | metadata:
20 |   name: web
21 | spec:
22 |   deployed: true
23 |   powDifficultySeconds: 0
24 |   network:
25 |     public: true
26 |     ports:
27 |       - protocol: "HTTPS"
28 |         targetPort: 1337
29 |         domains:
30 |           - "cooldomain.com"
31 | ```
32 | With this, kCTF will automatically create a certificate for you and attach it to the challenge's LoadBalancer.
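
If you want to double-check that the CNAME and the certificate are wired up correctly before the competition, a quick client-side probe is enough. The following is a minimal sketch (not part of kCTF); it assumes the placeholder `cooldomain.com` from the example above and a live HTTPS load balancer:

```python
#!/usr/bin/env python3
# Minimal sketch: verify that a custom challenge domain resolves and that the
# certificate kCTF provisioned actually covers it. "cooldomain.com" is the
# placeholder from the example above, not a real deployment.
import socket
import ssl

domain = 'cooldomain.com'

# DNS: the CNAME should ultimately resolve to the load balancer's IP.
print(domain, '->', socket.gethostbyname(domain))

# TLS: a successful handshake with hostname checking enabled proves that the
# served certificate is valid for the custom domain.
ctx = ssl.create_default_context()
with ctx.wrap_socket(socket.create_connection((domain, 443), timeout=10),
                     server_hostname=domain) as s:
    print('certificate ok, expires:', s.getpeercert()['notAfter'])
```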
33 | 
-------------------------------------------------------------------------------- /docs/images/flag-locations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/kctf/9a27df49d0ee1360109e9eb493e9fe3875f180bc/docs/images/flag-locations.png -------------------------------------------------------------------------------- /docs/images/introduction-k8s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/kctf/9a27df49d0ee1360109e9eb493e9fe3875f180bc/docs/images/introduction-k8s.png -------------------------------------------------------------------------------- /docs/images/php_sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/kctf/9a27df49d0ee1360109e9eb493e9fe3875f180bc/docs/images/php_sample.png -------------------------------------------------------------------------------- /docs/images/threat-model-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/kctf/9a27df49d0ee1360109e9eb493e9fe3875f180bc/docs/images/threat-model-graph.png -------------------------------------------------------------------------------- /docs/index.md: --------------------------------------------------------------------------------
1 | # kCTF
2 | 
3 | kCTF is CTF infrastructure written on top of Kubernetes. It allows you to manage, deploy, and sandbox CTF challenges.
4 | 
5 | ## [Try it – it takes 5 minutes to get started](local-testing.md)
6 | 
7 | > We built kCTF to help CTF organizers have great CTF infrastructure tooling, and to give challenge authors an environment where they can build and test their challenges without having to learn about Kubernetes, while also allowing Kubernetes experts to use all the features they are accustomed to.
8 | 
9 | 
10 | | **Versatile** | **Specialized** | **Secure** |
11 | |:--------------|:----------------|:-----------|
12 | | A simple challenge can be configured in a single Dockerfile. More complex deployments can use all Kubernetes features. | Common CTF needs are provided as services (proof of work, healthchecks, DNS and SSL certificates, network fileshare). | kCTF was built by Google's Security Team with the goal of hosting untrusted, vulnerable applications as part of CTF competitions. |
13 | 
14 | ## Preview
15 | You can have a local challenge running in 5 minutes.
16 | 
17 | [asciinema demo](https://asciinema.org/a/sePuQKLBHaO3JOtQj9gWayWvU)
18 | 
19 | 
20 | See the [Local Testing Walkthrough](local-testing.md) to get started with a local challenge.
21 | 
22 | ## Google Cloudshell Codelab
23 | To try this out on a real server, try out the Google Cloudshell codelab.
24 | 
25 | [![Open in Cloudshell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/google/kctf&tutorial=docs/google-cloud.md&shellonly=true)
26 | 
27 | ## Online demo
28 | We have a fake challenge running; you can see what an isolated challenge would look like by connecting directly to:
29 | ```bash
30 | nc kctf.vrp.ctfcompetition.com 1337
31 | ```
32 | 
33 | If you are able to break out of it, you can [earn up to $133,337 USD](vrp.md).
34 | 
35 | **Tip**: Execute `bash -i 2>&1` to get an interactive bash session.
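
The same endpoint can also be scripted, which is handy for testing. This is a minimal sketch (not part of the repo); note that when the proof-of-work is enabled, the service first prints a solver command that has to be run:

```python
#!/usr/bin/env python3
# Minimal sketch: connect to the public demo challenge with pwntools instead
# of nc. If the proof-of-work is enabled, the printed banner contains the
# solver command that must be completed before the shell becomes available.
from pwn import remote

io = remote('kctf.vrp.ctfcompetition.com', 1337)
print(io.recvrepeat(3).decode(errors='replace'))  # banner or pow prompt
io.sendline(b'bash -i 2>&1')  # same trick as the tip above
io.interactive()
```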
36 | 
37 | ## Available Documentation
38 | 
39 | * [Local Testing Walkthrough](local-testing.md) – A quick start guide showing you how to build and test challenges locally.
40 | * [kCTF in 8 Minutes](introduction.md) – A quick 8-minute summary of what kCTF is and how it interacts with Kubernetes.
41 | * [Google Cloud Walkthrough](google-cloud.md) – Once you have everything up and running, try deploying to Google Cloud.
42 | * [Custom Domains](custom-domains.md) – How to add custom domains for your challenges.
43 | * [Troubleshooting](troubleshooting.md) – Help with fixing broken challenges.
44 | * [CTF playbook](ctf-playbook.md) – How to set up your cluster and challenges to scale during a CTF.
45 | * [Security Threat Model](security-threat-model.md) – Security considerations regarding kCTF including information on assets, risks, and potential attackers.
46 | * [kCTF VRP Setup](vrp.md) – Demonstrate an exploit against our kCTF demo cluster based on the challenges presented on this page.
-------------------------------------------------------------------------------- /docs/kctf-operator.md: --------------------------------------------------------------------------------
1 | This document is outdated.
2 | 
3 | Please check
4 | 
5 | https://github.com/google/kctf/issues/356
6 | 
7 | ---
8 | 
9 | # Developing the operator
10 | 
11 | ## Introduction
12 | 
13 | The kctf-operator is responsible for deploying the Kubernetes configurations in the cluster and
14 | keeps everything up to date according to the configuration we put in the Custom Resource specific to the challenge.
15 | 
16 | This operator is created automatically in the scripts when you run `start.sh`.
17 | In case you want to run it alone, go directly to [Testing locally](https://github.com/google/kctf/blob/beta/docs/kctf-operator.md#testing-locally) or [Deploying the operator](https://github.com/google/kctf/blob/beta/docs/kctf-operator.md#deploying-the-operator). They correspond, respectively, to local testing and cluster testing.
18 | 
19 | This code was implemented using operator-sdk 0.18, so ensure you have it installed.
20 | If you want to know more about what you can do using operator-sdk, you can access: https://v0-18-x.sdk.operatorframework.io/docs/golang/quickstart/.
21 | 
22 | ## Changing the code
23 | 
24 | About the structure of the code: inside the folder deploy, we have `operator.yaml`,
25 | `rbac.yaml` and a folder called `crds`. The first one is the yaml file of the operator,
26 | which creates its deployment. The second one holds the permissions that the operator needs to run.
27 | Finally, the third one is where the CRDs are stored when you generate them.
28 | 
29 | We also have the folder `pkg`, which contains three other folders: `apis`, `controller` and `resources`.
30 | The `apis` folder contains the code responsible for generating the CRDs and for Deep Copy.
31 | The important file there is `challenge_types.go`, which is inside `pkg/apis/kctf/v1alpha1/`.
32 | It defines the specifications and the status of the challenge. Using kubebuilder tags in the comments,
33 | you can also define other things, such as whether a field is mandatory.
34 | Inside the `controller` folder, we have the code of the operator logic. Inside `pkg/controller/challenge`,
35 | you have the packages for each utility, and the file `challenge_controller.go`
36 | contains the Reconcile function, which is called every time there's a change in the watched objects.
37 | The `resources` folder contains objects that
38 | are created when the operator is initialized. These resources consist of the services that kCTF provides.
39 | 
40 | The folder `cmd/manager` contains `main.go`, which holds the main function of the code, and the folder `build` contains code needed so that
41 | everything works correctly. This last folder shouldn't be modified.
42 | 
43 | Finally, the folder `samples` contains some CRs as examples, and the folder `version` contains the current version of the operator.
44 | 
45 | ## Generating CRD and the Deep Copy code
46 | 
47 | In order to generate a new CRD based on the new code in `challenge_types.go`, you have to run from the `kctf-operator` folder:
48 | 
49 | ```
50 | operator-sdk generate crds
51 | ```
52 | 
53 | After generating the CRD, have a look at issue #136. Don't forget to apply/create the CRD after generating it.
54 | 
55 | And, to update `zz_generated.deepcopy.go`:
56 | 
57 | ```
58 | operator-sdk generate k8s
59 | ```
60 | 
61 | ## Testing locally
62 | 
63 | You can run the operator locally and see the logs that come from it by running:
64 | 
65 | ```
66 | operator-sdk run local --watch-namespace=""
67 | ```
68 | 
69 | ## Creating an image, pushing it and changing the `operator.yaml`
70 | 
71 | You can create a new image incorporating your changes to the code by running:
72 | 
73 | ```
74 | operator-sdk build gcr.io/myrepo/myimagename:tag
75 | ```
76 | 
77 | And you can push it to your repository by doing:
78 | 
79 | ```
80 | docker push gcr.io/myrepo/myimagename:tag
81 | ```
82 | 
83 | Remember to change the image in `deploy/operator.yaml` to make the operator use this image.
84 | 
85 | ## Deploying the operator
86 | 
87 | You can deploy the operator by running:
88 | 
89 | ```
90 | kubectl apply -f deploy/operator.yaml
91 | ```
92 | 
93 | ## Testing with sample custom resources
94 | 
95 | You can test the operator by applying the sample CRs:
96 | 
97 | ```
98 | kubectl apply -f samples/mychal.yaml
99 | ```
100 | 
101 | ## More information
102 | 
103 | You can find more information on the operator-sdk website cited above, which explains how to generate CRDs for other versions and how to create controllers specific to them.
-------------------------------------------------------------------------------- /docs/vrp.md: --------------------------------------------------------------------------------
1 | # kCTF VRP Setup
2 | 
3 | We invite you to demonstrate an exploit against our kCTF demo cluster based on the challenges presented on this page. Successful demonstrations which don't use Linux kernel vulnerabilities are eligible for rewards as described in our [kCTF VRP announcement blog post](https://security.googleblog.com/2020/05/expanding-our-work-with-open-source.html).
4 | 
5 | **Important note:** If you'd like to demonstrate an exploit against a Linux kernel vulnerability, please submit it to [our kernelCTF program](https://google.github.io/security-research/kernelctf/rules.html). **kCTF VRP does not accept Linux kernel vulnerabilities anymore** since 2023-06-14. For more information read our blog post "[Learnings from kCTF VRP's 42 Linux kernel exploits submissions](https://security.googleblog.com/2023/06/learnings-from-kctf-vrps-42-linux.html)".
6 | 
7 | [kCTF](https://github.com/google/kctf) is an open source infrastructure for CTF competitions.
You can find details on how it works in the [kCTF documentation](https://google.github.io/kctf/introduction.html), but in short, it’s running on a hardened Kubernetes cluster with the following security features:
8 | 
9 | * The OS and Kubernetes versions are upgraded automatically.
10 | * The nodes are running Container-Optimized OS.
11 | * Pod egress network access is restricted to public IPs only.
12 | * [Workload Identity](https://cloud.google.com/blog/products/containers-kubernetes/introducing-workload-identity-better-authentication-for-your-gke-applications) restricts access to service accounts and the metadata server in addition to the network policies.
13 | * Every connection to a challenge spawns a separate [nsjail](https://github.com/google/nsjail) sandbox to isolate players from each other.
14 | 
15 | At present, we’re interested in two attack scenarios against this infrastructure:
16 | 
17 | 1. Breaking out of the nsjail sandbox, as it would allow solving challenges in unintended ways.
18 | 2. Breaking the isolation that Kubernetes provides and accessing the flags of other challenges.
19 | 
20 | For this purpose, we set up two kCTF challenges with secret flags: “kctf” and “full-chain”. You can demonstrate a working exploit by leaking the flags of either of these.
21 | You can find the code for the challenges
22 | [here](https://github.com/google/google-ctf/tree/master/vrp).
23 | 
24 | ![drawing showing the location of the flags](./images/flag-locations.png)
25 | 
26 | 
27 | ## kctf challenge
28 | 
29 | The “kctf” challenge is the only entry point to the cluster. You can connect to it via:
30 | 
31 | **Older cluster (GKE Regular release channel):**
32 | ```
33 | socat FILE:`tty`,raw,echo=0 TCP:kctf.vrp.ctfcompetition.com:1337
34 | ```
35 | 
36 | **Newer cluster (GKE Rapid release channel):**
37 | ```
38 | socat FILE:`tty`,raw,echo=0 TCP:kctf.vrp2.ctfcompetition.com:1337
39 | ```
40 | 
41 | It will ask you to solve a proof-of-work and then give you access to a bash shell running in a setup similar to the [kCTF pwn template challenge](https://github.com/google/kctf/tree/beta/dist/challenge-templates/pwn). The only difference is that the flag is not accessible inside of the nsjail sandbox, and you will need to break out of the chroot in order to read it. You can see the full source code [here](https://github.com/google/google-ctf/tree/master/vrp).
42 | 
43 | The details of the environment of the VM can be read from `/etc/node-os-release`, and you can get the image of the VM by following [this script](https://gist.github.com/sirdarckcat/568934df2b33a125b0b0f42a5366df8c) based on the output of `/etc/node-os-release`.
44 | 
45 | 
46 | ## full-chain challenge
47 | 
48 | The “full-chain” challenge runs a `while sleep` loop and doesn’t have any exposed ports. In order to get access to the flag, you will need to break out of the “kctf” challenge and break the pod isolation of the cluster.
49 | 
50 | 
51 | ## Flags
52 | 
53 | The flags are stored in Kubernetes [secrets](https://kubernetes.io/docs/concepts/configuration/secret/) and mounted to the filesystem of the two challenges at “/flag/flag”. They are of the format:
54 | 
55 | 
56 | ```
57 | KCTF{$CHAL_NAME-$TIMESTAMP:$MAC}
58 | ```
59 | 
60 | 
61 | As you can see, the flags include a timestamp and are rotated frequently.
62 | 
63 | ### Submission
64 | 
65 | We want to avoid learning about unfixed vulnerabilities, so the process to submit reports is:
66 | 1. Test your exploit - we recommend testing it locally first and running a GKE cluster to debug.
67 | 2. If it is a 0day (there's no patch for it yet), then send us a checksum of your working exploit via our form [here](https://docs.google.com/forms/d/e/1FAIpQLSeQf6aWmIIjtG4sbEKfgOBK0KL3zzeHCrsgA1EcPr-xsFAk7w/viewform). You won't share any technical details about the vulnerability; you will just record the fact that you found something (as we only reward the first person that writes an exploit for a given bug, we use it to resolve the timing in case of an exploit collision). Make sure to submit the exploit checksum **before** there's a public patch and to submit the full exploit **within a week** after the patch is public. If you take longer than a week, we might issue the reward to someone else.
68 | 3. For 1days, or once there is a public patch, test your exploit on the [lab environment](#kctf-challenge). If you have trouble, let us know in [#kctf](https://discord.gg/V8UqnZ6JBG) and we'll help you figure out any problems.
69 | 4. Once you get the flag, send it together with the patch and the exploit [here](https://docs.google.com/forms/d/e/1FAIpQLSeQf6aWmIIjtG4sbEKfgOBK0KL3zzeHCrsgA1EcPr-xsFAk7w/viewform).
70 | 5. To increase the timely sharing of new techniques with the community, we now also require that exploits that receive the innovation bonus get publicly documented within a month; otherwise we may publish them.
71 | 
72 | ### Notes
73 | 
74 | We want to encourage the community to help research vulnerabilities that are known but still unfixed because they have not been shown to be exploitable. As such:
75 | 
76 | * The person that develops the exploit and receives the reward might not be the same as the person that discovered or patched the vulnerability.
77 | * It's OK to use 1-day exploits against the lab environment using publicly known vulnerabilities that exploit the patch gap between the time when a patch is announced and the lab environment is updated; however, we will only issue a single reward per vulnerability.
78 | 
79 | **When we receive an exploit for a fixed vulnerability we'll add details [here](https://docs.google.com/spreadsheets/d/e/2PACX-1vS1REdTA29OJftst8xN5B5x8iIUcxuK6bXdzF8G1UXCmRtoNsoQ9MbebdRdFnj6qZ0Yd7LwQfvYC2oF/pubhtml).**
80 | 
81 | In case of questions or suggestions, you can reach us in [#kctf](https://discord.gg/V8UqnZ6JBG).
-------------------------------------------------------------------------------- /kctf-operator/.dockerignore: --------------------------------------------------------------------------------
1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
2 | # Ignore build and test binaries.
3 | bin/ 4 | testbin/ 5 | -------------------------------------------------------------------------------- /kctf-operator/.gitignore: -------------------------------------------------------------------------------- 1 | # Generated file by kCTF 2 | operator.yaml 3 | # Temporary Build Files 4 | build/_output 5 | build/_test 6 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 7 | ### Emacs ### 8 | # -*- mode: gitignore; -*- 9 | *~ 10 | \#*\# 11 | /.emacs.desktop 12 | /.emacs.desktop.lock 13 | *.elc 14 | auto-save-list 15 | tramp 16 | .\#* 17 | # Org-mode 18 | .org-id-locations 19 | *_archive 20 | # flymake-mode 21 | *_flymake.* 22 | # eshell files 23 | /eshell/history 24 | /eshell/lastdir 25 | # elpa packages 26 | /elpa/ 27 | # reftex files 28 | *.rel 29 | # AUCTeX auto folder 30 | /auto/ 31 | # cask packages 32 | .cask/ 33 | # Flycheck 34 | flycheck_*.el 35 | # server auth directory 36 | /server/ 37 | # projectiles files 38 | .projectile 39 | projectile-bookmarks.eld 40 | # directory configuration 41 | .dir-locals.el 42 | # saveplace 43 | places 44 | # url cache 45 | url/cache/ 46 | # cedet 47 | ede-projects.el 48 | # smex 49 | smex-items 50 | # company-statistics 51 | company-statistics-cache.el 52 | # anaconda-mode 53 | anaconda-mode/ 54 | ### Go ### 55 | # Binaries for programs and plugins 56 | *.exe 57 | *.exe~ 58 | *.dll 59 | *.so 60 | *.dylib 61 | # Test binary, build with 'go test -c' 62 | *.test 63 | # Output of the go coverage tool, specifically when used with LiteIDE 64 | *.out 65 | ### Vim ### 66 | # swap 67 | .sw[a-p] 68 | .*.sw[a-p] 69 | # session 70 | Session.vim 71 | # temporary 72 | .netrwhist 73 | # auto-generated tag files 74 | tags 75 | ### VisualStudioCode ### 76 | .vscode/* 77 | .history 78 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 79 | -------------------------------------------------------------------------------- /kctf-operator/Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.21 AS builder 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | WORKDIR /workspace 7 | # Copy the Go Modules manifests 8 | COPY go.mod go.mod 9 | COPY go.sum go.sum 10 | # cache deps before building and copying source so that we don't need to re-download as much 11 | # and so that source changes don't invalidate our downloaded layer 12 | RUN go mod download 13 | 14 | # Copy the go source 15 | COPY main.go main.go 16 | COPY api/ api/ 17 | COPY controllers/ controllers/ 18 | COPY resources/ resources/ 19 | 20 | # Build 21 | # the GOARCH has not a default value to allow the binary be built according to the host where the command 22 | # was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO 23 | # the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, 24 | # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. 25 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go 26 | 27 | # Use distroless as minimal base image to package the manager binary 28 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 29 | FROM gcr.io/distroless/static:nonroot 30 | WORKDIR / 31 | COPY --from=builder /workspace/manager . 
32 | USER 65532:65532 33 | 34 | ENTRYPOINT ["/manager"] 35 | -------------------------------------------------------------------------------- /kctf-operator/PROJECT: -------------------------------------------------------------------------------- 1 | domain: dev 2 | layout: 3 | - go.kubebuilder.io/v3 4 | plugins: 5 | manifests.sdk.operatorframework.io/v2: {} 6 | scorecard.sdk.operatorframework.io/v2: {} 7 | projectName: kctf-operator 8 | repo: github.com/google/kctf 9 | resources: 10 | - api: 11 | crdVersion: v1 12 | namespaced: true 13 | controller: true 14 | domain: dev 15 | group: kctf 16 | kind: Challenge 17 | path: github.com/google/kctf/api/v1 18 | version: v1 19 | version: "3" 20 | -------------------------------------------------------------------------------- /kctf-operator/api/v1/challenge_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | intstr "k8s.io/apimachinery/pkg/util/intstr" 23 | ) 24 | 25 | type PortSpec struct { 26 | // Name of the port 27 | Name string `json:"name,omitempty"` 28 | 29 | // TargetPort is not optional 30 | // +kubebuilder:validation:Required 31 | TargetPort intstr.IntOrString `json:"targetPort"` 32 | 33 | // Port 34 | Port int32 `json:"port,omitempty"` 35 | 36 | // Protocol is not optional 37 | // +kubebuilder:validation:Required 38 | Protocol corev1.Protocol `json:"protocol"` 39 | 40 | // Extra domains for managed certificates. Only used for type HTTPS. 41 | Domains []string `json:"domains,omitempty"` 42 | } 43 | 44 | // Network specifications for the service 45 | type NetworkSpec struct { 46 | 47 | // +kubebuilder:default:=false 48 | Public bool `json:"public,omitempty"` 49 | 50 | // By default, one port is set with default values 51 | Ports []PortSpec `json:"ports,omitempty"` 52 | } 53 | 54 | // Healthcheck specifications 55 | type HealthcheckSpec struct { 56 | 57 | // +kubebuilder:default:=false 58 | Enabled bool `json:"enabled,omitempty"` 59 | 60 | // Image for the healthcheck container 61 | // +kubebuilder:default:="healthcheck" 62 | Image string `json:"image,omitempty"` 63 | } 64 | 65 | // HorizontalPodAutoscalerSpec without ScaleTargetRef 66 | type HorizontalPodAutoscalerSpec struct { 67 | // minReplicas is the lower limit for the number of replicas to which the autoscaler 68 | // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the 69 | // alpha feature gate HPAScaleToZero is enabled and at least one Object or External 70 | // metric is configured. Scaling is active as long as at least one metric value is 71 | // available. 72 | // +optional 73 | MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` 74 | // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. 
75 | // +kubebuilder:validation:Required 76 | MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` 77 | // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; 78 | // if not specified the default autoscaling policy will be used. 79 | TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` 80 | } 81 | 82 | // ChallengeSpec defines the desired state of Challenge 83 | type ChallengeSpec struct { 84 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 85 | // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html 86 | 87 | // Image used by the deployment 88 | // +kubebuilder:default:="challenge" 89 | Image string `json:"image"` 90 | 91 | // Shows if the challenge is ready to be deployed, if not, 92 | // it sets the replicas to 0 and disables services/ingress 93 | // +kubebuilder:default:=false 94 | Deployed bool `json:"deployed,omitempty"` 95 | 96 | // The desired quantity of replicas if horizontal pod autoscaler is disabled 97 | // +kubebuilder:default:=1 98 | Replicas *int32 `json:"replicas,omitempty"` 99 | 100 | // The quantity of seconds of the proof of work 101 | // +kubebuilder:default:=0 102 | PowDifficultySeconds int `json:"powDifficultySeconds,omitempty"` 103 | 104 | // The network specifications: if it's public or not and specifications about ports 105 | Network NetworkSpec `json:"network,omitempty"` 106 | 107 | // Healthcheck checks if the challenge works 108 | // If empty, healthcheck is not enabled by default 109 | // +kubebuilder:validation:Required 110 | Healthcheck HealthcheckSpec `json:"healthcheck,omitempty"` 111 | 112 | // Autoscaling features determine quantity of replicas and CPU utilization 113 | // If empty, autoscaling is not enabled by default 114 | HorizontalPodAutoscalerSpec *HorizontalPodAutoscalerSpec `json:"horizontalPodAutoscalerSpec,omitempty"` 115 | 116 | // PodTemplate is used to set the template for the deployment's pod, 117 | // so that an author can add volumeMounts and other extra features 118 | PodTemplate *corev1.PodTemplate `json:"podTemplate,omitempty"` 119 | 120 | // Names of the desired PersistentVolumeClaims 121 | PersistentVolumeClaims []string `json:"persistentVolumeClaims,omitempty"` 122 | 123 | AllowConnectTo []string `json:"allowConnectTo,omitempty"` 124 | } 125 | 126 | // ChallengeStatus defines the observed state of Challenge 127 | type ChallengeStatus struct { 128 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 129 | // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html 130 | // Says if the challenge is up to date or being updated 131 | // +kubebuilder:default:="up-to-date" 132 | Status corev1.PodPhase `json:"status"` 133 | 134 | // Shows healthcheck returns 135 | // +kubebuilder:default:="disabled" 136 | Health string `json:"health"` 137 | } 138 | 139 | //+kubebuilder:object:root=true 140 | //+kubebuilder:subresource:status 141 | 142 | // Challenge is the Schema for the challenges API 143 | // +kubebuilder:subresource:status 144 | // +kubebuilder:resource:path=challenges,scope=Namespaced 145 | // +kubebuilder:printcolumn:name="Health",type=string,JSONPath=`.status.health` 146 | // +kubebuilder:printcolumn:name="Status", type=string,JSONPath=`.status.status` 147 | // 
+kubebuilder:printcolumn:name="Deployed",type=boolean,JSONPath=`.spec.deployed` 148 | // +kubebuilder:printcolumn:name="Public",type=boolean,JSONPath=`.spec.network.public` 149 | type Challenge struct { 150 | metav1.TypeMeta `json:",inline"` 151 | metav1.ObjectMeta `json:"metadata,omitempty"` 152 | 153 | Spec ChallengeSpec `json:"spec,omitempty"` 154 | Status ChallengeStatus `json:"status,omitempty"` 155 | } 156 | 157 | //+kubebuilder:object:root=true 158 | 159 | // ChallengeList contains a list of Challenge 160 | type ChallengeList struct { 161 | metav1.TypeMeta `json:",inline"` 162 | metav1.ListMeta `json:"metadata,omitempty"` 163 | Items []Challenge `json:"items"` 164 | } 165 | 166 | func init() { 167 | SchemeBuilder.Register(&Challenge{}, &ChallengeList{}) 168 | } 169 | -------------------------------------------------------------------------------- /kctf-operator/api/v1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package v1 contains API Schema definitions for the kctf v1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=kctf.dev 20 | package v1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "kctf.dev", Version: "v1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /kctf-operator/bin/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /kctf-operator/build-and-deploy-operator.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # https://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | set -Eeuo pipefail 16 | DIR="$( cd "$( dirname "$( readlink -f "${BASH_SOURCE[0]}")" )" >/dev/null && pwd )/.." 
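# What follows: build and push the gcsfuse and certbot images, pin their
# digests into resources/constants.go, build and push the operator image,
# rewrite operator.yaml with the pushed digest, copy the bundle manifests
# into the CTF directory, and finally restart the cluster via kctf-cluster.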
17 | 18 | if [[ -z "${PROJECT:-}" || -z "${REGISTRY:-}" ]]; then 19 | echo "you need to load a cluster config first (source kctf/activate)" >&2 20 | exit 1 21 | fi 22 | 23 | IMAGE_BASE="${REGISTRY}/${PROJECT}" 24 | echo "building images and pushing to ${IMAGE_BASE}" 25 | 26 | pushd "${DIR}/kctf-operator" 27 | 28 | set -x 29 | 30 | GCSFUSE_IMAGE_URL="${IMAGE_BASE}/gcsfuse" 31 | CERTBOT_IMAGE_URL="${IMAGE_BASE}/certbot" 32 | 33 | GCSFUSE_IMAGE_ID=$(docker build -t "${GCSFUSE_IMAGE_URL}" -q "${DIR}/docker-images/gcsfuse") 34 | CERTBOT_IMAGE_ID=$(docker build -t "${CERTBOT_IMAGE_URL}" -q "${DIR}/docker-images/certbot") 35 | 36 | docker push "${GCSFUSE_IMAGE_URL}" 37 | docker push "${CERTBOT_IMAGE_URL}" 38 | 39 | GCSFUSE_IMAGE_SHA=$(docker inspect -f '{{index .RepoDigests 0}}' "${GCSFUSE_IMAGE_ID}") 40 | CERTBOT_IMAGE_SHA=$(docker inspect -f '{{index .RepoDigests 0}}' "${CERTBOT_IMAGE_ID}") 41 | 42 | sed -i 's%const DOCKER_GCSFUSE_IMAGE = .*%const DOCKER_GCSFUSE_IMAGE = "'${GCSFUSE_IMAGE_SHA}'"%' resources/constants.go 43 | sed -i 's%const DOCKER_CERTBOT_IMAGE = .*%const DOCKER_CERTBOT_IMAGE = "'${CERTBOT_IMAGE_SHA}'"%' resources/constants.go 44 | 45 | set +x 46 | 47 | IMAGE_URL="${IMAGE_BASE}/kctf-operator:dev" 48 | make manifests docker-build operator.yaml bundle IMG="${IMAGE_URL}" 49 | OPERATOR_SHA=$(docker push "${IMAGE_URL}" | grep -Eo 'sha256:[0-9a-f]+' | head -n1) 50 | IMAGE_ID="${IMAGE_BASE}/kctf-operator@${OPERATOR_SHA}" 51 | 52 | echo "pushed to ${IMAGE_ID}" 53 | 54 | OPERATOR_YAML="${KCTF_CTF_DIR}/kctf/resources/operator.yaml" 55 | sed -i "s#image: ${IMAGE_URL}#image: ${IMAGE_ID}#" "operator.yaml" 56 | cat "operator.yaml" > "${OPERATOR_YAML}" 57 | 58 | cp -f bundle/manifests/*.yaml "${KCTF_CTF_DIR}/kctf/resources/" 59 | 60 | popd 61 | 62 | "${KCTF_BIN}/kctf-cluster" start 63 | -------------------------------------------------------------------------------- /kctf-operator/bundle.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | # Core bundle labels. 4 | LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 5 | LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ 6 | LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ 7 | LABEL operators.operatorframework.io.bundle.package.v1=kctf-operator 8 | LABEL operators.operatorframework.io.bundle.channels.v1=alpha 9 | LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.36.0 10 | LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 11 | LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 12 | 13 | # Labels for testing. 14 | LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 15 | LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ 16 | 17 | # Copy files to locations specified by labels. 
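# (These destinations must stay in sync with the bundle.* label values above.)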
18 | COPY bundle/manifests /manifests/ 19 | COPY bundle/metadata /metadata/ 20 | COPY bundle/tests/scorecard /tests/scorecard/ 21 | -------------------------------------------------------------------------------- /kctf-operator/bundle/.gitignore: -------------------------------------------------------------------------------- 1 | ** 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /kctf-operator/config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/kctf.dev_challenges.yaml 6 | #+kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patchesStrategicMerge: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- patches/webhook_in_challenges.yaml 12 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 13 | 14 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 15 | # patches here are for enabling the CA injection for each CRD 16 | #- patches/cainjection_in_challenges.yaml 17 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 18 | 19 | # the following config is for teaching kustomize how to do kustomization for CRDs. 20 | configurations: 21 | - kustomizeconfig.yaml 22 | -------------------------------------------------------------------------------- /kctf-operator/config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /kctf-operator/config/crd/patches/cainjection_in_challenges.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: challenges.kctf.dev 8 | -------------------------------------------------------------------------------- /kctf-operator/config/crd/patches/webhook_in_challenges.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: challenges.kctf.dev 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | 
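# Note: this conversion-webhook patch is not applied by default; it is listed
# commented out under patchesStrategicMerge in config/crd/kustomization.yaml
# and only takes effect once the [WEBHOOK] sections there are uncommented.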
-------------------------------------------------------------------------------- /kctf-operator/config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: kctf-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: kctf-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | resources: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patches: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 31 | - path: manager_auth_proxy_patch.yaml 32 | 33 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 34 | # crd/kustomization.yaml 35 | #- path: manager_webhook_patch.yaml 36 | 37 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 38 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 39 | # 'CERTMANAGER' needs to be enabled to use ca injection 40 | #- path: webhookcainjection_patch.yaml 41 | 42 | # the following config is for teaching kustomize how to do var substitution 43 | vars: 44 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
45 | # Uncomment the following replacements to add the cert-manager CA injection annotations 46 | #replacements: 47 | # - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs 48 | #     kind: Certificate 49 | #     group: cert-manager.io 50 | #     version: v1 51 | #     name: serving-cert # this name should match the one in certificate.yaml 52 | #     fieldPath: .metadata.namespace # namespace of the certificate CR 53 | #   targets: 54 | #     - select: 55 | #         kind: ValidatingWebhookConfiguration 56 | #       fieldPaths: 57 | #         - .metadata.annotations.[cert-manager.io/inject-ca-from] 58 | #       options: 59 | #         delimiter: '/' 60 | #         index: 0 61 | #         create: true 62 | #     - select: 63 | #         kind: MutatingWebhookConfiguration 64 | #       fieldPaths: 65 | #         - .metadata.annotations.[cert-manager.io/inject-ca-from] 66 | #       options: 67 | #         delimiter: '/' 68 | #         index: 0 69 | #         create: true 70 | #     - select: 71 | #         kind: CustomResourceDefinition 72 | #       fieldPaths: 73 | #         - .metadata.annotations.[cert-manager.io/inject-ca-from] 74 | #       options: 75 | #         delimiter: '/' 76 | #         index: 0 77 | #         create: true 78 | # - source: 79 | #     kind: Certificate 80 | #     group: cert-manager.io 81 | #     version: v1 82 | #     name: serving-cert # this name should match the one in certificate.yaml 83 | #     fieldPath: .metadata.name 84 | #   targets: 85 | #     - select: 86 | #         kind: ValidatingWebhookConfiguration 87 | #       fieldPaths: 88 | #         - .metadata.annotations.[cert-manager.io/inject-ca-from] 89 | #       options: 90 | #         delimiter: '/' 91 | #         index: 1 92 | #         create: true 93 | #     - select: 94 | #         kind: MutatingWebhookConfiguration 95 | #       fieldPaths: 96 | #         - .metadata.annotations.[cert-manager.io/inject-ca-from] 97 | #       options: 98 | #         delimiter: '/' 99 | #         index: 1 100 | #         create: true 101 | #     - select: 102 | #         kind: CustomResourceDefinition 103 | #       fieldPaths: 104 | #         - .metadata.annotations.[cert-manager.io/inject-ca-from] 105 | #       options: 106 | #         delimiter: '/' 107 | #         index: 1 108 | #         create: true 109 | # - source: # Add cert-manager annotation to the webhook Service 110 | #     kind: Service 111 | #     version: v1 112 | #     name: webhook-service 113 | #     fieldPath: .metadata.name # name of the service 114 | #   targets: 115 | #     - select: 116 | #         kind: Certificate 117 | #         group: cert-manager.io 118 | #         version: v1 119 | #       fieldPaths: 120 | #         - .spec.dnsNames.0 121 | #         - .spec.dnsNames.1 122 | #       options: 123 | #         delimiter: '.' 124 | #         index: 0 125 | #         create: true 126 | # - source: 127 | #     kind: Service 128 | #     version: v1 129 | #     name: webhook-service 130 | #     fieldPath: .metadata.namespace # namespace of the service 131 | #   targets: 132 | #     - select: 133 | #         kind: Certificate 134 | #         group: cert-manager.io 135 | #         version: v1 136 | #       fieldPaths: 137 | #         - .spec.dnsNames.0 138 | #         - .spec.dnsNames.1 139 | #       options: 140 | #         delimiter: '.' 141 | #         index: 1 142 | #         create: true 143 | -------------------------------------------------------------------------------- /kctf-operator/config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch injects a sidecar container which is an HTTP proxy for the 2 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
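# Rough request flow, assuming the default args below: a metrics scrape hits
# the kube-rbac-proxy sidecar on :8443, the proxy authorizes the caller via a
# SubjectAccessReview, and only then forwards the request to the manager's
# metrics endpoint bound to 127.0.0.1:8080.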
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | securityContext: 14 | allowPrivilegeEscalation: false 15 | capabilities: 16 | drop: 17 | - "ALL" 18 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 19 | args: 20 | - "--secure-listen-address=0.0.0.0:8443" 21 | - "--upstream=http://127.0.0.1:8080/" 22 | - "--logtostderr=true" 23 | - "--v=0" 24 | ports: 25 | - containerPort: 8443 26 | protocol: TCP 27 | name: https 28 | resources: 29 | limits: 30 | cpu: 500m 31 | memory: 128Mi 32 | requests: 33 | cpu: 5m 34 | memory: 64Mi 35 | - name: manager 36 | args: 37 | - "--health-probe-bind-address=:8081" 38 | - "--metrics-bind-address=127.0.0.1:8080" 39 | - "--leader-elect" 40 | -------------------------------------------------------------------------------- /kctf-operator/config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /kctf-operator/config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 | healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: 558d99b6.dev 12 | -------------------------------------------------------------------------------- /kctf-operator/config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | 7 | configMapGenerator: 8 | - files: 9 | - controller_manager_config.yaml 10 | name: manager-config 11 | apiVersion: kustomize.config.k8s.io/v1beta1 12 | kind: Kustomization 13 | images: 14 | - name: controller 15 | newName: gcr.io/kctf-docker/kctf-operator 16 | -------------------------------------------------------------------------------- /kctf-operator/config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | labels: 23 | control-plane: controller-manager 24 | annotations: 25 | kubectl.kubernetes.io/default-container: manager 26 | spec: 27 | securityContext: 28 | runAsNonRoot: true 29 | containers: 30 | - command: 31 | - /manager 32 | args: 33 | - --leader-elect 34 | env: 35 | - name: ALLOWED_IPS 36 | value: 0.0.0.0/0 37 | - 
name: SECURITY_POLICY 38 | value: DISABLED 39 | image: controller:latest 40 | name: manager 41 | securityContext: 42 | allowPrivilegeEscalation: false 43 | livenessProbe: 44 | httpGet: 45 | path: /healthz 46 | port: 8081 47 | initialDelaySeconds: 15 48 | periodSeconds: 20 49 | readinessProbe: 50 | httpGet: 51 | path: /readyz 52 | port: 8081 53 | initialDelaySeconds: 5 54 | periodSeconds: 10 55 | resources: 56 | limits: 57 | cpu: 200m 58 | memory: 100Mi 59 | requests: 60 | cpu: 100m 61 | memory: 20Mi 62 | serviceAccountName: controller-manager 63 | terminationGracePeriodSeconds: 10 64 | -------------------------------------------------------------------------------- /kctf-operator/config/manifests/bases/kctf-operator.clusterserviceversion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: ClusterServiceVersion 3 | metadata: 4 | annotations: 5 | alm-examples: '[]' 6 | capabilities: Basic Install 7 | name: kctf-operator.v0.0.0 8 | namespace: placeholder 9 | spec: 10 | apiservicedefinitions: {} 11 | customresourcedefinitions: 12 | owned: 13 | - description: Challenge is the Schema for the challenges API 14 | displayName: Challenge 15 | kind: Challenge 16 | name: challenges.kctf.dev 17 | version: v1 18 | description: Operator for KCTF 19 | displayName: kctf-operator 20 | icon: 21 | - base64data: "" 22 | mediatype: "" 23 | install: 24 | spec: 25 | deployments: null 26 | strategy: "" 27 | installModes: 28 | - supported: false 29 | type: OwnNamespace 30 | - supported: false 31 | type: SingleNamespace 32 | - supported: false 33 | type: MultiNamespace 34 | - supported: true 35 | type: AllNamespaces 36 | keywords: 37 | - kctf 38 | links: 39 | - name: Kctf Operator 40 | url: https://kctf-operator.domain 41 | maintainers: 42 | - email: kctf@google.com 43 | name: kctf 44 | maturity: alpha 45 | provider: 46 | name: Google 47 | url: http://kctf.dev 48 | version: 0.0.0 49 | -------------------------------------------------------------------------------- /kctf-operator/config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # These resources constitute the fully configured set of manifests 2 | # used to generate the 'manifests/' directory in a bundle. 3 | resources: 4 | - bases/kctf-operator.clusterserviceversion.yaml 5 | - ../default 6 | - ../samples 7 | - ../scorecard 8 | 9 | # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. 10 | # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. 11 | # These patches remove the unnecessary "cert" volume and its manager container volumeMount. 12 | #patchesJson6902: 13 | #- target: 14 | # group: apps 15 | # version: v1 16 | # kind: Deployment 17 | # name: controller-manager 18 | # namespace: system 19 | # patch: |- 20 | # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. 21 | # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. 22 | # - op: remove 23 | # path: /spec/template/spec/containers/1/volumeMounts/0 24 | # # Remove the "cert" volume, since OLM will create and mount a set of certs. 25 | # # Update the indices in this path if adding or removing volumes in the manager's Deployment. 
26 | # - op: remove 27 | # path: /spec/template/spec/volumes/0 28 | -------------------------------------------------------------------------------- /kctf-operator/config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /kctf-operator/config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | scheme: https 15 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: 7 | - "/metrics" 8 | verbs: 9 | - get 10 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | protocol: TCP 13 | targetPort: https 14 | selector: 15 | control-plane: controller-manager 16 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/challenge_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit challenges. 
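# To hand this role out, bind it with a ClusterRoleBinding; a minimal sketch
# (the binding and subject names are illustrative, not part of this repo;
# note that config/default prepends the kctf-operator- namePrefix when this
# role is built through kustomize):
#
#   apiVersion: rbac.authorization.k8s.io/v1
#   kind: ClusterRoleBinding
#   metadata:
#     name: challenge-editors
#   roleRef:
#     apiGroup: rbac.authorization.k8s.io
#     kind: ClusterRole
#     name: challenge-editor-role
#   subjects:
#   - apiGroup: rbac.authorization.k8s.io
#     kind: User
#     name: challenge-author@example.com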
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: challenge-editor-role 6 | rules: 7 | - apiGroups: 8 | - kctf.dev 9 | resources: 10 | - challenges 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - kctf.dev 21 | resources: 22 | - challenges/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/challenge_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view challenges. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: challenge-viewer-role 6 | rules: 7 | - apiGroups: 8 | - kctf.dev 9 | resources: 10 | - challenges 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - kctf.dev 17 | resources: 18 | - challenges/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
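# Context: the manager runs with --leader-elect (see config/manager/manager.yaml),
# and controller-runtime stores the election lock under the resourceName from
# config/manager/controller_manager_config.yaml (558d99b6.dev), which is why
# this Role needs write access to configmaps and coordination.k8s.io leases.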
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - apps 9 | resources: 10 | - daemonsets 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - apps 21 | resources: 22 | - deployments 23 | verbs: 24 | - create 25 | - delete 26 | - get 27 | - list 28 | - patch 29 | - update 30 | - watch 31 | - apiGroups: 32 | - autoscaling 33 | resources: 34 | - horizontalpodautoscalers 35 | verbs: 36 | - create 37 | - delete 38 | - get 39 | - list 40 | - patch 41 | - update 42 | - watch 43 | - apiGroups: 44 | - cloud.google.com 45 | resources: 46 | - backendconfigs 47 | verbs: 48 | - create 49 | - delete 50 | - get 51 | - list 52 | - patch 53 | - update 54 | - watch 55 | - apiGroups: 56 | - "" 57 | resources: 58 | - configmaps 59 | verbs: 60 | - create 61 | - delete 62 | - get 63 | - list 64 | - patch 65 | - update 66 | - watch 67 | - apiGroups: 68 | - "" 69 | resources: 70 | - endpoints 71 | verbs: 72 | - create 73 | - delete 74 | - get 75 | - list 76 | - patch 77 | - update 78 | - watch 79 | - apiGroups: 80 | - "" 81 | resources: 82 | - nodes 83 | verbs: 84 | - create 85 | - delete 86 | - get 87 | - list 88 | - patch 89 | - update 90 | - watch 91 | - apiGroups: 92 | - "" 93 | resources: 94 | - persistentvolumeclaims 95 | verbs: 96 | - create 97 | - delete 98 | - get 99 | - list 100 | - patch 101 | - update 102 | - watch 103 | - apiGroups: 104 | - "" 105 | resources: 106 | - persistentvolumes 107 | verbs: 108 | - create 109 | - delete 110 | - get 111 | - list 112 | - patch 113 | - update 114 | - watch 115 | - apiGroups: 116 | - "" 117 | resources: 118 | - pods 119 | verbs: 120 | - create 121 | - delete 122 | - get 123 | - list 124 | - patch 125 | - update 126 | - watch 127 | - apiGroups: 128 | - "" 129 | resources: 130 | - secrets 131 | verbs: 132 | - create 133 | - delete 134 | - get 135 | - list 136 | - patch 137 | - update 138 | - watch 139 | - apiGroups: 140 | - "" 141 | resources: 142 | - services 143 | verbs: 144 | - create 145 | - delete 146 | - get 147 | - list 148 | - patch 149 | - update 150 | - watch 151 | - apiGroups: 152 | - extensions 153 | resources: 
154 | - ingresses 155 | verbs: 156 | - create 157 | - delete 158 | - get 159 | - list 160 | - patch 161 | - update 162 | - watch 163 | - apiGroups: 164 | - kctf.dev 165 | resources: 166 | - challenges 167 | verbs: 168 | - create 169 | - delete 170 | - get 171 | - list 172 | - patch 173 | - update 174 | - watch 175 | - apiGroups: 176 | - kctf.dev 177 | resources: 178 | - challenges/finalizers 179 | verbs: 180 | - update 181 | - apiGroups: 182 | - kctf.dev 183 | resources: 184 | - challenges/status 185 | verbs: 186 | - get 187 | - patch 188 | - update 189 | - apiGroups: 190 | - networking.gke.io 191 | resources: 192 | - managedcertificates 193 | verbs: 194 | - create 195 | - delete 196 | - get 197 | - list 198 | - patch 199 | - update 200 | - watch 201 | - apiGroups: 202 | - networking.k8s.io 203 | resources: 204 | - ingresses 205 | verbs: 206 | - create 207 | - delete 208 | - get 209 | - list 210 | - patch 211 | - update 212 | - watch 213 | - apiGroups: 214 | - networking.k8s.io 215 | resources: 216 | - networkpolicies 217 | verbs: 218 | - create 219 | - delete 220 | - get 221 | - list 222 | - patch 223 | - update 224 | - watch 225 | - apiGroups: 226 | - rbac.authorization.k8s.io 227 | resources: 228 | - clusterrolebindings 229 | verbs: 230 | - create 231 | - delete 232 | - get 233 | - list 234 | - patch 235 | - update 236 | - watch 237 | - apiGroups: 238 | - rbac.authorization.k8s.io 239 | resources: 240 | - clusterroles 241 | verbs: 242 | - create 243 | - delete 244 | - get 245 | - list 246 | - patch 247 | - update 248 | - watch 249 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /kctf-operator/config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | -------------------------------------------------------------------------------- /kctf-operator/config/samples/kctf_v1_challenge.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kctf.dev/v1 2 | kind: Challenge 3 | metadata: 4 | name: challenge-sample 5 | spec: 6 | # Add fields here 7 | -------------------------------------------------------------------------------- /kctf-operator/config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples you want in your CSV to this file as resources ## 2 | resources: 3 | - kctf_v1_challenge.yaml 4 | #+kubebuilder:scaffold:manifestskustomizesamples 5 | -------------------------------------------------------------------------------- /kctf-operator/config/samples/mychal.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: mychal 5 | 6 | --- 7 | 8 | apiVersion: kctf.dev/v1 9 | kind: Challenge 10 | metadata: 11 | name: mychal 12 | namespace: mychal 13 | spec: 14 
| # Add fields here 15 | image: "quay.io/aliciafmachado/bash:latest" 16 | deployed: true 17 | healthcheck: 18 | enabled: false 19 | network: 20 | public: true 21 | dns: true 22 | ports: 23 | - protocol: "TCP" 24 | port: 3 25 | targetPort: 1337 26 | - protocol: "HTTPS" 27 | targetPort: 1338 28 | horizontalPodAutoscalerSpec: 29 | minReplicas: 1 30 | maxReplicas: 2 31 | -------------------------------------------------------------------------------- /kctf-operator/config/samples/mychal2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: mychal2 5 | 6 | --- 7 | 8 | apiVersion: kctf.dev/v1 9 | kind: Challenge 10 | metadata: 11 | name: mychal2 12 | namespace: mychal2 13 | spec: 14 | # Add fields here 15 | image: "memcached:1.4.36-alpine" 16 | deployed: true 17 | healthcheck: 18 | enabled: false 19 | powDifficultySeconds: 10 20 | network: 21 | public: true 22 | dns: false 23 | -------------------------------------------------------------------------------- /kctf-operator/config/samples/mychal3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: mychal3 5 | 6 | --- 7 | 8 | apiVersion: kctf.dev/v1 9 | kind: Challenge 10 | metadata: 11 | name: mychal3 12 | namespace: mychal3 13 | spec: 14 | # Add fields here 15 | image: "memcached:1.4.36-alpine" 16 | deployed: true 17 | powDifficultySeconds: 10 18 | healthcheck: 19 | enabled: false 20 | network: 21 | public: false 22 | dns: false 23 | podTemplate: 24 | template: 25 | spec: 26 | containers: 27 | - name: challenge 28 | volumeMounts: 29 | - name: sessions 30 | mountPath: /mnt/disks/sessions 31 | volumes: 32 | - name: sessions 33 | persistentVolumeClaim: 34 | claimName: mychal3-sessions 35 | persistentVolumeClaims: 36 | - mychal3-sessions 37 | -------------------------------------------------------------------------------- /kctf-operator/config/samples/simple-challenge.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: simple-challenge 5 | 6 | --- 7 | 8 | apiVersion: kctf.dev/v1 9 | kind: Challenge 10 | metadata: 11 | name: simple-challenge 12 | namespace: simple-challenge 13 | spec: 14 | image: "memcached:1.4.36-alpine" 15 | deployed: false 16 | healthcheck: 17 | enabled: false 18 | -------------------------------------------------------------------------------- /kctf-operator/config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /kctf-operator/config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | patchesJson6902: 4 | - path: patches/basic.config.yaml 5 | target: 6 | group: scorecard.operatorframework.io 7 | version: v1alpha3 8 | kind: Configuration 9 | name: config 10 | - path: patches/olm.config.yaml 11 | target: 12 | group: scorecard.operatorframework.io 13 | version: v1alpha3 14 | kind: Configuration 15 | name: config 16 | #+kubebuilder:scaffold:patchesJson6902 17 | 
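# The two patch files below each append scorecard tests to the empty
# stages/0/tests list in bases/config.yaml via JSON 6902 "add" operations.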
-------------------------------------------------------------------------------- /kctf-operator/config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.36.0 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /kctf-operator/config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - olm-bundle-validation 7 | image: quay.io/operator-framework/scorecard-test:v1.36.0 8 | labels: 9 | suite: olm 10 | test: olm-bundle-validation-test 11 | - op: add 12 | path: /stages/0/tests/- 13 | value: 14 | entrypoint: 15 | - scorecard-test 16 | - olm-crds-have-validation 17 | image: quay.io/operator-framework/scorecard-test:v1.36.0 18 | labels: 19 | suite: olm 20 | test: olm-crds-have-validation-test 21 | - op: add 22 | path: /stages/0/tests/- 23 | value: 24 | entrypoint: 25 | - scorecard-test 26 | - olm-crds-have-resources 27 | image: quay.io/operator-framework/scorecard-test:v1.36.0 28 | labels: 29 | suite: olm 30 | test: olm-crds-have-resources-test 31 | - op: add 32 | path: /stages/0/tests/- 33 | value: 34 | entrypoint: 35 | - scorecard-test 36 | - olm-spec-descriptors 37 | image: quay.io/operator-framework/scorecard-test:v1.36.0 38 | labels: 39 | suite: olm 40 | test: olm-spec-descriptors-test 41 | - op: add 42 | path: /stages/0/tests/- 43 | value: 44 | entrypoint: 45 | - scorecard-test 46 | - olm-status-descriptors 47 | image: quay.io/operator-framework/scorecard-test:v1.36.0 48 | labels: 49 | suite: olm 50 | test: olm-status-descriptors-test 51 | -------------------------------------------------------------------------------- /kctf-operator/controllers/autoscaling/functions.go: -------------------------------------------------------------------------------- 1 | // Create autoscaling 2 | 3 | package autoscaling 4 | 5 | import ( 6 | "context" 7 | "reflect" 8 | 9 | "github.com/go-logr/logr" 10 | kctfv1 "github.com/google/kctf/api/v1" 11 | autoscalingv1 "k8s.io/api/autoscaling/v1" 12 | "k8s.io/apimachinery/pkg/api/errors" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | "k8s.io/apimachinery/pkg/types" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 17 | ) 18 | 19 | func isEqual(autoscalingFound *autoscalingv1.HorizontalPodAutoscaler, 20 | autoscaling *autoscalingv1.HorizontalPodAutoscaler) bool { 21 | return reflect.DeepEqual(autoscalingFound.Spec, autoscaling.Spec) 22 | } 23 | 24 | func create(challenge *kctfv1.Challenge, client client.Client, scheme *runtime.Scheme, 25 | log logr.Logger, ctx context.Context) (bool, error) { 26 | // creates autoscaling if it doesn't exist yet 27 | autoscaling := generate(challenge) 28 | log.Info("Creating an Autoscaling", "Autoscaling name: ", 29 | autoscaling.Name, " with namespace ", autoscaling.Namespace) 30 | 31 | // Creates owner references 32 | if err := controllerutil.SetControllerReference(challenge, autoscaling, scheme); err != nil { return false, err } 33 | 34 | // Creates autoscaling 35 | err := client.Create(ctx, autoscaling) 36 | 37 | if err != nil { 38 | log.Error(err, "Failed to create Autoscaling", "Autoscaling name: ", 39 | autoscaling.Name, " 
with namespace ", autoscaling.Namespace) 40 | return false, err 41 | } 42 | 43 | return true, nil 44 | } 45 | 46 | func delete(autoscalingFound *autoscalingv1.HorizontalPodAutoscaler, client client.Client, 47 | scheme *runtime.Scheme, log logr.Logger, ctx context.Context) (bool, error) { 48 | log.Info("Deleting Autoscaling", "Autoscaling name: ", 49 | autoscalingFound.Name, " with namespace ", autoscalingFound.Namespace) 50 | 51 | err := client.Delete(ctx, autoscalingFound) 52 | if err != nil { 53 | log.Error(err, "Failed to delete Autoscaling", "Autoscaling name: ", 54 | autoscalingFound.Name, " with namespace ", autoscalingFound.Namespace) 55 | return false, err 56 | } 57 | 58 | return true, nil 59 | } 60 | 61 | func Update(challenge *kctfv1.Challenge, client client.Client, scheme *runtime.Scheme, 62 | log logr.Logger, ctx context.Context) (bool, error) { 63 | // Fetches the existing autoscaler, if any, 64 | // checks whether autoscaling is configured in the spec, 65 | // and creates, updates or deletes the object accordingly 66 | autoscalingFound := &autoscalingv1.HorizontalPodAutoscaler{} 67 | err := client.Get(ctx, types.NamespacedName{Name: challenge.Name, 68 | Namespace: challenge.Namespace}, autoscalingFound) 69 | 70 | if challenge.Spec.HorizontalPodAutoscalerSpec != nil && errors.IsNotFound(err) && 71 | challenge.Spec.Deployed { 72 | // creates autoscaling if it doesn't exist yet 73 | return create(challenge, client, scheme, log, ctx) 74 | } 75 | 76 | if (challenge.Spec.HorizontalPodAutoscalerSpec == nil || !challenge.Spec.Deployed) && err == nil { 77 | // delete autoscaling 78 | return delete(autoscalingFound, client, scheme, log, ctx) 79 | } 80 | 81 | if err == nil { 82 | if autoscaling := generate(challenge); !isEqual(autoscalingFound, autoscaling) { 83 | autoscalingFound.Spec = autoscaling.Spec 84 | err = client.Update(ctx, autoscalingFound) 85 | if err != nil { 86 | log.Error(err, "Failed to update autoscaling", "Autoscaling name: ", 87 | autoscalingFound.Name, " with namespace ", autoscalingFound.Namespace) 88 | return false, err 89 | } 90 | log.Info("Updated autoscaling successfully", "Autoscaling name: ", 91 | autoscalingFound.Name, " with namespace ", autoscalingFound.Namespace) 92 | return true, nil 93 | } 94 | } 95 | 96 | return false, nil 97 | } 98 | -------------------------------------------------------------------------------- /kctf-operator/controllers/autoscaling/horizontal-pod-autoscaler.go: -------------------------------------------------------------------------------- 1 | package autoscaling 2 | 3 | import ( 4 | kctfv1 "github.com/google/kctf/api/v1" 5 | autoscalingv1 "k8s.io/api/autoscaling/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | func generate(challenge *kctfv1.Challenge) *autoscalingv1.HorizontalPodAutoscaler { 10 | // We create the autoscaling object 11 | autoscaling := &autoscalingv1.HorizontalPodAutoscaler{ 12 | ObjectMeta: metav1.ObjectMeta{ 13 | Name: challenge.Name, 14 | Namespace: challenge.Namespace, 15 | }, 16 | Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ 17 | MaxReplicas: challenge.Spec.HorizontalPodAutoscalerSpec.MaxReplicas, 18 | MinReplicas: challenge.Spec.HorizontalPodAutoscalerSpec.MinReplicas, 19 | TargetCPUUtilizationPercentage: challenge.Spec.HorizontalPodAutoscalerSpec.TargetCPUUtilizationPercentage, 20 | ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ 21 | Kind: "Deployment", 22 | Name: challenge.Name, 23 | APIVersion: "apps/v1", 24 | }, 25 | }, 26 | } 27 | 28 | return autoscaling 29 | } 30 | 
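// As a worked example (values taken from config/samples/mychal.yaml, where
// minReplicas is 1 and maxReplicas is 2): generate() emits an HPA named
// "mychal" in namespace "mychal" whose ScaleTargetRef points at the apps/v1
// Deployment of the same name, so the HPA always tracks the Deployment that
// deployment.go creates for the challenge.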
-------------------------------------------------------------------------------- /kctf-operator/controllers/deployment/deployment-with-healthcheck.go: -------------------------------------------------------------------------------- 1 | package deployment 2 | 3 | import ( 4 | kctfv1 "github.com/google/kctf/api/v1" 5 | utils "github.com/google/kctf/controllers/utils" 6 | appsv1 "k8s.io/api/apps/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | resource "k8s.io/apimachinery/pkg/api/resource" 9 | intstr "k8s.io/apimachinery/pkg/util/intstr" 10 | ) 11 | 12 | // Deployment with Healthcheck 13 | func withHealthcheck(challenge *kctfv1.Challenge) *appsv1.Deployment { 14 | dep := deployment(challenge) 15 | 16 | idx_challenge := utils.IndexOfContainer("challenge", dep.Spec.Template.Spec.Containers) 17 | idx_healthcheck := utils.IndexOfContainer("healthcheck", dep.Spec.Template.Spec.Containers) 18 | 19 | challengeContainer := &dep.Spec.Template.Spec.Containers[idx_challenge] 20 | 21 | // Get the container with the challenge and add healthcheck configurations 22 | livenessProbe := &challengeContainer.LivenessProbe 23 | if *livenessProbe == nil { 24 | *livenessProbe = &corev1.Probe{ 25 | FailureThreshold: 2, 26 | ProbeHandler: corev1.ProbeHandler{ 27 | HTTPGet: &corev1.HTTPGetAction{ 28 | Path: "/healthz", 29 | Port: intstr.FromInt(45281), 30 | }, 31 | }, 32 | InitialDelaySeconds: 45, 33 | TimeoutSeconds: 3, 34 | PeriodSeconds: 30, 35 | } 36 | } 37 | 38 | readinessProbe := &challengeContainer.ReadinessProbe 39 | if *readinessProbe == nil { 40 | *readinessProbe = &corev1.Probe{ 41 | ProbeHandler: corev1.ProbeHandler{ 42 | HTTPGet: &corev1.HTTPGetAction{ 43 | Path: "/healthz", 44 | Port: intstr.FromInt(45281), 45 | }, 46 | }, 47 | InitialDelaySeconds: 5, 48 | TimeoutSeconds: 3, 49 | PeriodSeconds: 5, 50 | } 51 | } 52 | 53 | if idx_healthcheck == -1 { 54 | healthcheckContainer := corev1.Container{ 55 | Name: "healthcheck", 56 | } 57 | dep.Spec.Template.Spec.Containers = append(dep.Spec.Template.Spec.Containers, healthcheckContainer) 58 | idx_healthcheck = len(dep.Spec.Template.Spec.Containers) - 1 59 | } 60 | 61 | healthcheckContainer := &dep.Spec.Template.Spec.Containers[idx_healthcheck] 62 | 63 | healthcheckContainer.Image = challenge.Spec.Healthcheck.Image 64 | healthcheckContainer.Resources = corev1.ResourceRequirements{ 65 | Limits: corev1.ResourceList{ 66 | "cpu": *resource.NewMilliQuantity(1000, resource.DecimalSI), 67 | }, 68 | Requests: corev1.ResourceList{ 69 | "cpu": *resource.NewMilliQuantity(50, resource.DecimalSI), 70 | }, 71 | } 72 | 73 | healthcheckContainer.VolumeMounts = []corev1.VolumeMount{{ 74 | Name: "pow-bypass", 75 | ReadOnly: true, 76 | MountPath: "/pow-bypass", 77 | }} 78 | 79 | healthcheckVolume := corev1.Volume{ 80 | Name: "pow-bypass", 81 | VolumeSource: corev1.VolumeSource{ 82 | Secret: &corev1.SecretVolumeSource{ 83 | SecretName: "pow-bypass", 84 | }, 85 | }, 86 | } 87 | 88 | dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, healthcheckVolume) 89 | 90 | return dep 91 | } 92 | -------------------------------------------------------------------------------- /kctf-operator/controllers/deployment/deployment.go: -------------------------------------------------------------------------------- 1 | package deployment 2 | 3 | import ( 4 | kctfv1 "github.com/google/kctf/api/v1" 5 | utils "github.com/google/kctf/controllers/utils" 6 | appsv1 "k8s.io/api/apps/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | // Deployment 
without Healthcheck 12 | func deployment(challenge *kctfv1.Challenge) *appsv1.Deployment { 13 | var replicas int32 = 1 14 | if challenge.Spec.Replicas != nil { 15 | replicas = *challenge.Spec.Replicas 16 | } 17 | 18 | var readOnlyRootFilesystem = true 19 | 20 | deployment := &appsv1.Deployment{ 21 | ObjectMeta: metav1.ObjectMeta{ 22 | Name: challenge.Name, 23 | Namespace: challenge.Namespace, 24 | Labels: map[string]string{"app": challenge.Name}, 25 | }, 26 | Spec: appsv1.DeploymentSpec{ 27 | Replicas: &replicas, 28 | Selector: &metav1.LabelSelector{ 29 | MatchLabels: map[string]string{"app": challenge.Name}, 30 | }, 31 | }, 32 | } 33 | 34 | if challenge.Spec.PodTemplate != nil { 35 | deployment.Spec.Template = challenge.Spec.PodTemplate.Template 36 | } 37 | 38 | // Find the index of the challenge container, if present: 39 | idx_challenge := utils.IndexOfContainer("challenge", deployment.Spec.Template.Spec.Containers) 40 | 41 | // if idx_challenge is -1, the pod template doesn't contain a container called challenge 42 | if idx_challenge == -1 { 43 | // Creates a container called challenge 44 | challengeContainer := corev1.Container{ 45 | Name: "challenge", 46 | } 47 | deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, 48 | challengeContainer) 49 | idx_challenge = len(deployment.Spec.Template.Spec.Containers) - 1 50 | } 51 | 52 | // Changes what needs to be changed in the Template and in the challenge container 53 | deployment.Spec.Template.ObjectMeta = metav1.ObjectMeta{ 54 | Labels: map[string]string{"app": challenge.Name}, 55 | Annotations: map[string]string{ 56 | "container.apparmor.security.beta.kubernetes.io/challenge": "unconfined", 57 | }, 58 | } 59 | // Set container ports based on the ports that were passed 60 | deployment.Spec.Template.Spec.Containers[idx_challenge].Ports = containerPorts(challenge) 61 | // Set the container's other configurations 62 | deployment.Spec.Template.Spec.Containers[idx_challenge].Image = challenge.Spec.Image 63 | if deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext == nil { 64 | deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext = &corev1.SecurityContext{} 65 | } 66 | if deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext.ReadOnlyRootFilesystem == nil { 67 | deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext.ReadOnlyRootFilesystem = &readOnlyRootFilesystem 68 | } 69 | if deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext.Capabilities == nil { 70 | deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext.Capabilities = &corev1.Capabilities{} 71 | } 72 | 73 | deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext.Capabilities.Add = 74 | append(deployment.Spec.Template.Spec.Containers[idx_challenge].SecurityContext.Capabilities.Add, "SYS_ADMIN") 75 | 76 | volumeMounts := []corev1.VolumeMount{ 77 | { 78 | Name: "pow", 79 | ReadOnly: true, 80 | MountPath: "/kctf/pow", 81 | }, 82 | { 83 | Name: "pow-bypass-pub", 84 | ReadOnly: true, 85 | MountPath: "/kctf/pow-bypass", 86 | }, 87 | } 88 | 89 | deployment.Spec.Template.Spec.Containers[idx_challenge].VolumeMounts = 90 | append(deployment.Spec.Template.Spec.Containers[idx_challenge].VolumeMounts, volumeMounts...)
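// Note: the objects backing these mounts are reconciled elsewhere: the
// "<challenge>-pow" ConfigMap by controllers/pow and the "pow-bypass-pub"
// Secret by controllers/secrets (both shown later in this tree).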
91 | 92 | volumes := []corev1.Volume{{ 93 | Name: "pow", 94 | VolumeSource: corev1.VolumeSource{ 95 | ConfigMap: &corev1.ConfigMapVolumeSource{ 96 | LocalObjectReference: corev1.LocalObjectReference{ 97 | Name: challenge.Name + "-pow", 98 | }, 99 | }, 100 | }, 101 | }, 102 | { 103 | Name: "pow-bypass-pub", 104 | VolumeSource: corev1.VolumeSource{ 105 | Secret: &corev1.SecretVolumeSource{ 106 | SecretName: "pow-bypass-pub", 107 | }, 108 | }, 109 | }} 110 | 111 | deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volumes...) 112 | 113 | return deployment 114 | } 115 | -------------------------------------------------------------------------------- /kctf-operator/controllers/deployment/functions.go: -------------------------------------------------------------------------------- 1 | // Creates deployment 2 | 3 | package deployment 4 | 5 | import ( 6 | "context" 7 | "reflect" 8 | 9 | "github.com/go-logr/logr" 10 | kctfv1 "github.com/google/kctf/api/v1" 11 | appsv1 "k8s.io/api/apps/v1" 12 | corev1 "k8s.io/api/core/v1" 13 | "k8s.io/apimachinery/pkg/api/errors" 14 | "k8s.io/apimachinery/pkg/runtime" 15 | "k8s.io/apimachinery/pkg/types" 16 | "sigs.k8s.io/controller-runtime/pkg/client" 17 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 18 | ) 19 | 20 | func isEqual(deploymentFound *appsv1.Deployment, 21 | deployment *appsv1.Deployment) bool { 22 | return reflect.DeepEqual(deploymentFound.Spec.Template.Spec, 23 | deployment.Spec.Template.Spec) 24 | } 25 | 26 | func containerPorts(challenge *kctfv1.Challenge) []corev1.ContainerPort { 27 | ports := []corev1.ContainerPort{} 28 | 29 | for _, port := range challenge.Spec.Network.Ports { 30 | containerPort := corev1.ContainerPort{ 31 | ContainerPort: port.TargetPort.IntVal, 32 | } 33 | ports = append(ports, containerPort) 34 | } 35 | 36 | return ports 37 | } 38 | 39 | // generate returns a challenge Deployment object 40 | func generate(challenge *kctfv1.Challenge) *appsv1.Deployment { 41 | if challenge.Spec.Healthcheck.Enabled { 42 | return withHealthcheck(challenge) 43 | } else { 44 | return deployment(challenge) 45 | } 46 | } 47 | 48 | func create(challenge *kctfv1.Challenge, cl client.Client, scheme *runtime.Scheme, 49 | log logr.Logger, ctx context.Context) (bool, error) { 50 | dep := generate(challenge) 51 | log.Info("Creating a new Deployment", "Deployment.Namespace", 52 | dep.Namespace, "Deployment.Name", dep.Name) 53 | 54 | // Set Challenge instance as the owner and controller 55 | controllerutil.SetControllerReference(challenge, dep, scheme) 56 | 57 | err := cl.Create(ctx, dep) 58 | 59 | if err != nil { 60 | log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", 61 | dep.Namespace, "Deployment.Name", dep.Name) 62 | return false, err 63 | } 64 | 65 | // Deployment created successfully - return and requeue 66 | return true, nil 67 | } 68 | 69 | func Update(challenge *kctfv1.Challenge, client client.Client, scheme *runtime.Scheme, 70 | log logr.Logger, ctx context.Context) (bool, error) { 71 | // Flags if there was a change 72 | change := false 73 | 74 | deploymentFound := &appsv1.Deployment{} 75 | err := client.Get(ctx, types.NamespacedName{Name: challenge.Name, 76 | Namespace: challenge.Namespace}, deploymentFound) 77 | 78 | // Only enters here if it's a new deployment 79 | if err != nil && errors.IsNotFound(err) { 80 | // Define a new deployment 81 | return create(challenge, client, scheme, log, ctx) 82 | 83 | } else if err != nil { 84 | log.Error(err, 
"Couldn't get the deployment", "Challenge Name: ", 85 | challenge.Name, " with namespace ", challenge.Namespace) 86 | return false, err 87 | } 88 | 89 | // Checks if the deployment is correctly set 90 | if dep := generate(challenge); !isEqual(deploymentFound, dep) { 91 | change = true 92 | deploymentFound.Spec.Template.Spec = dep.Spec.Template.Spec 93 | } 94 | 95 | // Ensure if the challenge is ready and, if not, set replicas to 0 96 | changedReplicas := updateNumReplicas(deploymentFound.Spec.Replicas, challenge) 97 | // Ensure that the images of the challenge and of the healthcheck are the same as the ones in the CR 98 | changedImage := updateImages(deploymentFound, challenge) 99 | 100 | // Checks if there was a change in the deployment 101 | change = change || changedReplicas || changedImage 102 | 103 | // Updates deployment with client 104 | if change == true { 105 | err = client.Update(ctx, deploymentFound) 106 | if err != nil { 107 | log.Error(err, "Failed to update deployment", "Challenge Name: ", 108 | challenge.Name, " with namespace ", challenge.Namespace) 109 | return false, err 110 | } 111 | log.Info("Deployment updated succesfully", "Name: ", 112 | challenge.Name, " with namespace ", challenge.Namespace) 113 | return true, nil 114 | } 115 | 116 | return false, nil 117 | } 118 | -------------------------------------------------------------------------------- /kctf-operator/controllers/deployment/image.go: -------------------------------------------------------------------------------- 1 | package deployment 2 | 3 | import ( 4 | kctfv1 "github.com/google/kctf/api/v1" 5 | utils "github.com/google/kctf/controllers/utils" 6 | appsv1 "k8s.io/api/apps/v1" 7 | ) 8 | 9 | func updateImages(deploymentFound *appsv1.Deployment, challenge *kctfv1.Challenge) bool { 10 | // Check if the image was changed and change it if necessary 11 | change := false 12 | idxChallenge := utils.IndexOfContainer("challenge", deploymentFound.Spec.Template.Spec.Containers) 13 | idxHealthcheck := utils.IndexOfContainer("healthcheck", deploymentFound.Spec.Template.Spec.Containers) 14 | 15 | if deploymentFound.Spec.Template.Spec.Containers[idxChallenge].Image != challenge.Spec.Image { 16 | deploymentFound.Spec.Template.Spec.Containers[idxChallenge].Image = challenge.Spec.Image 17 | change = true 18 | } 19 | if challenge.Spec.Healthcheck.Enabled == true { 20 | if deploymentFound.Spec.Template.Spec.Containers[idxHealthcheck].Image != challenge.Spec.Healthcheck.Image { 21 | deploymentFound.Spec.Template.Spec.Containers[idxHealthcheck].Image = challenge.Spec.Healthcheck.Image 22 | change = true 23 | } 24 | } 25 | 26 | return change 27 | } 28 | -------------------------------------------------------------------------------- /kctf-operator/controllers/deployment/replicas.go: -------------------------------------------------------------------------------- 1 | package deployment 2 | 3 | import kctfv1 "github.com/google/kctf/api/v1" 4 | 5 | func numReplicas(challenge *kctfv1.Challenge) int32 { 6 | if challenge.Spec.Deployed == false { 7 | return 0 8 | } 9 | 10 | if challenge.Spec.HorizontalPodAutoscalerSpec != nil { 11 | return -1 12 | } 13 | 14 | if challenge.Spec.Replicas != nil { 15 | return *challenge.Spec.Replicas 16 | } 17 | 18 | return 1 19 | } 20 | 21 | func updateNumReplicas(currentReplicas *int32, challenge *kctfv1.Challenge) bool { 22 | // Updates the number of replicas according to being deployed or not and considering the autoscaling 23 | replicas := numReplicas(challenge) 24 | 25 | // replicas = -1 means 
autoscaling is enabled and the challenge is deployed 26 | if replicas != *currentReplicas && replicas != -1 { 27 | *currentReplicas = replicas 28 | return true 29 | } 30 | 31 | return false 32 | } 33 | -------------------------------------------------------------------------------- /kctf-operator/controllers/network-policy/functions.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | 7 | "github.com/go-logr/logr" 8 | kctfv1 "github.com/google/kctf/api/v1" 9 | netv1 "k8s.io/api/networking/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/types" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 15 | ) 16 | 17 | func isEqual(existingPolicy *netv1.NetworkPolicy, newPolicy *netv1.NetworkPolicy) bool { 18 | return reflect.DeepEqual(existingPolicy.Spec, newPolicy.Spec) 19 | } 20 | 21 | func Update(challenge *kctfv1.Challenge, cl client.Client, scheme *runtime.Scheme, 22 | log logr.Logger, ctx context.Context) (bool, error) { 23 | requeue := false 24 | var err error 25 | 26 | for _, policy := range generatePolicies(challenge) { 27 | requeue, err = updatePolicy(ctx, policy, challenge, cl, scheme, log) 28 | if err != nil { 29 | return false, err 30 | } 31 | } 32 | 33 | return requeue, nil 34 | } 35 | 36 | func updatePolicy(ctx context.Context, policy netv1.NetworkPolicy, challenge *kctfv1.Challenge, 37 | cl client.Client, scheme *runtime.Scheme, log logr.Logger) (bool, error) { 38 | existingPolicy := &netv1.NetworkPolicy{} 39 | err := cl.Get(ctx, types.NamespacedName{Name: policy.ObjectMeta.Name, 40 | Namespace: policy.ObjectMeta.Namespace}, existingPolicy) 41 | 42 | // Only enters here if it's a new policy 43 | if err != nil && errors.IsNotFound(err) { 44 | // Create a new object 45 | controllerutil.SetControllerReference(challenge, &policy, scheme) 46 | err = cl.Create(ctx, &policy) 47 | if err != nil { 48 | log.Error(err, "Failed to create Policy", " Name: ", 49 | policy.ObjectMeta.Name, " with namespace ", policy.ObjectMeta.Namespace) 50 | return false, err 51 | } 52 | return true, nil 53 | } else if err != nil { 54 | log.Error(err, "Couldn't get the Policy", " Name: ", 55 | policy.ObjectMeta.Name, " with namespace ", policy.ObjectMeta.Namespace) 56 | return false, err 57 | } 58 | 59 | if !isEqual(existingPolicy, &policy) { 60 | existingPolicy.Spec = policy.Spec 61 | err = cl.Update(ctx, existingPolicy) 62 | if err != nil { 63 | log.Error(err, "Failed to update Policy", " Name: ", 64 | policy.ObjectMeta.Name, " with namespace ", policy.ObjectMeta.Namespace) 65 | return false, err 66 | } 67 | 68 | log.Info("Policy updated successfully", " Name: ", 69 | policy.ObjectMeta.Name, " with namespace ", policy.ObjectMeta.Namespace) 70 | return true, nil 71 | } 72 | 73 | return false, nil 74 | } 75 | -------------------------------------------------------------------------------- /kctf-operator/controllers/network-policy/network-policy.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "fmt" 5 | 6 | kctfv1 "github.com/google/kctf/api/v1" 7 | netv1 "k8s.io/api/networking/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | func generatePolicies(challenge *kctfv1.Challenge) []netv1.NetworkPolicy { 12 | var egressRules = make([]netv1.NetworkPolicyEgressRule, len(challenge.Spec.AllowConnectTo)) 13 | 
for i, targetName := range challenge.Spec.AllowConnectTo {
14 | egressRules[i] = netv1.NetworkPolicyEgressRule{
15 | To: []netv1.NetworkPolicyPeer{
16 | {
17 | PodSelector: &metav1.LabelSelector{
18 | MatchLabels: map[string]string{
19 | "app": targetName,
20 | },
21 | },
22 | },
23 | },
24 | }
25 | }
26 |
27 | challengeAccessPolicy := netv1.NetworkPolicy{
28 | ObjectMeta: metav1.ObjectMeta{
29 | Name: fmt.Sprintf("%v-challenge-access", challenge.Name),
30 | Namespace: challenge.Namespace,
31 | },
32 | Spec: netv1.NetworkPolicySpec{
33 | PolicyTypes: []netv1.PolicyType{"Egress"},
34 | PodSelector: metav1.LabelSelector{
35 | MatchLabels: map[string]string{
36 | "app": challenge.Name,
37 | },
38 | },
39 | Egress: egressRules,
40 | },
41 | }
42 |
43 | return []netv1.NetworkPolicy{challengeAccessPolicy}
44 | }
45 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/pow/configmap.go: --------------------------------------------------------------------------------
1 | package pow
2 |
3 | import (
4 | "strconv"
5 |
6 | kctfv1 "github.com/google/kctf/api/v1"
7 | corev1 "k8s.io/api/core/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | )
10 |
11 | // Generates the ConfigMap that specifies how difficult the proof of work should be
12 | func generate(challenge *kctfv1.Challenge) *corev1.ConfigMap {
13 | data := map[string]string{
14 | "pow.conf": strconv.Itoa(challenge.Spec.PowDifficultySeconds*1337) + "\n",
15 | }
16 | configmap := &corev1.ConfigMap{
17 | ObjectMeta: metav1.ObjectMeta{
18 | Name: challenge.Name + "-pow",
19 | Namespace: challenge.Namespace,
20 | },
21 | Data: data,
22 | }
23 |
24 | return configmap
25 | }
26 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/pow/functions.go: --------------------------------------------------------------------------------
1 | package pow
2 |
3 | import (
4 | "context"
5 | "reflect"
6 |
7 | "github.com/go-logr/logr"
8 | kctfv1 "github.com/google/kctf/api/v1"
9 | corev1 "k8s.io/api/core/v1"
10 | "k8s.io/apimachinery/pkg/api/errors"
11 | "k8s.io/apimachinery/pkg/runtime"
12 | "k8s.io/apimachinery/pkg/types"
13 | "sigs.k8s.io/controller-runtime/pkg/client"
14 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
15 | )
16 |
17 | func isEqual(configmapFound *corev1.ConfigMap, configmap *corev1.ConfigMap) bool {
18 | return reflect.DeepEqual(configmapFound.Data,
19 | configmap.Data)
20 | }
21 |
22 | // Creates the pow ConfigMap
23 | func create(challenge *kctfv1.Challenge, client client.Client, scheme *runtime.Scheme,
24 | log logr.Logger, ctx context.Context) (bool, error) {
25 | // creates the pow configmap, which doesn't exist yet
26 | configmap := generate(challenge)
27 | log.Info("Creating a ConfigMap for Proof of work", "ConfigMap name: ",
28 | configmap.Name, " with namespace ", configmap.Namespace)
29 |
30 | // Creates owner references
31 | controllerutil.SetControllerReference(challenge, configmap, scheme)
32 |
33 | // Creates configmap
34 | err := client.Create(ctx, configmap)
35 |
36 | if err != nil {
37 | log.Error(err, "Failed to create ConfigMap for Proof of work", "ConfigMap name: ",
38 | configmap.Name, " with namespace ", configmap.Namespace)
39 | return false, err
40 | }
41 |
42 | return true, nil
43 | }
44 |
45 | func Update(challenge *kctfv1.Challenge, cl client.Client, scheme *runtime.Scheme,
46 | log logr.Logger, ctx context.Context) (bool, error) {
47 | configmapFound := &corev1.ConfigMap{}
48 | err := cl.Get(ctx,
types.NamespacedName{Name: challenge.Name + "-pow",
49 | Namespace: challenge.Namespace}, configmapFound)
50 |
51 | // Only reached if the configmap doesn't exist yet
52 | if err != nil && errors.IsNotFound(err) {
53 | // Create a new configmap
54 | return create(challenge, cl, scheme, log, ctx)
55 |
56 | } else if err != nil {
57 | log.Error(err, "Couldn't get the ConfigMap of Proof of work", "Configmap Name: ",
58 | challenge.Name+"-pow", " with namespace ", challenge.Namespace)
59 | return false, err
60 | }
61 |
62 | // Checks if the ConfigMap is correctly set
63 | if configmap := generate(challenge); !isEqual(configmapFound, configmap) {
64 | configmapFound.Data = configmap.Data
65 | err = cl.Update(ctx, configmapFound)
66 | if err != nil {
67 | log.Error(err, "Failed to update ConfigMap for Proof of work", "ConfigMap Name: ",
68 | "pow", " with namespace ", challenge.Namespace)
69 | return false, err
70 | }
71 | log.Info("ConfigMap for Proof of Work updated successfully", "Name: ",
72 | "pow", " with namespace ", challenge.Namespace)
73 | return true, nil
74 | }
75 |
76 | return false, nil
77 | }
78 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/secrets/functions.go: --------------------------------------------------------------------------------
1 | package secrets
2 |
3 | import (
4 | "context"
5 | "reflect"
6 |
7 | "github.com/go-logr/logr"
8 | kctfv1 "github.com/google/kctf/api/v1"
9 | corev1 "k8s.io/api/core/v1"
10 | "k8s.io/apimachinery/pkg/api/errors"
11 | "k8s.io/apimachinery/pkg/runtime"
12 | "k8s.io/apimachinery/pkg/types"
13 | "sigs.k8s.io/controller-runtime/pkg/client"
14 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
15 | )
16 |
17 | func isEqual(secretFound *corev1.Secret, secret *corev1.Secret) bool {
18 | return reflect.DeepEqual(secretFound.Data, secret.Data)
19 | }
20 |
21 | // Creates the secret
22 | func create(secretName string, challenge *kctfv1.Challenge, client client.Client, scheme *runtime.Scheme,
23 | log logr.Logger, ctx context.Context) (bool, error) {
24 |
25 | secret, err := generate(secretName, challenge,
26 | client, scheme, log, ctx)
27 |
28 | if err != nil {
29 | log.Error(err, "Couldn't get the Secret from kctf-system", "Secret Name: ",
30 | secretName, " with namespace ", challenge.Namespace)
31 | return false, err
32 | }
33 |
34 | log.Info("Creating Secret", "Secret ", secret.Name,
35 | " with namespace ", challenge.Namespace)
36 |
37 | // Creates owner references
38 | controllerutil.SetControllerReference(challenge, secret, scheme)
39 |
40 | err = client.Create(ctx, secret)
41 | if err != nil {
42 | log.Error(err, "Failed to create Secret", "Secret name: ",
43 | secret.Name, " with namespace ", challenge.Namespace)
44 | return false, err
45 | }
46 |
47 | return true, nil
48 | }
49 |
50 | func Update(challenge *kctfv1.Challenge, cl client.Client, scheme *runtime.Scheme,
51 | log logr.Logger, ctx context.Context) (bool, error) {
52 | secrets := []string{"pow-bypass", "pow-bypass-pub", "tls-cert"}
53 | requeue := false
54 | var err error
55 |
56 | for _, secret := range secrets {
57 | // Creates or updates the secret
58 | requeue, err = updateSecret(secret, challenge, cl, scheme, log, ctx)
59 | if err != nil {
60 | return false, err
61 | }
62 | }
63 |
64 | return requeue, nil
65 | }
66 |
67 | func updateSecret(secretName string, challenge *kctfv1.Challenge,
68 | cl client.Client, scheme *runtime.Scheme, log logr.Logger, ctx context.Context) (bool, error) {
69 | secretFound := &corev1.Secret{}
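// Each of these secrets is a copy of the secret with the same name in the
// kctf-system namespace (see generate in secrets.go); the challenge's own
// namespace gets a mirror of it so that pods there can mount it.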
70 | err := cl.Get(ctx, types.NamespacedName{Name: secretName,
71 | Namespace: challenge.Namespace}, secretFound)
72 |
73 | // Only reached if the secret doesn't exist yet
74 | if err != nil && errors.IsNotFound(err) {
75 | // Create a new secret
76 | return create(secretName, challenge, cl, scheme, log, ctx)
77 |
78 | } else if err != nil {
79 | log.Error(err, "Couldn't get the Secret", "Secret Name: ",
80 | secretName, " with namespace ", challenge.Namespace)
81 | return false, err
82 | }
83 |
84 | // Checks if the secret is correctly set
85 | secret, err := generate(secretName, challenge,
86 | cl, scheme, log, ctx)
87 |
88 | if err != nil {
89 | log.Error(err, "Couldn't get the Secret from kctf-system", "Secret Name: ",
90 | secretName, " with namespace ", challenge.Namespace)
91 | return false, err
92 | }
93 |
94 | if !isEqual(secretFound, secret) {
95 | secretFound.Data = secret.Data
96 | err = cl.Update(ctx, secretFound)
97 | if err != nil {
98 | log.Error(err, "Failed to update Secret", "Secret Name: ",
99 | secretName, " with namespace ", challenge.Namespace)
100 | return false, err
101 | }
102 |
103 | log.Info("Secret updated successfully", "Name: ",
104 | secretName, " with namespace ", challenge.Namespace)
105 | return true, nil
106 | } else {
107 | log.Info("Secrets are the same", "name", secretName, "namespace", challenge.Namespace)
108 | }
109 |
110 | return false, nil
111 | }
112 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/secrets/secrets.go: --------------------------------------------------------------------------------
1 | package secrets
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/go-logr/logr"
7 | kctfv1 "github.com/google/kctf/api/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/apimachinery/pkg/runtime"
11 | "k8s.io/apimachinery/pkg/types"
12 | "sigs.k8s.io/controller-runtime/pkg/client"
13 | )
14 |
15 | func generate(secretName string, challenge *kctfv1.Challenge,
16 | cl client.Client, scheme *runtime.Scheme, log logr.Logger,
17 | ctx context.Context) (*corev1.Secret, error) {
18 | // We get the secret from kctf-system
19 | secretKube := &corev1.Secret{}
20 | err := cl.Get(ctx, types.NamespacedName{Name: secretName,
21 | Namespace: "kctf-system"}, secretKube)
22 |
23 | if err != nil {
24 | return secretKube, err
25 | }
26 |
27 | secret := &corev1.Secret{
28 | ObjectMeta: metav1.ObjectMeta{
29 | Name: secretName,
30 | Namespace: challenge.Namespace,
31 | },
32 |
33 | Data: secretKube.Data,
34 | }
35 |
36 | return secret, nil
37 | }
38 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/service/service.go: --------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "strconv"
7 | "strings"
8 |
9 | gkenetv1 "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/apis/networking.gke.io/v1"
10 | kctfv1 "github.com/google/kctf/api/v1"
11 | corev1 "k8s.io/api/core/v1"
12 | netv1 "k8s.io/api/networking/v1"
13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14 | backendv1 "k8s.io/ingress-gce/pkg/apis/backendconfig/v1"
15 | )
16 |
17 | func generateNodePortService(challenge *kctfv1.Challenge) *corev1.Service {
18 | service := &corev1.Service{
19 | ObjectMeta: metav1.ObjectMeta{
20 | Name: challenge.Name,
21 | Namespace: challenge.Namespace,
22 | Labels: map[string]string{"app": challenge.Name},
23 |
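// The annotation on the next line ties this Service to the BackendConfig
// generated by generateBackendConfig below; for a hypothetical challenge named
// "mychal" it renders as {"default": "mychal"}, which is how GKE attaches the
// optional security policy configured via the SECURITY_POLICY env var.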
Annotations: map[string]string{"cloud.google.com/backend-config": fmt.Sprintf("{\"default\": \"%s\"}", challenge.Name)}, 24 | }, 25 | Spec: corev1.ServiceSpec{ 26 | Selector: map[string]string{"app": challenge.Name}, 27 | Type: "NodePort", 28 | Ports: []corev1.ServicePort{}, 29 | }, 30 | } 31 | 32 | portsSeen := make(map[int32]bool) 33 | 34 | for i, port := range challenge.Spec.Network.Ports { 35 | if portsSeen[port.Port] { 36 | continue 37 | } 38 | portsSeen[port.Port] = true 39 | 40 | protocol := corev1.ProtocolTCP 41 | switch port.Protocol { 42 | case corev1.ProtocolSCTP, corev1.ProtocolTCP, corev1.ProtocolUDP: 43 | protocol = port.Protocol 44 | } 45 | 46 | servicePort := port.Port 47 | if servicePort == 0 { 48 | servicePort = port.TargetPort.IntVal 49 | } 50 | 51 | portName := port.Name 52 | if portName == "" { 53 | portName = "port-" + strconv.Itoa(i) 54 | } 55 | 56 | service.Spec.Ports = append(service.Spec.Ports, corev1.ServicePort{ 57 | Port: servicePort, 58 | TargetPort: port.TargetPort, 59 | Protocol: protocol, 60 | Name: portName, 61 | }) 62 | } 63 | 64 | return service 65 | } 66 | 67 | func generateBackendConfig(challenge *kctfv1.Challenge) *backendv1.BackendConfig { 68 | config := &backendv1.BackendConfig{ 69 | ObjectMeta: metav1.ObjectMeta{ 70 | Name: challenge.Name, 71 | Namespace: challenge.Namespace, 72 | }, 73 | Spec: backendv1.BackendConfigSpec{}, 74 | } 75 | if os.Getenv("SECURITY_POLICY") != "DISABLED" { 76 | config.Spec.SecurityPolicy = &backendv1.SecurityPolicyConfig{ 77 | Name: os.Getenv("SECURITY_POLICY"), 78 | } 79 | } 80 | return config 81 | } 82 | 83 | func findHTTPSPort(challenge *kctfv1.Challenge) *kctfv1.PortSpec { 84 | for _, port := range challenge.Spec.Network.Ports { 85 | // non-HTTPS is handled by generateLoadBalancerService 86 | if port.Protocol != "HTTPS" { 87 | continue 88 | } 89 | return &port 90 | } 91 | return nil 92 | } 93 | 94 | func generateManagedCertificate(challenge *kctfv1.Challenge, domains []string) *gkenetv1.ManagedCertificate { 95 | cert := &gkenetv1.ManagedCertificate{ 96 | ObjectMeta: metav1.ObjectMeta{ 97 | Name: challenge.Name, 98 | Namespace: challenge.Namespace, 99 | Labels: map[string]string{"app": challenge.Name}, 100 | }, 101 | Spec: gkenetv1.ManagedCertificateSpec{ 102 | Domains: domains, 103 | }, 104 | Status: gkenetv1.ManagedCertificateStatus{ 105 | DomainStatus: []gkenetv1.DomainStatus{}, 106 | }, 107 | } 108 | return cert 109 | } 110 | 111 | func generateIngress(domainName string, challenge *kctfv1.Challenge, port *kctfv1.PortSpec) *netv1.Ingress { 112 | // Ingress object 113 | ingress := &netv1.Ingress{ 114 | ObjectMeta: metav1.ObjectMeta{ 115 | Name: challenge.Name, 116 | Namespace: challenge.Namespace, 117 | Labels: map[string]string{"app": challenge.Name}, 118 | Annotations: map[string]string{}, 119 | }, 120 | Spec: netv1.IngressSpec{ 121 | TLS: []netv1.IngressTLS{{ 122 | SecretName: "tls-cert", 123 | }}, 124 | Rules: []netv1.IngressRule{{ 125 | Host: challenge.Name + "-web." 
+ domainName, 126 | }}, 127 | }, 128 | } 129 | 130 | servicePort := port.Port 131 | if servicePort == 0 { 132 | servicePort = port.TargetPort.IntVal 133 | } 134 | 135 | ingress.Spec.DefaultBackend = &netv1.IngressBackend{ 136 | Service: &netv1.IngressServiceBackend{ 137 | Name: challenge.Name, 138 | Port: netv1.ServiceBackendPort{ 139 | Number: int32(servicePort), 140 | }, 141 | }, 142 | } 143 | 144 | if port.Domains != nil { 145 | ingress.Annotations["networking.gke.io/managed-certificates"] = challenge.Name 146 | } 147 | 148 | return ingress 149 | } 150 | 151 | func generateLoadBalancerService(domainName string, challenge *kctfv1.Challenge) *corev1.Service { 152 | // Service object 153 | service := &corev1.Service{ 154 | ObjectMeta: metav1.ObjectMeta{ 155 | Name: challenge.Name + "-lb-service", 156 | Namespace: challenge.Namespace, 157 | Labels: map[string]string{"app": challenge.Name}, 158 | }, 159 | Spec: corev1.ServiceSpec{ 160 | Selector: map[string]string{"app": challenge.Name}, 161 | Type: "LoadBalancer", 162 | LoadBalancerSourceRanges: strings.Split(os.Getenv("ALLOWED_IPS"), ","), 163 | }, 164 | } 165 | 166 | for i, port := range challenge.Spec.Network.Ports { 167 | // HTTPS is handled by generateIngress 168 | if port.Protocol == "HTTPS" { 169 | continue 170 | } 171 | 172 | servicePortNumber := port.Port 173 | if servicePortNumber == 0 { 174 | servicePortNumber = port.TargetPort.IntVal 175 | } 176 | 177 | servicePort := corev1.ServicePort{ 178 | Port: servicePortNumber, 179 | TargetPort: port.TargetPort, 180 | Protocol: port.Protocol, 181 | } 182 | 183 | if port.Name != "" { 184 | servicePort.Name = port.Name 185 | } else { 186 | servicePort.Name = "port-" + strconv.Itoa(i) 187 | } 188 | 189 | service.Spec.Ports = append(service.Spec.Ports, servicePort) 190 | } 191 | 192 | service.ObjectMeta.Annotations = 193 | map[string]string{"external-dns.alpha.kubernetes.io/hostname": challenge.Name + "." 
+ domainName}
194 |
195 | return service
196 | }
197 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/set/default.go: --------------------------------------------------------------------------------
1 | // File that sets values correctly and returns default values that weren't specified
2 | package set
3 |
4 | import (
5 | kctfv1 "github.com/google/kctf/api/v1"
6 | "k8s.io/apimachinery/pkg/runtime"
7 | intstr "k8s.io/apimachinery/pkg/util/intstr"
8 | )
9 |
10 | // Function to return the default ports
11 | func portsDefault() []kctfv1.PortSpec {
12 | var portsDefault = []kctfv1.PortSpec{
13 | {
14 | // Keeping the same name as in previous network file
15 | Name: "netcat",
16 | Port: 1337,
17 | TargetPort: intstr.FromInt(1337),
18 | Protocol: "TCP",
19 | },
20 | }
21 | return portsDefault
22 | }
23 |
24 | // Function that fills in defaults for anything left unspecified
25 | func DefaultValues(challenge *kctfv1.Challenge, scheme *runtime.Scheme) {
26 | // Sets default ports
27 | if challenge.Spec.Network.Ports == nil {
28 | challenge.Spec.Network.Ports = portsDefault()
29 | }
30 | }
31 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/status/functions.go: --------------------------------------------------------------------------------
1 | package status
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/go-logr/logr"
7 | kctfv1 "github.com/google/kctf/api/v1"
8 | utils "github.com/google/kctf/controllers/utils"
9 | corev1 "k8s.io/api/core/v1"
10 | "k8s.io/apimachinery/pkg/labels"
11 | "sigs.k8s.io/controller-runtime/pkg/client"
12 | )
13 |
14 | func Update(requeue bool, err error, challenge *kctfv1.Challenge, cl client.Client,
15 | log logr.Logger, ctx context.Context) error {
16 |
17 | pods := &corev1.PodList{}
18 | var listOption client.ListOption
19 | listOption = &client.ListOptions{
20 | Namespace: challenge.Namespace,
21 | LabelSelector: labels.SelectorFromSet(map[string]string{"app": challenge.Name}),
22 | }
23 |
24 | err_list := cl.List(ctx, pods, listOption)
25 |
26 | if err_list == nil {
27 | // First we find the right pod
28 | for _, pod := range pods.Items {
29 | idx_challenge := utils.IndexOfContainer("challenge", pod.Spec.Containers)
30 | idx_healthcheck := utils.IndexOfContainer("healthcheck", pod.Spec.Containers)
31 |
32 | // Whether this pod looks right as far as the healthcheck container is concerned
33 | right_healthcheck := !challenge.Spec.Healthcheck.Enabled
34 |
35 | // Avoid errors if the pod is being terminated
36 | if len(pod.Spec.Containers) != 0 {
37 | if !right_healthcheck && idx_healthcheck != -1 {
38 | if pod.Spec.Containers[idx_healthcheck].Image != "healthcheck" {
39 | right_healthcheck = true
40 | }
41 | }
42 | // We take the right pod (if the challenge is not healthy,
43 | // there may be multiple pods)
44 | if idx_challenge > -1 && pod.Spec.Containers[idx_challenge].Image != "challenge" && right_healthcheck {
45 | // We update the status
46 | challenge.Status.Status = pod.Status.Phase
47 |
48 | // Then we update Health
49 | if !challenge.Spec.Healthcheck.Enabled || idx_challenge >= len(pod.Status.ContainerStatuses) {
50 | challenge.Status.Health = "disabled"
51 | } else {
52 | // We check if the challenge is ready to know if it's healthy
53 | if !pod.Status.ContainerStatuses[idx_challenge].Ready {
54 | challenge.Status.Health = "unhealthy"
55 | } else {
56 | challenge.Status.Health =
"healthy" 57 | } 58 | } 59 | } 60 | } 61 | } 62 | } else { 63 | log.Error(err_list, "Failed to get pods") 64 | } 65 | 66 | err_status := cl.Status().Update(ctx, challenge) 67 | 68 | if err_status != nil { 69 | log.Error(err_status, "Error updating status") 70 | } 71 | 72 | return err_status 73 | } 74 | -------------------------------------------------------------------------------- /kctf-operator/controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "path/filepath" 21 | "testing" 22 | 23 | . "github.com/onsi/ginkgo/v2" 24 | . "github.com/onsi/gomega" 25 | "k8s.io/client-go/kubernetes/scheme" 26 | "k8s.io/client-go/rest" 27 | "sigs.k8s.io/controller-runtime/pkg/client" 28 | "sigs.k8s.io/controller-runtime/pkg/envtest" 29 | logf "sigs.k8s.io/controller-runtime/pkg/log" 30 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 31 | 32 | kctfv1 "github.com/google/kctf/api/v1" 33 | //+kubebuilder:scaffold:imports 34 | ) 35 | 36 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 37 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
38 |
39 | var cfg *rest.Config
40 | var k8sClient client.Client
41 | var testEnv *envtest.Environment
42 |
43 | func TestAPIs(t *testing.T) {
44 | RegisterFailHandler(Fail)
45 |
46 | RunSpecs(t, "Controller Suite")
47 | }
48 |
49 | var _ = BeforeSuite(func() {
50 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
51 |
52 | By("bootstrapping test environment")
53 | testEnv = &envtest.Environment{
54 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
55 | ErrorIfCRDPathMissing: true,
56 | }
57 |
58 | cfg, err := testEnv.Start()
59 | Expect(err).NotTo(HaveOccurred())
60 | Expect(cfg).NotTo(BeNil())
61 |
62 | err = kctfv1.AddToScheme(scheme.Scheme)
63 | Expect(err).NotTo(HaveOccurred())
64 |
65 | //+kubebuilder:scaffold:scheme
66 |
67 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
68 | Expect(err).NotTo(HaveOccurred())
69 | Expect(k8sClient).NotTo(BeNil())
70 |
71 | })
72 |
73 | var _ = AfterSuite(func() {
74 | By("tearing down the test environment")
75 | err := testEnv.Stop()
76 | Expect(err).NotTo(HaveOccurred())
77 | })
78 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/utils/utils.go: --------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/go-logr/logr"
7 | kctfv1 "github.com/google/kctf/api/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | "k8s.io/apimachinery/pkg/api/errors"
10 | "k8s.io/apimachinery/pkg/types"
11 | "sigs.k8s.io/controller-runtime/pkg/client"
12 | )
13 |
14 | // Gets the configmap that contains the domain name and returns it
15 | func GetDomainName(challenge *kctfv1.Challenge, client client.Client,
16 | log logr.Logger, ctx context.Context) string {
17 | domainName := ""
18 | configmap := &corev1.ConfigMap{}
19 |
20 | err := client.Get(ctx, types.NamespacedName{Name: "external-dns",
21 | Namespace: "kctf-system"}, configmap)
22 |
23 | if err != nil && !errors.IsNotFound(err) {
24 | log.Error(err, "Couldn't get the configmap of the domain name.")
25 | }
26 |
27 | if err == nil {
28 | domainName = configmap.Data["DOMAIN_NAME"]
29 | }
30 |
31 | return domainName
32 | }
33 |
34 | // Finds the index of the container with a specific name in a list of containers
35 | func IndexOfContainer(name string, containers []corev1.Container) int {
36 | for i, container := range containers {
37 | if container.Name == name {
38 | return i
39 | }
40 | }
41 | return -1
42 | }
43 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/volumes/functions.go: --------------------------------------------------------------------------------
1 | package volumes
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/go-logr/logr"
7 | kctfv1 "github.com/google/kctf/api/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | "k8s.io/apimachinery/pkg/labels"
10 | "k8s.io/apimachinery/pkg/runtime"
11 | "k8s.io/apimachinery/pkg/types"
12 | "sigs.k8s.io/controller-runtime/pkg/client"
13 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
14 | )
15 |
16 | // Function that maps names of the persistent volume claims in the list to their index
17 | func mapNameIdx(persistentVolumeClaimsFound *corev1.PersistentVolumeClaimList) map[string]int {
18 | m := make(map[string]int)
19 |
20 | for idx, item := range persistentVolumeClaimsFound.Items {
21 | m[item.Name] = idx
22 | }
23 |
24 | return m
25 | }
26 |
27 | // Calls creation of persistent volume
claim and persistent volume
28 | func create(challenge *kctfv1.Challenge, claim string,
29 | client client.Client, scheme *runtime.Scheme, log logr.Logger, ctx context.Context) (bool, error) {
30 | pvc := persistentVolumeClaim(claim, challenge)
31 |
32 | // We set the ownership
33 | controllerutil.SetControllerReference(challenge, pvc, scheme)
34 |
35 | // First we create the persistent volume claim
36 | err := client.Create(ctx, pvc)
37 | if err != nil {
38 | log.Error(err, "Failed to create persistentVolumeClaim: ", "Name: ", pvc.Name,
39 | "Namespace: ", pvc.Namespace)
40 | return false, err
41 | }
42 |
43 | pv := persistentVolume(pvc, challenge)
44 | // We set the ownership
45 | controllerutil.SetControllerReference(challenge, pv, scheme)
46 |
47 | err = client.Create(ctx, pv)
48 |
49 | if err != nil {
50 | log.Error(err, "Failed to create persistentVolume: ", "Name: ", pv.Name, "Namespace: ",
51 | pv.Namespace)
52 | return false, err
53 | }
54 |
55 | return true, nil
56 | }
57 |
58 | // Named deleteVolumes to avoid clashing with the builtin delete,
59 | // which is used elsewhere to remove an element from a map.
60 | // This function deletes the persistentVolumeClaim and the associated persistentVolume
61 | func deleteVolumes(persistentVolumeClaim *corev1.PersistentVolumeClaim,
62 | client client.Client, scheme *runtime.Scheme, log logr.Logger,
63 | ctx context.Context) (bool, error) {
64 | // Calls deletion of persistent volume claim
65 | err := client.Delete(ctx, persistentVolumeClaim)
66 |
67 | // Checks whether the deletion succeeded
68 | if err != nil {
69 | log.Error(err, "Failed to delete persistentVolumeClaim: ", "Name: ", persistentVolumeClaim.Name,
70 | "Namespace: ", persistentVolumeClaim.Namespace)
71 | return false, err
72 | }
73 |
74 | persistentVolume := &corev1.PersistentVolume{}
75 | err = client.Get(ctx, types.NamespacedName{Name: persistentVolumeClaim.Name,
76 | Namespace: persistentVolumeClaim.Namespace}, persistentVolume)
77 |
78 | if err != nil {
79 | log.Error(err, "Failed to get persistentVolume: ", "Name: ", persistentVolumeClaim.Name,
80 | "Namespace: ", persistentVolumeClaim.Namespace)
81 | return false, err
82 | }
83 |
84 | err = client.Delete(ctx, persistentVolume)
85 |
86 | if err != nil {
87 | log.Error(err, "Failed to delete persistentVolume: ", "Name: ", persistentVolumeClaim.Name,
88 | "Namespace: ", persistentVolumeClaim.Namespace)
89 | return false, err
90 | }
91 |
92 | return true, nil
93 | }
94 |
95 | // Function that updates the persistent volume claim list and the persistent volumes
96 | func Update(challenge *kctfv1.Challenge, cl client.Client, scheme *runtime.Scheme,
97 | log logr.Logger, ctx context.Context) (bool, error) {
98 | // Check if all persistent volume claims are correctly set and update them if necessary
99 | // We get all persistentVolumeClaims in the same namespace as the challenge
100 | persistentVolumeClaimsFound := &corev1.PersistentVolumeClaimList{}
101 | change := false
102 |
103 | // List all persistent volume claims in the namespace of the challenge
104 | var listOption client.ListOption
105 | listOption = &client.ListOptions{
106 | Namespace: challenge.Namespace,
107 | LabelSelector: labels.SelectorFromSet(map[string]string{"app": challenge.Name}),
108 | }
109 |
110 | err := cl.List(ctx, persistentVolumeClaimsFound, listOption)
111 | if err != nil {
112 | log.Error(err, "Failed to list persistent volume claims", "Challenge Name: ",
113 | challenge.Name, " with namespace ", challenge.Namespace)
114 | return false, err
115 | }
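// From here on, the function reconciles by set difference: claims named in the
// challenge spec but missing from the cluster are created, and claims found in
// the cluster but no longer named in the spec are deleted (see deleteVolumes).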
116 |
117 | // First we create a map with the names of the persistent volume claims that already exist
118 | namesFound := mapNameIdx(persistentVolumeClaimsFound)
119 |
120 | // Claims that already exist are kept; the missing ones are created
121 | if challenge.Spec.PersistentVolumeClaims != nil {
122 | for _, claim := range challenge.Spec.PersistentVolumeClaims {
123 | _, present := namesFound[claim]
124 | if present {
125 | delete(namesFound, claim)
126 | } else {
127 | // Creates the object
128 | change, err = create(challenge, claim,
129 | cl, scheme, log, ctx)
130 | if err != nil {
131 | return false, err
132 | }
133 | log.Info("PersistentVolumeClaim and PersistentVolume created successfully",
134 | "Name: ", claim, "Namespace:", challenge.Namespace)
135 | }
136 | }
137 | }
138 |
139 | // Then we delete the persistent volume claims that remained
140 | for name, idx := range namesFound {
141 | change, err = deleteVolumes(&persistentVolumeClaimsFound.Items[idx],
142 | cl, scheme, log, ctx)
143 | if err != nil {
144 | return false, err
145 | }
146 | log.Info("PersistentVolumeClaim and PersistentVolume deleted successfully",
147 | "Name: ", name, "Namespace:", challenge.Namespace)
148 | }
149 |
150 | return change, err
151 | }
152 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/volumes/persistentvolume.go: --------------------------------------------------------------------------------
1 | package volumes
2 |
3 | import (
4 | kctfv1 "github.com/google/kctf/api/v1"
5 | corev1 "k8s.io/api/core/v1"
6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
7 | )
8 |
9 | func persistentVolume(persistentVolumeClaim *corev1.PersistentVolumeClaim,
10 | challenge *kctfv1.Challenge) *corev1.PersistentVolume {
11 | // returns the persistent volume corresponding to the persistentVolumeClaim
12 | persistentVolume := &corev1.PersistentVolume{
13 | ObjectMeta: metav1.ObjectMeta{
14 | Name: persistentVolumeClaim.Spec.VolumeName,
15 | Namespace: persistentVolumeClaim.Namespace,
16 | },
17 | Spec: corev1.PersistentVolumeSpec{
18 | PersistentVolumeSource: corev1.PersistentVolumeSource{
19 | HostPath: &corev1.HostPathVolumeSource{
20 | Path: "/mnt/disks/gcs/" + challenge.Namespace + "/" +
21 | challenge.Name + "/" + persistentVolumeClaim.Spec.VolumeName,
22 | },
23 | },
24 | StorageClassName: "manual",
25 | Capacity: persistentVolumeClaim.Spec.Resources.Requests,
26 | AccessModes: persistentVolumeClaim.Spec.AccessModes,
27 | PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
28 | },
29 | }
30 | return persistentVolume
31 | }
32 |
-------------------------------------------------------------------------------- /kctf-operator/controllers/volumes/persistentvolumeclaim.go: --------------------------------------------------------------------------------
1 | // Creates persistentVolumeClaims
2 | package volumes
3 |
4 | import (
5 | kctfv1 "github.com/google/kctf/api/v1"
6 | corev1 "k8s.io/api/core/v1"
7 | "k8s.io/apimachinery/pkg/api/resource"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | )
10 |
11 | func persistentVolumeClaim(claim string,
12 | challenge *kctfv1.Challenge) *corev1.PersistentVolumeClaim {
13 | storageClassName := "manual"
14 | requirement, _ := resource.ParseQuantity("10Gi")
15 | resources := map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: requirement}
16 |
17 | // returns the persistent volume claim corresponding to the given claim name
18 | persistentVolumeClaim := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{ 20 | Name: claim, 21 | Namespace: challenge.Namespace, 22 | Labels: map[string]string{ 23 | "app": challenge.Name, 24 | }, 25 | }, 26 | Spec: corev1.PersistentVolumeClaimSpec{ 27 | StorageClassName: &storageClassName, 28 | AccessModes: []corev1.PersistentVolumeAccessMode{ 29 | "ReadWriteMany", 30 | }, 31 | VolumeName: claim, 32 | Resources: corev1.VolumeResourceRequirements{ 33 | Requests: resources, 34 | }, 35 | }, 36 | } 37 | return persistentVolumeClaim 38 | } 39 | -------------------------------------------------------------------------------- /kctf-operator/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/google/kctf 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/GoogleCloudPlatform/gke-managed-certs v1.2.5 7 | github.com/go-logr/logr v1.4.1 8 | github.com/onsi/ginkgo/v2 v2.14.0 9 | github.com/onsi/gomega v1.30.0 10 | k8s.io/api v0.29.2 11 | k8s.io/apimachinery v0.29.2 12 | k8s.io/client-go v1.5.2 13 | k8s.io/ingress-gce v1.23.1 14 | sigs.k8s.io/controller-runtime v0.17.3 15 | ) 16 | 17 | // gke-managed-certs depends weirdly on 1.5.2 18 | replace k8s.io/client-go v1.5.2 => k8s.io/client-go v0.29.2 19 | 20 | require ( 21 | github.com/beorn7/perks v1.0.1 // indirect 22 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 23 | github.com/davecgh/go-spew v1.1.1 // indirect 24 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 25 | github.com/evanphx/json-patch/v5 v5.8.0 // indirect 26 | github.com/fsnotify/fsnotify v1.7.0 // indirect 27 | github.com/go-logr/zapr v1.3.0 // indirect 28 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 29 | github.com/go-openapi/jsonreference v0.20.2 // indirect 30 | github.com/go-openapi/swag v0.22.3 // indirect 31 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect 32 | github.com/gogo/protobuf v1.3.2 // indirect 33 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 34 | github.com/golang/protobuf v1.5.3 // indirect 35 | github.com/google/gnostic-models v0.6.8 // indirect 36 | github.com/google/go-cmp v0.6.0 // indirect 37 | github.com/google/gofuzz v1.2.0 // indirect 38 | github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect 39 | github.com/google/uuid v1.3.0 // indirect 40 | github.com/imdario/mergo v0.3.12 // indirect 41 | github.com/josharian/intern v1.0.0 // indirect 42 | github.com/json-iterator/go v1.1.12 // indirect 43 | github.com/mailru/easyjson v0.7.7 // indirect 44 | github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect 45 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 46 | github.com/modern-go/reflect2 v1.0.2 // indirect 47 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 48 | github.com/pkg/errors v0.9.1 // indirect 49 | github.com/prometheus/client_golang v1.18.0 // indirect 50 | github.com/prometheus/client_model v0.5.0 // indirect 51 | github.com/prometheus/common v0.45.0 // indirect 52 | github.com/prometheus/procfs v0.12.0 // indirect 53 | github.com/spf13/pflag v1.0.5 // indirect 54 | go.uber.org/multierr v1.11.0 // indirect 55 | go.uber.org/zap v1.26.0 // indirect 56 | golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect 57 | golang.org/x/net v0.19.0 // indirect 58 | golang.org/x/oauth2 v0.12.0 // indirect 59 | golang.org/x/sys v0.16.0 // indirect 60 | golang.org/x/term v0.15.0 // indirect 61 | golang.org/x/text v0.14.0 // indirect 62 | golang.org/x/time v0.3.0 // indirect 63 
| golang.org/x/tools v0.16.1 // indirect 64 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 65 | google.golang.org/appengine v1.6.7 // indirect 66 | google.golang.org/protobuf v1.31.0 // indirect 67 | gopkg.in/inf.v0 v0.9.1 // indirect 68 | gopkg.in/yaml.v2 v2.4.0 // indirect 69 | gopkg.in/yaml.v3 v3.0.1 // indirect 70 | k8s.io/apiextensions-apiserver v0.29.2 // indirect 71 | k8s.io/component-base v0.29.2 // indirect 72 | k8s.io/klog/v2 v2.110.1 // indirect 73 | k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect 74 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect 75 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 76 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 77 | sigs.k8s.io/yaml v1.4.0 // indirect 78 | ) 79 | -------------------------------------------------------------------------------- /kctf-operator/hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /kctf-operator/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "flag" 21 | "os" 22 | 23 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 24 | // to ensure that exec-entrypoint and run can make use of them. 
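// (The blank import below registers these auth providers through their
// package init() side effects; nothing from the package is referenced
// directly.)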
25 | _ "k8s.io/client-go/plugin/pkg/client/auth" 26 | 27 | gkenetv1 "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/apis/networking.gke.io/v1" 28 | "k8s.io/apimachinery/pkg/runtime" 29 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 30 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 31 | backendv1 "k8s.io/ingress-gce/pkg/apis/backendconfig/v1" 32 | ctrl "sigs.k8s.io/controller-runtime" 33 | "sigs.k8s.io/controller-runtime/pkg/healthz" 34 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 35 | metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" 36 | webhook "sigs.k8s.io/controller-runtime/pkg/webhook" 37 | 38 | kctfv1 "github.com/google/kctf/api/v1" 39 | "github.com/google/kctf/controllers" 40 | //+kubebuilder:scaffold:imports 41 | 42 | "github.com/google/kctf/resources" 43 | ) 44 | 45 | var ( 46 | scheme = runtime.NewScheme() 47 | setupLog = ctrl.Log.WithName("setup") 48 | ) 49 | 50 | func init() { 51 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 52 | 53 | utilruntime.Must(kctfv1.AddToScheme(scheme)) 54 | utilruntime.Must(backendv1.AddToScheme(scheme)) 55 | utilruntime.Must(gkenetv1.AddToScheme(scheme)) 56 | //+kubebuilder:scaffold:scheme 57 | } 58 | 59 | func main() { 60 | var metricsAddr string 61 | var enableLeaderElection bool 62 | var probeAddr string 63 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 64 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 65 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 66 | "Enable leader election for controller manager. "+ 67 | "Enabling this will ensure there is only one active controller manager.") 68 | opts := zap.Options{ 69 | Development: true, 70 | } 71 | opts.BindFlags(flag.CommandLine) 72 | flag.Parse() 73 | 74 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 75 | 76 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 77 | Scheme: scheme, 78 | Metrics: metricsserver.Options{BindAddress: metricsAddr}, 79 | WebhookServer: webhook.NewServer(webhook.Options{Port: 9443}), 80 | HealthProbeBindAddress: probeAddr, 81 | LeaderElection: enableLeaderElection, 82 | LeaderElectionID: "558d99b6.dev", 83 | }) 84 | if err != nil { 85 | setupLog.Error(err, "unable to start manager") 86 | os.Exit(1) 87 | } 88 | 89 | if err = (&controllers.ChallengeReconciler{ 90 | Client: mgr.GetClient(), 91 | Scheme: mgr.GetScheme(), 92 | }).SetupWithManager(mgr); err != nil { 93 | setupLog.Error(err, "unable to create controller", "controller", "Challenge") 94 | os.Exit(1) 95 | } 96 | //+kubebuilder:scaffold:builder 97 | 98 | // Initializer that creates objects and connect them to the lifetime of the operator 99 | // Should be only used when testing the operator in a cluster 100 | // since the instances that are created are associated to the deployment of the operator 101 | // which only happens when it is ran inside the cluster 102 | client := mgr.GetClient() 103 | if err := resources.InitializeOperator(&client); err != nil { 104 | setupLog.Error(err, "Error initializing initial instances") 105 | os.Exit(1) 106 | } 107 | 108 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 109 | setupLog.Error(err, "unable to set up health check") 110 | os.Exit(1) 111 | } 112 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 113 | setupLog.Error(err, "unable to set up ready check") 114 | os.Exit(1) 115 | } 116 | 117 | setupLog.Info("starting manager") 118 | if 
err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 119 | setupLog.Error(err, "problem running manager") 120 | os.Exit(1) 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /kctf-operator/resources/allow-dns.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | networkingv1 "k8s.io/api/networking/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "k8s.io/apimachinery/pkg/util/intstr" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | func NewAllowDns() client.Object { 12 | udpProtocol := corev1.ProtocolUDP 13 | udpPort := intstr.FromInt(53) 14 | tcpProtocol := corev1.ProtocolTCP 15 | tcpPort := intstr.FromInt(53) 16 | 17 | networkPolicy := &networkingv1.NetworkPolicy{ 18 | ObjectMeta: metav1.ObjectMeta{ 19 | Name: "allow-dns", 20 | Namespace: "default", 21 | }, 22 | Spec: networkingv1.NetworkPolicySpec{ 23 | PodSelector: metav1.LabelSelector{}, 24 | PolicyTypes: []networkingv1.PolicyType{ 25 | "Egress", 26 | }, 27 | Egress: []networkingv1.NetworkPolicyEgressRule{{ 28 | To: []networkingv1.NetworkPolicyPeer{}, 29 | Ports: []networkingv1.NetworkPolicyPort{ 30 | { 31 | Protocol: &udpProtocol, 32 | Port: &udpPort, 33 | }, 34 | { 35 | Protocol: &tcpProtocol, 36 | Port: &tcpPort, 37 | }, 38 | }, 39 | }}, 40 | }, 41 | } 42 | 43 | return networkPolicy 44 | } 45 | -------------------------------------------------------------------------------- /kctf-operator/resources/constants.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | // 4 | // ======================================= 5 | // == || These are set by automation || == 6 | // .. vv ........................... vv .. 7 | 8 | const DOCKER_CERTBOT_IMAGE = "gcr.io/kctf-docker/certbot@sha256:86f883af58f630babd55a20ae7d89974d9cc35608c25cb405fe3faf90d0ceef0" 9 | const DOCKER_GCSFUSE_IMAGE = "gcr.io/kctf-docker/gcsfuse@sha256:6cb70b79588dc30e4ea64d85f2d0d1aff928d9002b75b697ce50009e8b88b208" 10 | 11 | // .. ^^ ........................... ^^ .. 
12 | // == || These are set by automation || == 13 | // ======================================= 14 | // 15 | -------------------------------------------------------------------------------- /kctf-operator/resources/daemon-gcsfuse.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | appsv1 "k8s.io/api/apps/v1" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "sigs.k8s.io/controller-runtime/pkg/client" 8 | ) 9 | 10 | func NewDaemonSetGcsFuse() client.Object { 11 | privileged := true 12 | mountPropagation := corev1.MountPropagationBidirectional 13 | daemonSet := &appsv1.DaemonSet{ 14 | ObjectMeta: metav1.ObjectMeta{ 15 | Name: "ctf-daemon-gcsfuse", 16 | Namespace: "kctf-system", 17 | Labels: map[string]string{"k8s-app": "ctf-daemon-gcsfuse"}, 18 | }, 19 | Spec: appsv1.DaemonSetSpec{ 20 | Selector: &metav1.LabelSelector{ 21 | MatchLabels: map[string]string{"name": "ctf-daemon-gcsfuse"}, 22 | }, 23 | Template: corev1.PodTemplateSpec{ 24 | ObjectMeta: metav1.ObjectMeta{ 25 | Labels: map[string]string{"name": "ctf-daemon-gcsfuse"}, 26 | }, 27 | Spec: corev1.PodSpec{ 28 | ServiceAccountName: "gcsfuse-sa", 29 | Tolerations: []corev1.Toleration{{ 30 | Key: "node-role.kubernetes.io/master", 31 | Effect: corev1.TaintEffectNoSchedule, 32 | }}, 33 | Containers: []corev1.Container{{ 34 | Name: "ctf-daemon", 35 | Image: DOCKER_GCSFUSE_IMAGE, 36 | SecurityContext: &corev1.SecurityContext{ 37 | Privileged: &privileged, 38 | }, 39 | VolumeMounts: []corev1.VolumeMount{ 40 | { 41 | Name: "mnt-disks-gcs", 42 | MountPath: "/mnt/disks/gcs", 43 | MountPropagation: &mountPropagation, 44 | }, 45 | { 46 | Name: "config", 47 | MountPath: "/config", 48 | }, 49 | }, 50 | Lifecycle: &corev1.Lifecycle{ 51 | PreStop: &corev1.LifecycleHandler{ 52 | Exec: &corev1.ExecAction{ 53 | Command: []string{"sh", "-c", "fusermount -u /mnt/disks/gcs"}, 54 | }, 55 | }, 56 | }, 57 | }}, 58 | Volumes: []corev1.Volume{ 59 | { 60 | Name: "mnt-disks-gcs", 61 | VolumeSource: corev1.VolumeSource{ 62 | HostPath: &corev1.HostPathVolumeSource{ 63 | Path: "/mnt/disks/gcs", 64 | }, 65 | }, 66 | }, 67 | { 68 | Name: "config", 69 | VolumeSource: corev1.VolumeSource{ 70 | ConfigMap: &corev1.ConfigMapVolumeSource{ 71 | LocalObjectReference: corev1.LocalObjectReference{ 72 | Name: "gcsfuse-config", 73 | }, 74 | }, 75 | }, 76 | }, 77 | }, 78 | }, 79 | }, 80 | }, 81 | } 82 | return daemonSet 83 | } 84 | -------------------------------------------------------------------------------- /kctf-operator/resources/external-dns.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | appsv1 "k8s.io/api/apps/v1" 5 | corev1 "k8s.io/api/core/v1" 6 | rbacv1 "k8s.io/api/rbac/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | func NewExternalDnsClusterRole() client.Object { 12 | clusterRole := &rbacv1.ClusterRole{ 13 | ObjectMeta: metav1.ObjectMeta{ 14 | Name: "external-dns-viewer", 15 | }, 16 | Rules: []rbacv1.PolicyRule{ 17 | { 18 | APIGroups: []string{""}, 19 | Resources: []string{"secrets"}, 20 | Verbs: []string{"get", "create", "update", "patch", "list"}, 21 | }, 22 | { 23 | APIGroups: []string{""}, 24 | Resources: []string{"services", "endpoints", "pods", "nodes"}, 25 | Verbs: []string{"get", "watch", "list"}, 26 | }, 27 | { 28 | APIGroups: []string{"extensions", "networking.k8s.io"}, 29 | Resources: 
[]string{"ingresses"}, 30 | Verbs: []string{"get", "watch", "list"}, 31 | }, 32 | }, 33 | } 34 | return clusterRole 35 | } 36 | 37 | func NewExternalDnsClusterRoleBinding() client.Object { 38 | clusterRoleBinding := &rbacv1.ClusterRoleBinding{ 39 | ObjectMeta: metav1.ObjectMeta{ 40 | Name: "external-dns-sa:external-dns-viewer", 41 | }, 42 | RoleRef: rbacv1.RoleRef{ 43 | APIGroup: "rbac.authorization.k8s.io", 44 | Kind: "ClusterRole", 45 | Name: "external-dns-viewer", 46 | }, 47 | Subjects: []rbacv1.Subject{{ 48 | Kind: "ServiceAccount", 49 | Name: "external-dns-sa", 50 | Namespace: "kctf-system", 51 | }}, 52 | } 53 | return clusterRoleBinding 54 | } 55 | 56 | func NewExternalDnsDeployment() client.Object { 57 | deployment := &appsv1.Deployment{ 58 | ObjectMeta: metav1.ObjectMeta{ 59 | Name: "external-dns", 60 | Namespace: "kctf-system", 61 | }, 62 | Spec: appsv1.DeploymentSpec{ 63 | Strategy: appsv1.DeploymentStrategy{ 64 | Type: appsv1.RecreateDeploymentStrategyType, 65 | }, 66 | Selector: &metav1.LabelSelector{ 67 | MatchLabels: map[string]string{"app": "external-dns"}, 68 | }, 69 | Template: corev1.PodTemplateSpec{ 70 | ObjectMeta: metav1.ObjectMeta{ 71 | Labels: map[string]string{"app": "external-dns"}, 72 | }, 73 | Spec: corev1.PodSpec{ 74 | ServiceAccountName: "external-dns-sa", 75 | Containers: []corev1.Container{ 76 | { 77 | Image: "us.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.10.0", 78 | Name: "external-dns", 79 | Env: []corev1.EnvVar{{ 80 | Name: "DOMAIN_NAME", 81 | ValueFrom: &corev1.EnvVarSource{ 82 | ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ 83 | LocalObjectReference: corev1.LocalObjectReference{ 84 | Name: "external-dns", 85 | }, 86 | Key: "DOMAIN_NAME", 87 | }, 88 | }, 89 | }}, 90 | Args: []string{"--log-level=debug", "--source=service", "--source=ingress", 91 | "--provider=google", "--domain-filter=$(DOMAIN_NAME)", "--registry=txt", 92 | "--txt-owner-id=kctf-cloud-dns"}, 93 | }, 94 | { 95 | Image: DOCKER_CERTBOT_IMAGE, 96 | Name: "certbot", 97 | Env: []corev1.EnvVar{ 98 | { 99 | Name: "DOMAIN", 100 | ValueFrom: &corev1.EnvVarSource{ 101 | ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ 102 | LocalObjectReference: corev1.LocalObjectReference{ 103 | Name: "external-dns", 104 | }, 105 | Key: "DOMAIN_NAME", 106 | }, 107 | }, 108 | }, 109 | { 110 | Name: "SECRET", 111 | Value: "tls-cert", 112 | }, 113 | { 114 | Name: "EMAIL", 115 | ValueFrom: &corev1.EnvVarSource{ 116 | ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ 117 | LocalObjectReference: corev1.LocalObjectReference{ 118 | Name: "external-dns", 119 | }, 120 | Key: "EMAIL_ADDRESS", 121 | }, 122 | }, 123 | }, 124 | { 125 | Name: "PROD", 126 | Value: "true", 127 | }, 128 | }, 129 | }, 130 | }, 131 | }, 132 | }, 133 | }, 134 | } 135 | return deployment 136 | } 137 | -------------------------------------------------------------------------------- /kctf-operator/resources/initializer.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "context" 5 | "os" 6 | 7 | logf "sigs.k8s.io/controller-runtime/pkg/log" 8 | 9 | "github.com/go-logr/logr" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | clientPkg "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | var log logr.Logger = logf.Log.WithName("cmd") 15 | 16 | func InitializeOperator(client *clientPkg.Client) error { 17 | // Creates the objects that enable the DNS, external DNS and etc 18 | 19 | // Create the tls secret separately since we don't want to overwrite it if it exists 20 | 
tlsSecret := NewSecretTls()
21 | err := (*client).Create(context.Background(), tlsSecret)
22 | if err != nil && !errors.IsAlreadyExists(err) {
23 | log.Error(err, "Could not create TLS secret")
24 | return err
25 | }
26 |
27 | objectFunctions := []func() clientPkg.Object{NewExternalDnsClusterRole, NewExternalDnsClusterRoleBinding,
28 | NewExternalDnsDeployment, NewDaemonSetGcsFuse, NewSecretPowBypass,
29 | NewSecretPowBypassPub, NewNetworkPolicyBlockInternal, NewAllowDns}
30 |
31 | names := []string{
32 | "External DNS Cluster Role", "External DNS Cluster Role Binding", "External DNS Deployment",
33 | "Daemon Set Gcs Fuse", "Secret for PowBypass", "Secret for PowBypassPub",
34 | "Network Policy Block Internal", "Allow DNS"}
35 |
36 | for i, newObject := range objectFunctions {
37 |
38 | obj := newObject()
39 |
40 | // Creates the object
41 | err := (*client).Create(context.Background(), obj)
42 |
43 | // An "already exists" error is not a problem: we try to update the object instead
44 | if err != nil {
45 | if errors.IsAlreadyExists(err) {
46 | log.Info("This object already exists.", "Name: ", names[i])
47 |
48 | // Try to update the resource instead
49 | err = (*client).Update(context.Background(), obj)
50 | }
51 | if err != nil {
52 | log.Error(err, names[i])
53 | log.Info(names[i])
54 | return err
55 | }
56 | } else {
57 | log.Info("Created object.", "Name:", names[i])
58 | }
59 | }
60 |
61 | f, err := os.Create("/tmp/initialized")
62 | if err != nil {
63 | log.Error(err, "Could not create file for ReadinessProbe")
64 | return err
65 | }
66 | f.Close()
67 |
68 | return nil
69 | }
70 |
-------------------------------------------------------------------------------- /kctf-operator/resources/network-policy.go: --------------------------------------------------------------------------------
1 | package resources
2 |
3 | import (
4 | networkingv1 "k8s.io/api/networking/v1"
5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
6 | "sigs.k8s.io/controller-runtime/pkg/client"
7 | )
8 |
9 | func NewNetworkPolicyBlockInternal() client.Object {
10 | networkPolicy := &networkingv1.NetworkPolicy{
11 | ObjectMeta: metav1.ObjectMeta{
12 | Name: "block-internal",
13 | Namespace: "default",
14 | },
15 | Spec: networkingv1.NetworkPolicySpec{
16 | PodSelector: metav1.LabelSelector{},
17 | PolicyTypes: []networkingv1.PolicyType{
18 | "Egress",
19 | },
20 | Egress: []networkingv1.NetworkPolicyEgressRule{{
21 | To: []networkingv1.NetworkPolicyPeer{{
22 | IPBlock: &networkingv1.IPBlock{
23 | CIDR: "0.0.0.0/0",
24 | Except: []string{"0.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10",
25 | "127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12", "192.0.0.0/24",
26 | "192.0.2.0/24", "192.88.99.0/24", "192.168.0.0/16", "198.18.0.0/15",
27 | "198.51.100.0/24", "203.0.113.0/24", "224.0.0.0/4", "240.0.0.0/4"},
28 | },
29 | }},
30 | }},
31 | },
32 | }
33 | return networkPolicy
34 | }
35 |
-------------------------------------------------------------------------------- /kctf-operator/resources/secret-pow.go: --------------------------------------------------------------------------------
1 | package resources
2 |
3 | import (
4 | "crypto/ecdsa"
5 | "crypto/elliptic"
6 | "crypto/rand"
7 | "crypto/x509"
8 | "encoding/pem"
9 |
10 | corev1 "k8s.io/api/core/v1"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "sigs.k8s.io/controller-runtime/pkg/client"
13 | )
14 |
15 | var privateKey *ecdsa.PrivateKey = generateKey()
16 |
17 | func generateKey() *ecdsa.PrivateKey {
18 | // Generate the private key (the public key is derived from it on demand)
19 | privateKey, err :=
ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
20 |
21 | // Check error
22 | if err != nil {
23 | log.Error(err, "Failed to generate private key")
24 | }
25 |
26 | return privateKey
27 | }
28 |
29 | func secret(name string, nameMap string, private bool) *corev1.Secret {
30 | var err error
31 | var der []byte
32 | var block pem.Block
33 |
34 | // Marshal the requested key
35 | if private {
36 | der, err = x509.MarshalPKCS8PrivateKey(privateKey)
37 | block.Type = "EC PRIVATE KEY"
38 | } else {
39 | der, err = x509.MarshalPKIXPublicKey(privateKey.Public())
40 | block.Type = "PUBLIC KEY"
41 | }
42 |
43 | // Check error
44 | if err != nil {
45 | log.Error(err, "Couldn't get DER form")
46 | }
47 |
48 | block.Bytes = der
49 |
50 | // Encode the key as PEM bytes
51 | pem := pem.EncodeToMemory(&block)
52 |
53 | data := map[string][]byte{nameMap: pem}
54 | // Then we create the secret
55 | secret := &corev1.Secret{
56 | ObjectMeta: metav1.ObjectMeta{
57 | Name: name,
58 | Namespace: "kctf-system",
59 | },
60 | Data: data,
61 | }
62 |
63 | return secret
64 | }
65 |
66 | func NewSecretPowBypass() client.Object {
67 | return secret("pow-bypass", "pow-bypass-key.pem", true)
68 | }
69 |
70 | func NewSecretPowBypassPub() client.Object {
71 | return secret("pow-bypass-pub", "pow-bypass-key-pub.pem", false)
72 | }
73 |
74 | func NewSecretTls() client.Object {
75 | // Generate empty secret so ingress works
76 | secret := &corev1.Secret{
77 | ObjectMeta: metav1.ObjectMeta{
78 | Name: "tls-cert",
79 | Namespace: "kctf-system",
80 | },
81 | Type: corev1.SecretTypeTLS,
82 | Data: map[string][]byte{
83 | corev1.TLSCertKey: []byte{},
84 | corev1.TLSPrivateKeyKey: []byte{},
85 | },
86 | }
87 | return secret
88 | }
89 |
--------------------------------------------------------------------------------