├── .coveragerc
├── .flake8
├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── CONTRIBUTING.rst
├── LICENSE
├── Makefile
├── README.rst
├── docs
├── Makefile
├── api
│ ├── challenge.rst
│ └── project.rst
├── backends
│ ├── index.rst
│ ├── k8s
│ │ ├── cluster-role.yaml
│ │ └── index.rst
│ └── rctf
│ │ └── index.rst
├── challenge.rst
├── conf.py
├── config-samples.rst
├── contributing.rst
├── index.rst
├── make.bat
└── project.rst
├── example
├── pwn
│ └── example
│ │ ├── .dockerignore
│ │ └── challenge.yaml
├── rcds.yaml
└── web
│ └── with-database
│ ├── chall
│ ├── .dockerignore
│ ├── Dockerfile
│ ├── index.js
│ ├── package-lock.json
│ └── package.json
│ └── challenge.yaml
├── poetry.lock
├── pyproject.toml
├── rcds
├── __init__.py
├── backend
│ ├── __init__.py
│ └── backend.py
├── backends
│ ├── __init__.py
│ ├── k8s
│ │ ├── __init__.py
│ │ ├── backend.py
│ │ ├── jinja.py
│ │ ├── manifests.py
│ │ ├── options.schema.yaml
│ │ └── templates
│ │ │ ├── _helpers.jinja
│ │ │ ├── deployment.yaml
│ │ │ ├── ingress.yaml
│ │ │ ├── namespace.yaml
│ │ │ ├── network-policy.yaml
│ │ │ └── service.yaml
│ └── rctf
│ │ ├── __init__.py
│ │ ├── backend.py
│ │ ├── options.schema.yaml
│ │ └── rctf.py
├── challenge
│ ├── __init__.py
│ ├── challenge.py
│ ├── challenge.schema.yaml
│ ├── config.py
│ └── docker.py
├── cli
│ ├── __init__.py
│ ├── __main__.py
│ └── deploy.py
├── errors.py
├── project
│ ├── __init__.py
│ ├── assets.py
│ ├── config.py
│ ├── project.py
│ └── rcds.schema.yaml
├── py.typed
└── util
│ ├── __init__.py
│ ├── deep_merge.py
│ ├── find.py
│ ├── jsonschema.py
│ └── load.py
├── tests
├── challenge
│ ├── test_challenge.py
│ ├── test_challenge
│ │ ├── bad#dir
│ │ │ └── challenge.yml
│ │ ├── id_override
│ │ │ └── challenge.yml
│ │ ├── json
│ │ │ └── challenge.json
│ │ ├── nonexistent
│ │ │ └── .dir
│ │ ├── rcds.yml
│ │ ├── render-description
│ │ │ └── challenge.yml
│ │ ├── shortcuts-http
│ │ │ └── challenge.yaml
│ │ ├── shortcuts-tcp
│ │ │ └── challenge.yaml
│ │ ├── static-assets
│ │ │ ├── challenge.yml
│ │ │ ├── file1.txt
│ │ │ └── file2.txt
│ │ └── yaml
│ │ │ └── challenge.yml
│ ├── test_config.py
│ ├── test_config
│ │ ├── default-category
│ │ │ └── chall
│ │ │ │ └── challenge.yml
│ │ ├── defaults
│ │ │ ├── 1
│ │ │ │ └── challenge.yml
│ │ │ └── 2
│ │ │ │ └── challenge.yml
│ │ ├── expose-no-containers
│ │ │ └── challenge.yml
│ │ ├── flag-format
│ │ │ ├── invalid
│ │ │ │ └── challenge.yml
│ │ │ └── valid
│ │ │ │ └── challenge.yml
│ │ ├── no-default-category
│ │ │ └── challenge.yml
│ │ ├── nonexistent-flag-file
│ │ │ └── challenge.yml
│ │ ├── nonexistent-provide-file
│ │ │ └── challenge.yml
│ │ ├── nonexistent-target-container
│ │ │ └── challenge.yml
│ │ ├── nonexistent-target-port
│ │ │ └── challenge.yml
│ │ ├── rcds.yaml
│ │ ├── schema-fail
│ │ │ └── challenge.yml
│ │ ├── valid
│ │ │ ├── challenge.yml
│ │ │ └── flag.txt
│ │ └── warn-multiline-flag
│ │ │ ├── challenge.yml
│ │ │ └── flag.txt
│ ├── test_docker.py
│ └── test_docker
│ │ ├── contexts
│ │ ├── basic
│ │ │ ├── .file
│ │ │ ├── Dockerfile
│ │ │ ├── a
│ │ │ │ ├── .file
│ │ │ │ ├── b
│ │ │ │ │ ├── .file
│ │ │ │ │ └── file
│ │ │ │ └── file
│ │ │ └── file
│ │ ├── complex_dockerignore
│ │ │ ├── .dockerignore
│ │ │ ├── Dockerfile
│ │ │ ├── a
│ │ │ ├── b
│ │ │ ├── c
│ │ │ │ └── file
│ │ │ └── d
│ │ │ │ └── file
│ │ └── dockerignore
│ │ │ ├── .dockerignore
│ │ │ ├── .file
│ │ │ ├── Dockerfile
│ │ │ ├── a
│ │ │ ├── .file
│ │ │ ├── b
│ │ │ │ ├── .file
│ │ │ │ └── file
│ │ │ └── file
│ │ │ └── file
│ │ └── project
│ │ ├── chall
│ │ └── challenge.yml
│ │ ├── chall2
│ │ └── challenge.yml
│ │ └── rcds.yml
├── project
│ ├── test_assets.py
│ └── test_assets
│ │ ├── dir
│ │ └── .dir
│ │ ├── file1
│ │ └── file2
└── util
│ └── test_deep_merge.py
└── tox.ini
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | source = rcds
3 | branch = True
4 |
5 | [report]
6 | exclude_lines =
7 | pragma: no cover
8 | if TYPE_CHECKING:
9 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 80
3 | select = C,E,F,W,B,B950
4 | ignore = E203, E501, W503
5 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | push:
4 | branches: [master]
5 | tags:
6 | - v*
7 | pull_request:
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-20.04
12 | steps:
13 | - uses: actions/checkout@v2
14 |
15 | - name: Set up Python
16 | uses: actions/setup-python@v1
17 | with:
18 | python-version: 3.9
19 |
20 | - name: Install dependencies
21 | run: |
22 | python -m pip install --upgrade pip
23 | python -m pip install --upgrade poetry
24 | poetry --version
25 | poetry install --no-interaction
26 |
27 | - name: Lint
28 | run: poetry run pre-commit run --all-files
29 |
30 | test:
31 | runs-on: ubuntu-20.04
32 | strategy:
33 | matrix:
34 | python-version: [3.6, 3.7, 3.8, 3.9]
35 | steps:
36 | - uses: actions/checkout@v2
37 |
38 | - name: Set up Python ${{ matrix.python-version }}
39 | uses: actions/setup-python@v1
40 | with:
41 | python-version: ${{ matrix.python-version }}
42 |
43 | - name: Install dependencies
44 | run: |
45 | python -m pip install --upgrade pip
46 | python -m pip install --upgrade poetry
47 | python -m pip install --upgrade codecov
48 | poetry --version
49 | poetry install --no-interaction
50 |
51 | - name: Test
52 | run: |
53 | make cover
54 |
55 | - name: Upload coverage reports
56 | env:
57 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
58 | run: |
59 | codecov --token="$CODECOV_TOKEN"
60 |
61 | publish:
62 | runs-on: ubuntu-20.04
63 | if: startsWith(github.ref, 'refs/tags/v')
64 | needs:
65 | - lint
66 | - test
67 | steps:
68 | - uses: actions/checkout@v2
69 |
70 | - name: Set up Python
71 | uses: actions/setup-python@v1
72 | with:
73 | python-version: 3.9
74 |
75 | - name: Install dependencies
76 | run: |
77 | python -m pip install --upgrade pip
78 | python -m pip install --upgrade poetry
79 | poetry --version
80 |
81 | - name: Check tag is correct
82 | run: |
83 | [ $(poetry version -s) = ${GITHUB_REF##refs/tags/v} ]
84 |
85 | - name: Build artifacts
86 | run: poetry build
87 |
88 | - name: Publish
89 | env:
90 | POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_TOKEN }}
91 | run: poetry publish
92 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Created by https://www.gitignore.io/api/python
3 | # Edit at https://www.gitignore.io/?templates=python
4 |
5 | ### Python ###
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | pip-wheel-metadata/
29 | share/python-wheels/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 | MANIFEST
34 |
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 |
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 |
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .nox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | .hypothesis/
56 | .pytest_cache/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Scrapy stuff:
63 | .scrapy
64 |
65 | # Sphinx documentation
66 | docs/_build/
67 |
68 | # PyBuilder
69 | target/
70 |
71 | # pyenv
72 | .python-version
73 |
74 | # pipenv
75 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
76 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
77 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
78 | # install all needed dependencies.
79 | #Pipfile.lock
80 |
81 | # celery beat schedule file
82 | celerybeat-schedule
83 |
84 | # SageMath parsed files
85 | *.sage.py
86 |
87 | # Spyder project settings
88 | .spyderproject
89 | .spyproject
90 |
91 | # Rope project settings
92 | .ropeproject
93 |
94 | # Mr Developer
95 | .mr.developer.cfg
96 | .project
97 | .pydevproject
98 |
99 | # mkdocs documentation
100 | /site
101 |
102 | # mypy
103 | .mypy_cache/
104 | .dmypy.json
105 | dmypy.json
106 |
107 | # Pyre type checker
108 | .pyre/
109 |
110 | # End of https://www.gitignore.io/api/python
111 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v2.5.0
4 | hooks:
5 | - id: trailing-whitespace
6 | - id: end-of-file-fixer
7 | - id: check-yaml
8 | exclude: >-
9 | (?x)^(
10 | rcds/backends/k8s/templates/.*\.ya?ml
11 | )$
12 | - id: check-added-large-files
13 | - repo: local
14 | hooks:
15 | - id: isort
16 | name: isort
17 | entry: poetry run isort
18 | language: system
19 | types: [python]
20 | require_serial: true
21 | - id: black
22 | name: black
23 | entry: poetry run black
24 | language: system
25 | types: [python]
26 | require_serial: true
27 | - id: flake8
28 | name: flake8
29 | entry: poetry run flake8
30 | language: system
31 | types: [python]
32 | require_serial: true
33 | - id: mypy
34 | name: mypy
35 | entry: poetry run mypy
36 | args: ["--scripts-are-modules"]
37 | language: system
38 | types: [python]
39 | require_serial: true
40 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | sphinx:
4 | configuration: docs/conf.py
5 | builder: dirhtml
6 |
7 | python:
8 | version: 3.8
9 | install:
10 | - method: pip
11 | path: .
12 | extra_requirements:
13 | - docs
14 |
--------------------------------------------------------------------------------
/CONTRIBUTING.rst:
--------------------------------------------------------------------------------
1 | Contributing to rCDS
2 | ====================
3 |
4 | Workflow
5 | --------
6 |
7 | Dependency management is done through poetry_, and pre-commit linting hooks are
8 | managed with pre-commit_. To get started, run ``poetry install`` and ``poetry
9 | run pre-commit install``.
10 |
11 | Tests use pytest_, and a Makefile is set up with targets for common operations;
12 | to run tests, use ``make test``; to run tests and get an HTML coverage report,
13 | use ``make htmlcov``; to lint everything, use either ``make lint`` or ``poetry
14 | run pre-commit run -a`` (``make lint`` will stop executing linters after one
15 | failure, while pre-commit will not lint any untracked files).
16 |
17 | .. note::
18 |
19 | This is subject to change; we may move the running of these jobs to use tox
20 | in the future, but for now ``make`` is used for running scripts.
21 |
22 | This project uses isort_, black_, flake8_ with flake8-bugbear_, and mypy_ (see
23 | the `pre-commit configuration`_); consider setting up editor integrations to
24 | ease your development process (particularly with mypy).
25 |
26 | If you want a live preview of the docs as you work, you can install
27 | sphinx-autobuild_ into the Poetry virtualenv (``poetry run pip install
28 | sphinx-autobuild``) and run it via ``poetry run make livebuild``.
29 |
30 | Git
31 | ---
32 |
33 | This project follows `Conventional Commits`_, and uses Angular's `commit types`__.
34 |
35 | .. __: https://github.com/angular/angular/blob/master/CONTRIBUTING.md#types
36 |
37 | Branches should be named prefixed with a type (the same types as used in the
38 | commit message) and a short description of the purpose of the branch. Some
39 | examples::
40 |
41 | feat/brief-description
42 | fix/bug-description
43 |
44 |
45 | .. _poetry: https://python-poetry.org/
46 | .. _pre-commit: https://pre-commit.com/
47 | .. _pytest: https://docs.pytest.org/en/latest/
48 | .. _isort: https://timothycrosley.github.io/isort/
49 | .. _black: https://black.readthedocs.io/en/stable/
50 | .. _flake8: https://flake8.pycqa.org/en/latest/
51 | .. _flake8-bugbear: https://github.com/PyCQA/flake8-bugbear
52 | .. _mypy: https://github.com/python/mypy
53 | .. _sphinx-autobuild: https://github.com/GaretJax/sphinx-autobuild
54 | .. _conventional commits: https://www.conventionalcommits.org/
55 |
56 | .. _pre-commit configuration: https://github.com/redpwn/rCDS/blob/master/.pre-commit-config.yaml
57 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2020, redpwn team
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are met:
5 |
6 | 1. Redistributions of source code must retain the above copyright notice, this
7 | list of conditions and the following disclaimer.
8 |
9 | 2. Redistributions in binary form must reproduce the above copyright notice,
10 | this list of conditions and the following disclaimer in the documentation
11 | and/or other materials provided with the distribution.
12 |
13 | 3. Neither the name of the copyright holder nor the names of its
14 | contributors may be used to endorse or promote products derived from
15 | this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
21 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | POETRY ?= poetry
2 | PYTHON ?= $(POETRY) run python
3 |
4 | .PHONY: lint
5 | lint: isort black flake8 mypy
6 |
7 | .PHONY: test
8 | test:
9 | $(PYTHON) -m pytest
10 |
11 | .PHONY: cover
12 | cover:
13 | $(POETRY) run coverage run -m pytest
14 |
15 | .PHONY: htmlcov
16 | htmlcov: cover
17 | $(POETRY) run coverage html
18 |
19 | .PHONY: mypy
20 | mypy:
21 | $(POETRY) run mypy .
22 |
23 | .PHONY: black
24 | black:
25 | $(POETRY) run black .
26 |
27 | .PHONY: flake8
28 | flake8:
29 | $(POETRY) run flake8 .
30 |
31 | .PHONY: isort
32 | isort:
33 | $(POETRY) run isort .
34 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | #######
2 | rCDS
3 | #######
4 |
5 | .. image:: https://github.com/redpwn/rCDS/workflows/CI/badge.svg
6 | :target: https://github.com/redpwn/rCDS/actions?query=workflow%3ACI+branch%3Amaster
7 | :alt: CI Status
8 |
9 | .. image:: https://img.shields.io/codecov/c/gh/redpwn/rcds
10 | :target: https://codecov.io/gh/redpwn/rcds
11 | :alt: Coverage
12 |
13 | .. image:: https://img.shields.io/readthedocs/rcds/latest
14 | :target: https://rcds.redpwn.net/
15 | :alt: Docs
16 |
17 | .. image:: https://img.shields.io/pypi/v/rcds
18 | :target: https://pypi.org/project/rcds/
19 | :alt: PyPI
20 |
21 | .. This text is copied from the first paragraphs of docs/index.rst
22 |
23 | rCDS is redpwn_'s CTF challenge deployment tool. It is designed to automate the
24 | entire challenge deployment process, taking sources from challenge authors and
25 | provisioning the necessary resources to both make challenges available on the
26 | competition scoreboard and to spin up Docker containers that the challenge needs
27 | to run.
28 |
29 | rCDS has an opinionated model for managing CTF challenges. It operates on a
30 | centralized challenge repository and is designed to be run from a CI/CD system.
31 | This repository is the single source of truth for all data about challenges, and
32 | rCDS itself essentially acts as a tool to sync the state of various systems (the
33 | scoreboard and the container runtime) to what is described by this repository.
34 | Authors do not directly interface with any of these systems, and instead push
35 | their changes and let a CI job apply them. Thus, the challenge repository can be
36 | versioned, creating an audit log of all changes and allowing for point-in-time
37 | rollbacks of everything regarding a challenge should something go wrong.
38 |
39 | For more information, see `the documentation <https://rcds.redpwn.net/>`_.
40 |
41 | .. _redpwn: https://redpwn.net/
42 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | SPHINXAUTOBUILD ?= sphinx-autobuild
12 |
13 | .DEFAULT_GOAL = build
14 |
15 | .PHONY: Makefile build livebuild
16 |
17 | # Convenience target
18 | build: dirhtml
19 | @
20 |
21 | livebuild: livedirhtml
22 | @
23 |
24 | live%: Makefile
25 | @$(SPHINXAUTOBUILD) -b $(subst live,,$@) "$(SOURCEDIR)" "$(BUILDDIR)/$(subst live,,$@)" $(SPHINXOPTS) $(O)
26 |
27 | # Catch-all target: route all unknown targets to Sphinx using the new
28 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
29 | %: Makefile
30 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
31 |
--------------------------------------------------------------------------------
/docs/api/challenge.rst:
--------------------------------------------------------------------------------
1 | :mod:`rcds.challenge` --- Challenges
2 | ====================================
3 |
4 | .. automodule:: rcds.challenge
5 |
6 | .. autoclass:: rcds.ChallengeLoader
7 | :members:
8 |
9 | .. autoclass:: rcds.Challenge
10 | :members:
11 |
12 | :mod:`rcds.challenge.config` - Config loading
13 | ---------------------------------------------
14 |
15 | .. automodule:: rcds.challenge.config
16 | :members:
17 |
18 | :mod:`rcds.challenge.docker` - Docker containers
19 | ------------------------------------------------
20 |
21 | .. module:: rcds.challenge.docker
22 |
23 | .. autofunction:: get_context_files
24 | .. autofunction:: generate_sum
25 |
26 | .. autoclass:: ContainerManager
27 | :members:
28 |
29 | .. automethod:: __init__
30 |
31 | .. autoclass:: Container
32 | :members:
33 |
34 | .. autoattribute:: IS_BUILDABLE
35 |
36 | .. autoclass:: BuildableContainer
37 | :members:
38 |
39 | .. autoattribute:: IS_BUILDABLE
40 |
--------------------------------------------------------------------------------
/docs/api/project.rst:
--------------------------------------------------------------------------------
1 | :mod:`rcds.project` --- Projects
2 | ================================
3 |
4 | .. automodule:: rcds.project
5 |
6 | .. autoclass:: rcds.Project
7 | :members:
8 |
9 | :mod:`rcds.project.config` - Config loading
10 | -------------------------------------------
11 |
12 | .. automodule:: rcds.project.config
13 | :members:
14 |
15 | :mod:`rcds.project.assets` - Asset management
16 | ---------------------------------------------
17 |
18 | .. automodule:: rcds.project.assets
19 | :members:
20 |
--------------------------------------------------------------------------------
/docs/backends/index.rst:
--------------------------------------------------------------------------------
1 | Deployment Backends
2 | ===================
3 |
4 | rCDS uses a pluggable backend model for the task of actually deploying
5 | challenges to infrastructure. rCDS contains a few built-in backends, and
6 | third-party backends may be loaded by specifying their module name.
7 |
8 | Backends are specified in the top-level configuration :file:`rcds.yaml`:
9 |
10 | .. code-block:: yaml
11 |
12 | backends:
13 | - resolve: name
14 | options:
15 | key: value
16 |
17 | The top-level key ``backends`` is an array of backend objects, which consist of
18 | their name (``resolve``) and the options for the backend (``options``).
19 | ``resolve`` first attempts to load a built-in backend of the corresponding name,
20 | and, if it does not exist, then interprets the name as a package name and loads
21 | from it.
22 |
23 | Each backend may also modify the ``challenge.yaml`` schema---be sure to read
24 | the docs for the backends you are using to understand challenge options specific
25 | to that backend.
26 |
27 | .. _backends#scoreboard:
28 |
29 | Scoreboard Backends
30 | -------------------
31 |
32 | These are responsible for displaying the challenge to competitors; they handle
33 | uploading the challenge's metadata (description, flags, point value, etc) and
34 | any assets that are served to competitors.
35 |
36 | - :doc:`rCTF <rctf/index>`
37 |
38 | .. _backends#container-runtime:
39 |
40 | Container Runtime Backends
41 | --------------------------
42 |
43 | These are responsible for running the built challenge containers. By design,
44 | none of the built-in backends will start containers on the machine that rCDS is
45 | being run from.
46 |
47 | - :doc:`Kubernetes <k8s/index>`
48 |
--------------------------------------------------------------------------------
/docs/backends/k8s/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: rcds
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["services", "namespaces"]
8 | verbs: ["list", "get", "watch", "create", "update", "patch", "delete"]
9 | - apiGroups: ["apps"]
10 | resources: ["deployments"]
11 | verbs: ["list", "get", "watch", "create", "update", "patch", "delete"]
12 | - apiGroups: ["networking.k8s.io"]
13 | resources: ["ingresses", "networkpolicies"]
14 | verbs: ["list", "get", "watch", "create", "update", "patch", "delete"]
15 |
--------------------------------------------------------------------------------
/docs/backends/k8s/index.rst:
--------------------------------------------------------------------------------
1 | ``k8s`` --- Kubernetes
2 | ======================
3 |
4 | This backend deploys challenges to a Kubernetes cluster. Each challenge is
5 | deployed under its own namespace, and exposed via either a NodePort service or
6 | an Ingress object, depending on the protocol specified by the challenge. No
7 | accommodations are currently being made in case of NodePort conflicts---it is
8 | recommended that challenges are deployed to an isolated cluster (you should be
9 | doing this anyways since Kubernetes currently does not have hard multi-tenancy).
10 | A NetworkPolicy is also created to prevent network traffic from outside a
11 | challenge's namespace from reaching any containers which are not explicitly exposed.
12 |
13 | Like with rCDS's Docker integration, the Kubernetes backend does not have a
14 | dependency on any system commands (e.g. ``kubectl``); having a kubeconfig in the
15 | standard location is all that is necessary.
16 |
17 | Configuration
18 | -------------
19 |
20 | The only required option is ``domain``---NodePort services must be reachable on
21 | this domain, and the cluster's ingress controller must be reachable on its
22 | subdomains. For example, if ``domain`` is set as ``example.com``, then
23 | ``example.com`` must accept incoming TCP connections to NodePort services, and
24 | ``chall.example.com`` must be routed through the cluster's ingress controller.
25 | It is your responsibility to set up the ingress controller.
26 |
27 | Additional annotations on ingress and service objects can be specified through
28 | the ``annotations`` key, and affinity and tolerations on pods can be set through
29 | ``affinity`` and ``tolerations``, respectively.
30 |
31 | See the :ref:`backends/k8s#reference` for more details.
32 |
33 | Recommended Cluster Configuration
34 | ---------------------------------
35 |
36 | RBAC
37 | ~~~~
38 |
39 | As always, we recommend running rCDS from a CI service; thus, rCDS will need to
40 | authorize with your Kubernetes cluster. We have provided a ClusterRole which
41 | grants the minimum privileges required by the Kubernetes backend (also
42 | accessible here__):
43 |
44 | .. __: https://github.com/redpwn/rcds/blob/master/docs/backends/k8s/cluster-role.yaml
45 |
46 | .. literalinclude:: ./cluster-role.yaml
47 | :language: yaml
48 |
49 | Cluster Operator
50 | ~~~~~~~~~~~~~~~~
51 |
52 | We recommend `Google Kubernetes Engine`_, because it supports restricting of the
53 | metadata server by Kubernetes service account, a feature called `Workload
54 | Identity`_. This prevents SSRF from escalating into takeover of compute
55 | resources.
56 |
57 | .. _Google Kubernetes Engine: https://cloud.google.com/kubernetes-engine
58 | .. _Workload Identity: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
59 |
60 | Ingress
61 | ~~~~~~~
62 |
63 | Traefik_ is recommended as an ingress controller, since it is very configurable
64 | and supports JSON access logging for easy visibility into your challenges with
65 | your cluster operator's logging solution. Consider manually issuing a wildcard
66 | LetsEncrypt certificate and setting it as the default. Then, set annotations on
67 | ingresses to use TLS, and configure Traefik to upgrade HTTP to HTTPS for full
68 | HTTPS on challenges.
69 |
70 | .. note::
71 |
72 | By default, Traefik will attempt to auto-detect Content-Type; apply the
73 | ``contentType`` middleware to disable this behavior if it breaks your
74 | challenges.
75 |
76 | .. _Traefik: https://traefik.io/
77 |
78 | .. _backends/k8s#reference:
79 |
80 | Options Reference
81 | -----------------
82 |
83 | .. jsonschema:: ../../../rcds/backends/k8s/options.schema.yaml
84 |
85 | Raw schema:
86 |
87 | .. literalinclude:: ../../../rcds/backends/k8s/options.schema.yaml
88 | :language: yaml
89 |
--------------------------------------------------------------------------------
/docs/backends/rctf/index.rst:
--------------------------------------------------------------------------------
1 | ``rctf`` --- rCTF
2 | =================
3 |
4 | This backend deploys challenges to rCTF_. The options ``url`` and ``token``
5 | specify the URL of the rCTF instance and the token of the admin account to use,
6 | respectively. Both of these will be set from the environment variables
7 | ``RCDS_RCTF_URL`` and ``RCDS_RCTF_TOKEN`` respectively, if they exist.
8 | Challenges with a ``value`` set are assumed to be statically-scored; all other
9 | challenges are dynamically-scored according to the global ``scoring`` config
10 | (between ``scoring.minPoints`` and ``scoring.maxPoints``). rCTF does not support
11 | regex flags.
12 |
13 | .. _rCTF: https://rctf.redpwn.net/
14 |
15 | The ``sortOrder`` option allows you to automatically set the ``sortWeight``
16 | fields on challenges based on an ordering provided in this key. Listed
17 | challenges are assigned a ``sortWeight`` equal to their index in the array
18 | multiplied by -1. This means that if all the challenges have the same score and
19 | solve count, they will be displayed with the first element of the array at the
20 | top.
21 |
22 | Additional Challenge Properties
23 | -------------------------------
24 |
25 | ``author`` and ``category`` are required.
26 |
27 | ``tiebreakEligible`` (bool): whether or not this challenge factors into time-based
28 | tiebreakers. Defaults to ``true``.
29 |
30 | ``sortWeight`` (number): rCTF sort weight parameter. Ignored if the challenge is
31 | listed in the global ``sortOrder`` option. Defaults to ``0``.
32 |
33 | Options Reference
34 | -----------------
35 |
36 | .. jsonschema:: ../../../rcds/backends/rctf/options.schema.yaml
37 |
38 | Raw schema:
39 |
40 | .. literalinclude:: ../../../rcds/backends/rctf/options.schema.yaml
41 | :language: yaml
42 |
--------------------------------------------------------------------------------
/docs/challenge.rst:
--------------------------------------------------------------------------------
1 | ``challenge.yaml`` --- Challenge Config
2 | =======================================
3 |
4 | The file ``challenge.yaml`` defines the configuration for a challenge within an
5 | rCDS project. ``.yml`` and ``.json`` files are also supported.
6 |
7 | Basics
8 | ------
9 |
10 | ``id`` --- the identifier for this challenge. Must be unique project wide. This
11 | key is set automatically from the name of the directory the challenge is in;
12 | unless you have a very good reason to, don't set this in ``challenge.yaml``.
13 |
14 | ``author`` --- a string or list of strings containing the authors for this
15 | challenge.
16 |
17 | ``description`` --- self-explanatory. It is in Markdown format and will be
18 | processed with Jinja_. See :ref:`challenge#templating` for more details.
19 |
20 | ``category`` --- self-explanatory. If the challenge directory is exactly two
21 | directories deep (for example, ``/pwn/chall``, where ``/`` is the project root),
22 | this is set from the parent directory of the challenge's directory ("pwn" in the
23 | previous example). We recommend organizing your challenges in a
24 | :samp:`{category}/{chall}` structure.
25 |
26 | ``flag`` --- the flag for the challenge. If it is a string, then the flag is set
27 | to the string verbatim. Otherwise, if ``flag.file`` is set, the flag is loaded
28 | from the specified file (relative to the challenge root), and stripped of
29 | leading and trailing whitespace. If ``flag.regex`` is set, the flag is anything
30 | matching the given regex. A warning is emitted if the flag contains multiple
31 | lines (usually this is from an improperly configured flag file).
32 |
33 | ``provide`` --- an array of files to provide to competitors as downloads. The
34 | files can either be a string, in which case they are interpreted as the path to
35 | the file, or an object with the ``file`` and ``as`` properties; these properties
36 | define the path and the displayed name of the file, respectively.
37 |
38 | ``value`` --- point value of this challenge. Meaning is defined by the
39 | scoreboard backend.
40 |
41 | ``visible`` --- if set to ``false``, the scoreboard backend will act as if this
42 | challenge does not exist.
43 |
44 | .. warning::
45 |
46 | Most scoreboard backends will delete any challenges that were created by
47 | rCDS but now no longer exist---switching ``visible`` to ``false`` after the
48 | challenge has already been deployed may cause solves to be lost.
49 |
50 | Deployment
51 | ----------
52 |
53 | In rCDS, you first define all of the :ref:`containers <challenge#containers>`
54 | that your challenge needs to run, and then declare how you want them
55 | :ref:`exposed <challenge#expose>` to the world.
56 |
57 | ``deployed`` --- whether or not this challenge's containers should be deployed.
58 | Defaults to ``true``.
59 |
60 | .. _challenge#containers:
61 |
62 | Containers
63 | ~~~~~~~~~~
64 |
65 | The ``containers`` key is an object whose keys are the names of the containers
66 | this challenge creates. These containers can either use an existing image, or
67 | specify a path to a Dockerfile to build from. Each container must declare all
68 | ports that need to be connected to, both from other containers and by
69 | competitors; which ports are exposed to competitors are specified
70 | :ref:`separately <challenge#expose>`. Containers from the same challenge can
71 | connect to each other via a DNS lookup of their names; for example, if a
72 | container ``app`` is defined, another container can connect to any of ``app``'s
73 | declared ports by looking up the name ``app``.
74 |
75 | Whether a container needs to be rebuilt is determined by looking at every file
76 | in the Docker build context. Thus, it is very important that you include only
77 | what is necessary in the build context by using a ``.dockerignore`` file; at
78 | minimum, ``challenge.yaml`` should be excluded to prevent needing to rebuild the
79 | container when the challenge's description is updated.
80 |
81 | ``image`` --- the tag of an existing image to run
82 |
83 | ``build`` --- settings for building this container. If it is a string, then it
84 | is the path to the Docker build context (the directory where a Dockerfile is).
85 | It can also be an object for advanced configuration:
86 |
87 | ``build.context`` --- path to the Docker build context.
88 |
89 | ``build.dockerfile`` --- path to the Dockerfile, relative to the build context
90 | root.
91 |
92 | ``build.args`` --- Docker build args to set when building the container.
93 | Key-value object.
94 |
95 | ``ports`` --- a list of integers of the port numbers this container listens on.
96 | If anything needs to connect to a port on the container, list it here.
97 |
98 | ``replicas`` --- number of replicas of this container to run (on backends that
99 | support it). Defaults to 1. Leave at 1 for stateful containers.
100 |
101 | ``environment`` --- key-value object of environment variables to set.
102 |
103 | ``resources`` --- resource limits on the container. See `Kubernetes's
104 | documentation`__ on the format of this value (only ``cpu`` and ``memory`` are
105 | implemented).
106 |
107 | .. __: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
108 |
109 | .. _challenge#expose:
110 |
111 | Expose
112 | ~~~~~~
113 |
114 | The top-level ``expose`` key defines all of the ports on :ref:`containers
115 | <challenge#containers>` that should be exposed to competitors. It is an object
116 | whose keys correspond to the names of defined containers, and whose values are
117 | arrays of port objects. These objects each describe how one port should be
118 | exposed.
119 |
120 | ``target`` --- the port on the container that this rule is targeting.
121 |
122 | ``tcp`` --- if specified, this port should be treated as TCP. The value is the
123 | port at which it is exposed on, on the challenge host.
124 |
125 | ``http`` --- if specified, this port should be treated as HTTP, and will be
126 | reverse proxied with TLS termination. The value is a string, the subdomain name
127 | on which the challenge will be hosted. Alternatively, it can be an object with a
128 | ``raw`` key, in which case ``http.raw`` contains the FQDN that the challenge
129 | will be served on. When using ``http.raw``, rCDS will handle the virtual
130 | hosting, however as a challenge author, you will need to coordinate with your
131 | infrastructure admin on setting up TLS and DNS records.
132 |
133 | .. _challenge#templating:
134 |
135 | Templating
136 | ----------
137 |
138 | Challenge descriptions are rendered using Jinja_. The contents of the
139 | challenge's config is available on the ``challenge`` object in the Jinja
140 | environment. Some fields are altered with more concrete versions of their
141 | contents---for example, the ``http`` key on ``expose`` port objects will contain
142 | the fully-qualified domain name, instead of just the prefix. Container backends
143 | will also add a ``host`` key to a TCP ``expose`` port, which contains the host at
144 | which that port will be accessible.
145 |
146 | .. note::
147 |
148 | An example configuration:
149 |
150 | .. code-block:: yaml
151 |
152 | # challenge.yaml
153 | ...
154 | description: |
155 | 1: {{ challenge.expose.main[0].http }}
156 |
157 | 2: {{ challenge.expose.main[1].host }}:{{ challenge.expose.main[1].tcp }}
158 | containers:
159 | main:
160 | ports: [1337, 1338]
161 | expose:
162 | main:
163 | - target: 1337
164 | http: leet
165 | - target: 1338
166 | tcp: 31253
167 |
168 | Assuming the container backend is hosted on example.com, the description
169 | would render as:
170 |
171 | 1: leet.example.com
172 |
173 | 2: example.com:31253
174 |
175 | There are also shortcuts available for the most common use-case: a single
176 | exposed port. ``host`` is the hostname under which the port is accessible.
177 | ``link`` will automatically create a Markdown link to the exposed port, and
178 | ``url`` will create just the URL without the accompanying Markdown. This works
179 | for both HTTP and TCP ports, since you may want to expose a challenge which
180 | breaks behind a reverse proxy as TCP. For TCP ports, there is also ``port``,
181 | which is the exposed port number of the port, and ``nc``, which
182 | will create a ``nc`` command to connect to the challenge---it is equivalent to
183 | ``nc {{ host }} {{ port }}``.
184 |
185 | .. _Jinja: https://jinja.palletsprojects.com
186 |
187 | Reference
188 | ---------
189 |
190 | .. jsonschema:: ../rcds/challenge/challenge.schema.yaml
191 |
192 | Raw schema:
193 |
194 | .. literalinclude:: ../rcds/challenge/challenge.schema.yaml
195 | :language: yaml
196 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = "rCDS"
# Shadowing the ``copyright`` builtin is intentional: Sphinx reads this
# exact module-level variable name.
copyright = "2020, redpwn"
author = "redpwn"


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.coverage",
    "sphinx.ext.viewcode",
    "sphinx.ext.intersphinx",
    "sphinx_rtd_theme",
    # The hyphen is not a typo: the sphinx-jsonschema package installs a
    # module under this exact (hyphenated) name.
    "sphinx-jsonschema",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]


# -- Extension configuration -------------------------------------------------

# Inventories for cross-referencing the Python stdlib and docker-py docs
# via intersphinx.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "docker": ("https://docker-py.readthedocs.io/en/stable/", None),
}
67 |
--------------------------------------------------------------------------------
/docs/config-samples.rst:
--------------------------------------------------------------------------------
1 | Sample Configs
2 | ==============
3 |
4 | Multi-Container Web Challenge
5 | -----------------------------
6 |
7 | This challenge uses Redis and NGINX containers in addition to the main ``app``
8 | container. The containers communicate with each other by host name. Adapted from
9 | `Viper`_ from `redpwnCTF 2020`_.
10 |
11 | .. code-block:: yaml
12 |
13 | name: viper
14 | author: Jim
15 | description: |-
16 | Don't you want your own ascii viper? No? Well here is Viper as a Service.
17 | If you experience any issues, send it
18 | [here](https://admin-bot.redpwnc.tf/submit?challenge=viper)
19 |
20 | Site: {{link}}
21 |
22 | flag:
23 | file: ./app/flag.txt
24 |
25 | provide:
26 | - ./viper.tar.gz
27 |
28 | containers:
29 | app:
30 | build: ./app
31 | resources:
32 | limits:
33 | cpu: 100m
34 | memory: 100M
35 | ports: [31337]
36 | nginx:
37 | build: ./nginx
38 | resources:
39 | limits:
40 | cpu: 100m
41 | memory: 100M
42 | ports: [80]
43 | redis:
44 | image: redis
45 | resources:
46 | limits:
47 | cpu: 100m
48 | memory: 100M
49 | ports: [6379]
50 |
51 | expose:
52 | nginx:
53 | - target: 80
54 | http: viper
55 |
56 | .. _config-samples#gke-rctf-gitlab:
57 |
58 | GKE and rCTF on GitLab CI
59 | -------------------------
60 |
61 | This is the configuration used for `redpwnCTF 2020`_.
62 |
63 | .. code-block:: yaml
64 |
65 | # rcds.yaml
66 | docker:
67 | image:
68 | prefix: gcr.io/project/ctf/2020
69 |
70 | flagFormat: flag\{[a-zA-Z0-9_,.'?!@$<>*:-]*\}
71 |
72 | defaults:
73 | containers:
74 | resources:
75 | limits:
76 | cpu: 100m
77 | memory: 150Mi
78 | requests:
79 | cpu: 10m
80 | memory: 30Mi
81 |
82 | backends:
83 | - resolve: k8s
84 | options:
85 | kubeContext: gke_project_zone_cluster
86 | domain: challs.2020.example.com
87 | annotations:
88 | ingress:
89 | traefik.ingress.kubernetes.io/router.tls: "true"
90 | traefik.ingress.kubernetes.io/router.middlewares: "ingress-nocontenttype@kubernetescrd"
91 | - resolve: rctf
92 | options:
93 | scoring:
94 | minPoints: 100
95 | maxPoints: 500
96 |
97 | .. code-block:: yaml
98 |
99 | # .gitlab-ci.yml
100 | image: google/cloud-sdk:slim
101 |
102 | services:
103 | - docker:dind
104 |
105 | stages:
106 | - deploy
107 |
108 | variables:
109 | DOCKER_HOST: tcp://docker:2375
110 | RCDS_RCTF_URL: https://2020.example.com/
111 |
112 | before_script:
113 | - pip3 install rcds
114 | - gcloud auth activate-service-account service-account@project.iam.gserviceaccount.com --key-file=$GCLOUD_SA_TOKEN
115 | - gcloud config set project project
116 | - gcloud auth configure-docker gcr.io --quiet
117 | - gcloud container clusters get-credentials cluster --zone=zone
118 |
119 | deploy:
120 | stage: deploy
121 | when: manual
122 | environment:
123 | name: production
124 | script:
125 | - rcds deploy
126 |
127 | The config creates Kubernetes Ingress objects compatible with Traefik, and
128 | relies on the following middleware CRD to disable Traefik's
129 | Content-Type auto-detection (change the name and namespace, both in the CRD and
130 | the ingress annotation, to suit your setup):
131 |
132 | .. code-block:: yaml
133 |
134 | apiVersion: traefik.containo.us/v1alpha1
135 | kind: Middleware
136 | metadata:
137 | name: nocontenttype
138 | namespace: ingress
139 | spec:
140 | contentType:
141 | autoDetect: false
142 |
143 | .. _Viper: https://github.com/redpwn/redpwnctf-2020-challenges/blob/master/web/viper/challenge.yaml
144 | .. _redpwnCTF 2020: https://2020.redpwn.net/
145 |
--------------------------------------------------------------------------------
/docs/contributing.rst:
--------------------------------------------------------------------------------
1 | ../CONTRIBUTING.rst
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. rCDS documentation master file, created by
2 | sphinx-quickstart on Fri Apr 10 17:12:14 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | rCDS - A CTF Challenge Deployment Tool
7 | ======================================
8 |
9 | .. A short version of this text is in README.rst
10 |
11 | rCDS_ is redpwn_'s CTF challenge deployment tool. It is designed to automate the
12 | entire challenge deployment process, taking sources from challenge authors and
13 | provisioning the necessary resources to both make challenges available on the
14 | competition scoreboard and to spin up Docker containers that the challenge needs
15 | to run.
16 |
17 | rCDS has an opinionated model for managing CTF challenges. It operates on a
18 | centralized challenge repository and is designed to be run from a CI/CD system.
19 | This repository is the single source of truth for all data about challenges, and
20 | rCDS itself essentially acts as a tool to sync the state of various systems (the
21 | scoreboard and the container runtime) to what is described by this repository.
22 | Authors do not directly interface with any of these systems, and instead push
23 | their changes and let a CI job apply them. Thus, the challenge repository can be
24 | versioned, creating an audit log of all changes and allowing for point-in-time
25 | rollbacks of everything regarding a challenge should something go wrong.
26 |
27 | If you are a challenge author for a CTF using rCDS, head over to the
28 | :doc:`challenge config format docs <challenge>`.
29 |
30 | rCDS's mode of operation is optimized for a CI environment. After validating
31 | all challenges' configurations, rCDS runs in 4 stages:
32 |
33 | 1. Build all challenge containers, as needed, and upload to a remote container
34 | registry
35 | 2. Collect all files to upload to competitors
36 | 3. Push the containers over to a :ref:`container runtime
37 | `
38 | 4. Render descriptions and push all relevant data to a :ref:`scoreboard
39 | `
40 |
41 | At its core, rCDS only handles building the Docker containers and preparing all
42 | assets for a challenge (description, files, etc.)---everything else is handled
43 | by a :doc:`backend <backends/index>`.
44 |
45 | rCDS does not rely on any system dependencies other than its Python
46 | dependencies. It does not shell out to system commands for performing any
47 | operations, and thus does not need the Docker CLI installed; it just needs to be
48 | able to connect to a Docker daemon.
49 |
50 | GitLab CI
51 | ---------
52 |
53 | rCDS recommends running CI/CD on `GitLab CI`_, because it allows for manual job
54 | runs and tracking deployments in an "environment", which enables easy rollbacks
55 | in case anything goes wrong. It also has well-documented support for performing
56 | `Docker image builds <https://docs.gitlab.com/ee/ci/docker/using_docker_build.html>`_ on
57 | Gitlab.com. On Gitlab.com's shared runners, Docker builds can be run by running
58 | the ``docker:dind`` service and setting the ``DOCKER_HOST`` environment variable
59 | to ``tcp://docker:2375``---with this variable set, rCDS does not need to run on
60 | the ``docker`` image; you can use ``python`` or any other image with a working
61 | ``pip`` (e.g. ``google/cloud-sdk:slim``).
62 |
63 | .. note::
64 |
65 | An example ``.gitlab-ci.yml``:
66 |
67 | .. code-block:: yaml
68 |
69 | image: python:3.8
70 |
71 | services:
72 | - docker:dind
73 |
74 | variables:
75 | DOCKER_HOST: tcp://docker:2375
76 |
77 | before_script:
78 | - pip3 install rcds
79 |
80 | deploy:
81 | when: manual
82 | environment:
83 | name: production
84 | script:
85 | - rcds deploy
86 |
87 | You may need additional options to run various deployment backends; see
88 | :ref:`an example using GKE and rCTF `.
89 |
90 | .. _rCDS: https://github.com/redpwn/rcds
91 | .. _redpwn: https://redpwn.net/
92 | .. _GitLab CI: https://docs.gitlab.com/ee/ci
93 |
94 |
95 | .. TOC Trees--------------------------------------------------------------------
96 |
97 | .. toctree::
98 | :hidden:
99 |
100 | Introduction <self>
101 |
102 | .. toctree::
103 | :maxdepth: 1
104 | :caption: Contents
105 |
106 | project
107 | challenge
108 | backends/index
109 | config-samples
110 | contributing
111 |
112 | .. toctree::
113 | :maxdepth: 1
114 | :caption: Backends
115 |
116 | backends/rctf/index
117 | backends/k8s/index
118 |
119 | .. toctree::
120 | :maxdepth: 2
121 | :caption: API Reference
122 |
123 | api/project
124 | api/challenge
125 |
126 |
127 | Indices and tables
128 | ------------------
129 |
130 | * :ref:`genindex`
131 | * :ref:`modindex`
132 | * :ref:`search`
133 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

REM Allow overriding the sphinx-build executable via the SPHINXBUILD
REM environment variable; default to the one on PATH.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build

if "%1" == "" goto help

REM Probe for sphinx-build; errorlevel 9009 means the command was not found.
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

REM Delegate the requested target (e.g. "html") to Sphinx's make mode.
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
36 |
--------------------------------------------------------------------------------
/docs/project.rst:
--------------------------------------------------------------------------------
1 | ``rcds.yaml`` --- Project Config
2 | ================================
3 |
4 | The file ``rcds.yaml`` defines the configuration for the current project, and
5 | its location also defines the root of the project. ``.yml`` and ``.json`` files
6 | are also supported. Challenges will be searched for in subdirectories of the
7 | project root. This file contains various global configuration options, including
8 | for the :doc:`backends <backends/index>` and :ref:`Docker containers
9 | <project#docker>`.
10 |
11 | .. _backends: ../backends/
12 |
13 | .. _project#docker:
14 |
15 | Docker
16 | ------
17 |
18 | ``docker.image.prefix`` *(required)* --- the prefix for generated Docker image
19 | tags. This should contain the registry and "directory" --- e.g.
20 | ``gcr.io/redpwn/challs``.
21 |
22 | ``docker.image.template`` --- the Jinja template to create image tags with; it
23 | is joined with ``docker.image.prefix``. Defaults to ``rcds-{{ challenge.id }}-{{
24 | container.name }}``.
25 |
26 | Misc
27 | ----
28 |
29 | ``defaults`` --- default options to set on challenges. This key takes an object of
30 | the same shape as ``challenge.yaml``. Setting defaults on keys like ``expose``
31 | and ``containers`` will apply the defaults to all exposed ports and containers,
32 | respectively.
33 |
34 | .. note::
35 |
36 | An example of setting default resource limits on all containers which don't
37 | otherwise specify limits:
38 |
39 | .. code-block:: yaml
40 |
41 | defaults:
42 | containers:
43 | resources:
44 | limits:
45 | cpu: 100m
46 | memory: 150Mi
47 | requests:
48 | cpu: 10m
49 | memory: 30Mi
50 |
51 | ``flagFormat`` --- a regex to test all (static) flags against.
52 |
53 | Reference
54 | ---------
55 |
56 | .. jsonschema:: ../rcds/project/rcds.schema.yaml
57 |
58 | Raw schema:
59 |
60 | .. literalinclude:: ../rcds/project/rcds.schema.yaml
61 | :language: yaml
62 |
--------------------------------------------------------------------------------
/example/pwn/example/.dockerignore:
--------------------------------------------------------------------------------
1 | challenge.yaml
2 |
--------------------------------------------------------------------------------
/example/pwn/example/challenge.yaml:
--------------------------------------------------------------------------------
1 | name: example
2 | description: description
3 | flag: flag{example_flag}
4 | points: 500
5 |
6 | containers:
7 | main:
8 | build: .
9 | resources:
10 | requests:
11 | cpu: 0.1
12 | memory: 100Mi
13 | limits:
14 | cpu: 0.1
15 | memory: 100Mi
16 | ports:
17 | - 9999
18 | expose:
19 | main:
20 | - target: 9999
21 | tcp: 31010
22 |
--------------------------------------------------------------------------------
/example/rcds.yaml:
--------------------------------------------------------------------------------
1 | docker:
2 | image:
3 | prefix: rcds-example
4 |
5 | backends:
6 | - resolve: k8s
7 | options:
8 | domain: challs.example.com
9 |
--------------------------------------------------------------------------------
/example/web/with-database/chall/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 |
--------------------------------------------------------------------------------
/example/web/with-database/chall/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:14
2 |
3 | RUN mkdir /app
4 | WORKDIR /app
5 |
6 | COPY package.json .
7 |
8 | RUN npm install
9 |
10 | COPY index.js .
11 |
12 | CMD ["node", "index.js"]
13 |
--------------------------------------------------------------------------------
/example/web/with-database/chall/index.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redpwn/rcds/0b3a2e1c6aa272725b2cd032ddddd139393cc5d3/example/web/with-database/chall/index.js
--------------------------------------------------------------------------------
/example/web/with-database/chall/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chall",
3 | "version": "1.0.0",
4 | "lockfileVersion": 1,
5 | "requires": true,
6 | "dependencies": {
7 | "buffer-writer": {
8 | "version": "2.0.0",
9 | "resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz",
10 | "integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw=="
11 | },
12 | "packet-reader": {
13 | "version": "1.0.0",
14 | "resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz",
15 | "integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ=="
16 | },
17 | "pg": {
18 | "version": "8.2.1",
19 | "resolved": "https://registry.npmjs.org/pg/-/pg-8.2.1.tgz",
20 | "integrity": "sha512-DKzffhpkWRr9jx7vKxA+ur79KG+SKw+PdjMb1IRhMiKI9zqYUGczwFprqy+5Veh/DCcFs1Y6V8lRLN5I1DlleQ==",
21 | "requires": {
22 | "buffer-writer": "2.0.0",
23 | "packet-reader": "1.0.0",
24 | "pg-connection-string": "^2.2.3",
25 | "pg-pool": "^3.2.1",
26 | "pg-protocol": "^1.2.4",
27 | "pg-types": "^2.1.0",
28 | "pgpass": "1.x",
29 | "semver": "4.3.2"
30 | }
31 | },
32 | "pg-connection-string": {
33 | "version": "2.2.3",
34 | "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.2.3.tgz",
35 | "integrity": "sha512-I/KCSQGmOrZx6sMHXkOs2MjddrYcqpza3Dtsy0AjIgBr/bZiPJRK9WhABXN1Uy1UDazRbi9gZEzO2sAhL5EqiQ=="
36 | },
37 | "pg-int8": {
38 | "version": "1.0.1",
39 | "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz",
40 | "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw=="
41 | },
42 | "pg-pool": {
43 | "version": "3.2.1",
44 | "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.2.1.tgz",
45 | "integrity": "sha512-BQDPWUeKenVrMMDN9opfns/kZo4lxmSWhIqo+cSAF7+lfi9ZclQbr9vfnlNaPr8wYF3UYjm5X0yPAhbcgqNOdA=="
46 | },
47 | "pg-protocol": {
48 | "version": "1.2.4",
49 | "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.2.4.tgz",
50 | "integrity": "sha512-/8L/G+vW/VhWjTGXpGh8XVkXOFx1ZDY+Yuz//Ab8CfjInzFkreI+fDG3WjCeSra7fIZwAFxzbGptNbm8xSXenw=="
51 | },
52 | "pg-types": {
53 | "version": "2.2.0",
54 | "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz",
55 | "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==",
56 | "requires": {
57 | "pg-int8": "1.0.1",
58 | "postgres-array": "~2.0.0",
59 | "postgres-bytea": "~1.0.0",
60 | "postgres-date": "~1.0.4",
61 | "postgres-interval": "^1.1.0"
62 | }
63 | },
64 | "pgpass": {
65 | "version": "1.0.2",
66 | "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.2.tgz",
67 | "integrity": "sha1-Knu0G2BltnkH6R2hsHwYR8h3swY=",
68 | "requires": {
69 | "split": "^1.0.0"
70 | }
71 | },
72 | "postgres-array": {
73 | "version": "2.0.0",
74 | "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz",
75 | "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA=="
76 | },
77 | "postgres-bytea": {
78 | "version": "1.0.0",
79 | "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz",
80 | "integrity": "sha1-AntTPAqokOJtFy1Hz5zOzFIazTU="
81 | },
82 | "postgres-date": {
83 | "version": "1.0.5",
84 | "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.5.tgz",
85 | "integrity": "sha512-pdau6GRPERdAYUQwkBnGKxEfPyhVZXG/JiS44iZWiNdSOWE09N2lUgN6yshuq6fVSon4Pm0VMXd1srUUkLe9iA=="
86 | },
87 | "postgres-interval": {
88 | "version": "1.2.0",
89 | "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz",
90 | "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==",
91 | "requires": {
92 | "xtend": "^4.0.0"
93 | }
94 | },
95 | "semver": {
96 | "version": "4.3.2",
97 | "resolved": "https://registry.npmjs.org/semver/-/semver-4.3.2.tgz",
98 | "integrity": "sha1-x6BxWKgL7dBSNVt3DYLWZA+AO+c="
99 | },
100 | "split": {
101 | "version": "1.0.1",
102 | "resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
103 | "integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
104 | "requires": {
105 | "through": "2"
106 | }
107 | },
108 | "through": {
109 | "version": "2.3.8",
110 | "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
111 | "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU="
112 | },
113 | "xtend": {
114 | "version": "4.0.2",
115 | "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
116 | "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
117 | }
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
/example/web/with-database/chall/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chall",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "author": "",
10 | "license": "ISC",
11 | "dependencies": {
12 | "pg": "^8.2.1"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/example/web/with-database/challenge.yaml:
--------------------------------------------------------------------------------
1 | name: Database challenge
2 | author: author
3 | description: |-
4 | Challenge with a database
5 |
6 | flag: flag{l33t_t3xt_h3r3}
7 |
8 | provide:
9 | - ./chall/index.js
10 |
11 | containers:
12 | main:
13 | build: chall
14 | ports: [3000]
15 | environment:
16 | PGHOST: db
17 | PGUSER: postgres
18 | PGPASSWORD: unguessable_password
19 | PGPORT: "5432"
20 | PGDATABASE: database # note - this may require manual setup to create
21 | db:
22 | image: postgres
23 | environment:
24 | POSTGRES_PASSWORD: unguessable_password
25 | ports: [5432]
26 |
27 | expose:
28 | main:
29 | - target: 3000
30 | http: with-database # subdomain name
31 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "rcds"
3 | version = "0.1.4"
4 | description = "An automated CTF challenge deployment tool"
5 | readme = "README.rst"
6 | authors = ["redpwn "]
7 | homepage = "https://rcds.redpwn.net"
8 | repository = "https://github.com/redpwn/rCDS"
9 | license = "BSD-3-Clause"
10 | packages = [
11 | { include = "rcds" },
12 | ]
13 |
14 | [tool.poetry.scripts]
15 | rcds = "rcds.cli:cli"
16 |
17 | [tool.poetry.dependencies]
18 | python = "^3.6"
19 | pyyaml = "^5.3.1"
20 | pathspec = "^0.8.1"
21 | docker = "^4.3.1"
22 | jsonschema = "^3.2.0"
23 | dataclasses = { version = ">=0.7,<0.9", python = "~3.6" }
24 | Jinja2 = ">=2.11.2,<4.0.0"
25 | kubernetes = "^12.0.0"
26 | requests = "^2.24.0"
27 | requests-toolbelt = "^0.9.1"
28 | click = "^7.1.2"
29 |
30 | # Docs build dependencies
31 | sphinx = { version = "^3.3.0", optional = true }
32 | sphinx_rtd_theme = { version = "^0.5.0", optional = true }
33 | sphinx-jsonschema = { version = "^1.15", optional = true }
34 |
35 | [tool.poetry.dev-dependencies]
36 | pre-commit = { version = "^2.12.1", python = "^3.6.1" }
37 | black = "^20.8b1"
38 | pytest = "^6.2.3"
39 | coverage = "^5.5"
40 | mypy = "^0.812"
41 | flake8 = "^3.9.0"
42 | flake8-bugbear = "^21.4.3"
43 | pytest-datadir = "^1.3.1"
44 | isort = "^5.8.0"
45 |
46 | [tool.poetry.extras]
47 | docs = ["sphinx", "sphinx_rtd_theme", "sphinx-jsonschema"]
48 |
49 | [tool.isort]
50 | profile = "black"
51 |
52 | [build-system]
53 | requires = ["poetry>=0.12"]
54 | build-backend = "poetry.masonry.api"
55 |
--------------------------------------------------------------------------------
/rcds/__init__.py:
--------------------------------------------------------------------------------
1 | from rcds.challenge import Challenge # noqa: F401
2 | from rcds.challenge import ChallengeLoader # noqa: F401
3 | from rcds.project import Project # noqa: F401
4 |
--------------------------------------------------------------------------------
/rcds/backend/__init__.py:
--------------------------------------------------------------------------------
1 | from .backend import BackendBase # noqa: F401
2 | from .backend import BackendContainerRuntime # noqa: F401
3 | from .backend import BackendScoreboard # noqa: F401
4 | from .backend import BackendsInfo # noqa: F401
5 | from .backend import load_backend_module # noqa: F401
6 |
--------------------------------------------------------------------------------
/rcds/backend/backend.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from importlib import import_module
3 | from typing import TYPE_CHECKING, Any, Dict
4 |
5 | if TYPE_CHECKING:
6 | import rcds
7 |
8 |
class BackendBase(ABC):
    """Common base class for rCDS deployment backends."""

    def patch_challenge_schema(self, schema: Dict[str, Any]) -> None:
        """Mutate the ``challenge.yaml`` JSON schema in place.

        Backends may override this hook to add backend-specific keys to the
        challenge config schema; the default implementation is a no-op.

        :param schema: the JSON schema object to modify
        """
        pass
12 |
13 |
class BackendScoreboard(BackendBase):
    """Abstract interface for a scoreboard backend."""

    @abstractmethod
    def commit(self) -> bool:
        """Push all pending challenge data to the scoreboard.

        :returns: a bool — presumably whether the commit succeeded; confirm
            semantics against concrete backends
        """
        raise NotImplementedError()
18 |
19 |
class BackendContainerRuntime(BackendBase):
    """Abstract interface for a container runtime backend."""

    @abstractmethod
    def commit(self) -> bool:
        """Sync all challenge containers to the runtime.

        :returns: a bool — presumably whether the commit succeeded; confirm
            semantics against concrete backends
        """
        raise NotImplementedError()
24 |
25 |
class BackendsInfo:
    """Describes the capabilities of a backend module and constructs its backends.

    Backend modules return an instance of (a subclass of) this class from
    their ``get_info()`` entry point.
    """

    # Capability flags; concrete backend modules set these to True for the
    # backend kinds they implement.
    HAS_SCOREBOARD: bool = False
    HAS_CONTAINER_RUNTIME: bool = False

    def get_scoreboard(
        self, project: "rcds.Project", options: Dict[str, Any]
    ) -> BackendScoreboard:
        """Construct this module's scoreboard backend.

        Only meaningful when ``HAS_SCOREBOARD`` is True; the base
        implementation raises :class:`NotImplementedError`.

        :param project: the rCDS project being deployed
        :param options: backend options from ``rcds.yaml``
        """
        raise NotImplementedError()

    def get_container_runtime(
        self, project: "rcds.Project", options: Dict[str, Any]
    ) -> BackendContainerRuntime:
        """Construct this module's container runtime backend.

        Only meaningful when ``HAS_CONTAINER_RUNTIME`` is True; the base
        implementation raises :class:`NotImplementedError`.

        :param project: the rCDS project being deployed
        :param options: backend options from ``rcds.yaml``
        """
        raise NotImplementedError()
39 |
40 |
def load_backend_module(name: str) -> BackendsInfo:
    """Import a backend module by name and return its :class:`BackendsInfo`.

    *name* is first resolved as a built-in backend (``rcds.backends.<name>``);
    failing that, it is imported as a fully-qualified module path.
    """
    builtin_name = f"rcds.backends.{name}"
    try:
        module = import_module(builtin_name)
    except ModuleNotFoundError:
        module = import_module(name)
    return module.get_info()  # type: ignore
47 |
--------------------------------------------------------------------------------
/rcds/backends/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redpwn/rcds/0b3a2e1c6aa272725b2cd032ddddd139393cc5d3/rcds/backends/__init__.py
--------------------------------------------------------------------------------
/rcds/backends/k8s/__init__.py:
--------------------------------------------------------------------------------
1 | from .backend import get_info # noqa: F401
2 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/backend.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | from pathlib import Path
3 | from typing import Any, Dict, List
4 |
5 | import yaml
6 | from jinja2 import Environment, Template
7 | from kubernetes import config # type: ignore
8 |
9 | import rcds
10 | import rcds.backend
11 | from rcds.util import load_any
12 | from rcds.util.jsonschema import DefaultValidatingDraft7Validator
13 |
14 | from .jinja import jinja_env
15 | from .manifests import AnyManifest, sync_manifests
16 |
# Validator for this backend's options, built from the sibling
# options.schema.yaml. The "default-validating" variant also fills in schema
# defaults as it validates.
options_schema_validator = DefaultValidatingDraft7Validator(
    schema=load_any(Path(__file__).parent / "options.schema.yaml")
)
20 |
21 |
class ContainerBackend(rcds.backend.BackendContainerRuntime):
    """Deploys challenge containers to a Kubernetes cluster by rendering the
    bundled Jinja manifest templates and syncing the results to the server."""

    # Project being deployed and the (schema-validated) backend options.
    _project: rcds.Project
    _options: Dict[str, Any]
    # Template used to derive a per-challenge namespace name.
    _namespace_template: Template
    # Jinja environment with `options` preloaded into its globals.
    _jinja_env: Environment

    def __init__(self, project: rcds.Project, options: Dict[str, Any]):
        """Validate *options* (filling in schema defaults) and load kubeconfig.

        :raises ValueError: if the options fail schema validation
        """
        self._project = project
        self._options = options

        # FIXME: validate options better
        if not options_schema_validator.is_valid(self._options):
            raise ValueError("Invalid options")

        self._namespace_template = Template(self._options["namespaceTemplate"])
        self._jinja_env = jinja_env.overlay()
        self._jinja_env.globals["options"] = self._options

        # Uses the current kubeconfig context unless `kubeContext` is set.
        config.load_kube_config(context=self._options.get("kubeContext", None))

    def commit(self) -> bool:
        """Render manifests for all deployed challenges and sync them."""
        deployed_challs = filter(
            lambda c: c.config["deployed"], self._project.challenges.values()
        )
        # TODO: auto assignment of expose params
        manifests = list(
            itertools.chain.from_iterable(
                map(
                    lambda chall: self.gen_manifests_for_challenge(chall),
                    deployed_challs,
                )
            )
        )
        sync_manifests(manifests)
        return True

    def get_namespace_for_challenge(self, challenge: rcds.Challenge) -> str:
        """Render the configured namespace template for *challenge*."""
        return self._namespace_template.render({"challenge": challenge.config})

    def gen_manifests_for_challenge(
        self, challenge: rcds.Challenge
    ) -> List[AnyManifest]:
        """Render all Kubernetes manifests for a single challenge.

        Returns an empty list for challenges without containers; otherwise a
        namespace and network-policy manifest plus a deployment/service/ingress
        set per container.
        """
        if "containers" not in challenge.config:
            return []

        manifests: List[Dict[str, Any]] = []

        def render_and_append(env: Environment, template: str) -> None:
            # A template may render to nothing (conditional manifest) or to
            # several YAML documents; drop the empty ones.
            nonlocal manifests
            manifest = env.get_template(template).render().strip()
            manifests += filter(lambda x: x is not None, yaml.safe_load_all(manifest))

        challenge_env: Environment = self._jinja_env.overlay()
        challenge_env.globals["challenge"] = challenge.config
        challenge_env.globals["namespace"] = self.get_namespace_for_challenge(challenge)

        render_and_append(challenge_env, "namespace.yaml")
        render_and_append(challenge_env, "network-policy.yaml")

        for container_name, container_config in challenge.config["containers"].items():
            expose_config = challenge.config.get("expose", dict()).get(
                container_name, None
            )

            if expose_config is not None:
                # Normalize expose entries in place.
                # NOTE(review): this mutates challenge.config — calling this
                # method twice would re-append the domain to http hostnames;
                # confirm it runs at most once per challenge.
                for expose_port in expose_config:
                    if "http" in expose_port:
                        if isinstance(expose_port["http"], str):
                            # Subdomain shorthand: qualify with the cluster domain.
                            expose_port["http"] += "." + self._options["domain"]
                        else:
                            assert isinstance(expose_port["http"], dict)
                            if "raw" in expose_port["http"]:
                                # `raw` hostnames bypass domain qualification.
                                expose_port["http"] = expose_port["http"]["raw"]
                    if "tcp" in expose_port:
                        # TCP services are reached via NodePort on the cluster domain.
                        expose_port["host"] = self._options["domain"]

            container_env: Environment = challenge_env.overlay()
            container_env.globals["container"] = {
                "name": container_name,
                "config": container_config,
            }
            if expose_config is not None:
                container_env.globals["container"]["expose"] = expose_config

            render_and_append(container_env, "deployment.yaml")
            render_and_append(container_env, "service.yaml")
            render_and_append(container_env, "ingress.yaml")

        return manifests
111 |
112 |
class BackendsInfo(rcds.backend.BackendsInfo):
    """Capability descriptor for the Kubernetes backend: container runtime only."""

    HAS_CONTAINER_RUNTIME = True

    def get_container_runtime(
        self, project: rcds.Project, options: Dict[str, Any]
    ) -> ContainerBackend:
        """Construct a :class:`ContainerBackend` for *project* with *options*."""
        runtime = ContainerBackend(project, options)
        return runtime
120 |
121 |
def get_info() -> BackendsInfo:
    """Module entry point used by ``rcds.backend.load_backend_module``."""
    info = BackendsInfo()
    return info
124 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/jinja.py:
--------------------------------------------------------------------------------
1 | from textwrap import dedent
2 | from typing import Any, Dict, Optional
3 |
4 | import yaml
5 | from jinja2 import Environment, PackageLoader, filters
6 |
# Shared Jinja environment for the k8s manifest templates bundled with this
# package. trim/lstrip keep control-flow lines from leaking whitespace into
# the rendered YAML; autoescape is off because the output is YAML, not HTML.
jinja_env = Environment(
    loader=PackageLoader("rcds.backends.k8s", "templates"),
    autoescape=False,
    trim_blocks=True,
    lstrip_blocks=True,
)
13 |
14 |
def jinja_filter_indent(data: str, *args, **kwargs) -> str:
    """Dedent *data*, then delegate to Jinja's built-in ``indent`` filter."""
    normalized = dedent(data)
    return filters.do_indent(normalized, *args, **kwargs)
17 |
18 |
def jinja_filter_yaml(data: Dict[str, Any], indent: Optional[int] = None) -> str:
    """Serialize *data* to YAML, optionally indenting every line by *indent*."""
    dumped = yaml.dump(data).strip()
    if indent is None:
        return dumped
    return jinja_filter_indent(dumped, indent)
24 |
25 |
# Register the custom filters on the shared environment. The `quote` filter
# leans on Python's repr() for escaping; NOTE(review): repr escaping is not
# identical to YAML single-quote escaping for all inputs — verify for values
# containing quotes or control characters.
jinja_env.filters["indent"] = jinja_filter_indent
jinja_env.filters["yaml"] = jinja_filter_yaml
jinja_env.filters["quote"] = lambda s: repr(str(s))
29 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/manifests.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Any, Callable, Dict, Iterable, List, Set
3 |
4 | from kubernetes import client # type: ignore
5 |
6 | AnyManifest = Dict[str, Any]
7 |
8 |
9 | # namespaced manifests only - namespaces are handled separately
MANIFEST_KINDS = ["Deployment", "Service", "Ingress", "NetworkPolicy"]
# Maps each kind to the apiVersion used when listing/deleting server-side
# objects. (The name carries a historical typo — "VERISON" — left unchanged
# because it is referenced throughout this module.)
KIND_TO_API_VERISON = {
    "Deployment": "apps/v1",
    "Service": "v1",
    "Ingress": "networking.k8s.io/v1",
    "NetworkPolicy": "networking.k8s.io/v1",
}


# Matches the zero-width position before each uppercase letter; used to turn
# CamelCase kind names into snake_case API method name fragments.
camel_case_to_snake_case_re = re.compile(r"(?=[A-Z])")
20 |
21 |
def kind_to_api_method_postfix(kind: str) -> str:
    """Map a kind such as ``NetworkPolicy`` to ``_namespaced_network_policy``."""
    snake_kind = camel_case_to_snake_case_re.sub("_", kind).lower()
    return "_namespaced" + snake_kind
24 |
25 |
def get_api_method_for_kind(api_client: Any, method: str, kind: str) -> Callable:
    """Resolve e.g. ``("list", "NetworkPolicy")`` to
    ``api_client.list_namespaced_network_policy``."""
    attr_name = method + kind_to_api_method_postfix(kind)
    return getattr(api_client, attr_name)
28 |
29 |
def labels_to_label_selector(labels: Dict[str, str]) -> str:
    """Render a labels dict as a Kubernetes label selector string.

    ``{"a": "1", "b": "2"}`` becomes ``"a=1,b=2"``; an empty dict yields ``""``.

    :param labels: label key/value pairs
    :return: comma-separated ``key=value`` selector
    """
    # join() replaces the previous manual string concatenation with a
    # trailing-comma trim; behavior is identical, including for empty input.
    return ",".join(f"{key}={value}" for key, value in labels.items())
35 |
36 |
def sync_manifests(all_manifests: Iterable[Dict[str, Any]]):
    """Reconcile the cluster against *all_manifests*.

    Namespaces labeled ``app.kubernetes.io/managed-by=rcds`` and the rCDS
    objects inside them are patched or created to match the given manifests;
    anything rCDS-managed that is no longer declared gets deleted.
    """
    v1 = client.CoreV1Api()
    appsv1 = client.AppsV1Api()
    networkingv1 = client.NetworkingV1Api()
    # NOTE(review): the v1beta1 Ingress API was removed in Kubernetes 1.22;
    # this client fails against newer clusters — confirm target versions.
    networkingv1beta1 = client.NetworkingV1beta1Api()

    api_version_to_client = {
        "v1": v1,
        "apps/v1": appsv1,
        "networking.k8s.io/v1": networkingv1,
        "networking.k8s.io/v1beta1": networkingv1beta1,
    }

    # Bucket incoming manifests: namespaces are handled first; everything else
    # is grouped by (namespace, kind).
    manifests_by_namespace_kind: Dict[str, Dict[str, List[Dict[str, Any]]]] = dict()
    namespaces: List[Dict[str, Any]] = []

    for manifest in all_manifests:
        kind = manifest["kind"]
        if kind == "Namespace":
            namespaces.append(manifest)
        else:
            namespace = manifest["metadata"]["namespace"]
            manifests_by_namespace_kind.setdefault(namespace, dict())
            manifests_by_namespace_kind[namespace].setdefault(kind, [])
            manifests_by_namespace_kind[namespace][kind].append(manifest)

    # rCDS-managed namespaces currently on the server; names that survive the
    # loop below are stale and are deleted at the end.
    server_namespaces_names: Set[str] = set(
        map(
            lambda ns: ns.metadata.name,
            v1.list_namespace(label_selector="app.kubernetes.io/managed-by=rcds").items,
        )
    )

    for namespace_manifest in namespaces:
        namespace = namespace_manifest["metadata"]["name"]

        try:
            server_namespaces_names.remove(namespace)
            # the namespace already exists; patch it
            print(f"PATCH Namespace {namespace}")
            v1.patch_namespace(namespace, namespace_manifest)
        except KeyError:
            # the namespace doesn't exist; create it
            print(f"CREATE Namespace {namespace}")
            v1.create_namespace(namespace_manifest)

        # TODO: Potentially decouple this from the namespace's labels?
        # Common labels for rCDS manifests in this namespace
        # NOTE(review): pop("name") mutates the manifest dict passed in.
        ns_labels: Dict[str, str] = namespace_manifest["metadata"]["labels"]
        ns_labels.pop("name")

        # Process all manifest kinds we know about in this namespace
        for kind in MANIFEST_KINDS:
            manifests = manifests_by_namespace_kind[namespace].get(kind, [])
            # Server-side objects of this kind carrying the rCDS labels;
            # leftovers after the loop are deleted below.
            server_manifest_names: Set[str] = set(
                map(
                    lambda m: m.metadata.name,
                    get_api_method_for_kind(
                        api_version_to_client[KIND_TO_API_VERISON[kind]], "list", kind
                    )(
                        namespace, label_selector=labels_to_label_selector(ns_labels)
                    ).items,
                )
            )
            for manifest in manifests:
                manifest_name = manifest["metadata"]["name"]
                try:
                    server_manifest_names.remove(manifest_name)
                    # the manifest already exists; patch it
                    print(f"PATCH {kind} {namespace}/{manifest_name}")
                    try:
                        get_api_method_for_kind(
                            api_version_to_client[manifest["apiVersion"]], "patch", kind
                        )(manifest_name, namespace, manifest)
                    except client.rest.ApiException:
                        # Conflict of some sort - let's just delete and recreate it
                        print(f"DELETE {kind} {namespace}/{manifest_name}")
                        get_api_method_for_kind(
                            api_version_to_client[manifest["apiVersion"]],
                            "delete",
                            kind,
                        )(manifest_name, namespace)
                        print(f"CREATE {kind} {namespace}/{manifest_name}")
                        get_api_method_for_kind(
                            api_version_to_client[manifest["apiVersion"]],
                            "create",
                            kind,
                        )(namespace, manifest)
                except KeyError:
                    # the manifest doesn't exist; create it
                    print(f"CREATE {kind} {namespace}/{manifest_name}")
                    get_api_method_for_kind(
                        api_version_to_client[manifest["apiVersion"]], "create", kind
                    )(namespace, manifest)
            for manifest_name in server_manifest_names:
                # Present on the server but not in the desired state: remove.
                print(f"DELETE {kind} {namespace}/{manifest_name}")
                get_api_method_for_kind(
                    api_version_to_client[KIND_TO_API_VERISON[kind]], "delete", kind
                )(manifest_name, namespace)

    for namespace_name in server_namespaces_names:
        # Namespaces no longer declared by any challenge: remove entirely.
        print(f"DELETE Namespace {namespace_name}")
        v1.delete_namespace(namespace_name)
140 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/options.schema.yaml:
--------------------------------------------------------------------------------
1 | $schema: http://json-schema.org/draft-07/schema#
2 | $id: http://rcds.redpwn.com/schemas/backends/k8s/options.yaml
3 | type: object
4 | definitions:
5 | annotation:
6 | type: object
7 | additionalProperties:
8 | type: string
9 | properties:
10 | kubeContext:
11 | type: string
12 | description: >-
13 | The name of the context in the kubeconfig file to use. If unset, defaults
14 | to current context in kubeconfig file.
15 | namespaceTemplate:
16 | type: string
17 | description: >-
18 | Template for the namespace to create for each challenge. Evaluated using
19 | Jinja. The challenge's config is available under the variable `challenge`
20 | default: "rcds-{{ challenge.id }}"
21 | domain:
22 | type: string
23 | description: >-
24 | Domain that the cluster is accessible at. NodePorts must be accessible on
25 | this domain, and ingresses will be created on subdomains.
26 | annotations:
27 | type: object
28 | description: >-
29 | Annotations for Kubernetes objects that are created.
30 | properties:
31 | ingress:
32 | $ref: "#/definitions/annotation"
33 | service:
34 | $ref: "#/definitions/annotation"
35 | tolerations:
36 | type: array
37 | description: >-
38 | Kubernetes tolerations applied to challenge pods.
39 | affinity:
40 | type: object
41 | description: >-
42 | Kubernetes affinities applied to challenge pods.
43 | required:
44 | - domain
45 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/templates/_helpers.jinja:
--------------------------------------------------------------------------------
1 | {% macro common_labels() %}
2 | app.kubernetes.io/managed-by: rcds
3 | rcds.redpwn.net/challenge-id: {{ challenge.id }}
4 | {%- endmacro %}
5 | {% macro container_labels() %}
6 | rcds.redpwn.net/container-name: {{ container.name }}
7 | rcds.redpwn.net/visibility: {{ 'public' if container.expose is not none else 'private' }}
8 | {%- endmacro %}
9 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | {% import '_helpers.jinja' as helpers with context %}
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | namespace: {{ namespace }}
6 | name: {{ container.name }}
7 | labels:
8 | {{ helpers.common_labels() | indent(4) }}
9 | {{ helpers.container_labels() | indent(4) }}
10 | spec:
11 | replicas: {{ container.config.replicas }}
12 | selector:
13 | matchLabels:
14 | {{ helpers.common_labels() | indent(6) }}
15 | {{ helpers.container_labels() | indent(6) }}
16 | template:
17 | metadata:
18 | labels:
19 | {{ helpers.common_labels() | indent(8) }}
20 | {{ helpers.container_labels() | indent(8) }}
21 | spec:
22 | containers:
23 | - name: {{ container.name }}
24 | image: {{ container.config.image }}
25 | {% if container.config.ports %}
26 | ports:
27 | {% for port in container.config.ports %}
28 | - containerPort: {{ port }}
29 | name: port-{{ port }}
30 | {% endfor %}
31 | {% endif %}
32 | {% if container.config.environment %}
33 | env:
34 | {% for name, value in container.config.environment.items() %}
35 | - name: {{ name }}
36 | value: {{ value | quote }}
37 | {% endfor %}
38 | {% endif %}
39 | {% if container.config.resources %}
40 | resources:
41 | {{ container.config.resources | yaml(12) }}
42 | {% endif %}
43 | automountServiceAccountToken: false
44 | {% if options.tolerations %}
45 | tolerations:
46 | {{ options.tolerations | yaml(8) }}
47 | {% endif %}
48 | {% if options.affinity %}
49 | affinity:
50 | {{ options.affinity | yaml(8) }}
51 | {% endif %}
52 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {% import '_helpers.jinja' as helpers with context %}
2 | {% if container.expose %}
3 | {% set httpPorts = container.expose|selectattr("http", "defined")|list %}
4 | {% if httpPorts | first is defined %}
5 | apiVersion: networking.k8s.io/v1beta1
6 | kind: Ingress
7 | metadata:
8 | namespace: {{ namespace }}
9 | name: {{ container.name }}
10 | labels:
11 | {{ helpers.common_labels() | indent(4) }}
12 | {{ helpers.container_labels() | indent(4) }}
13 | {% if options.annotations and options.annotations.ingress %}
14 | annotations:
15 | {{ options.annotations.ingress | yaml(4) }}
16 | {% endif %}
17 | spec:
18 | rules:
19 | {% for httpPort in httpPorts %}
20 | - host: {{ httpPort.http }}
21 | http:
22 | paths:
23 | - path: /
24 | backend:
25 | serviceName: {{ container.name }}
26 | servicePort: {{ httpPort.target }}
27 | {% endfor %}
28 | {% endif %}
29 | {% endif %}
30 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/templates/namespace.yaml:
--------------------------------------------------------------------------------
1 | {% import '_helpers.jinja' as helpers with context %}
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: {{ namespace }}
6 | labels:
7 | name: {{ namespace }}
8 | {{ helpers.common_labels() | indent(4) }}
9 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/templates/network-policy.yaml:
--------------------------------------------------------------------------------
1 | {% import '_helpers.jinja' as helpers with context %}
2 | apiVersion: networking.k8s.io/v1
3 | kind: NetworkPolicy
4 | metadata:
5 | namespace: {{ namespace }}
6 | name: network-policy-private
7 | labels:
8 | {{ helpers.common_labels() | indent(4) }}
9 | spec:
10 | podSelector:
11 | matchLabels:
12 | {{ helpers.common_labels() | indent(6) }}
13 | rcds.redpwn.net/visibility: private
14 | policyTypes:
15 | - Ingress
16 | - Egress
17 | ingress:
18 | - from:
19 | - namespaceSelector:
20 | matchLabels:
21 | {{ helpers.common_labels() | indent(14) }}
22 | egress:
23 | - to:
24 | - namespaceSelector:
25 | matchLabels:
26 | {{ helpers.common_labels() | indent(14) }}
27 |
--------------------------------------------------------------------------------
/rcds/backends/k8s/templates/service.yaml:
--------------------------------------------------------------------------------
1 | {% import '_helpers.jinja' as helpers with context %}
2 | {% if container.config.ports %}
3 | apiVersion: v1
4 | kind: Service
5 | metadata:
6 | namespace: {{ namespace }}
7 | name: {{ container.name }}
8 | labels:
9 | {{ helpers.common_labels() | indent(4) }}
10 | {{ helpers.container_labels() | indent(4) }}
11 | {% if options.annotations and options.annotations.service %}
12 | annotations:
13 | {{ options.annotations.service | yaml(4) }}
14 | {% endif %}
15 | spec:
16 | type: {{ 'NodePort' if container.expose is not none and container.expose|selectattr("tcp", "defined")|first is defined else 'ClusterIP' }}
17 | selector:
18 | {{ helpers.common_labels() | indent(4) }}
19 | {{ helpers.container_labels() | indent(4) }}
20 | ports:
21 | {% for port in container.config.ports %}
22 | - port: {{ port }}
23 | targetPort: {{ port }}
24 | name: port-{{ port }}
25 | {% set exposedPort = container.expose|selectattr("target", "eq", port)|first %}
26 | {% if exposedPort and exposedPort.tcp %}
27 | nodePort: {{ exposedPort.tcp }}
28 | {% endif %}
29 | {% endfor %}
30 | {% endif %}
31 |
--------------------------------------------------------------------------------
/rcds/backends/rctf/__init__.py:
--------------------------------------------------------------------------------
1 | from .backend import get_info # noqa: F401
2 |
--------------------------------------------------------------------------------
/rcds/backends/rctf/backend.py:
--------------------------------------------------------------------------------
1 | import os
2 | from hashlib import sha256
3 | from pathlib import Path
4 | from typing import Any, Dict, Set
5 |
6 | import rcds
7 | import rcds.backend
8 | from rcds.util import deep_merge, load_any
9 | from rcds.util.jsonschema import DefaultValidatingDraft7Validator
10 |
11 | from .rctf import RCTFAdminV1
12 |
# Validator for this backend's options, built from the sibling
# options.schema.yaml. The "default-validating" variant also fills in schema
# defaults as it validates.
options_schema_validator = DefaultValidatingDraft7Validator(
    schema=load_any(Path(__file__).parent / "options.schema.yaml")
)
16 |
17 |
class ScoreboardBackend(rcds.backend.BackendScoreboard):
    """Publishes challenges to an rCTF instance via its admin API."""

    # Project being published and the (schema-validated) backend options.
    _project: rcds.Project
    _options: Dict[str, Any]
    # Authenticated client for rCTF's /api/v1/admin endpoints.
    _adminv1: RCTFAdminV1

    def __init__(self, project: rcds.Project, options: Dict[str, Any]):
        """Resolve options (environment variables take precedence), validate
        them, and connect to the rCTF instance.

        :raises ValueError: if the resulting options fail schema validation
        """
        self._project = project
        self._options = options

        # RCDS_RCTF_URL / RCDS_RCTF_TOKEN override values from the config file.
        for option_key in ["url", "token"]:
            env_key = f"RCDS_RCTF_{option_key.upper()}"
            self._options[option_key] = os.environ.get(
                env_key, self._options.get(option_key, None)
            )

        # FIXME: validate options better
        if not options_schema_validator.is_valid(self._options):
            raise ValueError("Invalid options")

        self._adminv1 = RCTFAdminV1(self._options["url"], self._options["token"])

    def patch_challenge_schema(self, schema: Dict[str, Any]) -> None:
        """Adapt the challenge schema (in place) to what rCTF supports."""
        # Disallow regex flags
        flag_schema = next(
            s for s in schema["properties"]["flag"]["oneOf"] if s["type"] == "object"
        )
        flag_schema["properties"].pop("regex")
        flag_schema["oneOf"] = [
            s for s in flag_schema["oneOf"] if s["required"][0] != "regex"
        ]

        # tiebreakEligible flag
        schema["properties"]["tiebreakEligible"] = {
            "type": "boolean",
            "description": "Whether or not this challenge affects tiebreakers.",
            "default": True,
        }

        # sortWeight
        schema["properties"]["sortWeight"] = {
            "type": "number",
            "description": (
                "A parameter used for ordering when points and solves are equal."
            ),
            "default": 0,
        }

        schema["required"] += ["author", "category", "tiebreakEligible", "sortWeight"]

    def commit(self) -> bool:
        """Sync all visible challenges to rCTF and delete stale remote ones."""
        # Validate challenges
        for challenge in self._project.challenges.values():
            self.validate_challenge(challenge)

        for challenge in self._project.challenges.values():
            self.preprocess_challenge(challenge)

        # Begin actual commit
        # Only challenges previously created by rCDS are candidates for deletion.
        remote_challenges: Set[str] = set(
            c["id"]
            for c in self._adminv1.list_challenges()
            if c.get("managedBy", None) == "rcds"
        )
        for challenge in self._project.challenges.values():
            if not challenge.config["visible"]:
                continue
            try:
                remote_challenges.remove(challenge.config["id"])
            except KeyError:
                pass
            self.commit_challenge(challenge)
        for chall_id in remote_challenges:
            print(f"Deleting {chall_id}")
            self._adminv1.delete_challenge(chall_id)
        return True

    def validate_challenge(self, challenge: rcds.Challenge) -> None:
        """
        Raises exception on validation fail

        NOTE(review): this indexes ["regex"] directly — a dict flag without a
        "regex" key raises KeyError, and a plain-string flag hits the
        RuntimeError branch. Confirm the config loader normalizes flags to
        dicts that always carry a "regex" key.
        """
        if isinstance(challenge.config["flag"], dict):
            if challenge.config["flag"]["regex"] is not None:
                raise ValueError("rCTF does not support regex flags")
        else:
            raise RuntimeError(
                'Unexpected content in "flag" key on challenge config'
            )

    def preprocess_challenge(self, challenge: rcds.Challenge) -> None:
        """Apply option-driven tweaks (currently ``sortOrder``) to a challenge."""
        chall_id = challenge.config["id"]
        if "sortOrder" in self._options:
            if chall_id in self._options["sortOrder"]:
                # Earlier entries in sortOrder get a higher (less negative) weight.
                challenge.config["sortWeight"] = -self._options["sortOrder"].index(
                    chall_id
                )

    def commit_challenge(self, challenge: rcds.Challenge) -> None:
        """Upload one challenge (metadata and provided files) to rCTF."""
        chall_id = challenge.config["id"]
        rctf_challenge: Dict[str, Any] = {"managedBy": "rcds"}
        for common_field in [
            "name",
            "author",
            "category",
            "flag",
            "tiebreakEligible",
            "sortWeight",
        ]:
            rctf_challenge[common_field] = challenge.config[common_field]
        rctf_challenge["description"] = challenge.render_description()
        if "value" in challenge.config:
            # Static value
            rctf_challenge["points"] = {
                "min": challenge.config["value"],
                "max": challenge.config["value"],
            }
        else:
            # No value = dynamically scored
            rctf_challenge["points"] = {
                "min": self._options["scoring"]["minPoints"],
                "max": self._options["scoring"]["maxPoints"],
            }

        # Hash provided files so already-uploaded ones can be skipped.
        am_ctx = challenge.get_asset_manager_context()
        file_hashes: Dict[str, str] = dict()
        for filename in am_ctx.ls():
            h = sha256()
            with am_ctx.get(filename).open("rb") as fd:
                # Read in fixed-size chunks to bound memory on large files.
                for chunk in iter(lambda: fd.read(5245288), b""):
                    h.update(chunk)
            file_hashes[filename] = h.hexdigest()
        # URLs for files the server already has (None means "not uploaded yet").
        file_urls: Dict[str, str] = {
            f: u
            for f, u in self._adminv1.get_url_for_files(file_hashes).items()
            if u is not None
        }
        # Upload whatever the server is missing and merge in the new URLs.
        deep_merge(
            file_urls,
            self._adminv1.create_upload(
                {
                    name: am_ctx.get(name).read_bytes()
                    for name in am_ctx.ls()
                    if name not in file_urls
                }
            ),
        )
        rctf_challenge["files"] = [
            {"name": name, "url": url} for name, url in file_urls.items()
        ]

        self._adminv1.put_challenge(chall_id, rctf_challenge)
168 |
169 |
class BackendsInfo(rcds.backend.BackendsInfo):
    """Capability descriptor for the rCTF backend: scoreboard only."""

    HAS_SCOREBOARD = True

    def get_scoreboard(
        self, project: rcds.Project, options: Dict[str, Any]
    ) -> ScoreboardBackend:
        """Construct a :class:`ScoreboardBackend` for *project* with *options*."""
        scoreboard = ScoreboardBackend(project, options)
        return scoreboard
177 |
178 |
def get_info() -> BackendsInfo:
    """Module entry point used by ``rcds.backend.load_backend_module``."""
    info = BackendsInfo()
    return info
181 |
--------------------------------------------------------------------------------
/rcds/backends/rctf/options.schema.yaml:
--------------------------------------------------------------------------------
1 | $schema: http://json-schema.org/draft-07/schema#
2 | $id: http://rcds.redpwn.com/schemas/backends/rctf/options.yaml
3 | type: object
4 | properties:
5 | url:
6 | type: string
7 | token:
8 | type: string
9 | scoring:
10 | type: object
11 | properties:
12 | minPoints:
13 | type: integer
14 | description: >-
15 | Minimum points for dynamically-scored challenges
16 | default: 100
17 | maxPoints:
18 | type: integer
19 | description: >-
20 | Maximum points for dynamically-scored challenges
21 | default: 500
22 | required: ['minPoints', 'maxPoints']
23 | default: {}
24 | sortOrder:
25 | type: array
26 | description: >-
27 | List of challenge IDs - their sortWeights will be set in this order
28 | items:
29 | type: string
30 | required: ['url', 'token', 'scoring']
31 | default: {}
32 |
--------------------------------------------------------------------------------
/rcds/backends/rctf/rctf.py:
--------------------------------------------------------------------------------
1 | from base64 import b64encode
2 | from typing import Any, Dict, List, Optional
3 | from urllib.parse import quote, urljoin
4 |
5 | import requests
6 | from requests_toolbelt.sessions import BaseUrlSession # type: ignore
7 |
8 |
class RCTFAdminV1:
    """Minimal client for the rCTF ``/api/v1/admin`` endpoints."""

    # Session rooted at <endpoint>/api/v1/admin/ carrying the auth header.
    session: requests.Session

    def __init__(self, endpoint: str, login_token: Optional[str]):
        """Create a client rooted at *endpoint*, optionally logging in.

        :param endpoint: base URL of the rCTF instance
        :param login_token: team token to exchange for a bearer token; when
            ``None``, requests are made unauthenticated
        :raises ValueError: if the server rejects the login token
        """
        self.session = BaseUrlSession(urljoin(endpoint, "api/v1/admin/"))

        if login_token is None:
            return
        login_resp = requests.post(
            urljoin(endpoint, "api/v1/auth/login"), json={"teamToken": login_token}
        ).json()
        if login_resp["kind"] != "goodLogin":
            raise ValueError(
                f"Invalid login_token provided (reason: {login_resp['kind']})"
            )
        auth_token = login_resp["data"]["authToken"]
        self.session.headers["Authorization"] = f"Bearer {auth_token}"

    @staticmethod
    def assertResponseKind(response: Any, kind: str) -> None:
        """Raise ``RuntimeError`` unless *response* carries the expected kind."""
        if response["kind"] != kind:
            raise RuntimeError(f"Server error: {response['kind']}")

    def list_challenges(self) -> List[Dict[str, Any]]:
        """Return all challenges known to the server."""
        resp = self.session.get("challs").json()
        self.assertResponseKind(resp, "goodChallenges")
        return resp["data"]

    def put_challenge(self, chall_id: str, data: Dict[str, Any]) -> None:
        """Create or update the challenge *chall_id* with *data*."""
        resp = self.session.put(
            "challs/" + quote(chall_id), json={"data": data}
        ).json()
        self.assertResponseKind(resp, "goodChallengeUpdate")

    def delete_challenge(self, chall_id: str) -> None:
        """Delete the challenge *chall_id* from the server."""
        resp = self.session.delete("challs/" + quote(chall_id)).json()
        self.assertResponseKind(resp, "goodChallengeDelete")

    def create_upload(self, uploads: Dict[str, bytes]) -> Dict[str, str]:
        """
        :param uploads: uploads {name: data}
        :return: urls {name: url}
        """
        if not uploads:
            return {}
        payload = []
        for name, data in uploads.items():
            encoded = "data:;base64," + b64encode(data).decode()
            payload.append({"name": name, "data": encoded})
        resp = self.session.post("upload", json={"files": payload}).json()
        self.assertResponseKind(resp, "goodFilesUpload")
        return {entry["name"]: entry["url"] for entry in resp["data"]}

    def get_url_for_files(self, files: Dict[str, str]) -> Dict[str, Optional[str]]:
        """
        :param files: files to get {name: sha256}
        :return: urls {name: url}
        """
        payload = [{"name": name, "sha256": digest} for name, digest in files.items()]
        resp = self.session.post("upload/query", json={"uploads": payload}).json()
        self.assertResponseKind(resp, "goodUploadsQuery")
        return {entry["name"]: entry["url"] for entry in resp["data"]}
70 |
--------------------------------------------------------------------------------
/rcds/challenge/__init__.py:
--------------------------------------------------------------------------------
1 | from .challenge import Challenge # noqa: F401
2 | from .challenge import ChallengeLoader # noqa: F401
3 |
--------------------------------------------------------------------------------
/rcds/challenge/challenge.py:
--------------------------------------------------------------------------------
1 | import re
2 | from pathlib import Path
3 | from typing import TYPE_CHECKING, Any, Callable, Dict, List, cast
4 |
5 | from ..util import SUPPORTED_EXTENSIONS, deep_merge, find_files
6 | from .config import ConfigLoader
7 |
8 | if TYPE_CHECKING:
9 | import rcds
10 |
11 | from ..project import Project
12 | from ..project.assets import AssetManagerContext, AssetManagerTransaction
13 |
14 |
15 | def _strip_scheme(url: str) -> str:
16 | return re.sub(r".*?://", "", url)
17 |
18 |
class ChallengeLoader:
    """
    Loads :class:`Challenge` objects that belong to a :class:`rcds.Project`
    """

    project: "Project"
    _config_loader: ConfigLoader

    def __init__(self, project: "rcds.Project"):
        self.project = project
        self._config_loader = ConfigLoader(self.project)

    def load(self, root: Path):
        """
        Load the challenge rooted at the given path

        The challenge must be within the project associated with this loader.

        :param pathlib.Path root: Path to challenge root
        """
        try:
            config_path = find_files(
                ["challenge"], SUPPORTED_EXTENSIONS, path=root, recurse=False
            )["challenge"]
        except KeyError:
            raise ValueError(f"No config file found at '{root}'")
        parsed_config = self._config_loader.load_config(config_path)
        return Challenge(self.project, root, parsed_config)
48 |
49 |
50 | class Challenge:
51 | """
52 | A challenge within a given :class:`rcds.Project`
53 |
54 | This class is not meant to be constructed directly, use a :class:`ChallengeLoader`
55 | to load a challenge.
56 | """
57 |
58 | project: "Project"
59 | root: Path
60 | config: Dict[str, Any]
61 | context: Dict[str, Any] # overrides to Jinja context
62 | _asset_manager_context: "AssetManagerContext"
63 | _asset_sources: List[Callable[["AssetManagerTransaction"], None]]
64 |
    def __init__(self, project: "Project", root: Path, config: dict):
        """Initialize a challenge; use :class:`ChallengeLoader` rather than
        calling this directly.

        :param project: the project this challenge belongs to
        :param root: path to the challenge's root directory
        :param config: the already-loaded challenge config
        """
        self.project = project
        self.root = root
        self.config = config
        self.context = dict()
        # Asset manager context is keyed by the challenge's unique id.
        self._asset_manager_context = self.project.asset_manager.create_context(
            self.config["id"]
        )
        self._asset_sources = []

        # Files listed under `provide` are always a source of assets.
        self.register_asset_source(self._add_static_assets)
76 |
77 | def _add_static_assets(self, transaction: "AssetManagerTransaction") -> None:
78 | if "provide" not in self.config:
79 | return
80 | for provide in self.config["provide"]:
81 | if isinstance(provide, str):
82 | path = self.root / Path(provide)
83 | name = path.name
84 | else:
85 | path = self.root / Path(provide["file"])
86 | name = provide["as"]
87 | transaction.add_file(name, path)
88 |
89 | def register_asset_source(
90 | self, do_add: Callable[["AssetManagerTransaction"], None]
91 | ) -> None:
92 | """
93 | Register a function to add assets to the transaction for this challenge.
94 | """
95 | self._asset_sources.append(do_add)
96 |
97 | def create_transaction(self) -> "AssetManagerTransaction":
98 | """
99 | Get a transaction to update this challenge's assets
100 | """
101 | transaction = self._asset_manager_context.transaction()
102 | for do_add in self._asset_sources:
103 | do_add(transaction)
104 | return transaction
105 |
106 | def get_asset_manager_context(self) -> "AssetManagerContext":
107 | return self._asset_manager_context
108 |
109 | def get_relative_path(self) -> Path:
110 | """
111 | Utiity function to get this challenge's path relative to the project root
112 | """
113 | return self.root.relative_to(self.project.root)
114 |
115 | def get_context_shortcuts(self) -> Dict[str, Any]:
116 | shortcuts: Dict[str, Any] = dict()
117 |
118 | if (
119 | "expose" in self.config
120 | and len(self.config["expose"]) == 1
121 | and len(next(iter(cast(Dict[str, list], self.config["expose"]).values())))
122 | == 1
123 | ):
124 | # One container exposed; we can define expose shortcuts
125 | expose_cfg = cast(
126 | Dict[str, Any], next(iter(self.config["expose"].values()))[0]
127 | )
128 | shortcuts["host"] = expose_cfg.get("http", expose_cfg.get("host", None))
129 | has_url = False
130 | if "tcp" in expose_cfg:
131 | shortcuts["port"] = expose_cfg["tcp"]
132 | shortcuts["nc"] = f"nc {shortcuts['host']} {shortcuts['port']}"
133 | shortcuts["url"] = f"http://{shortcuts['host']}:{shortcuts['port']}"
134 | has_url = True
135 | if "http" in expose_cfg:
136 | shortcuts["url"] = f"https://{shortcuts['host']}"
137 | has_url = True
138 | if has_url:
139 | shortcuts[
140 | "link"
141 | ] = f"[{_strip_scheme(shortcuts['url'])}]({shortcuts['url']})"
142 |
143 | return shortcuts
144 |
145 | def render_description(self) -> str:
146 | """
147 | Render the challenge's description template to a string
148 | """
149 |
150 | return self.project.jinja_env.from_string(self.config["description"]).render(
151 | deep_merge(
152 | dict(),
153 | {"challenge": self.config},
154 | self.get_context_shortcuts(),
155 | self.context,
156 | )
157 | )
158 |
--------------------------------------------------------------------------------
/rcds/challenge/challenge.schema.yaml:
--------------------------------------------------------------------------------
1 | $schema: http://json-schema.org/draft-07/schema#
2 | $id: http://rcds.redpwn.com/schemas/challenge.yaml
3 | definitions:
4 | domainsafe-name:
5 | type: string
6 | # k8s dns label names allows a max length of 63 characters
7 | pattern: "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
8 | cpu-value:
9 | oneOf:
10 | - type: string
11 | pattern: "^[0-9]+m$"
12 | - type: number
13 | memory-value:
14 | oneOf:
15 | - type: string
16 | pattern: "^[0-9]+[KMGTE]i?$"
17 | - type: number
18 | type: object
19 | properties:
20 | # Basic challenge details
21 | id:
22 | $ref: "#/definitions/domainsafe-name"
23 | description: >-
24 | Override the automatically generated id for this challenge. You should
25 | avoid setting this whenever possible.
26 | name:
27 | type: string
28 | description: >-
29 | The name of the challenge
30 | author:
31 | description: >-
32 | The author(s) of the challenge
33 | oneOf:
34 | - type: string
35 | - type: array
36 | items:
37 | type: string
38 | description:
39 | type: string
40 | description: >-
41 | Description of the challenge. It is in Markdown format and will be
42 | processed with Jinja.
43 | category:
44 | type: string
45 | description: >-
46 | Category of the challenge. If not provided, defaults to the parent
47 | directory of the challenge (e.g. if this file is located at
48 | /pwn/chall1/challenge.yaml, the category will default to 'pwn').
49 | flag:
50 | description: >-
51 | The flag for the challenge.
52 | oneOf:
53 | - type: string
54 | - type: object
55 | properties:
56 | file:
57 | type: string
58 | description: >-
59 | File to load the flag from. The file should contain one line with
60 | only the flag.
61 | regex:
62 | type: string
63 | format: regex
64 | # Exactly one of (file, regex) allowed
65 | oneOf:
66 | - required: [file]
67 | - required: [regex]
68 | value:
69 | type: integer
70 | description: >-
71 | The point value of the challenge, or full point value for a
72 | dynamically-scored challenge; the precise meaning is defined by the
73 | scoreboard backend being used.
74 | minimum: 0
75 |
76 | visible:
77 | type: boolean
78 | description: >-
79 | Whether or not this challenge should be shown on the scoreboard.
80 | default: true
81 |
82 | # Static assets
83 | provide:
84 | type: array
85 | description: >-
      Static files (already present on disk in the repository) to provide to competitors
87 | items:
88 | oneOf:
89 | - type: string
90 | description: >-
91 | Path to the file to provide
92 | - type: object
93 | properties:
94 | file:
95 | type: string
96 | description: >-
97 | Path to the file to provide
98 | as:
99 | type: string
100 | description: >-
101 | Name of file as shown to competitors
102 | required:
103 | - file
104 | - as
105 |
106 | # Runtime (containers)
107 | deployed:
108 | type: boolean
109 | description: >-
110 | Whether or not this challenge's containers should be deployed
111 | default: true
112 | containers:
113 | type: object
114 | description: >-
115 | Containers to be deployed for this challenge. The key of each container
116 | is its name, where the container can be found via DNS lookup at runtime
117 | from other containers within this challenge.
118 | additionalProperties:
119 | type: object
120 | properties:
121 | image:
122 | type: string
123 | description: >-
124 | The image tag for this container. If 'build' is not specified, the
125 | container will be pulled (e.g. containers for services like a
126 | database found on dockerhub). If 'build' is specified, this
127 | overrides the 'name' (default the name of the directory specified in
128 | 'build') in the image tag template defined globally in the project.
129 | build:
130 | oneOf:
131 | - type: string
132 | description: >-
133 | Path to the directory containing a Dockerfile to build for this container.
134 | - type: object
135 | description: >-
136 | Complex build configuration object roughly mirroring that of
137 | docker-compose.yml.
138 | properties:
139 | context:
140 | type: string
141 | description: >-
142 | Path to the build context
143 | dockerfile:
144 | type: string
145 | description: >-
146 | Path to the Dockerfile within the build context
147 | args:
148 | type: object
149 | description: >-
150 | Build arguments to be passed to the build. Please write
151 | numbers as strings to avoid ambiguity from number formatting
152 | additionalProperties:
153 | type: string
154 | required:
155 | - context
156 | replicas:
157 | type: integer
158 | description: >-
159 | Number of replicas of this container to run. Set to 1 for stateful
160 | applications.
161 | default: 1
162 | minimum: 1
163 | environment:
164 | type: object
165 | description: >-
166 | Environment variables to set within the container. Please format all
167 | values as strings. Keys without values are not supported.
168 | additionalProperties:
169 | type: string
170 | ports:
171 | type: array
172 | description: >-
173 | Port numbers (as integers) on this container to expose to other
174 | containers within this challenge. If a port is supposed to be
175 | exposed to the Internet, make sure it is specified here, and add it
176 | to the top level 'expose' key.
177 |
178 | This key exists to ensure Kubernetes services have the correct
179 | ports configured on each service. Due to restrictions with Docker
180 | Compose / Docker Swarm, remapping ports as can be done with
181 | Kubernetes services is not possible.
182 | items:
183 | type: integer
184 | resources:
185 | type: object
186 | description: >-
187 | Compute resource requests and limits for this container. This
188 | follows the same format as Kubernetes's resources property on
189 | container specs.
190 |
191 | Not all features are supported by all backends (though limits should
192 | work on most).
193 | properties:
194 | limits:
195 | type: object
196 | description: >-
197 | Compute resource limits for this container. Using more of a
198 | resource than the limit is not allowed.
199 | properties:
200 | cpu:
201 | description: >-
202 | CPU usage limits for this container - 1 unit corresponds to
203 | 1 CPU second per (wall-clock) second.
204 | $ref: "#/definitions/cpu-value"
205 | memory:
206 | description: >-
207 | Memory usage limits for this container.
208 | $ref: "#/definitions/memory-value"
209 | requests:
210 | type: object
211 | description: >-
            Compute resource requests for this container.
213 | properties:
214 | cpu:
215 | description: >-
216 | CPU usage requests for this container - 1 unit corresponds to
217 | 1 CPU second per (wall-clock) second.
218 | $ref: "#/definitions/cpu-value"
219 | memory:
220 | description: >-
221 | Memory usage requests for this container.
222 | $ref: "#/definitions/memory-value"
223 | anyOf:
224 | # Either 'image' or 'build' must be specified
225 | - required:
226 | - image
227 | - required:
228 | - build
229 | propertyNames:
230 | $ref: "#/definitions/domainsafe-name"
231 | expose:
232 | type: object
233 | description: >-
234 | Ports on containers to expose to the Internet. Keys correspond to the key
235 | of the container that the rule is targeting.
236 | additionalProperties:
237 | type: array
238 | items:
239 | type: object
240 | properties:
241 | target:
242 | type: integer
243 | description: >-
244 | The port number on the container this rule targets.
245 | tcp:
246 | type: integer
247 | description: >-
248 | The external port number to expose, treating this port as raw TCP.
249 | http:
250 | description: >-
251 | Configuration to expose this port as HTTP.
252 | oneOf:
253 | - type: string
254 | description: >-
255 | The hostname to expose; this is a subdomain name.
256 | - type: object
257 | properties:
258 | raw:
259 | type: string
260 | description: >-
261 | The raw hostname to expose (use when you need the port
262 | to not be on the shared parent domain). Deployment backends
263 | will not handle DNS records for this.
264 | oneOf:
265 | - required: [raw]
266 | # Exactly one of (http, tcp) allowed
267 | oneOf:
268 | - required: [http]
269 | - required: [tcp]
270 | propertyNames:
271 | $ref: "#/definitions/domainsafe-name"
272 | required:
273 | - name
274 | - description
275 |
--------------------------------------------------------------------------------
/rcds/challenge/config.py:
--------------------------------------------------------------------------------
1 | import re
2 | from copy import deepcopy
3 | from itertools import tee
4 | from pathlib import Path
5 | from typing import (
6 | TYPE_CHECKING,
7 | Any,
8 | Dict,
9 | Iterable,
10 | Optional,
11 | Pattern,
12 | Tuple,
13 | Union,
14 | cast,
15 | )
16 | from warnings import warn
17 |
18 | import jsonschema # type: ignore
19 |
20 | from rcds import errors
21 |
22 | from ..util import deep_merge, load_any
23 | from ..util.jsonschema import DefaultValidatingDraft7Validator
24 |
25 | if TYPE_CHECKING:
26 | from rcds import Project
27 |
28 |
29 | config_schema = load_any(Path(__file__).parent / "challenge.schema.yaml")
30 |
31 |
class TargetNotFoundError(errors.ValidationError):
    """Raised when a config key references a container or port that is not defined."""

    pass
34 |
35 |
class TargetFileNotFoundError(TargetNotFoundError):
    """Raised when a config key references a file on disk that does not exist."""

    # Path of the missing file, as referenced in the config (relative)
    target: Path

    def __init__(self, message: str, target: Path):
        """
        :param str message: Human-readable error message
        :param pathlib.Path target: Path of the missing file
        """
        super().__init__(message)
        self.target = target
42 |
43 |
class InvalidFlagError(errors.ValidationError):
    """Raised when a challenge's flag does not match the project's flag format."""

    pass
46 |
47 |
class ConfigLoader:
    """
    Object that manages loading challenge config files

    Validation is performed against the challenge schema (after any backends
    have patched it), and project-level defaults are merged into each config.
    """

    project: "Project"
    # Per-loader copy of the challenge schema (backends may patch it)
    config_schema: Dict[str, Any]
    config_schema_validator: Any
    # Compiled from the project's `flagFormat`, if set
    _flag_regex: Optional[Pattern[str]] = None

    def __init__(self, project: "Project") -> None:
        """
        :param rcds.Project project: project context to use
        """
        self.project = project
        # Deep copy so backend patches below do not mutate the module-level schema
        self.config_schema = deepcopy(config_schema)

        # Load flag regex if present
        if "flagFormat" in self.project.config:
            # Anchor the format so the whole flag must match
            self._flag_regex = re.compile(f"^{self.project.config['flagFormat']}$")

        # Backend config patching
        for backend in [
            self.project.container_backend,
            self.project.scoreboard_backend,
        ]:
            if backend is not None:
                backend.patch_challenge_schema(self.config_schema)
        self.config_schema_validator = DefaultValidatingDraft7Validator(
            schema=self.config_schema, format_checker=jsonschema.draft7_format_checker
        )

    def _apply_defaults(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """
        Apply project-level defaults

        Scalar defaults are merged at the root; `containers` and `expose`
        defaults are merged into each container / expose entry individually.
        """
        try:
            root_defaults = deepcopy(self.project.config["defaults"])
        except KeyError:
            # No defaults present
            return config

        container_defaults = root_defaults.pop("containers", None)
        expose_defaults = root_defaults.pop("expose", None)

        # Array types with no sensible defaults - ignore them
        root_defaults.pop("provide", None)

        config = deep_merge(root_defaults, config)
        if container_defaults is not None and "containers" in config:
            for container_name, container_config in config["containers"].items():
                # Merge into a fresh dict so defaults are never mutated
                config["containers"][container_name] = deep_merge(
                    dict(), container_defaults, container_config
                )
        if expose_defaults is not None and "expose" in config:
            for expose_config in config["expose"].values():
                for i, expose_port in enumerate(expose_config):
                    expose_config[i] = deep_merge(dict(), expose_defaults, expose_port)
        return config

    def parse_config(
        self, config_file: Path
    ) -> Iterable[Union[errors.ValidationError, Dict[str, Any]]]:
        """
        Load and validate a config file, returning both the config and any
        errors encountered.

        :param pathlib.Path config_file: The challenge config to load
        :returns: Iterable containing any errors (all instances of
            :class:`rcds.errors.ValidationError`) and the parsed config. The config will
            always be last. Note that if schema validation fails, only the schema
            errors are yielded (no config).
        """
        root = config_file.parent
        relative_path = root.resolve().relative_to(self.project.root.resolve())
        config = load_any(config_file)

        config.setdefault("id", root.name)  # derive id from parent directory name

        config = self._apply_defaults(config)

        if len(relative_path.parts) >= 2:
            # default category name is the parent of the challenge directory
            config.setdefault("category", relative_path.parts[-2])

        schema_errors: Iterable[errors.SchemaValidationError] = (
            errors.SchemaValidationError(str(e), e)
            for e in self.config_schema_validator.iter_errors(config)
        )
        # Make a duplicate to check whether there are errors returned
        schema_errors, schema_errors_dup = tee(schema_errors)
        # This is the same test as used in Validator.is_valid
        if next(schema_errors_dup, None) is not None:
            yield from schema_errors
        else:
            # Schema is valid; perform cross-key semantic validation
            if "expose" in config:
                if "containers" not in config:
                    yield TargetNotFoundError(
                        "Cannot expose ports without containers defined"
                    )
                else:
                    # Every expose rule must target a defined container & port
                    for key, expose_objs in config["expose"].items():
                        if key not in config["containers"]:
                            yield TargetNotFoundError(
                                f'`expose` references container "{key}" but '
                                f"it is not defined in `containers`"
                            )
                        else:
                            for expose_obj in expose_objs:
                                if (
                                    expose_obj["target"]
                                    not in config["containers"][key]["ports"]
                                ):
                                    yield TargetNotFoundError(
                                        f"`expose` references port "
                                        f'{expose_obj["target"]} on container '
                                        f'"{key}" which is not defined'
                                    )
            if "provide" in config:
                # Every provided file must exist relative to the challenge root
                for f in config["provide"]:
                    if isinstance(f, str):
                        f = Path(f)
                    else:
                        f = Path(f["file"])
                    if not (root / f).is_file():
                        yield TargetFileNotFoundError(
                            f'`provide` references file "{str(f)}" which does not '
                            f"exist",
                            f,
                        )
            if "flag" in config:
                if isinstance(config["flag"], dict):
                    if "file" in config["flag"]:
                        # Inline the flag file's contents into the config,
                        # replacing the {"file": ...} object with a string
                        f = Path(config["flag"]["file"])
                        f_resolved = root / f
                        if f_resolved.is_file():
                            with f_resolved.open("r") as fd:
                                flag = fd.read().strip()
                            config["flag"] = flag
                        else:
                            yield TargetFileNotFoundError(
                                f'`flag.file` references file "{str(f)}" which does '
                                f"not exist",
                                f,
                            )
                # Re-check: the dict form may have been inlined to a string above
                if isinstance(config["flag"], str):
                    if self._flag_regex is not None and not self._flag_regex.match(
                        config["flag"]
                    ):
                        yield InvalidFlagError(
                            f'Flag "{config["flag"]}" does not match the flag format'
                        )
                    if config["flag"].count("\n") > 0:
                        warn(
                            RuntimeWarning(
                                "Flag contains multiple lines; is this intended?"
                            )
                        )
        yield config

    def check_config(
        self, config_file: Path
    ) -> Tuple[Optional[Dict[str, Any]], Optional[Iterable[errors.ValidationError]]]:
        """
        Load and validate a config file, returning any errors encountered.

        If the config file is valid, the tuple returned contains the loaded config as
        the first element, and the second element is None. Otherwise, the second
        element is an iterable of errors that occurred during validation

        This method wraps :meth:`parse_config`.

        :param pathlib.Path config_file: The challenge config to load
        """
        load_data = self.parse_config(config_file)
        # Duplicate the generator so we can peek at the first item without
        # consuming it from the returned iterable
        load_data, load_data_dup = tee(load_data)
        first = next(load_data_dup)
        if isinstance(first, errors.ValidationError):
            validation_errors = cast(
                Iterable[errors.ValidationError],
                filter(lambda v: isinstance(v, errors.ValidationError), load_data),
            )
            return (None, validation_errors)
        else:
            return (first, None)

    def load_config(self, config_file: Path) -> Dict[str, Any]:
        """
        Loads a config file, or throw an exception if it is not valid

        This method wraps :meth:`check_config`, and throws the first error returned
        if there are any errors.

        :param pathlib.Path config_file: The challenge config to load
        :returns: The loaded config
        """
        # NOTE(review): the local name `errors` shadows the imported `errors`
        # module within this method
        config, errors = self.check_config(config_file)
        if errors is not None:
            raise next(iter(errors))
        # errors is None
        assert config is not None
        return config
249 |
--------------------------------------------------------------------------------
/rcds/challenge/docker.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import collections.abc
3 | import hashlib
4 | import json
5 | from pathlib import Path, PurePosixPath
from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, Optional, Type, Union, cast
7 |
8 | import docker # type: ignore
9 | import pathspec # type: ignore
10 |
11 | if TYPE_CHECKING:
12 | from ..project import Project
13 | from .challenge import Challenge
14 |
15 |
def flatten(i: Iterable[Union[str, Iterable[str]]]) -> Iterable[str]:
    """
    Flatten one level of nesting, treating strings as atomic values.

    :param i: iterable whose elements are strings or iterables of strings
    :returns: generator yielding each string in order
    """
    for x in i:
        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
            # yield the nested items directly (the original wrapped this in a
            # redundant pass-through generator expression)
            yield from x
        else:
            yield x
22 |
23 |
def get_context_files(root: Path) -> Iterator[Path]:
    """
    Generate a list of all files in the build context of the specified Dockerfile

    Honors a ``.dockerignore`` file in ``root`` if one exists; directories are
    never yielded, only files.

    :param pathlib.Path root: Path to the containing directory of the Dockerfile to
        analyze
    :returns: Iterator over the files in the build context
    """
    files: Iterator[Path] = root.rglob("*")
    dockerignore = root / ".dockerignore"
    if dockerignore.exists():
        with dockerignore.open("r") as fd:
            spec = pathspec.PathSpec.from_lines(
                "gitwildmatch",
                flatten(
                    # pathspec's behavior with negated patterns is different than that
                    # of docker (and its own behavior with non-negated patterns) in that
                    # patterns ending with `/` will match files in subdirectories, but
                    # not a file with the same name, and patterns not ending in `/` will
                    # only match files, but not files in subdirectories. For example,
                    # the pattern `!/a` will exclude `a`, but not `a/b`, and the pattern
                    # `!/a/` will exclude `a/b`, but not `a`. Since docker treats these
                    # interchangeably, we automatically insert the corresponding ignore
                    # rule into the rules list if a negated pattern is detected (insert
                    # `!/a/` if `!/a` is detected and vice versa).
                    # FIXME: normalize / parse the lines better to support e.g. comments
                    (
                        (
                            # line[-2] is the character before the trailing newline
                            [line[:-2] + "\n", line]
                            if line[-2] == "/"
                            else [line, line[:-1] + "/\n"]
                        )
                        if line[0] == "!"
                        else line
                    )
                    for line in fd
                ),
            )
        files = filter(lambda p: not spec.match_file(p.relative_to(root)), files)
    return filter(lambda p: p.is_file(), files)
63 |
64 |
def generate_sum(root: Path) -> str:
    """
    Generate a checksum of all files in the build context of the specified directory

    Both the relative path and the contents of every file are folded into the
    digest, so renames and edits each change the resulting sum. Files are
    processed in sorted path order for determinism.

    :param pathlib.Path root: Path to the containing directory of the Dockerfile to
        analyze
    """
    digest = hashlib.sha256()
    context_files = sorted(
        get_context_files(root), key=lambda p: str(p.relative_to(root))
    )
    for path in context_files:
        digest.update(bytes(path.relative_to(root)))
        with path.open("rb") as stream:
            while True:
                chunk = stream.read(524288)
                if not chunk:
                    break
                digest.update(chunk)
    return digest.hexdigest()
79 |
80 |
class Container:
    """
    A single container

    The base class represents a pulled (non-buildable) container whose image
    tag comes straight from its config; subclasses may support building.
    """

    manager: "ContainerManager"
    challenge: "Challenge"
    project: "Project"
    name: str
    config: Dict[str, Any]

    # Whether this container type supports being built from source
    IS_BUILDABLE: bool = False

    def __init__(self, *, container_manager: "ContainerManager", name: str) -> None:
        self.manager = container_manager
        self.name = name
        # Convenience references derived from the owning manager
        self.challenge = container_manager.challenge
        self.project = container_manager.challenge.project
        self.config = container_manager.config[name]

    def get_full_tag(self) -> str:
        """
        Get the full image tag (e.g. ``k8s.gcr.io/etcd:3.4.3-0``) for this container

        :returns: The image tag
        """
        return self.config["image"]

    def is_built(self) -> bool:
        """
        If the container is buildable (:const:`IS_BUILDABLE` is `True`), this method
        returns whether or not the container is already built (and up-to-date). For
        non-buildable containers, this method always returns `True`.

        :returns: Whether or not the container is built
        """
        return True

    def build(self, force: bool = False) -> None:
        """
        Build the challenge if applicable and necessary.

        For challenges that are not buildable (:const:`IS_BUILDABLE` is False), this
        method is a no-op

        :param bool force: Force a rebuild of this container even if it is up-to-date
        """
        return None
129 |
130 |
class BuildableContainer(Container):
    """
    A container that is built from source
    """

    # Build context directory (resolved against the challenge root)
    root: Path
    # Dockerfile path within the build context
    dockerfile: str
    # Build args passed through to `docker build`
    buildargs: Dict[str, str]
    # Checksum of the build context; doubles as the image tag, so a tag match
    # implies an up-to-date build
    content_hash: str
    # Full image name (without tag), as derived by the manager
    image: str

    IS_BUILDABLE: bool = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        build = self.config.get("build", None)
        assert build is not None
        if isinstance(build, str):
            # Shorthand form: `build` is just the context directory
            self.root = self.challenge.root / Path(build)
            self.dockerfile = "Dockerfile"
            self.buildargs = dict()
        elif isinstance(build, dict):
            # Full form: object roughly mirroring docker-compose's build key
            build = cast(Dict[str, Any], build)
            self.root = self.challenge.root / Path(build["context"])
            self.dockerfile = build.get("dockerfile", "Dockerfile")
            self.buildargs = cast(Dict[str, str], build.get("args", dict()))
        self.content_hash = generate_sum(self.root)
        self.image = self.manager.get_docker_image(self)

    def _build(self) -> None:
        """
        Unconditionally build the image and push it, tagged with the context hash.
        """
        self.project.docker_client.images.build(
            path=str(self.root),
            tag=f"{self.image}:{self.content_hash}",
            dockerfile=self.dockerfile,
            buildargs=self.buildargs,
            pull=True,
            rm=True,
        )
        self.project.docker_client.images.push(
            self.image, tag=self.content_hash, auth_config=self.manager._auth_config
        )

    def get_full_tag(self) -> str:
        """
        :returns: The full image tag, e.g. ``image:content_hash``
        """
        return f"{self.image}:{self.content_hash}"

    def is_built(self) -> bool:
        """
        Checks if a container built with a build context with a matching hash exists,
        either locally or remotely.

        :returns: Whether or not the image was found
        """
        try:
            self.project.docker_client.images.get_registry_data(
                self.get_full_tag(), auth_config=self.manager._auth_config
            )
            return True
        except docker.errors.NotFound:
            pass  # continue
        return False

    def build(self, force: bool = False) -> None:
        """
        Build (and push) the image unless an up-to-date one already exists.

        :param bool force: Force a rebuild of this container even if it is up-to-date
        """
        if force or not self.is_built():
            self._build()
193 |
194 |
class _AuthCfgCache:
    """
    Process-wide cache of registry auth configs, keyed by registry name.

    The cache dict is deliberately class-level so all instances share the same
    resolved credentials; values may be None when no credentials are configured
    for a registry.
    """

    _cache: Dict[str, Optional[Dict[str, str]]] = dict()  # class-level

    def get_auth_config(self, registry: str, api_client) -> Optional[Dict[str, str]]:
        """
        Return the auth config for ``registry``, resolving it on first use.

        :param str registry: registry name to look up credentials for
        :param api_client: the low-level docker API client used to resolve the
            auth header
        :returns: the decoded auth config, or None if the registry has no
            configured credentials
        """
        if registry not in self._cache:
            header = docker.auth.get_config_header(api_client, registry)
            if header is not None:
                # The header is a base64url-encoded JSON blob (X-Registry-Auth)
                auth_config = json.loads(
                    base64.urlsafe_b64decode(header).decode("ascii")
                )
            else:
                auth_config = None
            self._cache[registry] = auth_config
        return self._cache[registry]
209 |
210 |
211 | _auth_cfg_cache = _AuthCfgCache()
212 |
213 |
class ContainerManager:
    """
    Object managing all containers defined by a given :class:`rcds.Challenge`
    """

    challenge: "Challenge"
    project: "Project"
    # The challenge's `containers` config, keyed by container name
    config: Dict[str, Dict[str, Any]]
    containers: Dict[str, Container]
    _auth_config: Dict[str, str]

    def __init__(self, challenge: "Challenge"):
        """
        :param rcds.Challenge challenge: The challenge that this ContainerManager
            belongs to
        """

        self.challenge = challenge
        self.project = self.challenge.project
        self.containers = dict()
        self.config = cast(
            Dict[str, Dict[str, Any]], self.challenge.config.get("containers", dict())
        )

        self._auth_config = self._get_auth_config()

        # Instantiate a Container (or BuildableContainer, when a `build` key is
        # present) for each configured container, and normalize each config's
        # `image` key to the resolved full tag
        for name, container_config in self.config.items():
            container_constructor: Type[Container]
            if "build" in container_config:
                container_constructor = BuildableContainer
            else:
                container_constructor = Container
            self.containers[name] = container_constructor(
                container_manager=self, name=name
            )
            container_config["image"] = self.containers[name].get_full_tag()

    def get_docker_image(self, container: Container) -> str:
        """
        Compute the full image name (registry prefix + templated name) for a
        container, without a tag.

        :param Container container: The container to compute the image name for
        :returns: The image name
        """
        image_template = self.project.jinja_env.from_string(
            self.project.config["docker"]["image"]["template"]
        )
        template_context = {
            "challenge": self.challenge.config,
            # Copy so injecting `name` below does not mutate the container's config
            "container": dict(container.config),
        }
        template_context["container"]["name"] = container.name
        image = image_template.render(template_context)
        # FIXME: better implementation than abusing PosixPath?
        return str(
            PurePosixPath(self.project.config["docker"]["image"]["prefix"]) / image
        )

    def _get_auth_config(self) -> Dict[str, str]:
        """
        Resolve registry credentials for the project's image prefix.

        :returns: The auth config for the registry (may be None when no
            credentials are configured; see :class:`_AuthCfgCache`)
        """
        registry, _ = docker.auth.resolve_repository_name(
            self.project.config["docker"]["image"]["prefix"]
        )
        return _auth_cfg_cache.get_auth_config(registry, self.project.docker_client.api)
272 |
--------------------------------------------------------------------------------
/rcds/cli/__init__.py:
--------------------------------------------------------------------------------
1 | from .__main__ import cli # noqa: F401
2 |
--------------------------------------------------------------------------------
/rcds/cli/__main__.py:
--------------------------------------------------------------------------------
1 | import click
2 |
3 | from .deploy import deploy
4 |
5 |
@click.group()
def cli():
    """rCDS command-line interface."""
    pass


# Register subcommands on the root group
cli.add_command(deploy)
12 |
13 |
14 | if __name__ == "__main__":
15 | cli()
16 |
--------------------------------------------------------------------------------
/rcds/cli/deploy.py:
--------------------------------------------------------------------------------
1 | from sys import exit
2 |
3 | import click
4 |
5 | import rcds
6 | import rcds.challenge.docker
7 | from rcds.util import SUPPORTED_EXTENSIONS, find_files
8 |
9 |
@click.command()
def deploy() -> None:
    """
    Build and deploy all challenges in the current project.

    Locates the project root by searching upward for an ``rcds`` config file,
    builds any out-of-date containers, commits each challenge's assets, and
    finally commits the container and scoreboard backends (if configured).
    """
    try:
        project_config = find_files(["rcds"], SUPPORTED_EXTENSIONS, recurse=True)[
            "rcds"
        ].parent
    except KeyError:
        click.echo("Could not find project root!")
        exit(1)
    click.echo(f"Loading project at {project_config}")
    project = rcds.Project(project_config)
    click.echo("Initializing backends")
    project.load_backends()
    click.echo("Loading challenges")
    project.load_all_challenges()
    for challenge in project.challenges.values():
        cm = rcds.challenge.docker.ContainerManager(challenge)
        for container_name, container in cm.containers.items():
            click.echo(f"{challenge.config['id']}: checking container {container_name}")
            if not container.is_built():
                click.echo(
                    f"{challenge.config['id']}: building container {container_name}"
                    f" ({container.get_full_tag()})"
                )
                container.build()
        challenge.create_transaction().commit()
    if project.container_backend is not None:
        # typo fix: was "Commiting"
        click.echo("Committing container backend")
        project.container_backend.commit()
    else:
        click.echo("WARN: no container backend, skipping...")
    if project.scoreboard_backend is not None:
        # typo fix: was "Commiting"
        click.echo("Committing scoreboard backend")
        project.scoreboard_backend.commit()
    else:
        click.echo("WARN: no scoreboard backend, skipping...")
46 |
--------------------------------------------------------------------------------
/rcds/errors.py:
--------------------------------------------------------------------------------
1 | """
2 | Error types for various rCDS methods
3 | """
4 |
5 | import jsonschema.exceptions # type: ignore
6 |
7 |
class ValidationError(ValueError):
    """Base class for all rCDS validation errors."""

    pass
10 |
11 |
class SchemaValidationError(ValidationError):
    """A validation error originating from JSON Schema validation."""

    # The underlying jsonschema error this wraps
    cause: jsonschema.exceptions.ValidationError

    def __init__(self, message: str, cause: jsonschema.exceptions.ValidationError):
        """
        :param str message: Human-readable error message
        :param cause: The underlying jsonschema validation error
        """
        super().__init__(message)

        self.cause = cause
19 |
--------------------------------------------------------------------------------
/rcds/project/__init__.py:
--------------------------------------------------------------------------------
1 | from .project import Project # noqa: F401
2 |
--------------------------------------------------------------------------------
/rcds/project/assets.py:
--------------------------------------------------------------------------------
1 | import io
2 | import json
3 | import os
4 | import pathlib
5 | import shutil
6 | from dataclasses import dataclass
7 | from pathlib import Path
8 | from typing import (
9 | TYPE_CHECKING,
10 | BinaryIO,
11 | ByteString,
12 | Callable,
13 | Dict,
14 | Iterable,
15 | Set,
16 | Tuple,
17 | Union,
18 | cast,
19 | )
20 | from warnings import warn
21 |
22 | if TYPE_CHECKING:
23 | import rcds
24 |
25 | from .project import Project
26 |
27 |
28 | File = Union[BinaryIO, Path, bytes]
29 | """
30 | Something that the asset manager can interpret as a file (contents only)
31 |
32 | Valid types:
33 |
34 | - A :class:`pathlib.Path` object referring to a file that already exists on-disk
35 |
36 | - Any :class:`typing.BinaryIO` object that is seekable
37 |
38 | - A :class:`typing.ByteString` object containing the contents of the file (internally
39 | this is converted to a :class:`io.BytesIO`)
40 | """
41 |
42 |
43 | def _is_valid_name(name: str):
44 | return (
45 | len(pathlib.PurePosixPath(name).parts) == 1
46 | and len(pathlib.PureWindowsPath(name).parts) == 1
47 | )
48 |
49 |
class AssetManagerTransaction:
    """
    A transaction within an :class:`AssetManagerContext`

    This class manages declarative transactional updates to a context, allowing you to
    declare the files that should exist in the context, the last time that file was
    modified, and a callable to run to get the file, should it be out-of-date in the
    cache. The transaction starts in a blank state; without adding anything by calling
    :meth:`add`, :meth:`commit` will clear the context. No actions are performed until
    :meth:`commit` is called.

    This class is not meant to be constructed directly, use
    :meth:`AssetManagerContext.transaction`
    """

    _asset_manager_context: "AssetManagerContext"
    # False once commit() has run; guards against reuse
    _is_active: bool

    @dataclass
    class _FileEntry:
        """
        Declared state of one asset: its mtime and a thunk for its contents.

        :meta private:
        """

        mtime: float

        # Callable is wrapped in a tuple because otherwise, mypy thinks the field is a
        # class method (related to python/mypy#708)
        get_contents: Tuple[Callable[[], File]]

    # Assets declared on this transaction, keyed by asset name
    _files: Dict[str, _FileEntry]

    def __init__(self, asset_manager_context: "AssetManagerContext"):
        """
        :meta private:
        """
        self._asset_manager_context = asset_manager_context
        self._is_active = True
        self._files = dict()

    def add(
        self, name: str, mtime: float, contents: Union[File, Callable[[], File]]
    ) -> None:
        """
        Add a file to the context

        :param str name: The name of the asset to add
        :param float mtime: The time the asset to add was modified
            (:attr:`os.stat_result.st_mtime`)
        :param contents: The contents of the file - this can either be the contents
            directly as a :const:`File`, or a thunk function that, when called, returns
            the contents
        :type contents: :const:`File` or :obj:`Callable[[], File]`
        :raises RuntimeError: if the transaction has already been committed
        :raises ValueError: if the asset name is not valid
        """
        if not self._is_active:
            raise RuntimeError("This transaction has already been committed")
        self._asset_manager_context._assert_valid_name(name)
        get_contents: Callable[[], File]
        if callable(contents):
            get_contents = contents
        else:
            # Wrap plain contents in a thunk so both cases look the same at
            # commit time (and the potentially expensive fetch stays lazy).
            def get_contents() -> File:
                return cast(File, contents)

        self._files[name] = self._FileEntry(mtime=mtime, get_contents=(get_contents,))

    def add_file(self, name: str, file: Path) -> None:
        """
        Add an already-existing file on disk to the context

        This wraps :meth:`add`

        :param str name: The name of the asset to add
        :param Path file: The path to the asset on disk
        :raises ValueError: if ``file`` does not exist
        """
        if not file.is_file():
            raise ValueError(f"Provided file does not exist: '{str(file)}'")
        self.add(name, file.stat().st_mtime, lambda: file)

    def _create(self, fpath: Path, fentry: _FileEntry) -> None:
        """
        Create / overwrite the asset in the cache

        :meta private:
        """
        # Remove any stale cache entry first: Path.symlink_to() raises
        # FileExistsError if the path already exists, and opening an existing
        # symlink with "wb" would write *through* the link, clobbering the
        # original source file it points at.
        if fpath.is_symlink() or fpath.exists():
            fpath.unlink()
        contents = fentry.get_contents[0]()
        if isinstance(contents, Path):
            # On-disk sources are symlinked rather than copied
            if not contents.is_file():
                raise ValueError(f"Provided file does not exist: '{str(contents)}'")
            fpath.symlink_to(contents)
        else:
            if isinstance(contents, ByteString):
                contents = io.BytesIO(contents)
            assert isinstance(contents, io.IOBase)
            with fpath.open("wb") as ofd:
                shutil.copyfileobj(contents, ofd)
            os.utime(fpath, (fentry.mtime, fentry.mtime))

    def commit(self) -> None:
        """
        Commit the transaction.

        This transaction can no longer be used after :meth:`commit` is called.
        """
        self._is_active = False
        self._asset_manager_context._is_transaction_active = False
        # Anything currently in the context that was not re-declared on this
        # transaction is deleted at the end.
        files_to_delete = set(self._asset_manager_context.ls())
        for name, file_entry in self._files.items():
            fpath = self._asset_manager_context._get(name)
            try:
                files_to_delete.remove(name)
            except KeyError:
                pass
            if self._asset_manager_context.exists(name):
                cache_mtime = self._asset_manager_context.get_mtime(name)
                if not file_entry.mtime > cache_mtime:
                    # Cached copy is at least as new; skip fetching contents
                    continue
            self._create(fpath, file_entry)
            self._asset_manager_context._add(name, force=True)
        for name in files_to_delete:
            fpath = self._asset_manager_context.get(name)
            fpath.unlink()
            self._asset_manager_context._rm(name)
        self._asset_manager_context.sync(check=True)
177 |
178 |
class AssetManagerContext:
    """
    A subcontext within an :class:`AssetManager`

    Represents a namespace within the :class:`AssetManager`, essentially a
    subdirectory. The context holds assets for a challenge with the same id

    This class is not meant to be constructed directly, use
    :meth:`AssetManager.create_context`
    """

    # Parent asset manager this context lives in
    _asset_manager: "AssetManager"
    # Context name (the challenge id); also the cache subdirectory name
    _name: str
    # Root directory for this context: <asset_manager.root>/<name>
    _root: Path
    # Asset names currently recorded in the manifest
    _files: Set[str]
    # Directory holding the asset files themselves: <root>/files
    _files_root: Path
    # Path to this context's manifest.json
    _manifest_file: Path

    # True while an AssetManagerTransaction is outstanding; guards transaction()
    _is_transaction_active: bool

    def __init__(self, asset_manager: "AssetManager", name: str):
        """
        :meta private:
        """
        self._asset_manager = asset_manager
        self._name = name
        self._files = set()
        self._is_transaction_active = False
        self._root = self._asset_manager.root / name
        self._root.mkdir(parents=True, exist_ok=True)
        self._files_root = self._root / "files"
        self._files_root.mkdir(exist_ok=True)
        self._manifest_file = self._root / "manifest.json"

        # Load the manifest if one exists, dropping any asset whose symlink no
        # longer resolves (its source file was moved or deleted), then re-sync.
        try:
            with self._manifest_file.open("r") as fd:
                manifest = json.load(fd)
            self._files = set(manifest["files"])
            for fn in list(self._files):
                f = self._get(fn)
                if f.is_symlink() and not f.exists():
                    # Broken symlink; remove it
                    self._files.remove(fn)
                    f.unlink()
            self.sync()
        except FileNotFoundError:
            # No manifest yet; this is a brand-new (empty) context
            pass

    def _assert_valid_name(self, name: str) -> None:
        """
        Raise :class:`ValueError` unless ``name`` is a single, safe path
        component (see :func:`_is_valid_name`).

        :meta private:
        """
        if not _is_valid_name(name):
            raise ValueError(f"Invalid asset name '{name}'")

    def transaction(self) -> AssetManagerTransaction:
        """
        Create a :class:`AssetManagerTransaction`.

        Only one transaction can be created at a time.

        :returns: The transaction
        :raises RuntimeError: when attempting to create a transaction while one already
            exists
        """
        # TODO: better locking mechanism?
        if self._is_transaction_active:
            raise RuntimeError(
                "Attempted to create transaction while one is already in progress"
            )
        self._is_transaction_active = True
        return AssetManagerTransaction(self)

    def sync(self, *, check: bool = True):
        """
        Syncs the manifest for this context to disk

        :param bool check: If true (default), check to make sure all files in the
            manifest exist, and that there are no extra files
        """
        if check:
            disk = set(self._files_root.iterdir())
            files = {self._files_root / f for f in self._files}
            # On-disk items the manifest doesn't know about are removed
            for extra in disk - files:
                warn(
                    RuntimeWarning(
                        f"Unexpected item found in cache: '{str(extra)}'; removing..."
                    )
                )
                if extra.is_dir():
                    shutil.rmtree(extra)
                else:
                    extra.unlink()
            # Manifest entries with no backing file are unrecoverable
            for missing in files - disk:
                raise RuntimeError(f"Cache item missing: '{str(missing)}'")
        with self._manifest_file.open("w") as fd:
            json.dump({"files": sorted(self._files)}, fd)

    def _add(self, name: str, *, force: bool = False) -> None:
        """
        Add an asset to the manifest

        :meta private:
        :param str name: The name of the asset
        :param bool force: If true, do not error if the asset already exists
        """
        self._assert_valid_name(name)
        if not force and name in self._files:
            raise FileExistsError(f"Asset already exists: '{name}'")
        self._files.add(name)

    def _rm(self, name: str, *, force: bool = False) -> None:
        """
        Remove an asset from the manifest

        :meta private:
        :param str name: The name of the asset
        :param bool force: If true, do not error if the asset does not exist
        """
        self._assert_valid_name(name)
        try:
            self._files.remove(name)
        except KeyError:
            if not force:
                raise FileNotFoundError(f"Asset not found: '{name}'")

    def ls(self) -> Iterable[str]:
        """
        List all files within this context

        Note: this returns the internal set directly; copy it before mutating
        the context while iterating.

        :returns: The list of asset names
        """
        return self._files

    def _get(self, name: str) -> Path:
        """
        Retrieves the path for an asset with the given name, even if it does not already
        exist

        :meta private:
        """
        self._assert_valid_name(name)
        return self._files_root / name

    def exists(self, name: str) -> bool:
        """
        Queries if an asset exists

        :param str name: The name of the asset
        :returns: Whether or not it exists
        """
        self._assert_valid_name(name)
        return name in self._files

    def get(self, name: str) -> Path:
        """
        Retrieves the asset

        :param str name: The name of the asset
        :returns: The asset
        :raises FileNotFoundError: if the asset is not in the manifest
        """
        if not self.exists(name):
            raise FileNotFoundError(f"Asset not found: '{name}'")
        return self._get(name)

    def get_mtime(self, name: str) -> float:
        """
        Retrieves the time an asset was modified

        :param str name: The name of the asset
        :returns: The time the asset was modified (:attr:`os.stat_result.st_mtime`)
        """
        return self.get(name).stat().st_mtime

    def clear(self) -> None:
        """
        Clear all files in this context
        """
        for f in self.ls():
            self.get(f).unlink()
        self._files = set()
        self.sync(check=True)
358 |
359 |
class AssetManager:
    """
    Class for managing assets from challenges that are provided to competitors

    This class manages all assets under a given project.
    """

    project: "Project"
    root: Path

    def __init__(self, project: "rcds.Project"):
        self.project = project
        # Every context lives under .rcds-cache/assets within the project root
        self.root = self.project.root / ".rcds-cache" / "assets"
        self.root.mkdir(parents=True, exist_ok=True)

    def create_context(self, name: str) -> AssetManagerContext:
        """
        Create a subcontext within the :class:`AssetManager`

        :param str name: The name of the context (challenge id)
        :raises ValueError: if the context name is not valid
        """
        if _is_valid_name(name):
            return AssetManagerContext(self, name)
        raise ValueError(f"Invalid context name '{name}'")

    def list_context_names(self) -> Iterable[str]:
        """
        List the names of all subcontexts within this :class:`AssetManager`

        :returns: The contexts' names. Call :meth:`create_context` on a name to obtain a
            :class:`AssetManagerContext` object
        """
        for entry in self.root.iterdir():
            # Only context directories are expected at this level
            if not entry.is_dir():
                raise RuntimeError(f"Unexpected item found in cache: '{str(entry)}'")
            yield entry.name
397 |
--------------------------------------------------------------------------------
/rcds/project/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Functions for loading project config files
3 | """
4 |
5 | from itertools import tee
6 | from pathlib import Path
7 | from typing import Any, Dict, Iterable, Optional, Tuple, Union, cast
8 |
9 | import jsonschema # type: ignore
10 |
11 | from rcds import errors
12 |
13 | from ..util import load_any
14 | from ..util.jsonschema import DefaultValidatingDraft7Validator
15 |
# Validator for project (rcds.yaml) configs; fills in schema ``default`` values
# while validating (see rcds.util.jsonschema) and enables draft-7 format checks.
config_schema_validator = DefaultValidatingDraft7Validator(
    schema=load_any(Path(__file__).parent / "rcds.schema.yaml"),
    format_checker=jsonschema.draft7_format_checker,
)
20 |
21 |
def parse_config(
    config_file: Path,
) -> Iterable[Union[errors.ValidationError, Dict[str, Any]]]:
    """
    Load and validate a config file, returning both the config and any
    errors encountered.

    :param pathlib.Path config_file: The challenge config to load
    :returns: Iterable containing any errors (all instances of
        :class:`rcds.errors.ValidationError`) and the parsed config. The config will
        always be last.
    """
    config = load_any(config_file)
    # Materialize all schema errors up front; yielding from an empty list is a
    # no-op, so no separate emptiness check is needed.
    schema_errors = [
        errors.SchemaValidationError(str(e), e)
        for e in config_schema_validator.iter_errors(config)
    ]
    yield from schema_errors
    yield config
46 |
47 |
def check_config(
    config_file: Path,
) -> Tuple[Optional[Dict[str, Any]], Optional[Iterable[errors.ValidationError]]]:
    """
    Load and validate a config file, returning any errors encountered.

    If the config file is valid, the tuple returned contains the loaded config as
    the first element, and the second element is None. Otherwise, the second
    element is an iterable of errors that occurred during validation

    This method wraps :func:`parse_config`.

    :param pathlib.Path config_file: The challenge config to load
    """
    # parse_config yields zero or more errors followed by the parsed config,
    # so the first item tells us whether validation failed.
    results = list(parse_config(config_file))
    head = results[0]
    if isinstance(head, errors.ValidationError):
        validation_errors = cast(
            Iterable[errors.ValidationError],
            [item for item in results if isinstance(item, errors.ValidationError)],
        )
        return (None, validation_errors)
    return (cast(Dict[str, Any], head), None)
73 |
74 |
def load_config(config_file: Path) -> Dict[str, Any]:
    """
    Loads a config file, or throw an exception if it is not valid

    This method wraps :func:`check_config`, and throws the first error returned
    if there are any errors.

    :param pathlib.Path config_file: The challenge config to load
    :returns: The loaded config
    :raises rcds.errors.ValidationError: if the config fails validation
    """
    # Named `validation_errors` so the imported `errors` module isn't shadowed.
    config, validation_errors = check_config(config_file)
    if validation_errors is not None:
        raise next(iter(validation_errors))
    # validation_errors is None, so check_config guarantees a config
    assert config is not None
    return config
91 |
--------------------------------------------------------------------------------
/rcds/project/project.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import Any, Dict, Optional
3 |
4 | import docker # type: ignore
5 | from jinja2 import Environment
6 |
7 | from rcds.util import SUPPORTED_EXTENSIONS, find_files
8 |
9 | from ..backend import BackendContainerRuntime, BackendScoreboard, load_backend_module
10 | from ..challenge import Challenge, ChallengeLoader
11 | from . import config
12 | from .assets import AssetManager
13 |
14 |
class Project:
    """
    An rCDS project; the context that all actions are done within
    """

    # Absolute path to the project root (directory containing the rcds config)
    root: Path
    # Parsed and validated project config
    config: dict
    # Loaded challenges, keyed by path relative to the project root
    challenges: Dict[Path, Challenge]
    challenge_loader: ChallengeLoader

    asset_manager: AssetManager

    # Populated by load_backends(); None until then, or when no configured
    # backend provides the capability
    container_backend: Optional[BackendContainerRuntime] = None
    scoreboard_backend: Optional[BackendScoreboard] = None

    jinja_env: Environment
    docker_client: Any

    def __init__(
        self, root: Path, docker_client: Optional[docker.client.DockerClient] = None
    ):
        """
        Create a project

        :param root: Directory containing the project (``rcds.*``) config file
        :param docker_client: Docker client to use; one is created from the
            environment if not provided
        :raises ValueError: if no config file is found in ``root``
        """
        root = root.resolve()
        try:
            cfg_file = find_files(
                ["rcds"], SUPPORTED_EXTENSIONS, path=root, recurse=False
            )["rcds"]
        except KeyError:
            raise ValueError(f"No config file found at '{root}'")
        self.root = root
        self.config = config.load_config(cfg_file)
        self.challenge_loader = ChallengeLoader(self)
        self.challenges = dict()
        self.asset_manager = AssetManager(self)
        # Templates here are not HTML, so HTML autoescaping is disabled
        self.jinja_env = Environment(autoescape=False)
        if docker_client is not None:
            self.docker_client = docker_client
        else:
            self.docker_client = docker.from_env()

    def load_all_challenges(self) -> None:
        # Find every challenge.{yml,yaml,json} under the project root and load
        # it, keyed by its directory's path relative to the root.
        for ext in SUPPORTED_EXTENSIONS:
            for chall_file in self.root.rglob(f"challenge.{ext}"):
                path = chall_file.parent
                self.challenges[
                    path.relative_to(self.root)
                ] = self.challenge_loader.load(path)

    def get_challenge(self, relPath: Path) -> Challenge:
        """Return a previously-loaded challenge by its root-relative path."""
        return self.challenges[relPath]

    def load_backends(self) -> None:
        # Backends earlier in the config array take precedence: the first one
        # providing each capability wins, since later backends only fill slots
        # that are still None.
        for backend_config in self.config["backends"]:
            backend_info = load_backend_module(backend_config["resolve"])
            if self.scoreboard_backend is None and backend_info.HAS_SCOREBOARD:
                self.scoreboard_backend = backend_info.get_scoreboard(
                    self, backend_config["options"]
                )
            if self.container_backend is None and backend_info.HAS_CONTAINER_RUNTIME:
                self.container_backend = backend_info.get_container_runtime(
                    self, backend_config["options"]
                )
        # TODO: maybe don't reinitialize here?
        self.challenge_loader = ChallengeLoader(self)
81 |
--------------------------------------------------------------------------------
/rcds/project/rcds.schema.yaml:
--------------------------------------------------------------------------------
1 | $schema: http://json-schema.org/draft-07/schema#
2 | $id: http://rcds.redpwn.com/schemas/rcds.yaml
3 | type: object
4 | properties:
5 | docker:
6 | type: object
7 | description: >-
8 | Settings for the docker images that will be built for challenges.
9 | properties:
10 | image:
11 | type: object
12 | description: >-
13 | Settings controlling the generation of tags for built docker images.
14 | properties:
15 | prefix:
16 | type: string
17 | description: >-
              Prefix for the docker image. This will be joined as a URL
              component with the image name as determined by the template. This
              would include the registry url if you are not planning to use
              Docker Hub.
22 | examples:
23 | - redpwn
24 | - quay.io/redpwn
25 | template:
26 | type: string
27 | description: >-
28 | Template for the name of the docker image.
29 | default: "rcds-{{ challenge.id }}-{{ container.name }}"
30 | required:
31 | - prefix
32 | - template
33 | required:
34 | - image
35 | backends:
36 | type: array
37 | description: >-
38 | Deployment backends to use. Backends earlier in the array have higher
39 | precedence.
40 | items:
41 | type: object
42 | properties:
43 | resolve:
44 | type: string
45 | description: >-
46 | The name of this backend (either the name of one of rCDS's internal
47 | backends, or its fully qualified module name).
48 | options:
49 | type: object
50 | description: >-
51 | Options for this backend.
52 | default: {}
53 | required:
54 | - resolve
55 | - options
56 | default: []
57 | defaults:
58 | type: object
59 | description: >-
60 | Default options for challenges. Values for array / mapping keys such as
61 | `expose` and `containers` will be applied to each individual item within
62 | that key (on the level where a concrete schema is defined).
63 | not:
64 | anyOf:
65 | # Properties that are not allowed to have defaults
66 | - required: ['id']
67 | flagFormat:
68 | type: string
69 | description: >-
70 | (Optional) regex to validate all flags against. Will be surrounded with `^$`.
71 | format: regex
72 | required:
73 | - backends
74 |
--------------------------------------------------------------------------------
/rcds/py.typed:
--------------------------------------------------------------------------------
1 | # Marker file for PEP 561. The mypy package uses inline types.
2 |
--------------------------------------------------------------------------------
/rcds/util/__init__.py:
--------------------------------------------------------------------------------
1 | from .deep_merge import deep_merge # noqa: F401
2 | from .find import find_files # noqa: F401
3 | from .load import SUPPORTED_EXTENSIONS, load_any # noqa: F401
4 |
--------------------------------------------------------------------------------
/rcds/util/deep_merge.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | from typing import List, Tuple, Union
3 |
4 |
def deep_merge(a: dict, *rest: dict) -> dict:
    """
    Deep merge remaining arguments from left to right into `a`. Mutates `a`,
    does not mutate any other arguments.

    :param a dict:
    :return: the merged dictionary
    :rtype: dict
    """
    for overlay in rest:
        for key, value in overlay.items():
            if isinstance(value, dict):
                if key in a and isinstance(a[key], dict):
                    # Both sides are dicts: merge recursively in place
                    deep_merge(a[key], value)
                else:
                    # Deep-copy so later mutation of `a` can't affect `overlay`
                    a[key] = deepcopy(value)
            else:
                a[key] = value
    return a
26 |
--------------------------------------------------------------------------------
/rcds/util/find.py:
--------------------------------------------------------------------------------
1 | from itertools import chain
2 | from pathlib import Path
3 | from typing import Dict, List, Optional
4 |
5 | from .load import SUPPORTED_EXTENSIONS
6 |
7 |
def find_files(
    names: List[str],
    extensions: List[str],
    path: Optional[Path] = None,
    recurse: bool = True,
) -> Dict[str, Path]:
    """
    Search a directory for files named ``<name>.<extension>``.

    Each name is matched at most once; with ``recurse`` set, the search also
    walks *upwards* through ``path``'s ancestors (not into subdirectories),
    and directories closer to ``path`` take precedence.

    :param names: File stems to look for
    :param extensions: Extensions (without the leading dot) to accept
    :param path: Directory to search; defaults to the current working directory
    :param recurse: Also search ancestor directories
    :returns: Mapping from found stem to the matched file's path
    """
    if path is None:
        path = Path.cwd().resolve()
    remaining = set(names)
    found: Dict[str, Path] = dict()
    directories = chain([path], path.parents) if recurse else [path]
    for directory in directories:
        for entry in directory.iterdir():
            if not entry.is_file():
                continue
            if entry.stem in remaining and entry.suffix[1:] in extensions:
                found[entry.stem] = entry
                remaining.remove(entry.stem)
    return found
25 |
26 |
def find_cfgs(path: Optional[Path] = None) -> Dict[str, Path]:
    """Find ``rcds`` and ``challenge`` config files at (or above) ``path``."""
    return find_files(["rcds", "challenge"], SUPPORTED_EXTENSIONS, path)
29 |
--------------------------------------------------------------------------------
/rcds/util/jsonschema.py:
--------------------------------------------------------------------------------
1 | from jsonschema import Draft7Validator, validators # type: ignore
2 |
3 | # From
4 | # https://python-jsonschema.readthedocs.io/en/stable/faq/#why-doesn-t-my-schema-s-default-property-set-the-default-on-my-instance # noqa: B950
5 |
6 |
def extend_with_default(validator_class):
    """
    Return a validator class that also applies schema ``default`` values.

    The given class's "properties" validator is wrapped so that each property
    with a ``default`` in its subschema is inserted into the instance when
    missing (via ``setdefault``); taken from the python-jsonschema FAQ linked
    above.
    """
    validate_properties = validator_class.VALIDATORS["properties"]

    def set_defaults(validator, properties, instance, schema):
        # Fill in defaults first, then delegate to the original "properties"
        # validator so normal validation errors are still produced.
        for property, subschema in properties.items():
            if "default" in subschema:
                instance.setdefault(property, subschema["default"])

        yield from validate_properties(validator, properties, instance, schema)

    return validators.extend(validator_class, {"properties": set_defaults})


# Draft-7 validator that additionally fills in ``default`` values.
DefaultValidatingDraft7Validator = extend_with_default(Draft7Validator)
21 |
--------------------------------------------------------------------------------
/rcds/util/load.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility for loading configuration files
3 |
4 | Objects only, loading files consisting of arrays is not supported.
5 | """
6 |
7 | import json
8 | from pathlib import Path
9 | from typing import Any, Dict
10 |
11 | import yaml
12 |
13 |
14 | def _normalize_jsonlike(data: Any) -> Dict[str, Any]:
15 | if data is None:
16 | return dict()
17 | return data
18 |
19 |
def load_yaml(f: Path) -> Dict[str, Any]:
    """Load a YAML config file, normalizing an empty document to ``{}``."""
    with f.open("r") as fd:
        parsed = yaml.safe_load(fd)
    return _normalize_jsonlike(parsed)
23 |
24 |
def load_json(f: Path) -> Dict[str, Any]:
    """Load a JSON config file, normalizing a ``null`` document to ``{}``."""
    with f.open("r") as fd:
        parsed = json.load(fd)
    return _normalize_jsonlike(parsed)
28 |
29 |
def load_any(f: Path) -> Dict[str, Any]:
    """
    Load a config file, dispatching on its extension.

    :param f: Path to a ``.yml`` / ``.yaml`` / ``.json`` file
    :returns: The parsed document (an empty dict for an empty document)
    :raises ValueError: if the file's extension is not supported
    """
    ext = f.suffix[1:]
    if ext in ("yml", "yaml"):
        return load_yaml(f)
    if ext == "json":
        return load_json(f)
    # ValueError is still an Exception subclass, so existing handlers keep
    # working; include the file so the failure is actionable.
    raise ValueError(f"Unsupported extension '{ext}' for config file '{f}'")


SUPPORTED_EXTENSIONS = ["yml", "yaml", "json"]
41 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | from pathlib import Path
3 | from textwrap import dedent
4 |
5 | import pytest # type: ignore
6 |
7 | from rcds import Project, errors
8 | from rcds.challenge import ChallengeLoader
9 |
10 |
@pytest.fixture
def project(datadir) -> Project:
    """A :class:`Project` rooted at this test module's data directory."""
    proj = Project(datadir)
    return proj
14 |
15 |
@pytest.fixture
def loader(project) -> ChallengeLoader:
    """A challenge loader bound to the ``project`` fixture."""
    chall_loader = ChallengeLoader(project)
    return chall_loader
19 |
20 |
def test_load_yaml_challenge(project: Project, loader: ChallengeLoader) -> None:
    """YAML challenge configs load with the directory name as the derived id."""
    chall = loader.load(project.root / "yaml")
    cfg = chall.config
    assert cfg["name"] == "Challenge"
    assert cfg["description"] == "Description"
    assert chall.get_relative_path() == Path("yaml")
    assert cfg["id"] == "yaml"
27 |
28 |
def test_load_json_challenge(project: Project, loader: ChallengeLoader) -> None:
    """JSON challenge configs load identically to YAML ones."""
    chall = loader.load(project.root / "json")
    cfg = chall.config
    assert cfg["name"] == "Challenge"
    assert cfg["description"] == "Description"
    assert chall.get_relative_path() == Path("json")
    assert cfg["id"] == "json"
35 |
36 |
def test_override_challenge_id(project: Project, loader: ChallengeLoader) -> None:
    """An explicit ``id`` key in the config wins over the directory-derived id."""
    loaded = loader.load(project.root / "id_override")
    assert loaded.config["id"] == "overridden"
40 |
41 |
def test_load_nonexistent_challenge(project: Project, loader: ChallengeLoader) -> None:
    """Loading a directory without a config file raises a descriptive ValueError."""
    with pytest.raises(ValueError) as exc:
        loader.load(project.root / "nonexistent")
    assert "No config file found at " in str(exc)
46 |
47 |
def test_load_bad_dir_name(project: Project, loader: ChallengeLoader) -> None:
    """A directory whose name is not a valid id fails schema validation."""
    with pytest.raises(errors.SchemaValidationError):
        loader.load(project.root / "bad#dir")
51 |
52 |
def test_render_description(project: Project, loader: ChallengeLoader) -> None:
    """Descriptions render with challenge fields and extra context variables."""
    chall = loader.load(project.root / "render-description")
    chall.context["foo"] = "bar"
    expected = dedent(
        """
        # A fancy challenge (render-description)
        **Written by Robert**
        bar
        """
    ).strip()
    assert chall.render_description() == expected
66 |
67 |
def test_static_assets(project: Project, loader: ChallengeLoader) -> None:
    """``provide`` entries are materialized into the asset manager context."""
    chall = loader.load(project.root / "static-assets")
    chall.create_transaction().commit()
    ctx = project.asset_manager.create_context("static-assets")
    chall_dir = project.root / "static-assets"
    assert set(ctx.ls()) == {"file1.txt", "file3.txt"}
    # file1.txt provided as-is; file2.txt provided under the name file3.txt
    assert ctx.get("file1.txt").read_text() == (chall_dir / "file1.txt").read_text()
    assert ctx.get("file3.txt").read_text() == (chall_dir / "file2.txt").read_text()
81 |
82 |
class TestContextShortcuts:
    """Shortcut variables exposed to description templates for ``expose`` entries."""

    @staticmethod
    def test_tcp(project: Project, loader: ChallengeLoader) -> None:
        chall = loader.load(project.root / "shortcuts-tcp")
        expose_cfg = chall.config["expose"]["nginx"][0]
        expose_cfg["host"] = "tcp.example.com"
        cfg = deepcopy(expose_cfg)
        shortcuts = chall.get_context_shortcuts()
        host, port = cfg["host"], cfg["tcp"]
        assert shortcuts["host"] == host
        assert shortcuts["port"] == port
        assert shortcuts["url"] == f"http://{host}:{port}"
        assert shortcuts["link"] == f"[{host}:{port}](http://{host}:{port})"
        assert shortcuts["nc"] == f"nc {host} {port}"

    @staticmethod
    def test_http(project: Project, loader: ChallengeLoader) -> None:
        chall = loader.load(project.root / "shortcuts-http")
        cfg = deepcopy(chall.config["expose"]["nginx"][0])
        shortcuts = chall.get_context_shortcuts()
        host = cfg["http"]
        assert shortcuts["host"] == host
        assert shortcuts["url"] == f"https://{host}"
        assert shortcuts["link"] == f"[{host}](https://{host})"
115 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/bad#dir/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Challenge
2 | description: Description
3 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/id_override/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Challenge
2 | description: Description
3 | id: overridden
4 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/json/challenge.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Challenge",
3 | "description": "Description"
4 | }
5 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/nonexistent/.dir:
--------------------------------------------------------------------------------
1 | This file is here to force Git to record the directory it is in
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/rcds.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redpwn/rcds/0b3a2e1c6aa272725b2cd032ddddd139393cc5d3/tests/challenge/test_challenge/rcds.yml
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/render-description/challenge.yml:
--------------------------------------------------------------------------------
1 | name: A fancy challenge
2 | description: |-
3 | # {{ challenge.name }} ({{ challenge.id }})
4 | **Written by {{ challenge.author }}**
5 | {{ foo }}
6 | author: Robert
7 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/shortcuts-http/challenge.yaml:
--------------------------------------------------------------------------------
1 | name: name
2 | description: description
3 |
4 | containers:
5 | nginx:
6 | image: nginx
7 | ports: [80]
8 |
9 | expose:
10 | nginx:
11 | - target: 80
12 | http: nginx
13 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/shortcuts-tcp/challenge.yaml:
--------------------------------------------------------------------------------
1 | name: name
2 | description: description
3 |
4 | containers:
5 | nginx:
6 | image: nginx
7 | ports: [80]
8 |
9 | expose:
10 | nginx:
11 | - target: 80
12 | tcp: 31582
13 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/static-assets/challenge.yml:
--------------------------------------------------------------------------------
1 | name: name
2 | description: description
3 |
4 | provide:
5 | - ./file1.txt
6 | - file: ./file2.txt
7 | as: file3.txt
8 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/static-assets/file1.txt:
--------------------------------------------------------------------------------
1 | File 1 contents
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/static-assets/file2.txt:
--------------------------------------------------------------------------------
1 | File 2 contents
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_challenge/yaml/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Challenge
2 | description: Description
3 |
--------------------------------------------------------------------------------
/tests/challenge/test_config.py:
--------------------------------------------------------------------------------
1 | import pytest # type: ignore
2 |
3 | import rcds
4 | import rcds.errors
5 | from rcds import Project
6 | from rcds.challenge import config
7 |
8 |
@pytest.fixture
def test_datadir(request, datadir):
    """Per-test data directory, derived from the requesting test's name.

    ``test_foo_bar`` maps to ``<datadir>/foo-bar``.
    """
    fn_name = request.function.__name__
    assert fn_name.startswith("test_")
    return datadir / fn_name[len("test_"):].replace("_", "-")
14 |
15 |
@pytest.fixture(scope="function")
def project(datadir):
    """Fresh :class:`Project` rooted at the shared data directory."""
    proj = Project(datadir)
    return proj
19 |
20 |
@pytest.fixture
def configloader(project):
    """ConfigLoader bound to the function-scoped ``project`` fixture."""
    loader = config.ConfigLoader(project)
    return loader
24 |
25 |
def test_valid(configloader, test_datadir) -> None:
    """A well-formed challenge config loads with no errors reported."""
    cfg_path = test_datadir / "challenge.yml"
    cfg, errors = configloader.check_config(cfg_path)
    assert errors is None
    assert cfg["flag"] == "flag{test_flag_here}"
30 |
31 |
def test_schema_fail(configloader, test_datadir) -> None:
    """A config missing required fields yields schema validation errors."""
    cfg, errors = configloader.check_config(test_datadir / "challenge.yml")
    assert errors is not None
    assert cfg is None
    errors = list(errors)
    # at least one error must be a SchemaValidationError
    assert any(isinstance(e, rcds.errors.SchemaValidationError) for e in errors)
40 |
41 |
def test_expose_no_containers(configloader, test_datadir) -> None:
    """`expose` without a `containers` section is a target-not-found error."""
    cfg, errors = configloader.check_config(test_datadir / "challenge.yml")
    assert errors is not None
    assert cfg is None
    errors = list(errors)
    error_messages = [str(e) for e in errors]
    assert len(errors) != 0
    assert "Cannot expose ports without containers defined" in error_messages
    # Exactly one TargetNotFoundError; generator avoids a throwaway list (C419).
    assert sum(1 for e in errors if isinstance(e, config.TargetNotFoundError)) == 1
51 |
52 |
def test_nonexistent_target_container(configloader, test_datadir) -> None:
    """Exposing a container name absent from `containers` is reported."""
    cfg, errors = configloader.check_config(test_datadir / "challenge.yml")
    assert errors is not None
    assert cfg is None
    errors = list(errors)
    error_messages = [str(e) for e in errors]
    assert len(errors) != 0
    assert (
        '`expose` references container "main" but it is not defined in `containers`'
        in error_messages
    )
    # Exactly one TargetNotFoundError; generator avoids a throwaway list (C419).
    assert sum(1 for e in errors if isinstance(e, config.TargetNotFoundError)) == 1
65 |
66 |
def test_nonexistent_target_port(configloader, test_datadir) -> None:
    """Exposing a port the container does not declare is reported."""
    cfg, errors = configloader.check_config(test_datadir / "challenge.yml")
    assert errors is not None
    assert cfg is None
    errors = list(errors)
    error_messages = [str(e) for e in errors]
    assert len(errors) != 0
    assert (
        '`expose` references port 1 on container "main" which is not defined'
        in error_messages
    )
    # Exactly one TargetNotFoundError; generator avoids a throwaway list (C419).
    assert sum(1 for e in errors if isinstance(e, config.TargetNotFoundError)) == 1
79 |
80 |
def test_nonexistent_provide_file(configloader, test_datadir) -> None:
    """A `provide` entry pointing at a missing file is reported."""
    cfg, errors = configloader.check_config(test_datadir / "challenge.yml")
    assert errors is not None
    assert cfg is None
    errors = list(errors)
    error_messages = [str(e) for e in errors]
    assert len(errors) != 0
    assert (
        '`provide` references file "nonexistent" which does not exist' in error_messages
    )
    # Exactly one TargetFileNotFoundError; generator avoids a list (C419).
    assert (
        sum(1 for e in errors if isinstance(e, config.TargetFileNotFoundError)) == 1
    )
94 |
95 |
def test_nonexistent_flag_file(configloader, test_datadir) -> None:
    """A `flag.file` entry pointing at a missing file is reported."""
    cfg, errors = configloader.check_config(test_datadir / "challenge.yml")
    assert errors is not None
    assert cfg is None
    errors = list(errors)
    error_messages = [str(e) for e in errors]
    assert len(errors) != 0
    assert (
        '`flag.file` references file "nonexistent" which does not exist'
        in error_messages
    )
    # Exactly one TargetFileNotFoundError; generator avoids a list (C419).
    assert (
        sum(1 for e in errors if isinstance(e, config.TargetFileNotFoundError)) == 1
    )
110 |
111 |
def test_warn_multiline_flag(configloader, test_datadir) -> None:
    """A flag file containing multiple lines triggers a RuntimeWarning."""
    expected_warning = r"^Flag contains multiple lines; is this intended\?$"
    with pytest.warns(RuntimeWarning, match=expected_warning):
        cfg, errors = configloader.check_config(test_datadir / "challenge.yml")
        assert errors is None
118 |
119 |
def test_default_category(configloader, test_datadir) -> None:
    """A nested challenge picks up a category — presumably from its parent
    directory name; confirm against ConfigLoader."""
    chall_yml = test_datadir / "chall" / "challenge.yml"
    cfg = configloader.load_config(chall_yml)
    assert cfg is not None
    assert cfg["category"] == "default-category"
124 |
125 |
def test_no_default_category(configloader, test_datadir) -> None:
    """A challenge directly under the data dir gets no implicit category."""
    cfg = configloader.load_config(test_datadir / "challenge.yml")
    assert cfg is not None
    assert "category" not in cfg
130 |
131 |
def test_load_valid(configloader: config.ConfigLoader, datadir) -> None:
    """load_config succeeds on a known-valid challenge."""
    loaded = configloader.load_config(datadir / "valid" / "challenge.yml")
    assert loaded is not None
135 |
136 |
def test_load_invalid(configloader: config.ConfigLoader, datadir) -> None:
    """load_config raises ValidationError on a config with check errors."""
    bad_config = datadir / "nonexistent-flag-file" / "challenge.yml"
    with pytest.raises(rcds.errors.ValidationError):
        configloader.load_config(bad_config)
140 |
141 |
class TestProjectDefaults:
    """Project-level ``defaults`` must merge into every challenge config."""

    @staticmethod
    def test_omnibus(project: Project, datadir) -> None:
        """Defaults apply to containers/expose/value, without clobbering
        values the challenge sets explicitly."""
        project.config["defaults"] = {
            "containers": {
                "resources": {
                    "limits": {"cpu": "10m", "memory": "10Mi"},
                    "requests": {"cpu": "10m", "memory": "10Mi"},
                }
            },
            "expose": {"foo": "bar"},
            "value": 100,
        }
        loader = config.ConfigLoader(project)
        defaults_dir = datadir / "defaults"

        first = loader.load_config(defaults_dir / "1" / "challenge.yml")
        assert first["value"] == 100
        # "main" takes the default resources wholesale.
        assert first["containers"]["main"] == {
            "image": "gcr.io/google-samples/hello-app",
            "resources": {
                "limits": {"cpu": "10m", "memory": "10Mi"},
                "requests": {"cpu": "10m", "memory": "10Mi"},
            },
            "ports": [80],
            "replicas": 1,
        }
        # "partial" overrides only limits.cpu; the rest is merged in.
        assert first["containers"]["partial"] == {
            "image": "gcr.io/google-samples/hello-app",
            "resources": {
                "limits": {"cpu": "20m", "memory": "10Mi"},
                "requests": {"cpu": "10m", "memory": "10Mi"},
            },
            "ports": [80],
            "replicas": 1,
        }
        assert first["expose"]["main"][0] == {"target": 80, "tcp": 31525, "foo": "bar"}
        assert first["expose"]["partial"][0] == {
            "target": 80,
            "tcp": 31546,
            "foo": "baz",
        }

        second = loader.load_config(defaults_dir / "2" / "challenge.yml")
        assert second["value"] == 100
184 |
185 |
class TestFlagFormat:
    """`flagFormat` in the project config constrains challenge flags."""

    @staticmethod
    def test_valid_flag(project: Project, datadir) -> None:
        """A flag matching the configured format passes validation."""
        project.config["flagFormat"] = r"flag\{[a-z]*\}"
        configloader = config.ConfigLoader(project)
        cfg, errors = configloader.check_config(
            datadir / "flag-format" / "valid" / "challenge.yml"
        )
        assert cfg is not None
        assert errors is None

    @staticmethod
    def test_invalid_flag(project: Project, datadir) -> None:
        """A non-matching flag produces exactly one InvalidFlagError."""
        project.config["flagFormat"] = r"flag\{[a-z]*\}"
        configloader = config.ConfigLoader(project)
        cfg, errors = configloader.check_config(
            datadir / "flag-format" / "invalid" / "challenge.yml"
        )
        assert errors is not None
        assert cfg is None
        errors = list(errors)
        error_messages = [str(e) for e in errors]
        assert len(errors) != 0
        assert 'Flag "flag{1234}" does not match the flag format' in error_messages
        # Generator instead of a throwaway list inside sum() (C419).
        assert sum(1 for e in errors if isinstance(e, config.InvalidFlagError)) == 1
211 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/default-category/chall/challenge.yml:
--------------------------------------------------------------------------------
1 | name: chall
2 | description: description
3 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/defaults/1/challenge.yml:
--------------------------------------------------------------------------------
1 | name: challenge
2 | description: desc
3 |
4 | containers:
5 | main:
6 | image: gcr.io/google-samples/hello-app
7 | ports: [80]
8 | partial:
9 | image: gcr.io/google-samples/hello-app
10 | resources:
11 | limits:
12 | cpu: 20m
13 | ports: [80]
14 |
15 | expose:
16 | main:
17 | - target: 80
18 | tcp: 31525
19 | partial:
20 | - target: 80
21 | tcp: 31546
22 | foo: baz
23 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/defaults/2/challenge.yml:
--------------------------------------------------------------------------------
1 | name: name
2 | description: description
3 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/expose-no-containers/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
5 | flag: flag{test_flag_here}
6 |
7 | description: |
8 | Here's the flag!
9 |
10 | `flag{test_flag_here}`
11 |
12 | expose:
13 | main:
14 | - target: 9999
15 | tcp: 31554
16 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/flag-format/invalid/challenge.yml:
--------------------------------------------------------------------------------
1 | name: name
2 | description: description
3 | flag: flag{1234}
4 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/flag-format/valid/challenge.yml:
--------------------------------------------------------------------------------
1 | name: name
2 | description: description
3 | flag: flag{abcd}
4 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/no-default-category/challenge.yml:
--------------------------------------------------------------------------------
1 | name: name
2 | description: description
3 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/nonexistent-flag-file/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
5 | flag:
6 | file: ./nonexistent
7 |
8 | description: |
9 | Here's the flag!
10 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/nonexistent-provide-file/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
5 | flag: flag{test_flag_here}
6 |
7 | description: |
8 | Here's the flag!
9 |
10 | `flag{test_flag_here}`
11 |
12 | provide:
13 | - ./nonexistent
14 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/nonexistent-target-container/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
5 | flag: flag{test_flag_here}
6 |
7 | description: |
8 | Here's the flag!
9 |
10 | `flag{test_flag_here}`
11 |
12 | containers:
13 | container:
14 | build: .
15 | ports:
16 | - 9999
17 |
18 | expose:
19 | main:
20 | - target: 9999
21 | tcp: 31554
22 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/nonexistent-target-port/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
5 | flag: flag{test_flag_here}
6 |
7 | description: |
8 | Here's the flag!
9 |
10 | `flag{test_flag_here}`
11 |
12 | containers:
13 | main:
14 | build: .
15 | ports:
16 | - 9999
17 |
18 | expose:
19 | main:
20 | - target: 1
21 | tcp: 31554
22 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/rcds.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redpwn/rcds/0b3a2e1c6aa272725b2cd032ddddd139393cc5d3/tests/challenge/test_config/rcds.yaml
--------------------------------------------------------------------------------
/tests/challenge/test_config/schema-fail/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/valid/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
5 | flag:
6 | file: ./flag.txt
7 |
8 | description: |
9 | Here's the flag!
10 |
11 | provide:
12 | - ./flag.txt
13 | - file: ./flag.txt
14 | as: other-file.txt
15 |
16 | containers:
17 | main:
18 | build: .
19 | ports:
20 | - 9999
21 | replicas: 2
22 |
23 | expose:
24 | main:
25 | - target: 9999
26 | tcp: 31554
27 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/valid/flag.txt:
--------------------------------------------------------------------------------
1 | flag{test_flag_here}
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/warn-multiline-flag/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Test challenge
2 | author: author
3 | value: 500
4 |
5 | flag:
6 | file: ./flag.txt
7 |
8 | description: |
9 | Here's the flag!
10 |
--------------------------------------------------------------------------------
/tests/challenge/test_config/warn-multiline-flag/flag.txt:
--------------------------------------------------------------------------------
1 | two
2 | lines
3 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import cast
3 |
4 | import pytest # type: ignore
5 |
6 | from rcds import ChallengeLoader, Project
7 | from rcds.challenge import docker
8 |
9 |
class TestGetContextFiles:
    """docker.get_context_files must honor .dockerignore semantics."""

    @staticmethod
    def _context_names(root: Path) -> set:
        """Return context file paths relative to *root* as strings."""
        assert root.is_dir()
        return {str(p.relative_to(root)) for p in docker.get_context_files(root)}

    def test_basic(self, datadir) -> None:
        expected = {
            "Dockerfile",
            "file",
            "a/file",
            "a/b/file",
            ".file",
            "a/.file",
            "a/b/.file",
        }
        assert self._context_names(datadir / "contexts" / "basic") == expected

    def test_with_dockerignore(self, datadir: Path) -> None:
        expected = {"Dockerfile", ".dockerignore", ".file", "file"}
        assert self._context_names(datadir / "contexts" / "dockerignore") == expected

    def test_complex_dockerignore(self, datadir: Path) -> None:
        expected = {"a", "b", "c/file", "d/file"}
        root = datadir / "contexts" / "complex_dockerignore"
        assert self._context_names(root) == expected
36 |
37 |
class TestGenerateSum:
    """Regression test pinning the context checksum of a known tree."""

    # Known-good checksum for the "basic" context fixture.
    EXPECTED_SUM = "683c5631d14165f0326ef55dfaf5463cc0aa550743398a4d8e31d37c4f5d6981"

    def test_basic(self, datadir) -> None:
        df_root = datadir / "contexts" / "basic"
        assert df_root.is_dir()
        # TODO: better way of testing than blackbox hash compare
        assert docker.generate_sum(df_root) == self.EXPECTED_SUM
47 |
48 |
class TestContainerManager:
    """End-to-end checks of docker.ContainerManager over a sample project."""

    @pytest.fixture()
    def project(self, datadir: Path) -> Project:
        """Project rooted at the on-disk fixture tree."""
        return Project(datadir / "project")

    def test_omnibus(self, project: Project) -> None:
        """Buildable and plain containers are classified and tagged correctly."""
        challenge_loader = ChallengeLoader(project)
        chall = challenge_loader.load(project.root / "chall")
        container_mgr = docker.ContainerManager(chall)

        # Container built from the default Dockerfile.
        simple_container = container_mgr.containers["simple"]
        assert simple_container.name == "simple"
        assert simple_container.IS_BUILDABLE
        # `is` compares the exact class; `==` on types is an E721 lint smell.
        assert type(simple_container) is docker.BuildableContainer
        simple_container = cast(docker.BuildableContainer, simple_container)
        assert simple_container.get_full_tag().startswith("registry.com/ns/")
        assert "simple" in simple_container.get_full_tag()
        assert chall.config["containers"]["simple"]["image"].startswith(
            "registry.com/ns/"
        )
        assert "simple" in chall.config["containers"]["simple"]["image"]
        assert simple_container.dockerfile == "Dockerfile"
        assert simple_container.buildargs == {}

        # Container with an alternate dockerfile and build args.
        complex_container = container_mgr.containers["complex"]
        assert complex_container.name == "complex"
        assert complex_container.IS_BUILDABLE
        assert type(complex_container) is docker.BuildableContainer
        complex_container = cast(docker.BuildableContainer, complex_container)
        assert complex_container.get_full_tag().startswith("registry.com/ns/")
        assert "complex" in complex_container.get_full_tag()
        assert chall.config["containers"]["complex"]["image"].startswith(
            "registry.com/ns/"
        )
        assert "complex" in chall.config["containers"]["complex"]["image"]
        assert complex_container.dockerfile == "Dockerfile.alternate"
        assert complex_container.buildargs["foo"] == "bar"

        # Plain (non-buildable) upstream image reference.
        pg_container = container_mgr.containers["postgres"]
        assert pg_container.name == "postgres"
        assert not pg_container.IS_BUILDABLE
        assert type(pg_container) is docker.Container
        assert pg_container.get_full_tag() == "postgres"

    def test_multiple_chall_independence(self, project) -> None:
        """Managers for different challenges must not share containers."""
        challenge_loader = ChallengeLoader(project)
        chall1 = challenge_loader.load(project.root / "chall")
        chall2 = challenge_loader.load(project.root / "chall2")
        chall1_mgr = docker.ContainerManager(chall1)
        chall2_mgr = docker.ContainerManager(chall2)

        assert "chall2ctr" not in chall1_mgr.containers
        assert "postgres" not in chall2_mgr.containers
102 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/basic/.file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/basic/Dockerfile:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redpwn/rcds/0b3a2e1c6aa272725b2cd032ddddd139393cc5d3/tests/challenge/test_docker/contexts/basic/Dockerfile
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/basic/a/.file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/basic/a/b/.file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/basic/a/b/file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/basic/a/file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/basic/file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/complex_dockerignore/.dockerignore:
--------------------------------------------------------------------------------
1 | *
2 | !/a/
3 | !/b
4 | !/c/
5 | !/d
6 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/complex_dockerignore/Dockerfile:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redpwn/rcds/0b3a2e1c6aa272725b2cd032ddddd139393cc5d3/tests/challenge/test_docker/contexts/complex_dockerignore/Dockerfile
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/complex_dockerignore/a:
--------------------------------------------------------------------------------
1 | asdf
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/complex_dockerignore/b:
--------------------------------------------------------------------------------
1 | asdf
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/complex_dockerignore/c/file:
--------------------------------------------------------------------------------
1 | asdf
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/complex_dockerignore/d/file:
--------------------------------------------------------------------------------
1 | asdf
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/.dockerignore:
--------------------------------------------------------------------------------
1 | a/
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/.file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/Dockerfile:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redpwn/rcds/0b3a2e1c6aa272725b2cd032ddddd139393cc5d3/tests/challenge/test_docker/contexts/dockerignore/Dockerfile
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/a/.file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/a/b/.file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/a/b/file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/a/file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/contexts/dockerignore/file:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/project/chall/challenge.yml:
--------------------------------------------------------------------------------
1 | name: Challenge
2 | description: Description
3 |
4 | containers:
5 | simple:
6 | build: .
7 | ports:
8 | - 9999
9 | complex:
10 | build:
11 | context: .
12 | dockerfile: Dockerfile.alternate
13 | args:
14 | foo: "bar"
15 | ports:
16 | - 9999
17 | postgres:
18 | image: postgres
19 | ports:
20 | - 5432
21 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/project/chall2/challenge.yml:
--------------------------------------------------------------------------------
1 | name: chall2
2 | description: desc
3 |
4 | containers:
5 | chall2ctr:
6 | build: .
7 | ports: [9999]
8 |
--------------------------------------------------------------------------------
/tests/challenge/test_docker/project/rcds.yml:
--------------------------------------------------------------------------------
1 | docker:
2 | image:
3 | prefix: registry.com/ns
4 |
--------------------------------------------------------------------------------
/tests/project/test_assets.py:
--------------------------------------------------------------------------------
1 | import io
2 | import time
3 | from pathlib import Path
4 | from textwrap import dedent
5 | from unittest import mock
6 |
7 | import pytest # type: ignore
8 |
9 | import rcds
10 | from rcds.project import assets
11 |
12 |
13 | def _create_project(path: Path) -> None:
14 | (path / "rcds.yml").write_text(
15 | dedent(
16 | """\
17 | """
18 | )
19 | )
20 |
21 |
@pytest.fixture(scope="function")
def am_fn(tmp_path: Path) -> assets.AssetManager:
    """
    Function-scoped AssetManager backed by a throwaway project in tmp_path.
    """
    _create_project(tmp_path)
    return assets.AssetManager(rcds.Project(tmp_path))
31 |
32 |
def test_name_validation() -> None:
    """Only plain relative names are valid; traversal/absolute paths are not."""
    assert assets._is_valid_name("valid")
    for bad_name in (
        "../directory_traversal",
        r"..\win_dir_traversal",
        "/absolute",
        r"C:\win_absolute",
    ):
        assert not assets._is_valid_name(bad_name)
39 |
40 |
def test_list_contexts(am_fn: assets.AssetManager) -> None:
    """Created contexts are enumerable by name."""
    manager = am_fn
    manager.create_context("c1")
    manager.create_context("c2")
    assert set(manager.list_context_names()) == {"c1", "c2"}
46 |
47 |
def test_get_nonexistent(am_fn: assets.AssetManager) -> None:
    """Fetching an asset that was never added raises FileNotFoundError."""
    context = am_fn.create_context("challenge")
    with pytest.raises(FileNotFoundError) as excinfo:
        context.get("nonexistent")
    assert str(excinfo.value) == "Asset not found: 'nonexistent'"
54 |
55 |
def test_create_from_disk(datadir: Path, am_fn: assets.AssetManager) -> None:
    """Files added from disk are exposed as symlinks to the originals."""
    manager = am_fn
    context = manager.create_context("challenge")
    source = datadir / "file1"
    txn = context.transaction()
    txn.add_file("file1", source)
    txn.commit()
    assert set(context.ls()) == {"file1"}
    asset = context.get("file1")
    assert asset.exists()
    assert asset.is_symlink()
    assert asset.resolve() == source.resolve()
    # A second context object over the same name sees the committed state.
    reopened = manager.create_context("challenge")
    assert set(reopened.ls()) == {"file1"}
70 |
71 |
def test_create_from_io(am_fn: assets.AssetManager) -> None:
    """Assets can be added from a binary file-like object."""
    manager = am_fn
    context = manager.create_context("challenge")
    payload = b"abcd"
    txn = context.transaction()
    txn.add("file", time.time(), io.BytesIO(payload))
    txn.commit()
    assert set(context.ls()) == {"file"}
    with context.get("file").open("rb") as fd:
        assert fd.read() == payload
    reopened = manager.create_context("challenge")
    assert set(reopened.ls()) == {"file"}
85 |
86 |
def test_create_from_bytes(am_fn: assets.AssetManager) -> None:
    """Assets can be added from a literal bytes value."""
    manager = am_fn
    context = manager.create_context("challenge")
    payload = b"abcd"
    txn = context.transaction()
    txn.add("file", time.time(), payload)
    txn.commit()
    assert set(context.ls()) == {"file"}
    with context.get("file").open("rb") as fd:
        assert fd.read() == payload
    reopened = manager.create_context("challenge")
    assert set(reopened.ls()) == {"file"}
100 |
101 |
def test_create_from_thunk(am_fn: assets.AssetManager) -> None:
    """Assets can be added from a zero-argument callable producing bytes."""
    manager = am_fn
    context = manager.create_context("challenge")
    payload = b"abcd"
    txn = context.transaction()
    txn.add("file", time.time(), lambda: payload)
    txn.commit()
    assert set(context.ls()) == {"file"}
    with context.get("file").open("rb") as fd:
        assert fd.read() == payload
    reopened = manager.create_context("challenge")
    assert set(reopened.ls()) == {"file"}
115 |
116 |
def test_create_from_multiple_literals(am_fn: assets.AssetManager) -> None:
    """Several literal assets can be committed in a single transaction."""
    manager = am_fn
    context = manager.create_context("challenge")
    payloads = {"file1": b"abcd", "file2": b"wxyz"}
    txn = context.transaction()
    for name, data in payloads.items():
        txn.add(name, time.time(), data)
    txn.commit()
    assert set(context.ls()) == set(payloads)
    for name, data in payloads.items():
        with context.get(name).open("rb") as fd:
            assert fd.read() == data
    reopened = manager.create_context("challenge")
    assert set(reopened.ls()) == set(payloads)
133 |
134 |
def test_transaction_clear(datadir: Path, am_fn: assets.AssetManager) -> None:
    """Each committed transaction replaces the context's previous contents."""
    manager = am_fn
    context = manager.create_context("challenge")
    first_txn = context.transaction()
    first_txn.add_file("file1", datadir / "file1")
    first_txn.commit()
    second_txn = context.transaction()
    second_txn.add_file("file2", datadir / "file2")
    second_txn.commit()
    # Only the most recent transaction's files remain.
    assert set(context.ls()) == {"file2"}
    reopened = manager.create_context("challenge")
    assert set(reopened.ls()) == {"file2"}
147 |
148 |
def test_updates_when_newer(am_fn: assets.AssetManager) -> None:
    """A thunk with a newer mtime than the stored asset is evaluated."""
    context = am_fn.create_context("challenge")
    payload = b"abcd"

    txn = context.transaction()
    txn.add("file", 1, payload)
    txn.commit()

    thunk = mock.Mock(return_value=payload)
    txn = context.transaction()
    txn.add("file", 2, thunk)  # mtime 2 > stored 1 -> must re-materialize
    txn.commit()

    thunk.assert_called()
165 |
166 |
def test_does_not_update_when_older(am_fn: assets.AssetManager) -> None:
    """A thunk with an older mtime than the stored asset is skipped."""
    context = am_fn.create_context("challenge")
    payload = b"abcd"

    txn = context.transaction()
    txn.add("file", 2, payload)
    txn.commit()

    thunk = mock.Mock(return_value=payload)
    txn = context.transaction()
    txn.add("file", 1, thunk)  # mtime 1 < stored 2 -> cached copy kept
    txn.commit()

    thunk.assert_not_called()
183 |
184 |
def test_context_clear(datadir: Path, am_fn: assets.AssetManager) -> None:
    """clear() empties the context, and the emptiness persists on reopen."""
    manager = am_fn
    context = manager.create_context("challenge")
    txn = context.transaction()
    txn.add_file("file1", datadir / "file1")
    txn.add_file("file2", datadir / "file2")
    txn.add("file3", time.time(), b"abcd")
    txn.commit()
    assert list(context.ls())
    context.clear()
    assert not list(context.ls())
    reopened = manager.create_context("challenge")
    assert not list(reopened.ls())
198 |
199 |
def test_disallow_concurrent_transaction(am_fn: assets.AssetManager) -> None:
    """A context allows only one open transaction at a time."""
    context = am_fn.create_context("challenge")
    context.transaction()
    with pytest.raises(RuntimeError) as errinfo:
        context.transaction()
    expected = (
        "Attempted to create transaction while one is already in progress"
    )
    assert str(errinfo.value) == expected
210 |
211 |
def test_disallow_add_after_commit(am_fn: assets.AssetManager) -> None:
    """Adding to an already-committed transaction raises RuntimeError."""
    context = am_fn.create_context("challenge")
    txn = context.transaction()
    txn.commit()
    with pytest.raises(RuntimeError) as errinfo:
        txn.add("file", time.time(), b"abcd")
    assert str(errinfo.value) == "This transaction has already been committed"
220 |
221 |
def test_disallow_invalid_file_name(am_fn: assets.AssetManager) -> None:
    """Asset names containing path traversal are rejected by add and get."""
    context = am_fn.create_context("challenge")
    txn = context.transaction()
    with pytest.raises(ValueError) as errinfo:
        txn.add("bad/../name", time.time(), b"abcd")
    assert str(errinfo.value) == "Invalid asset name 'bad/../name'"
    txn.commit()
    with pytest.raises(ValueError) as errinfo:
        context.get("bad/../name")
    assert str(errinfo.value) == "Invalid asset name 'bad/../name'"
233 |
234 |
def test_disallow_invalid_challenge_name(am_fn: assets.AssetManager) -> None:
    """Context names containing path traversal are rejected."""
    with pytest.raises(ValueError) as errinfo:
        am_fn.create_context("bad/../name")
    assert str(errinfo.value) == "Invalid context name 'bad/../name'"
240 |
241 |
def test_disallow_nonexistent_files_add_file(
    datadir: Path, am_fn: assets.AssetManager
) -> None:
    """add_file raises immediately when the source file does not exist."""
    context = am_fn.create_context("challenge")
    txn = context.transaction()
    with pytest.raises(ValueError) as errinfo:
        txn.add_file("file", datadir / "nonexistent")
    assert "Provided file does not exist: " in str(errinfo.value)
251 |
252 |
def test_disallow_directories_files_add_file(
    datadir: Path, am_fn: assets.AssetManager
) -> None:
    """add_file raises immediately when the source path is a directory."""
    context = am_fn.create_context("challenge")
    txn = context.transaction()
    with pytest.raises(ValueError) as errinfo:
        txn.add_file("file", datadir / "dir")
    assert "Provided file does not exist: " in str(errinfo.value)
262 |
263 |
def test_disallow_nonexistent_files_add(
    datadir: Path, am_fn: assets.AssetManager
) -> None:
    """add() defers validation: a missing source path fails at commit time."""
    context = am_fn.create_context("challenge")
    txn = context.transaction()
    txn.add("file", time.time(), datadir / "nonexistent")
    with pytest.raises(ValueError) as errinfo:
        txn.commit()
    assert "Provided file does not exist: " in str(errinfo.value)
274 |
275 |
def test_disallow_directories_files_add(
    datadir: Path, am_fn: assets.AssetManager
) -> None:
    """add() defers validation: a directory source path fails at commit time."""
    context = am_fn.create_context("challenge")
    txn = context.transaction()
    txn.add("file", time.time(), datadir / "dir")
    with pytest.raises(ValueError) as errinfo:
        txn.commit()
    assert "Provided file does not exist: " in str(errinfo.value)
286 |
287 |
class TestCacheErrorRecovery:
    """Exercise sync(check=True) against a tampered cache directory."""

    def test_deleted_asset(self, datadir: Path, am_fn: assets.AssetManager) -> None:
        """A tracked cache entry that vanished on disk is a hard error."""
        context = am_fn.create_context("challenge")
        txn = context.transaction()
        txn.add_file("file1", datadir / "file1")
        txn.commit()
        # Simulate external deletion of a tracked cache entry.
        (context._root / "files" / "file1").unlink()
        with pytest.raises(RuntimeError) as errinfo:
            context.sync(check=True)
        assert "Cache item missing: " in str(errinfo.value)

    def test_extra_file(self, datadir: Path, am_fn: assets.AssetManager) -> None:
        """An untracked file in the cache warns and is removed."""
        context = am_fn.create_context("challenge")
        stray = context._root / "files" / "file1"
        with stray.open("w") as fd:
            fd.write("abcd")
        with pytest.warns(RuntimeWarning, match=r"^Unexpected item found in cache: "):
            context.sync(check=True)
        assert not stray.exists()

    def test_extra_dir(self, datadir: Path, am_fn: assets.AssetManager) -> None:
        """An untracked directory in the cache warns and is removed."""
        context = am_fn.create_context("challenge")
        stray = context._root / "files" / "dir1"
        stray.mkdir()
        with pytest.warns(RuntimeWarning, match=r"^Unexpected item found in cache: "):
            context.sync(check=True)
        assert not stray.exists()

    def test_broken_link(self, datadir: Path, am_fn: assets.AssetManager) -> None:
        """A dangling cached entry does not block a later commit over it."""
        context = am_fn.create_context("challenge")
        txn = context.transaction()
        txn.add_file("file1", datadir / "file1")
        txn.commit()
        # Delete the backing file so the cached entry dangles, then verify a
        # new context can still commit a replacement under the same name.
        (datadir / "file1").unlink()
        context = am_fn.create_context("challenge")
        txn = context.transaction()
        txn.add_file("file1", datadir / "file2")
        txn.commit()
330 |
331 |
class TestInternals:
    """Tests for the private _add/_rm bookkeeping helpers."""

    def test_add_remove(self, am_fn: assets.AssetManager) -> None:
        """_add registers a name; _rm unregisters it."""
        context = am_fn.create_context("challenge")
        context._add("file")
        assert "file" in context.ls()
        context._rm("file")
        assert "file" not in context.ls()

    def test_add_existing_raises(self, am_fn: assets.AssetManager) -> None:
        """_add without force refuses to overwrite an existing asset."""
        context = am_fn.create_context("challenge")
        context._add("file", force=True)
        with pytest.raises(FileExistsError) as errinfo:
            context._add("file", force=False)
        assert str(errinfo.value) == "Asset already exists: 'file'"

    def test_force_add_existing(self, am_fn: assets.AssetManager) -> None:
        """_add with force silently overwrites an existing asset."""
        context = am_fn.create_context("challenge")
        context._add("file", force=True)
        context._add("file", force=True)
        assert "file" in context.ls()

    def test_remove_nonexistent_raises(self, am_fn: assets.AssetManager) -> None:
        """_rm without force raises for a name that was never added."""
        context = am_fn.create_context("challenge")
        with pytest.raises(FileNotFoundError) as errinfo:
            context._rm("file", force=False)
        assert str(errinfo.value) == "Asset not found: 'file'"

    def test_force_remove_nonexistent(self, am_fn: assets.AssetManager) -> None:
        """_rm with force is a no-op for a name that was never added."""
        context = am_fn.create_context("challenge")
        context._rm("file", force=True)
        assert "file" not in context.ls()
368 |
--------------------------------------------------------------------------------
/tests/project/test_assets/dir/.dir:
--------------------------------------------------------------------------------
1 | This file is here to force Git to record the directory it is in
2 |
--------------------------------------------------------------------------------
/tests/project/test_assets/file1:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/project/test_assets/file2:
--------------------------------------------------------------------------------
1 | abcd
2 |
--------------------------------------------------------------------------------
/tests/util/test_deep_merge.py:
--------------------------------------------------------------------------------
1 | from rcds.util.deep_merge import deep_merge
2 |
3 |
def test_basic_merge() -> None:
    """deep_merge merges nested dicts in place; later values win on conflict."""
    dst = {1: {1: 1, 2: 2}}
    src = {1: {1: 2, 3: 3}}
    merged = {1: {1: 2, 2: 2, 3: 3}}
    assert deep_merge(dst, src) == merged
    # The first argument is mutated to hold the result; the source is not.
    assert dst == merged
    assert src == {1: {1: 2, 3: 3}}
11 |
12 |
def test_override_dict_with_other_type() -> None:
    """A non-dict value in the source replaces a dict in the destination."""
    dst = {1: {1: 1}}
    src = {1: 2}
    expected = {1: 2}
    assert deep_merge(dst, src) == expected
    # Destination is mutated in place; source is untouched.
    assert dst == expected
    assert src == {1: 2}
20 |
21 |
def test_override_other_type_with_dict() -> None:
    """A dict value in the source replaces a non-dict in the destination."""
    dst = {1: 2}
    src = {1: {1: 1}}
    expected = {1: {1: 1}}
    assert deep_merge(dst, src) == expected
    # Destination is mutated in place; source is untouched.
    assert dst == expected
    assert src == {1: {1: 1}}
29 |
30 |
def test_merge_multiple() -> None:
    """With several sources, later arguments take precedence over earlier ones."""
    dst = {1: {1: 1}}
    mid = {1: {1: 2, 2: 2}}
    last = {1: {1: 3, 3: 3}}
    expected = {1: {1: 3, 2: 2, 3: 3}}
    assert deep_merge(dst, mid, last) == expected
    # Only the first argument is mutated; every source keeps its value.
    assert dst == expected
    assert mid == {1: {1: 2, 2: 2}}
    assert last == {1: {1: 3, 3: 3}}
40 |
41 |
def test_nonmutating_merge() -> None:
    """Merging into a throwaway empty dict leaves every source untouched."""
    a = {1: {1: 1}}
    b = {1: {1: 2, 2: {3: 3}}}
    c = {1: {1: 3, 2: {3: 4}}}
    expected = {1: {1: 3, 2: {3: 4}}}
    assert deep_merge(dict(), a, b, c) == expected
    # No source dict (including nested dicts) is modified.
    assert a == {1: {1: 1}}
    assert b == {1: {1: 2, 2: {3: 3}}}
    assert c == {1: {1: 3, 2: {3: 4}}}
51 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
[tox]
# PEP 517 isolated builds are required for the Poetry-managed pyproject.toml.
isolated_build = true
# Python versions exercised by CI.
envlist = py36, py37, py38

[testenv]
# Allow invoking the externally-installed `poetry` binary from inside the env.
# NOTE(review): `whitelist_externals` was renamed `allowlist_externals` in
# tox 3.18 and the old name was removed in tox 4 -- confirm the tox version
# this project pins before upgrading.
whitelist_externals = poetry
commands =
    poetry install -v
    poetry run pytest tests/
--------------------------------------------------------------------------------