├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── assets ├── create.html ├── create.js ├── update.html ├── update.js ├── view.html └── view.js ├── config.json ├── container_manager.py ├── image-readme ├── 1.png ├── 2.png ├── 3.png ├── 4.png ├── demo.gif ├── http.png ├── main.png ├── manage.png └── tcp.png ├── models.py ├── settings.json └── templates ├── container_dashboard.html └── container_settings.html /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Phan Nhat 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 10 | 11 | 12 | 13 | 14 | 21 | 22 | 23 |
24 |
25 | 26 |

CTFd Docker Containers Plugin

27 |

28 | A plugin that can create containerized challenges for your CTF contest 29 |
30 |

31 |
32 | 33 | 34 | 35 | 36 |
37 | Table of Contents 38 |
    39 |
  1. 40 | Getting Started 41 | 45 |
  2. 46 |
  3. Usage
  4. 47 |
  5. Demo
  6. 48 |
  7. Roadmap
  8. 49 |
  9. License
  10. 50 |
  11. Contact
  12. 51 |
52 |
53 | 54 | 55 | 56 | ## Getting Started 57 | 58 | This is an example of how you may give instructions on setting up your project locally. 59 | To get a local copy up and running follow these simple example steps. 60 | 61 | ### Prerequisites 62 | 63 | To use this plugin you'll need 64 | 65 | - Know how to host CTFd w Docker 66 | - Know how to use Docker 67 | 68 | ### Installation 69 | 70 | 71 | 1. Map docker socket into CTFd container by modifying the `docker-compose.yml` file: 72 | ```docker 73 | services: 74 | ctfd: 75 | ... 76 | volumes: 77 | ... 78 | - /var/run/docker.sock:/var/run/docker.sock 79 | ... 80 | ``` 81 | 2. Clone this repository 82 | 83 | 3. Rename "CTFd-Docker-Plugin" to "containers" 84 | 85 | 4. Place `containers` folder inside `CTFd/plugins` directory 86 | 87 |

(back to top)

88 | 89 | 90 | 91 | ## Usage 92 | 93 | 1. Connect to Docker daemon: 94 | 95 | If CTFd and the challenges are hosted on the same machine, you just need to go to the plugin settings page `/containers/settings` and fill in everything you need except the `Base URL` field. 96 | 97 | ![](./image-readme/1.png) 98 | 99 | If you host CTFd and the challenges on different machines, you need to follow the instructions on that page. **Note: remote-host support is currently unreliable; a fix is planned.** 100 | 101 | 2. Create the challenge: 102 | - Select `container` type and fill in all the required fields 103 | 104 | ![](./image-readme/2.png) 105 | 106 | - If you want regular (static) scoring for the challenge, set the maximum and minimum values to the same amount and the decay to zero. 107 | 108 | - The image field allows you to select a Docker image already present on the machine 109 | 110 | ![](./image-readme/3.png) 111 | 112 | - The `Connect type` field allows you to choose how players connect to the challenge, such as via web or TCP 113 | 114 | ![](./image-readme/4.png) 115 | 116 |

(back to top)

117 | 118 | ## Demo 119 | 120 | Admin can manage created containers, containers can also be filtered by challenge or player 121 | 122 | ![](./image-readme/manage.png) 123 | 124 | **Challenge view** 125 | Web | TCP 126 | :-------------------------:|:-------------------------: 127 | ![](./image-readme/http.png) | ![](./image-readme/tcp.png) 128 | 129 | ![](./image-readme/demo.gif) 130 | 131 | 132 |

(back to top)

133 | 134 | 135 | ## Roadmap 136 | 137 | - [x] Make the plugin work in user mode 138 | - [x] Make the admin dashboard able to filter by team/user or challenge 139 | - [x] Make the plugin work with the core-beta theme 140 | 141 | 142 | See the [open issues](https://github.com/phannhat17/CTFd-Docker-Plugin/issues) for a full list of proposed features (and known issues). 143 | 144 |

(back to top)

145 | 146 | 147 | 148 | ## License 149 | 150 | Distributed under the MIT License. See `LICENSE` for more information. 151 | 152 | This project is an upgraded version of [andyjsmith's plugin](https://github.com/andyjsmith/CTFd-Docker-Plugin). I haven't worked much with licenses on GitHub, so if anything here violates the original license, please contact me at the email below and I will respond within 2 days! 153 | 154 | Thanks again [andyjsmith](https://github.com/andyjsmith) for creating this base plugin! 155 | 156 |

(back to top)

157 | 158 | 159 | 160 | ## Contact 161 | 162 | Phan Nhat - @Discord ftpotato - contact@phannhat.id.vn 163 | 164 | Project Link: [https://github.com/phannhat17/CTFd-Docker-Plugin](https://github.com/phannhat17/CTFd-Docker-Plugin) 165 | 166 |

(back to top)

167 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import time 4 | import json 5 | import datetime 6 | import math 7 | 8 | from flask import Blueprint, request, Flask, render_template, url_for, redirect, flash 9 | 10 | from CTFd.models import db, Solves 11 | from CTFd.plugins import register_plugin_assets_directory 12 | from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge 13 | from CTFd.utils.decorators import authed_only, admins_only, during_ctf_time_only, ratelimit, require_verified_emails 14 | from CTFd.utils.user import get_current_user 15 | from CTFd.utils.modes import get_model 16 | from CTFd.utils import get_config 17 | 18 | from .models import ContainerChallengeModel, ContainerInfoModel, ContainerSettingsModel 19 | from .container_manager import ContainerManager, ContainerException 20 | 21 | def get_settings_path(): 22 | import os 23 | return os.path.join(os.path.dirname(os.path.abspath(__file__)), "settings.json") 24 | 25 | settings = json.load(open(get_settings_path())) 26 | 27 | USERS_MODE = settings["modes"]["USERS_MODE"] 28 | TEAMS_MODE = settings["modes"]["TEAMS_MODE"] 29 | 30 | 31 | class ContainerChallenge(BaseChallenge): 32 | id = settings["plugin-info"]["id"] # Unique identifier used to register challenges 33 | name = settings["plugin-info"]["name"] # Name of a challenge type 34 | templates = settings["plugin-info"]["templates"] # Handlebars templates used for each aspect of challenge editing & viewing 35 | scripts = settings["plugin-info"]["scripts"] # Scripts that are loaded when a template is loaded 36 | # Route at which files are accessible. 
This must be registered using register_plugin_assets_directory() 37 | route = "/plugins/containers/assets/" 38 | 39 | challenge_model = ContainerChallengeModel 40 | 41 | @classmethod 42 | def read(cls, challenge): 43 | """ 44 | This method is in used to access the data of a challenge in a format processable by the front end. 45 | 46 | :param challenge: 47 | :return: Challenge object, data dictionary to be returned to the user 48 | """ 49 | data = { 50 | "id": challenge.id, 51 | "name": challenge.name, 52 | "value": challenge.value, 53 | "image": challenge.image, 54 | "port": challenge.port, 55 | "command": challenge.command, 56 | "ctype": challenge.ctype, 57 | "ssh_username": challenge.ssh_username, 58 | "ssh_password": challenge.ssh_password, 59 | "initial": challenge.initial, 60 | "decay": challenge.decay, 61 | "minimum": challenge.minimum, 62 | "description": challenge.description, 63 | "connection_info": challenge.connection_info, 64 | "category": challenge.category, 65 | "state": challenge.state, 66 | "max_attempts": challenge.max_attempts, 67 | "type": challenge.type, 68 | "type_data": { 69 | "id": cls.id, 70 | "name": cls.name, 71 | "templates": cls.templates, 72 | "scripts": cls.scripts, 73 | }, 74 | } 75 | return data 76 | 77 | @classmethod 78 | def calculate_value(cls, challenge): 79 | Model = get_model() 80 | 81 | solve_count = ( 82 | Solves.query.join(Model, Solves.account_id == Model.id) 83 | .filter( 84 | Solves.challenge_id == challenge.id, 85 | Model.hidden == False, 86 | Model.banned == False, 87 | ) 88 | .count() 89 | ) 90 | 91 | # If the solve count is 0 we shouldn't manipulate the solve count to 92 | # let the math update back to normal 93 | if solve_count != 0: 94 | # We subtract -1 to allow the first solver to get max point value 95 | solve_count -= 1 96 | 97 | # It is important that this calculation takes into account floats. 
98 | # Hence this file uses from __future__ import division 99 | value = ( 100 | ((challenge.minimum - challenge.initial) / (challenge.decay ** 2)) 101 | * (solve_count ** 2) 102 | ) + challenge.initial 103 | 104 | value = math.ceil(value) 105 | 106 | if value < challenge.minimum: 107 | value = challenge.minimum 108 | 109 | challenge.value = value 110 | db.session.commit() 111 | return challenge 112 | 113 | @classmethod 114 | def update(cls, challenge, request): 115 | """ 116 | This method is used to update the information associated with a challenge. This should be kept strictly to the 117 | Challenges table and any child tables. 118 | :param challenge: 119 | :param request: 120 | :return: 121 | """ 122 | data = request.form or request.get_json() 123 | 124 | for attr, value in data.items(): 125 | # We need to set these to floats so that the next operations don't operate on strings 126 | if attr in ("initial", "minimum", "decay"): 127 | value = float(value) 128 | setattr(challenge, attr, value) 129 | 130 | return ContainerChallenge.calculate_value(challenge) 131 | 132 | @classmethod 133 | def solve(cls, user, team, challenge, request): 134 | super().solve(user, team, challenge, request) 135 | 136 | ContainerChallenge.calculate_value(challenge) 137 | 138 | 139 | def settings_to_dict(settings): 140 | return { 141 | setting.key: setting.value for setting in settings 142 | } 143 | 144 | def is_team_mode(): 145 | mode = get_config("user_mode") 146 | if mode == TEAMS_MODE: 147 | return True 148 | elif mode == USERS_MODE: 149 | return False 150 | else: 151 | return None 152 | 153 | def load(app: Flask): 154 | app.db.create_all() 155 | CHALLENGE_CLASSES["container"] = ContainerChallenge 156 | register_plugin_assets_directory( 157 | app, base_path="/plugins/containers/assets/" 158 | ) 159 | 160 | container_settings = settings_to_dict(ContainerSettingsModel.query.all()) 161 | container_manager = ContainerManager(container_settings, app) 162 | 163 | containers_bp = Blueprint( 
164 | 'containers', __name__, template_folder='templates', static_folder='assets', url_prefix='/containers') 165 | 166 | @containers_bp.app_template_filter("format_time") 167 | def format_time_filter(unix_seconds): 168 | dt = datetime.datetime.fromtimestamp(unix_seconds, tz=datetime.datetime.now( 169 | datetime.timezone.utc).astimezone().tzinfo) 170 | return dt.strftime("%H:%M:%S %d/%m/%Y") 171 | 172 | def kill_container(container_id): 173 | container: ContainerInfoModel = ContainerInfoModel.query.filter_by( 174 | container_id=container_id).first() 175 | 176 | try: 177 | container_manager.kill_container(container_id) 178 | except ContainerException: 179 | return {"error": "Docker is not initialized. Please check your settings."} 180 | 181 | db.session.delete(container) 182 | 183 | db.session.commit() 184 | return {"success": "Container killed"} 185 | 186 | def renew_container(chal_id, xid, is_team): 187 | # Get the requested challenge 188 | challenge = ContainerChallenge.challenge_model.query.filter_by( 189 | id=chal_id).first() 190 | 191 | # Make sure the challenge exists and is a container challenge 192 | if challenge is None: 193 | return {"error": "Challenge not found"}, 400 194 | 195 | if is_team is True: 196 | running_containers = ContainerInfoModel.query.filter_by( 197 | challenge_id=challenge.id, team_id=xid) 198 | else: 199 | running_containers = ContainerInfoModel.query.filter_by( 200 | challenge_id=challenge.id, user_id=xid) 201 | running_container = running_containers.first() 202 | 203 | if running_container is None: 204 | return {"error": "Container not found, try resetting the container."} 205 | 206 | try: 207 | running_container.expires = int( 208 | time.time() + container_manager.expiration_seconds) 209 | db.session.commit() 210 | except ContainerException: 211 | return {"error": "Database error occurred, please try again."} 212 | 213 | return {"success": "Container renewed", "expires": running_container.expires, "hostname": 
container_manager.settings.get("docker_hostname", ""), "port": running_container.port, "connect": challenge.ctype} 214 | 215 | def create_container(chal_id, xid, uid, is_team): 216 | # Get the requested challenge 217 | challenge = ContainerChallenge.challenge_model.query.filter_by( 218 | id=chal_id).first() 219 | 220 | # Make sure the challenge exists and is a container challenge 221 | if challenge is None: 222 | return {"error": "Challenge not found"}, 400 223 | 224 | # Check if user already has MAX_CONTAINERS_ALLOWED number running containers. 225 | MAX_CONTAINERS_ALLOWED = settings["vars"]["MAX_CONTAINERS_ALLOWED"] 226 | if not is_team: uid = xid 227 | t_containers = ContainerInfoModel.query.filter_by( 228 | user_id=uid) 229 | 230 | if t_containers.count() >= MAX_CONTAINERS_ALLOWED: 231 | return { "error": f"You can only spawn {MAX_CONTAINERS_ALLOWED} containers at a time. Please stop other containers to continue" }, 500 232 | 233 | # Check for any existing containers for the team 234 | if is_team is True: 235 | running_containers = ContainerInfoModel.query.filter_by( 236 | challenge_id=challenge.id, team_id=xid) 237 | else: 238 | running_containers = ContainerInfoModel.query.filter_by( 239 | challenge_id=challenge.id, user_id=xid) 240 | running_container = running_containers.first() 241 | 242 | # If a container is already running for the team, return it 243 | if running_container: 244 | # Check if Docker says the container is still running before returning it 245 | try: 246 | if container_manager.is_container_running( 247 | running_container.container_id): 248 | return json.dumps({ 249 | "status": "already_running", 250 | "hostname": container_manager.settings.get("docker_hostname", ""), 251 | "port": running_container.port, 252 | "ssh_username": running_container.ssh_username, 253 | "ssh_password": running_container.ssh_password, 254 | "connect": challenge.ctype, 255 | "expires": running_container.expires 256 | }) 257 | else: 258 | # Container is not running, 
it must have died or been killed, 259 | # remove it from the database and create a new one 260 | running_containers.delete() 261 | db.session.commit() 262 | except ContainerException as err: 263 | return {"error": str(err)}, 500 264 | 265 | # Run a new Docker container 266 | try: 267 | created_container = container_manager.create_container( 268 | chal_id, xid, uid, challenge.image, challenge.port, challenge.command, challenge.volumes) 269 | except ContainerException as err: 270 | return {"error": str(err)} 271 | 272 | # Fetch the random port Docker assigned 273 | port = container_manager.get_container_port(created_container.id) 274 | 275 | # Port may be blank if the container failed to start 276 | if port is None: 277 | return json.dumps({ 278 | "status": "error", 279 | "error": "Could not get port" 280 | }) 281 | 282 | expires = int(time.time() + container_manager.expiration_seconds) 283 | 284 | # Insert the new container into the database 285 | if is_team is True: 286 | new_container = ContainerInfoModel( 287 | container_id=created_container.id, 288 | challenge_id=challenge.id, 289 | team_id=xid, 290 | user_id=uid, 291 | port=port, 292 | timestamp=int(time.time()), 293 | expires=expires 294 | ) 295 | else: 296 | new_container = ContainerInfoModel( 297 | container_id=created_container.id, 298 | challenge_id=challenge.id, 299 | user_id=xid, 300 | port=port, 301 | timestamp=int(time.time()), 302 | expires=expires 303 | ) 304 | db.session.add(new_container) 305 | db.session.commit() 306 | 307 | return json.dumps({ 308 | "status": "created", 309 | "hostname": container_manager.settings.get("docker_hostname", ""), 310 | "port": port, 311 | "connect": challenge.ctype, 312 | "expires": expires 313 | }) 314 | 315 | def view_container_info(chal_id, xid, is_team): 316 | # Get the requested challenge 317 | challenge = ContainerChallenge.challenge_model.query.filter_by( 318 | id=chal_id).first() 319 | 320 | # Make sure the challenge exists and is a container challenge 321 | 
if challenge is None: 322 | return {"error": "Challenge not found"}, 400 323 | 324 | # Check for any existing containers for the team 325 | if is_team is True: 326 | running_containers = ContainerInfoModel.query.filter_by( 327 | challenge_id=challenge.id, team_id=xid) 328 | else: 329 | running_containers = ContainerInfoModel.query.filter_by( 330 | challenge_id=challenge.id, user_id=xid) 331 | running_container = running_containers.first() 332 | 333 | # If a container is already running for the team, return it 334 | if running_container: 335 | # Check if Docker says the container is still running before returning it 336 | try: 337 | if container_manager.is_container_running( 338 | running_container.container_id): 339 | return json.dumps({ 340 | "status": "already_running", 341 | "hostname": container_manager.settings.get("docker_hostname", ""), 342 | "port": running_container.port, 343 | "connect": challenge.ctype, 344 | "expires": running_container.expires 345 | }) 346 | else: 347 | # Container is not running, it must have died or been killed, 348 | # remove it from the database and create a new one 349 | running_containers.delete() 350 | db.session.commit() 351 | except ContainerException as err: 352 | return {"error": str(err)}, 500 353 | else: 354 | return {"status": "Suffering hasn't begun"} 355 | 356 | def connect_type(chal_id): 357 | # Get the requested challenge 358 | challenge = ContainerChallenge.challenge_model.query.filter_by( 359 | id=chal_id).first() 360 | 361 | # Make sure the challenge exists and is a container challenge 362 | if challenge is None: 363 | return {"error": "Challenge not found"}, 400 364 | 365 | return json.dumps({ 366 | "status": "Ok", 367 | "connect": challenge.ctype 368 | }) 369 | 370 | @containers_bp.route('/api/get_connect_type/', methods=['GET']) 371 | @authed_only 372 | @during_ctf_time_only 373 | @require_verified_emails 374 | @ratelimit(method="GET", limit=settings["requests"]["limit"], interval=settings["requests"]["limit"]) 
375 | def get_connect_type(challenge_id): 376 | try: 377 | return connect_type(challenge_id) 378 | except ContainerException as err: 379 | return {"error": str(err)}, 500 380 | 381 | @containers_bp.route('/api/view_info', methods=['POST']) 382 | @authed_only 383 | @during_ctf_time_only 384 | @require_verified_emails 385 | @ratelimit(method="POST", limit=settings["requests"]["limit"], interval=settings["requests"]["limit"]) 386 | def route_view_info(): 387 | user = get_current_user() 388 | 389 | # Validate the request 390 | if request.json is None: 391 | return {"error": "Invalid request"}, 400 392 | 393 | if request.json.get("chal_id", None) is None: 394 | return {"error": "No chal_id specified"}, 400 395 | 396 | if user is None: 397 | return {"error": "User not found"}, 400 398 | if user.team is None and is_team_mode() is True: 399 | return {"error": "User not a member of a team"}, 400 400 | 401 | try: 402 | if is_team_mode() is True: 403 | return view_container_info(request.json.get("chal_id"), user.team.id, True) 404 | elif is_team_mode() is False: 405 | return view_container_info(request.json.get("chal_id"), user.id, False) 406 | except ContainerException as err: 407 | return {"error": str(err)}, 500 408 | 409 | @containers_bp.route('/api/request', methods=['POST']) 410 | @authed_only 411 | @during_ctf_time_only 412 | @require_verified_emails 413 | @ratelimit(method="POST", limit=settings["requests"]["limit"], interval=settings["requests"]["limit"]) 414 | def route_request_container(): 415 | user = get_current_user() 416 | 417 | # Validate the request 418 | if request.json is None: 419 | return {"error": "Invalid request"}, 400 420 | 421 | if request.json.get("chal_id", None) is None: 422 | return {"error": "No chal_id specified"}, 400 423 | 424 | if user is None: 425 | return {"error": "User not found"}, 400 426 | if user.team is None and is_team_mode() is True: 427 | return {"error": "User not a member of a team"}, 400 428 | 429 | try: 430 | if is_team_mode() 
is True: 431 | return create_container(request.json.get("chal_id"), user.team.id, user.id,True) 432 | elif is_team_mode() is False: 433 | return create_container(request.json.get("chal_id"), user.id, user.id, False) 434 | except ContainerException as err: 435 | return {"error": str(err)}, 500 436 | 437 | @containers_bp.route('/api/renew', methods=['POST']) 438 | @authed_only 439 | @during_ctf_time_only 440 | @require_verified_emails 441 | @ratelimit(method="POST", limit=settings["requests"]["limit"], interval=settings["requests"]["limit"]) 442 | def route_renew_container(): 443 | user = get_current_user() 444 | 445 | # Validate the request 446 | if request.json is None: 447 | return {"error": "Invalid request"}, 400 448 | 449 | if request.json.get("chal_id", None) is None: 450 | return {"error": "No chal_id specified"}, 400 451 | 452 | if user is None: 453 | return {"error": "User not found"}, 400 454 | if user.team is None and is_team_mode() is True: 455 | return {"error": "User not a member of a team"}, 400 456 | 457 | try: 458 | if is_team_mode() is True: 459 | return renew_container(request.json.get("chal_id"), user.team.id, True) 460 | elif is_team_mode() is False: 461 | return renew_container(request.json.get("chal_id"), user.id, False) 462 | except ContainerException as err: 463 | return {"error": str(err)}, 500 464 | 465 | user = get_current_user() 466 | 467 | # Validate the request 468 | if request.json is None: 469 | return {"error": "Invalid request"}, 400 470 | 471 | if request.json.get("chal_id", None) is None: 472 | return {"error": "No chal_id specified"}, 400 473 | 474 | if user is None: 475 | return {"error": "User not found"}, 400 476 | if user.team is None and is_team_mode() is True: 477 | return {"error": "User not a member of a team"}, 400 478 | 479 | if is_team_mode() is True: 480 | running_container: ContainerInfoModel = ContainerInfoModel.query.filter_by( 481 | challenge_id=request.json.get("chal_id"), team_id=user.team.id).first() 482 | 483 
| if running_container: 484 | kill_container(running_container.container_id) 485 | 486 | return create_container(request.json.get("chal_id"), user.team.id, user.id, True) 487 | elif is_team_mode() is False: 488 | running_container: ContainerInfoModel = ContainerInfoModel.query.filter_by( 489 | challenge_id=request.json.get("chal_id"), team_id=user.id).first() 490 | 491 | if running_container: 492 | kill_container(running_container.container_id) 493 | 494 | return create_container(request.json.get("chal_id"), user.id, None, False) 495 | 496 | @containers_bp.route('/api/stop', methods=['POST']) 497 | @authed_only 498 | @during_ctf_time_only 499 | @require_verified_emails 500 | @ratelimit(method="POST", limit=settings["requests"]["limit"], interval=settings["requests"]["limit"]) 501 | def route_stop_container(): 502 | user = get_current_user() 503 | 504 | # Validate the request 505 | if request.json is None: 506 | return {"error": "Invalid request"}, 400 507 | 508 | if request.json.get("chal_id", None) is None: 509 | return {"error": "No chal_id specified"}, 400 510 | 511 | if user is None: 512 | return {"error": "User not found"}, 400 513 | if user.team is None and is_team_mode() is True: 514 | return {"error": "User not a member of a team"}, 400 515 | 516 | if is_team_mode() is True: 517 | running_container: ContainerInfoModel = ContainerInfoModel.query.filter_by( 518 | challenge_id=request.json.get("chal_id"), team_id=user.team.id).first() 519 | 520 | if running_container: 521 | return kill_container(running_container.container_id) 522 | 523 | return {"error": "No container found"}, 400 524 | elif is_team_mode() is False: 525 | running_container: ContainerInfoModel = ContainerInfoModel.query.filter_by( 526 | challenge_id=request.json.get("chal_id"), user_id=user.id).first() 527 | 528 | if running_container: 529 | return kill_container(running_container.container_id) 530 | 531 | return {"error": "No container found"}, 400 532 | 533 | 534 | 
@containers_bp.route('/api/kill', methods=['POST']) 535 | @admins_only 536 | def route_kill_container(): 537 | if request.json is None: 538 | return {"error": "Invalid request"}, 400 539 | 540 | if request.json.get("container_id", None) is None: 541 | return {"error": "No container_id specified"}, 400 542 | 543 | return kill_container(request.json.get("container_id")) 544 | 545 | @containers_bp.route('/api/purge', methods=['POST']) 546 | @admins_only 547 | def route_purge_containers(): 548 | containers: "list[ContainerInfoModel]" = ContainerInfoModel.query.all() 549 | for container in containers: 550 | try: 551 | kill_container(container.container_id) 552 | except ContainerException: 553 | pass 554 | return {"success": "Purged all containers"}, 200 555 | 556 | @containers_bp.route('/api/images', methods=['GET']) 557 | @admins_only 558 | def route_get_images(): 559 | try: 560 | images = container_manager.get_images() 561 | except ContainerException as err: 562 | return {"error": str(err)} 563 | 564 | return {"images": images} 565 | 566 | @containers_bp.route('/api/settings/update', methods=['POST']) 567 | @admins_only 568 | def route_update_settings(): 569 | if request.form.get("docker_base_url") is None: 570 | return {"error": "Invalid request"}, 400 571 | 572 | if request.form.get("docker_hostname") is None: 573 | return {"error": "Invalid request"}, 400 574 | 575 | if request.form.get("container_expiration") is None: 576 | return {"error": "Invalid request"}, 400 577 | 578 | if request.form.get("container_maxmemory") is None: 579 | return {"error": "Invalid request"}, 400 580 | 581 | if request.form.get("container_maxcpu") is None: 582 | return {"error": "Invalid request"}, 400 583 | 584 | docker_base_url = ContainerSettingsModel.query.filter_by( 585 | key="docker_base_url").first() 586 | 587 | docker_hostname = ContainerSettingsModel.query.filter_by( 588 | key="docker_hostname").first() 589 | 590 | container_expiration = ContainerSettingsModel.query.filter_by( 
591 | key="container_expiration").first() 592 | 593 | container_maxmemory = ContainerSettingsModel.query.filter_by( 594 | key="container_maxmemory").first() 595 | 596 | container_maxcpu = ContainerSettingsModel.query.filter_by( 597 | key="container_maxcpu").first() 598 | 599 | # Create or update 600 | if docker_base_url is None: 601 | # Create 602 | docker_base_url = ContainerSettingsModel( 603 | key="docker_base_url", value=request.form.get("docker_base_url")) 604 | db.session.add(docker_base_url) 605 | else: 606 | # Update 607 | docker_base_url.value = request.form.get("docker_base_url") 608 | 609 | # Create or update 610 | if docker_hostname is None: 611 | # Create 612 | docker_hostname = ContainerSettingsModel( 613 | key="docker_hostname", value=request.form.get("docker_hostname")) 614 | db.session.add(docker_hostname) 615 | else: 616 | # Update 617 | docker_hostname.value = request.form.get("docker_hostname") 618 | 619 | # Create or update 620 | if container_expiration is None: 621 | # Create 622 | container_expiration = ContainerSettingsModel( 623 | key="container_expiration", value=request.form.get("container_expiration")) 624 | db.session.add(container_expiration) 625 | else: 626 | # Update 627 | container_expiration.value = request.form.get( 628 | "container_expiration") 629 | 630 | # Create or update 631 | if container_maxmemory is None: 632 | # Create 633 | container_maxmemory = ContainerSettingsModel( 634 | key="container_maxmemory", value=request.form.get("container_maxmemory")) 635 | db.session.add(container_maxmemory) 636 | else: 637 | # Update 638 | container_maxmemory.value = request.form.get("container_maxmemory") 639 | 640 | # Create or update 641 | if container_maxcpu is None: 642 | # Create 643 | container_maxcpu = ContainerSettingsModel( 644 | key="container_maxcpu", value=request.form.get("container_maxcpu")) 645 | db.session.add(container_maxcpu) 646 | else: 647 | # Update 648 | container_maxcpu.value = request.form.get("container_maxcpu") 
649 | 650 | db.session.commit() 651 | 652 | container_manager.settings = settings_to_dict( 653 | ContainerSettingsModel.query.all()) 654 | 655 | if container_manager.settings.get("docker_base_url") is not None: 656 | try: 657 | container_manager.initialize_connection( 658 | container_manager.settings, app) 659 | except ContainerException as err: 660 | flash(str(err), "error") 661 | return redirect(url_for(".route_containers_settings")) 662 | 663 | return redirect(url_for(".route_containers_dashboard")) 664 | 665 | @containers_bp.route('/dashboard', methods=['GET']) 666 | @admins_only 667 | def route_containers_dashboard(): 668 | running_containers = ContainerInfoModel.query.order_by( 669 | ContainerInfoModel.timestamp.desc()).all() 670 | 671 | connected = False 672 | try: 673 | connected = container_manager.is_connected() 674 | except ContainerException: 675 | pass 676 | 677 | for i, container in enumerate(running_containers): 678 | try: 679 | running_containers[i].is_running = container_manager.is_container_running( 680 | container.container_id) 681 | except ContainerException: 682 | running_containers[i].is_running = False 683 | 684 | return render_template('container_dashboard.html', containers=running_containers, connected=connected) 685 | 686 | @containers_bp.route('/api/running_containers', methods=['GET']) 687 | @admins_only 688 | def route_get_running_containers(): 689 | running_containers = ContainerInfoModel.query.order_by( 690 | ContainerInfoModel.timestamp.desc()).all() 691 | 692 | connected = False 693 | try: 694 | connected = container_manager.is_connected() 695 | except ContainerException: 696 | pass 697 | 698 | # Create lists to store unique teams and challenges 699 | unique_teams = set() 700 | unique_challenges = set() 701 | 702 | for i, container in enumerate(running_containers): 703 | try: 704 | running_containers[i].is_running = container_manager.is_container_running( 705 | container.container_id) 706 | except ContainerException: 707 | 
running_containers[i].is_running = False 708 | 709 | # Add team and challenge to the unique sets 710 | if is_team_mode() is True: 711 | unique_teams.add(f"{container.team.name} [{container.team_id}]") 712 | else: 713 | unique_teams.add(f"{container.user.name} [{container.user_id}]") 714 | unique_challenges.add(f"{container.challenge.name} [{container.challenge_id}]") 715 | 716 | # Convert unique sets to lists 717 | unique_teams_list = list(unique_teams) 718 | unique_challenges_list = list(unique_challenges) 719 | 720 | # Create a list of dictionaries containing running_containers data 721 | running_containers_data = [] 722 | for container in running_containers: 723 | if is_team_mode() is True: 724 | container_data = { 725 | "container_id": container.container_id, 726 | "image": container.challenge.image, 727 | "challenge": f"{container.challenge.name} [{container.challenge_id}]", 728 | "team": f"{container.team.name} [{container.team_id}]", 729 | "user": f"{container.user.name} [{container.user_id}]", 730 | "port": container.port, 731 | "created": container.timestamp, 732 | "expires": container.expires, 733 | "is_running": container.is_running 734 | } 735 | else: 736 | container_data = { 737 | "container_id": container.container_id, 738 | "image": container.challenge.image, 739 | "challenge": f"{container.challenge.name} [{container.challenge_id}]", 740 | "user": f"{container.user.name} [{container.user_id}]", 741 | "port": container.port, 742 | "created": container.timestamp, 743 | "expires": container.expires, 744 | "is_running": container.is_running 745 | } 746 | running_containers_data.append(container_data) 747 | 748 | # Create a JSON response containing running_containers_data, unique teams, and unique challenges 749 | response_data = { 750 | "containers": running_containers_data, 751 | "connected": connected, 752 | "teams": unique_teams_list, 753 | "challenges": unique_challenges_list 754 | } 755 | 756 | # Return the JSON response 757 | return 
json.dumps(response_data) 758 | 759 | 760 | @containers_bp.route('/settings', methods=['GET']) 761 | @admins_only 762 | def route_containers_settings(): 763 | running_containers = ContainerInfoModel.query.order_by( 764 | ContainerInfoModel.timestamp.desc()).all() 765 | return render_template('container_settings.html', settings=container_manager.settings) 766 | 767 | app.register_blueprint(containers_bp) 768 | -------------------------------------------------------------------------------- /assets/create.html: -------------------------------------------------------------------------------- 1 | {% extends "admin/challenges/create.html" %} 2 | 3 | {% block header %} 4 | 7 | {% endblock %} 8 | 9 | {% block value %} 10 |
11 | 17 | 19 |
20 |
21 | 26 | 27 | 28 |
29 | 30 |
31 | 36 | 37 |
38 | 39 |
40 | 45 | 46 |
47 | 48 |
49 | 55 | 59 |
60 | 61 |
62 | 68 | 75 |
76 | 77 |
78 | 84 | 85 |
86 | 87 |
88 | 94 | 95 |
96 | 97 |
98 | 105 | 106 |
107 | {% endblock %} 108 | 109 | {% block type %} 110 | 111 | {% endblock %} -------------------------------------------------------------------------------- /assets/create.js: -------------------------------------------------------------------------------- 1 | CTFd.plugin.run((_CTFd) => { 2 | const $ = _CTFd.lib.$; 3 | const md = _CTFd.lib.markdown(); 4 | }); 5 | 6 | var containerImage = document.getElementById("container-image"); 7 | var containerImageDefault = document.getElementById("container-image-default"); 8 | var path = "/containers/api/images"; 9 | 10 | fetch(path, { 11 | method: "GET", 12 | headers: { 13 | "Accept": "application/json", 14 | "CSRF-Token": init.csrfNonce 15 | } 16 | }) 17 | .then(response => { 18 | if (!response.ok) { 19 | // Handle error response 20 | return Promise.reject("Error fetching data"); 21 | } 22 | return response.json(); 23 | }) 24 | .then(data => { 25 | if (data.error != undefined) { 26 | // Error 27 | containerImageDefault.innerHTML = data.error; 28 | } else { 29 | // Success 30 | for (var i = 0; i < data.images.length; i++) { 31 | var opt = document.createElement("option"); 32 | opt.value = data.images[i]; 33 | opt.innerHTML = data.images[i]; 34 | containerImage.appendChild(opt); 35 | } 36 | containerImageDefault.innerHTML = "Choose an image..."; 37 | containerImage.removeAttribute("disabled"); 38 | } 39 | console.log(data); 40 | }) 41 | .catch(error => { 42 | // Handle fetch error 43 | console.error(error); 44 | }); 45 | -------------------------------------------------------------------------------- /assets/update.html: -------------------------------------------------------------------------------- 1 | {% extends "admin/challenges/update.html" %} 2 | 3 | {% block connection_info %} 4 |
5 | 11 | 13 |
14 | {% endblock %} 15 | 16 | {% block value %} 17 | 18 |
19 | 24 | 25 |
26 | 27 |
28 | 33 | 34 |
35 | 36 |
37 | 42 | 43 |
44 | 45 |
46 | 51 | 52 |
53 | 54 |
55 | 58 | 64 | 68 |
69 | 70 |
71 | 77 | 84 |
85 | 86 | {% if challenge.ctype == "ssh" %} 87 |
88 | 94 | 95 |
96 | 97 |
98 | 104 | 105 |
106 | {% endif %} 107 | 108 |
109 | 115 | 116 |
117 | 118 |
119 | 125 | 126 |
127 | 128 |
129 | 136 | 137 |
138 | {% endblock %} -------------------------------------------------------------------------------- /assets/update.js: -------------------------------------------------------------------------------- 1 | var containerImage = document.getElementById("container-image"); 2 | var containerImageDefault = document.getElementById("container-image-default"); 3 | var path = "/containers/api/images"; 4 | 5 | fetch(path, { 6 | method: "GET", 7 | headers: { 8 | "Accept": "application/json", 9 | "CSRF-Token": init.csrfNonce 10 | } 11 | }) 12 | .then(response => response.json()) 13 | .then(data => { 14 | if (data.error !== undefined) { 15 | // Error 16 | containerImageDefault.innerHTML = data.error; 17 | } else { 18 | // Success 19 | for (var i = 0; i < data.images.length; i++) { 20 | var opt = document.createElement("option"); 21 | opt.value = data.images[i]; 22 | opt.innerHTML = data.images[i]; 23 | containerImage.appendChild(opt); 24 | } 25 | containerImageDefault.innerHTML = "Choose an image..."; 26 | containerImage.removeAttribute("disabled"); 27 | containerImage.value = container_image_selected; 28 | } 29 | }) 30 | .catch(error => { 31 | console.error("Fetch error:", error); 32 | }); 33 | 34 | var currentURL = window.location.href; 35 | var match = currentURL.match(/\/challenges\/(\d+)/); 36 | 37 | if (match && match[1]) { 38 | var challenge_id = parseInt(match[1]); 39 | 40 | var connectType = document.getElementById("connect-type"); 41 | var connectTypeDefault = document.getElementById("connect-type-default"); 42 | 43 | var connectTypeEndpoint = "/containers/api/get_connect_type/" + challenge_id; 44 | 45 | fetch(connectTypeEndpoint, { 46 | method: "GET", 47 | headers: { 48 | "Accept": "application/json", 49 | "CSRF-Token": init.csrfNonce 50 | } 51 | }) 52 | .then(response => response.json()) 53 | .then(connectTypeData => { 54 | if (connectTypeData.error !== undefined) { 55 | console.error("Error:", connectTypeData.error); 56 | } else { 57 | var connectTypeValue = 
connectTypeData.connect; 58 | connectTypeDefault.innerHTML = "Choose..."; 59 | connectType.removeAttribute("disabled"); 60 | connectType.value = connectTypeValue; 61 | } 62 | }) 63 | .catch(error => { 64 | console.error("Fetch error:", error); 65 | }); 66 | } else { 67 | console.error("Challenge ID not found in the URL."); 68 | } 69 | -------------------------------------------------------------------------------- /assets/view.html: -------------------------------------------------------------------------------- 1 | {% extends "challenge.html" %} 2 | 3 | {% block connection_info %} 4 |
5 | 6 |
7 | 8 | 11 | 14 | 17 | 18 |
19 | {% endblock %} -------------------------------------------------------------------------------- /assets/view.js: -------------------------------------------------------------------------------- 1 | CTFd._internal.challenge.data = undefined; 2 | 3 | CTFd._internal.challenge.renderer = null; 4 | 5 | CTFd._internal.challenge.preRender = function () {}; 6 | 7 | CTFd._internal.challenge.render = null; 8 | 9 | CTFd._internal.challenge.postRender = function () {}; 10 | 11 | CTFd._internal.challenge.submit = function (preview) { 12 | var challenge_id = parseInt(CTFd.lib.$("#challenge-id").val()); 13 | var submission = CTFd.lib.$("#challenge-input").val(); 14 | 15 | let alert = resetAlert(); 16 | 17 | var body = { 18 | challenge_id: challenge_id, 19 | submission: submission, 20 | }; 21 | var params = {}; 22 | if (preview) { 23 | params["preview"] = true; 24 | } 25 | 26 | return CTFd.api 27 | .post_challenge_attempt(params, body) 28 | .then(function (response) { 29 | if (response.status === 429) { 30 | // User was ratelimited but process response 31 | return response; 32 | } 33 | if (response.status === 403) { 34 | // User is not logged in or CTF is paused. 
35 | return response; 36 | } 37 | return response; 38 | }); 39 | }; 40 | 41 | function mergeQueryParams(parameters, queryParameters) { 42 | if (parameters.$queryParameters) { 43 | Object.keys(parameters.$queryParameters).forEach(function ( 44 | parameterName 45 | ) { 46 | var parameter = parameters.$queryParameters[parameterName]; 47 | queryParameters[parameterName] = parameter; 48 | }); 49 | } 50 | 51 | return queryParameters; 52 | } 53 | 54 | function resetAlert() { 55 | let alert = document.getElementById("deployment-info"); 56 | alert.innerHTML = ""; 57 | alert.classList.remove("alert-danger"); 58 | return alert; 59 | } 60 | 61 | function toggleChallengeCreate() { 62 | let btn = document.getElementById("create-chal"); 63 | btn.classList.toggle('d-none'); 64 | } 65 | 66 | function toggleChallengeUpdate() { 67 | let btn = document.getElementById("extend-chal"); 68 | btn.classList.toggle('d-none'); 69 | 70 | btn = document.getElementById("terminate-chal"); 71 | btn.classList.toggle('d-none'); 72 | } 73 | 74 | function calculateExpiry(date) { 75 | // Get the difference in minutes 76 | let difference = Math.ceil( 77 | (new Date(date * 1000) - new Date()) / 1000 / 60 78 | );; 79 | return difference; 80 | } 81 | 82 | function createChallengeLinkElement(data, parent) { 83 | 84 | var expires = document.createElement('span'); 85 | expires.textContent = "Suffering ends in " + calculateExpiry(new Date(data.expires)) + " minutes."; 86 | 87 | parent.append(expires); 88 | parent.append(document.createElement('br')); 89 | 90 | if (data.connect == "tcp") { 91 | let codeElement = document.createElement('code'); 92 | codeElement.textContent = 'nc ' + data.hostname + " " + data.port; 93 | parent.append(codeElement); 94 | } else if(data.connect == "ssh") { 95 | let codeElement = document.createElement('code'); 96 | // In case you have to get the password from other sources 97 | if(data.ssh_password == null) { 98 | codeElement.textContent = 'ssh -o StrictHostKeyChecking=no ' + 
data.ssh_username + '@' + data.hostname + " -p" + data.port; 99 | } else { 100 | codeElement.textContent = 'sshpass -p' + data.ssh_password + " ssh -o StrictHostKeyChecking=no " + data.ssh_username + '@' + data.hostname + " -p" + data.port; 101 | } 102 | parent.append(codeElement); 103 | } else { 104 | let link = document.createElement('a'); 105 | link.href = 'http://' + data.hostname + ":" + data.port; 106 | link.textContent = 'http://' + data.hostname + ":" + data.port; 107 | link.target = '_blank' 108 | parent.append(link); 109 | } 110 | } 111 | 112 | function view_container_info(challenge_id) { 113 | resetAlert(); 114 | var path = "/containers/api/view_info"; 115 | 116 | let alert = document.getElementById("deployment-info"); 117 | fetch(path, { 118 | method: "POST", 119 | headers: { 120 | "Content-Type": "application/json", 121 | "Accept": "application/json", 122 | "CSRF-Token": init.csrfNonce 123 | }, 124 | body: JSON.stringify({ chal_id: challenge_id }) 125 | }) 126 | .then(response => response.json()) 127 | .then(data => { 128 | if (data.status == "Suffering hasn't begun") { 129 | alert.append(data.status); 130 | toggleChallengeCreate(); 131 | } else if (data.status == "already_running") { 132 | // Success 133 | createChallengeLinkElement(data, alert); 134 | toggleChallengeUpdate(); 135 | } else { 136 | resetAlert(); 137 | alert.append(data.message); 138 | alert.classList.toggle('alert-danger'); 139 | toggleChallengeUpdate(); 140 | } 141 | }) 142 | .catch(error => { 143 | console.error("Fetch error:", error); 144 | }); 145 | } 146 | 147 | function container_request(challenge_id) { 148 | var path = "/containers/api/request"; 149 | let alert = resetAlert(); 150 | 151 | fetch(path, { 152 | method: "POST", 153 | headers: { 154 | "Content-Type": "application/json", 155 | "Accept": "application/json", 156 | "CSRF-Token": init.csrfNonce 157 | }, 158 | body: JSON.stringify({ chal_id: challenge_id }) 159 | }) 160 | .then(response => response.json()) 161 | .then(data 
=> { 162 | if (data.error !== undefined) { 163 | // Container error 164 | alert.append(data.error); 165 | alert.classList.toggle('alert-danger'); 166 | toggleChallengeCreate(); 167 | } else if (data.message !== undefined) { 168 | // CTFd error 169 | alert.append(data.message); 170 | alert.classList.toggle('alert-danger'); 171 | toggleChallengeCreate(); 172 | } else { 173 | // Success 174 | createChallengeLinkElement(data, alert); 175 | toggleChallengeUpdate(); 176 | toggleChallengeCreate(); 177 | } 178 | }) 179 | .catch(error => { 180 | console.error("Fetch error:", error); 181 | }); 182 | } 183 | 184 | function container_renew(challenge_id) { 185 | var path = "/containers/api/renew"; 186 | let alert = resetAlert(); 187 | 188 | fetch(path, { 189 | method: "POST", 190 | headers: { 191 | "Content-Type": "application/json", 192 | "Accept": "application/json", 193 | "CSRF-Token": init.csrfNonce 194 | }, 195 | body: JSON.stringify({ chal_id: challenge_id }) 196 | }) 197 | .then(response => response.json()) 198 | .then(data => { 199 | if (data.error !== undefined) { 200 | // Container error 201 | alert.append(data.error); 202 | alert.classList.toggle('alert-danger'); 203 | toggleChallengeCreate(); 204 | } else if (data.message !== undefined) { 205 | // CTFd error 206 | alert.append(data.message); 207 | alert.classList.toggle('alert-danger'); 208 | toggleChallengeCreate(); 209 | } else { 210 | // Success 211 | createChallengeLinkElement(data, alert); 212 | } 213 | }) 214 | .catch(error => { 215 | console.error("Fetch error:", error); 216 | }); 217 | } 218 | 219 | function container_stop(challenge_id) { 220 | var path = "/containers/api/stop"; 221 | let alert = resetAlert(); 222 | 223 | fetch(path, { 224 | method: "POST", 225 | headers: { 226 | "Content-Type": "application/json", 227 | "Accept": "application/json", 228 | "CSRF-Token": init.csrfNonce 229 | }, 230 | body: JSON.stringify({ chal_id: challenge_id }) 231 | }) 232 | .then(response => response.json()) 233 | 
.then(data => { 234 | if (data.error !== undefined) { 235 | // Container error 236 | alert.append(data.error); 237 | alert.classList.toggle('alert-danger'); 238 | toggleChallengeCreate(); 239 | } else if (data.message !== undefined) { 240 | // CTFd error 241 | alert.append(data.message); 242 | alert.classList.toggle('alert-danger'); 243 | toggleChallengeCreate(); 244 | } else { 245 | // Success 246 | alert.append("You have suffered enough."); 247 | toggleChallengeCreate(); 248 | toggleChallengeUpdate(); 249 | } 250 | }) 251 | .catch(error => { 252 | console.error("Fetch error:", error); 253 | }); 254 | } 255 | 256 | -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Containers", 3 | "route": "/containers/dashboard" 4 | } 5 | -------------------------------------------------------------------------------- /container_manager.py: -------------------------------------------------------------------------------- 1 | import atexit 2 | import time 3 | import json 4 | 5 | from flask import Flask 6 | from apscheduler.schedulers.background import BackgroundScheduler 7 | from apscheduler.schedulers import SchedulerNotRunningError 8 | import docker 9 | import paramiko.ssh_exception 10 | import requests 11 | import socket 12 | import random 13 | 14 | from CTFd.models import db 15 | from .models import ContainerInfoModel 16 | 17 | """ To those who will just copy instead of forking, atleast give credits to the author and change your commit messages ;) """ 18 | class ContainerException(Exception): 19 | def __init__(self, *args: object) -> None: 20 | super().__init__(*args) 21 | if args: 22 | self.message = args[0] 23 | else: 24 | self.message = None 25 | 26 | def __str__(self) -> str: 27 | if self.message: 28 | return self.message 29 | else: 30 | return "Unknown Container Exception" 31 | 32 | class ContainerManager: 33 | def __init__(self, 
settings, app): 34 | self.settings = settings 35 | self.client = None 36 | self.app = app 37 | if settings.get("docker_base_url") is None or settings.get("docker_base_url") == "": 38 | return 39 | 40 | # Connect to the docker daemon 41 | try: 42 | self.initialize_connection(settings, app) 43 | except ContainerException: 44 | print("Docker could not initialize or connect.") 45 | return 46 | 47 | def __check_port__(self, port: int) -> bool: 48 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 49 | s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 50 | s.settimeout(1) 51 | try: 52 | s.bind(("0.0.0.0", port)) 53 | s.close() 54 | return True 55 | except Exception as e: 56 | print(f"Error when fetching port: {e}") 57 | return False 58 | 59 | def initialize_connection(self, settings, app) -> None: 60 | self.settings = settings 61 | self.app = app 62 | 63 | # Remove any leftover expiration schedulers 64 | try: 65 | self.expiration_scheduler.shutdown() 66 | except (SchedulerNotRunningError, AttributeError): 67 | # Scheduler was never running 68 | pass 69 | 70 | if settings.get("docker_base_url") is None: 71 | self.client = None 72 | return 73 | 74 | try: 75 | self.client = docker.DockerClient( 76 | base_url=settings.get("docker_base_url")) 77 | except (docker.errors.DockerException) as e: 78 | self.client = None 79 | print(f"Error: {e}") 80 | raise ContainerException("CTFd could not connect to Docker") 81 | except TimeoutError as e: 82 | self.client = None 83 | raise ContainerException( 84 | "CTFd timed out when connecting to Docker") 85 | except paramiko.ssh_exception.NoValidConnectionsError as e: 86 | self.client = None 87 | raise ContainerException( 88 | "CTFd timed out when connecting to Docker: " + str(e)) 89 | except paramiko.ssh_exception.AuthenticationException as e: 90 | self.client = None 91 | raise ContainerException( 92 | "CTFd had an authentication error when connecting to Docker: " + str(e)) 93 | 94 | # Set up expiration scheduler 95 | try: 96 | 
self.expiration_seconds = int( 97 | settings.get("container_expiration", 0)) * 60 98 | except (ValueError, AttributeError): 99 | self.expiration_seconds = 0 100 | 101 | EXPIRATION_CHECK_INTERVAL = 5 102 | 103 | if self.expiration_seconds > 0: 104 | self.expiration_scheduler = BackgroundScheduler() 105 | self.expiration_scheduler.add_job( 106 | func=self.kill_expired_containers, args=(app,), trigger="interval", seconds=EXPIRATION_CHECK_INTERVAL) 107 | self.expiration_scheduler.start() 108 | 109 | # Shut down the scheduler when exiting the app 110 | atexit.register(lambda: self.expiration_scheduler.shutdown()) 111 | 112 | # TODO: Fix this cause it doesn't work 113 | def run_command(func): 114 | def wrapper_run_command(self, *args, **kwargs): 115 | if self.client is None: 116 | try: 117 | self.__init__(self.settings, self.app) 118 | except: 119 | raise ContainerException("Docker is not connected") 120 | try: 121 | if self.client is None: 122 | raise ContainerException("Docker is not connected") 123 | if self.client.ping(): 124 | return func(self, *args, **kwargs) 125 | except (paramiko.ssh_exception.SSHException, ConnectionError, requests.exceptions.ConnectionError) as e: 126 | # Try to reconnect before failing 127 | try: 128 | self.__init__(self.settings, self.app) 129 | except: 130 | pass 131 | raise ContainerException( 132 | "Docker connection was lost. Please try your request again later.") 133 | return wrapper_run_command 134 | 135 | @run_command 136 | def kill_expired_containers(self, app: Flask): 137 | with app.app_context(): 138 | containers: "list[ContainerInfoModel]" = ContainerInfoModel.query.all() 139 | 140 | for container in containers: 141 | delta_seconds = container.expires - int(time.time()) 142 | if delta_seconds < 0: 143 | try: 144 | self.kill_container(container.container_id) 145 | except ContainerException: 146 | print( 147 | "[Container Expiry Job] Docker is not initialized. 
Please check your settings.") 148 | 149 | db.session.delete(container) 150 | db.session.commit() 151 | 152 | @run_command 153 | def is_container_running(self, container_id: str) -> bool: 154 | container = self.client.containers.list(filters={"id": container_id}) 155 | if len(container) == 0: 156 | return False 157 | return container[0].status == "running" 158 | 159 | @run_command 160 | def create_container(self, chal_id: str, team_id: str, user_id: str, image: str, port: int, command: str, volumes: str): 161 | kwargs = {} 162 | 163 | # Set the memory and CPU limits for the container 164 | if self.settings.get("container_maxmemory"): 165 | try: 166 | mem_limit = int(self.settings.get("container_maxmemory")) 167 | if mem_limit > 0: 168 | kwargs["mem_limit"] = f"{mem_limit}m" 169 | except ValueError: 170 | ContainerException( 171 | "Configured container memory limit must be an integer") 172 | if self.settings.get("container_maxcpu"): 173 | try: 174 | cpu_period = float(self.settings.get("container_maxcpu")) 175 | if cpu_period > 0: 176 | kwargs["cpu_quota"] = int(cpu_period * 100000) 177 | kwargs["cpu_period"] = 100000 178 | except ValueError: 179 | ContainerException( 180 | "Configured container CPU limit must be a number") 181 | 182 | if volumes is not None and volumes != "": 183 | print("Volumes:", volumes) 184 | try: 185 | volumes_dict = json.loads(volumes) 186 | kwargs["volumes"] = volumes_dict 187 | except json.decoder.JSONDecodeError: 188 | raise ContainerException("Volumes JSON string is invalid") 189 | 190 | external_port = random.randint(port, 65535) 191 | while not self.__check_port__(external_port): 192 | external_port = random.randint(port, 65535) 193 | 194 | print(f"Using {external_port} as the external port for challenge {chal_id} for team {team_id} spawned by {user_id}") 195 | try: 196 | return self.client.containers.run( 197 | image, 198 | ports={str(port): str(external_port)}, 199 | command=command, 200 | detach=True, 201 | auto_remove=True, 202 | 
environment={"CHALLENGE_ID": chal_id, "TEAM_ID": team_id, "USER_ID": user_id}, 203 | **kwargs 204 | ) 205 | except docker.errors.ImageNotFound: 206 | raise ContainerException("Docker image not found") 207 | 208 | @run_command 209 | def get_container_port(self, container_id: str) -> "str|None": 210 | try: 211 | for port in list(self.client.containers.get(container_id).ports.values()): 212 | if port is not None: 213 | return port[0]["HostPort"] 214 | except (KeyError, IndexError): 215 | return None 216 | 217 | @run_command 218 | def get_images(self) -> "list[str]|None": 219 | try: 220 | images = self.client.images.list() 221 | except (KeyError, IndexError): 222 | return [] 223 | 224 | images_list = [] 225 | for image in images: 226 | if len(image.tags) > 0: 227 | images_list.append(image.tags[0]) 228 | 229 | images_list.sort() 230 | return images_list 231 | 232 | @run_command 233 | def kill_container(self, container_id: str): 234 | try: 235 | self.client.containers.get(container_id).kill() 236 | except docker.errors.NotFound: 237 | pass 238 | 239 | def is_connected(self) -> bool: 240 | try: 241 | self.client.ping() 242 | except: 243 | return False 244 | return True 245 | -------------------------------------------------------------------------------- /image-readme/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/1.png -------------------------------------------------------------------------------- /image-readme/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/2.png -------------------------------------------------------------------------------- /image-readme/3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/3.png -------------------------------------------------------------------------------- /image-readme/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/4.png -------------------------------------------------------------------------------- /image-readme/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/demo.gif -------------------------------------------------------------------------------- /image-readme/http.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/http.png -------------------------------------------------------------------------------- /image-readme/main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/main.png -------------------------------------------------------------------------------- /image-readme/manage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/manage.png -------------------------------------------------------------------------------- /image-readme/tcp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TheFlash2k/containers/b8cf27c5d8224e8e987fde7391c9ab9c48e02b7c/image-readme/tcp.png 
-------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.sql import func 2 | from sqlalchemy.orm import relationship 3 | 4 | from CTFd.models import db 5 | from CTFd.models import Challenges 6 | 7 | 8 | class ContainerChallengeModel(Challenges):  # "container" challenge type: a Docker image with dynamic-decay scoring 9 | __mapper_args__ = {"polymorphic_identity": "container"} 10 | id = db.Column( 11 | db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True 12 | ) 13 | image = db.Column(db.Text)  # Docker image tag to launch for this challenge 14 | port = db.Column(db.Integer)  # container-internal port to publish 15 | command = db.Column(db.Text, default="") 16 | volumes = db.Column(db.Text, default="") 17 | ctype = db.Column(db.Text, default="tcp")  # connection type; presumably tcp/http/ssh — confirm against templates 18 | 19 | ssh_username = db.Column(db.Text, nullable=True) 20 | ssh_password = db.Column(db.Text, nullable=True) 21 | 22 | # Dynamic challenge properties 23 | initial = db.Column(db.Integer, default=0)  # starting point value 24 | minimum = db.Column(db.Integer, default=0)  # floor the value decays toward 25 | decay = db.Column(db.Integer, default=0)  # decay rate; exact formula lives outside this file 26 | 27 | def __init__(self, *args, **kwargs): 28 | super(ContainerChallengeModel, self).__init__(**kwargs)  # *args is accepted but intentionally not forwarded 29 | self.value = kwargs["initial"]  # score starts at "initial"; raises KeyError if the creation form omits it 30 | 31 | 32 | class ContainerInfoModel(db.Model):  # one row per running container instance, keyed by Docker container id 33 | __mapper_args__ = {"polymorphic_identity": "container_info"} 34 | container_id = db.Column(db.String(512), primary_key=True) 35 | challenge_id = db.Column( 36 | db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE") 37 | ) 38 | team_id = db.Column( 39 | db.Integer, db.ForeignKey("teams.id", ondelete="CASCADE") 40 | ) 41 | user_id = db.Column( 42 | db.Integer, db.ForeignKey("users.id", ondelete="CASCADE") 43 | ) 44 | port = db.Column(db.Integer)  # host port the container was published on 45 | ssh_username = db.Column(db.Text, nullable=True) 46 | ssh_password = db.Column(db.Text, nullable=True) 47 | timestamp = db.Column(db.Integer)  # creation time; epoch seconds — TODO confirm (dashboard renders it via format_time) 48 | expires = db.Column(db.Integer)  # expiry time, same unit as timestamp 49 | team = relationship("Teams", foreign_keys=[team_id]) 50 | user = relationship("Users",
foreign_keys=[user_id]) 51 | challenge = relationship(ContainerChallengeModel, 52 | foreign_keys=[challenge_id]) 53 | 54 | class ContainerSettingsModel(db.Model): 55 | __mapper_args__ = {"polymorphic_identity": "container_settings"} 56 | key = db.Column(db.String(512), primary_key=True) 57 | value = db.Column(db.Text) 58 | -------------------------------------------------------------------------------- /settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "modes" : { 3 | "USERS_MODE": "users", 4 | "TEAMS_MODE": "teams" 5 | }, 6 | 7 | "plugin-info" : { 8 | "id": "container", 9 | "name": "container", 10 | "templates" : { 11 | "create": "/plugins/containers/assets/create.html", 12 | "update": "/plugins/containers/assets/update.html", 13 | "view": "/plugins/containers/assets/view.html" 14 | }, 15 | "scripts": { 16 | "create": "/plugins/containers/assets/create.js", 17 | "update": "/plugins/containers/assets/update.js", 18 | "view": "/plugins/containers/assets/view.js" 19 | }, 20 | "route" : "/plugins/containers/assets/" 21 | }, 22 | 23 | "requests" : { 24 | "limit": 500, 25 | "interval": 10 26 | }, 27 | 28 | "vars" : { 29 | "MAX_CONTAINERS_ALLOWED": 4 30 | } 31 | } -------------------------------------------------------------------------------- /templates/container_dashboard.html: -------------------------------------------------------------------------------- 1 | {% extends "admin/base.html" %} 2 | 3 | 4 | {% block content %} 5 | 6 | 16 | 17 |
18 |
19 |

Containers

20 |
21 |
22 |
23 | {% with messages = get_flashed_messages() %} 24 | {% if messages %} 25 | {% for message in messages %} 26 | 29 | {% endfor %} 30 | {% endif %} 31 | {% endwith %} 32 | 33 | 35 | Settings 37 | 38 | {% if connected %} 39 | Docker Connected 40 | {% else %} 41 | Docker Not Connected 42 | {% endif %} 43 | 44 |
45 | 46 |
47 |
48 |
49 | 53 |
54 |
55 |
56 |
57 | 61 |
62 |
63 |
64 | 65 |
66 |
67 |
68 | 69 | 70 | {% if containers %} 71 | {% for c in containers %} 72 | 73 | 74 | 76 | 78 | 80 | {% if c.team == None %} 81 | 82 | {% else %} 83 | 84 | 85 | {% endif %} 86 | 88 | 90 | 92 | 94 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | {% if c.team == None %} 104 | 105 | {% else %} 106 | 107 | 108 | {% endif %} 109 | 110 | 111 | 112 | {% if c.is_running %} 113 | 114 | {% else %} 115 | 116 | {% endif %} 117 | 119 | 120 | 121 | {% endfor %} 122 | {% endif %} 123 |
Container ID 75 | Image 77 | Challenge 79 | UserUserTeamPort 87 | Created 89 | Expires 91 | Running 93 | Kill 95 |
{{ c.container_id[:12] }}{{ c.challenge.image }}{{ c.challenge.name }} [{{ c.challenge_id }}]{{ c.user.name }} [{{ c.user_id }}]{{ c.user.name }} [{{ c.user_id }}]{{ c.team.name }} [{{ c.team_id }}]{{ c.port }}{{ c.timestamp|format_time }}{{ c.expires|format_time }}YesNo
124 |
125 | 126 | {% endblock %} 127 | 128 | {% block scripts %} 129 | 271 | {% endblock %} -------------------------------------------------------------------------------- /templates/container_settings.html: -------------------------------------------------------------------------------- 1 | {% extends 'admin/base.html' %} 2 | {% block content %} 3 |
4 |
5 |

Docker Config

6 |
7 |
8 |
9 | {% with messages = get_flashed_messages() %} 10 | {% if messages %} 11 | {% for message in messages %} 12 | 15 | {% endfor %} 16 | {% endif %} 17 | {% endwith %} 18 |
19 |
20 |
22 |
23 | 26 | 29 |
30 |
31 | 34 | 36 |
37 |
38 | 41 | 43 |
44 |
45 | 48 | 50 |
51 |
52 | 55 | 57 |
58 |
59 | 62 | Cancel 63 |
64 |
65 | 66 | 67 |
68 |

Instructions

69 |

70 | The Base URL should be the local socket address of the Docker daemon, i.e. 71 | unix://var/run/docker.sock, or it can be a remote SSH address, e.g. 72 | ssh://root@example.com. In either case, sudo will not be executed. For a local socket, the user 73 | CTFd is running as should have permissions for Docker; for SSH connections, the SSH user in the Base URL should 74 | be root or have Docker permissions. 75 |

76 |
77 | {% endblock content %} 78 | {% block scripts %} 79 | 84 | {% endblock scripts %} --------------------------------------------------------------------------------