├── .gitignore
├── README.md
├── __init__.py
├── assets
│   ├── create.html
│   ├── create.js
│   ├── update.html
│   ├── update.js
│   ├── view.html
│   └── view.js
├── config.json
├── container_challenge.py
├── container_manager.py
├── docs
│   └── Images
│       ├── create_chall.png
│       ├── dashboard.png
│       ├── dialog.png
│       ├── docker_images.png
│       ├── settings.png
│       ├── tcp.png
│       └── web.png
├── logs.py
├── models.py
├── requirements.txt
├── routes.py
├── routes_helper.py
├── setup.py
└── templates
    ├── container_dashboard.html
    └── container_settings.html
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | .pytest_cache/
4 | *.py[cod]
5 | *$py.class
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | cover/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | .pybuilder/
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | # For a library or package, you might want to ignore these files since the code is
88 | # intended to run in multiple environments; otherwise, check them in:
89 | # .python-version
90 |
91 | # pipenv
92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
95 | # install all needed dependencies.
96 | #Pipfile.lock
97 |
98 | # poetry
99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100 | # This is especially recommended for binary packages to ensure reproducibility, and is more
101 | # commonly ignored for libraries.
102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103 | #poetry.lock
104 |
105 | # pdm
106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107 | #pdm.lock
108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109 | # in version control.
110 | # https://pdm.fming.dev/#use-with-ide
111 | .pdm.toml
112 |
113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
114 | __pypackages__/
115 |
116 | # Celery stuff
117 | celerybeat-schedule
118 | celerybeat.pid
119 |
120 | # SageMath parsed files
121 | *.sage.py
122 |
123 | # Environments
124 | .env
125 | .venv
126 | env/
127 | venv/
128 | ENV/
129 | env.bak/
130 | venv.bak/
131 |
132 | # Spyder project settings
133 | .spyderproject
134 | .spyproject
135 |
136 | # Rope project settings
137 | .ropeproject
138 |
139 | # mkdocs documentation
140 | /site
141 |
142 | # mypy
143 | .mypy_cache/
144 | .dmypy.json
145 | dmypy.json
146 |
147 | # Pyre type checker
148 | .pyre/
149 |
150 | # pytype static type analyzer
151 | .pytype/
152 |
153 | # Cython debug symbols
154 | cython_debug/
155 |
156 | # PyCharm
157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
159 | # and can be added to the global gitignore or merged into this file. For a more nuclear
160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
161 | #.idea/
162 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CTFd Docker Containers Plugin
2 |
3 | 
4 | 
5 | 
6 |
7 | This CTFd plugin allows you to run ephemeral Docker containers for specific challenges. Users can request a container to use as needed, and its lifecycle will be managed by the plugin.
8 |
9 | ## Usage
10 |
11 | ### Installation
12 |
13 | #### On premise
14 |
15 | Go to your CTFd/plugins folder and execute the following commands:
16 |
17 | ```shell
18 | git clone https://github.com/Bigyls/CTFdDockerContainersPlugin.git containers
19 | cd containers
20 | pip install -r requirements.txt
21 | ```
22 |
23 | Restart your CTFd.
24 |
25 | #### Docker Compose
26 |
27 | You will need to specify some values, including the Docker connection type to use.
28 |
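Typical `docker_base_url` values, as accepted by `docker.DockerClient` (the plugin passes this setting straight through; see `container_manager.py`), are for example:

```
unix://var/run/docker.sock
tcp://<docker-host>:2375
ssh://user@<docker-host>
```
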
29 | If you are using the Docker Compose CTFd installation, you can map the Docker socket into the CTFd container by modifying the docker-compose.yml file ([be careful to follow best practices](https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers)):
30 |
31 | ```yml
32 | services:
33 |   ctfd:
34 |     ...
35 |     volumes:
36 |       ...
37 |       - /var/run/docker.sock:/var/run/docker.sock
38 |     ...
39 | ```
40 |
41 | Then, go to your CTFd/plugins folder and execute the following command:
42 |
43 | ```shell
44 | git clone https://github.com/Bigyls/CTFdDockerContainersPlugin.git containers
45 | ```
46 |
47 | Rebuild and restart your Docker Compose stack:
48 |
49 | ```shell
50 | docker-compose down
51 | docker-compose up --build
52 | ```
53 |
54 | ### Configuration
55 |
56 | To configure the plugin, go to the admin page, click the dropdown in the navbar for plugins, and go to the Containers page (https://example.com/containers/settings).
57 |
58 | 
59 |
60 | Other options are described on the page. After saving (`Submit` button), the plugin tries to connect to the Docker daemon; the status shows either as an error message or as a green symbol on the dashboard (**you may need to restart CTFd to be sure**).
61 |
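The main keys stored in `ContainerSettingsModel` (names taken from `container_manager.py` and `routes.py`) are:

- `docker_base_url`: how to reach the Docker daemon
- `container_expiration`: instance lifetime in minutes (0 disables the expiry job)
- `container_maxmemory`: per-container memory limit in MB
- `container_maxcpu`: per-container CPU limit in cores
- `docker_assignment`: assignment mode (per team, per user, or unlimited)
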
62 | ## Demo
63 |
64 | ### Add challenge
65 |
66 | To create challenges, use the container challenge type and configure the options. It uses dynamic scoring, so if you want static scoring, set the initial and minimum values to the same number and the decay to 1.
67 |
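For reference, `container_challenge.py` recalculates the value after each solve (the solver's own solve is not counted) and clamps it at the minimum:

```
value = max(minimum, ceil(initial + ((minimum - initial) / decay²) × solve_count²))
```
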
68 | Go to https://example.com/admin/challenges/new, select the container challenge type and fill in all the required fields:
69 |
70 | 
71 |
72 | The Docker image field lets you select a Docker image already built on the machine:
73 |
74 | 
75 |
76 | If you need to specify advanced options like the volumes, read the [Docker SDK for Python documentation](https://docker-py.readthedocs.io/en/stable/containers.html) for the syntax, since most options are passed directly to the SDK.
77 |
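For example, a bind mount can be expressed as a JSON string in the Docker SDK's dictionary form (host path and mount point below are illustrative); `container_manager.py` parses this string with `json.loads` and passes the result to the SDK:

```json
{"/host/challenge/data": {"bind": "/data", "mode": "ro"}}
```
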
78 | #### Auto deployment
79 |
80 | It's also possible to configure auto deployment using [ctfcli](https://github.com/CTFd/ctfcli) and its YAML configuration:
81 |
82 | ```yaml
83 | name: BestChallenge
84 | ...
85 | type: container
86 | value: 50
87 | extra:
88 |   initial: 50
89 |   decay: 75
90 |   minimum: 10
91 |   image: bestchallenge:latest
92 |   port: 1337
93 |
94 | connection_info: https://container.example.com
95 | ...
96 | ```
97 |
98 | ### Admin view
99 |
100 | Admins can manage created containers at https://example.com/containers/dashboard.
101 |
102 | 
103 |
104 | ### User view
105 |
106 | When a user opens a container challenge, a button labeled "Start Instance" appears. Clicking it shows the connection information below, with a random port assignment.
107 |
108 | Web | TCP
109 | :-------------------------:|:-------------------------:
110 |  | 
111 |
112 | All connection types are supported (HTTP, HTTPS, TCP, UDP, OPCUA, MQTT, ZeroMQ, ...).
113 |
114 | ### Logs
115 |
116 | The plugin logs all actions in the CTFd logs folder (`CTFd/CTFd/logs`) into the `containers.log` file.
117 |
118 | There are 3 levels of logging:
119 | - INFO: User actions
120 | - DEBUG: Diagnostic details
121 | - ERROR: Errors and failures
122 |
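With the format string defined in `logs.py`, a `containers.log` line looks like this (values are illustrative):

```
2024-06-01 10:00:00,000|INFO|IP:10.0.0.5|USER_ID:3|CHALL_ID:7|Container created
```
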
123 | ### Rate limits
124 |
125 | If the CTF is a physical event, be careful with the rate limits on the API endpoints. Feel free to change the values or disable the limits by commenting them out.
126 |
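The limits are applied with CTFd's `ratelimit` decorator in `routes.py`; for example, the `/api/running` route allows 100 POST requests every 300 seconds:

```python
@ratelimit(method="POST", limit=100, interval=300, key_prefix='rl_running_container_post')
```
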
127 | ## Roadmap
128 | - [ ] Write tests.
129 | - [ ] Instead of restricting to 1 container per team/user, add a setting for the maximum number of containers per team/user.
130 | - [ ] Support two Docker TCP connection methods (e.g. one Windows host and one Linux host).
131 | - [x] Add exhaustive logging.
132 | - [x] Add Docker restrictions (1 container per team, 1 container per user, unlimited).
133 | - [x] Make it work with User and Team mode.
134 | - [x] Tested with CTFd 3.7.
135 |
136 | ## Contributing
137 |
138 | Feel free to create issues and PRs if you experience a bug, have a question, or have an idea for a new feature. This repository aims to remain active, up to date and scalable.
139 |
140 | ## Credits
141 |
142 | Project Link: https://github.com/Bigyls/CTFdDockerContainersPlugin
143 |
144 | Based on: https://github.com/andyjsmith/CTFd-Docker-Plugin
145 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This module initializes and sets up the containers plugin for CTFd.
3 |
4 | It handles the registration of the container challenge type, sets up logging,
5 | and registers the plugin's routes and assets with the CTFd application.
6 | """
7 |
8 | from flask import Flask
9 | from flask.blueprints import Blueprint
10 |
11 | from CTFd.plugins import register_plugin_assets_directory
12 | from CTFd.plugins.challenges import CHALLENGE_CLASSES
13 |
14 | from .container_challenge import ContainerChallenge
15 | from .setup import setup_default_configs
16 | from .routes import register_app
17 | from .logs import init_logs
18 |
19 | def load(app: Flask) -> None:
20 | """
21 | Initialize and set up the containers plugin for CTFd.
22 |
23 | This function is called by CTFd when the plugin is loaded. It performs the following tasks:
24 | 1. Disables Flask-RESTX's automatic 404 help messages.
25 | 2. Creates all necessary database tables.
26 | 3. Runs the default configuration setup if the plugin hasn't been set up before.
27 | 4. Registers the ContainerChallenge class with CTFd's challenge system.
28 | 5. Registers the plugin's static assets directory.
29 | 6. Initializes logging for the plugin.
30 | 7. Registers the plugin's routes with the CTFd application.
31 |
32 | Args:
33 | app (Flask): The Flask application instance of CTFd.
34 |
35 | Returns:
36 | None
37 | """
38 | app.config['RESTX_ERROR_404_HELP'] = False
39 | app.db.create_all()
40 | setup_default_configs()
41 | CHALLENGE_CLASSES["container"] = ContainerChallenge
42 | register_plugin_assets_directory(app, base_path="/plugins/containers/assets/")
43 |
44 | # Initialize logging for this plugin
45 | init_logs(app)
46 |
47 | # Get the blueprint from register_app and register it here
48 | containers_bp: Blueprint = register_app(app)
49 | app.register_blueprint(containers_bp)
50 |
--------------------------------------------------------------------------------
/assets/create.html:
--------------------------------------------------------------------------------
1 | {% extends "admin/challenges/create.html" %}
2 |
3 | {% block header %}
4 |
5 | Container challenges dynamically spin up a Docker container for each team to run the challenge on.
6 |
7 |
91 | {% endblock %}
92 |
93 | {% block type %}
94 |
95 |
96 | {% endblock %}
97 |
--------------------------------------------------------------------------------
/assets/create.js:
--------------------------------------------------------------------------------
1 | CTFd.plugin.run((_CTFd) => {
2 | const $ = _CTFd.lib.$; // Access jQuery from CTFd
3 | const md = _CTFd.lib.markdown(); // Access the markdown library from CTFd
4 | });
5 |
6 | // Get the DOM elements for the container image dropdown and its default option
7 | var containerImage = document.getElementById("container-image");
8 | var containerImageDefault = document.getElementById("container-image-default");
9 |
10 | // Define the API path to fetch Docker images
11 | var path = "/containers/api/images";
12 |
13 | // Create a new XMLHttpRequest object to fetch the list of container images
14 | var xhr = new XMLHttpRequest();
15 | xhr.open("GET", path, true); // Initialize a GET request
16 | xhr.setRequestHeader("Accept", "application/json"); // Set the request header to accept JSON
17 | xhr.setRequestHeader("CSRF-Token", init.csrfNonce); // Include CSRF token for security
18 | xhr.send(); // Send the request
19 |
20 | // Define the function to handle the response when the request is complete
21 | xhr.onload = function () {
22 | var data = JSON.parse(this.responseText); // Parse the JSON response
23 | if (data.error != undefined) {
24 | // If there is an error in the response, display it in the default option
25 | containerImageDefault.innerHTML = data.error;
26 | } else {
27 | // If the response is successful, populate the dropdown with images
28 | for (var i = 0; i < data.images.length; i++) {
29 | var opt = document.createElement("option"); // Create a new option element
30 | opt.value = data.images[i]; // Set the option value to the image name
31 | opt.innerHTML = data.images[i]; // Set the displayed text of the option
32 | containerImage.appendChild(opt); // Add the option to the dropdown
33 | }
34 | // Update the default option text and enable the dropdown
35 | containerImageDefault.innerHTML = "Choose an image...";
36 | containerImage.removeAttribute("disabled");
37 | }
38 | console.log(data); // Log the response data for debugging
39 | };
40 |
--------------------------------------------------------------------------------
/assets/update.html:
--------------------------------------------------------------------------------
1 | {% extends "admin/challenges/update.html" %}
2 |
3 | {% block value %}
4 |
5 |
10 |
11 |
12 |
13 |
14 |
19 |
20 |
21 |
22 |
23 |
28 |
29 |
30 |
31 |
32 |
37 |
38 |
39 |
40 |
41 |
45 |
51 |
55 |
56 |
57 |
58 |
64 |
65 |
66 |
67 |
68 |
74 |
75 |
76 |
77 |
78 |
85 |
86 |
87 | {% endblock %}
88 |
--------------------------------------------------------------------------------
/assets/update.js:
--------------------------------------------------------------------------------
1 | // Get references to the container image dropdown and its default option
2 | var containerImage = document.getElementById("container-image");
3 | var containerImageDefault = document.getElementById("container-image-default");
4 |
5 | // Define the API endpoint to fetch Docker images
6 | var path = "/containers/api/images";
7 |
8 | // Create a new XMLHttpRequest object to communicate with the server
9 | var xhr = new XMLHttpRequest();
10 | xhr.open("GET", path, true); // Initialize a GET request to the specified path
11 | xhr.setRequestHeader("Accept", "application/json"); // Indicate that we expect a JSON response
12 | xhr.setRequestHeader("CSRF-Token", init.csrfNonce); // Include CSRF token for security
13 | xhr.send(); // Send the request
14 |
15 | // Define what happens when the request completes
16 | xhr.onload = function () {
17 | var data = JSON.parse(this.responseText); // Parse the JSON response
18 | if (data.error != undefined) {
19 | // If an error is returned from the server
20 | containerImageDefault.innerHTML = data.error; // Display the error message in the default option
21 | } else {
22 | // If the request is successful and returns images
23 | for (var i = 0; i < data.images.length; i++) {
24 | // Loop through each image returned
25 | var opt = document.createElement("option"); // Create a new option element
26 | opt.value = data.images[i]; // Set the value of the option to the image name
27 | opt.innerHTML = data.images[i]; // Set the displayed text to the image name
28 | containerImage.appendChild(opt); // Append the option to the dropdown
29 | }
30 | containerImageDefault.innerHTML = "Choose an image..."; // Update the default option text
31 | containerImage.removeAttribute("disabled"); // Enable the dropdown for user selection
32 | containerImage.value = container_image_selected; // Set the dropdown to the currently selected image
33 | }
34 | console.log(data); // Log the response data for debugging purposes
35 | };
36 |
--------------------------------------------------------------------------------
/assets/view.html:
--------------------------------------------------------------------------------
1 | {% extends "challenge.html" %}
2 |
3 | {% block connection_info %}
4 |
5 |
6 | Instance Info
7 |
8 |
9 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
24 |
25 |
26 |
30 |
31 |
32 |
33 |
35 | Error
36 |
37 |
38 |
39 |
40 |
43 |
44 | {% endblock %}
45 |
--------------------------------------------------------------------------------
/assets/view.js:
--------------------------------------------------------------------------------
1 | // Initialize challenge data and renderer
2 | CTFd._internal.challenge.data = undefined;
3 | CTFd._internal.challenge.renderer = null;
4 | CTFd._internal.challenge.preRender = function () { };
5 | CTFd._internal.challenge.render = null;
6 | CTFd._internal.challenge.postRender = function () { };
7 |
8 | // Function to handle submission of challenge attempts
9 | CTFd._internal.challenge.submit = function (preview) {
10 | var challenge_id = parseInt(CTFd.lib.$("#challenge-id").val());
11 | var submission = CTFd.lib.$("#challenge-input").val();
12 |
13 | var body = {
14 | challenge_id: challenge_id,
15 | submission: submission,
16 | };
17 | var params = {};
18 | if (preview) {
19 | params["preview"] = true;
20 | }
21 |
22 | return CTFd.api
23 | .post_challenge_attempt(params, body)
24 | .then(function (response) {
25 | // Handle different response statuses
26 | if (response.status === 429) {
27 | return response; // Rate limit reached
28 | }
29 | if (response.status === 403) {
30 | return response; // User not logged in or CTF paused
31 | }
32 | return response; // Success or other statuses
33 | });
34 | };
35 |
36 | // Function to merge query parameters
37 | function mergeQueryParams(parameters, queryParameters) {
38 | if (parameters.$queryParameters) {
39 | Object.keys(parameters.$queryParameters).forEach(function (parameterName) {
40 | var parameter = parameters.$queryParameters[parameterName];
41 | queryParameters[parameterName] = parameter; // Merge query parameters
42 | });
43 | }
44 | return queryParameters;
45 | }
46 |
47 | // Function to check if the container is already running
48 | function container_running(challenge_id) {
49 | var path = "/containers/api/running";
50 | var requestButton = document.getElementById("container-request-btn");
51 | var requestError = document.getElementById("container-request-error");
52 |
53 | var xhr = new XMLHttpRequest();
54 | xhr.open("POST", path, true);
55 | xhr.setRequestHeader("Content-Type", "application/json");
56 | xhr.setRequestHeader("Accept", "application/json");
57 | xhr.setRequestHeader("CSRF-Token", init.csrfNonce);
58 | xhr.send(JSON.stringify({ chal_id: challenge_id }));
59 |
60 | xhr.onload = function () {
61 | var data = JSON.parse(this.responseText);
62 | if (data.error !== undefined) {
63 | // Container error handling
64 | requestError.style.display = "";
65 | requestError.firstElementChild.innerHTML = data.error;
66 | requestButton.removeAttribute("disabled");
67 | } else if (data.message !== undefined) {
68 | // CTFd error handling
69 | requestError.style.display = "";
70 | requestError.firstElementChild.innerHTML = data.message;
71 | requestButton.removeAttribute("disabled");
72 | } else if (data && data.status === "already_running" && data.container_id == challenge_id) {
73 | // If the container is already running
74 | console.log(challenge_id);
75 | container_request(challenge_id); // Request to run the container
76 | } else {
77 | // Other cases, if needed
78 | }
79 | console.log(data);
80 | };
81 | }
82 |
83 | // Function to request a new container instance
84 | function container_request(challenge_id) {
85 | var path = "/containers/api/request";
86 | var requestButton = document.getElementById("container-request-btn");
87 | var requestResult = document.getElementById("container-request-result");
88 | var connectionInfo = document.getElementById("container-connection-info");
89 | var containerExpires = document.getElementById("container-expires");
90 | var containerExpiresTime = document.getElementById("container-expires-time");
91 | var requestError = document.getElementById("container-request-error");
92 |
93 | requestButton.setAttribute("disabled", "disabled");
94 |
95 | var xhr = new XMLHttpRequest();
96 | xhr.open("POST", path, true);
97 | xhr.setRequestHeader("Content-Type", "application/json");
98 | xhr.setRequestHeader("Accept", "application/json");
99 | xhr.setRequestHeader("CSRF-Token", init.csrfNonce);
100 | xhr.send(JSON.stringify({ chal_id: challenge_id }));
101 |
102 | xhr.onload = function () {
103 | var data = JSON.parse(this.responseText);
104 | if (data.error !== undefined) {
105 | // Handle container error
106 | requestError.style.display = "";
107 | requestError.firstElementChild.innerHTML = data.error;
108 | requestButton.removeAttribute("disabled");
109 | } else if (data.message !== undefined) {
110 | // Handle CTFd error
111 | requestError.style.display = "";
112 | requestError.firstElementChild.innerHTML = data.message;
113 | requestButton.removeAttribute("disabled");
114 | } else {
115 | // Success case
116 | requestError.style.display = "none";
117 | requestError.firstElementChild.innerHTML = "";
118 | requestButton.parentNode.removeChild(requestButton);
119 | if (data.hostname.startsWith("http")) {
120 | connectionInfo.innerHTML = '' + data.hostname + ':' + data.port + '';
121 | } else {
122 | connectionInfo.innerHTML = data.hostname + ' ' + data.port;
123 | }
124 | containerExpires.innerHTML = Math.ceil((new Date(data.expires * 1000) - new Date()) / 1000 / 60);
125 | containerExpiresTime.style.display = "";
126 | requestResult.style.display = "";
127 | }
128 | console.log(data);
129 | };
130 | }
131 |
132 | // Function to reset the container
133 | function container_reset(challenge_id) {
134 | var path = "/containers/api/reset";
135 | var resetButton = document.getElementById("container-reset-btn");
136 | var requestResult = document.getElementById("container-request-result");
137 | var connectionInfo = document.getElementById("container-connection-info");
138 | var requestError = document.getElementById("container-request-error");
139 |
140 | resetButton.setAttribute("disabled", "disabled");
141 |
142 | var xhr = new XMLHttpRequest();
143 | xhr.open("POST", path, true);
144 | xhr.setRequestHeader("Content-Type", "application/json");
145 | xhr.setRequestHeader("Accept", "application/json");
146 | xhr.setRequestHeader("CSRF-Token", init.csrfNonce);
147 | xhr.send(JSON.stringify({ chal_id: challenge_id }));
148 |
149 | xhr.onload = function () {
150 | var data = JSON.parse(this.responseText);
151 | if (data.error !== undefined) {
152 | // Handle container error
153 | requestError.style.display = "";
154 | requestError.firstElementChild.innerHTML = data.error;
155 | resetButton.removeAttribute("disabled");
156 | } else if (data.message !== undefined) {
157 | // Handle CTFd error
158 | requestError.style.display = "";
159 | requestError.firstElementChild.innerHTML = data.message;
160 | resetButton.removeAttribute("disabled");
161 | } else {
162 | // Success case
163 | requestError.style.display = "none";
164 | connectionInfo.innerHTML = data.hostname + ":" + data.port;
165 | containerExpires.innerHTML = Math.ceil((new Date(data.expires * 1000) - new Date()) / 1000 / 60);
166 | requestResult.style.display = "";
167 | resetButton.removeAttribute("disabled");
168 | }
169 | console.log(data);
170 | };
171 | }
172 |
173 | // Function to renew the container instance
174 | function container_renew(challenge_id) {
175 | var path = "/containers/api/renew";
176 | var renewButton = document.getElementById("container-renew-btn");
177 | var requestResult = document.getElementById("container-request-result");
178 | var containerExpires = document.getElementById("container-expires");
179 | var requestError = document.getElementById("container-request-error");
180 |
181 | renewButton.setAttribute("disabled", "disabled");
182 |
183 | var xhr = new XMLHttpRequest();
184 | xhr.open("POST", path, true);
185 | xhr.setRequestHeader("Content-Type", "application/json");
186 | xhr.setRequestHeader("Accept", "application/json");
187 | xhr.setRequestHeader("CSRF-Token", init.csrfNonce);
188 | xhr.send(JSON.stringify({ chal_id: challenge_id }));
189 |
190 | xhr.onload = function () {
191 | var data = JSON.parse(this.responseText);
192 | if (data.error !== undefined) {
193 | // Handle container error
194 | requestError.style.display = "";
195 | requestError.firstElementChild.innerHTML = data.error;
196 | renewButton.removeAttribute("disabled");
197 | } else if (data.message !== undefined) {
198 | // Handle CTFd error
199 | requestError.style.display = "";
200 | requestError.firstElementChild.innerHTML = data.message;
201 | renewButton.removeAttribute("disabled");
202 | } else {
203 | // Success case
204 | requestError.style.display = "none";
205 | requestResult.style.display = "";
206 | containerExpires.innerHTML = Math.ceil((new Date(data.expires * 1000) - new Date()) / 1000 / 60);
207 | renewButton.removeAttribute("disabled");
208 | }
209 | console.log(data);
210 | };
211 | }
212 |
213 | // Function to stop the container instance
214 | function container_stop(challenge_id) {
215 | var path = "/containers/api/stop";
216 | var stopButton = document.getElementById("container-stop-btn");
217 | var requestResult = document.getElementById("container-request-result");
218 | var connectionInfo = document.getElementById("container-connection-info");
219 | var requestError = document.getElementById("container-request-error");
220 | var containerExpiresTime = document.getElementById("container-expires-time");
221 |
222 | stopButton.setAttribute("disabled", "disabled");
223 |
224 | var xhr = new XMLHttpRequest();
225 | xhr.open("POST", path, true);
226 | xhr.setRequestHeader("Content-Type", "application/json");
227 | xhr.setRequestHeader("Accept", "application/json");
228 | xhr.setRequestHeader("CSRF-Token", init.csrfNonce);
229 | xhr.send(JSON.stringify({ chal_id: challenge_id }));
230 |
231 | xhr.onload = function () {
232 | var data = JSON.parse(this.responseText);
233 | if (data.error !== undefined) {
234 | // Handle container error
235 | requestError.style.display = "";
236 | requestError.firstElementChild.innerHTML = data.error;
237 | stopButton.removeAttribute("disabled");
238 | } else if (data.message !== undefined) {
239 | // Handle CTFd error
240 | requestError.style.display = "";
241 | requestError.firstElementChild.innerHTML = data.message;
242 | stopButton.removeAttribute("disabled");
243 | } else {
244 | // Success case
245 | requestError.style.display = "none";
246 | requestResult.innerHTML = "Container stopped. Reopen this challenge to start another.";
247 | containerExpiresTime.style.display = "none"; // Hide expiration time
248 | }
249 | console.log(data);
250 | };
251 | }
252 |
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Containers",
3 | "route": "/containers/dashboard"
4 | }
5 |
--------------------------------------------------------------------------------
/container_challenge.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines the ContainerChallenge class, which extends BaseChallenge
3 | to provide functionality for container-based challenges in CTFd.
4 | """
5 |
6 | from __future__ import division
7 |
8 | import math
9 | from typing import Dict, Any, Optional
10 |
11 | from flask import Request
12 | from CTFd.models import db, Solves, Users, Teams
13 | from CTFd.plugins.challenges import BaseChallenge
14 | from CTFd.utils.modes import get_model
15 |
16 | from .models import ContainerChallengeModel
17 |
18 | class ContainerChallenge(BaseChallenge):
19 | """
20 | ContainerChallenge class for handling container-based challenges in CTFd.
21 |
22 | This class extends BaseChallenge and provides methods for reading,
23 | updating, and solving container challenges, as well as calculating
24 | their dynamic point values.
25 | """
26 |
27 | id: str = "container" # Unique identifier used to register challenges
28 | name: str = "container" # Name of a challenge type
29 | templates: Dict[str, str] = { # Handlebars templates used for each aspect of challenge editing & viewing
30 | "create": "/plugins/containers/assets/create.html",
31 | "update": "/plugins/containers/assets/update.html",
32 | "view": "/plugins/containers/assets/view.html",
33 | }
34 | scripts: Dict[str, str] = { # Scripts that are loaded when a template is loaded
35 | "create": "/plugins/containers/assets/create.js",
36 | "update": "/plugins/containers/assets/update.js",
37 | "view": "/plugins/containers/assets/view.js",
38 | }
39 | # Route at which files are accessible. This must be registered using register_plugin_assets_directory()
40 | route: str = "/plugins/containers/assets/"
41 |
42 | challenge_model = ContainerChallengeModel
43 |
44 | @classmethod
45 | def read(cls, challenge: ContainerChallengeModel) -> Dict[str, Any]:
46 | """
47 | Access the data of a challenge in a format processable by the front end.
48 |
49 | Args:
50 | challenge: The challenge object to read data from.
51 |
52 | Returns:
53 | A dictionary containing the challenge data for frontend processing.
54 | """
55 | data: Dict[str, Any] = {
56 | "id": challenge.id,
57 | "name": challenge.name,
58 | "value": challenge.value,
59 | "image": challenge.image,
60 | "port": challenge.port,
61 | "command": challenge.command,
62 | "initial": challenge.initial,
63 | "decay": challenge.decay,
64 | "minimum": challenge.minimum,
65 | "description": challenge.description,
66 | "connection_info": challenge.connection_info,
67 | "category": challenge.category,
68 | "state": challenge.state,
69 | "max_attempts": challenge.max_attempts,
70 | "type": challenge.type,
71 | "type_data": {
72 | "id": cls.id,
73 | "name": cls.name,
74 | "templates": cls.templates,
75 | "scripts": cls.scripts,
76 | },
77 | }
78 | return data
79 |
80 | @classmethod
81 | def calculate_value(cls, challenge: ContainerChallengeModel) -> ContainerChallengeModel:
82 | """
83 | Calculate the dynamic point value for a challenge based on solve count.
84 |
85 | Args:
86 | challenge: The challenge object to calculate value for.
87 |
88 | Returns:
89 | The challenge object with updated value.
90 | """
91 | Model = get_model()
92 |
93 | solve_count: int = (
94 | Solves.query.join(Model, Solves.account_id == Model.id)
95 | .filter(
96 | Solves.challenge_id == challenge.id,
97 | Model.hidden == False,
98 | Model.banned == False,
99 | )
100 | .count()
101 | )
102 |
103 | if solve_count != 0:
104 | solve_count -= 1
105 |
106 | value: float = (
107 | ((challenge.minimum - challenge.initial) / (challenge.decay ** 2))
108 | * (solve_count ** 2)
109 | ) + challenge.initial
110 |
111 | value = math.ceil(value)
112 |
113 | if value < challenge.minimum:
114 | value = challenge.minimum
115 |
116 | challenge.value = value
117 | db.session.commit()
118 | return challenge
119 |
120 | @classmethod
121 | def update(cls, challenge: ContainerChallengeModel, request: Request) -> ContainerChallengeModel:
122 | """
123 | Update the information associated with a challenge.
124 |
125 | Args:
126 | challenge: The challenge object to update.
127 | request: The request object containing the update data.
128 |
129 | Returns:
130 | The updated challenge object with recalculated value.
131 | """
132 | data: Dict[str, Any] = request.form or request.get_json()
133 |
134 | for attr, value in data.items():
135 | if attr in ("initial", "minimum", "decay"):
136 | value = float(value)
137 | setattr(challenge, attr, value)
138 |
139 | return ContainerChallenge.calculate_value(challenge)
140 |
141 | @classmethod
142 | def solve(cls, user: Users, team: Optional[Teams], challenge: ContainerChallengeModel, request: Request) -> None:
143 | """
144 | Handle the solving of a challenge by a user or team.
145 |
146 | Args:
147 | user: The user solving the challenge.
148 | team: The team solving the challenge.
149 | challenge: The challenge being solved.
150 | request: The request object associated with the solve attempt.
151 |
152 | Returns:
153 | None
154 | """
155 | super().solve(user, team, challenge, request)
156 |
157 | ContainerChallenge.calculate_value(challenge)
158 |
--------------------------------------------------------------------------------
/container_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines the ContainerManager class for managing Docker containers in CTFd.
3 | It also includes a custom ContainerException class for handling container-related errors.
4 | """
5 |
6 | import atexit
7 | import time
8 | import json
9 | import docker
10 | import paramiko.ssh_exception
11 | import requests
12 |
13 | from flask import Flask
14 | from apscheduler.schedulers.background import BackgroundScheduler
15 | from apscheduler.schedulers import SchedulerNotRunningError
16 |
17 | from CTFd.models import db
18 |
19 | from .models import ContainerInfoModel
20 |
21 | class ContainerException(Exception):
22 | """
23 | Custom exception class for container-related errors.
24 | """
25 | def __init__(self, *args: object) -> None:
26 | super().__init__(*args)
27 | if args:
28 | self.message = args[0]
29 | else:
30 | self.message = None
31 |
32 | def __str__(self) -> str:
33 | if self.message:
34 | return self.message
35 | else:
36 | return "Unknown Container Exception"
37 |
38 | class ContainerManager:
39 | """
40 | Manages Docker containers for CTFd challenges.
41 | """
42 | def __init__(self, settings, app):
43 | """
44 | Initialize the ContainerManager.
45 |
46 | Args:
47 | settings (dict): Configuration settings for the container manager.
48 | app (Flask): The Flask application instance.
49 | """
50 | self.settings = settings
51 | self.client = None
52 | self.app = app
53 | if settings.get("docker_base_url") is None or settings.get("docker_base_url") == "":
54 | return
55 |
56 | # Connect to the docker daemon
57 | try:
58 | self.initialize_connection(settings, app)
59 | except ContainerException:
60 | print("Docker could not initialize or connect.")
61 | return
62 |
63 | def initialize_connection(self, settings, app) -> None:
64 | """
65 | Initialize the connection to the Docker daemon.
66 |
67 | Args:
68 | settings (dict): Configuration settings for the container manager.
69 | app (Flask): The Flask application instance.
70 |
71 | Raises:
72 | ContainerException: If unable to connect to Docker.
73 | """
74 | self.settings = settings
75 | self.app = app
76 |
77 | # Remove any leftover expiration schedulers
78 | try:
79 | self.expiration_scheduler.shutdown()
80 | except (SchedulerNotRunningError, AttributeError):
81 | # Scheduler was never running
82 | pass
83 |
84 | if settings.get("docker_base_url") is None:
85 | self.client = None
86 | return
87 |
88 | try:
89 | self.client = docker.DockerClient(
90 | base_url=settings.get("docker_base_url"))
91 | except (docker.errors.DockerException) as e:
92 | self.client = None
93 | raise ContainerException("CTFd could not connect to Docker")
94 | except TimeoutError as e:
95 | self.client = None
96 | raise ContainerException(
97 | "CTFd timed out when connecting to Docker")
98 | except paramiko.ssh_exception.NoValidConnectionsError as e:
99 | self.client = None
100 | raise ContainerException(
101 | "CTFd timed out when connecting to Docker: " + str(e))
102 | except paramiko.ssh_exception.AuthenticationException as e:
103 | self.client = None
104 | raise ContainerException(
105 | "CTFd had an authentication error when connecting to Docker: " + str(e))
106 |
107 | # Set up expiration scheduler
108 | try:
109 | self.expiration_seconds = int(
110 | settings.get("container_expiration", 0)) * 60
111 | except (ValueError, AttributeError):
112 | self.expiration_seconds = 0
113 |
114 | EXPIRATION_CHECK_INTERVAL = 5
115 |
116 | if self.expiration_seconds > 0:
117 | self.expiration_scheduler = BackgroundScheduler()
118 | self.expiration_scheduler.add_job(
119 | func=self.kill_expired_containers, args=(app,), trigger="interval", seconds=EXPIRATION_CHECK_INTERVAL)
120 | self.expiration_scheduler.start()
121 |
122 | # Shut down the scheduler when exiting the app
123 | atexit.register(lambda: self.expiration_scheduler.shutdown())
124 |
125 | def run_command(func):
126 | """
127 | Decorator to ensure Docker connection is active before running a command.
128 | """
129 | def wrapper_run_command(self, *args, **kwargs):
130 | if self.client is None:
131 | try:
132 | self.__init__(self.settings, self.app)
133 | except Exception:
134 | raise ContainerException("Docker is not connected")
135 | try:
136 | if self.client is None:
137 | raise ContainerException("Docker is not connected")
138 | if self.client.ping():
139 | return func(self, *args, **kwargs)
140 | except (paramiko.ssh_exception.SSHException, ConnectionError, requests.exceptions.ConnectionError) as e:
141 | # Try to reconnect before failing
142 | try:
143 | self.__init__(self.settings, self.app)
144 | except Exception:
145 | pass
146 | raise ContainerException(
147 | "Docker connection was lost. Please try your request again later.")
148 | return wrapper_run_command
149 |
150 | @run_command
151 | def kill_expired_containers(self, app: Flask):
152 | """
153 | Kill containers that have expired.
154 |
155 | Args:
156 | app (Flask): The Flask application instance.
157 | """
158 | with app.app_context():
159 | containers: "list[ContainerInfoModel]" = ContainerInfoModel.query.all()
160 |
161 | for container in containers:
162 | delta_seconds = container.expires - int(time.time())
163 | if delta_seconds < 0:
164 | try:
165 | self.kill_container(container.container_id)
166 | except ContainerException:
167 | print(
168 | "[Container Expiry Job] Docker is not initialized. Please check your settings.")
169 |
170 | db.session.delete(container)
171 | db.session.commit()
172 |
173 | @run_command
174 | def is_container_running(self, container_id: str) -> bool:
175 | """
176 | Check if a container is running.
177 |
178 | Args:
179 | container_id (str): The ID of the container to check.
180 |
181 | Returns:
182 | bool: True if the container is running, False otherwise.
183 | """
184 | container = self.client.containers.list(filters={"id": container_id})
185 | if len(container) == 0:
186 | return False
187 | return container[0].status == "running"
188 |
189 | @run_command
190 | def create_container(self, image: str, port: int, command: str, volumes: str):
191 | """
192 | Create a new Docker container.
193 |
194 | Args:
195 | image (str): The Docker image to use.
196 | port (int): The port to expose.
197 | command (str): The command to run in the container.
198 | volumes (str): JSON string representing volume configurations.
199 |
200 | Returns:
201 | docker.models.containers.Container: The created container.
202 |
203 | Raises:
204 | ContainerException: If the container creation fails.
205 | """
206 | kwargs = {}
207 |
208 | # Set the memory and CPU limits for the container
209 | if self.settings.get("container_maxmemory"):
210 | try:
211 | mem_limit = int(self.settings.get("container_maxmemory"))
212 | if mem_limit > 0:
213 | kwargs["mem_limit"] = f"{mem_limit}m"
214 | except ValueError:
215 | raise ContainerException(
216 | "Configured container memory limit must be an integer")
217 | if self.settings.get("container_maxcpu"):
218 | try:
219 | cpu_period = float(self.settings.get("container_maxcpu"))
220 | if cpu_period > 0:
221 | kwargs["cpu_quota"] = int(cpu_period * 100000)
222 | kwargs["cpu_period"] = 100000
223 | except ValueError:
224 | raise ContainerException(
225 | "Configured container CPU limit must be a number")
226 |
227 | if volumes is not None and volumes != "":
228 | print("Volumes:", volumes)
229 | try:
230 | volumes_dict = json.loads(volumes)
231 | kwargs["volumes"] = volumes_dict
232 | except json.decoder.JSONDecodeError:
233 | raise ContainerException("Volumes JSON string is invalid")
234 |
235 | try:
236 | return self.client.containers.run(
237 | image,
238 | ports={str(port): None},
239 | command=command,
240 | detach=True,
241 | auto_remove=True,
242 | **kwargs
243 | )
244 | except docker.errors.ImageNotFound:
245 | raise ContainerException("Docker image not found")
246 |
247 | @run_command
248 | def get_container_port(self, container_id: str) -> "str|None":
249 | """
250 | Get the host port that a container's port is mapped to.
251 |
252 | Args:
253 | container_id (str): The ID of the container.
254 |
255 | Returns:
256 | str|None: The host port, or None if not found.
257 | """
258 | try:
259 | for port in list(self.client.containers.get(container_id).ports.values()):
260 | if port is not None:
261 | return port[0]["HostPort"]
262 | except (KeyError, IndexError):
263 | return None
264 |
265 | @run_command
266 | def get_images(self) -> "list[str]":
267 | """
268 | Get a list of available Docker images.
269 |
270 | Returns:
271 | list[str]: A sorted list of image tags (empty if the lookup fails or no images are tagged).
272 | """
273 | try:
274 | images = self.client.images.list()
275 | except (KeyError, IndexError):
276 | return []
277 |
278 | images_list = []
279 | for image in images:
280 | if len(image.tags) > 0:
281 | images_list.append(image.tags[0])
282 |
283 | images_list.sort()
284 | return images_list
285 |
286 | @run_command
287 | def kill_container(self, container_id: str):
288 | """
289 | Kill a running container.
290 |
291 | Args:
292 | container_id (str): The ID of the container to kill.
293 | """
294 | try:
295 | self.client.containers.get(container_id).kill()
296 | except docker.errors.NotFound:
297 | pass
298 |
299 | def is_connected(self) -> bool:
300 | """
301 | Check if the Docker client is connected.
302 |
303 | Returns:
304 | bool: True if connected, False otherwise.
305 | """
306 | try:
307 | self.client.ping()
308 | except Exception:
309 | return False
310 | return True
311 |
--------------------------------------------------------------------------------
/docs/Images/create_chall.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bigyls/CTFdDockerContainersPlugin/dc9372cd88d83c2540ac10b49f17d9dd5a3117b1/docs/Images/create_chall.png
--------------------------------------------------------------------------------
/docs/Images/dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bigyls/CTFdDockerContainersPlugin/dc9372cd88d83c2540ac10b49f17d9dd5a3117b1/docs/Images/dashboard.png
--------------------------------------------------------------------------------
/docs/Images/dialog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bigyls/CTFdDockerContainersPlugin/dc9372cd88d83c2540ac10b49f17d9dd5a3117b1/docs/Images/dialog.png
--------------------------------------------------------------------------------
/docs/Images/docker_images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bigyls/CTFdDockerContainersPlugin/dc9372cd88d83c2540ac10b49f17d9dd5a3117b1/docs/Images/docker_images.png
--------------------------------------------------------------------------------
/docs/Images/settings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bigyls/CTFdDockerContainersPlugin/dc9372cd88d83c2540ac10b49f17d9dd5a3117b1/docs/Images/settings.png
--------------------------------------------------------------------------------
/docs/Images/tcp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bigyls/CTFdDockerContainersPlugin/dc9372cd88d83c2540ac10b49f17d9dd5a3117b1/docs/Images/tcp.png
--------------------------------------------------------------------------------
/docs/Images/web.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bigyls/CTFdDockerContainersPlugin/dc9372cd88d83c2540ac10b49f17d9dd5a3117b1/docs/Images/web.png
--------------------------------------------------------------------------------
/logs.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides a custom logging system for the CTFd containers plugin.
3 | It includes a CustomFormatter, LoggerFilter, and LoggingManager to handle
4 | specialized logging requirements.
5 | """
6 |
7 | import os
8 | import logging
9 | import logging.handlers
10 | from flask import has_request_context, request
11 | from CTFd.utils.user import get_current_user
12 |
13 | class CustomFormatter(logging.Formatter):
14 | """
15 | A custom formatter for log records that includes IP address and user ID.
16 | """
17 | def format(self, record):
18 | """
19 | Format the specified record.
20 |
21 | Args:
22 | record (LogRecord): The log record to format.
23 |
24 | Returns:
25 | str: The formatted log record.
26 | """
27 | user = get_current_user() # Get the current user object.
28 | if has_request_context():
29 | ip = request.remote_addr # Get the remote IP address of the request.
30 | if ip is not None and ip != "" and ip != "None":
31 | record.ip = ip # Assign IP address to the log record.
32 | else:
33 | record.ip = "Unknown" # Default value for unknown IP.
34 | else:
35 | record.ip = "N/A" # Set to N/A if not in request context.
36 |
37 | if '%' in record.msg:
38 | record.formatted_message = record.msg % record.__dict__ # Format message using old-style formatting.
39 | else:
40 | record.formatted_message = record.msg.format(**record.__dict__) # Format message using new-style formatting.
41 |
42 | record.loglevel = record.levelname # Store the log level in the record.
43 | record.user_id = user.id if user else 'Unknown' # Store the user ID in the record.
44 | return super().format(record) # Call the parent class's format method.
45 |
46 | class LoggerFilter(logging.Filter):
47 | """
48 | A filter that only allows records from a specific logger.
49 | """
50 | def __init__(self, logger_name):
51 | """
52 | Initialize the LoggerFilter.
53 |
54 | Args:
55 | logger_name (str): The name of the logger to allow.
56 | """
57 | super().__init__()
58 | self.logger_name = logger_name # Store the logger name.
59 |
60 | def filter(self, record):
61 | """
62 | Check if the record should be logged.
63 |
64 | Args:
65 | record (LogRecord): The log record to check.
66 |
67 | Returns:
68 | bool: True if the record should be logged, False otherwise.
69 | """
70 | return record.name == self.logger_name # Only allow records that match the logger name.
71 |
72 | class LoggingManager:
73 | """
74 | A singleton class to manage loggers for the containers plugin.
75 | """
76 | _instance = None # Class-level instance variable for singleton pattern.
77 |
78 | def __new__(cls):
79 | if cls._instance is None:
80 | cls._instance = super(LoggingManager, cls).__new__(cls) # Create a new instance if none exists.
81 | cls._instance.loggers = {} # Initialize an empty dictionary for loggers.
82 | return cls._instance # Return the singleton instance.
83 |
84 | def init_logs(self, app, log_levels=None):
85 | """
86 | Initialize loggers for the containers plugin.
87 |
88 | Args:
89 | app (Flask): The Flask application instance.
90 | log_levels (dict, optional): A dictionary of logger names and their log levels.
91 | """
92 | if log_levels is None:
93 | log_levels = {
94 | "containers_actions": logging.INFO,
95 | "containers_errors": logging.ERROR,
96 | "containers_debug": logging.DEBUG,
97 | } # Default log levels if none are provided.
98 |
99 | log_dir = app.config.get("LOG_FOLDER", "logs") # Get the log directory from app config.
100 | if not os.path.exists(log_dir):
101 | os.makedirs(log_dir) # Create the log directory if it doesn't exist.
102 |
103 | log_file = os.path.join(log_dir, "containers.log") # Specify the log file name.
104 |
105 | # Create a formatter for the logs.
106 | formatter = CustomFormatter('%(asctime)s|%(loglevel)s|IP:%(ip)s|USER_ID:%(user_id)s|%(formatted_message)s')
107 |
108 | for logger_name, level in log_levels.items():
109 | logger = logging.getLogger(logger_name) # Get the logger by name.
110 | logger.setLevel(level) # Set the logger's level.
111 |
112 | handler = logging.handlers.RotatingFileHandler(
113 | log_file, maxBytes=10485760, backupCount=5 # Create a rotating file handler for the logs.
114 | )
115 | handler.setFormatter(formatter) # Set the custom formatter for the handler.
116 | handler.addFilter(LoggerFilter(logger_name)) # Add the filter to the handler.
117 |
118 | logger.addHandler(handler) # Attach the handler to the logger.
119 | logger.propagate = False # Prevent log messages from being propagated to ancestor loggers.
120 |
121 | self.loggers[logger_name] = logger # Store the logger in the instance dictionary.
122 |
123 | def log(self, logger_name, format, **kwargs):
124 | """
125 | Log a message using the specified logger.
126 |
127 | Args:
128 | logger_name (str): The name of the logger to use.
129 | format (str): The message format string.
130 | **kwargs: Additional keyword arguments to be passed to the logger.
131 |
132 | Raises:
133 | ValueError: If the specified logger is not found.
134 | """
135 | logger = self.loggers.get(logger_name) # Retrieve the logger from the instance.
136 | if logger is None:
137 | raise ValueError(f"Unknown logger: {logger_name}") # Raise error if logger not found.
138 |
139 | # Determine the logging method based on logger name.
140 | if "errors" in logger_name:
141 | log_method = logger.error
142 | elif "debug" in logger_name:
143 | log_method = logger.debug
144 | else:
145 | log_method = logger.info
146 |
147 | log_method(format, extra=kwargs) # Log the message using the determined method.
148 |
149 | logging_manager = LoggingManager() # Create a singleton instance of LoggingManager.
150 |
151 | def init_logs(app):
152 | """
153 | Initialize the logging system for the containers plugin.
154 |
155 | Args:
156 | app (Flask): The Flask application instance.
157 | """
158 | logging_manager.init_logs(app) # Call the init_logs method of the logging manager.
159 |
160 | def log(logger_name, format, **kwargs):
161 | """
162 | Log a message using the specified logger.
163 |
164 | Args:
165 | logger_name (str): The name of the logger to use.
166 | format (str): The message format string.
167 | **kwargs: Additional keyword arguments to be passed to the logger.
168 | """
169 | logging_manager.log(logger_name, format, **kwargs) # Log the message through the logging manager.
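
# Example usage (placeholders in `format` are filled from the keyword arguments,
# matching the calls made in routes.py):
#   log("containers_actions", format="CHALL_ID:{challenge_id}|Container started",
#       challenge_id=challenge.id)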
170 |
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines the database models for the containers plugin in CTFd.
3 | It includes models for container challenges, container information, and container settings.
4 | """
5 |
6 | from sqlalchemy.sql import func
7 | from sqlalchemy.orm import relationship
8 |
9 | from CTFd.models import db
10 | from CTFd.models import Challenges
11 |
12 | class ContainerChallengeModel(Challenges):
13 | """
14 | Represents a container-based challenge in CTFd.
15 |
16 | This model extends the base Challenges model with additional fields
17 | specific to container challenges.
18 | """
19 | __mapper_args__ = {"polymorphic_identity": "container"}
20 | id = db.Column(
21 | db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True
22 | ) # Unique identifier for each container challenge.
23 | image = db.Column(db.Text) # Docker image used for the container challenge.
24 | port = db.Column(db.Integer) # Port number the container listens on.
25 | command = db.Column(db.Text, default="") # Command to run inside the container.
26 | volumes = db.Column(db.Text, default="") # Volume mappings for the container.
27 |
28 | # Dynamic challenge properties
29 | initial = db.Column(db.Integer, default=0) # Initial point value for the challenge.
30 | minimum = db.Column(db.Integer, default=0) # Minimum point value after decay.
31 | decay = db.Column(db.Integer, default=0) # Decay: number of solves over which the value falls toward the minimum.
32 |
33 | def __init__(self, *args, **kwargs):
34 | """
35 | Initialize a new ContainerChallengeModel instance.
36 |
37 | Args:
38 | *args: Variable length argument list.
39 | **kwargs: Arbitrary keyword arguments.
40 | """
41 | super(ContainerChallengeModel, self).__init__(**kwargs)
42 | self.value = kwargs["initial"] # Set the initial point value from the given arguments.
43 |
44 | class ContainerInfoModel(db.Model):
45 | """
46 | Represents information about a running container instance.
47 |
48 | This model stores details about container instances created for challenges,
49 | including which user or team the container belongs to.
50 | """
51 | __mapper_args__ = {"polymorphic_identity": "container_info"}
52 | container_id = db.Column(db.String(512), primary_key=True) # Unique container ID.
53 | challenge_id = db.Column(
54 | db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")
55 | ) # Associated challenge ID for the container.
56 | user_id = db.Column(
57 | db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")
58 | ) # ID of the user who owns the container.
59 | team_id = db.Column(
60 | db.Integer, db.ForeignKey("teams.id", ondelete="CASCADE")
61 | ) # ID of the team who owns the container.
62 | port = db.Column(db.Integer) # Port number for the container instance.
63 | timestamp = db.Column(db.Integer) # Creation timestamp of the container.
64 | expires = db.Column(db.Integer) # Expiration timestamp for the container.
65 |
66 | # Relationships to link container to user, team, and challenge.
67 | user = db.relationship("Users", foreign_keys=[user_id])
68 | team = db.relationship("Teams", foreign_keys=[team_id])
69 | challenge = db.relationship(ContainerChallengeModel,
70 | foreign_keys=[challenge_id])
71 |
72 | class ContainerSettingsModel(db.Model):
73 | """
74 | Represents configuration settings for the containers plugin.
75 |
76 | This model stores key-value pairs for various settings related to
77 | container management in the CTFd platform.
78 | """
79 | key = db.Column(db.String(512), primary_key=True) # Setting key.
80 | value = db.Column(db.Text) # Setting value.
81 |
82 | @classmethod
83 | def apply_default_config(cls, key, value):
84 | """
85 | Set the default configuration for a container setting.
86 |
87 | Args:
88 | key (str): The setting key.
89 | value (str): The setting value.
90 | """
91 | # If the setting is not already in the database, add it as a new entry.
92 | if not cls.query.filter_by(key=key).first():
93 | db.session.add(cls(key=key, value=value))
94 |
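# Example: seeding a default setting (key name from routes.py, value illustrative);
# setup.py's setup_default_configs presumably applies such defaults:
#   ContainerSettingsModel.apply_default_config("docker_assignment", "user")
#   db.session.commit()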
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | docker
2 | paramiko
3 | apscheduler
4 |
--------------------------------------------------------------------------------
/routes.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines the routes and blueprint for the containers plugin in CTFd.
3 | It handles container management operations such as running, requesting, renewing,
4 | resetting, stopping, and purging containers, as well as administrative functions
5 | like updating settings and viewing the container dashboard.
6 | """
7 |
8 | import datetime
9 |
10 | from flask import Blueprint, request, Flask, render_template, url_for, redirect, current_app
11 |
12 | from CTFd.models import db
13 | from CTFd.utils.decorators import authed_only, admins_only, during_ctf_time_only, ratelimit, require_verified_emails
14 | from CTFd.utils.user import get_current_user
15 |
16 | # Import custom modules and helper functions for managing containers
17 | from .logs import log
18 | from .models import ContainerInfoModel, ContainerSettingsModel
19 | from .container_manager import ContainerManager
20 | from .container_challenge import ContainerChallenge
21 | from .routes_helper import create_container, renew_container, kill_container  # format_time_filter is defined locally below
22 |
23 | # Blueprint definition for the containers module
24 | containers_bp = Blueprint(
25 | 'containers', __name__, template_folder='templates', static_folder='assets', url_prefix='/containers')
26 |
27 | def settings_to_dict(settings):
28 | """
29 | Convert settings objects to a dictionary.
30 |
31 | Args:
32 | settings (list): A list of settings model objects.
33 |
34 | Returns:
35 | dict: A dictionary with setting keys and values.
36 | """
37 | return {setting.key: setting.value for setting in settings}
38 |
39 | def register_app(app: Flask):
40 | """
41 | Register the containers blueprint with the Flask app.
42 |
43 | Args:
44 | app (Flask): The Flask application instance.
45 |
46 | Returns:
47 | Blueprint: The registered containers blueprint.
48 | """
49 | container_settings = settings_to_dict(ContainerSettingsModel.query.all())
50 | log("containers_debug", format="Registering containers blueprint with settings: {settings}",
51 | settings=container_settings)
52 |
53 | # Initialize a global container manager using the app context and settings
54 | app.container_manager = ContainerManager(container_settings, app)
55 | return containers_bp
56 |
57 | def format_time_filter(timestamp):
58 | """
59 | Format a timestamp into a readable string.
60 |
61 | Args:
62 | timestamp (float): Unix timestamp.
63 |
64 | Returns:
65 | str: A human-readable formatted timestamp.
66 | """
67 | return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
68 |
69 | # Register the time formatting filter for templates
70 | containers_bp.app_template_filter("format_time")(format_time_filter)
71 |
72 | @containers_bp.route('/api/running', methods=['POST'])
73 | @authed_only
74 | @during_ctf_time_only
75 | @require_verified_emails
76 | @ratelimit(method="POST", limit=100, interval=300, key_prefix='rl_running_container_post')
77 | def route_running_container():
78 | """
79 | Check if a container is running for a given challenge.
80 |
81 | This route verifies if a user already has an active container for a challenge
82 | based on the challenge ID (`chal_id`) sent in the request.
83 |
84 | Returns:
85 | dict: JSON response with the container status.
86 | """
87 | user = get_current_user()
88 | log("containers_debug", format="Checking running container status")
89 |
90 | # Validate the request parameters
91 | if request.json is None or request.json.get("chal_id") is None or user is None:
92 | log("containers_errors", format="Invalid request to /api/running")
93 | return {"error": "Invalid request"}, 400
94 |
95 | try:
96 | # Fetch challenge information
97 | challenge = ContainerChallenge.challenge_model.query.filter_by(id=request.json.get("chal_id")).first()
98 | if challenge is None:
99 | log("containers_errors", format="CHALL_ID:{challenge_id}|Challenge not found during running container check",
100 | challenge_id=request.json.get("chal_id"))
101 | return {"error": "An error occurred."}, 500
102 |
103 | docker_assignment = current_app.container_manager.settings.get("docker_assignment")
104 | log("containers_debug", format="CHALL_ID:{challenge_id}|Docker assignment mode: {mode}",
105 | challenge_id=challenge.id,
106 | mode=docker_assignment)
107 |
108 | # Determine container ownership based on assignment type
109 | if docker_assignment in ["user", "unlimited"]:
110 | running_container = ContainerInfoModel.query.filter_by(
111 | challenge_id=challenge.id,
112 | user_id=user.id).first()
113 | else:
114 | running_container = ContainerInfoModel.query.filter_by(
115 | challenge_id=challenge.id, team_id=user.team_id).first()
116 |
117 | # Return the status of the container (running or stopped)
118 | if running_container:
119 | log("containers_actions", format="CHALL_ID:{challenge_id}|Container '{container_id}' already running",
120 | challenge_id=challenge.id,
121 | container_id=running_container.container_id)
122 | return {"status": "already_running", "container_id": request.json.get("chal_id")}, 200
123 | else:
124 | log("containers_actions", format="CHALL_ID:{challenge_id}|No running container found",
125 | challenge_id=challenge.id)
126 | return {"status": "stopped", "container_id": request.json.get("chal_id")}, 200
127 |
128 | except Exception as err:
129 | log("containers_errors", format="CHALL_ID:{challenge_id}|Error checking running container status ({error})",
130 | challenge_id=request.json.get("chal_id"),
131 | error=str(err))
132 | return {"error": "An error has occurred."}, 500
133 |
134 | @containers_bp.route('/api/request', methods=['POST'])
135 | @authed_only
136 | @during_ctf_time_only
137 | @require_verified_emails
138 | @ratelimit(method="POST", limit=100, interval=300, key_prefix='rl_request_container_post')
139 | def route_request_container():
140 | """
141 | Request a new container for a challenge.
142 | """
143 | user = get_current_user()
144 | log("containers_debug", format="Requesting container")
145 |
146 | if request.json is None or request.json.get("chal_id") is None or user is None:
147 | log("containers_errors", format="Invalid request to /api/request")
148 | return {"error": "Invalid request"}, 400
149 |
150 | try:
151 | docker_assignment = current_app.container_manager.settings.get("docker_assignment")
152 | log("containers_debug", format="CHALL_ID:{challenge_id}|Docker assignment mode: {mode}",
153 | challenge_id=request.json.get("chal_id"),
154 | mode=docker_assignment)
155 |
156 | return create_container(current_app.container_manager, request.json.get("chal_id"), user.id, user.team_id, docker_assignment)
157 | except Exception as err:
158 | log("containers_errors", format="CHALL_ID:{challenge_id}|Error during container creation ({error})",
159 | challenge_id=request.json.get("chal_id"),
160 | error=str(err))
161 | return {"error": "An error has occured."}, 500
162 |
163 | @containers_bp.route('/api/renew', methods=['POST'])
164 | @authed_only
165 | @during_ctf_time_only
166 | @require_verified_emails
167 | @ratelimit(method="POST", limit=100, interval=300, key_prefix='rl_renew_container_post')
168 | def route_renew_container():
169 | """
170 | Renew an existing container for a challenge.
171 | """
172 | user = get_current_user()
173 | log("containers_debug", format="Requesting container renewal")
174 |
175 | if request.json is None or request.json.get("chal_id") is None or user is None:
176 | log("containers_errors", format="Invalid request to /api/renew")
177 | return {"error": "Invalid request"}, 400
178 |
179 | try:
180 | docker_assignment = current_app.container_manager.settings.get("docker_assignment")
181 | log("containers_debug", format="CHALL_ID:{challenge_id}|Docker assignment mode: {mode}",
182 | challenge_id=request.json.get("chal_id"),
183 | mode=docker_assignment)
184 |
185 | return renew_container(current_app.container_manager, request.json.get("chal_id"), user.id, user.team_id, docker_assignment)
186 | except Exception as err:
187 | log("containers_errors", format="CHALL_ID:{challenge_id}|Error during container renewal ({error})",
188 | challenge_id=request.json.get("chal_id"),
189 | error=str(err))
190 | return {"error": "An error has occurred."}, 500
191 |
192 | @containers_bp.route('/api/reset', methods=['POST'])
193 | @authed_only
194 | @during_ctf_time_only
195 | @require_verified_emails
196 | @ratelimit(method="POST", limit=100, interval=300, key_prefix='rl_restart_container_post')
197 | def route_restart_container():
198 | """
199 | Restart a container for a challenge.
200 | """
201 | user = get_current_user()
202 | log("containers_debug", format="Requesting container reset")
203 |
204 | if request.json is None or request.json.get("chal_id") is None or user is None:
205 | log("containers_errors", format="Invalid request to /api/reset")
206 | return {"error": "Invalid request"}, 400
207 |
208 | docker_assignment = current_app.container_manager.settings.get("docker_assignment")
209 | log("containers_debug", format="CHALL_ID:{challenge_id}|Docker assignment mode: {mode}",
210 | challenge_id=request.json.get("chal_id"),
211 | mode=docker_assignment)
212 |
213 | if docker_assignment in ["user", "unlimited"]:
214 | running_container = ContainerInfoModel.query.filter_by(
215 | challenge_id=request.json.get("chal_id"),
216 | user_id=user.id).first()
217 | else:
218 | running_container = ContainerInfoModel.query.filter_by(
219 | challenge_id=request.json.get("chal_id"), team_id=user.team_id).first()
220 |
221 | if running_container:
222 | log("containers_actions", format="CHALL_ID:{challenge_id}|Resetting container '{container_id}'",
223 | challenge_id=request.json.get("chal_id"),
224 | container_id=running_container.container_id)
225 | kill_container(current_app.container_manager, running_container.container_id, request.json.get("chal_id"))
226 |
227 | log("containers_actions", format="CHALL_ID:{challenge_id}|Recreating container",
228 | challenge_id=request.json.get("chal_id"))
229 | return create_container(current_app.container_manager, request.json.get("chal_id"), user.id, user.team_id, docker_assignment)
230 |
231 | @containers_bp.route('/api/stop', methods=['POST'])
232 | @authed_only
233 | @during_ctf_time_only
234 | @require_verified_emails
235 | @ratelimit(method="POST", limit=100, interval=300, key_prefix='rl_stop_container_post')
236 | def route_stop_container():
237 | """
238 | Stop a running container for a challenge.
239 | """
240 | user = get_current_user()
241 | log("containers_debug", format="Requesting container stop")
242 |
243 | if request.json is None or request.json.get("chal_id") is None or user is None:
244 | log("containers_errors", format="Invalid request to /api/stop")
245 | return {"error": "Invalid request"}, 400
246 |
247 | docker_assignment = current_app.container_manager.settings.get("docker_assignment")
248 | log("containers_debug", format="CHALL_ID:{challenge_id}|Docker assignment mode: {mode}",
249 | challenge_id=request.json.get("chal_id"),
250 | mode=docker_assignment)
251 |
252 | if docker_assignment in ["user", "unlimited"]:
253 | running_container = ContainerInfoModel.query.filter_by(
254 | challenge_id=request.json.get("chal_id"),
255 | user_id=user.id).first()
256 | else:
257 | running_container = ContainerInfoModel.query.filter_by(
258 | challenge_id=request.json.get("chal_id"), team_id=user.team_id).first()
259 |
260 | if running_container:
261 | log("containers_actions", format="CHALL_ID:{challenge_id}|Stopping container '{container_id}'",
262 | challenge_id=request.json.get("chal_id"),
263 | container_id=running_container.container_id)
264 | return kill_container(current_app.container_manager, running_container.container_id, request.json.get("chal_id"))
265 |
266 | log("containers_errors", format="CHALL_ID:{challenge_id}|No running container found to stop",
267 | challenge_id=request.json.get("chal_id"))
268 | return {"error": "No running container found."}, 400
269 |
270 | @containers_bp.route('/api/kill', methods=['POST'])
271 | @admins_only
272 | def route_kill_container():
273 | """
274 | Admin route to kill a specific container by its ID.
275 | """
276 | # Validate the request data to ensure a container ID is provided
277 | if request.json is None or request.json.get("container_id") is None:
278 | log("containers_errors", format="Invalid request to /api/kill")
279 | return {"error": "Invalid request"}, 400
280 |
281 | # Extract the container ID and perform the kill operation
282 | container_id = request.json.get("container_id")
283 | log("containers_actions", format="Admin killing container '{container_id}'", container_id=container_id)
284 | return kill_container(current_app.container_manager, container_id, "N/A")
285 |
286 | @containers_bp.route('/api/purge', methods=['POST'])
287 | @admins_only
288 | def route_purge_containers():
289 | """
290 | Admin route to purge (stop and delete) all containers currently managed by the application.
291 |
292 | This endpoint allows administrators to kill all active containers at once. It should be used with caution,
293 | as it will affect all active users and their challenge sessions.
294 | """
295 | log("containers_actions", format="Requesting container purge")
296 |
297 | # Retrieve all containers stored in the database
298 | containers = ContainerInfoModel.query.all()
299 |
300 | # Loop through each container and attempt to stop/kill it
301 | for container in containers:
302 | try:
303 | log("containers_actions", format="Admin killing container '{container_id}'",
304 | container_id=container.container_id)
305 | # Attempt to kill the container using its ID
306 | kill_container(current_app.container_manager, container.container_id, "N/A")
307 | except Exception as err:
308 | # Log any errors encountered while killing individual containers
309 | log("containers_errors", format="Error during purging container '{container_id}' ({error})",
310 | container_id=container.container_id,
311 | error=str(err))
312 |
313 | # Log the successful completion of the purge operation
314 | log("containers_actions", format="Admin completed container purge")
315 | return {"success": "Purged all containers"}, 200
316 |
317 | @containers_bp.route('/api/images', methods=['GET'])
318 | @admins_only
319 | def route_get_images():
320 | """
321 | Admin route to retrieve a list of all available Docker images.
322 |
323 | This endpoint provides administrators with a list of Docker images that are available for container creation.
324 | The list is fetched directly from the Docker registry managed by the container manager.
325 | """
326 | log("containers_debug", format="Admin requesting Docker images list")
327 |
328 | try:
329 | # Attempt to retrieve the list of available Docker images
330 | images = current_app.container_manager.get_images()
331 | # Log the number of images successfully retrieved
332 | log("containers_actions", format="Admin successfully retrieved {count} Docker images",
333 | count=len(images))
334 |
335 | # Return the list of images as a JSON response
336 | return {"images": images}, 200
337 | except Exception as err:
338 | # Log any errors encountered during the process of fetching Docker images
339 | log("containers_errors", format="Admin encountered error while fetching Docker images ({error})",
340 | error=str(err))
341 | return {"error": "An error has occurred."}, 500
342 |
343 | @containers_bp.route('/api/settings/update', methods=['POST'])
344 | @admins_only
345 | def route_update_settings():
346 | """
347 | Admin route to update container settings.
348 |
349 | This route allows administrators to modify container-related configurations such as Docker base URL,
350 | hostname, expiration time, memory, and CPU settings. These settings are used by the container manager
351 | to handle container creation and management.
352 | """
353 | log("containers_debug", format="Admin initiating settings update")
354 |
355 | # Define the list of required fields that must be present in the request form
356 | required_fields = [
357 | "docker_base_url", "docker_hostname", "container_expiration",
358 | "container_maxmemory", "container_maxcpu", "docker_assignment"
359 | ]
360 |
361 | # Check if any required field is missing and log an error if found
362 | for field in required_fields:
363 | if request.form.get(field) is None:
364 | log("containers_errors", format="Admin settings update failed: Missing required field {field}",
365 | field=field)
366 | return {"error": f"Missing required field: {field}"}, 400
367 |
368 | # Retrieve the settings from the request and store them in a dictionary
369 | settings = {
370 | "docker_base_url": request.form.get("docker_base_url"),
371 | "docker_hostname": request.form.get("docker_hostname"),
372 | "container_expiration": request.form.get("container_expiration"),
373 | "container_maxmemory": request.form.get("container_maxmemory"),
374 | "container_maxcpu": request.form.get("container_maxcpu"),
375 | "docker_assignment": request.form.get("docker_assignment")
376 | }
377 |
378 | try:
379 | # Update or create each setting in the database
380 | for key, value in settings.items():
381 | setting = ContainerSettingsModel.query.filter_by(key=key).first()
382 | if setting is None:
383 | # Create a new setting if it doesn't exist in the database
384 | new_setting = ContainerSettingsModel(key=key, value=value)
385 | db.session.add(new_setting)
386 | log("containers_actions", format=f"Admin created '{key}' setting DB row")
387 | else:
388 | # Update the setting if it already exists and log the change
389 | old_value = setting.value
390 | if old_value != value:
391 | setting.value = value
392 | log("containers_actions", format="Admin updated '{key}' setting DB row ({old_value} => {new_value})",
393 | key=key, old_value=old_value, new_value=value)
394 | except Exception as err:
395 | # Log any errors encountered during the update operation
396 | log("containers_errors", format="Admin encountered error while updating settings ({error})",
397 | error=str(err))
398 | return {"error": "An error has occurred."}, 500
399 |
400 | try:
401 | # Commit the changes to the database
402 | db.session.commit()
403 | log("containers_actions", format="Admin successfully committed settings to database")
404 | except Exception as err:
405 | # If there's an error during commit, roll back the transaction and log the issue
406 | db.session.rollback()
407 | log("containers_errors", format="Admin encountered error while committing settings ({error})",
408 | error=str(err))
409 | return {"error": "Failed to update settings in database"}, 500
410 |
411 | try:
412 | # Reload settings into the container manager to apply changes immediately
413 | all_settings = ContainerSettingsModel.query.all()
414 | new_settings = settings_to_dict(all_settings)
415 | # Already inside a request context, so the manager's settings can be updated directly
416 | current_app.container_manager.settings.update(new_settings)
417 | log("containers_actions", format="Admin completed settings update. New settings: {settings}",
418 | settings=current_app.container_manager.settings)
419 | except Exception as err:
420 | # Log any error that occurs while updating the container manager settings
421 | log("containers_errors", format="Admin encountered error while updating container_manager settings ({error})",
422 | error=str(err))
423 | return {"error": "Failed to update container manager settings"}, 500
424 |
425 | # Redirect to the containers dashboard after successfully updating the settings
426 | return redirect(url_for(".route_containers_dashboard"))
427 |
428 | @containers_bp.route('/dashboard', methods=['GET'])
429 | @admins_only
430 | def route_containers_dashboard():
431 | """
432 | Admin route to view the containers dashboard.
433 |
434 | This route provides an overview of all running containers, their status, and whether the Docker daemon
435 | is currently connected. It allows administrators to view and manage active containers.
436 | """
437 | admin_user = get_current_user()
438 | log("containers_actions", format="Admin accessing container dashboard", user_id=admin_user.id)
439 |
440 | try:
441 | # Retrieve all running containers from the database, ordered by timestamp
442 | running_containers = ContainerInfoModel.query.order_by(
443 | ContainerInfoModel.timestamp.desc()).all()
444 | log("containers_debug", format="Admin retrieved {count} containers from database",
445 | user_id=admin_user.id, count=len(running_containers))
446 |
447 | # Check if the Docker daemon is connected
448 | connected = False
449 | try:
450 | connected = current_app.container_manager.is_connected()
451 | log("containers_debug", format="Admin checked Docker daemon connection: {status}",
452 | user_id=admin_user.id, status="Connected" if connected else "Disconnected")
453 | except Exception as err:
454 | # Log any errors encountered during Docker connection check
455 | log("containers_errors", format="Admin encountered error checking Docker daemon connection: {error}",
456 | user_id=admin_user.id, error=str(err))
457 |
458 | # Check the running status of each container and update the corresponding field
459 | for i, container in enumerate(running_containers):
460 | try:
461 | running_containers[i].is_running = current_app.container_manager.is_container_running(
462 | container.container_id)
463 | log("containers_debug", format="Admin checked container '{container_id}' status: {status}",
464 | user_id=admin_user.id, container_id=container.container_id,
465 | status="Running" if running_containers[i].is_running else "Stopped")
466 | except Exception as err:
467 | # Log any errors encountered while checking container status
468 | log("containers_errors", format="Admin encountered error checking container '{container_id}' status: {error}",
469 | user_id=admin_user.id, container_id=container.container_id, error=str(err))
470 | running_containers[i].is_running = False
471 |
472 | # Retrieve the current Docker assignment mode from settings
473 | docker_assignment = current_app.container_manager.settings.get("docker_assignment")
474 | log("containers_debug", format="Admin retrieved Docker assignment mode: {mode}",
475 | user_id=admin_user.id, mode=docker_assignment)
476 |
477 | # Render the dashboard template with the necessary context data
478 | log("containers_debug", format="Admin rendering dashboard with {running_containers} containers and docker_assignment to {docker_assignment}",
479 | user_id=admin_user.id, running_containers=len(running_containers),
480 | docker_assignment=docker_assignment)
481 |
482 | return render_template('container_dashboard.html',
483 | containers=running_containers,
484 | connected=connected,
485 | settings={'docker_assignment': docker_assignment})
486 | except Exception as err:
487 | # Log any errors that occur while loading or rendering the dashboard
488 | log("containers_errors", format="Admin encountered error rendering container dashboard: {error}",
489 | user_id=admin_user.id, error=str(err))
490 | current_app.logger.error(f"Error in container dashboard: {str(err)}", exc_info=True)
491 | return "An error occurred while loading the dashboard. Please check the logs.", 500
492 |
493 | @containers_bp.route('/settings', methods=['GET'])
494 | @admins_only
495 | def route_containers_settings():
496 | """
497 | Admin route to view and edit container settings.
498 |
499 | This route displays the current container-related settings and allows administrators to modify them.
500 | It serves as an interface for managing configurations used by the container manager.
501 | """
502 | # Retrieve the list of running containers, newest first (currently unused by the template)
503 | running_containers = ContainerInfoModel.query.order_by(
504 | ContainerInfoModel.timestamp.desc()).all()
505 | log("containers_actions", format="Admin accessed container settings page")
506 |
507 | # Render the settings template with the settings currently held by the container manager
508 | return render_template('container_settings.html', settings=current_app.container_manager.settings)
509 |
--------------------------------------------------------------------------------
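For reference, the player-facing endpoints in routes.py all take a JSON body carrying the challenge ID and return JSON. A minimal client sketch, assuming an authenticated CTFd session cookie and CSRF token (CTFd expects the CSRF-Token header on POSTs; the base URL and challenge ID are illustrative):

import requests

BASE = "https://ctf.example.com"  # illustrative instance URL
session = requests.Session()
session.cookies.set("session", "<session cookie from a logged-in browser>")
headers = {"CSRF-Token": "<csrf token from the CTFd page>"}

# Start (or fetch) a container for challenge 1, then renew and stop it.
for endpoint in ("request", "renew", "stop"):
    resp = session.post(f"{BASE}/containers/api/{endpoint}",
                        json={"chal_id": 1}, headers=headers)
    print(endpoint, resp.status_code, resp.json())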
/routes_helper.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides utility functions for managing containers in the CTFd platform.
3 | It includes functions for killing, renewing, and creating containers, as well as
4 | helper functions for formatting time and converting settings to a dictionary.
5 | """
6 |
7 | import time
8 | import json
9 | import datetime
10 |
11 | from CTFd.models import db
12 |
13 | from .logs import log
14 | from .models import ContainerInfoModel
15 | from .container_challenge import ContainerChallenge
16 |
17 | def settings_to_dict(settings):
18 | """
19 | Convert a list of settings objects to a dictionary.
20 |
21 | Args:
22 | settings (list): A list of setting objects retrieved from the database.
23 |
24 | Returns:
25 | dict: A dictionary with setting keys as dictionary keys and their corresponding values.
26 | """
27 | return {
28 | setting.key: setting.value for setting in settings
29 | }
30 |
31 | def format_time_filter(unix_seconds):
32 | """
33 | Convert Unix timestamp to ISO format string, including the current timezone.
34 |
35 | Args:
36 | unix_seconds (int): Unix timestamp in seconds since the epoch.
37 |
38 | Returns:
39 | str: ISO formatted date-time string with timezone information.
40 | """
41 | # Convert to local time, attach the local timezone, and format as ISO 8601.
42 | return datetime.datetime.fromtimestamp(unix_seconds).astimezone().isoformat()
43 |
44 | def kill_container(container_manager, container_id, challenge_id):
45 | """
46 | Kill a running container and remove its record from the database.
47 |
48 | Args:
49 | container_manager: The container manager object responsible for managing Docker containers.
50 | container_id (str): The ID of the container to terminate.
51 | challenge_id (int): The ID of the associated challenge to log for tracking purposes.
52 |
53 | Returns:
54 | tuple: A dictionary with a success or error message and an HTTP status code.
55 | """
56 | # Log the initiation of the container termination process
57 | log("containers_debug", format="CHALL_ID:{challenge_id}|Initiating container kill process for container '{container_id}'",
58 | challenge_id=challenge_id,
59 | container_id=container_id)
60 |
61 | # Retrieve the container information from the database
62 | container = ContainerInfoModel.query.filter_by(container_id=container_id).first()
63 |
64 | # If the container does not exist in the database, log an error and return a 400 response
65 | if not container:
66 | log("containers_errors", format="CHALL_ID:{challenge_id}|Container '{container_id}' not found in database",
67 | challenge_id=challenge_id,
68 | container_id=container_id)
69 | return {"error": "Container not found"}, 400
70 |
71 | try:
72 | # Attempt to kill the container using the Docker manager
73 | log("containers_actions", format="CHALL_ID:{challenge_id}|Killing container '{container_id}'",
74 | challenge_id=challenge_id,
75 | container_id=container_id)
76 | container_manager.kill_container(container_id)
77 | log("containers_debug", format="CHALL_ID:{challenge_id}|Container '{container_id}' successfully killed by Docker",
78 | challenge_id=challenge_id,
79 | container_id=container_id)
80 | except Exception as err:
81 | # Log and return an error message if the container could not be terminated
82 | log("containers_errors", format="CHALL_ID:{challenge_id}|Failed to kill container '{container_id}' ({error})",
83 | challenge_id=challenge_id,
84 | container_id=container_id,
85 | error=str(err))
86 | return {"error": "Failed to kill container"}, 500
87 |
88 | try:
89 | # Remove the container record from the database after successful termination
90 | log("containers_debug", format="CHALL_ID:{challenge_id}|Removing container '{container_id}' from database",
91 | challenge_id=challenge_id,
92 | container_id=container_id)
93 | db.session.delete(container)
94 | db.session.commit()
95 | log("containers_debug", format="CHALL_ID:{challenge_id}|Container '{container_id}' successfully removed from database",
96 | challenge_id=challenge_id,
97 | container_id=container_id)
98 | except Exception as db_err:
99 | # Handle any database errors during the removal process
100 | log("containers_errors", format="CHALL_ID:{challenge_id}|Failed to remove container '{container_id}' from database ({error})",
101 | challenge_id=challenge_id,
102 | container_id=container_id,
103 | error=str(db_err))
104 | return {"error": "Failed to update database"}, 500
105 |
106 | # Log the successful removal of the container and return a success response
107 | log("containers_actions", format="CHALL_ID:{challenge_id}|Container '{container_id}' successfully killed and removed",
108 | challenge_id=challenge_id,
109 | container_id=container_id)
110 | return {"success": "Container killed and removed"}
111 |
112 | def renew_container(container_manager, challenge_id, user_id, team_id, docker_assignment):
113 | """
114 | Renew the expiration time of a running container.
115 |
116 | Args:
117 | container_manager: The container manager object responsible for managing Docker containers.
118 | challenge_id (int): The ID of the associated challenge to be renewed.
119 | user_id (int): The ID of the user who owns the container.
120 | team_id (int): The ID of the team that owns the container.
121 | docker_assignment (str): The assignment mode for Docker containers (e.g., 'user', 'team', 'unlimited').
122 |
123 | Returns:
124 | tuple: A dictionary with a success message and new expiration time or an error message and HTTP status code.
125 | """
126 | # Log the initiation of the container renewal process
127 | log("containers_debug", format="CHALL_ID:{challenge_id}|Initiating container renewal process",
128 | challenge_id=challenge_id)
129 |
130 | # Retrieve the challenge object associated with the given challenge ID
131 | challenge = ContainerChallenge.challenge_model.query.filter_by(id=challenge_id).first()
132 |
133 | # If the challenge does not exist, log an error and return a 400 response
134 | if challenge is None:
135 | log("containers_errors", format="CHALL_ID:{challenge_id}|Renewing container failed (Challenge not found)",
136 | challenge_id=challenge_id)
137 | return {"error": "Challenge not found"}, 400
138 |
139 | # Log the Docker assignment mode being used
140 | log("containers_debug", format="CHALL_ID:{challenge_id}|Docker assignment mode: {mode}",
141 | challenge_id=challenge_id,
142 | mode=docker_assignment)
143 |
144 | # Determine the container to renew based on the assignment mode ('user' or 'team')
145 | if docker_assignment in ["user", "unlimited"]:
146 | running_container = ContainerInfoModel.query.filter_by(
147 | challenge_id=challenge_id,
148 | user_id=user_id).first()
149 | else:
150 | running_container = ContainerInfoModel.query.filter_by(
151 | challenge_id=challenge_id, team_id=team_id).first()
152 |
153 | # If the container does not exist, log an error and return a 400 response
154 | if running_container is None:
155 | log("containers_errors", format="CHALL_ID:{challenge_id}|Renew container failed (Container not found)",
156 | challenge_id=challenge_id)
157 | return {"error": "Container not found"}, 400
158 |
159 | try:
160 | # Update the expiration time of the container and commit changes to the database
161 | new_expiration = int(time.time() + container_manager.expiration_seconds)
162 | old_expiration = running_container.expires
163 | running_container.expires = new_expiration
164 |
165 | log("containers_debug", format="CHALL_ID:{challenge_id}|Updating container '{container_id}' expiration: {old_exp} -> {new_exp}",
166 | challenge_id=challenge_id,
167 | container_id=running_container.container_id,
168 | old_exp=old_expiration,
169 | new_exp=new_expiration)
170 |
171 | db.session.commit()
172 |
173 | # Log and return a success message with the new expiration time
174 | log("containers_debug", format="CHALL_ID:{challenge_id}|Container '{container_id}' renewed. New expiration: {new_exp}",
175 | challenge_id=challenge_id,
176 | container_id=running_container.container_id,
177 | new_exp=new_expiration)
178 |
179 | return {"success": "Container renewed", "expires": new_expiration}
180 | except Exception as err:
181 | # Log and return an error message if the renewal process fails
182 | log("containers_errors", format="CHALL_ID:{challenge_id}|Renew container '{container_id}' failed ({error})",
183 | challenge_id=challenge_id,
184 | container_id=running_container.container_id,
185 | error=str(err))
186 | return {"error": "Failed to renew container"}, 500
187 |
188 | def create_container(container_manager, challenge_id, user_id, team_id, docker_assignment):
189 | """
190 | Create a new container for a challenge.
191 |
192 | Args:
193 | container_manager: The container manager object.
194 | challenge_id (int): The ID of the associated challenge.
195 | user_id (int): The ID of the user.
196 | team_id (int): The ID of the team.
197 | docker_assignment (str): The docker assignment mode.
198 |
199 | Returns:
200 | str: A JSON string containing the container creation result.
201 | """
202 | # Log the start of the container creation process for a given challenge ID
203 | log("containers_debug", format="CHALL_ID:{challenge_id}|Initiating container creation process",
204 | challenge_id=challenge_id)
205 |
206 | # Retrieve the challenge object from the database using the provided challenge_id
207 | challenge = ContainerChallenge.challenge_model.query.filter_by(id=challenge_id).first()
208 |
209 | # If the challenge is not found, log an error and return a 400 response
210 | if challenge is None:
211 | log("containers_errors", format="CHALL_ID:{challenge_id}|Container creation failed (Challenge not found)",
212 | challenge_id=challenge_id)
213 | return {"error": "Challenge not found"}, 400
214 |
215 | # Log the Docker assignment mode being used
216 | log("containers_debug", format="CHALL_ID:{challenge_id}|Docker assignment mode: {mode}",
217 | challenge_id=challenge_id,
218 | mode=docker_assignment)
219 |
220 | running_containers_for_user = None
221 |
222 | # Determine which running containers to check based on the docker_assignment mode
223 | if docker_assignment in ["user", "unlimited"]:
224 | running_containers = ContainerInfoModel.query.filter_by(
225 | challenge_id=challenge.id, user_id=user_id)
226 | else: # 'team' (and any unrecognized mode) is keyed on the team, as in the routes above
227 | running_containers = ContainerInfoModel.query.filter_by(
228 | challenge_id=challenge.id, team_id=team_id)
229 |
230 | # Check if there is already a container running for the given challenge and user/team
231 | running_container = running_containers.first()
232 |
233 | if running_container:
234 | try:
235 | # If the container is running, log the status and return the container's information
236 | if container_manager.is_container_running(running_container.container_id):
237 | log("containers_actions", format="CHALL_ID:{challenge_id}|Container '{container_id}' already running",
238 | challenge_id=challenge_id,
239 | container_id=running_container.container_id)
240 | return json.dumps({
241 | "status": "already_running",
242 | "hostname": challenge.connection_info,
243 | "port": running_container.port,
244 | "expires": running_container.expires
245 | })
246 | else:
247 | # If the container is not running, remove it from the database and proceed with creation
248 | log("containers_debug", format="CHALL_ID:{challenge_id}|Container '{container_id}' not running, removing from database",
249 | challenge_id=challenge_id, container_id=running_container.container_id)
250 | db.session.delete(running_container)
251 | db.session.commit()
252 | except Exception as err:
253 | # Handle errors when checking the container status or deleting it from the database
254 | log("containers_errors", format="CHALL_ID:{challenge_id}|Error checking container '{container_id}' ({error})",
255 | challenge_id=challenge_id, container_id=running_container.container_id, error=str(err))
256 | return {"error": "Error checking container status"}, 500
257 |
258 | # Check if there are other running containers for the same user/team
259 | if docker_assignment == "user":
260 | running_containers_for_user = ContainerInfoModel.query.filter_by(user_id=user_id)
261 | elif docker_assignment == "team":
262 | running_containers_for_user = ContainerInfoModel.query.filter_by(team_id=team_id)
263 | else:
264 | running_container_for_user = None
265 |
266 | # Retrieve the first container running for the user/team, if any
267 | running_container_for_user = running_containers_for_user.first() if running_containers_for_user else None
268 |
269 | # If another container is already running for a different challenge, return an error
270 | if running_container_for_user:
271 | challenge_of_running_container = ContainerChallenge.challenge_model.query.filter_by(id=running_container_for_user.challenge_id).first()
272 | log("containers_errors", format="CHALL_ID:{challenge_id}|Container creation failed (Other instance '{other_container_id}' for challenge '{other_challenge_name}' already running)",
273 | challenge_id=challenge_id,
274 | other_container_id=running_container_for_user.container_id,
275 | other_challenge_name=challenge_of_running_container.name)
276 | return {"error": f"Stop other instance running ({challenge_of_running_container.name})"}, 400
277 |
278 | try:
279 | # Attempt to create a new Docker container using the container manager
280 | log("containers_debug", format="CHALL_ID:{challenge_id}|Creating new Docker container",
281 | challenge_id=challenge_id)
282 | created_container = container_manager.create_container(
283 | challenge.image, challenge.port, challenge.command, challenge.volumes)
284 | except Exception as err:
285 | # If the container creation fails, log the error and return a 500 response
286 | log("containers_errors", format="CHALL_ID:{challenge_id}|Container creation failed: {error}",
287 | challenge_id=challenge_id,
288 | error=str(err))
289 | return {"error": "Failed to create container"}, 500
290 |
291 | # Retrieve the port assigned to the created container
292 | port = container_manager.get_container_port(created_container.id)
293 |
294 | # If the port cannot be obtained, log the error and return an error response
295 | if port is None:
296 | log("containers_errors", format="CHALL_ID:{challenge_id}|Could not get port for container '{container_id}'",
297 | challenge_id=challenge_id,
298 | container_id=created_container.id)
299 | return json.dumps({"status": "error", "error": "Could not get port"})
300 |
301 | # Calculate the expiration time for the new container
302 | expires = int(time.time() + container_manager.expiration_seconds)
303 |
304 | # Create a new database entry for the container
305 | new_container = ContainerInfoModel(
306 | container_id=created_container.id,
307 | challenge_id=challenge.id,
308 | user_id=user_id,
309 | team_id=team_id,
310 | port=port,
311 | timestamp=int(time.time()),
312 | expires=expires
313 | )
314 |
315 | try:
316 | # Add the new container record to the database and commit the changes
317 | db.session.add(new_container)
318 | db.session.commit()
319 | log("containers_actions", format="CHALL_ID:{challenge_id}|Container '{container_id}' created and added to database",
320 | challenge_id=challenge_id,
321 | container_id=created_container.id)
322 | except Exception as db_err:
323 | # Handle any database errors during the insertion of the new container record
324 | log("containers_errors", format="CHALL_ID:{challenge_id}|Failed to add container '{container_id}' to database: {error}",
325 | challenge_id=challenge_id,
326 | container_id=created_container.id,
327 | error=str(db_err))
328 | return {"error": "Failed to save container information"}, 500
329 |
330 | # Log the successful completion of the container creation process and return the container's information
331 | log("containers_debug", format="CHALL_ID:{challenge_id}|Container '{container_id}' creation process completed",
332 | challenge_id=challenge_id, container_id=created_container.id)
333 | return json.dumps({
334 | "status": "created",
335 | "hostname": challenge.connection_info,
336 | "port": port,
337 | "expires": expires
338 | })
339 |
--------------------------------------------------------------------------------
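One pattern worth noting in routes.py and routes_helper.py above: the owner lookup (keyed on the user in 'user'/'unlimited' mode, on the team otherwise) is repeated in several places. If you extend the plugin, a small helper like the following sketch could centralize it (the function name is illustrative, not part of the plugin):

from .models import ContainerInfoModel

def find_running_container(challenge_id, user_id, team_id, docker_assignment):
    # Mirror the checks used throughout the plugin: 'user' and 'unlimited'
    # modes key containers on the user; any other mode keys them on the team.
    if docker_assignment in ("user", "unlimited"):
        return ContainerInfoModel.query.filter_by(
            challenge_id=challenge_id, user_id=user_id).first()
    return ContainerInfoModel.query.filter_by(
        challenge_id=challenge_id, team_id=team_id).first()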
/setup.py:
--------------------------------------------------------------------------------
1 | from .models import db, ContainerSettingsModel
2 |
3 | def setup_default_configs():
4 | """
5 | Sets up default configurations for container settings in the database.
6 |
7 | This function initializes the essential container settings required for the system to function properly.
8 | If any of these settings are not yet defined in the database, it applies the default values specified
9 | in the function. Once all settings are verified or updated, the changes are committed to the database.
10 | """
11 | # Dictionary containing the default configuration values
12 | default_configs = {
13 | "setup": "true", # Indicates whether initial setup is completed
14 | "docker_base_url": "unix://var/run/docker.sock", # URL for connecting to the Docker daemon
15 | "docker_hostname": "", # Hostname of the Docker server (empty by default)
16 | "container_expiration": "45", # Default expiration time for containers (in minutes)
17 | "container_maxmemory": "512", # Maximum memory limit for containers (in MB)
18 | "container_maxcpu": "0.5", # Maximum CPU allocation for containers
19 | "docker_assignment": "user", # Assignment mode for Docker containers (e.g., 'user', 'team' or 'unlimited')
20 | }
21 |
22 | # Iterate over each key-value pair in the default configurations
23 | for key, val in default_configs.items():
24 | # Check if the configuration already exists, and if not, apply the default value
25 | ContainerSettingsModel.apply_default_config(key, val)
26 |
27 | # Commit the changes to the database to ensure the new settings are saved
28 | db.session.commit()
29 |
--------------------------------------------------------------------------------
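setup_default_configs() is meant to run once when the plugin is loaded. A minimal sketch of how a CTFd plugin's load() entry point (this repository's __init__.py) would typically wire it up; the exact registration calls below are assumptions, not a restatement of that file:

from flask import Flask

from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.challenges import CHALLENGE_CLASSES

from .container_challenge import ContainerChallenge
from .routes import register_app
from .setup import setup_default_configs

def load(app: Flask):
    app.db.create_all()  # create the plugin's tables if they do not already exist
    CHALLENGE_CLASSES["container"] = ContainerChallenge  # challenge-type key is illustrative
    register_plugin_assets_directory(app, base_path="/plugins/containers/assets/")
    setup_default_configs()  # seed the defaults defined above
    app.register_blueprint(register_app(app))  # attach the containers blueprint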
/templates/container_dashboard.html:
--------------------------------------------------------------------------------
1 | {% extends "admin/base.html" %}
2 |
3 | {% block content %}
4 |
5 |
18 |
19 |
20 |
21 | Containers
22 |
23 |
24 |
25 | {% with messages = get_flashed_messages() %}
26 | {% if messages %}
27 | {% for message in messages %}
28 |
108 | The Base URL should be the local socket address of the Docker daemon, e.g.
109 | unix://var/run/docker.sock; it can also be an SSH address, e.g. ssh://user@example.com, or a
110 | remote TCP address, e.g.
111 | tcp://example.com:port (https://docs.docker.com/config/daemon/remote-access/).
113 | In either case, sudo will not
114 | be executed. For a local socket, the user
115 | CTFd is running as should have permission to access Docker; for SSH/TCP connections...
116 |
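To make the Base URL forms above concrete: the docker SDK listed in requirements.txt accepts each of them directly (paramiko, also in requirements.txt, covers the SSH case). A short sketch with placeholder addresses:

import docker

# Local socket (the default suggested above).
client = docker.DockerClient(base_url="unix://var/run/docker.sock")

# Alternatives, depending on how the daemon is exposed:
# client = docker.DockerClient(base_url="ssh://user@example.com")
# client = docker.DockerClient(base_url="tcp://example.com:2375")

print(client.ping())  # True if the daemon is reachable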