├── DECEIVE.png
├── SSH
│   ├── prompt.txt
│   ├── config.ini.TEMPLATE
│   └── ssh_server.py
├── requirements.txt
├── TODO.txt
├── LICENSE
├── .gitignore
└── README.md

--------------------------------------------------------------------------------
/DECEIVE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/DECEIVE/HEAD/DECEIVE.png

--------------------------------------------------------------------------------
/SSH/prompt.txt:
--------------------------------------------------------------------------------
1 | You are a video game developer's system. Include realistic video game source and asset files.
2 | 

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # SSH server
2 | asyncssh
3 | # AI API framework
4 | langchain
5 | langchain_community
6 | # For OpenAI models
7 | langchain_openai
8 | # For Google's Gemini models
9 | langchain_google_genai
10 | # For Ollama models
11 | langchain_ollama
12 | # For AWS
13 | langchain_aws
14 | transformers
15 | torch
16 | # For Anthropic models (via AWS)
17 | anthropic
18 | 

--------------------------------------------------------------------------------
/TODO.txt:
--------------------------------------------------------------------------------
1 | * Branch out to other protocols, such as SMTP or HTTP (REST API?)
2 | * Sometimes the LLM hallucinates the user's future inputs, and acts as though the user typed them. The prompt section below tries to fix this, but doesn't seem to work well.
3 | Never include your guess at the next user command(s) in your output. Be sure to only emulate one input at a time, and to never anticipate what the user's next input will be. For each user input, only send the expected output and nothing else. For example, in the following Python interpreter snippet, the user typed 'exti', but the LLM incorrectly responded with 'exit' and then simulated exiting the Python shell. Do not do things like this.
4 | guest@devserver:~$ python
5 | Python 3.9.7 (default, Sep 3 2021, 12:37:55)
6 | [GCC 7.5.0] on linux
7 | Type "help", "copyright", "credits" or "license" for more information.
8 | >>> exti
9 | exit
10 | guest@devserver:~$
11 | 

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2024 Splunk GitHub
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 | 
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 | 
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 | 
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 | 
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 | 
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 | 
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | .python-version
89 | 
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 | 
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 | 
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 | 
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 | 
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 | 
121 | # SageMath parsed files
122 | *.sage.py
123 | 
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 | 
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 | 
137 | # Rope project settings
138 | .ropeproject
139 | 
140 | # mkdocs documentation
141 | /site
142 | 
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 | 
148 | # Pyre type checker
149 | .pyre/
150 | 
151 | # pytype static type analyzer
152 | .pytype/
153 | 
154 | # Cython debug symbols
155 | cython_debug/
156 | 
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 | 
164 | # SSH Keys
165 | *_host_key
166 | *.key
167 | *.pub
168 | 
169 | # config files
170 | *.ini
171 | 

--------------------------------------------------------------------------------
/SSH/config.ini.TEMPLATE:
--------------------------------------------------------------------------------
1 | # THIS IS A TEMPLATE CONFIG FILE FOR DECEIVE
2 | # We provide reasonable defaults for most configuration items, but you should
3 | # review this before using it in production.
4 | 
5 | [honeypot]
6 | # The name of the file you wish to write the honeypot log to.
7 | log_file = ssh_log.log
8 | 
9 | # The name of the sensor, used to identify this honeypot in the logs.
10 | # If you leave this blank, the honeypot will use the system's hostname.
11 | sensor_name = deceive
12 | 
13 | # Settings for the SSH honeypot
14 | [ssh]
15 | # The port the SSH honeypot will listen on. You will probably want to set
16 | # this to 22 for production use.
17 | port = 8022
18 | # The host key to use for the SSH server. This should be a private key.
19 | # See the README for how to generate this key.
20 | host_priv_key = ssh_host_key
21 | # The server version string to send to clients. The SSH server automatically
22 | # prepends "SSH-2.0-" to this. So "OpenSSH_8.2p1 Ubuntu-4ubuntu0.3" will
23 | # be transformed to "SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3".
24 | server_version_string = OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
25 | 
26 | # Settings to configure which LLM backend to use. Only one stanza
27 | # should be uncommented at a time.
28 | [llm]
29 | ##### OpenAI
30 | llm_provider = openai
31 | model_name = gpt-4o
32 | 
33 | ##### Azure OpenAI
34 | #llm_provider = azure
35 | #azure_deployment = gpt-4o
36 | #azure_api_version = 2025-01-01-preview
37 | #azure_endpoint = 
38 | #model_name = gpt-4o
39 | 
40 | ##### ollama llama3
41 | #llm_provider = ollama
42 | #model_name = llama3.3
43 | 
44 | ##### Any model via Amazon Bedrock
45 | # Valid AWS model names can be found here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
46 | #llm_provider = AWS
47 | #model_name = anthropic.claude-3-5-sonnet-20240620-v1:0
48 | # NOTE: Bedrock is not supported in all AWS regions.
49 | #aws_region = us-east-1
50 | #aws_credentials_profile = default
51 | 
52 | ##### Google Gemini
53 | #llm_provider = gemini
54 | #model_name = gemini-1.5-pro
55 | 
56 | # Temperature controls randomness in LLM responses. Values usually range from 0.0 to 2.0.
57 | # Lower values (e.g., 0.2) make responses more focused and deterministic.
58 | # Higher values (e.g., 0.8) make responses more creative and variable.
59 | # Default is 0.2.
60 | temperature = 0.2
61 | 
62 | # The maximum number of tokens to send to the LLM backend in a single
63 | # request. This includes the message history for the session, so it should
64 | # be fairly high. Not all models support large token counts, so be sure
65 | # to test this with your model.
66 | trimmer_max_tokens = 64000
67 | 
68 | # System prompt for the LLM. This will be the same for any running instance.
69 | # Change this if you need to tweak behaviors of the simulation itself.
70 | system_prompt = Interpret all inputs as though they were SSH commands and provide a realistic
71 |     output. Unless otherwise specified, assume you are emulating a Linux operating
72 |     system, so be sure to include realistic users, processes, and files. Do not
73 |     include extraneous formatting in your responses. Also make sure that your
74 |     responses do not indicate that the user is on a virtual machine, container,
75 |     or emulation at all.
76 | 
77 |     If {interactive} is True, make sure to include a realistic MOTD on the
78 |     first call, and to end all responses with a realistic shell prompt to display to
79 |     the user, including a space at the end.
80 | 
81 |     If {interactive} is False, do not include a shell prompt or MOTD.
82 | 
83 |     Include ANSI color codes for the terminal with the output of ls commands
84 |     (including any flags), or in any other situation where it is appropriate, but
85 |     do not include the ``` code formatting around those blocks.
86 | 
87 |     Make sure all user and host names conform to some reasonable corporate naming
88 |     standard. Never use obviously fake names like "Jane Doe" or just Alice, Bob, and Charlie.
89 | 
90 |     If at any time the user's input would cause the SSH session to close (e.g., if
91 |     they exited the login shell), your only answer should be "XXX-END-OF-SESSION-XXX"
92 |     with no additional output before or after. Remember that the user could start up
93 |     subshells or other command interpreters, and exiting those subprocesses should not
94 |     end the SSH session.
95 | 
96 |     Assume the username is {username}.
97 | 
98 | # The valid user accounts and passwords for the SSH server, in the
99 | # form "username = password". Note that you can enable login without
100 | # a password by leaving that field blank (e.g., "guest =" on a line by
101 | # itself). You can set an account to accept ANY password, including an empty
102 | # password, by setting the password to "*".
103 | [user_accounts]
104 | guest =
105 | user1 = secretpw
106 | user2 = password123
107 | root = *
108 | 
109 | 
110 | 

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DECEIVE
2 | 
3 | ![A cybercriminal interacts with a ghostly, AI-driven honeypot system](DECEIVE.png)
4 | 
5 | DECEIVE, the **DECeption with Evaluative Integrated Validation Engine**, is a high-interaction, low-effort honeypot system. Unlike most high-interaction honeypots, DECEIVE doesn't provide attackers with access to any actual system. Instead, an AI backend does all the work of simulating a realistic system, based on a configurable system prompt that describes what type of system you want to simulate. Unlike many other high-interaction honeypots, which require substantial effort to seed with realistic users, data, and applications, DECEIVE's AI backend does all this for you, automatically.
6 | 
7 | This version of DECEIVE simulates a Linux server via the SSH protocol. It logs all user input and all output returned by the LLM backend, as well as a summary of each session after it ends. It'll even tell you whether it thinks a user's session was benign, suspicious, or outright malicious.
8 | 
9 | ⛔️⛔️ **DECEIVE is a proof-of-concept project. It is not production quality. Try it, learn from it, but be cautious about deploying it in a production environment.** ⛔️⛔️
10 | 
11 | ## Supported Host Platforms
12 | DECEIVE is primarily developed on macOS 15 (Sequoia), but it should work on any UNIX-like system that can run Python 3. This includes other versions of macOS, Linux, and even Windows (via Windows Subsystem for Linux).
13 | 
14 | ## Setup
15 | ### Check out the latest code from GitHub
16 | You can fetch the latest version using the following command:
17 | 
18 |     git clone https://github.com/splunk/DECEIVE
19 | 
20 | The rest of these instructions assume you have changed your current directory to the cloned repo.
21 | 
22 | ### Install Dependencies
23 | Ensure you have Python 3 installed. We recommend running DECEIVE in its own Python virtualenv, though this is not required.
24 | 
25 | Next, install the Python modules the honeypot needs:
26 | 
27 |     pip3 install -r requirements.txt
28 | 
29 | ### Generate the SSH Host Key
30 | 
31 | The SSH server requires a host keypair to secure its communications. From the top directory of the repo, generate an SSH keypair using the following command:
32 | 
33 |     ssh-keygen -t rsa -b 4096 -f SSH/ssh_host_key
34 | 
35 | ### Copy the Template Configuration File
36 | 
37 | Copy the `SSH/config.ini.TEMPLATE` file to `SSH/config.ini`:
38 | 
 |     cp SSH/config.ini.TEMPLATE SSH/config.ini
 | 
39 | ### Edit the Configuration File
40 | 
41 | Open the `SSH/config.ini` file and review the settings. Update the values as needed, paying special attention to the values in the `[llm]` section, where you will configure the LLM backend you wish to use, and to the `[user_accounts]` section, where you can configure the usernames and passwords you'd like the honeypot to support.
42 | 
43 | ### Tell DECEIVE What it's Emulating
44 | Edit the `SSH/prompt.txt` file to include a short description of the type of system you want it to pretend to be. You don't have to be very detailed here, though the more details you can provide, the better the simulation will be. You can keep it high level, like:
45 | 
46 |     You are a video game developer's system. Include realistic video game source and asset files.
47 | If you like, you can add whatever additional details you think will be helpful. For example:
48 | 
49 |     You are the Internet-facing mail server for bigschool.edu, a state-sponsored university in Virginia. Valid user accounts are "a20093887", "a20093887-admin", and "mxadmin". Home directories are in "/home/$USERNAME". Everyone's default shell is /bin/zsh, except mxadmin's, which is bash. Mail spools for all campus users (be sure to include email accounts that are not valid for logon to this server) are in /var/spool/mail. Be sure to simulate some juicy emails there, but make them realistic. Some should be personal, but some should be just about the business of administering the school, dealing with students, applying for financial aid, etc. Make the spool permissions relaxed, simulating a misconfiguration that would allow anyone on the system to read the files.
50 | 
51 | ## Running the Honeypot
52 | To start the DECEIVE honeypot server, first make sure that you have set any environment variables required by your chosen LLM backend. For example, if you are using any of the OpenAI models, you will need to set the `OPENAI_API_KEY` variable like so:
53 | 
54 |     export OPENAI_API_KEY="<your-api-key>"
55 | 
56 | Next, change to the `SSH` directory and run the following command:
57 | 
58 |     python3 ./ssh_server.py
59 | 
60 | The server will start and listen for incoming SSH connections on the configured port. It will not produce any console output, but it will keep running in the foreground.
61 | 
62 | ## Test it Out
63 | Once the server is running (this can take a few seconds), access it on the configured port. If you are on a Linux or UNIX-like system, try the following command (substitute "localhost" and "8022" as appropriate for your config):
64 | 
65 |     ssh guest@localhost -p 8022
66 | 
67 | ### Logging
68 | Logs will be written to the file specified in the `log_file` configuration option. By default, this is `SSH/ssh_log.log`.
69 | 
70 | DECEIVE logs are in JSON Lines format, with each line being a complete JSON document.
71 | 
72 | The following is a complete example of a simple SSH session, in which the user executed two simple commands (`pwd` and `exit`):
73 | 
74 | ```json
75 | {"timestamp": "2025-01-10T20:37:55.018+00:00", "level": "INFO", "task_name": "-", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "SSH connection received", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 59, "funcName": "connection_made", "created": 1736541475.0183098, "msecs": 18.0, "relativeCreated": 13872.790813446045, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": null}
76 | {"timestamp": "2025-01-10T20:37:55.177+00:00", "level": "INFO", "task_name": "Task-5", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "Authentication success", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 75, "funcName": "begin_auth", "created": 1736541475.1775439, "msecs": 177.0, "relativeCreated": 14032.02486038208, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": "Task-5", "username": "guest", "password": ""}
77 | {"timestamp": "2025-01-10T20:37:57.456+00:00", "level": "INFO", "task_name": "session-6355218b-59e5-4549-add3-49e6d1efc133", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "LLM response", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 174, "funcName": "handle_client", "created": 1736541477.4568708, "msecs": 456.0, "relativeCreated": 16311.351776123047, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": "session-6355218b-59e5-4549-add3-49e6d1efc133", "details": "V2VsY29tZSB0byBHYW1lRGV2IENvcnAncyBEZXZlbG9wbWVudCBFbnZpcm9ubWVudAoKLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLQogICBXZWxjb21lLCBndWVzdCEgCiAgIExhc3QgbG9naW46IFR1ZSBPY3QgMjQgMTQ6MzI6MTUgMjAyMyBmcm9tIDE5Mi4xNjguMS4xMAogICBQcm9qZWN0czoKICAgICAtIEZhbnRhc3lRdWVzdAogICAgIC0gU3BhY2VFeHBsb3JlcnMKICAgICAtIFJhY2luZ01hbmlhCiAgIFN5c3RlbSBTdGF0dXM6IEFsbCBzeXN0ZW1zIG9wZXJhdGlvbmFsCiAgIFJlbWVtYmVyIHRvIGNvbW1pdCB5b3VyIGNoYW5nZXMhCi0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0KCmd1ZXN0QGRldi13b3Jrc3RhdGlvbjp+JCA="}
"V2VsY29tZSB0byBHYW1lRGV2IENvcnAncyBEZXZlbG9wbWVudCBFbnZpcm9ubWVudAoKLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLQogICBXZWxjb21lLCBndWVzdCEgCiAgIExhc3QgbG9naW46IFR1ZSBPY3QgMjQgMTQ6MzI6MTUgMjAyMyBmcm9tIDE5Mi4xNjguMS4xMAogICBQcm9qZWN0czoKICAgICAtIEZhbnRhc3lRdWVzdAogICAgIC0gU3BhY2VFeHBsb3JlcnMKICAgICAtIFJhY2luZ01hbmlhCiAgIFN5c3RlbSBTdGF0dXM6IEFsbCBzeXN0ZW1zIG9wZXJhdGlvbmFsCiAgIFJlbWVtYmVyIHRvIGNvbW1pdCB5b3VyIGNoYW5nZXMhCi0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0KCmd1ZXN0QGRldi13b3Jrc3RhdGlvbjp+JCA="} 78 | {"timestamp": "2025-01-10T20:37:59.333+00:00", "level": "INFO", "task_name": "session-6355218b-59e5-4549-add3-49e6d1efc133", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "User input", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 184, "funcName": "handle_client", "created": 1736541479.3334038, "msecs": 333.0, "relativeCreated": 18187.88480758667, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": "session-6355218b-59e5-4549-add3-49e6d1efc133", "details": "cHdk"} 79 | {"timestamp": "2025-01-10T20:38:00.189+00:00", "level": "INFO", "task_name": "session-6355218b-59e5-4549-add3-49e6d1efc133", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "LLM response", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 200, "funcName": "handle_client", "created": 1736541480.189375, "msecs": 189.0, "relativeCreated": 19043.855905532837, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": "session-6355218b-59e5-4549-add3-49e6d1efc133", "details": "L2hvbWUvZ3Vlc3QKCmd1ZXN0QGRldi13b3Jrc3RhdGlvbjp+JCA="} 80 | {"timestamp": "2025-01-10T20:38:01.944+00:00", "level": "INFO", "task_name": "session-6355218b-59e5-4549-add3-49e6d1efc133", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "User input", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 184, "funcName": "handle_client", "created": 1736541481.944072, "msecs": 944.0, "relativeCreated": 20798.552989959717, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": "session-6355218b-59e5-4549-add3-49e6d1efc133", "details": "ZXhpdA=="} 81 | {"timestamp": "2025-01-10T20:38:04.132+00:00", "level": "INFO", "task_name": "session-6355218b-59e5-4549-add3-49e6d1efc133", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "Session summary", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 151, "funcName": "session_summary", "created": 1736541484.1324642, "msecs": 132.0, "relativeCreated": 22986.945152282715, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": 
"session-6355218b-59e5-4549-add3-49e6d1efc133", "details": "The user issued basic commands like `pwd` to check the current working directory and `exit` to terminate the session. This activity is typical of a benign user checking their environment upon logging in and then closing the session. There is no indication of reconnaissance, exploitation, or any post-foothold activity such as privilege escalation or data exfiltration. The actions appear to be standard and routine.\n\nJudgement: BENIGN", "judgement": "BENIGN"} 82 | {"timestamp": "2025-01-10T20:38:04.139+00:00", "level": "INFO", "task_name": "-", "src_ip": "::1", "src_port": 58164, "dst_ip": "::1", "dst_port": 8022, "message": "SSH connection closed", "name": "__main__", "levelname": "INFO", "levelno": 20, "pathname": "/home/deceive/DECEIVE/SSH/./ssh_server.py", "filename": "ssh_server.py", "module": "ssh_server", "exc_info": null, "exc_text": null, "stack_info": null, "lineno": 65, "funcName": "connection_lost", "created": 1736541484.139776, "msecs": 139.0, "relativeCreated": 22994.2569732666, "thread": 8145041472, "threadName": "MainThread", "processName": "MainProcess", "process": 10823, "taskName": null} 83 | ``` 84 | 85 | Things to note: 86 | * Timestamps are always in UTC. UTC||GTFO! 87 | * The `task_name` field contains a unique value that can be used to associate all the entries from a single SSH session. 88 | * The "message" field will tell you what type of entry this: 89 | * `SSH connection received` 90 | * `Authentication success` 91 | * `User input` 92 | * `LLM response` 93 | * `Session summary` 94 | * `SSH connection closed` 95 | * Several of these message types also feature a `details` field with additional information 96 | * `User input` messages contain a base64-encoded copy of the entire user input in the `details` field, as well as an `interactive` field (true/false) that tells you whether this was an interactive or non-interactive command (i.e., whether they logged in with a terminal session or provided a command on the SSH command-line). 97 | * `LLM response` messages contain a base64-encoded copy of the entire simulated response in the `details` field. 98 | * `Session summary` messages contain not only a summary of the commands, but also a guess as to what they might have been intended to accomplish. There will also be a `judgement` field that contains one of "BENIGN", "SUSPICIOUS", or "MALICIOUS" 99 | * Since this is a honeypot and not intended for use by real users, IT WILL LOG USERNAMES AND PASSWORDS! These are found in the `Authentication success` messages, in the `username` and `password` fields. 100 | 101 | ### Contributing 102 | Contributions are welcome! Please submit pull requests or open issues to discuss any changes or improvements. 103 | 104 | ### License 105 | This project is licensed under the MIT License. See the LICENSE file for details. 
106 | 

--------------------------------------------------------------------------------
/SSH/ssh_server.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | from configparser import ConfigParser
4 | import argparse
5 | import asyncio
6 | import asyncssh
7 | import threading
8 | import sys
9 | import json
10 | import os
11 | import traceback
12 | from typing import Optional
13 | import logging
14 | import datetime
15 | import uuid
16 | from base64 import b64encode
17 | from operator import itemgetter
18 | from langchain_openai import ChatOpenAI, AzureChatOpenAI
19 | from langchain_aws import ChatBedrock, ChatBedrockConverse
20 | from langchain_google_genai import ChatGoogleGenerativeAI
21 | from langchain_ollama import ChatOllama
22 | from langchain_core.messages import HumanMessage, SystemMessage, trim_messages
23 | from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
24 | from langchain_core.runnables.history import RunnableWithMessageHistory
25 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
26 | from langchain_core.runnables import RunnablePassthrough
27 | from asyncssh.misc import ConnectionLost
28 | import socket
29 | 
30 | class JSONFormatter(logging.Formatter):
31 |     def __init__(self, sensor_name, *args, **kwargs):
32 |         super().__init__(*args, **kwargs)
33 |         self.sensor_name = sensor_name
34 | 
35 |     def format(self, record):
36 |         log_record = {
37 |             "timestamp": datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T", timespec="milliseconds"),
38 |             "level": record.levelname,
39 |             "task_name": record.task_name,
40 |             "src_ip": record.src_ip,
41 |             "src_port": record.src_port,
42 |             "dst_ip": record.dst_ip,
43 |             "dst_port": record.dst_port,
44 |             "message": record.getMessage(),
45 |             "sensor_name": self.sensor_name,
46 |             "sensor_protocol": "ssh"
47 |         }
48 |         if hasattr(record, 'interactive'):
49 |             log_record["interactive"] = record.interactive
50 |         # Include any additional fields from the extra dictionary
51 |         for key, value in record.__dict__.items():
52 |             if key not in log_record and key != 'args' and key != 'msg':
53 |                 log_record[key] = value
54 |         return json.dumps(log_record)
55 | 
56 | class MySSHServer(asyncssh.SSHServer):
57 |     def __init__(self):
58 |         super().__init__()
59 |         self.summary_generated = False
60 | 
61 |     def connection_made(self, conn: asyncssh.SSHServerConnection) -> None:
62 |         # Get the source and destination IPs and ports
63 |         peername = conn.get_extra_info('peername')
64 |         sockname = conn.get_extra_info('sockname')
65 | 
66 |         if peername is not None:
67 |             src_ip, src_port = peername[:2]
68 |         else:
69 |             src_ip, src_port = '-', '-'
70 | 
71 |         if sockname is not None:
72 |             dst_ip, dst_port = sockname[:2]
73 |         else:
74 |             dst_ip, dst_port = '-', '-'
75 | 
76 |         # Store the connection details in thread-local storage
77 |         thread_local.src_ip = src_ip
78 |         thread_local.src_port = src_port
79 |         thread_local.dst_ip = dst_ip
80 |         thread_local.dst_port = dst_port
81 | 
82 |         # Log the connection details
83 |         logger.info("SSH connection received", extra={"src_ip": src_ip, "src_port": src_port, "dst_ip": dst_ip, "dst_port": dst_port})
84 | 
85 |     def connection_lost(self, exc: Optional[Exception]) -> None:
86 |         if exc:
87 |             logger.error('SSH connection error', extra={"error": str(exc)})
88 |             if not isinstance(exc, ConnectionLost):
89 |                 traceback.print_exception(exc)
90 |         else:
91 |             logger.info("SSH connection closed")
92 |         # Ensure session summary is called on connection loss if attributes are set
93 |         if hasattr(self, '_process') and hasattr(self, '_llm_config') and hasattr(self, '_session'):  # defensive: nothing currently sets these attributes
94 |             asyncio.create_task(session_summary(self._process, self._llm_config, self._session, self))
95 | 
96 |     def begin_auth(self, username: str) -> bool:
97 |         if accounts.get(username) != '':  # a non-empty (or unknown) account entry means a password is required
98 |             logger.info("User attempting to authenticate", extra={"username": username})
99 |             return True
100 |         else:
101 |             logger.info("Authentication success", extra={"username": username, "password": ""})
102 |             return False  # returning False tells asyncssh no authentication is needed; login succeeds immediately
103 | 
104 |     def password_auth_supported(self) -> bool:
105 |         return True
106 |     def host_based_auth_supported(self) -> bool:
107 |         return False
108 |     def public_key_auth_supported(self) -> bool:
109 |         return False
110 |     def kbdint_auth_supported(self) -> bool:
111 |         return False
112 | 
113 |     def validate_password(self, username: str, password: str) -> bool:
114 |         pw = accounts.get(username, '*')  # NB: unknown usernames fall back to '*', which accepts any password
115 | 
116 |         if pw == '*' or (pw != '*' and password == pw):
117 |             logger.info("Authentication success", extra={"username": username, "password": password})
118 |             return True
119 |         else:
120 |             logger.info("Authentication failed", extra={"username": username, "password": password})
121 |             return False
122 | 
123 | async def session_summary(process: asyncssh.SSHServerProcess, llm_config: dict, session: RunnableWithMessageHistory, server: MySSHServer):
124 |     # Check if the summary has already been generated
125 |     if server.summary_generated:
126 |         return
127 | 
128 |     # When the SSH session ends, ask the LLM to give a nice
129 |     # summary of the attacker's actions and probable intent,
130 |     # as well as a snap judgement about whether we should be
131 |     # concerned or not.
132 | 
133 |     prompt = '''
134 |     Examine the list of all the SSH commands the user issued during
135 |     this session. The user is likely (but not proven) to be an
136 |     attacker. Analyze the commands and provide the following:
137 | 
138 |     A concise, high-level description of what the user did during the
139 |     session, including whether this appears to be reconnaissance,
140 |     exploitation, post-foothold activity, or another stage of an attack.
141 |     Specify the likely goals of the user.
142 | 
143 |     A judgement of the session's nature as either "BENIGN," "SUSPICIOUS,"
144 |     or "MALICIOUS," based on the observed activity.
145 | 
146 |     Ensure the high-level description accounts for the overall context and intent,
147 |     even if some commands seem benign in isolation.
148 | 
149 |     End your response with "Judgement: [BENIGN/SUSPICIOUS/MALICIOUS]".
150 | 
151 |     Be very terse, but always include the high-level attacker's goal (e.g.,
152 |     "post-foothold reconnaissance", "cryptomining", "data theft" or similar).
153 |     Also do not label the sections (except for the judgement, which you should
154 |     label clearly), and don't provide bullet points or item numbers. You do
155 |     not need to explain every command, just provide the highlights or
156 |     representative examples.
157 |     '''
158 | 
159 |     # Ask the LLM for its summary
160 |     llm_response = await session.ainvoke(
161 |         {
162 |             "messages": [HumanMessage(content=prompt)],
163 |             "username": process.get_extra_info('username'),
164 |             "interactive": True  # Ensure interactive flag is passed
165 |         },
166 |         config=llm_config
167 |     )
168 | 
169 |     # Extract the judgement from the response
170 |     judgement = "UNKNOWN"
171 |     if "Judgement: BENIGN" in llm_response.content:
172 |         judgement = "BENIGN"
173 |     elif "Judgement: SUSPICIOUS" in llm_response.content:
174 |         judgement = "SUSPICIOUS"
175 |     elif "Judgement: MALICIOUS" in llm_response.content:
176 |         judgement = "MALICIOUS"
177 | 
178 |     logger.info("Session summary", extra={"details": llm_response.content, "judgement": judgement})
179 | 
180 |     server.summary_generated = True
181 | 
182 | async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer) -> None:
183 |     # This is the main loop for handling SSH client connections.
184 |     # Any user interaction should be done here.
185 | 
186 |     # Give each session a unique name
187 |     task_uuid = f"session-{uuid.uuid4()}"
188 |     current_task = asyncio.current_task()
189 |     current_task.set_name(task_uuid)
190 | 
191 |     llm_config = {"configurable": {"session_id": task_uuid}}
192 | 
193 |     try:
194 |         if process.command:
195 |             # Handle non-interactive command execution
196 |             command = process.command
197 |             logger.info("User input", extra={"details": b64encode(command.encode('utf-8')).decode('utf-8'), "interactive": False})
198 |             llm_response = await with_message_history.ainvoke(
199 |                 {
200 |                     "messages": [HumanMessage(content=command)],
201 |                     "username": process.get_extra_info('username'),
202 |                     "interactive": False
203 |                 },
204 |                 config=llm_config
205 |             )
206 |             process.stdout.write(f"{llm_response.content}")
207 |             logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8'), "interactive": False})
208 |             await session_summary(process, llm_config, with_message_history, server)
209 |             process.exit(0)
210 |         else:
211 |             # Handle interactive session
212 |             llm_response = await with_message_history.ainvoke(
213 |                 {
214 |                     "messages": [HumanMessage(content="")],
215 |                     "username": process.get_extra_info('username'),
216 |                     "interactive": True
217 |                 },
218 |                 config=llm_config
219 |             )
220 | 
221 |             process.stdout.write(f"{llm_response.content}")
222 |             logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8'), "interactive": True})
223 | 
224 |             async for line in process.stdin:
225 |                 line = line.rstrip('\n')
226 |                 logger.info("User input", extra={"details": b64encode(line.encode('utf-8')).decode('utf-8'), "interactive": True})
227 | 
228 |                 # Send the command to the LLM and give the response to the user
229 |                 llm_response = await with_message_history.ainvoke(
230 |                     {
231 |                         "messages": [HumanMessage(content=line)],
232 |                         "username": process.get_extra_info('username'),
233 |                         "interactive": True
234 |                     },
235 |                     config=llm_config
236 |                 )
237 |                 if llm_response.content == "XXX-END-OF-SESSION-XXX":  # sentinel requested by the system prompt
238 |                     await session_summary(process, llm_config, with_message_history, server)
239 |                     process.exit(0)
240 |                     return
241 |                 else:
242 |                     process.stdout.write(f"{llm_response.content}")
243 |                     logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8'), "interactive": True})
244 | 
245 |     except asyncssh.BreakReceived:
246 |         pass
247 |     finally:
248 |         await session_summary(process, llm_config, with_message_history, server)
249 |         process.exit(0)
250 | 
251 |     # Just in case we ever get here, which we probably shouldn't
252 |     # process.exit(0)
253 | 
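 | # NOTE: The end-of-session sentinel tested in handle_client() must match the
 | # string that the system_prompt in config.ini instructs the model to emit
 | # ("XXX-END-OF-SESSION-XXX"); if the two drift apart, interactive sessions
 | # will never be detected as closed.
 | 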
254 | async def start_server() -> None:
255 | 
259 |     await asyncssh.listen(
260 |         port=config['ssh'].getint("port", 8022),
261 |         reuse_address=True,
262 |         reuse_port=True,
263 |         server_factory=MySSHServer,
264 |         server_host_keys=config['ssh'].get("host_priv_key", "ssh_host_key"),
265 |         process_factory=lambda process: handle_client(process, MySSHServer()),  # fresh per-process server object for summary tracking
266 |         server_version=config['ssh'].get("server_version_string", "OpenSSH_8.2p1 Ubuntu-4ubuntu0.3")  # asyncssh prepends "SSH-2.0-" automatically
267 |     )
268 | 
269 | class ContextFilter(logging.Filter):
270 |     """
271 |     This filter is used to add the current asyncio task name to the log record,
272 |     so you can group events in the same session together.
273 |     """
274 | 
275 |     def filter(self, record):
276 | 
277 |         task = asyncio.current_task()
278 |         if task:
279 |             task_name = task.get_name()
280 |         else:
281 |             task_name = thread_local.__dict__.get('session_id', '-')
282 | 
283 |         record.src_ip = thread_local.__dict__.get('src_ip', '-')
284 |         record.src_port = thread_local.__dict__.get('src_port', '-')
285 |         record.dst_ip = thread_local.__dict__.get('dst_ip', '-')
286 |         record.dst_port = thread_local.__dict__.get('dst_port', '-')
287 | 
288 |         record.task_name = task_name
289 | 
290 |         return True
291 | 
292 | def llm_get_session_history(session_id: str) -> BaseChatMessageHistory:
293 |     if session_id not in llm_sessions:
294 |         llm_sessions[session_id] = InMemoryChatMessageHistory()
295 |     return llm_sessions[session_id]
296 | 
297 | def get_user_accounts() -> dict:
298 |     if (not 'user_accounts' in config) or (len(config.items('user_accounts')) == 0):
299 |         raise ValueError("No user accounts found in configuration file.")
300 | 
301 |     accounts = dict()
302 | 
303 |     for k, v in config.items('user_accounts'):
304 |         accounts[k] = v
305 | 
306 |     return accounts
307 | 
308 | def choose_llm(llm_provider: Optional[str] = None, model_name: Optional[str] = None):
309 |     llm_provider_name = llm_provider or config['llm'].get("llm_provider", "openai")
310 |     llm_provider_name = llm_provider_name.lower()
311 |     model_name = model_name or config['llm'].get("model_name", "gpt-4o-mini")
312 | 
313 |     # Get temperature parameter from config, default to 0.2 if not specified
314 |     temperature = config['llm'].getfloat("temperature", 0.2)
315 | 
316 |     if llm_provider_name == 'openai':
317 |         llm_model = ChatOpenAI(
318 |             model=model_name,
319 |             temperature=temperature
320 |         )
321 |     elif llm_provider_name == 'azure':
322 |         llm_model = AzureChatOpenAI(
323 |             azure_deployment=config['llm'].get("azure_deployment"),
324 |             azure_endpoint=config['llm'].get("azure_endpoint"),
325 |             api_version=config['llm'].get("azure_api_version"),
326 |             model=config['llm'].get("model_name"),  # Ensure model_name is passed here
327 |             temperature=temperature
328 |         )
329 |     elif llm_provider_name == 'ollama':
330 |         llm_model = ChatOllama(
331 |             model=model_name,
332 |             temperature=temperature
333 |         )
334 |     elif llm_provider_name == 'aws':
335 |         llm_model = ChatBedrockConverse(
336 |             model=model_name,
337 |             region_name=config['llm'].get("aws_region", "us-east-1"),
338 |             credentials_profile_name=config['llm'].get("aws_credentials_profile", "default"),
339 |             temperature=temperature
340 |         )
341 |     elif llm_provider_name == 'gemini':
342 |         llm_model = ChatGoogleGenerativeAI(
343 |             model=model_name,
344 |             temperature=temperature
345 |         )
346 |     else:
347 |         raise ValueError(f"Invalid LLM provider {llm_provider_name}.")
348 | 
349 |     return llm_model
350 | 
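 | # NOTE: Most providers pick up their credentials from the environment (e.g.,
 | # OPENAI_API_KEY for OpenAI, GOOGLE_API_KEY for Gemini); the AWS provider uses
 | # the credentials profile named in config.ini, and Ollama talks to a local
 | # server and needs no API key at all.
 | 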
351 | def get_prompts(prompt: Optional[str], prompt_file: Optional[str]) -> dict:
352 |     system_prompt = config['llm']['system_prompt']
353 |     if prompt is not None:
354 |         if not prompt.strip():
355 |             print("Error: The prompt text cannot be empty.", file=sys.stderr)
356 |             sys.exit(1)
357 |         user_prompt = prompt
358 |     elif prompt_file:
359 |         if not os.path.exists(prompt_file):
360 |             print(f"Error: The specified prompt file '{prompt_file}' does not exist.", file=sys.stderr)
361 |             sys.exit(1)
362 |         with open(prompt_file, "r") as f:
363 |             user_prompt = f.read()
364 |     elif os.path.exists("prompt.txt"):
365 |         with open("prompt.txt", "r") as f:
366 |             user_prompt = f.read()
367 |     else:
368 |         raise ValueError("Either prompt or prompt_file must be provided.")
369 |     return {
370 |         "system_prompt": system_prompt,
371 |         "user_prompt": user_prompt
372 |     }
373 | 
374 | #### MAIN ####
375 | 
376 | try:
377 |     # Parse command line arguments
378 |     parser = argparse.ArgumentParser(description='Start the SSH honeypot server.')
379 |     parser.add_argument('-c', '--config', type=str, default=None, help='Path to the configuration file')
380 |     parser.add_argument('-p', '--prompt', type=str, help='The entire text of the prompt')
381 |     parser.add_argument('-f', '--prompt-file', type=str, default='prompt.txt', help='Path to the prompt file')
382 |     parser.add_argument('-l', '--llm-provider', type=str, help='The LLM provider to use')
383 |     parser.add_argument('-m', '--model-name', type=str, help='The model name to use')
384 |     parser.add_argument('-t', '--trimmer-max-tokens', type=int, help='The maximum number of tokens to send to the LLM backend in a single request')
385 |     parser.add_argument('-s', '--system-prompt', type=str, help='System prompt for the LLM')
386 |     parser.add_argument('-r', '--temperature', type=float, help='Temperature parameter for controlling randomness in LLM responses (0.0-2.0)')
387 |     parser.add_argument('-P', '--port', type=int, help='The port the SSH honeypot will listen on')
388 |     parser.add_argument('-k', '--host-priv-key', type=str, help='The host key to use for the SSH server')
389 |     parser.add_argument('-v', '--server-version-string', type=str, help='The server version string to send to clients')
390 |     parser.add_argument('-L', '--log-file', type=str, help='The name of the file you wish to write the honeypot log to')
391 |     parser.add_argument('-S', '--sensor-name', type=str, help='The name of the sensor, used to identify this honeypot in the logs')
392 |     parser.add_argument('-u', '--user-account', action='append', help='User account in the form username=password. Can be repeated.')
393 |     args = parser.parse_args()
394 | 
395 |     # Determine which config file to load
396 |     config = ConfigParser()
397 |     if args.config is not None:
398 |         # User explicitly set a config file; error if it doesn't exist.
399 |         if not os.path.exists(args.config):
400 |             print(f"Error: The specified config file '{args.config}' does not exist.", file=sys.stderr)
401 |             sys.exit(1)
402 |         config.read(args.config)
403 |     else:
404 |         default_config = "config.ini"
405 |         if os.path.exists(default_config):
406 |             config.read(default_config)
407 |         else:
408 |             # Use defaults when no config file found.
409 |             config['honeypot'] = {'log_file': 'ssh_log.log', 'sensor_name': socket.gethostname()}
410 |             config['ssh'] = {'port': '8022', 'host_priv_key': 'ssh_host_key', 'server_version_string': 'OpenSSH_8.2p1 Ubuntu-4ubuntu0.3'}
411 |             config['llm'] = {'llm_provider': 'openai', 'model_name': 'gpt-3.5-turbo', 'trimmer_max_tokens': '64000', 'temperature': '0.7', 'system_prompt': ''}
412 |             config['user_accounts'] = {}
413 | 
414 |     # Override config values with command line arguments if provided
415 |     if args.llm_provider:
416 |         config['llm']['llm_provider'] = args.llm_provider
417 |     if args.model_name:
418 |         config['llm']['model_name'] = args.model_name
419 |     if args.trimmer_max_tokens:
420 |         config['llm']['trimmer_max_tokens'] = str(args.trimmer_max_tokens)
421 |     if args.system_prompt:
422 |         config['llm']['system_prompt'] = args.system_prompt
423 |     if args.temperature is not None:
424 |         config['llm']['temperature'] = str(args.temperature)
425 |     if args.port:
426 |         config['ssh']['port'] = str(args.port)
427 |     if args.host_priv_key:
428 |         config['ssh']['host_priv_key'] = args.host_priv_key
429 |     if args.server_version_string:
430 |         config['ssh']['server_version_string'] = args.server_version_string
431 |     if args.log_file:
432 |         config['honeypot']['log_file'] = args.log_file
433 |     if args.sensor_name:
434 |         config['honeypot']['sensor_name'] = args.sensor_name
435 | 
436 |     # Merge command-line user accounts into the config
437 |     if args.user_account:
438 |         if 'user_accounts' not in config:
439 |             config['user_accounts'] = {}
440 |         for account in args.user_account:
441 |             if '=' in account:
442 |                 key, value = account.split('=', 1)
443 |                 config['user_accounts'][key.strip()] = value.strip()
444 |             else:
445 |                 config['user_accounts'][account.strip()] = ''
446 | 
447 |     # Read the user accounts from the configuration
448 |     accounts = get_user_accounts()
449 | 
450 |     # Always use UTC for logging
451 |     logging.Formatter.formatTime = (lambda self, record, datefmt=None: datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T", timespec="milliseconds"))
452 | 
453 |     # Get the sensor name from the config or use the system's hostname
454 |     sensor_name = config['honeypot'].get('sensor_name', socket.gethostname())
455 | 
456 |     # Set up the honeypot logger
457 |     logger = logging.getLogger(__name__)
458 |     logger.setLevel(logging.INFO)
459 | 
460 |     log_file_handler = logging.FileHandler(config['honeypot'].get("log_file", "ssh_log.log"))
461 |     logger.addHandler(log_file_handler)
462 | 
463 |     log_file_handler.setFormatter(JSONFormatter(sensor_name))
464 | 
465 |     f = ContextFilter()
466 |     logger.addFilter(f)
467 | 
468 |     # Now get access to the LLM
469 | 
470 |     prompts = get_prompts(args.prompt, args.prompt_file)
471 |     llm_system_prompt = prompts["system_prompt"]
472 |     llm_user_prompt = prompts["user_prompt"]
473 | 
474 |     llm = choose_llm(config['llm'].get("llm_provider"), config['llm'].get("model_name"))
475 | 
476 |     llm_sessions = dict()
477 | 
478 |     llm_trimmer = trim_messages(
479 |         max_tokens=config['llm'].getint("trimmer_max_tokens", 64000),
480 |         strategy="last",
481 |         token_counter=llm,
482 |         include_system=True,
483 |         allow_partial=False,
484 |         start_on="human",
485 |     )
486 | 
487 |     llm_prompt = ChatPromptTemplate.from_messages(
488 |         [
489 |             (
490 |                 "system",
491 |                 llm_system_prompt
492 |             ),
493 |             (
494 |                 "system",
495 |                 llm_user_prompt
496 |             ),
497 |             MessagesPlaceholder(variable_name="messages"),
498 |         ]
499 |     )
500 | 
501 |     llm_chain = (
502 |         RunnablePassthrough.assign(messages=itemgetter("messages") | llm_trimmer)
503 |         | llm_prompt
504 |         | llm
505 |     )
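 |     # The chain above trims the stored history to trimmer_max_tokens, renders
 |     # the two system prompts (the config's system_prompt plus the contents of
 |     # prompt.txt) followed by the trimmed message history, and hands the result
 |     # to the selected model. RunnableWithMessageHistory below keys each
 |     # session's history on the session_id carried in the per-session llm_config.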
506 | 
507 |     with_message_history = RunnableWithMessageHistory(
508 |         llm_chain,
509 |         llm_get_session_history,
510 |         input_messages_key="messages"
511 |     )
512 |     # Thread-local storage for connection details
513 |     thread_local = threading.local()
514 | 
515 |     # Kick off the server!
516 |     loop = asyncio.new_event_loop()
517 |     asyncio.set_event_loop(loop)
518 |     loop.run_until_complete(start_server())
519 |     loop.run_forever()
520 | 
521 | except Exception as e:
522 |     print(f"Error: {e}", file=sys.stderr)
523 |     traceback.print_exc()
524 |     sys.exit(1)
525 | 
526 | 

--------------------------------------------------------------------------------