├── .gitignore
├── LICENSE
├── README.md
├── docker-compose.yaml
├── dockerfiles
│   ├── collector
│   ├── dockerhp
│   └── mongodb
├── requirements.txt
├── samples
│   ├── boto_config_sample.json
│   ├── collector_config_sample.json
│   ├── env
│   ├── hp_config_sample.json
│   └── secrets_sample.json
├── scripts
│   ├── collector.py
│   ├── deploy.py
│   ├── docker_honeypot.py
│   ├── ez_certs.py
│   └── python_cmd.sh
├── setup.py
└── src
    └── docker_honey
        ├── __init__.py
        ├── collector_actions.py
        ├── commands.py
        ├── consts.py
        ├── dockerhp_actions.py
        ├── mongo_orm.py
        ├── notify.py
        ├── server.py
        ├── simple_commands
        │   ├── __init__.py
        │   ├── actions.py
        │   ├── app.py
        │   ├── boto.py
        │   ├── consts.py
        │   ├── ssh.py
        │   └── util.py
        └── util.py
/.gitignore:
--------------------------------------------------------------------------------
1 | new_secrets.json
2 | new_secrets_file.json
3 | prod_secrets.json
4 | backup_secrets.json
5 | secrets.json
6 | ./ssl/*
7 | ./config/*.json
8 | ./config/ssl/*.key
9 | ./config/ssl/*.pem
10 | ./config/ssl/*.crt
11 | ./config/ssl/*.crl
12 | ssl
13 | hp_config.json
14 | collector_config.json
15 | boto_controller_config.json
16 | boto_config.json
17 | ssh_keys/
18 | dhp_env/
19 | dhp_venv/
20 | internal_notes/
21 | ssl/
22 | internal-scripts/
23 | tests/internal_keys.toml
24 | tests/internal-test-data
25 | tests/internal-scripts/
26 | tests/internal-test-data/internal_keys.toml
27 | configs/*
28 | *.swp
29 | *.pem
30 | *.key
31 | # Byte-compiled / optimized / DLL files
32 | __pycache__/
33 | *.py[cod]
34 | *$py.class
35 |
36 | # C extensions
37 | *.so
38 |
39 | # Distribution / packaging
40 | .Python
41 | env/
42 | build/
43 | develop-eggs/
44 | dist/
45 | downloads/
46 | eggs/
47 | .eggs/
48 | lib/
49 | lib64/
50 | parts/
51 | sdist/
52 | var/
53 | *.egg-info/
54 | .installed.cfg
55 | *.egg
56 |
57 | # PyInstaller
58 | # Usually these files are written by a python script from a template
59 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
60 | *.manifest
61 | *.spec
62 |
63 | # Installer logs
64 | pip-log.txt
65 | pip-delete-this-directory.txt
66 |
67 | # Unit test / coverage reports
68 | htmlcov/
69 | .tox/
70 | .coverage
71 | .coverage.*
72 | .cache
73 | nosetests.xml
74 | coverage.xml
75 | *,cover
76 | .hypothesis/
77 |
78 | # Translations
79 | *.mo
80 | *.pot
81 |
82 | # Django stuff:
83 | *.log
84 | local_settings.py
85 |
86 | # Flask stuff:
87 | instance/
88 | .webassets-cache
89 |
90 | # Scrapy stuff:
91 | .scrapy
92 |
93 | # Sphinx documentation
94 | docs/_build/
95 |
96 | # PyBuilder
97 | target/
98 |
99 | # IPython Notebook
100 | .ipynb_checkpoints
101 |
102 | # pyenv
103 | .python-version
104 |
105 | # celery beat schedule file
106 | celerybeat-schedule
107 |
108 | # dotenv
109 | .env
110 |
111 | # virtualenv
112 | venv/
113 | ENV/
114 |
115 | # Spyder project settings
116 | .spyderproject
117 |
118 | # Rope project settings
119 | .ropeproject
120 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright 2020 Cisco Systems, Inc.
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## About
2 |
3 | This is a very simple server that emulates some aspects of the Docker HTTP API. The server will respond to:
4 | 1. HTTP `GET` version
5 | 2. HTTP `GET` ping
6 | 3. HTTP `POST` create image
7 | 4. HTTP Error Code 500 in almost all other cases
8 |
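These endpoints mirror the Docker Engine API, so the emulation can be smoke-tested with `curl` (a minimal sketch; the host and port are placeholders, and `/version` and `/_ping` are the standard Docker Engine API paths assumed here):

```
export HOST=192.168.122.1
curl http://${HOST}:2375/version             # emulated version endpoint
curl http://${HOST}:2375/_ping               # emulated ping endpoint
curl -i http://${HOST}:2375/not-an-endpoint  # most other paths return HTTP 500
```
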
9 | The assumption is that this service is running in a cloud provider somewhere. As of right now, the service is a simple script that runs in a shell. When a recon event or the creation of a container is detected, the server will log the event to any of the following services:
10 | 1. Webex Teams
11 | 2. Slack
12 | 3. Mongodb
13 | 4. HTTP Collector
14 |
15 | Furthermore, if there is a desire to pull a webpage through a honeypot, the collector can be used to distribute the request and then retrieve the results.
16 |
17 | If you end up using AWS, then there is an easy deployment script, `deploy.py`. The script orchestrates the ordered deployment (e.g. instance allocation, setup commands, etc.) of all the requisite resources.
18 |
19 | **After getting the AWS IAM credentials and the required webhooks, copy `samples/secrets_sample.json` to `secrets.json`, update it, and execute this command:**
20 |
21 |
22 | ```
23 | python scripts/deploy.py -config samples/boto_config_sample.json \
24 | -secrets secrets.json -new_secrets_file prod_secrets.json \
25 | -mongodb_up -mongodb_region us-east-2 \
26 |     -collector_up -collector_region us-east-2 \
27 | -dockerhp_up -dockerhp_regions us-east-1 us-east-2 us-west-1 us-west-2 \
28 | -dockerhp_count 3
29 | ```
30 |
31 | ## Deployment and environment
32 |
33 | These services can be deployed several ways. The `deploy.py` script deploys all the services to AWS and creates and updates all the relevant _secrets_. Alternatively, the services can be deployed directly with Python or Docker; in those two cases, the configuration files for the honeypot and collector need to be updated manually.
34 |
35 | ### Using Docker
36 |
37 | Set up the target hosts that will run each (or all) of the services, and install Docker on them (example commands below).
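
For Ubuntu hosts, these are the same commands the deploy script runs (taken from the `setup_collector_dockerhp` action in `samples/boto_config_sample.json`; `{username}` is the deploy script's parameter for your login on the host):

```
sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl software-properties-common git python3-pip
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable"
sudo apt update && sudo apt install -y docker-ce docker-compose
sudo usermod -aG docker {username}
```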
38 |
39 | Create all the relevant SSL/TLS certificates for all the services:
40 | ```
41 | python3 scripts/ez_certs.py -output_path ./ssl/ \
42 | -common_names mongodb collector-dockerhp dockerhp \
43 |        -ca_name ca-dockerhp-collector
44 | ```
45 |
46 | #### Create tokens and passwords
47 |
48 | Create an `env` file and set the password. The same password must be used for `mongo_pass` in `collector_config.json`.
49 |
50 | The MongoDB Docker container relies on the `env` file, which should be updated:
51 | ```
52 | MONGO_INITDB_ROOT_PASSWORD=PASSWORD
53 | MONGO_INITDB_ROOT_USERNAME=mongo_user
54 | ```
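
A hard-to-guess value for `MONGO_INITDB_ROOT_PASSWORD` can be generated with, for example:
```
openssl rand -hex 24
```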
55 |
56 | The configuration files also need to be updated with the relevant passwords and tokens.
57 |
58 | ##### Updating the collector configurations
59 | Copy ```samples/collector_config_sample.json``` to ```collector_config.json```; it contains all the parameters necessary to run the collector. This file needs to be updated with the `honeypot_tokens` and `admin_token`. The `server_secret_key` can also be set to the `admin_token` value. At this time, `server_secret_key` is used as a shared key between the `collector` and `dockerhp` so that service-to-service calls can be made, such as requesting web pages through a particular honeypot.
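
One way to fill in those fields (a sketch, assuming `jq` is installed; the token values are placeholders to replace with hard-to-guess strings):

```
cp samples/collector_config_sample.json collector_config.json
jq '.admin_token = "REPLACE_ADMIN_TOKEN"
    | .honeypot_tokens = ["REPLACE_HONEYPOT_TOKEN"]
    | .server_secret_key = "REPLACE_SERVER_SECRET_KEY"' \
    collector_config.json > tmp.json && mv tmp.json collector_config.json
```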
60 |
61 | If the `collector` will only be accessible in an RFC 1918 environment, update `global_hostname` with the RFC 1918 address. Otherwise it will be updated with the host's public IP address when the service starts. If Slack or Webex Teams is being used, update the relevant _webhook URLs_.
62 |
63 |
64 | ##### Updating the dockerhp configuration
65 | Copy ```samples/hp_config_sample.json``` to ```hp_config.json```; it contains all the parameters necessary to run the dockerhp service. This file needs to be updated with the `collector_host`, `collector_token`, and `server_secret_key`. The `server_secret_key` is used as a shared key between the `collector` and `dockerhp` so that service-to-service calls can be made, such as requesting web pages through a particular honeypot.
66 |
67 | If the `dockerhp` service will only be accessible in an RFC 1918 environment, update `global_hostname` with the RFC 1918 address. Otherwise it will be updated with the host's public IP address when the service starts.
68 |
69 | #### Starting the MongoDB, Collector, and DockerHP services
70 | To start the respective services:
71 |
72 | 1. __MongoDB:__ `docker-compose build mongo && docker-compose up -d mongo`
73 | 2. __Collector:__ `docker-compose build collector && docker-compose up -d collector`
74 | 3. __DockerHP:__ `docker-compose build dockerhp && docker-compose up -d dockerhp`
75 |
76 |
77 | ### Using `deploy.py`
78 |
79 | The deployment script reads `samples/boto_config_sample.json` and merges in the command parameters used for AWS instance creation and host command execution. Each instance node in the `boto_config_sample.json` (i.e. entries in `instance_descriptions`) contains _activities_, and each activity has _steps_. The step names map to _actions_ defined in the configuration; these actions include commands to run, files to upload, etc., and are executed to set up the instance, configure it, and then start the Docker container.
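
For example, the `dockerhp` entry in the sample maps its two activities to named steps, and each step name resolves to an entry in the top-level `actions` map (excerpted from `samples/boto_config_sample.json`):

```
"activities": {
    "setup":   {"description":"", "steps":["setup_collector_dockerhp", "upload_dockerhp_configs"]},
    "startup": {"description":"", "steps":["start_container"]}
}
```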
80 |
81 | Update the `./samples/secrets_sample.json` with all of the relevant secrets for Mongo, the collector, and the docker honeypot. Create all the relevant SSL/TLS certificates for all the services:
82 | ```
83 | python3 scripts/ez_certs.py -output_path ./ssl/ \
84 | -common_names mongodb collector-dockerhp dockerhp \
85 |        -ca_name ca-dockerhp-collector
86 | ```
87 |
88 | Create a working secrets file:
89 | ```
90 | cp samples/secrets_sample.json prod_secrets.json
91 | ```
92 |
93 | Update the AWS credentials and all the relevant tokens and secrets for the services.
94 |
95 | #### Bringing up `mongodb` in AWS
96 |
97 | This step reads the configuration file and secrets, and then sets up the MongoDB service. At the conclusion of the process, `prod_secrets.json` will be updated with the MongoDB host IP address and the MongoDB password, if a new one was created.
98 |
99 | During the course of the setup and installation:
100 | 1. an AWS instance and a data volume are created,
101 | 2. the volume is mounted,
102 | 3. SSL certificates are uploaded,
103 | 4. `docker-compose.yaml` is uploaded,
104 | 5. the `mongo` container is started
105 | ```
106 | python scripts/deploy.py -config samples/boto_config_sample.json -secrets prod_secrets.json -new_secrets_file prod_secrets.json -mongodb_up
107 | ```
108 |
109 | #### Bringing up `collector` in AWS
110 |
111 | This step reads the configuration file and secrets, and then sets up the Collector service and an Alternate Collector service (which enables resilience during maintenance). At the conclusion of the process, `prod_secrets.json` will be updated with the Collector and Alternate Collector host IP addresses and the `admin` and `honeypot` tokens, if any are created.
112 |
113 | 1. an AWS instance and a data volume are created,
114 | 2. SSL certificates are uploaded,
115 | 3. `docker-compose.yaml` is uploaded,
116 | 4. `collector_config.json` is uploaded,
117 | 5. the `collector` container is started
118 |
119 | During the course of the setup and installation, an AWS instance is created; the SSL certificates, `docker-compose.yaml`, and the updated configuration file are uploaded to the collector host; and the `collector` is started as a Docker container.
120 |
121 | ```
122 | python scripts/deploy.py -config samples/boto_config_sample.json -secrets prod_secrets.json -new_secrets_file prod_secrets.json -collector_up
123 | ```
124 |
125 | #### Bringing up `dockerhp` in AWS
126 |
127 | This step reads the configuration file and secrets, and then sets up the DockerHP services.
128 |
129 | 1. an AWS instance and a data volume are created,
130 | 2. SSL certificates are uploaded,
131 | 3. `docker-compose.yaml` is uploaded,
132 | 4. `hp_config.json` is uploaded,
133 | 5. the `dockerhp` container is started
134 |
135 | During the course of the setup and installation, an AWS instance is created; the SSL certificates, `docker-compose.yaml`, and the updated configuration file are uploaded to each honeypot host; and `dockerhp` is started as a Docker container. Three instances are started in each of the specified regions.
136 |
137 | ```
138 | python scripts/deploy.py -config samples/boto_config_sample.json \
139 | -secrets prod_secrets.json -new_secrets_file prod_secrets.json \
140 |     -dockerhp_up -dockerhp_count 3 -dockerhp_regions us-east-1 us-east-2
141 | ```
142 |
143 | #### Tearing everything down in AWS
144 |
145 | This step reads the configuration file and secrets, and then terminates all instances whose tags correspond to the MongoDB, collector, and dockerhp instances in each region.
146 |
147 | ```
148 | python scripts/deploy.py -config samples/boto_config_sample.json \
149 | -secrets prod_secrets.json -mongodb_down -collector_down \
150 |     -dockerhp_down -dockerhp_regions us-east-1 us-east-2
151 | ```
152 |
153 | ## Requesting a Webpage
154 |
155 | Visit ```https://COLLECTOR_HOST:5000/remote_web_request```. Use the `admin_token` and specify the URL, any parameters/payloads, and the sensor that you wish to leverage. When the request has been executed and is ready for download, notifications will be sent to the Slack or Webex channels.
156 |
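The same request can also be driven from the command line via `scripts/collector.py -submit` (a sketch based on that script's arguments; the URL and sensor id here are placeholders):

```
python scripts/collector.py -config collector_config.json -submit \
    -url https://example.com -method GET -sensor_id all
```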
157 |
158 | ## Testing the Docker Honeypot
159 |
160 | With `docker` installed, execute the following command against the honeypot, replacing the `HOST` variable. The base64 payload is benign; it decodes to a small bash script that prints dates and a backup directory path.
161 |
162 | ```
163 | export HOST=192.168.122.1
164 | time docker -H tcp://${HOST}:2375 run --rm -v /:/mnt alpine chroot /mnt /bin/sh -c "echo 'IyEvYmluL2Jhc2gKZGF0ZSArJ0ZPUk1BVCcKIAojIyMgbW0vZGQveXl5eSAjIyMKZGF0ZSArJyVtLyVkLyVZJwogCiMjIFRpbWUgaW4gMTIgaHIgZm9ybWF0ICMjIwpkYXRlICsnJXInCiAKIyMgYmFja3VwIGRpciBmb3JtYXQgIyMKYmFja3VwX2Rpcj0kKGRhdGUgKyclbS8lZC8lWScpCmVjaG8gIkJhY2t1cCBkaXIgZm9yIHRvZGF5OiAvbmFzMDQvYmFja3Vwcy8ke2JhY2t1cF9kaXJ9Igo=' | base64 -d | bash"
165 | ```
166 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3.1'
2 |
3 | services:
4 |
5 | mongo:
6 | image: collector_mongo
7 | restart: always
8 | build:
9 | context: .
10 | dockerfile: ./dockerfiles/mongodb
11 | env_file:
12 | - ./env
13 | volumes:
14 | - /opt/data/mongo:/data/db
15 | # - ./ssl/mongodb.pem:/data/ssl/mongodb.pem
16 | # - ./ssl/mongodb_ca.crt:/data/ssl/mongodb-cert.crt
17 | ports:
18 | - 27127:27017
19 | command:
20 | --tlsMode=requireTLS
21 | --tlsCertificateKeyFile=/data/ssl/mongodb.pem
22 | --tlsCAFile=/data/ssl/mongodb-cert.crt
23 | --tlsAllowConnectionsWithoutCertificates
24 |
25 | collector:
26 | image: collector
27 | restart: always
28 | build:
29 | context: .
30 | dockerfile: ./dockerfiles/collector
31 | ports:
32 | - 5000:5000
33 |
34 | dockerhp:
35 | image: dockerhp
36 | restart: always
37 | build:
38 | context: .
39 | dockerfile: ./dockerfiles/dockerhp
40 | ports:
41 | - 61023:61023
42 | - 2375:2375
43 | - 2376:2376
44 | - 2377:2377
45 | - 4243:4243
46 | - 4244:4244
--------------------------------------------------------------------------------
/dockerfiles/collector:
--------------------------------------------------------------------------------
1 | FROM python:3.8
2 |
3 | RUN apt-get update; apt-get install -y git python3 python3-pip libmagic-dev
4 | RUN mkdir -p ~/.ssh && chmod 0700 ~/.ssh
5 |
6 | RUN mkdir /opt/main; chmod -R a+rw /opt
7 | WORKDIR /opt/main
8 |
9 | ADD collector_config.json config.json
10 |
11 | RUN git clone https://github.com/ciscocsirt/dhp.git dhp; \
12 | cd dhp; \
13 | pip3 install -r requirements.txt; pip3 install .; \
14 | cd .. ; \
15 | cp dhp/scripts/collector.py main.py; \
16 | cp dhp/scripts/python_cmd.sh python_cmd.sh; \
17 | rm -r dhp ; mkdir ssl ;
18 |
19 | ADD ssl/ca-dockerhp-collector.crt ssl/ca-dockerhp-collector.crt
20 | ADD ssl/collector-cert.pem ssl/collector-cert.pem
21 | ADD ssl/collector-key.pem ssl/collector-key.pem
22 |
23 | CMD [ "sh", "python_cmd.sh" ]
24 | EXPOSE 5000
--------------------------------------------------------------------------------
/dockerfiles/dockerhp:
--------------------------------------------------------------------------------
1 | FROM python:3.8
2 |
3 | RUN apt-get update; apt-get install -y git python3 python3-pip libmagic-dev
4 | RUN mkdir -p ~/.ssh && chmod 0700 ~/.ssh
5 |
6 | RUN mkdir /opt/main; chmod -R a+rw /opt
7 | WORKDIR /opt/main
8 |
9 | ADD hp_config.json config.json
10 |
11 | RUN git clone https://github.com/ciscocsirt/dhp.git dhp; \
12 | cd dhp; \
13 | pip3 install -r requirements.txt; pip3 install .; \
14 | cd .. ; \
15 | cp dhp/scripts/docker_honeypot.py main.py; \
16 | cp dhp/scripts/python_cmd.sh python_cmd.sh; \
17 | rm -r dhp ; mkdir ssl ;
18 |
19 | ADD ssl/ca-dockerhp-collector.crt ssl/ca-dockerhp-collector.crt
20 | ADD ssl/dockerhp-cert.pem ssl/dockerhp-cert.pem
21 | ADD ssl/dockerhp-key.pem ssl/dockerhp-key.pem
22 |
23 |
24 | CMD [ "sh", "python_cmd.sh" ]
25 | EXPOSE 61023
26 | EXPOSE 2375
27 | EXPOSE 2376
28 | EXPOSE 2377
29 | EXPOSE 4243
30 | EXPOSE 4244
--------------------------------------------------------------------------------
/dockerfiles/mongodb:
--------------------------------------------------------------------------------
1 | FROM mongo:latest
2 |
3 | RUN mkdir -p /data/ssl/
4 | ADD ssl/mongodb.pem /data/ssl/mongodb.pem
5 | ADD ssl/ca-dockerhp-collector.crt /data/ssl/mongodb-cert.crt
6 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | wheel
2 | mongoengine
3 | regex
4 | requests
5 | flask_restful
6 | flask
7 | netifaces
8 | quart
9 | paramiko
10 | boto
11 | scp
12 | hypercorn
13 | rfc3987
14 | validator-collection
--------------------------------------------------------------------------------
/samples/boto_config_sample.json:
--------------------------------------------------------------------------------
1 | {
2 | "aws_access_key_id": "",
3 | "aws_secret_access_key": "",
4 | "ssh_key_path": "./ssh_keys/",
5 | "regions": [
6 | "us-east-2"
7 | ],
8 | "instance_type": "t2.micro",
9 | "instance_descriptions": [
10 | {"name": "dockerhp",
11 | "image_architecture": "x86_64",
12 | "image_platform_details": "Linux/UNIX",
13 | "image_public": true,
14 | "image_name": "",
15 | "image_type": "machine",
16 | "image_description_keywords": ["canonical, ubuntu, 20.04 lts, amd64 focal image"],
17 | "image_owner_id": "099720109477",
18 | "instance_type": "t2.micro",
19 | "security_groups": ["remote-ssh-access", "dockerhp-docker-access"],
20 | "base_keyname": "dockerhp-host",
21 | "keyname_fmt": "{region}-{base_keyname}.pem",
22 | "recreate_keypair": false,
23 | "tag_specs": ["dockerhp-tags"],
24 | "regions": ["us-east-2"],
25 | "multi_region": true,
26 | "max_count": 3,
27 | "username": "ubuntu",
28 | "command_string_parameters": [
29 | {"name":"username", "value":"ubuntu"},
30 | {"name":"container_name", "value":"dockerhp"},
31 |
32 | {"name":"server_secret_key", "value":null},
33 | {"name":"global_port", "value":5000},
34 | {"name":"global_hostname", "value":null},
35 |
36 | {"name":"collector", "value":true},
37 | {"name":"collector_token", "value":null},
38 | {"name":"collector_host", "value":null},
39 | {"name":"collector_alt_host", "value":null}
40 |
41 | ],
42 | "activities": {
43 | "setup": {"description":"", "steps":["setup_collector_dockerhp", "upload_dockerhp_configs"]},
44 | "startup":{"description":"", "steps":["start_container"]}
45 | }
46 | },
47 | {"name": "dockerhp-collector",
48 | "image_architecture": "x86_64",
49 | "image_platform_details": "Linux/UNIX",
50 | "image_public": true,
51 | "image_name": "",
52 | "image_type": "machine",
53 | "image_description_keywords": ["canonical, ubuntu, 20.04 lts, amd64 focal image"],
54 | "image_owner_id": "099720109477",
55 | "security_groups": ["remote-ssh-access", "dockerhp-collector-access"],
56 | "base_keyname": "dockerhp-collector",
57 | "keyname_fmt": "{region}-{base_keyname}.pem",
58 | "recreate_keypair": false,
59 | "tag_specs": ["collector-tags"],
60 | "regions": ["us-east-2"],
61 | "multi_region": false,
62 | "max_count": 1,
63 | "username": "ubuntu",
64 | "command_string_parameters": [
65 | {"name":"container_name", "value":"collector"},
66 | {"name":"username", "value":"ubuntu"},
67 | {"name":"mongo", "value":true},
68 | {"name":"mongo_user", "value":"mongo_user"},
69 | {"name":"mongo_host", "value":null},
70 | {"name":"mongo_pass", "value":null},
71 | {"name":"mongo_user", "value":"mongo_user"},
72 | {"name":"server_secret_key", "value":null},
73 | {"name":"global_port", "value":5000},
74 | {"name":"global_hostname", "value":null},
75 |
76 | {"name":"collector", "value":false},
77 | {"name":"collector_token", "value":null},
78 | {"name":"collector_host", "value":null},
79 | {"name":"collector_alt_host", "value":null},
80 |
81 | {"name":"wbx_webhook", "value":null},
82 | {"name":"wbx", "value":true},
83 |
84 | {"name":"slack_webhook", "value":null},
85 | {"name":"slack", "value":true}
86 | ],
87 | "activities": {
88 | "setup": {"description":"", "steps":["setup_collector_dockerhp", "upload_collector_configs"]},
89 | "startup":{"description":"", "steps":["start_container"]}
90 | }
91 | },
92 | {"name": "dockerhp-mongodb",
93 | "instance_type": "t2.micro",
94 | "image_architecture": "x86_64",
95 | "image_platform_details": "Linux/UNIX",
96 | "image_public": true,
97 | "image_name": "",
98 | "image_type": "machine",
99 | "image_description_keywords": ["canonical, ubuntu, 20.04 lts, amd64 focal image"],
100 | "image_owner_id": "099720109477",
101 | "security_groups": ["remote-ssh-access", "dockerhp-mongodb-access"],
102 | "base_keyname": "dockerhp-collector",
103 | "keyname_fmt": "{region}-{base_keyname}.pem",
104 | "recreate_keypair": false,
105 | "volumes": ["dockerhp-mongodb"],
106 | "volume_devices": [
107 | {"device": "/dev/xvdf",
108 | "volume": "dockerhp-mongodb",
109 | "filesystem":"ext4",
110 | "mountpoint":"/opt/data"}
111 | ],
112 | "tag_specs": ["dockerhp-mongodb-tags"],
113 | "regions": ["us-east-2"],
114 | "multi_region": false,
115 | "max_count": 1,
116 | "username": "ubuntu",
117 | "command_string_parameters": [
118 | {"name":"username", "value":"ubuntu"},
119 | {"name":"mongo_user", "value":"mongo_user"},
120 | {"name":"mongo_pass", "value":"fill_me_in"},
121 | {"name":"container_name", "value":"mongo"}
122 | ],
123 | "activities":{
124 | "setup": {"description":"", "steps":["setup_mongodb", "upload_mongodb_configs", "start_container"]}
125 | }
126 |
127 | }
128 | ],
129 | "tag_specs": [
130 | {"name": "dockerhp-tags",
131 | "resource_type": "instance",
132 | "format": "key_value",
133 | "tags": {"DataClassification": "None",
134 | "MailAlias": "noone@nowhere.org",
135 | "Name": "dockerhp",
136 | "ApplicationName": "dockerhp-application",
137 | "ResourceOwner": "adam pridgen",
138 | "Environment": "development"
139 | }
140 | },
141 | {"name": "collector-tags",
142 | "resource_type": "instance",
143 | "format": "key_value",
144 | "tags": {"DataClassification": "None",
145 | "MailAlias": "noone@nowhere.org",
146 | "Name": "dockerhp-collector",
147 | "ApplicationName": "dockerhp-application",
148 | "ResourceOwner": "adam pridgen",
149 | "Environment": "development"
150 | }
151 | },
152 | {"name": "dockerhp-mongodb-tags",
153 | "resource_type": "instance",
154 | "format": "key_value",
155 | "tags": {"DataClassification": "None",
156 | "MailAlias": "noone@nowhere.org",
157 | "Name": "dockerhp-mongodb",
158 | "ApplicationName": "dockerhp-application",
159 | "ResourceOwner": "adam pridgen",
160 | "Environment": "development"
161 | }
162 | },
163 | {"name": "dockerhp-mongodb-tags",
164 | "resource_type": "volume",
165 | "format": "key_value",
166 | "tags": {"DataClassification": "None",
167 | "MailAlias": "noone@nowhere.org",
168 | "Name": "dockerhp-mongodb",
169 | "ApplicationName": "dockerhp-application",
170 | "ResourceOwner": "adam pridgen",
171 | "Environment": "development"
172 | }
173 | }
174 | ],
175 | "volumes": [
176 | {"name": "dockerhp-mongodb",
177 | "tag_specs": ["dockerhp-mongodb-tags"],
178 | "snapshotid": null,
179 | "volumetype": "standard",
180 | "multiattach": false,
181 | "encrypted": false,
182 | "size": 100
183 | }
184 | ],
185 | "security_groups": [
186 | {"name": "dockerhp-docker-access",
187 | "description": "docker-honeypot security group",
188 | "permissions": [
189 | {"FromPort": 4240, "ToPort": 4245, "IpProtocol": "tcp",
190 | "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": ""}],
191 | "Ipv6Ranges": [], "PrefixListIds": [],
192 | "UserIdGroupPairs": []},
193 | {"FromPort": 2375, "ToPort": 2380, "IpProtocol": "tcp",
194 | "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": ""}],
195 | "Ipv6Ranges": [], "PrefixListIds": [],
196 | "UserIdGroupPairs": []},
197 | {"FromPort": 61023, "ToPort": 61023, "IpProtocol": "tcp",
198 | "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": ""}],
199 | "Ipv6Ranges": [], "PrefixListIds": [],
200 | "UserIdGroupPairs": []}
201 | ]
202 | },
203 | {"name": "remote-ssh-access",
204 | "description": "ssh remote access security group",
205 | "permissions": [
206 | {"FromPort": 22, "ToPort": 22, "IpProtocol": "tcp",
207 | "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": ""}],
208 | "Ipv6Ranges": [], "PrefixListIds": [],
209 | "UserIdGroupPairs": []}
210 | ]
211 | },
212 | {"name": "dockerhp-mongodb-access",
213 | "description": "mongodb for dockerhp-collector ingress security group",
214 | "permissions": [
215 | {"FromPort": 27127, "ToPort": 27127, "IpProtocol": "tcp",
216 | "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": ""}],
217 | "Ipv6Ranges": [], "PrefixListIds": [],
218 | "UserIdGroupPairs": []}
219 | ]
220 | },
221 | {"name": "dockerhp-collector-access",
222 | "description": "dockerhp-collector ingress security group",
223 | "permissions": [
224 | {"FromPort": 5000, "ToPort": 5000, "IpProtocol": "tcp",
225 | "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": ""}],
226 | "Ipv6Ranges": [], "PrefixListIds": [],
227 | "UserIdGroupPairs": []}
228 | ]
229 | }
230 | ],
231 | "actions":{
232 | "setup_mongodb": {"type": "commands",
233 | "commands": [
234 | "sudo mkfs.ext4 /dev/xvdf",
235 | "sudo mkdir -p /opt/data/mongo",
236 | "chmod a+rw -R /opt/data",
237 | "sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl software-properties-common git python3-pip",
238 | "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -",
239 | "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable\"",
240 | "sudo apt update && sudo apt install -y docker-ce docker-compose",
241 | "echo 'MONGO_INITDB_ROOT_USERNAME={mongo_user}' >> env",
242 | "echo 'MONGO_INITDB_ROOT_PASSWORD={mongo_pass}' >> env",
243 | "mkdir ssl",
244 | "mkdir dockerfiles",
245 | "sudo usermod -aG docker {username}"
246 | ]
247 | },
248 | "setup_collector_dockerhp": {"type": "commands",
249 | "pre_wait": 20.0,
250 | "post_wait":0.0,
251 | "commands": [
252 | "sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl software-properties-common git python3-pip",
253 | "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -",
254 | "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable\"",
255 | "sudo apt update && sudo apt install -y docker-ce docker-compose",
256 | "sudo usermod -aG docker {username}",
257 | "mkdir ssl",
258 | "mkdir dockerfiles",
259 | "touch env"
260 | ]
261 |
262 | },
263 | "start_container": {"type": "commands",
264 | "commands": [
265 | "docker-compose up -d {container_name}"
266 | ]
267 | },
268 | "upload_mongodb_configs": {"type": "upload_files",
269 | "files":[
270 | {"src": "ssl/mongodb.pem", "dst": "ssl/mongodb.pem"},
271 | {"src": "ssl/ca-dockerhp-collector.crt", "dst": "ssl/ca-dockerhp-collector.crt"},
272 | {"src": "ssl/ca-dockerhp-collector.crt", "dst": "ssl/ca-dockerhp-collector.crt"},
273 | {"src": "dockerfiles/mongodb", "dst": "dockerfiles/mongodb"},
274 | {"src": "docker-compose.yaml", "dst": "docker-compose.yaml"}
275 | ]},
276 | "upload_collector_configs": {"type": "upload_files",
277 | "files":[
278 | {"src": "ssl/collector-cert.pem", "dst": "ssl/collector-cert.pem"},
279 | {"src": "ssl/collector-key.pem", "dst": "ssl/collector-key.pem"},
280 | {"src": "ssl/ca-dockerhp-collector.crt", "dst": "ssl/ca-dockerhp-collector.crt"},
281 | {"src": "dockerfiles/collector", "dst": "dockerfiles/collector"},
282 | {"src": "docker-compose.yaml", "dst": "docker-compose.yaml"}
283 | ]},
284 | "upload_dockerhp_configs": {"type": "upload_files",
285 | "files":[
286 | {"src": "ssl/dockerhp-cert.pem", "dst": "ssl/dockerhp-cert.pem"},
287 | {"src": "ssl/dockerhp-key.pem", "dst": "ssl/dockerhp-key.pem"},
288 | {"src": "ssl/ca-dockerhp-collector.crt", "dst": "ssl/ca-dockerhp-collector.crt"},
289 | {"src": "dockerfiles/dockerhp", "dst": "dockerfiles/dockerhp"},
290 | {"src": "docker-compose.yaml", "dst": "docker-compose.yaml"}
291 | ]}
292 | }
293 |
294 | }
--------------------------------------------------------------------------------
/samples/collector_config_sample.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_secret_key": null,
3 | "global_hostname": null,
4 | "global_port": 5000,
5 | "certs_path": "./ssl/",
6 | "supress_recon": true,
7 |
8 | "collector": false,
9 | "collector_host": "127.0.0.1",
10 | "collector_port": 5000,
11 | "collector_verify_ssl": false,
12 | "collector_crt": "collector-cert.pem",
13 | "collector_key": "collector-key.pem",
14 | "collector_ca_crt": "ca-dockerhp-collector.crt",
15 | "collector_ca_name": "ca-dockerhp-collector",
16 | "collector_common_name": "dockerhp-collector",
17 | "collector_token": null,
18 | "honeypot_tokens": null,
19 | "admin_token": null,
20 |
21 | "dockerhp_listen": false,
22 | "dockerhp_host": "127.0.0.1",
23 | "dockerhp_port": 61023,
24 | "dockerhp_verify_ssl": false,
25 | "dockerhp_crt": "dockerhp-cert.pem",
26 | "dockerhp_key": "dockerhp-key.pem",
27 | "dockerhp_ca_crt": "ca-dockerhp-collector.crt",
28 | "dockerhp_ca_name": "ca-dockerhp-collector",
29 | "dockerhp_common_name": "dockerhp",
30 |
31 | "mongo": false,
32 | "mongo_db": "docker_honeypot",
33 | "mongo_ssl": true,
34 | "mongo_host": null,
35 | "mongo_port": 27127,
36 | "mongo_user": "mongo_user",
37 | "mongo_pass": null,
38 |
39 | "slack": true,
40 | "slack_channel": "#tw-threat-intel",
41 | "slack_username": "docker-hp",
42 | "slack_webhook": null,
43 |
44 | "wbx": true,
45 | "wbx_webhook": null
46 | }
47 |
--------------------------------------------------------------------------------
/samples/env:
--------------------------------------------------------------------------------
1 | MONGO_INITDB_ROOT_PASSWORD=
2 | MONGO_INITDB_ROOT_USERNAME=mongo_user
--------------------------------------------------------------------------------
/samples/hp_config_sample.json:
--------------------------------------------------------------------------------
1 | {
2 |
3 | "ports": [2375, 2376, 2377, 4243, 4244],
4 | "terminate_with_error": true,
5 | "sensor_id": null,
6 | "certs_path": "./ssl/",
7 |
8 | "server_secret_key": null,
9 | "global_hostname": null,
10 | "global_port": 61023,
11 |
12 | "collector": true,
13 | "collector_alt_host": null,
14 | "collector_alt_port": 5001,
15 | "collector_host": null,
16 | "collector_port": 5000,
17 | "collector_verify_ssl": false,
18 | "collector_crt": "collector-cert.pem",
19 | "collector_key": "collector-key.pem",
20 | "collector_ca_crt": "ca-dockerhp-collector.crt",
21 | "collector_ca_name": "ca-dockerhp-collector",
22 | "collector_common_name": "dockerhp-collector",
23 | "collector_token": null,
24 | "honeypot_tokens": null,
25 |
26 | "dockerhp_listen": true,
27 | "dockerhp_host": "0.0.0.0",
28 | "dockerhp_port": 61023,
29 | "dockerhp_verify_ssl": false,
30 | "dockerhp_crt": "dockerhp-cert.pem",
31 | "dockerhp_key": "dockerhp-key.pem",
32 | "dockerhp_ca_crt": "ca-dockerhp-collector.crt",
33 | "dockerhp_ca_name": "ca-dockerhp-collector",
34 | "dockerhp_common_name": "dockerhp",
35 |
36 | "mongo": false,
37 | "mongo_db": "docker_honeypot",
38 | "mongo_ssl": true,
39 | "mongo_host": "127.0.0.1",
40 | "mongo_port": 27127,
41 | "mongo_user": "mongo_user",
42 | "mongo_pass": null,
43 |
44 | "slack": false,
45 | "slack_channel": "#tw-threat-intel",
46 | "slack_username": "docker-hp",
47 | "slack_webhook": null,
48 |
49 | "wbx": false,
50 | "wbx_webhook": null
51 | }
52 |
--------------------------------------------------------------------------------
/samples/secrets_sample.json:
--------------------------------------------------------------------------------
1 | {
2 | "server_secret_key": null,
3 | "honeypot_token": null,
4 |
5 | "mongo_user": "mongo_user",
6 | "mongo_pass": null,
7 | "mongo_host": null,
8 |
9 | "username": "ubuntu",
10 |
11 | "collector_host": null,
12 | "collector_alt_host": null,
13 | "collector_token": null,
14 | "admin_token": null,
15 | "first_admin_email": "noone@localhost.com",
16 | "first_admin_name": "admin collector",
17 | "first_admin_description": "first admin token info",
18 |
19 | "wbx_webhook": null,
20 | "slack_webhook": null,
21 | }
--------------------------------------------------------------------------------
/scripts/collector.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 | import sys
20 | import subprocess
21 | from docker_honey.util import *
22 | from docker_honey.collector_actions import *
23 | from docker_honey.commands import *
24 | from docker_honey.consts import GLOBAL_NOTIFIER as NOTIFIER
25 | from docker_honey.consts import *
26 | from docker_honey.notify import *
27 | from docker_honey.simple_commands.app import Hypercorn as App
28 | from docker_honey.simple_commands.util import *
29 |
30 | from time import sleep
31 | import asyncio
32 | import argparse
33 | from multiprocessing import Process
34 | from quart import Quart, jsonify, Response, request
35 | import json
36 | import sys
37 | from hypercorn.config import Config
38 | from hypercorn.asyncio import serve
39 |
40 | LOGGER = get_stream_logger(__name__)
41 |
42 | # require installation
43 | parser = argparse.ArgumentParser()
44 | parser.add_argument("-config", help="json config to load from", default=None)
45 | parser.add_argument("-submit", help="submit url to remote host", default=False, action="store_true")
46 | parser.add_argument("-user_agent", help="user agent", default=DEFAULT_USER_AGENT)
47 | parser.add_argument("-headers", help="headers", default={})
48 | parser.add_argument("-url", help="url to submit", default=None)
49 | parser.add_argument("-json_payload", help="json payload to submit", default=None)
50 | parser.add_argument("-data_parameters", help="data payload to submit", default=None)
51 | parser.add_argument("-method", help="HTTP method to use (e.g. GET or POST)", default=None)
52 | parser.add_argument("-sensor_id", help="sensor_id to submit to or 'all'", default=None)
53 | parser.add_argument("-sensor_ip", help="sensor_ip to submit to or 'all'", default=None)
54 |
55 |
56 | APP = None
57 |
58 | async def submit_remote_request(**kargs):
59 | request_parameters = CommandHandler.build_perform_web_request_payload(**kargs)
60 | sensor_ip = kargs.get('sensor_ip', None)
61 | sensor_id = kargs.get('sensor_id', None)
62 | if sensor_id is None:
63 | raise Exception("Sensor id not set.")
64 |
65 | skargs = {"sensor_ip":kargs.get("sensor_ip", None),
66 | "sensor_id":kargs.get("sensor_id", None),
67 | "token_value":kargs.get("token_value", None)}
68 |
69 | sensor_infos = get_single_notifier().get_sensor_infos(**skargs)
70 | print(sensor_infos, skargs)
71 |
72 | base_payload = {k: v for k, v in skargs.items() if not v is None}
73 | base_payload.update(request_parameters)
74 | for si in sensor_infos:
75 | payload = base_payload.copy()
76 | payload['sensor_ip'] = si.sensor_ip
77 | payload['sensor_id'] = si.sensor_id
78 | payload['token_value'] = si.token
79 | sensor_ip = sensor_ip if sensor_ip and len(sensor_infos) == 1 else si.sensor_ip
80 | sensor_id = sensor_id if sensor_id and len(sensor_infos) == 1 else si.sensor_id
81 | print(payload, sensor_id, sensor_ip)
82 | await CommandHandler.submit_remote_web_request(sensor_id, sensor_ip, DEFAULT_HP_LPORT, si.token, payload)
83 |
84 |
85 |
86 |
87 |
88 |
89 | async def main(**dargs):
90 | global APP, NOTIFIER
91 | host = dargs.get('collector_host', '0.0.0.0')
92 |     ca_name = dargs.get('collector_ca_name', 'ca-'+FAKE_COMMON_NAME)
93 | ca_crt = dargs.get('collector_ca_crt', ca_name+'.crt')
94 | server_crt = dargs.get('collector_crt', None)
95 | server_key = dargs.get('collector_key', None)
96 | port = dargs.get('collector_port', DEFAULT_HTTP_PORT)
97 | certs_path = dargs.get('certs_path', "./ssl")
98 | if ca_name is None:
99 | ca_name = "ca-collector"
100 |
101 | if server_crt is None or server_key is None:
102 | # common_name = 'collector'
103 | # server_key = "{}.key".format(common_name)
104 | # server_crt = "{}.crt".format(common_name)
105 | # create_certs(ca_name=ca_name, common_name=common_name, certs_path=certs_path)
106 | LOGGER.critical("Missing certificates for SSL, exiting")
107 | raise Exception("Missing certificates for SSL, exiting")
108 |
109 | dargs.update({"collector_crt": server_crt,
110 | "collector_key": server_key,
111 | "ca_name": ca_name})
112 |
113 |
114 | dargs['is_collector'] = True
115 | NOTIFIER = get_single_notifier(**dargs)
116 |
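    # Token bootstrap: make sure a first admin token exists, register an
    # operator-supplied admin token if one was configured, and assemble the
    # set of honeypot tokens this collector will accept.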
117 | admin_token_info = await get_single_notifier().get_first_token()
118 | if admin_token_info is None:
119 | LOGGER.info("Missing first token, attempting to create it")
120 |         email = dargs.get('first_admin_email', 'noone@localhost')
121 |         name = dargs.get('first_admin_name', 'admin collector')
122 |         description = dargs.get('first_admin_description', 'first admin token')
123 | _ = await get_single_notifier().create_first_admin(email=email,
124 | name=name,
125 | description=description)
126 | admin_token_info = await get_single_notifier().get_first_token()
127 |
128 | if admin_token_info is None:
129 | LOGGER.critical("No admin token found, failing")
130 | raise Exception("No admin token found, failing")
131 |
132 | use_admin_token = dargs.get('admin_token', None)
133 | use_admin_token_info = None
134 | if use_admin_token is not None:
135 | use_admin_token_info = await get_single_notifier().get_token(use_admin_token)
136 |
137 | if use_admin_token_info is None:
138 | use_admin_token_info = await get_single_notifier().add_token(admin_token_info.token,
139 | use_admin_token,
140 | email=admin_token_info.email,
141 | name=admin_token_info.name,
142 | description='admin collector token',
143 | is_admin=False, is_active=True)
144 | if use_admin_token_info is None:
145 | use_admin_token_info = admin_token_info
146 |
147 |
148 | _honeypot_tokens = dargs.get('honeypot_tokens', None)
149 | honeypot_tokens = None
150 | if isinstance(_honeypot_tokens, list) \
151 | and len(_honeypot_tokens) > 0:
152 | honeypot_tokens = []
153 | for token in sorted(set(_honeypot_tokens)):
154 | token_info = await get_single_notifier().get_token(token)
155 | if token_info:
156 | honeypot_tokens.append(token_info)
157 | else:
158 | token_info = await get_single_notifier().add_token(admin_token_info.token,
159 | token,
160 | email=admin_token_info.email,
161 | name=admin_token_info.name,
162 | description='honeypot token',
163 | is_admin=False, is_active=True)
164 | honeypot_tokens.append(token_info)
165 | if honeypot_tokens is None:
166 | honeypot_tokens = await get_single_notifier().get_honeypot_token_values()
167 |
168 | if honeypot_tokens is None or len(honeypot_tokens) == 0:
169 | honeypot_tokens = [await get_single_notifier().create_honeypot_token()]
170 |
171 | h_tokens = [hpt.token for hpt in honeypot_tokens]
172 | a_tokens = use_admin_token_info.token
173 | NOTIFIER.honeypot_tokens = h_tokens
174 | NOTIFIER.admin_token = a_tokens
175 | NOTIFIER.notify_collector_startup()
176 |
177 | app = App('docker-hp-collector', host='0.0.0.0', port=port, certs_path=certs_path,
178 | ca_crt=ca_crt, server_crt=server_crt, server_key=server_key)
179 |
180 | app.add_url_rule(REMOTE_REQUEST_ENDPOINT, 'basic_submit_web_request', handle_remote_web_request_page, methods = ['POST', 'GET'] )
181 | app.add_url_rule(EVENTS_ENDPOINT, 'events', handle_events, methods = ['POST', 'GET'])
182 | app.add_url_rule(REGISTER_ENDPOINT, 'register_sensor', handle_register, methods = ['POST'])
183 | app.add_url_rule(SUMMARY_ENDPOINT, 'remote_summary_downloads', handle_summary_downloads, methods = ['GET'])
184 | app.add_url_rule(DOWNLOAD_ENDPOINT, 'remote_file_downloads', handle_file_downloads, methods = ['GET'])
185 | app.add_url_rule(PING_ENDPOINT, 'ping', handle_ping, methods = ['POST'])
186 | app.add_url_rule(NEW_TOKEN_ENDPOINT, 'new_token', handle_new_token, methods = ['POST'])
187 | app.add_url_rule(COMMANDS_RESPONSE_ENDPOINT, 'remote_commands_response', handle_remote_command_responses, methods = ['POST'])
188 | app.add_url_rule(EVENT_ENDPOINT, 'get_event', handle_get_event, methods = ['GET'])
189 |
190 |     LOGGER.info("Admin token for this instance is: {}".format(use_admin_token_info.token))
191 | for hpt in honeypot_tokens:
192 |         LOGGER.info("honeypot token that will be accepted for this instance is: {}".format(hpt.token))
193 | APP = app
194 |
195 |
196 | if __name__ == '__main__':
197 | args = parser.parse_args()
198 | dargs = vars(args)
199 |
200 | if args.config:
201 | config_path = args.config
202 | del dargs[CONFIG]
203 | config = json.load(open(config_path))
204 | dargs.update(config)
205 | else:
206 | parser.print_help()
207 | sys.exit(-1)
208 |
209 |     if not dargs.get('global_hostname', None):
210 | dargs['global_hostname'] = get_external_ip()
211 | dargs['global_port'] = 5000
212 |
213 |
214 | if args.submit:
215 | NOTIFIER = get_single_notifier(**dargs)
216 | loop = asyncio.get_event_loop()
217 | # results =
218 | asyncio.run(submit_remote_request(**dargs))
219 | sys.exit(0)
220 |
221 |
222 | loop = asyncio.get_event_loop()
223 |
224 | asyncio.run(main(**dargs))
225 | # cheating the nested event loops
226 | APP.quart_run()
227 |
--------------------------------------------------------------------------------
/scripts/deploy.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import subprocess
3 | from docker_honey.util import *
4 | from docker_honey.collector_actions import *
5 | from docker_honey.commands import *
6 | from docker_honey.consts import GLOBAL_NOTIFIER as NOTIFIER
7 | from docker_honey.consts import *
8 | from docker_honey.notify import *
9 | from docker_honey.simple_commands.app import Hypercorn as App
10 | from docker_honey.simple_commands.util import *
11 |
12 | from docker_honey.simple_commands.consts import *
13 | from docker_honey.simple_commands import boto
14 | from docker_honey.simple_commands import ssh
15 | from docker_honey.simple_commands.actions import *
16 | import json
17 | import os
18 |
19 | from multiprocessing import Process
20 | from threading import Thread
21 | from time import sleep
22 | import asyncio
23 | import argparse
24 | from multiprocessing import Process
25 | from quart import Quart, jsonify, Response, request
26 | import json
27 | import sys
28 | from hypercorn.config import Config
29 | from hypercorn.asyncio import serve
30 |
31 | LOGGER = get_stream_logger(__name__)
32 |
33 | NEW_SECRETS = 'new_secrets_file.json'
34 | parser = argparse.ArgumentParser()
35 | parser.add_argument("-config", help="json config to load from", default=None)
36 | parser.add_argument("-secrets", help="json config containing sensitive parameters", default=None)
37 | parser.add_argument("-new_secrets_file", help="json config with updated secrets", default=NEW_SECRETS)
38 | parser.add_argument("-recreate_keys", help="recreate ssh keys", default=False, action="store_true")
39 |
40 | parser.add_argument("-mongodb_up", help="bring up the mongodb instance", default=False, action="store_true")
41 | parser.add_argument("-mongodb_down", help="bring down the mongo instance", default=False, action="store_true")
42 | parser.add_argument("-mongodb_delete_vols", help="delete the mongo volumes", default=False, action="store_true")
43 | parser.add_argument("-mongodb_region", help="region for mongodb", default="us-east-2")
44 |
45 | parser.add_argument("-collector_up", help="bring up the collector instance", default=False, action="store_true")
46 | parser.add_argument("-collector_down", help="bring down the collector instance", default=False, action="store_true")
47 | parser.add_argument("-collector_region", help="region for the collector", default="us-east-2")
48 | parser.add_argument("-collector_count", help="number of collectors to deploy (max 2)", default=1, type=int)
49 | parser.add_argument("-collector_config", help="base collector configuration to update and upload to the instance",
50 | default="./samples/collector_config_sample.json")
51 |
52 | parser.add_argument("-dockerhp_up", help="bring up the docker-hp instances", default=False, action="store_true")
53 | parser.add_argument("-dockerhp_down", help="bring down the docker-hp instances", default=False, action="store_true")
54 | parser.add_argument("-dockerhp_regions", help="regions for the honeypot", nargs='+', default=["us-east-2"])
55 | parser.add_argument("-dockerhp_count", help="number of docker honeypots to deploy", default=1, type=int)
56 | parser.add_argument("-dockerhp_config", help="base docker honeypot configuration to update and upload to the instance",
57 | default="./samples/hp_config_sample.json")
58 |
59 |
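# Typical usage (hypothetical file names; any JSON config/secrets pair works):
#   python3 scripts/deploy.py -config boto_config.json -secrets secrets.json -mongodb_up
#   python3 scripts/deploy.py -config boto_config.json -secrets secrets.json -collector_up
#   python3 scripts/deploy.py -config boto_config.json -secrets secrets.json -dockerhp_up -dockerhp_regions all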
60 |
61 | DOCKERHP_TAGS = {
62 | 'ApplicationName': "dockerhp-application",
63 | 'Name': 'dockerhp'
64 | }
65 |
66 | COLLECTOR_TAGS = {
67 | 'ApplicationName': "dockerhp-application",
68 | 'Name': 'dockerhp-collector'
69 | }
70 |
71 | MONGODB_TAGS = {
72 | 'ApplicationName': "dockerhp-application",
73 | 'Name': 'dockerhp-mongodb'
74 | }
75 |
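# Instances are discovered and torn down purely by the tag sets above, so the
# boto.Commands.*_multiple_regions calls below filter on target_tags rather
# than on instance ids.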
76 | def instance_down(instance_type, regions=['us-east-2']):
77 | if instance_type == 'mongodb':
78 | boto.Commands.terminate_relevant_instances_multiple_regions(regions=regions, target_tags=MONGODB_TAGS, dry_run=False)
79 | elif instance_type == 'mongodb_vols':
80 | boto.Commands.delete_relevant_volumes_multiple_regions(regions=regions, target_tags=MONGODB_TAGS, dry_run=False)
81 | elif instance_type == 'collector':
82 | boto.Commands.terminate_relevant_instances_multiple_regions(regions=regions, target_tags=COLLECTOR_TAGS, dry_run=False)
83 | elif instance_type == 'dockerhp':
84 | boto.Commands.terminate_relevant_instances_multiple_regions(regions=regions, target_tags=DOCKERHP_TAGS, dry_run=False)
85 |
86 |
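# Builds a per-instance hp_config.json (collector endpoints, tokens, sensor_id),
# uploads it over SSH using the instance keypair, then runs the "startup" activity.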
87 | def handle_dockerhp_config_update_and_start(instance_name, region: str, base_config: dict, dockerhp_instances: dict,
88 |                                             instance_public_ip: dict, command_format_args: dict, boto_config: dict):
89 |
90 | dockerhp_config = base_config.copy()
91 | collector_host = command_format_args.get('collector_host')
92 | collector_alt_host = command_format_args.get('collector_alt_host')
93 | collector_port = command_format_args.get('collector_port')
94 | collector_alt_port = command_format_args.get('collector_alt_port')
95 | collector_token = command_format_args.get('collector_token')
96 | server_secret_key = command_format_args.get('server_secret_key')
97 | # admin_token = command_format_args.get('admin_token', None)
98 | honeypot_tokens = command_format_args.get('honeypot_tokens')
99 |     if honeypot_tokens:
100 | collector_token = honeypot_tokens[0]
101 |
102 | if collector_alt_host is None:
103 | collector_alt_host = collector_host
104 |
105 | if collector_port is None:
106 | collector_port = 5000
107 |
108 | if collector_alt_port is None:
109 | collector_alt_port = 5001
110 |
111 | dockerhp_config["server_secret_key"] = server_secret_key
112 |     dockerhp_config['collector_host'] = collector_host
113 |     dockerhp_config['collector_alt_host'] = collector_alt_host
114 | dockerhp_config['collector_port'] = collector_port
115 | dockerhp_config['collector_alt_port'] = collector_alt_port
116 | dockerhp_config["collector_token"] = collector_token
117 | dockerhp_config["collector"] = True
118 | dockerhp_config['honeypot_tokens'] = None
119 | dockerhp_config['wbx'] = False
120 | dockerhp_config['wbx_webhook'] = None
121 | dockerhp_config['slack'] = False
122 | dockerhp_config['slack_webhook'] = None
123 |
124 | if 'aws_access_key_id' in dockerhp_config:
125 | del dockerhp_config['aws_access_key_id']
126 |
127 | if 'aws_secret_access_key' in dockerhp_config:
128 | del dockerhp_config['aws_secret_access_key']
129 |
130 | key_info = boto.Commands.get_instance_key_info(instance_name, boto_config, region=region)
131 | key_name = key_info.get("key_name", None)
132 | key_filename = boto.Commands.get_key_pair(key_info['key_name'], key_info['key_path'],
133 | recreate=False, region=region, **boto_config)
134 |
135 | for instance, ip in instance_public_ip.items():
136 | dockerhp_config["sensor_id"] = "{}:|:{}:|:{}".format(region, ip, instance)
137 | dockerhp_config['global_hostname'] = ip
138 | config_bytes = json.dumps(dockerhp_config, sort_keys=True, indent=6).encode('ascii')
139 | # print(json.dumps(dockerhp_config, indent=6, sort_keys=True))
140 | ssh.Commands.upload_bytes(config_bytes, "hp_config.json", host=ip, key_filename=key_filename, username=UBUNTU)
141 |
142 | activity_name = "startup"
143 | return perform_activity(instance_name, dockerhp_instances, activity_name, instance_public_ip, boto_config, command_format_args)
144 |
145 | def handle_collector_config_update_and_start(instance_name, region: str, base_config: dict, collector_instances: dict,
146 |                                              instance_public_ip: dict, command_format_args: dict, boto_config: dict):
147 |
148 | if 'aws_access_key_id' in base_config:
149 | del base_config['aws_access_key_id']
150 | if 'aws_secret_access_key' in base_config:
151 | del base_config['aws_secret_access_key']
152 |
153 | collector_config = base_config.copy()
154 | alt_collector_config = base_config.copy()
155 |
156 | collector_host = command_format_args.get('collector_host')
157 | alt_collector_host = command_format_args.get('alt_collector_host')
158 | if alt_collector_host is None:
159 | alt_collector_host = collector_host
160 | collector_token = command_format_args.get('collector_token')
161 | server_secret_key = command_format_args.get('server_secret_key')
162 | honeypot_tokens = command_format_args.get('honeypot_tokens', None)
164 | slack_webhook = command_format_args.get('slack_webhook', None)
165 | wbx_webhook = command_format_args.get('wbx_webhook', None)
166 | admin_token = command_format_args.get('admin_token', None)
167 | mongo_host = command_format_args.get('mongo_host', None)
168 | mongo_pass = command_format_args.get('mongo_pass', None)
169 |
170 |
171 |     slack = slack_webhook is not None
172 |     wbx = wbx_webhook is not None
173 |
174 |     # primary collector
175 |     # collector_config["collector_token"] = collector_token
176 |     collector_config['global_hostname'] = collector_host
177 |     collector_config["server_secret_key"] = server_secret_key
178 |     collector_config["admin_token"] = admin_token
179 |     collector_config["honeypot_tokens"] = sorted(set(honeypot_tokens or []))
180 |     collector_config["slack_webhook"] = slack_webhook
181 |     collector_config["slack"] = slack
182 |     collector_config["wbx_webhook"] = wbx_webhook
183 |     collector_config["wbx"] = wbx
184 |     collector_config['mongo'] = mongo_host is not None
185 |     collector_config['mongo_host'] = mongo_host
186 |     collector_config['mongo_pass'] = mongo_pass
187 |
188 |     # alternate collector
189 |     alt_collector_config['global_hostname'] = alt_collector_host
190 |     alt_collector_config["server_secret_key"] = server_secret_key
191 |     alt_collector_config["admin_token"] = admin_token
192 |     alt_collector_config["honeypot_tokens"] = sorted(set(honeypot_tokens or []))
193 |     alt_collector_config["slack_webhook"] = slack_webhook
194 |     alt_collector_config["slack"] = slack
195 |     alt_collector_config["wbx_webhook"] = wbx_webhook
196 |     alt_collector_config["wbx"] = wbx
197 |     alt_collector_config['mongo'] = mongo_host is not None
198 |     alt_collector_config['mongo_host'] = mongo_host
199 |     alt_collector_config['mongo_pass'] = mongo_pass
200 |
201 |
202 | key_info = boto.Commands.get_instance_key_info(instance_name, boto_config, region=region)
203 | key_name = key_info.get("key_name", None)
204 |     key_filename = boto.Commands.get_key_pair(key_info['key_name'], key_info['key_path'],
205 |                                               recreate=False, region=region, **boto_config)
206 |
207 | username = UBUNTU
208 | config_bytes = json.dumps(collector_config, sort_keys=True, indent=6).encode('ascii')
209 | ssh.Commands.upload_bytes(config_bytes, "collector_config.json", host=collector_host, key_filename=key_filename, username=username)
210 | config_bytes = json.dumps(alt_collector_config, sort_keys=True, indent=6).encode('ascii')
211 | if alt_collector_host is not None and alt_collector_host != collector_host:
212 | ssh.Commands.upload_bytes(config_bytes, "collector_config.json", host=alt_collector_host, key_filename=key_filename, username=username)
213 | activity_name = "startup"
214 | return perform_activity(instance_name, collector_instances, activity_name, instance_public_ip, boto_config, command_format_args)
215 |
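# Merges the sample honeypot config with the secrets file, ensures a
# server_secret_key and collector_token exist (persisting them back to
# -new_secrets_file), then launches one deployment process per region.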
216 | def deploy_dockerhp(args, boto_config, boto_secrets):
217 |
218 | base_config = json.load(open(args.dockerhp_config))
219 | base_config = merge_dicts(base_config, boto_secrets)
220 |     # check mongo is valid
221 |     if base_config.get("mongo", False):
222 |         mongo_pass = base_config.get('mongo_pass', None)
223 |         mongo_host = base_config.get('mongo_host', None)
224 |         if mongo_pass is None:
225 |             LOGGER.critical("Missing 'mongo_pass', exiting")
226 |             sys.exit(1)
227 |         elif mongo_host is None:
228 |             LOGGER.critical("Missing 'mongo_host', exiting")
229 |             sys.exit(1)
230 |
231 |     if base_config.get("slack", False) and base_config.get("slack_webhook", None) is None:
232 |         LOGGER.critical("Missing 'slack_webhook', exiting")
233 |         sys.exit(1)
234 |
235 |     if base_config.get("wbx", False) and base_config.get("wbx_webhook", None) is None:
236 |         LOGGER.critical("Missing 'wbx_webhook', exiting")
237 |         sys.exit(1)
236 |
237 | server_secret_key = base_config.get("server_secret_key", None)
238 | if server_secret_key is None:
239 | server_secret_key = random_alphanum_string_generator()
240 | update_config('server_secret_key', server_secret_key, boto_secrets)
241 | base_config = merge_dicts(base_config, boto_secrets)
242 | json.dump(boto_secrets, open(args.new_secrets_file, 'w'), indent=6, sort_keys=True)
243 |
244 | collector_token = boto_secrets.get('collector_token', None)
245 | if collector_token is None:
246 | collector_token = random_alphanum_string_generator()
247 | update_config('collector_token', collector_token, boto_secrets)
248 | base_config = merge_dicts(base_config, boto_secrets)
249 | json.dump(boto_secrets, open(args.new_secrets_file, 'w'), indent=6, sort_keys=True)
250 |
251 | instance_name = "dockerhp"
252 | instances_configs = {i['name']: i for i in boto_config.get('instance_descriptions', [])}
253 | command_string_parameters = instances_configs[instance_name].get('command_string_parameters', [])
254 | dc_command_format_args = command_strings_to_dict(command_string_parameters)
255 | dc_command_format_args = merge_dicts(dc_command_format_args, boto_secrets)
256 | max_count = args.dockerhp_count
257 | regions = args.dockerhp_regions
258 | if "all" in regions:
259 | regions = DCS
260 |
261 | region_processes = {}
262 | for region in regions:
263 |         proc_args = (instance_name, boto_config, "setup", dc_command_format_args, region, max_count, base_config)
264 |         proc = Process(target=deploy_dockerhp_region, name=None, args=proc_args)
265 | proc.start()
266 | region_processes[region] = proc
267 |
268 | LOGGER.info("Waiting for {} processes to complete".format(len(region_processes)))
269 | items = [(k, v) for k,v in region_processes.items()]
270 | while len(items) > 0:
271 | items = [(k, v) for k,v in region_processes.items() if v.is_alive()]
272 | LOGGER.info("Waiting for {} out of {} processes to complete.".format(len(items), len(region_processes)))
273 | if len(items) == 0:
274 | break
275 | sleep(60.0)
276 | LOGGER.info("Completed: {} deployment processes".format(len(region_processes)))
277 | # return results
278 | return region_processes
279 |
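# Per-region worker: provisions the instances, then spins one thread per
# instance to push its config and start the honeypot.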
280 | def deploy_dockerhp_region(instance_name, boto_config, setup_activity_name,
281 | command_format_args, region, max_count, base_config):
282 | rdc_ai = {}
283 | rdc_ipi = {}
284 | rdc_av = {}
285 | rdc_sr = {}
286 |     results = {}
288 | threads = []
289 | try:
290 | dc_ai, dc_ipi, dc_av, dc_sr = build_instance_and_setup(instance_name, boto_config, setup_activity_name="setup",
291 | command_format_args=command_format_args, region=region,
292 | max_count=max_count)
293 | rdc_ai[region] = dc_ai
294 | rdc_ipi[region] = dc_ipi
295 | rdc_av[region] = dc_av
296 | rdc_sr[region] = dc_sr
297 | if dc_ipi is None:
298 | LOGGER.critical("Public IP information is None, meaning an error occurred somewhere, skipping: {}".format(region))
299 | return
300 | if dc_ai is None:
301 | LOGGER.critical("Instance information is None, meaning an error occurred somewhere, skipping: {}".format(region))
302 | return
303 |
304 |
305 | for iid in dc_ai:
306 |             thread_args = (instance_name, region, base_config, {iid: dc_ai[iid]}, {iid: dc_ipi[iid]}, command_format_args, boto_config)
307 |             thread = Thread(target=handle_dockerhp_config_update_and_start, args=thread_args)
308 | thread.start()
309 | threads.append(thread)
310 | except:
311 | LOGGER.critical("Exception occurred when trying to initialize instances in {}".format(region))
312 | LOGGER.critical(traceback.format_exc())
313 |
314 | LOGGER.info("Waiting for {} threads to complete for {}".format(len(threads), region))
315 | while len(threads) > 0:
316 | threads = [i for i in threads if i.is_alive()]
317 | LOGGER.info("Waiting for {} threads to complete for {}".format(len(threads), region))
318 | if len(threads) == 0:
319 | break
320 | sleep(60.0)
321 | LOGGER.info("Completed: {} threads to complete for {}".format(len(threads), region))
322 | return results
323 |
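# Like deploy_dockerhp, but single-region and limited to at most two collector
# hosts (primary and alternate); the resulting hosts are written back into the
# secrets file so later honeypot deploys can point at them.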
324 | def deploy_collector(args, boto_config, boto_secrets):
325 | base_config = json.load(open(args.collector_config))
326 | base_config = merge_dicts(base_config, boto_secrets)
327 |     # check mongo is valid
328 |     if base_config.get("mongo", False):
329 |         mongo_pass = base_config.get('mongo_pass', None)
330 |         mongo_host = base_config.get('mongo_host', None)
331 |         if mongo_pass is None:
332 |             LOGGER.critical("Missing 'mongo_pass', exiting")
333 |             sys.exit(1)
334 |         elif mongo_host is None:
335 |             LOGGER.critical("Missing 'mongo_host', exiting")
336 |             sys.exit(1)
337 |
338 |     if base_config.get("slack", False) and base_config.get("slack_webhook", None) is None:
339 |         LOGGER.critical("Missing 'slack_webhook', exiting")
340 |         sys.exit(1)
341 |
342 |     if base_config.get("wbx", False) and base_config.get("wbx_webhook", None) is None:
343 |         LOGGER.critical("Missing 'wbx_webhook', exiting")
344 |         sys.exit(1)
343 |
344 | server_secret_key = base_config.get("server_secret_key", None)
345 | if server_secret_key is None:
346 | server_secret_key = random_alphanum_string_generator()
347 | update_config('server_secret_key', server_secret_key, boto_secrets)
348 | base_config = merge_dicts(base_config, boto_secrets)
349 | json.dump(boto_secrets, open(args.new_secrets_file, 'w'), indent=6, sort_keys=True)
350 |
351 | collector_token = boto_secrets.get('collector_token', None)
352 | if collector_token is None:
353 | collector_token = random_alphanum_string_generator()
354 | update_config('collector_token', collector_token, boto_secrets)
355 | base_config = merge_dicts(base_config, boto_secrets)
356 | json.dump(boto_secrets, open(args.new_secrets_file, 'w'), indent=6, sort_keys=True)
357 |
358 | instance_name = "dockerhp-collector"
359 | instances_configs = {i['name']: i for i in boto_config.get('instance_descriptions', [])}
360 | instance_config = instances_configs[instance_name]
361 | command_string_parameters = instance_config.get('command_string_parameters', [])
362 | dc_command_format_args = command_strings_to_dict(command_string_parameters)
363 | dc_command_format_args = merge_dicts(dc_command_format_args, boto_secrets)
364 |     max_count = args.collector_count if 1 <= args.collector_count <= 2 else 1
365 | regions = [args.collector_region]
366 | region = args.collector_region
367 | rdc_ai, rdc_ipi, rdc_av, rdc_sr = build_instance_and_setup_multi_regions_count(instance_name, boto_config,
368 | regions, max_count, command_format_args=dc_command_format_args)
369 |
370 | dc_ai = rdc_ai[region]
371 | dc_ipi = rdc_ipi[region]
372 | dc_av = rdc_av[region]
373 | dc_sr = rdc_sr[region]
374 |
375 | collector_host = None
376 | alt_collector_host = None
377 | if len(dc_ipi) > 1:
378 | collector_host, alt_collector_host = [ip for ip in dc_ipi.values()][:2]
379 | else:
380 | collector_host = [ip for ip in dc_ipi.values()][0]
381 | alt_collector_host = collector_host
382 |
383 | update_config('collector_host', collector_host, boto_secrets)
384 | update_config('collector_alt_host', alt_collector_host, boto_secrets)
385 | base_config = merge_dicts(base_config, boto_secrets)
386 | dc_command_format_args = merge_dicts(dc_command_format_args, boto_secrets)
387 | json.dump(boto_secrets, open(args.new_secrets_file, 'w'), indent=6, sort_keys=True)
389 | handle_collector_config_update_and_start(instance_name, region, base_config, dc_ai, dc_ipi, dc_command_format_args, boto_config)
390 |
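# Ensures mongo_user/mongo_pass exist in the secrets, brings up a single
# mongodb instance, and records its public IP back as mongo_host.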
391 | def deploy_mongodb(args, boto_config, boto_secrets):
392 | mongo_user = boto_secrets.get('mongo_user', None)
393 | mongo_pass = boto_secrets.get('mongo_pass', None)
394 | if mongo_user is None:
395 | update_config('mongo_user', 'mongo_user', boto_secrets)
396 | boto_config = merge_dicts(boto_config, boto_secrets)
397 |
398 | if mongo_pass is None:
399 | update_config('mongo_pass', random_alphanum_string_generator(), boto_secrets)
400 | boto_config = merge_dicts(boto_config, boto_secrets)
401 | mongo_pass = boto_secrets.get('mongo_pass', None)
402 |
403 | instance_name = "dockerhp-mongodb"
404 | instances_configs = {i['name']: i for i in boto_config.get('instance_descriptions', [])}
405 |     instance_config = instances_configs.get(instance_name)
406 |     command_string_parameters = instance_config.get('command_string_parameters', [])
407 | mdb_command_format_args = command_strings_to_dict(command_string_parameters)
408 |     mdb_command_format_args = merge_dicts(mdb_command_format_args, boto_secrets)
409 | regions = [args.mongodb_region]
410 | max_count = 1
411 | mdb_ai, mdb_ipi, mdb_av, mdb_sr = build_instance_and_setup_multi_regions_count(instance_name, boto_config,
412 | regions, max_count, command_format_args=mdb_command_format_args)
413 | mongo_host = list(mdb_ipi[args.mongodb_region].values())[0]
414 | update_config('mongo_host', mongo_host, boto_secrets)
415 | boto_config = merge_dicts(boto_config, boto_secrets)
416 | json.dump(boto_secrets, open(args.new_secrets_file, 'w'), indent=6, sort_keys=True)
417 |
418 |
419 | def update_config(key, value, config):
420 | config[key] = value
421 |
422 | if __name__ == "__main__":
423 |
424 | args = parser.parse_args()
425 |
426 | if args.config is None or args.secrets is None:
427 | parser.print_help()
428 | LOGGER.error("must provide a secrets and config file, exiting")
429 | sys.exit(-1)
430 | elif args.collector_up and \
431 | args.collector_config == 'internal-scripts/collector_config.json':
432 | try:
433 | os.stat(args.collector_config)
434 | except:
435 | LOGGER.error("invalid base collector config ({}), please create one or update the path, exiting".format(args.collector_config))
436 | parser.print_help()
437 | sys.exit(-1)
438 |     elif args.dockerhp_up and \
439 |         args.dockerhp_config == 'internal-scripts/hp_config.json':
440 |         try:
441 |             os.stat(args.dockerhp_config)
442 |         except:
443 |             LOGGER.error("invalid base honeypot config ({}), please create one or update the path, exiting".format(args.dockerhp_config))
444 |             parser.print_help()
445 |             sys.exit(-1)
446 |
447 | boto_config = json.load(open(args.config))
448 | boto_secrets = json.load(open(args.secrets))
449 | boto_config = merge_dicts(boto_config, boto_secrets)
450 | boto.Commands.set_config(**boto_config)
451 |
452 | for i in boto_config.get('instance_descriptions', []):
453 | i['recreate_keypair'] = args.recreate_keys
454 |
455 |
456 | do_down = []
457 | if args.mongodb_down:
458 | instance_down('mongodb', regions=[args.mongodb_region])
459 | do_down.append(['mongodb', [args.mongodb_region]])
460 | if args.collector_down:
461 | instance_down('collector', regions=[args.collector_region])
462 | do_down.append(['collector', [args.collector_region]])
463 | if args.dockerhp_down:
464 | _regions = args.dockerhp_regions
465 | if "all" in args.dockerhp_regions:
466 | _regions = DCS
467 | instance_down('dockerhp', regions=_regions)
468 | do_down.append(['dockerhp', args.dockerhp_regions])
469 | if args.mongodb_delete_vols:
470 | instance_down('mongodb_vols', regions=[args.mongodb_region])
471 | do_down.append(['mongodb_vols', [args.mongodb_region]])
472 |
473 | if len(do_down):
474 | LOGGER.info("Brought down the following instance types in the following regions:")
475 | for t, r in do_down:
476 | LOGGER.info("Type: {} Regions: {}".format(t, ",".join(r)))
477 | sys.exit(0)
478 |
479 | if args.mongodb_up:
480 | LOGGER.info("Deploying mongodb")
481 | deploy_mongodb(args, boto_config, boto_secrets)
482 |
483 | if args.collector_up:
484 | LOGGER.info("Deploying collector")
485 | deploy_collector(args, boto_config, boto_secrets)
486 |
487 | if args.dockerhp_up:
488 | LOGGER.info("Deploying dockerhp")
489 | deploy_dockerhp(args, boto_config, boto_secrets)
490 |
--------------------------------------------------------------------------------
/scripts/docker_honeypot.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | from docker_honey.notify import Notifier
21 | from docker_honey.simple_commands.app import Hypercorn as App
22 | from docker_honey.server import DockerHp
23 | from docker_honey.consts import *
24 | from docker_honey.dockerhp_actions import *
25 | from docker_honey.simple_commands.util import *
26 |
27 | from time import sleep
28 | import asyncio
29 | import argparse
30 | from multiprocessing import Process
31 | from quart import Quart, jsonify, Response, request
32 | import json
33 | import os
34 |
34 | LOGGER = get_stream_logger(__name__)
35 | # require installation
36 | parser = argparse.ArgumentParser()
37 | parser.add_argument("-config", help="path to the configuration", type=str, default=None)
38 |
39 |
40 | NOTIFIER = None
41 |
42 | PROCESSES = []
43 | def start_command_listener(host, port, certs_path, ca_crt, server_crt, server_key, secret_key):
44 | LOGGER.info("settingup command_listener")
45 | # print(get_single_notifier().server_secret_key)
46 | app = App('docker-hp-commands', host='0.0.0.0', port=port, certs_path=certs_path,
47 | ca_crt=ca_crt, server_crt=server_crt, server_key=server_key)
48 |
49 | app.add_url_rule(HP_COMMAND_ENDPOINT, 'commands', handle_remote_commands, methods = ['POST'])
50 | try:
51 | LOGGER.info("Starting command_listener app")
52 | app.quart_run()
53 | except KeyboardInterrupt:
54 | pass
55 | except:
56 | pass
57 |
58 | LOGGER.info("Exiting from start_command_listener")
59 |
60 |
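# Runs the honeypot server itself: DockerHp listens on the configured ports
# and reports events through the shared notifier until the loop is interrupted.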
61 | def main(sensor_id, sensor_ip, notifier, ports, terminate_with_error, error_message):
62 | honeypot = DockerHp(sensor_id, sensor_ip, notifier, ports=ports,
63 | terminate_with_error=terminate_with_error, error_message=error_message)
64 | LOGGER.info("Starting main")
65 | try:
66 | loop = asyncio.get_event_loop()
67 | loop.create_task(honeypot.serve_forever())
68 | loop.run_forever()
69 | except KeyboardInterrupt:
70 | pass
71 | except:
72 | pass
73 |
74 | LOGGER.info("Exiting from main")
75 |
76 |
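# Parent-process heartbeat: registers the sensor once, then pings the collector
# every 60 seconds for as long as any child process is alive.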
77 | async def wait_forever():
78 |     p = [p.is_alive() for p in PROCESSES]
79 |     await get_single_notifier().send_registration()
80 |     await asyncio.sleep(60.0)
81 |     while len(p) > 0:
82 |         try:
83 |             await get_single_notifier().send_ping()
84 |             await asyncio.sleep(60.0)
85 |             p = [p.is_alive() for p in PROCESSES]
86 |         except KeyboardInterrupt:
87 |             break
88 |         except:
89 |             pass
90 | LOGGER.info("Exiting from wait_forever")
91 |
92 | if __name__ == "__main__":
93 | args = parser.parse_args()
94 | dargs = vars(args)
95 | if args.config:
96 | config_path = args.config
97 | del dargs[CONFIG]
98 | config = json.load(open(config_path))
99 | dargs.update(config)
100 |
101 |     if not dargs.get('global_hostname', None):
102 | dargs['global_hostname'] = get_external_ip()
103 | dargs['global_port'] = DEFAULT_HP_LPORT
104 |
105 | terminate_with_error = dargs.get('terminate_with_error', True)
106 | error_message = dargs.get('error_message', ERROR_MESSAGE)
107 | sensor_id = dargs.get('sensor_id', None)
108 | sensor_ip = dargs.get('sensor_ip', None)
109 |
110 |
111 | if sensor_ip is None:
112 | sensor_ip = get_external_ip()
113 | dargs["sensor_ip"] = sensor_ip
114 |
115 | if sensor_id is None:
116 | sensor_id = "{}-{}".format(DEFAULT_SENSOR_NAME, sensor_ip)
117 | dargs['sensor_id'] = sensor_id
118 |
119 |
120 | listen = dargs.get("dockerhp_listen", False)
121 | listen_address = '0.0.0.0'
122 | listen_port = dargs.get("dockerhp_port", DEFAULT_HP_LPORT)
123 |
124 | listen_port = listen_port if listen_port else DEFAULT_HP_LPORT
125 | listen_address = listen_address if listen_address else DEFAULT_HP_LADDR
126 |
127 | server_ca = dargs.get("dockerhp_ca_crt", None)
128 | server_crt = dargs.get("dockerhp_crt", None)
129 | server_key = dargs.get("dockerhp_key", None)
130 | secret_key = dargs.get("server_secret_key", None)
131 | certs_path = dargs.get("certs_path", None)
132 | NOTIFIER = get_single_notifier(**dargs)
133 | # print(secret_key)
134 |
135 | PROCESSES = []
136 | try:
137 | if listen:
138 | p = Process(target=start_command_listener,
139 | args=(listen_address, listen_port, certs_path, server_ca, server_crt, server_key, secret_key))
140 | p.start()
141 | PROCESSES.append(p)
142 |
143 | p = Process(target=main,
144 | args=(sensor_id, sensor_ip, get_single_notifier(), dargs['ports'], terminate_with_error, error_message))
145 | p.start()
146 | PROCESSES.append(p)
147 |
148 | except KeyboardInterrupt:
149 | pass
150 |
151 | # print(PROCESSES)
152 | try:
154 | asyncio.run(wait_forever())
155 | except KeyboardInterrupt:
156 | for p in PROCESSES:
157 | p.terminate()
158 | except:
159 | pass
160 |
161 | for p in PROCESSES:
162 | if p.is_alive():
163 | os.system('kill -9 {}'.format(p.pid))
164 |
--------------------------------------------------------------------------------
/scripts/ez_certs.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import argparse
3 | import os
4 | import tempfile
5 |
6 | __copyright__ = """
7 |
8 | Copyright 2020 Cisco Systems, Inc.
9 |
10 | Licensed under the Apache License, Version 2.0 (the "License");
11 | you may not use this file except in compliance with the License.
12 | You may obtain a copy of the License at
13 |
14 | http://www.apache.org/licenses/LICENSE-2.0
15 |
16 | Unless required by applicable law or agreed to in writing, software
17 | distributed under the License is distributed on an "AS IS" BASIS,
18 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | See the License for the specific language governing permissions and
20 | limitations under the License.
21 |
22 | """
23 | __license__ = "Apache 2.0"
24 |
25 |
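# Downloads a pinned certstrap release into a temp dir, initializes (or reuses)
# a CA, then issues and signs one cert per common name, emitting key/cert/combined
# PEMs under output_path. Example (hypothetical names):
#   python3 scripts/ez_certs.py -ca_name server_ca -common_name collector -output_path ./ssl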
26 | def create_certs(ca_name='server_ca', common_name:str=None, common_names:list=None,
27 | output_path="./ssl/", ca_path=None):
28 |
29 | common_names = common_names if common_names else []
30 |
31 | if common_name and common_name not in common_names:
32 | common_names.append(common_name)
33 |
34 | with tempfile.TemporaryDirectory() as tmpdirname:
35 | kargs = {
36 | "tmpdirname": os.path.join(tmpdirname, 'certstrap'),
37 | "ca_path": ca_path,
38 | "bin_dir": os.path.join(tmpdirname, 'certstrap/bin'),
39 | "out_dir": output_path,
40 | "ca_name": ca_name,
41 | "ca_key_path": os.path.join(output_path, "{}.key".format(ca_name)),
42 | "ca_crl_path": os.path.join(output_path, "{}.crl".format(ca_name)),
43 | "ca_crt_path": os.path.join(output_path, "{}.crt".format(ca_name)),
44 | "certstrap_url": "https://github.com/square/certstrap/releases/download/v1.2.0/certstrap-1.2.0-linux-amd64",
45 | "certstrap_bin": "{}/certstrap".format(os.path.join(tmpdirname, 'certstrap/bin')),
46 | "output_path": output_path,
47 | }
48 |
49 |
50 | os.system("mkdir -p {output_path}".format(**kargs))
51 | os.system("mkdir -p {bin_dir}".format(**kargs))
52 | os.system("curl -fLs -o {certstrap_bin} {certstrap_url}".format(**kargs))
53 | os.system("chmod +x {certstrap_bin}".format(**kargs))
54 | os.system('mkdir -p ./out/')
55 | if ca_path:
56 | os.system('cp {ca_path}/{ca_name}* ./out/'.format(**kargs))
57 | else:
58 | os.system('{certstrap_bin} init --passphrase "" --common-name {ca_name} --expires "100 years"'.format(**kargs))
59 |
60 | os.system('cp ./out/{ca_name}.crt {ca_crt_path}'.format(**kargs))
61 | os.system('cp ./out/{ca_name}.crl {ca_crl_path}'.format(**kargs))
62 | os.system('cp ./out/{ca_name}.key {ca_key_path}'.format(**kargs))
63 | for common_name in common_names:
64 | kargs.update({
65 | "common_name": common_name,
66 | "cert_path": os.path.join(output_path, "{}-cert.pem".format(common_name)),
67 | "key_path": os.path.join(output_path, "{}-key.pem".format(common_name)),
68 | "combined_path": os.path.join(output_path, "{}.pem".format(common_name)),
69 | })
70 |
71 | os.system('{certstrap_bin} request-cert --passphrase "" --common-name {common_name}'.format(**kargs))
72 | os.system('{certstrap_bin} sign {common_name} --passphrase "" --CA {ca_name} --expires "100 years"'.format(**kargs))
73 | os.system('cp ./out/{common_name}.crt {cert_path}'.format(**kargs))
74 | os.system('cp ./out/{common_name}.key {key_path}'.format(**kargs))
75 | os.system('cat {key_path} {cert_path} > {combined_path}'.format(**kargs))
76 |
77 |         os.system('rm -rf ./out/')
78 |
79 |
80 | parser = argparse.ArgumentParser()
81 |
82 | parser.add_argument("-ca_name", help="ca name", default=None)
83 | parser.add_argument("-ca_path", help="path to ca info", default=None)
84 |
85 | parser.add_argument("-common_names", help="common names to create", nargs="+", default=None)
86 | parser.add_argument("-common_name", help="common name to create", default=None)
87 | parser.add_argument("-output_path", help="path to put everything in", default="./ssl")
88 |
89 | if __name__ == '__main__':
90 | args = parser.parse_args()
91 | dargs = vars(args)
92 |
93 | if args.ca_name is None:
94 | parser.print_help()
95 | sys.exit(-1)
96 | elif args.common_name is None and args.common_names is None:
97 | parser.print_help()
98 | sys.exit(-1)
99 |
100 | create_certs(**dargs)
--------------------------------------------------------------------------------
/scripts/python_cmd.sh:
--------------------------------------------------------------------------------
1 | python3 main.py -config config.json
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from setuptools import setup, find_packages
4 | import os
5 |
6 |
7 | data_files = [(d, [os.path.join(d, f) for f in files])
8 | for d, folders, files in os.walk(os.path.join('src', 'config'))]
9 |
10 |
11 | setup(name='docker-honeypot',
12 |       version='0.01',
13 | description='docker honeypot used to capture attempted CREATE API calls',
14 | author='Adam Pridgen',
15 | author_email='adpridge@cisco.com',
16 | install_requires=['wheel', 'quart', 'mongoengine', 'regex', 'validators',
17 | 'ipython', 'flask', 'flask_restful', 'requests', "validator-collection",
18 | 'paramiko', 'boto3', 'netifaces', 'scp', 'hypercorn'],
19 | packages=find_packages('src'),
20 | package_dir={'': 'src'},
21 | )
22 |
--------------------------------------------------------------------------------
/src/docker_honey/__init__.py:
--------------------------------------------------------------------------------
1 | from .consts import DEFAULT_HP_LPORT
2 | from .simple_commands.app import Hypercorn
3 |
4 | setattr(Hypercorn, 'DEFAULT_PORT', DEFAULT_HP_LPORT)
--------------------------------------------------------------------------------
/src/docker_honey/collector_actions.py:
--------------------------------------------------------------------------------
1 | from .util import *
2 | from .notify import get_single_notifier, notifier_initted
3 | from .consts import *
4 | from .notify import *
5 | from .simple_commands.app import Hypercorn as App
6 | from time import sleep
7 | import asyncio
8 | import argparse
9 | from multiprocessing import Process
10 | from quart import Quart, jsonify, Response, request, url_for, make_response
11 | import jinja2
12 | from jinja2 import Environment, BaseLoader
13 | import json
14 | from .commands import *
15 | from .simple_commands.util import *
16 |
17 | BASIC_WEB_REQUEST_PAGE = '''
18 | <html>
19 | <head>
20 | <title>Remote Web Request</title>
21 | </head>
22 | <body>
23 | <!-- minimal submission form (reconstructed); field names mirror the keys
24 |      parsed out of the form in handle_remote_web_request_page below -->
25 | <form action="{{ url_location }}" method="post">
26 |   URL: <input type="text" name="url"/><br/>
27 |   Parameters (JSON): <input type="text" name="parameters"/><br/>
28 |   Headers (JSON): <input type="text" name="headers"/><br/>
29 |   Data payload (JSON): <input type="text" name="data_payload"/><br/>
30 |   JSON payload (JSON): <input type="text" name="json_payload"/><br/>
31 |   Sensor ID: <input type="text" name="sensor_id"/><br/>
32 |   Sensor IP: <input type="text" name="sensor_ip"/><br/>
33 |   Token: <input type="text" name="token"/><br/>
34 |   <input type="submit" value="Submit"/>
35 | </form>
36 | </body>
37 | </html>
38 | '''
48 |
49 | CA_LOGGER = get_stream_logger(__name__)
50 | BASIC_WEB_REQUEST_TEMPLATE = Environment(loader=BaseLoader).from_string(BASIC_WEB_REQUEST_PAGE)
51 |
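# GET renders the submission form above; POST validates the caller's token and
# fans the requested web request out to one or all registered sensors via
# CommandHandler.submit_remote_web_request_cmd.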
52 | async def handle_remote_web_request_page():
53 | method = request.method
54 |     await request.data
56 | if method == 'GET':
57 | return Response(BASIC_WEB_REQUEST_TEMPLATE.render(url_location=url_for("basic_submit_web_request")), 200)
58 | elif method == 'POST':
59 | form = await request.form
60 | # clean up inputs
61 | data = dict(form.items())
62 | data[DATA_PAYLOAD] = dict_or_none(form.get(DATA_PAYLOAD, None))
63 | data[HEADERS] = dict_or_none(form.get(HEADERS, None))
64 | data[PARAMETERS] = dict_or_none(form.get(PARAMETERS, None))
65 | data[JSON_PAYLOAD] = dict_or_none(form.get(JSON_PAYLOAD, None))
66 | data[SENSOR_ID] = str_or_none(form.get(SENSOR_ID, None))
67 | data[SENSOR_IP] = str_or_none(form.get(SENSOR_IP, None))
68 | data[TOKEN] = str_or_none(form.get(TOKEN, None))
69 | data[URL] = url_or_none(form.get(URL, None))
70 | url = data[URL]
71 | sensor_id = data[SENSOR_ID]
72 | sensor_ip = data[SENSOR_IP]
73 | incoming_token = data[TOKEN]
74 | skargs = {SENSOR_ID:sensor_id,
75 | SENSOR_IP: sensor_ip,
76 | TOKEN: None}
77 |
78 | if incoming_token and notifier_initted() and not await get_single_notifier().is_valid(incoming_token):
79 | return Response('', status=403)
80 | elif incoming_token is None:
81 | return Response('', status=200)
82 |
83 |         sensor_infos = None
84 |         do_sensor_id_custom_ip = sensor_id is not None and sensor_ip is not None
85 |         if notifier_initted():
86 |             if sensor_id is not None:
87 |                 sensor_infos = get_single_notifier().get_sensor_infos(sensor_id=sensor_id)
88 |             else:
89 |                 sensor_infos = get_single_notifier().get_sensor_infos()
90 |
91 |         if not sensor_infos:
92 |             return Response('', status=200)
93 |
94 | request_parameters = CommandHandler.build_perform_web_request_payload(**data)
95 | base_payload = {k: v for k, v in skargs.items() if not v is None}
96 | base_payload.update(request_parameters)
97 |
98 | CA_LOGGER.info("Recv'd remote web request for {} ({})".format(sensor_id, sensor_ip))
99 |
100 | all_payloads = base_payload.copy()
101 | all_payloads[SENSOR_IP] = []
102 | all_payloads[SENSOR_ID] = []
103 | all_payloads[TOKEN] = []
104 |
105 | if notifier_initted() and sensor_infos is not None and len(sensor_infos) > 0:
106 | try:
107 | # sensor_infos = get_single_notifier().get_sensor_infos(**skargs)
108 | message = "Failed to submit url to sensors: {}".format(url)
109 |
110 | token = get_single_notifier().server_secret_key
111 | if len(sensor_infos) > 1 and not do_sensor_id_custom_ip:
112 | for si in sensor_infos:
113 | payload = base_payload.copy()
114 | payload[SENSOR_IP] = si.sensor_ip
115 | payload[SENSOR_ID] = si.sensor_id
116 | payload[TOKEN] = token
117 | sensor_id = si.sensor_id
118 | sensor_ip = si.sensor_ip
119 | all_payloads[SENSOR_IP].append(sensor_ip)
120 | all_payloads[SENSOR_ID].append(sensor_id)
121 | all_payloads[TOKEN].append(si.token)
122 | CA_LOGGER.info("Submitted request to {} {} for url: {}".format(sensor_id, sensor_ip, url))
123 | await CommandHandler.submit_remote_web_request_cmd(si.sensor_id, si.sensor_ip, DEFAULT_HP_LPORT, token, payload)
124 |
125 | message = "Submitted request {} for url: {}".format(len(sensor_infos), url)
126 | elif len(sensor_infos) == 1 or do_sensor_id_custom_ip:
127 | token = get_single_notifier().server_secret_key
128 | if sensor_id is not None:
129 | si = [i for i in sensor_infos if sensor_id == i.sensor_id][0]
130 | else:
131 | si = sensor_infos[0]
132 |
133 | sensor_id = si.sensor_id if sensor_id is None else sensor_id
134 | sensor_ip = si.sensor_ip if sensor_ip is None else sensor_ip
135 | payload = base_payload.copy()
136 | payload[SENSOR_IP] = sensor_ip
137 | payload[SENSOR_ID] = sensor_id
138 | payload[TOKEN] = token
139 | all_payloads[SENSOR_IP].append(sensor_ip)
140 | all_payloads[SENSOR_ID].append(sensor_id)
141 | all_payloads[TOKEN].append(si.token)
142 | CA_LOGGER.info("Submitted request to {} {} for url: {}".format(si.sensor_id, si.sensor_ip, url))
143 | await CommandHandler.submit_remote_web_request_cmd(sensor_id, sensor_ip, DEFAULT_HP_LPORT, token, payload)
144 | message = "Submitted request to {}({}) for url: {}".format(sensor_id, sensor_ip, url)
146 | return Response(str(json.dumps(all_payloads, indent=4, sort_keys=True)), status=200)
147 | except:
148 | all_payloads['message'] = "failed to create request"
149 | all_payloads['exception'] = traceback.format_exc()
150 | return Response(str(json.dumps(all_payloads, indent=4, sort_keys=True)), status=500)
151 | return Response('', status=200)
152 |
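# Ingest endpoint for batched honeypot events: token-gated, then persisted and
# forwarded through the notifier.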
153 | async def handle_events():
154 | events = None
155 | try:
156 | payload = json.loads(await request.data)
157 | events = payload.get(EVENTS, None)
158 | token = payload.get(TOKEN, None)
159 | sensor_ip = payload.get(SENSOR_IP, None)
160 | sensor_id = payload.get(SENSOR_ID, None)
161 | dt = payload.get(DATETIME, None)
162 | now = get_iso_time()
163 |
164 | if sensor_id is None or sensor_ip is None or token is None:
165 | return Response('', status=200)
166 | elif events is None or len(events) == 0:
167 | return Response('', status=400)
168 |
169 | CA_LOGGER.info("Recv'd {} events from {} ({}), initted: {}".format(len(events), sensor_id, sensor_ip, notifier_initted()))
170 | if notifier_initted() and not await get_single_notifier().is_valid(token):
171 | return Response('', status=403)
172 |
173 | if notifier_initted():
174 | await get_single_notifier().touch_token(token, now)
175 | await get_single_notifier().collector_notify(sensor_id, sensor_ip, token, dt, now, events)
176 | CA_LOGGER.debug("Logged {} events from {} ({})".format(len(events), sensor_id, sensor_ip))
177 | except:
178 | return Response('', status=500)
179 | return Response('', status=200)
180 |
181 |
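# Sensors call this once at startup; a valid token records (sensor_id, sensor_ip)
# so later commands can be routed to them.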
182 | async def handle_register():
183 | events = None
184 | try:
185 | payload = json.loads(await request.data)
186 | events = payload.get(EVENTS, None)
187 | token = payload.get(TOKEN, None)
188 | sensor_ip = payload.get(SENSOR_IP, None)
189 | sensor_id = payload.get(SENSOR_ID, None)
190 | dt = payload.get(DATETIME, None)
191 | now = get_iso_time()
192 | if sensor_id is None or sensor_ip is None or token is None or dt is None:
193 | return Response('', status=200)
194 |
195 | if notifier_initted() and not await get_single_notifier().is_valid(token):
196 | return Response('', status=403)
197 |
198 | CA_LOGGER.info("Recv'd registration from {} ({})".format(sensor_id, sensor_ip))
199 | if notifier_initted():
200 | await get_single_notifier().touch_token(token, now)
201 | await get_single_notifier().register_sensor(sensor_id, sensor_ip, token, dt, now)
202 | except:
203 | traceback.print_exc()
204 | return Response('', status=500)
205 | return Response('', status=200)
206 |
207 |
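# The stored response body is base64 of an in-memory zip (see
# CommandHandler.perform_web_request), so downloads are served as content.zip.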
208 | async def create_response_file_data(filename, sensor_result):
209 | response_info = sensor_result.response_info
210 | b64data = response_info.get(CONTENT, None)
211 | if b64data is None:
212 | data = b''
213 | else:
214 | data = base64.b64decode(b64data)
215 |
216 | response = await make_response(data)
217 | response.headers['Content-Type'] = "application/zip"
218 | response.headers['Content-Disposition'] = "inline; filename=" + filename
219 | return response
220 |
221 | async def handle_get_event(token, event_id):
222 |     result_id = event_id
223 |     try:
224 |         CA_LOGGER.info("Recv'd get_event request")
225 |
226 | if notifier_initted() and await get_single_notifier().is_valid(token):
227 | result = await get_single_notifier().get_event(result_id)
228 | data = {}
229 | data['sensor_id'] = result.sensor_id
230 | data['sensor_ip'] = result.sensor_ip
231 | data['src_ip'] = result.src_ip
232 | data['src_port'] = result.src_port
233 | data['dst_ip'] = result.dst_ip
234 | data['dst_port'] = result.dst_port
235 | data['created_at'] = result.created_at
236 | data['rtype'] = result.rtype
237 | data['response'] = result.response
238 | data['request_data'] = result.request_data
239 | data['api'] = result.api
240 | data['sent'] = result.sent
241 | data['event_id'] = result.event_id
242 | return jsonify(data)
243 | except:
244 | CA_LOGGER.info("Download ID:{}, exception: {}".format(result_id, traceback.format_exc()))
245 | return Response('', status=500)
246 | return Response('', status=500)
247 |
248 | async def handle_summary_downloads(result_id):
249 |     try:
250 |         CA_LOGGER.info("Recv'd download request")
253 |
254 | if notifier_initted():
255 | result = await get_single_notifier().get_request_result(result_id)
256 | data = {}
257 | data['sensor_id'] = result.sensor_id
258 | data['sensor_ip'] = result.sensor_ip
259 | data['created_at'] = result.created_at
260 | data['received_at'] = result.received_at
261 | data['response_info'] = result.response_info
262 | data['request_parameters'] = result.request_parameters
263 | data['result_id'] = result.result_id
264 | return jsonify(data)
265 | except:
266 | CA_LOGGER.info("Download ID:{}, exception: {}".format(result_id, traceback.format_exc()))
267 | return Response('', status=500)
268 | return Response('', status=500)
269 |
270 | async def handle_file_downloads(result_id):
271 |     try:
272 |         CA_LOGGER.info("Recv'd download request")
274 |
275 | if notifier_initted():
276 | result = await get_single_notifier().get_request_result(result_id)
277 | if result:
278 | response = await create_response_file_data('content.zip', result)
279 | return response
280 |
281 | raise Exception("result_id did not take, {} ".format(result_id))
282 | except:
283 | msg = "Download ID:{}, exception: {}".format(result_id, traceback.format_exc())
284 | CA_LOGGER.info(msg)
285 | return Response(msg, status=500)
286 |     return Response('failed to find link', status=500)
287 |
288 |
289 | async def handle_new_token():
290 |     try:
291 |         payload = json.loads(await request.data)
292 |         token = payload.get(TOKEN, None)
293 |         email = payload.get(EMAIL, None)
294 |         name = payload.get(NAME, None)
295 |         description = payload.get(DESCRIPTION, None)
296 |         is_admin = payload.get(IS_ADMIN, False)
297 |         now = get_iso_time()
298 |         if token is None or email is None or name is None or description is None:
299 |             return Response('', status=200)
300 |
301 |         CA_LOGGER.info("Recv'd new token request from {} ({})".format(name, email))
302 |
303 | if notifier_initted() and not get_single_notifier().is_admin(token):
304 | return Response('', status=403)
305 |
306 | if notifier_initted():
307 | await get_single_notifier().touch_token(token, now)
308 | token_info = await get_single_notifier().new_token(token, email=email, name=name, description=description, is_admin=is_admin)
309 | return jsonify(token=token_info.token)
310 | except:
311 | return Response('', status=500)
312 | return Response('', status=500)
313 |
314 |
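# Liveness endpoint: updates the sensor's last-seen time on every valid ping.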
315 | async def handle_ping():
316 | events = None
317 | try:
318 | payload = json.loads(await request.data)
319 | events = payload.get(EVENTS, None)
320 | token = payload.get(TOKEN, None)
321 | sensor_ip = payload.get(SENSOR_IP, None)
322 | sensor_id = payload.get(SENSOR_ID, None)
323 | dt = payload.get(DATETIME, None)
324 | now = get_iso_time()
325 | CA_LOGGER.info("Recv'd ping from {} ({})".format(sensor_id, sensor_ip))
326 | if sensor_id is None or sensor_ip is None or token is None or dt is None:
327 | return Response('', status=200)
328 |
329 | if notifier_initted() and not await get_single_notifier().is_valid(token):
330 | return Response('', status=403)
331 |
332 | if notifier_initted():
333 | await get_single_notifier().touch_token(token, now)
334 | await get_single_notifier().ping_sensor(sensor_id, sensor_ip, token, dt, now)
335 | except:
336 | traceback.print_exc()
337 | return Response('', status=500)
338 | return Response('', status=200)
339 |
340 |
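# Receives the zipped/base64 results a sensor produced for an earlier remote
# web request command and stores them for later download.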
341 | async def handle_remote_command_responses():
342 | events = None
343 | try:
344 | payload = json.loads(await request.data)
345 | events = payload.get(EVENTS, None)
346 | token = payload.get(TOKEN, None)
347 | sensor_ip = payload.get(SENSOR_IP, None)
348 | sensor_id = payload.get(SENSOR_ID, None)
349 | dt = payload.get(DATETIME, None)
350 | now = get_iso_time()
351 |
352 | if sensor_id is None or sensor_ip is None or token is None or dt is None:
353 | CA_LOGGER.info("Failed to add results remote command results from {} ({})".format(sensor_id, sensor_ip))
354 | return Response('', status=200)
355 | CA_LOGGER.info("Recv'd remote command results from {} ({})".format(sensor_id, sensor_ip))
356 | if get_single_notifier() is None:
357 | CA_LOGGER.info("No notifer present from {} ({})".format(sensor_id, sensor_ip))
358 | if notifier_initted() and not await get_single_notifier().is_valid(token):
359 | return Response('', status=403)
360 |
361 | if notifier_initted():
362 | CA_LOGGER.info("Adding response results from {} ({})".format(sensor_id, sensor_ip))
363 | # print(str(json.dumps(payload, indent=4, sort_keys=True)))
364 | # await get_single_notifier().touch_token(token, now)
365 | try:
366 | await get_single_notifier().requests_sensor(sensor_id, sensor_ip, token, dt, now, payload)
367 | except:
368 | CA_LOGGER.info(traceback.format_exc())
369 |     except:
370 |         return Response('', status=500)
372 | return Response('', status=200)
373 |
374 |
375 |
376 |
--------------------------------------------------------------------------------
/src/docker_honey/commands.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | from .consts import *
21 | from .util import *
22 | import json
23 | import requests
23 | import zipfile
24 | import asyncio
25 | import io
26 | import traceback
27 | import base64
28 | from .simple_commands.util import *
29 |
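# Command plumbing shared by collector and sensors: the collector POSTs a
# REMOTE_WEB_REQUEST_CMD payload to a sensor's command endpoint, and the sensor
# performs the request and returns a zipped, base64-encoded response.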
30 | class CommandHandler(object):
31 | LOGGER = get_stream_logger(__name__ + '.CommandHandler')
32 | @classmethod
33 | def execute_post(cls, collector_url, payload, verify=False):
34 | rsp = None
35 | host = collector_url.split("://")[1].split("/")[0]
36 | try:
37 | rsp = requests.post(collector_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'},
38 | verify=verify, timeout=3.001)
39 | except:
40 | cls.LOGGER.info("Failed to connect to {}.".format(host))
41 |
42 | finally:
43 | if rsp:
44 | cls.LOGGER.info("Connected to {} with response:{}.".format(host, rsp.status_code))
45 | if rsp and rsp.status_code == 200:
46 | return True
47 | return False
48 |
49 | @classmethod
50 | async def handle_commands(cls, **kargs):
51 |
52 | if COMMAND not in kargs:
53 | cls.LOGGER.info("No command specified, returning.".format())
54 | return None
55 | elif kargs[COMMAND] == REMOTE_WEB_REQUEST_CMD:
56 | cls.LOGGER.info("Handling '{}'.".format(REMOTE_WEB_REQUEST_CMD))
57 | response = await cls.perform_web_request(**kargs.get(REQUEST_PARAMETERS, {}))
58 | response[COMMAND] = REMOTE_WEB_REQUEST_CMD
59 | cls.LOGGER.info("Completed '{}'.".format(REMOTE_WEB_REQUEST_CMD))
60 | return response
61 | return None
62 |
63 | @classmethod
64 | async def submit_remote_web_request_cmd(cls, sensor_id, sensor_ip, port, token, payload):
65 | url = "https://{}:{}".format(sensor_ip, port) + HP_COMMAND_ENDPOINT
66 | cls.LOGGER.info("Submitting remote web request to {}:{}.".format(sensor_id, url))
67 | payload[TOKEN] = token
68 | payload[SENSOR_ID] = sensor_id
69 | payload[SENSOR_IP] = sensor_ip
70 | payload[COMMAND] = REMOTE_WEB_REQUEST_CMD
71 | payload[DATETIME] = get_iso_time()
72 | # cls.LOGGER.info("Submitting payload:\n {}\n".format(payload))
73 | return cls.execute_post(url, payload)
74 |
75 |
76 | @classmethod
77 | def build_perform_web_request_payload(cls, url, user_agent=DEFAULT_USER_AGENT, parameters=None, data_payload=None, json_payload=None,
78 | headers=None, method=GET, **kargs):
79 | if headers is None:
80 | headers = {}
81 |
82 | if method is None:
83 | method = GET
84 |
85 | if (data_payload or json_payload) and method == GET:
86 | method = POST
87 |
88 | request_parameters = {
89 | HEADERS: headers,
90 | METHOD: method,
91 | PARAMETERS: parameters,
92 | DATA_PAYLOAD: data_payload,
93 | JSON_PAYLOAD: json_payload,
94 | URL: url,
95 | USER_AGENT: user_agent,
96 | }
97 |
98 | return {REQUEST_PARAMETERS: request_parameters, COMMAND: REMOTE_WEB_REQUEST_CMD}
99 |
100 | @classmethod
101 | async def perform_web_request(cls, **kargs):
102 | user_agent = kargs.get(USER_AGENT, DEFAULT_USER_AGENT)
103 | parameters = kargs.get(PARAMETERS, None)
104 | json_payload = kargs.get(JSON_PAYLOAD, None)
105 | data_payload = kargs.get(DATA_PAYLOAD, None)
106 | headers = kargs.get(HEADERS, {})
107 | method = kargs.get(METHOD, GET)
108 | url = kargs.get(URL, None)
109 | cls.LOGGER.info("Submitting {} request for url {}.".format(method, url))
110 | req_meth = requests.get
111 | if method == GET:
112 | pass
113 | elif method == POST:
114 | req_meth = requests.post
115 |         elif method == PUT:
116 |             req_meth = requests.put
117 |
118 | rsp = None
119 |         response_info = {
120 |             STATUS_CODE: -1,
121 |             HISTORY: [],
122 |             URL: url,
123 |             CONTENT: None,
124 |             HEADERS: None,
125 |             DATETIME: get_iso_time(),
126 |             CONTENT_ENCODING: None,
127 |             CONTENT_TYPE: None,
128 |         }
130 | if headers is None:
131 | headers = {USER_AGENT_HEADER:user_agent}
132 | if USER_AGENT_HEADER not in headers:
133 | headers[USER_AGENT_HEADER] = user_agent
134 | try:
135 | rsp = req_meth(url, json=json_payload,
136 | data=data_payload,
137 | params=parameters,
138 | headers=headers,
139 | verify=False)
140 | data = b''
141 | response_info = {
142 | STATUS_CODE: rsp.status_code,
143 | HISTORY: [] if rsp.history is None else [{STATUS_CODE:i.status_code, URL:i.url} for i in rsp.history],
144 | URL: rsp.request.url,
145 | CONTENT: data,
146 | HEADERS: list(rsp.headers.items()),
147 | DATETIME: get_iso_time(),
148 | "client_headers": headers,
149 | }
150 |
151 | if rsp.content and len(rsp.content) > 0:
152 | memzf = io.BytesIO()
153 | zf = zipfile.ZipFile(memzf, "a", zipfile.ZIP_DEFLATED, False)
154 | summary = ''
155 | summary = "[Request Details]\n" + "\n".join(['{}: {}'.format(k, v) for k,v in kargs.items()])
156 | summary = summary + "\n[History]\n"
157 | summary = summary + "\n".join(["{} {}".format(i.status_code, i.history) for i in rsp.history])
158 | summary = summary + "\n[Content]\n"
159 | summary = summary + 'Length: {}'.format(len(rsp.content))
160 | summary = summary + '\n[Response Headers]\n' + "\n".join(['{}: {}'.format(k, v) for k,v in rsp.headers.items()])
161 | zf.writestr('summary.txt', summary.encode('ascii'))
162 | zf.writestr('content.bin', rsp.content)
163 | zf.close()
164 | memzf.seek(0)
165 | data = base64.b64encode(memzf.read())
166 | response_info[CONTENT] = data.decode('ascii')
167 | response_info[CONTENT_ENCODING] = BASE64
168 | response_info[CONTENT_TYPE] = CONTENT_TYPE_ZIP
169 | except:
170 | cls.LOGGER.info("Request failed {} request for url {}.".format(method, url))
171 | cls.LOGGER.info("Request failed {} request for url {}.".format(method, url))
172 | memzf = io.BytesIO()
173 | zf = zipfile.ZipFile(memzf, "a", zipfile.ZIP_DEFLATED, False)
174 | summary = 'Failed to connect: {}\n{}'.format(url, traceback.format_exc())
175 | zf.writestr('summary.txt', summary.encode('ascii'))
176 | zf.writestr('content.bin', b'')
177 | zf.close()
178 | memzf.seek(0)
179 | data = base64.b64encode(memzf.read())
180 | response_info[CONTENT] = data.decode('ascii')
181 | response_info[CONTENT_ENCODING] = BASE64
182 | response_info[CONTENT_TYPE] = CONTENT_TYPE_ZIP
183 |
184 |
185 | cls.LOGGER.info("Request completed {} request for url {} with status_code: {}.".format(method, url, response_info[STATUS_CODE]))
186 | results = {RESPONSE_INFO: response_info, REQUEST_PARAMETERS: kargs, COMMAND: REMOTE_WEB_REQUEST_CMD}
187 | return results
188 |
--------------------------------------------------------------------------------
/src/docker_honey/consts.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | import regex
22 | import netifaces
23 |
24 | import requests
25 | from urllib3.exceptions import InsecureRequestWarning
26 | requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
27 |
28 | DEFAULT_SENSOR_NAME = 'docker_honeypot'
31 | SENSOR_EXT_IP = None
32 | try:
33 | SENSOR_EXT_IP = requests.get("https://api.ipify.org/?format=json").json()['ip']
34 | except:
35 | gws = netifaces.gateways()
36 | dft = gws.get('default', {})
37 | g = sorted(dft.items(), key=lambda k: k[0])
38 | if len(g) > 0:
39 | SENSOR_EXT_IP = g[0][1][0]
40 |
41 |
42 | GLOBAL_NOTIFIER = None
43 | GLOBAL_APP = None
44 |
45 | USING_EMAIL = False
46 | DOCKER_HP_EMAIL = 'no-reply@docker-honeypot.localhost'
47 | EMAIL_KARGS = {
48 | "username": None,
49 | "password": None,
50 | "server": None,
51 | "port": None,
52 | "cc_list": None,
53 | "subject": None,
54 | }
55 |
56 | USING_SLACK = False
57 | SLACK_KARGS = {
58 | "channel": None,
59 | "username": 'docker_honyepot',
60 | "webhook": None,
61 | "icon_emoji": ":suspect:",
62 | }
63 |
64 | USING_WBX_TEAMS = False
65 | WBX_TEAMS_WEBHOOK = None
66 |
67 |
68 | SLACK_WEBHOOK_PAYLOAD = {
69 | "channel": None,
70 | "username": 'docker_honyepot',
71 | "text": None,
72 | "icon_emoji": ":suspect:",
73 | }
74 |
75 | USING_HTTP = False
76 | HTTP_VERIFY_SSL = False
77 | HTTP_TOKEN = None
78 | HTTP_CLIENT_CRT = None
79 | HTTP_CLIENT_KEY = None
80 | TOKEN = 'token'
81 | EVENTS = 'events'
82 | SENSOR_IP = 'sensor_ip'
83 | SENSOR_ID = 'sensor_id'
84 |
85 |
86 |
87 | USING_MONGO = False
88 | #MAX_DATA = 2000000000
89 | MAX_DATA = 200000 # smaller machines won't work with a large buffer.
90 | PORTS = [2375, 2376, 2377, 4243, 4244]
91 | API = '1.16'
92 |
93 | KEEP_WORKING = False
94 | ERROR_MESSAGE = 'server error'
95 | DEFAULT_SUBJECT = "[DOCKERPOT] Create Attempted {src_host} to {dst_host}"
96 |
97 | DATABASE = 'docker_honeypot'
98 | REQUESTS_COLLECTION = 'connections'
99 | COMMANDS_COLLECTION = 'commands'
100 | IMAGES_COLLECTION = 'images'
101 |
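# Request-line patterns for the slice of the Docker Engine API the honeypot
# emulates; the named group captures the API version prefix (e.g. "v1.16").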
102 | PING_RE = rb'^HEAD \/_ping HTTP\/1\.1.*'
103 | GET_RE = rb'^GET .*'
104 | GET_VERSION_RE = rb'^GET \/(?P<version>v[0-9]+\.[0-9]+)\/version.*'
105 | CREATE_RE = rb'^POST \/(?P<version>v[0-9]+\.[0-9]+)\/containers\/create.*'
106 | CREATE_IMAGE_RE = rb"^POST \/(?P<version>v[0-9]+\.[0-9]+)\/create\?.*"
107 | ATTACH_RE = rb'^POST \/(?P<version>v[0-9]+\.[0-9]+)\/containers\/[0-9a-f]+\/attach.*'
108 | WAIT_RE = rb"^POST \/(?P<version>v[0-9]+\.[0-9]+)\/containers\/[0-9a-f]+\/wait\?condition=removed.*"
109 | START_RE = rb'^POST \/(?P<version>v[0-9]+\.[0-9]+)\/containers\/[0-9a-f]+\/start.*'
110 | INFO_RE = rb'^GET \/(?P<version>v[0-9]+\.[0-9]+)\/info HTTP\/1\.1'
111 |
112 |
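# Canned wire-format responses impersonating a Docker daemon; the {api}, {date},
# and {size} placeholders are filled in before each response is sent.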
113 | GET_RETURN = b'''HTTP/1.1 200 OK\r\nApi-Version: {api}\r\nCache-Control: no-cache, no-store, must-revalidate\r\nContent-Type: text/plain; charset=utf-8\r\nDocker-Experimental: false\r\nOstype: linux\r\nPragma: no-cache\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\n\r\n'''
114 | PING_RETURN = b'''HTTP/1.1 200 OK\r\nApi-Version: {api}\r\nCache-Control: no-cache, no-store, must-revalidate\r\nContent-Length: {size}\r\nContent-Type: text/plain; charset=utf-8\r\nDocker-Experimental: false\r\nOstype: linux\r\nPragma: no-cache\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\n\r\n'''
115 | CREATE_RETURN = b'''HTTP/1.1 201 Created\r\nApi-Version: {api}\r\nContent-Type: application/json\r\nDocker-Experimental: false\r\nOstype: linux\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\nContent-Length: 88\r\n\r\n{{"Id":"{docker_id}","Warnings":[]}}\r\n\r\n'''
116 | CREATE_IMAGE_RETURN = b'''HTTP/1.1 200 OK\r\nApi-Version: {api}\r\nContent-Type: application/json\r\nDocker-Experimental: false\r\nOstype: linux\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\nTransfer-Encoding: chunked\r\n0\r\n'''
117 | WAIT_RETURN = b'''HTTP/1.1 200 OK\r\nApi-Version: {api}\r\nContent-Type: application/json\r\nDocker-Experimental: false\r\nOstype: linux\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\nContent-Length: {size}\r\n\r\n{data}\r\n\r\n'''
118 | ATTACH_RETURN = b'''HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n'''
119 | ERROR_RETURN = b'''HTTP/1.1 500 Internal Server Error\r\nApi-Version: {api}\r\nContent-Type: application/json\r\nDocker-Experimental: false\r\nOstype: linux\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\nContent-Length: {size}\r\n\r\n'''
120 | ERROR_DATA = {"message":"server error"}
121 |
122 | GET_VERSION_RETURN = b'''HTTP/1.1 200 OK\r\nApi-Version: {api}\r\nCache-Control: no-cache, no-store, must-revalidate\r\nContent-Length: {size}\r\nContent-Type: application/json; charset=utf-8\r\nDocker-Experimental: false\r\nOstype: linux\r\nPragma: no-cache\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\n\r\n'''
123 | GET_VERSION_DATA = {"Platform":{"Name":""},"Components":[{"Name":"Engine","Version":"16.03.8","Details":{"ApiVersion":"1.16","Arch":"amd64","BuildTime":"2015-01-18T21:26:54.000000000+00:00","Experimental":"false","GitCommit":"","GoVersion":"go1.0.8","KernelVersion":"2.4.0-42-generic","MinAPIVersion":"1.12","Os":"linux"}},{"Name":"containerd","Version":"1.0.0-0ubuntu2","Details":{"GitCommit":""}},{"Name":"runc","Version":"spec: 0.0.1-dev","Details":{"GitCommit":""}},{"Name":"docker-init","Version":"0.14.0","Details":{"GitCommit":""}}],"Version":"16.03.8","ApiVersion":"1.12","MinAPIVersion":"1.12","GitCommit":"","GoVersion":"go1.0.0","Os":"linux","Arch":"amd64","KernelVersion":"2.4.0-42-generic","BuildTime":"2015-01-18T21:26:54.000000000+00:00"}
124 |
125 |
126 | INFO_RETURN = b'''HTTP/1.1 200 OK\r\nApi-Version: {api}\r\nContent-Type: application/json\r\nDocker-Experimental: false\r\nOstype: linux\r\nServer: Docker/16.03.8 (linux)\r\nDate: {date}\r\nContent-Length: {size}\r\n\r\n'''
127 | INFO_DATA = {"ID":"","Containers":0,"ContainersRunning":1,"ContainersPaused":0,"ContainersStopped":9,"Images":6,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","141"],["Dirperm1 Supported","true"]],"SystemStatus":None,"Plugins":{"Volume":["local"],"Network":["bridge","host","ipvlan","macvlan","null","overlay"],"Authorization":None,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","local","logentries","splunk","syslog"]},"MemoryLimit":True,"SwapLimit":False,"KernelMemory":True,"KernelMemoryTCP":True,"CpuCfsPeriod":True,"CpuCfsQuota":True,"CPUShares":True,"CPUSet":True,"PidsLimit":True,"IPv4Forwarding":True,"BridgeNfIptables":True,"BridgeNfIp6tables":True,"Debug":False,"NFd":30,"OomKillDisable":True,"NGoroutines":41,"SystemTime":"{iso_date}","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"5.4.0-42-generic","OperatingSystem":"Ubuntu 20.04 LTS","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":[],"AllowNondistributableArtifactsHostnames":[],"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":[],"Secure":True,"Official":True}},"Mirrors":[]},"NCPU":8,"MemTotal":33523802112,"GenericResources":None,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"mr-reimagined","Labels":[],"ExperimentalBuild":False,"ServerVersion":"16.03.8","ClusterStore":"","ClusterAdvertise":"","Runtimes":{"runc":{"path":"runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":False,"Error":"","RemoteManagers":None},"LiveRestoreEnabled":False,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"","Expected":""},"RuncCommit":{"ID":"","Expected":""},"InitCommit":{"ID":"","Expected":""},"SecurityOptions":[],"Warnings":[]}
128 |
129 | UNKNOWN_RETURN = b''
130 |
131 | HAS_API = ['PING', 'GET_VERSION', 'CREATE', 'CREATE_IMAGE', 'WAIT', 'ATTACH', 'INFO', 'START']
132 |
133 | WAIT_RETURN_DATA = b'{"Error":{random_string},"StatusCode":{random_string}}'
134 |
135 |
136 | PING = 'PING'
137 | GET = 'GET'
138 | CREATE = 'CREATE'
139 | CREATE_IMAGE = 'CREATE_IMAGE'
140 | WAIT = 'WAIT'
141 | ATTACH = 'ATTACH'
142 | INFO = 'INFO'
143 | START = 'START'
144 | GET_VERSION = 'GET_VERSION'
145 | ERROR = 'ERROR'
146 | UNKNOWN = 'UNKNOWN'
147 |
148 | IDENTIFY = {
149 | PING: regex.compile(PING_RE),
150 | GET_VERSION: regex.compile(GET_VERSION_RE),
151 | GET: regex.compile(GET_RE),
152 | CREATE: regex.compile(CREATE_RE),
153 | CREATE_IMAGE: regex.compile(CREATE_IMAGE_RE),
154 | WAIT: regex.compile(WAIT_RE),
155 | ATTACH: regex.compile(ATTACH_RE),
156 | INFO: regex.compile(INFO_RE),
157 | START: regex.compile(START_RE),
158 |
159 | }
160 |
161 | RESPONSES = {
162 | PING: PING_RETURN,
163 | GET: GET_RETURN,
164 | GET_VERSION: GET_VERSION_RETURN,
165 | CREATE: ERROR_RETURN,
166 | CREATE_IMAGE: ERROR_RETURN,
167 | WAIT: ERROR_RETURN,
168 | ATTACH: ERROR_RETURN,
169 | INFO: ERROR_RETURN,
170 | START: ERROR_RETURN,
171 | UNKNOWN: UNKNOWN_RETURN,
172 | }
173 |
174 | DEFAULT_HTTP_PORT = 9443
175 | DATETIME = 'datetime'
176 | REGISTERED = 'registered'
177 | REGISTER_PATH = '/register'
178 | PING_PATH = '/ping'
179 |
180 |
181 | EMAIL = "email"
182 | NAME = "name"
183 | DESCRIPTION = "description"
184 | IS_ADMIN = "is_admin"
185 | FIRSTIES_TOKEN = 'there_can_be_only_one'
186 |
187 | HONEYPOT_TOKEN = "honeypot_user"
188 | HONEYPOT_DESCRIPTION = "a generic honeypot token for collection"
189 | ALLOWED_TOKEN = 'collector_token'
190 |
191 | USER_AGENT = 'user_agent'
192 | HEADERS = 'headers'
193 | JSON_PAYLOAD = 'json_payload'
194 | DATA_PAYLOAD = 'data_payload'
195 | PARAMETER_PAYLOAD = 'parameter_payload'
196 | METHOD = 'method'
197 | USER_AGENT_HEADER = 'User-Agent'
198 | DEFAULT_USER_AGENT = 'curl/7.19.4 (i386-redhat-linux-gnu) libcurl/7.19.4 NSS/3.12.2.0 zlib/1.2.3 libidn/0.6.14 libssh2/0.18'
199 | GET = 'get'
200 | POST = 'post'
201 | PUT = 'put'
202 |
203 | DEFAULT_COLLECTOR_PORT = 5000
204 | DEFAULT_COLLECTOR_PORT_ALT = 5001
205 |
206 | COMMAND = 'command'
207 | COMMAND_PERFORM_WEBREQ = "perform_web_request"
208 | REQUEST_PARAMETERS = 'request_parameters'
209 | RESPONSE_INFO = 'response_info'
210 |
211 | EVENTS_ENDPOINT = '/events'
212 | REGISTER_ENDPOINT = '/register'
213 | DOWNLOAD_ENDPOINT = '/download_request/<result_id>'
214 | DOWNLOAD_LINK = "https://{host}:{port}/download_request/{result_id}"
215 | SUMMARY_ENDPOINT = "/summary/<result_id>"
216 | SUMMARY_LINK = "https://{host}:{port}/summary/{result_id}"
217 |
218 | EVENT_ENDPOINT = "/event/<token>/<event_id>"
219 | EVENT_LINK = "https://{host}:{port}/event/{token}/{event_id}"
220 | EVENT_ID = 'event_id'
221 |
222 | NEW_TOKEN_ENDPOINT = '/new_token'
223 | PING_ENDPOINT = '/ping'
224 | REQUESTS_ENDPOINT = '/requests'
225 | COMMANDS_ENDPOINT = '/commands'
226 | COMMANDS_RESPONSE_ENDPOINT = '/commands_response'
227 | REMOTE_REQUEST_ENDPOINT = '/remote_web_request'
228 |
229 | DEFAULT_HP_LADDR = '0.0.0.0'
230 | DEFAULT_HP_LPORT = 61023
231 | HP_COMMAND_ENDPOINT = '/commands'
232 | GLOBAL_HOSTNAME = 'global_hostname'
233 | GLOBAL_PORT = 'global_port'
234 | CONFIG = 'config'
235 | PARAMETERS = 'parameters'
236 |
237 | DEFAULT_COLLECTOR_ADDR = '127.0.0.1'
238 | DEFAULT_COLLECTOR_PORT = 5000
239 | COLLECTOR_HTTP_DEFAULTS = {
240 | "collector": False,
241 | "collector_host": DEFAULT_COLLECTOR_ADDR,
242 | "collector_port": 5000,
243 | "collector_verify_ssl": False,
244 | "collector_crt": "./ssl/collector-cert.pem",
245 | "collector_key": "./ssl/collector-key.pem",
246 | "collector_ca": "./ssl/ca-dockerhp-collector.crt",
247 | "collector_ca_name": "ca-dockerhp-collector",
248 | "collector_common_name": "dockerhp-collector",
249 | "collector_url_fmt": "https://{collector_host}:{collector_port}",
250 | "collector_token": None,
251 | "collector_alt_host": None,
252 | "collector_alt_port": None,
253 | }
254 |
255 | DOCKERHP_HTTP_DEFAULTS = {
256 | "dockerhp_listen": False,
257 | "dockerhp_host": DEFAULT_HP_LADDR,
258 | "dockerhp_port": DEFAULT_HP_LPORT,
259 | "dockerhp_verify_ssl": False,
260 | "dockerhp_crt": "./ssl/dockerhp-cert.pem",
261 | "dockerhp_key": "./ssl/dockerhp-key.pem",
262 | "dockerhp_ca": "./ssl/ca-dockerhp-collector.crt",
263 | "dockerhp_ca_name": "ca-dockerhp-collector",
264 | "dockerhp_common_name": "dockerhp",
265 | "dockerhp_url_fmt": "https://{dockerhp_host}:{dockerhp_port}",
266 | }
267 |
268 | GLOBAL_CONFIGS = {
269 | "server_secret_key": None,
270 | "global_hostname": None,
271 | "global_port": None,
272 | "certs_path": None,
273 | "error_message": ERROR_MESSAGE,
274 | }
275 |
276 | MONGO_DEFAULTS = {
277 | "mongo": False,
278 | "mongo_db": "docker_honeypot",
279 | "mongo_ssl": True,
280 | "mongo_host": "fill_in_mongo_name_or_ip",
281 | "mongo_port": 27017,
282 | "mongo_user": "mongo_user",
283 | "mongo_pass": "fill_in_mongo_password_for_access",
284 | }
285 |
286 | SLACK_DEFAULTS = {
287 | "slack": False,
288 | "slack_channel": "#tw-threat-intel",
289 | "slack_username": "docker-hp",
290 | "slack_webhook": None,
291 | "slack_emoticon": ":suspect:"
292 | }
293 |
294 | WBX_DEFAULTS = {
295 | "wbx": False,
296 | "wbx_webhook": None,
297 | }
298 |
299 | FAKE_COMMON_NAME = 'g00gle-com.info'
300 |
301 | HYPERCORN_CONFIGURATION = "hypercorn --bind '{host}:{port}' --keyfile {certs_path}/{keyfile} --certfile {certs_path}/{certfile} --ca-certs {certs_path}/{ca_certfile} {exec_path}:{app}"
302 |
303 |
304 |
305 | DOCKERHP_SG_NAME = 'docker-honeypot'
306 | DOCKERHP_SECURITY_GROUPS = [DOCKERHP_SG_NAME]
307 | DOCKERHP_SG_DESCRIPTION = 'docker-honeypot security group'
308 | DOCKERHP_IN_IP_PERMISSIONS = [
309 | {'FromPort': 4240,
310 | 'IpProtocol': 'tcp',
311 | 'IpRanges': [{'CidrIp': '0.0.0.0/0',
312 | 'Description': 'Inbound Docker Honeypot Connections'}],
313 | 'Ipv6Ranges': [],
314 | 'PrefixListIds': [],
315 | 'ToPort': 4245,
316 | 'UserIdGroupPairs': []},
317 | {'FromPort': 22,
318 | 'IpProtocol': 'tcp',
319 | 'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
320 | 'Ipv6Ranges': [],
321 | 'PrefixListIds': [],
322 | 'ToPort': 22,
323 | 'UserIdGroupPairs': []},
324 | {'FromPort': 2375,
325 | 'IpProtocol': 'tcp',
326 | 'IpRanges': [{'CidrIp': '0.0.0.0/0',
327 | 'Description': 'Inbound Docker Honeypot Connections'}],
328 | 'Ipv6Ranges': [],
329 | 'PrefixListIds': [],
330 | 'ToPort': 2380,
331 | 'UserIdGroupPairs': []}
332 | ]
333 |
334 |
335 | DOCKERHP_SG_OUT_IP_PERMISSIONS = [{'IpProtocol': '-1',
336 | 'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
337 | 'Ipv6Ranges': [],
338 | 'PrefixListIds': [],
339 | 'UserIdGroupPairs': []}
340 | ]
341 |
342 | REGION_TO_AMI = {
343 | 'us-east-1':"ami-0bcc094591f354be2",
344 | 'us-east-2':"ami-0bbe28eb2173f6167",
345 | 'us-west-1':"ami-0dd005d3eb03f66e8",
346 | 'us-west-2':"ami-0a634ae95e11c6f91",
347 | 'sa-east-1':"ami-08caf314e5abfbef4",
348 | # 'ap-east-1':"ami-107d3e61",
349 | 'ap-south-1':"ami-02b5fbc2cb28b77b8",
350 | 'ap-southeast-1':"ami-0007cf37783ff7e10",
351 | 'ap-southeast-2':"ami-0f87b0a4eff45d9ce",
352 | 'ap-northeast-1':"ami-01c36f3329957b16a",
353 | 'ap-northeast-2':"ami-05438a9ce08100b25",
354 |
355 | "eu-north-1": "ami-0363142d8c97b94c8",
356 | "eu-central-1": "ami-04932daa2567651e7",
357 | "eu-west-1": "ami-07ee42ba0209b6d77",
358 | "eu-west-2": "ami-04edc9c2bfcf9a772",
359 | "eu-west-3": "ami-03d4fca0a9ced3d1f",
360 |
361 | }
362 |
363 | DEFAULT_REGION = 'us-east-2'
364 | DEFAULT_IMAGE_ID = REGION_TO_AMI[DEFAULT_REGION]
365 | DCS = list(REGION_TO_AMI.keys())
366 |
367 |
368 | DOCKERHP_INSTALL_SYSTEMCTL_COMMANDS = [
369 |     'sudo cp hp_config.json /etc/hp_config.json',
370 |     'sudo cp docker_honeypot.service /lib/systemd/system/',
371 | 'sudo chmod 644 /lib/systemd/system/docker_honeypot.service',
372 | 'sudo systemctl daemon-reload',
373 | 'sudo systemctl enable docker_honeypot.service',
374 | 'sudo systemctl start docker_honeypot.service',
375 | 'sudo systemctl status docker_honeypot.service'
376 |
377 | ]
378 |
379 | DOCKER_SETUP_COMMANDS = [
380 | 'sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl software-properties-common git python3-pip',
381 | 'curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -',
382 | 'sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable"',
383 | 'sudo apt update && sudo apt install -y docker-ce docker-compose',
384 | 'sudo usermod -aG docker ${USER}',
385 | ]
386 |
387 | DOCKERHP_SYSTEMCTL_CONFIG = '''[Unit]
388 | Description=docker-honeypot service
389 | After=syslog.target
390 |
391 | [Service]
392 | Type=simple
393 | User=ubuntu
394 | Group=ubuntu
395 | WorkingDirectory=/home/ubuntu/dhp
396 | ExecStart=/usr/bin/python3 /home/ubuntu/dhp/scripts/docker_honeypot.py -c /etc/hp_config.json
397 |
398 | [Install]
399 | WantedBy=multi-user.target
400 | '''
401 | REMOTE_WEB_REQUEST_CMD = 'remote_web_request'
402 | URL = 'url'
403 | STATUS_CODE = 'status_code'
404 | HISTORY = 'history'
405 | CONTENT = 'content'
406 | CONTENT_ENCODING = 'content_encoding'
407 | CONTENT_TYPE = 'content_type'
408 | BASE64 = 'base64'
409 | CONTENT_TYPE_ZIP = "application/zip"
410 |
411 | WBX_DOWNLOAD_MESSAGE = '''**Downloaded URL:** `{url}` **{sensor_id} ({sensor_ip})**\n
412 | **Content Results:** {download_link}\n
413 | **Summary Link:** {summary_link}\n'''
414 |
415 | SLACK_DOWNLOAD_MESSAGE = '''*Downloaded URL:* `{url}` *{sensor_id} ({sensor_ip})*\n
416 | *Content Results:* {download_link}\n
417 | *Summary Link:* {summary_link}\n'''
418 |
--------------------------------------------------------------------------------
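The IDENTIFY and RESPONSES tables above drive the whole fake-API exchange: the first pattern that matches the raw request bytes names the request type, that name selects a response template, and the `api` named group fills the Api-Version header. A minimal sketch of that dispatch, illustrative only; the real classification lives in the get_handler_type/create_response helpers that server.py imports from util.py, and this assumes the package is importable as docker_honey:

    # Illustrative dispatch over the IDENTIFY/RESPONSES tables.
    from docker_honey.consts import IDENTIFY, RESPONSES, UNKNOWN, UNKNOWN_RETURN

    def classify(data: bytes) -> str:
        # Insertion order matters: GET_VERSION is tried before the catch-all GET.
        for rtype, pattern in IDENTIFY.items():
            if pattern.match(data):
                return rtype
        return UNKNOWN

    raw = b"HEAD /_ping HTTP/1.1\r\nHost: example\r\n\r\n"
    rtype = classify(raw)                         # -> 'PING'
    template = RESPONSES.get(rtype, UNKNOWN_RETURN)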
/src/docker_honey/dockerhp_actions.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | from .util import *
21 | from .consts import GLOBAL_NOTIFIER as NOTIFIER
22 | from .consts import *
23 | from .notify import *
24 | from .simple_commands.app import Hypercorn as App
25 | from time import sleep
26 | import asyncio
27 | import argparse
28 | from quart import Quart, jsonify, Response, request
29 | import json
30 | from .simple_commands.util import *
31 |
32 | LOGGER = get_stream_logger(__name__)
33 |
34 | async def handle_remote_commands():
35 | global NOTIFIER
36 | events = None
37 | try:
38 | payload = json.loads(await request.data)
39 |         token = payload.get(TOKEN, None)
40 | sensor_ip = payload.get(SENSOR_IP, None)
41 | sensor_id = payload.get(SENSOR_ID, None)
42 | dt = payload.get(DATETIME, None)
43 | now = get_iso_time()
44 | LOGGER.info("Recv'd remote commands requests from {} ({})".format(sensor_id, sensor_ip))
45 | if sensor_id is None or sensor_ip is None or token is None or dt is None:
46 | return Response('', status=200)
47 |
48 | secret_target_token = get_single_notifier().server_secret_key
49 | secret_target_match = secret_target_token == token
50 | LOGGER.info("Authenticated incoming request with 'server_secret_key': sst: {} token{}".format(secret_target_token, token))
51 | if not secret_target_match:
52 | return Response('', status=403)
53 |
54 | LOGGER.info("Authenticated incoming request with 'server_secret_key'")
55 | if notifier_initted():
56 | get_single_notifier().start_process_commands(sensor_id, sensor_ip, token, payload)
57 | except:
58 | traceback.print_exc()
59 | return Response('', status=500)
60 | return Response('', status=200)
--------------------------------------------------------------------------------
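handle_remote_commands is a bare Quart view; in the repository it is mounted through simple_commands.app.Hypercorn rather than by hand. A hypothetical stand-alone wiring, shown only to make the request flow concrete (the real setup lives in simple_commands/app.py and scripts/docker_honeypot.py):

    # Hypothetical wiring of the POST handler above onto HP_COMMAND_ENDPOINT.
    from quart import Quart
    from docker_honey.consts import HP_COMMAND_ENDPOINT, DEFAULT_HP_LADDR, DEFAULT_HP_LPORT
    from docker_honey.dockerhp_actions import handle_remote_commands

    app = Quart(__name__)
    app.add_url_rule(HP_COMMAND_ENDPOINT, 'remote_commands',
                     handle_remote_commands, methods=['POST'])

    if __name__ == '__main__':
        app.run(host=DEFAULT_HP_LADDR, port=DEFAULT_HP_LPORT)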
/src/docker_honey/mongo_orm.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | from mongoengine import *
21 |
22 |
23 | class RegisteredSensor(Document):
24 | sensor_id = StringField(required=True)
25 | sensor_ip = StringField(required=True)
26 | token = StringField(required=True)
27 | created_at = StringField(required=True)
28 | received_at = StringField(required=True)
29 | last_ping = StringField(required=True)
30 |
31 | class TokenInfo(Document):
32 | creator_token = StringField(required=True)
33 | token = StringField(required=True)
34 | email = StringField(required=True)
35 | name = StringField(required=True)
36 | description = StringField(required=True)
37 | is_admin = BooleanField(required=True)
38 | is_active = BooleanField(required=True)
39 | created_at = StringField(required=True)
40 | modified_at = StringField(required=True)
41 | last_used = StringField(required=True)
42 |
43 |
44 | class PingSensor(Document):
45 | sensor_id = StringField(required=True)
46 | sensor_ip = StringField(required=True)
47 | token = StringField(required=True)
48 | created_at = StringField(required=True)
49 | received_at = StringField(required=True)
50 |
51 |
52 | class GeneralEvent(Document):
53 | sensor_id = StringField(required=True)
54 | sensor_ip = StringField(required=True)
55 | src_ip = StringField(required=True)
56 | src_port = IntField(required=True)
57 | dst_ip = StringField(required=True)
58 | dst_port = IntField(required=True)
59 | created_at = StringField(required=True)
60 | rtype = StringField(required=True)
61 | response = StringField(required=True)
62 | request = StringField(required=True)
63 | request_data = DictField()
64 | api = StringField(required=True)
65 | sent = BooleanField(required=True)
66 | event_id = StringField(required=True)
67 |
68 | class RequestResultEvent(Document):
69 | sensor_id = StringField(required=True)
70 | sensor_ip = StringField(required=True)
71 | created_at = StringField(required=True)
72 | received_at = StringField(required=True)
73 | response_info = DictField()
74 | request_parameters = DictField()
75 | result_id = StringField(required=True)
76 |
77 | class CreateEvent(Document):
78 | src_ip = StringField(required=True)
79 | src_port = IntField(required=True)
80 | dst_ip = StringField(required=True)
81 | dst_port = IntField(required=True)
82 | created_at = StringField(required=True)
83 | command = StringField(required=True)
84 | image = StringField(required=True)
85 | event_id = StringField(required=True)
86 |
--------------------------------------------------------------------------------
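Note that every timestamp field above is a StringField holding an ISO-8601 string, so date filtering reduces to lexicographic comparison. A short usage sketch with placeholder connection values (notify.py builds the real URI from MONGO_DEFAULTS):

    # Placeholder credentials/host; mirrors the connect() call in notify.py.
    from mongoengine import connect
    from docker_honey.mongo_orm import RegisteredSensor

    connect('docker_honeypot',
            host='mongodb://mongo_user:secret@127.0.0.1:27017/',
            authentication_source='admin')

    RegisteredSensor(sensor_id='sensor-01', sensor_ip='203.0.113.5',
                     token='collector_token',
                     created_at='2020-08-01T00:00:00',
                     received_at='2020-08-01T00:00:00',
                     last_ping='2020-08-01T00:00:00').save()
    print(RegisteredSensor.objects(sensor_id='sensor-01').count())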
/src/docker_honey/notify.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | import asyncio
21 | import ssl
22 | import uuid
23 | import traceback
24 | import requests
25 | import json
26 | import urllib
27 | from .commands import CommandHandler
28 | from .consts import *
29 | from .util import *
30 | from .mongo_orm import *
31 | from mongoengine import connect
32 | from multiprocessing import Process
33 | from .simple_commands.util import *
34 | import logging
35 |
36 | def get_single_notifier(**kargs):
37 | if Notifier.GLOBAL_NOTIFIER is None:
38 | return Notifier(**kargs)
39 | return Notifier.GLOBAL_NOTIFIER
40 |
41 | def notifier_initted():
42 |     return Notifier.GLOBAL_NOTIFIER is not None
43 |
44 |
45 | class Notifier(object):
46 | GLOBAL_NOTIFIER = None
47 | PROCESSES = []
48 | LOGGER = get_stream_logger(__name__ + '.Notifier')
49 | def get_base_payload(self, use_secret_key=False):
50 | token = self.collector_kargs['collector_token']
51 | if use_secret_key:
52 | token = self.server_secret_key
53 | return {TOKEN: token,
54 | SENSOR_ID: self.sensor_id,
55 | SENSOR_IP: self.sensor_ip,
56 | DATETIME: get_iso_time(),}
57 |
58 | def __init__(self, sensor_id=DEFAULT_SENSOR_NAME, sensor_ip=SENSOR_EXT_IP,
59 | is_collector=False, log_level=logging.DEBUG, supress_recon=True, **kargs):
60 | reset_logger_level(self.LOGGER, log_level)
61 | self.sensor_id = sensor_id
62 | self.sensor_ip = sensor_ip
63 | self.is_collector = is_collector
64 | self.supress_recon = supress_recon
65 | self.server_secret_key = kargs.get('server_secret_key', None)
66 |
67 | self.slack_kargs = {}
68 | self.collector_kargs = {}
69 | self.mongo_kargs = {}
70 | self.wbx_kargs = {}
71 | self.email_kargs = {}
72 | self.elk_kargs = {}
73 | self.honeypot_tokens = []
74 | self.admin_token = None
75 |
76 |
77 | for k, v in GLOBAL_CONFIGS.items():
78 | setattr(self, k, kargs.get(k, v))
79 |
80 | self.mongo_kargs = {k: kargs.get(k, v) for k,v in MONGO_DEFAULTS.items()}
81 | self.slack_kargs = {k: kargs.get(k, v) for k,v in SLACK_DEFAULTS.items()}
82 | self.wbx_kargs = {k: kargs.get(k, v) for k,v in WBX_DEFAULTS.items()}
83 | self.collector_kargs = {k: kargs.get(k, v) for k,v in COLLECTOR_HTTP_DEFAULTS.items()}
84 | self.dockerhp_kargs = {k: kargs.get(k, v) for k,v in DOCKERHP_HTTP_DEFAULTS.items()}
85 |
86 | self.allowed_token = kargs.get(ALLOWED_TOKEN, None)
87 | Notifier.GLOBAL_NOTIFIER = self
88 | # self.GLOBAL_NOTIFIER = self
89 |
90 | self.collector_kargs['collector_url'] = self.collector_kargs['collector_url_fmt'].format(**self.collector_kargs).strip("/")
91 | self.dockerhp_kargs['dockerhp_url'] = self.dockerhp_kargs['dockerhp_url_fmt'].format(**self.dockerhp_kargs)
92 |
93 | if self.mongo_kargs['mongo']:
94 | self.mongo_kargs['mongo_encode_password'] = urllib.parse.quote(self.mongo_kargs['mongo_pass'])
95 | self.mongo_kargs['mongo_host_uri'] = "mongodb://{mongo_user}:{mongo_encode_password}@{mongo_host}:{mongo_port}/".format(**self.mongo_kargs)
96 | self.mc = connect(self.mongo_kargs['mongo_db'], host=self.mongo_kargs['mongo_host_uri'],
97 | port=self.mongo_kargs['mongo_port'],
98 | # db=self.mongo_kargs['mongo_db'],
99 | username=self.mongo_kargs['mongo_user'],
100 | password=self.mongo_kargs['mongo_pass'],
101 | ssl=self.mongo_kargs['mongo_ssl'],
102 | ssl_cert_reqs=ssl.CERT_NONE,
103 | authentication_source="admin")
104 |
105 | # if self.is_collector:
106 | # self.notify_collector_startup()
107 |
108 | def get_collector_token(self):
109 | return self.collector_kargs.get('collector_token', None)
110 |
111 | def notify_collector_startup(self):
112 |
113 | token = self.admin_token
114 | cnt = 1
115 | _tokes = []
116 | for t in self.honeypot_tokens:
117 | _tokes.append('{}. `{}`'.format(cnt, t))
118 | cnt += 1
119 | h_tokens = '\n'.join(_tokes)
120 |
121 | message = "Collector Startup: Access remote commands: https://{}:{}/remote_web_request\n\n**Admin token:** `{}`\n\n**Honeypot Tokens:**\n{}".format(self.global_hostname, self.global_port, token, h_tokens)
122 | # self.LOGGER.info(message)
123 | if self.wbx_kargs['wbx']:
124 | webhook_url = self.wbx_kargs['wbx_webhook']
125 | requests.post(webhook_url, data=json.dumps({'markdown': message}), headers={'Content-Type': 'application/json'})
126 |
127 | if self.slack_kargs['slack']:
128 | message = "Collector Startup: Access remote commands: https://{}:{}/remote_web_request\n\n*Admin token:* `{}`\n\n*Honeypot Tokens:*\n{}".format(self.global_hostname, self.global_port, token, h_tokens)
129 | webhook_url = self.slack_kargs['slack_webhook']
130 | payload = self.get_slack_kargs()
131 | payload['text'] = message
132 | self.execute_post(webhook_url, payload)
133 | # requests.post(webhook_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'})
134 |
135 | async def collector_notify(self, sensor_id, sensor_ip, token, dt, now, results):
136 | if self.mongo_kargs['mongo']:
137 | try:
138 | await self.ping_sensor(sensor_id, sensor_ip, token, dt, now)
139 | await self.add_mongo_results(results)
140 | except:
141 | self.LOGGER.info("Failed to connect to and log results to mongo.")
142 | self.LOGGER.error("{}".format(traceback.format_exc()))
143 |
144 | if self.slack_kargs['slack']:
145 | try:
146 | await self.send_slack_notifications(results)
147 | except:
148 | self.LOGGER.info("Failed to connect to and log results to slack.")
149 | self.LOGGER.error("{}".format(traceback.format_exc()))
150 |
151 | if self.wbx_kargs['wbx']:
152 | try:
153 | await self.send_wbx_teams_notifications(results)
154 | except:
155 | self.LOGGER.info("Failed to connect to and log results to Webex Teams.")
156 | self.LOGGER.error("{}".format(traceback.format_exc()))
157 |
158 | await self.stdout(results)
159 |
160 | async def notify(self, results):
161 | if self.mongo_kargs['mongo']:
162 | try:
163 | await self.add_mongo_results(results)
164 | except:
165 | self.LOGGER.info("Failed to connect to and log results to mongo.")
166 | self.LOGGER.error("{}".format(traceback.format_exc()))
167 |
168 | if self.slack_kargs['slack']:
169 | try:
170 | await self.send_slack_notifications(results)
171 | except:
172 | self.LOGGER.info("Failed to connect to and log results to slack.")
173 | self.LOGGER.error("{}".format(traceback.format_exc()))
174 |
175 | if self.wbx_kargs['wbx']:
176 | try:
177 | await self.send_wbx_teams_notifications(results)
178 | except:
179 | self.LOGGER.info("Failed to connect to and log results to Webex Teams.")
180 | self.LOGGER.error("{}".format(traceback.format_exc()))
181 |
182 | if self.collector_kargs['collector']:
183 | try:
184 | await self.send_http_notifications(results)
185 | except:
186 | self.LOGGER.info("Failed to connect to and log results to HTTP endpoint.")
187 | self.LOGGER.error("{}".format(traceback.format_exc()))
188 |
189 | await self.stdout(results)
190 |
191 | async def stdout(self, results):
192 | for result in results:
193 | if result['rtype'] == CREATE and result['request_data']:
194 | kargs = result.copy()
195 | r = result['request_data'].get('Cmd', [])
196 | kargs['command'] = " ".join(r)
197 | kargs['image'] = result['request_data'].get('Image', [])
198 | self.LOGGER.info("{src_ip}:{src_port} creating image:{image} '''{command}'''".format(**kargs))
199 |
200 | async def log_register(self, sensor_name, sensor_ip, token):
201 | message = "{} ({}) registered with {}".format(sensor_name, sensor_ip, token)
202 | self.LOGGER.info(message)
203 | if self.wbx_kargs['wbx']:
204 | webhook_url = self.wbx_kargs['wbx_webhook']
205 | requests.post(webhook_url, data=json.dumps({'markdown': message}), headers={'Content-Type': 'application/json'})
206 |
207 | if self.slack_kargs['slack']:
208 | webhook_url = self.slack_kargs['slack_webhook']
209 | payload = self.get_slack_kargs()
210 | payload['text'] = message
211 | requests.post(webhook_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'})
212 |
213 | async def log_ping(self, sensor_name, sensor_ip, token):
214 | message = "{} ({}) pinged with {}".format(sensor_name, sensor_ip, token)
215 | self.LOGGER.info(message)
216 | # if self.wbx_kargs['wbx']:
217 | # webhook_url = self.wbx_kargs['wbx_webhook']
218 | # requests.post(webhook_url, data=json.dumps({'markdown': message}), headers={'Content-Type': 'application/json'})
219 |
220 | # if self.slack_kargs['slack']:
221 | # webhook_url = self.slack_kargs['slack_webhook']
222 | # payload = self.get_slack_kargs()
223 | # payload['text'] = message
224 | # requests.post(webhook_url, data=json.dumps({'markdown': message}), headers={'Content-Type': 'application/json'})
225 |
226 | async def log_new_token(self, email, name, is_admin):
227 | message = "Token created: ({}) {} {}".format(email, name, is_admin)
228 | self.LOGGER.info(message)
229 |
230 | async def get_sensor(self, sensor_id):
231 | try:
232 | sensors = RegisteredSensor.objects(sensor_id=sensor_id)
233 | if len(sensors) > 0:
234 | return sensors[0]
235 | except:
236 | self.LOGGER.error("Failed to get sensor info:\n{}".format(traceback.format_exc()))
237 | return None
238 |
239 | async def register_sensor(self, sensor_id, sensor_ip, token, dt, now):
240 | rs = await self.get_sensor(sensor_id) if self.mongo_kargs['mongo'] else None
241 | if rs:
242 | rs.last_ping = now
243 | rs.save()
244 | return True
245 | rs = RegisteredSensor(sensor_id=sensor_id, sensor_ip=sensor_ip, token=token,
246 | created_at=now, received_at=now, last_ping=dt)
247 | await self.log_register(sensor_id, sensor_ip, token)
248 | if self.mongo_kargs['mongo']:
249 | try:
250 | rs.save()
251 | return True
252 | except:
253 | self.LOGGER.error("Failed to save sensor register info:\n{}".format(traceback.format_exc()))
254 |
255 | async def get_request_result(self, result_id):
256 | try:
257 | info = RequestResultEvent.objects(result_id=result_id)
258 | if len(info) > 0:
259 | return info[0]
260 | except:
261 | self.LOGGER.error("Failed to get request result:\n{}".format(traceback.format_exc()))
262 | return None
263 |
264 | async def get_event(self, result_id):
265 | try:
266 | info = GeneralEvent.objects(event_id=result_id)
267 | if len(info) > 0:
268 | return info[0]
269 | except:
270 | self.LOGGER.error("Failed to get event info:\n{}".format(traceback.format_exc()))
271 |
272 | return None
273 |
274 | async def requests_sensor(self, sensor_id, sensor_ip, token, dt, now, payload):
275 | self.LOGGER.info("Updating last sensor_info ({}) ping: {}".format(sensor_id, dt))
276 | try:
277 | sensor_info = await self.get_sensor(sensor_id) if self.mongo_kargs['mongo'] else None
278 | if sensor_info:
279 | sensor_info.last_ping = now
280 | sensor_info.save()
281 | except:
282 | self.LOGGER.info("Failed to update sensor_info ({}): {}".format(sensor_id, traceback.format_exc()))
283 |
284 | command = payload.get(COMMAND, None)
285 | request_parameters = payload.get(REQUEST_PARAMETERS, {})
286 | response_info = payload.get(RESPONSE_INFO, {})
287 | result_id = create_token(iters=1)
288 | orm_kargs = {}
289 | orm_kargs['sensor_id'] = sensor_id
290 | orm_kargs['sensor_ip'] = sensor_ip
291 | orm_kargs['created_at'] = dt
292 | orm_kargs['received_at'] = now
293 | orm_kargs['response_info'] = response_info
294 | orm_kargs['request_parameters'] = request_parameters
295 | orm_kargs['result_id'] = result_id
296 |
297 | result = RequestResultEvent(**orm_kargs)
298 | self.LOGGER.info("Saving event result to mongodb for ({})".format(sensor_id))
299 | if self.mongo_kargs['mongo']:
300 | try:
301 | result.save()
302 | except:
303 | self.LOGGER.error("Failed to request result event:\n{}".format(traceback.format_exc()))
304 |
305 | # notify via webx ?
306 | self.LOGGER.info("Notifying webex teams for ({})".format(sensor_id))
307 | dl_kargs = {'host': self.global_hostname,
308 | 'port': self.global_port,
309 | 'result_id': result_id}
310 | url = request_parameters.get(URL, None)
311 |
312 | link = DOWNLOAD_LINK.format(**dl_kargs)
313 | summary_link = SUMMARY_LINK.format(**dl_kargs)
314 | msg_kargs = {'sensor_id': sensor_id,
315 | 'sensor_ip': sensor_ip,
316 | 'download_link': link,
317 | 'summary_link': summary_link,
318 | 'url': url}
319 |
320 | wbx = self.wbx_kargs['wbx']
321 | slack = self.slack_kargs['slack']
322 | self.LOGGER.info("Loging results to wbx_webhook: {}".format(wbx))
323 |
324 | if wbx:
325 | webhook_url = self.wbx_kargs['wbx_webhook']
326 | message = WBX_DOWNLOAD_MESSAGE.format(**msg_kargs)
327 |             if payload and self.execute_post(webhook_url, {'markdown': message}):
328 |                 self.LOGGER.info("[+] Success, logging results to wbx")
329 |             else:
330 |                 self.LOGGER.info("[X] Failed, logging results to wbx")
331 |
332 | if slack:
333 | webhook_url = self.slack_kargs['slack_webhook']
334 | message = SLACK_DOWNLOAD_MESSAGE.format(**msg_kargs)
335 |             payload = dict(self.get_slack_kargs(), text="Alert: docker create")
336 | blocks = [
337 | {
338 | "type": "section",
339 | "text": {
340 | "type": "mrkdwn",
341 | "text": message
342 | }
343 | }
344 | ]
345 | payload['blocks'] = blocks
346 | if payload and self.execute_post(webhook_url, payload):
347 | self.LOGGER.info("[+] Success, logging results to slack".format())
348 | else:
349 | self.LOGGER.info("[X] Failed, logging results to slack".format())
350 |
351 | async def submit_remote_requests(self, sensor_id, sensor_ip, port, payload):
352 | now = get_iso_time()
353 |         url = "https://{}:{}".format(sensor_ip, port) + HP_COMMAND_ENDPOINT
354 | verify = False
355 | cert=None
356 | payload[TOKEN] = self.server_secret_key
357 | if not self.execute_post(url, payload):
358 | collector_url = self.get_collector_url(use_alt=True) + PING_ENDPOINT
359 | self.execute_post(collector_url, payload)
360 |
361 | async def ping_sensor(self, sensor_id, sensor_ip, token, dt, now):
362 | sensor = await self.get_sensor(sensor_id) if self.mongo_kargs['mongo'] else None
363 | if sensor:
364 | sensor.last_ping = now
365 | sensor.save()
366 | elif self.mongo_kargs['mongo']:
367 | await self.register_sensor(sensor_id, sensor_ip, token, dt, now)
368 |
369 | rs = PingSensor(sensor_id=sensor_id, sensor_ip=sensor_ip, token=token, created_at=dt, received_at=now)
370 | await self.log_ping(sensor_id, sensor_ip, token)
371 | if self.mongo_kargs['mongo']:
372 | try:
373 | rs.save()
374 | except:
375 | self.LOGGER.error("Failed to save sensor info:\n{}".format(traceback.format_exc()))
376 |
377 | def get_sensor_infos(self, sensor_ip=None, sensor_id=None, token=None):
378 | kargs = {}
379 | if sensor_ip:
380 | kargs['sensor_ip'] = sensor_ip
381 | if sensor_id:
382 | kargs['sensor_id'] = sensor_id
383 | if token:
384 | kargs['token'] = token
385 |
386 | try:
387 | objs = RegisteredSensor.objects(**kargs)
388 | return objs
389 | except:
390 | traceback.print_exc()
391 | return []
392 |
393 | async def get_token(self, token_value):
394 | try:
395 | objs = TokenInfo.objects(token=token_value)
396 | if len(objs) > 0:
397 | return objs[0]
398 | return None
399 | except:
400 | self.LOGGER.error("Failed to get token: {}".format(traceback.format_exc()))
401 |
402 | async def get_first_token(self):
403 | try:
404 | objs = TokenInfo.objects(creator_token=FIRSTIES_TOKEN)
405 | if len(objs) > 0:
406 | return objs[0]
407 | return None
408 | except:
409 | self.LOGGER.error("Failed to find expected first token:\n{}".format(traceback.format_exc()))
410 | raise
411 |
412 |
413 | async def touch_token(self, token, now):
414 | if not self.mongo_kargs['mongo']:
415 | return False
416 |
417 | token_info = await self.get_token(token)
418 | if token_info is None:
419 | return False
420 | token_info.modified_at = now
421 | try:
422 | token_info.save()
423 | except:
424 | self.LOGGER.error("Failed to touch token:\n{}".format(traceback.format_exc()))
425 | return False
426 | return True
427 |
428 | async def is_admin(self, token, token_info=None):
429 |         if token_info is not None:
430 | return token_info.is_admin
431 |
432 | token_info = await self.get_token(token)
433 | if token_info is None:
434 | return None
435 | return token_info.is_admin
436 |
437 | async def is_active(self, token, token_info=None):
438 |         if token_info is not None:
439 | return token_info.is_active
440 |
441 | token_info = await self.get_token(token)
442 | if token_info is None:
443 | return None
444 | return token_info.is_active
445 |
446 | async def is_valid(self, token):
447 | ti = await self.get_token(token)
448 |         return ti is not None
449 |
450 | async def new_token(self, creator_token, email='', name='', description='', is_admin=False, is_active=True):
451 |         if not await self.is_admin(creator_token):
452 | return None
453 | token = create_token()
454 | now = get_iso_time()
455 | token_info = await self.add_token(creator_token, token, email, name, description, is_admin, is_active, now, now, now)
456 | await self.log_new_token(email, name, is_admin)
457 | return token_info
458 |
459 | async def create_first_admin(self, email='', name='', description=''):
460 | creator_token = await self.get_first_token()
461 | if creator_token is not None:
462 | raise Exception("There can be only 1!")
463 | token = create_token()
464 | now = get_iso_time()
465 | token_info = await self.add_token(FIRSTIES_TOKEN, token, email, name, description, True, True, now, now, now)
466 |         await self.log_new_token(email, name, True)
467 | return token_info
468 |
469 | async def get_honeypot_token_values(self):
470 | try:
471 | objs = TokenInfo.objects(name=HONEYPOT_TOKEN)
472 |             return [i.token for i in objs]
473 | except:
474 | self.LOGGER.error("Failed to get honeypot tokens:\n{}".format(traceback.format_exc()))
475 | raise
476 |
477 |
478 | async def create_honeypot_token(self, email=None, name=None, description=None):
479 | creator_token_info = await self.get_first_token()
480 | if creator_token_info is None:
481 | creator_token_info = await self.create_first_admin()
482 |
483 | email = email if email is not None else creator_token_info.email
484 | name = HONEYPOT_TOKEN
485 | description = HONEYPOT_DESCRIPTION
486 | token = create_token()
487 | now = get_iso_time()
488 | token_info = await self.add_token(creator_token_info.token, token, email, name, description, False, True, now, now, now)
489 | await self.log_new_token(email, name, False)
490 | return token_info
491 |
492 | async def add_token(self, creator_token, token, email, name, description, is_admin, is_active, created_at=None, modified_at=None, last_used=None):
493 | kargs = {
494 | "creator_token":creator_token,
495 | "token":token,
496 | "email":email,
497 | "name":name,
498 | "description":description,
499 | "is_admin":is_admin,
500 | "is_active":is_active,
501 | "created_at":get_iso_time() if created_at is None else created_at,
502 | "modified_at":get_iso_time() if modified_at is None else modified_at,
503 | "last_used":get_iso_time() if last_used is None else last_used,
504 | }
505 | rs = TokenInfo(**kargs)
506 | if self.mongo_kargs['mongo']:
507 | try:
508 | rs.save()
509 | except:
510 | self.LOGGER.error("Failed to save token info:\n{}".format(traceback.format_exc()))
511 | return rs
512 |
513 | def get_collector_url(self, use_alt=False):
514 | host = None
515 | port = None
516 | if use_alt:
517 | host = self.collector_kargs.get('collector_alt_host', None)
518 | port = self.collector_kargs.get('collector_alt_port', None)
519 |
520 |
521 | if host is None and self.collector_kargs.get('collector_host', None) is None:
522 |             host = DEFAULT_COLLECTOR_ADDR
523 | elif host is None:
524 | host = self.collector_kargs.get('collector_host', None)
525 |
526 |
527 | if port is None and self.collector_kargs.get('collector_port', None) is None:
528 | port = DEFAULT_COLLECTOR_PORT
529 | elif port is None:
530 | port = self.collector_kargs.get('collector_port', None)
531 |
532 | kargs = {'collector_port': port,'collector_host': host,}
533 | return self.collector_kargs['collector_url_fmt'].format(**kargs)
534 |
535 | def execute_post(self, collector_url, payload, verify=False):
536 | rsp = None
537 | host = collector_url.split("://")[1].split("/")[0]
538 | try:
539 | rsp = requests.post(collector_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'},
540 | verify=verify)
541 | except:
542 | self.LOGGER.info("Failed to connect to {}.".format(host))
543 | self.LOGGER.error("{}".format(traceback.format_exc()))
544 |
545 | finally:
546 | if rsp:
547 | self.LOGGER.info("Connected to {} with response:{}.".format(host, rsp.status_code))
548 | if rsp and (rsp.status_code >= 200 and rsp.status_code <= 299):
549 | return True
550 | return False
551 |
552 | def prune_processes(self):
553 | self.PROCESSES = [p for p in self.PROCESSES if p.is_alive()]
554 |
555 | async def send_ping(self):
556 | self.prune_processes()
557 | if not self.collector_kargs['collector']:
558 | return
559 | now = get_iso_time()
560 | collector_url = self.get_collector_url() + PING_ENDPOINT
561 | verify = False
562 |         if self.collector_kargs['collector_verify_ssl'] and self.collector_kargs.get('collector_server_crt'):
563 |             verify = self.collector_kargs.get('collector_server_crt')
564 |
565 | cert=None
566 |
567 | payload = self.get_base_payload()
568 |
569 |         if not self.execute_post(collector_url, payload, verify=verify):
570 |             collector_url = self.get_collector_url(use_alt=True) + PING_ENDPOINT
571 |             self.execute_post(collector_url, payload, verify=verify)
572 |
573 | async def send_registration(self):
574 | if not self.collector_kargs['collector']:
575 | return
576 | now = get_iso_time()
577 | collector_url = self.get_collector_url() + REGISTER_ENDPOINT
578 | verify = False
579 |         if self.collector_kargs['collector_verify_ssl'] and self.collector_kargs.get('collector_server_crt'):
580 |             verify = self.collector_kargs.get('collector_server_crt')
581 |
582 | cert=None
583 | try:
584 | payload = self.get_base_payload()
585 |         except:
586 |             self.LOGGER.error("Failed to get base payload:\n{}".format(traceback.format_exc()))
587 |             return
588 |         if not self.execute_post(collector_url, payload, verify=verify):
589 |             collector_url = self.get_collector_url(use_alt=True) + REGISTER_ENDPOINT
590 |             self.execute_post(collector_url, payload, verify=verify)
591 |
592 | async def send_http_notifications(self, results):
593 | self.LOGGER.info("Logging {} events to http endpoint".format(len(results)))
594 | if not self.collector_kargs['collector']:
595 | return
596 |
597 | collector_url = self.get_collector_url() + EVENTS_ENDPOINT
598 | verify = False
599 |         if self.collector_kargs['collector_verify_ssl'] and self.collector_kargs.get('collector_server_crt'):
600 |             verify = self.collector_kargs.get('collector_server_crt')
601 |
602 | cert=None
603 | payload = self.get_base_payload()
604 | payload[EVENTS] = results
605 |         if not self.execute_post(collector_url, payload, verify=verify):
606 |             collector_url = self.get_collector_url(use_alt=True) + EVENTS_ENDPOINT
607 |             self.execute_post(collector_url, payload, verify=verify)
608 |
609 | async def add_mongo_results(self, results):
610 |
611 | for result in results:
612 | # not all results have requests in them
613 | result['request'] = result.get('request', '')
614 | gr = GeneralEvent(**result)
615 | if self.mongo_kargs['mongo']:
616 | try:
617 | gr.save()
618 | except:
619 | self.LOGGER.error("Failed to save GeneralEvent:\n{}".format(traceback.format_exc()))
620 | raise
621 | ce = None
622 | if gr.rtype == CREATE and gr.request_data:
623 | kargs = {}
624 | Cmd = gr.request_data.get('Cmd', [])
625 | kargs['command'] = ' '.join(Cmd)
626 | kargs['image'] = gr.request_data.get('Image', [])
627 | kargs['event_id'] = gr.event_id
628 | for k in ['src_ip', 'dst_ip', 'src_port', 'dst_port', 'created_at']:
629 | kargs[k] = result[k]
630 | ce = CreateEvent(**kargs)
631 |
632 | if self.mongo_kargs['mongo']:
633 | if ce:
634 | try:
635 | ce.save()
636 | except:
637 | self.LOGGER.error("Failed to save event info:\n{}".format(traceback.format_exc()))
638 |
639 | @classmethod
640 | async def add_elk_results(cls, results):
641 | # add results to elk from here
642 | pass
643 |
644 |
645 | def get_slack_kargs(self):
646 | slack_kargs = {}
647 | slack_kargs["channel"] = self.slack_kargs.get("slack_channel", None)
648 | slack_kargs["username"] = self.slack_kargs.get("slack_username", None)
649 | # slack_kargs["webhook"] = self.slack_kargs.get("slack_webhook", None)
650 | slack_kargs["icon_emoji"] = self.slack_kargs.get("slack_emoticon", ":suspect:")
651 | return slack_kargs
652 |
653 |
654 | async def send_slack_notifications(self, results):
655 | payload = self.get_slack_kargs()
656 | webhook_url = self.slack_kargs['slack_webhook']
657 | using_slack = self.slack_kargs['slack']
658 | # self.LOGGER.info("Loging results to slack: {}".format(using_slack))
659 | if not using_slack:
660 | return
661 |
662 | for result in results:
663 | payload = self.get_slack_kargs()
664 | token = self.admin_token
665 | if result['rtype'] == CREATE and result['request_data']:
666 | kargs = result.copy()
667 | r = result['request_data'].get('Cmd', [])
668 | kargs['command'] = " ".join(r)
669 | kargs['image'] = result['request_data'].get('Image', [])
670 |                 kargs['dst_ip'] = self.sensor_ip if self.sensor_ip else kargs['dst_ip']
671 | kargs['trigger_collector'] = ''
672 | event_id = result.get(EVENT_ID, -1)
673 | if self.is_collector:
674 | kargs['trigger_collector'] = "\n\n6. *Collector web request:* https://{}:{}/remote_web_request".format(self.global_hostname, self.global_port)
675 | if isinstance(event_id, str) and len(event_id) > 0:
676 | fmt_args = (self.global_hostname, self.global_port, token, event_id, self.server_secret_key)
677 | kargs['trigger_collector'] = kargs['trigger_collector'] + "\n\n7. *Event JSON:* https://{}:{}/event/{}/{}\n\n".format(*fmt_args)
678 |
679 |
680 | # message = ("{src_ip}:{src_port} => {dst_ip}:{dst_port} creating docker image:{image} for \'\'\'{command}\'\'\'".format(**kargs))
681 | message = ("1. *Attempting to create an contianer on {sensor_id} ({sensor_ip}) for API: {api}* \n2. Source: *{src_ip}:{src_port}* \n3. Destination: *{dst_ip}:{dst_port}*\n4. Image: *{image}*\n5. Command: `{command}`{trigger_collector}".format(**kargs))
682 | payload['text'] = "Alert: docker create"
683 | blocks = [
684 | {
685 | "type": "section",
686 | "text": {
687 | "type": "mrkdwn",
688 | "text": message
689 | }
690 | }
691 | ]
692 | payload['blocks'] = blocks
693 |
694 | elif result['rtype'] == GET_VERSION and not self.supress_recon:
695 | kargs = result.copy()
696 |                 kargs['dst_ip'] = self.sensor_ip if self.sensor_ip else kargs['dst_ip']
697 | message = ("1. *Attempting recon of {sensor_id} ({sensor_ip}) for API: {api}* \n2. Source: *{src_ip}:{src_port}*\n3. Destination: *{dst_ip}:{dst_port}*".format(**kargs))
698 | payload['text'] = "Alert: docker recon"
699 | blocks = [
700 | {
701 | "type": "section",
702 | "text": {
703 | "type": "mrkdwn",
704 | "text": message
705 | }
706 | }
707 | ]
708 | payload['blocks'] = blocks
709 |
710 | if payload and self.execute_post(webhook_url, payload):
711 | self.LOGGER.info("[+] Success, logging results to slack".format())
712 | else:
713 | self.LOGGER.info("[X] Failed, logging results to slack".format())
714 |
715 |
716 | def update_wbx_config(self, dargs):
717 | self.wbx_kargs['wbx_webhook'] = dargs.get('wbx_webhook', None)
718 | self.wbx_kargs['wbx'] = dargs.get('wbx', False) and dargs.get('wbx_webhook', None) is not None
719 |
720 | async def send_wbx_teams_notifications(self, results):
721 | webhook_url = self.wbx_kargs['wbx_webhook']
722 | wbx = self.wbx_kargs['wbx']
723 | self.LOGGER.info("Loging results to wbx_webhook: {}".format(wbx))
724 | if wbx is None:
725 | return
726 |
727 | for result in results:
728 | payload = None
729 | token = self.admin_token
730 | if result['rtype'] == CREATE and result['request_data']:
731 | kargs = result.copy()
732 | r = result['request_data'].get('Cmd', [])
733 | event_id = result.get('event_id', None)
734 | kargs['command'] = " ".join(r)
735 | kargs['image'] = result['request_data'].get('Image', [])
736 |                 kargs['dst_ip'] = self.sensor_ip if self.sensor_ip else kargs['dst_ip']
737 | kargs['trigger_collector'] = ''
738 | if self.is_collector:
739 | kargs['trigger_collector'] = "\n\n6. **Collector web request:** https://{}:{}/remote_web_request\n\n".format(self.global_hostname, self.global_port)
740 | if isinstance(event_id, str) and len(event_id) > 0:
741 | fmt_args = (self.global_hostname, self.global_port, token, event_id, self.server_secret_key)
742 | kargs['trigger_collector'] = kargs['trigger_collector'] + "\n\n7. **Event JSON:** https://{}:{}/event/{}/{}\n\n".format(*fmt_args)
743 | message = ("1. **Attempting to create an image on **{sensor_id} ({sensor_ip})** for API: {api}** \n2. **Source:** {src_ip}:{src_port} \n3. **Destination:** {dst_ip}:{dst_port}\n4. **Image:** {image}\n5. **Command:** `{command}`{trigger_collector}".format(**kargs))
744 | # self.LOGGER.info("Sending results {} to wbx_webhook".format(result['rtype']))
745 | payload = {'markdown': message}
746 | elif result['rtype'] == GET_VERSION and not self.supress_recon:
747 | kargs = result.copy()
748 | kargs['dst_ip'] = self.sensor_ip if self.sensor_ip else kargs['dst_ip']
749 | message = ("1. **Attempting recon for **{sensor_id} ({sensor_ip})** API:** {api}\n2. **Source:** {src_ip}:{src_port}\n3. **Destination:** {dst_ip}:{dst_port}".format(**kargs))
750 | # self.LOGGER.info("Sending results {} to wbx_webhook".format(result['rtype']))
751 | payload = {'markdown': message}
752 |
753 | if payload and self.execute_post(webhook_url, payload):
754 | self.LOGGER.info("[+] Success, logging results to wbx".format())
755 | else:
756 | self.LOGGER.info("[X] Failed, logging results to wbx".format())
757 |
758 | async def send_request_results(self, response_payload: dict):
759 | http_url = self.collector_kargs['collector_url'].strip("/") + REQUESTS_ENDPOINT
760 | verify = False
761 |         if self.collector_kargs['collector_verify_ssl'] and self.collector_kargs.get('collector_server_crt'):
762 |             verify = self.collector_kargs.get('collector_server_crt')
763 |
764 | payload = self.get_base_payload()
765 |         payload.update(response_payload)
766 |         requests.post(http_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'},
767 |                       verify=verify)
768 |
769 | def start_process_commands(self, sensor_id, sensor_ip, token, payload):
770 | self.LOGGER.info("Starting process to handle remote command".format())
771 | p = Process(target=self.handle_incoming_commands,
772 | args=(sensor_id, sensor_ip, token, payload))
773 | p.start()
774 | self.PROCESSES.append(p)
775 |
776 | def handle_incoming_commands(self, sensor_id, sensor_ip, token, payload):
777 | async def doit():
778 | response = await CommandHandler.handle_commands(**payload)
779 | collector_url = self.get_collector_url() + COMMANDS_RESPONSE_ENDPOINT
780 | verify = False
781 | cert=None
782 | response.update(self.get_base_payload())
783 | if not self.execute_post(collector_url, response):
784 | collector_url = self.get_collector_url(use_alt=True) + COMMANDS_RESPONSE_ENDPOINT
785 | self.execute_post(collector_url, response)
786 | asyncio.run(doit())
787 |
--------------------------------------------------------------------------------
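Notifier fans one result list out to every configured sink (mongo, Slack, Webex Teams, the collector endpoint), and each new instance installs itself as the process-wide singleton. A minimal construction sketch with a Slack sink only; every value below is a placeholder for what hp_config.json normally supplies:

    import asyncio
    from docker_honey.notify import Notifier, get_single_notifier

    notifier = Notifier(
        sensor_id='dhp-test',
        server_secret_key='change_me',
        slack=True,
        slack_webhook='https://hooks.slack.com/services/XXX/YYY/ZZZ',
    )
    assert get_single_notifier() is notifier   # singleton set in __init__
    # notify() is a coroutine taking a list of honeypot event dicts.
    asyncio.run(notifier.notify([]))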
/src/docker_honey/server.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 |
21 | from .consts import *
22 | from .util import *
23 | from .notify import Notifier
24 | from .commands import CommandHandler
25 |
26 | import select
27 | import base64
28 | import json
29 | import logging
30 | import traceback
31 | import asyncio
32 | import logging
33 | from threading import Timer
34 | from .simple_commands.util import *
35 |
36 |
37 | class DockerHp(object):
38 | LOGGER = get_stream_logger(__name__ + '.DockerHp')
39 | def __init__(self, sensor_id, sensor_ip, notifier, ports=[2375,],
40 | terminate_with_error=True, error_message=ERROR_MESSAGE,
41 | level=logging.DEBUG):
42 | self.sensor_ip = sensor_ip
43 | self.sensor_id = sensor_id
44 | self.notifier = notifier
45 | self.terminate_with_error = terminate_with_error
46 | self.error_message = error_message
47 | self.keep_working = False
48 | self.ports = ports
49 | reset_logger_level(self.LOGGER, level)
50 |
51 | # self.timer = Timer(3.0, self.ping)
52 | # self.timer.start()
53 | self.listener_socks = {}
54 | for port in ports:
55 | try:
56 | server = create_listener_sock(port)
57 | server.setblocking(0)
58 | self.listener_socks[port] = server
59 | except:
60 |                 self.LOGGER.error('Unable to start server on port: {}'.format(port))
61 |
62 | self.registered = False
63 |
64 | async def ping(self):
65 | if not self.registered:
66 | await self.notifier.send_registration()
67 | else:
68 |             await self.notifier.send_ping()
69 | self.timer = Timer(60.0, self.ping)
70 | self.timer.start()
71 |
72 | async def consume_request(self, client, address, send_response=True):
73 | create_data = None
74 | src_ip, src_port = client.getpeername()
75 | dst_ip, dst_port = client.getsockname()
76 | created_at = get_iso_time()
77 | data = recv_until_done(client) #(client.recv(MAX_DATA))
78 | b64req = base64.b64encode(data).decode('ascii')
79 | if data == b'':
80 | self.LOGGER.info("failed connection from: {}".format(address))
81 | return {'sensor_id': self.sensor_id, 'sensor_ip': self.sensor_ip,
82 | 'src_ip': src_ip, 'src_port': src_port, 'dst_ip': dst_ip, 'dst_port': dst_port, "created_at":created_at,
83 | 'rtype': UNKNOWN, 'response': None, "request": b64req, "request_data": None, "api": None, 'sent': False,
84 | 'event_id': create_token()}
85 | rtype = get_handler_type(data)
86 | self.LOGGER.info("Handling connection from {}:{} for {}".format(address[0], address[1], rtype))
87 | rdata = create_response(rtype, data)
88 |
89 | if data.find(b'Content-Type: application/json\r\n') > -1:
90 | create_data = extract_json_data(data)
91 |
92 | kargs = get_match_group(rtype, data)
93 | api = API if not 'api' in kargs else kargs['api'].decode('ascii').lstrip('v')
94 | src_ip, src_port = client.getpeername()
95 | dst_ip, dst_port = client.getsockname()
96 | return {'sensor_id': self.sensor_id, 'sensor_ip': self.sensor_ip,
97 | 'src_ip': src_ip, 'src_port': src_port, 'dst_ip': dst_ip, 'dst_port': dst_port, "created_at":created_at,
98 | 'rtype': rtype, 'response': rdata, "request": b64req, 'request_data': create_data, 'api': api, 'sent': False,
99 | 'event_id': create_token() }
100 |
101 | async def honeypot_connection(self, client, address, send_after_ping=False):
102 | result = await self.consume_request(client, address)
103 | results = [result]
104 | # facilitate follow-on docker client communication
105 | self.LOGGER.info("Handled connection type:{} from {}:{}".format(result['rtype'], address[0], address[1]))
106 | if result['rtype'] == PING:
107 | client.send(result['response'].encode('ascii'))
108 | result['sent'] = True
109 | # Nothing else to do, likely a port scanner
110 | elif result['rtype'] == GET_VERSION or result['rtype'] == GET:
111 | client.send(result['response'].encode('ascii'))
112 | result['sent'] = True
113 | return results
114 | elif result['rtype'] == UNKNOWN:
115 | client.send(UNKNOWN_RETURN)
116 | result['sent'] = True
117 | return results
118 |
119 | result = await self.consume_request(client, address)
120 | results.append(result)
121 |
122 | if result['rtype'] and send_after_ping:
123 | try:
124 | client.send(result['response'].encode('ascii'))
125 | result['sent'] = True
126 | except:
127 | pass
128 | return results
129 |
130 | async def handle_next_clients(self):
131 | inputs = [s for s in self.listener_socks.values()]
132 | readable, _, _ = select.select(inputs, [], inputs)
133 | for server_sock in readable:
134 | try:
135 | results = await self.honeypot_next_client(server_sock)
136 | await self.notifier.notify(results)
137 | except KeyboardInterrupt:
138 | self.keep_working = False
139 | break
140 | except:
141 | traceback.print_exc()
142 |
143 | async def honeypot_next_client(self, server_sock):
144 | client, address = server_sock.accept()
145 | client.settimeout(3.0)
146 | results = await self.honeypot_connection(client, address)
147 | if len(results) < 2 or results[0]['rtype'] != PING:
148 | self.LOGGER.info('Not a full honeypot connection')
149 |         elif self.terminate_with_error and results[0]['rtype'] == PING:
150 | created_at = get_iso_time()
151 | api = results[1]['api'] if results[1]['api'] else API
152 | rdata = generate_error(api=api, error_message=self.error_message)
153 | src_ip, src_port = client.getpeername()
154 | dst_ip, dst_port = client.getsockname()
155 |
156 | result = {'sensor_id': self.sensor_id, 'sensor_ip': self.sensor_ip,
157 | 'src_ip': src_ip, 'src_port': src_port, 'dst_ip': dst_ip, 'dst_port': dst_port, 'created_at': created_at,
158 | 'rtype': ERROR, 'response': rdata, 'request_data': None, 'api': api, 'sent': False,
159 | 'event_id': create_token()}
160 | results.append(result)
161 | try:
162 | client.send(rdata.encode('ascii'))
163 |                 result['sent'] = True
164 | client.close()
165 | except:
166 | pass
167 | return results
168 |
169 | async def serve_forever(self):
170 | self.keep_working = True
171 | try:
172 | while self.keep_working:
173 | await self.handle_next_clients()
174 | except:
175 | traceback.print_exc()
176 | self.LOGGER.info("Exiting serve_forever")
177 |
178 | async def handle_collector_request(self, **kargs):
179 | payload = await CommandHandler.handle_web_request(**kargs)
180 | payload['sensor_ip'] = self.sensor_ip
181 | payload['sensor_id'] = self.sensor_id
182 | notifier = Notifier.GLOBAL_NOTIFIER
183 |
--------------------------------------------------------------------------------
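
The request/response cycle above leans on the helpers in /src/docker_honey/util.py: recv_until_done() drains the socket, get_handler_type() matches the raw bytes against the IDENTIFY regexes, and create_response() renders the canned Docker-API reply. A minimal sketch of that classification step, assuming the package is importable (per setup.py) and that consts.py supplies the IDENTIFY/RESPONSES tables; the probe bytes and address below are illustrative:

    from docker_honey.util import get_handler_type, create_response

    # a version probe as it would arrive on one of the listener sockets
    probe = b'GET /v1.24/version HTTP/1.1\r\nHost: 203.0.113.7:2375\r\n\r\n'

    rtype = get_handler_type(probe)           # one of the IDENTIFY keys, or UNKNOWN
    response = create_response(rtype, probe)  # rendered HTTP response string
    print(rtype, len(response))
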
/src/docker_honey/simple_commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ciscocsirt/dhp/be45587f57205b70c2e3c603d9da943c46aceaa9/src/docker_honey/simple_commands/__init__.py
--------------------------------------------------------------------------------
/src/docker_honey/simple_commands/actions.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | from .consts import *
21 | from .util import *
22 | from . import boto
23 | from . import ssh
24 | import traceback
25 | import json
26 | import os
27 | import time
28 |
29 | import logging
30 | ACTION_LOGGER = get_stream_logger(__name__)
31 |
32 | command_strings_to_dict = lambda x: {i['name']: i['value'] for i in x}
33 |
34 | def perform_activity(instance_name, all_instances, activity_name,
35 | instance_public_ip, boto_config, command_format_args, username=UBUNTU):
36 | # get instance config
37 | instances_configs = {i['name']: i for i in boto_config.get('instance_descriptions', [])}
38 | instance_config = instances_configs.get(instance_name)
39 |
40 | # get activities and actions for setup sequence
41 | instance_activities = instance_config.get('activities')
42 | activity = instance_activities.get(activity_name)
43 | all_actions = boto_config.get("actions")
44 | keypath = boto_config.get('ssh_key_path', '')
45 | # print(json.dumps(activity, sort_keys=True, indent=6),
46 | # json.dumps(instance_activities, sort_keys=True, indent=6),
47 | # activity_name)
48 |
49 | ssh_reqs = {}
50 | for iid, iinfo in all_instances.items():
51 |         key_filename = iinfo['KeyName']
52 | key_file = os.path.join(keypath, key_filename)
53 | ssh_reqs[iid] = {'key_file': key_file, 'host': instance_public_ip[iid], 'username': username}
54 | ACTION_LOGGER.info("Performing {} for {} ({} instances)".format(activity_name, instance_name, len(all_instances)))
55 | return perform_instance_activities(instance_name, all_instances, activity_name, activity,
56 | all_actions, ssh_reqs, command_format_args, boto_config)
57 |
58 |
59 | def perform_instance_activities(instance_name:str, all_instances:dict, activity_name:str,
60 | activity: dict, all_actions:dict, ssh_reqs: dict,
61 | command_format_args, boto_config):
62 |     # iterate over the actions and then execute them.
63 | # FIXME multi threading required here
64 | # need to redo how results are managed and returned
65 | steps = activity.get('steps')
66 | activity_results = {'instance_name':instance_name,
67 | 'activity_name': activity_name,
68 | "step_results": [],
69 | "steps": steps,
70 | "command_format_args": command_format_args}
71 | # print(activity_name, '\n', json.dumps(activity, indent=6, sort_keys=True))
72 | # print("all_actions", '\n', json.dumps(all_actions, indent=6, sort_keys=True))
73 | unpack_ssh_reqs = lambda reqs: (reqs['host'], reqs['key_file'], reqs['username'])
74 | for action in steps:
75 | cactivity = all_actions.get(action, None)
76 | if cactivity is None:
77 | msg = "'{}' from '{}' activity steps ({}) is not a defined activity in the orchestration description."
78 | msg = msg.format(action, activity_name, steps)
79 | ACTION_LOGGER.critical(msg)
80 | raise Exception(msg)
81 | atype = cactivity.get('type')
82 | pre_wait = cactivity.get('pre_wait', 0.0)
83 | time.sleep(pre_wait)
84 | aresults = {'name': action,
85 | 'type':atype,
86 | "results":[],
87 | }
88 |
89 |         if atype == 'commands':
90 |             # create the command list
91 |             commands = [i.format(**command_format_args) for i in cactivity.get('commands', [])]
92 |             aresults["commands"] = commands
93 |             # execute the rendered commands on every instance
94 |             for instance_id, ssh_req in ssh_reqs.items():
95 |                 host, key_file, username = unpack_ssh_reqs(ssh_req)
96 |                 ACTION_LOGGER.debug("Performing {}:{} ({} elements) for {}@{} with {}".format(activity_name, atype, len(commands), username, host, key_file))
97 |                 result = ssh.Commands.execute_commands(commands, host=host, key_filename=key_file, username=username)
98 |                 outcome = {'instance_id': instance_id, "host": host, 'result': result}
99 |                 aresults["results"].append(outcome)
100 |         elif atype == 'upload_files':
101 |             dst_src = {}
102 |             for scp_args in cactivity.get('files', []):
103 |                 src = scp_args.get('src')
104 |                 dst = scp_args.get('dst')
105 |                 dst_src[dst] = src
106 |             aresults["dst_src_files"] = dst_src
107 |             # scp the files over
108 |             for instance_id, ssh_req in ssh_reqs.items():
109 |                 host, key_file, username = unpack_ssh_reqs(ssh_req)
110 |                 ACTION_LOGGER.debug("Performing {}:{} ({} elements) for {}@{} with {}".format(activity_name, atype, len(dst_src), username, host, key_file))
111 |                 result = ssh.Commands.upload_files(dst_src, host=host, key_filename=key_file, username=username)
112 |                 outcome = {'instance_id': instance_id, "host": host, 'result': result}
113 |                 aresults["results"].append(outcome)
114 |         elif atype == "boto":
115 |             aresults["command_parameters"] = cactivity.get('command_parameters', [])
116 |             aresults["commands"] = cactivity.get('commands', [])
117 |             # boto steps are recorded but not executed yet
118 |             for instance_id, ssh_req in ssh_reqs.items():
119 |                 host, key_file, username = unpack_ssh_reqs(ssh_req)
120 |                 ACTION_LOGGER.debug("Unsupported activity {}:{} for {}@{} with {}".format(activity_name, atype, username, host, key_file))
121 |                 outcome = {'instance_id': instance_id, "host": host, 'result': "Unsupported action"}
122 |                 aresults["results"].append(outcome)
123 |         else:
124 |             for instance_id, ssh_req in ssh_reqs.items():
125 |                 host, key_file, username = unpack_ssh_reqs(ssh_req)
126 |                 ACTION_LOGGER.debug("Invalid activity {}:{} for {}@{} with {}".format(activity_name, atype, username, host, key_file))
127 |                 outcome = {'instance_id': instance_id, "host": host, 'result': "Unsupported action"}
128 |                 aresults["results"].append(outcome)
129 |         # record this step's results once, regardless of action type
130 |         activity_results['step_results'].append(aresults)
131 |         # optional settle time after the step completes
132 |         post_wait = cactivity.get('post_wait', 0.0)
133 |         time.sleep(post_wait)
134 | return activity_results
135 |
136 |
137 | def build_instance_and_setup(instance_name, config, setup_activity_name="setup", command_format_args: dict=None, region=None, max_count=None):
138 | #initialize the boto command
139 |     ACTION_LOGGER.debug("Initializing the boto.Commands class")
140 | boto.Commands.set_config(**config)
141 |
142 | # get instance config
143 | instances_configs = {i['name']: i for i in config.get('instance_descriptions', [])}
144 | instance_config = instances_configs.get(instance_name)
145 |
146 | # prep format arguments for env
147 | config_command_format_args = command_strings_to_dict(instance_config.get('command_string_parameters', []))
148 | command_format_args = command_format_args if isinstance(command_format_args, dict) and len(command_format_args) else {}
149 |
150 | config_command_format_args.update(command_format_args)
151 | command_format_args = config_command_format_args
152 | # get activities and actions for setup sequence
153 |     instance_activities = instance_config.get('activities')
154 | all_actions = config.get('actions')
155 |
156 |
157 | # ssh key stuff
158 | username = instance_config.get('username', UBUNTU)
159 | keypath = config.get('ssh_key_path', '')
160 |
161 | # use the config to set up the hosts
162 | ACTION_LOGGER.info("Creating {} instances in {} for '{}'".format(max_count, region, instance_name))
163 | all_instances, all_volumes = boto.Commands.build_instance_region(region, instance_name, config, max_count=max_count)
164 | ACTION_LOGGER.info("Created {} instances and {} volumes for '{}' in {}".format(len(all_instances), len(all_volumes), instance_name, region))
165 | instance_public_ip = boto.Commands.get_instance_public_ips([i for i in all_instances], **config)
166 |
167 | # create path to ssh key
168 | key_filename = list(all_instances.values())[0]['KeyName']
169 | key_file = os.path.join(keypath, key_filename)
170 |
171 | # perform setup activity
172 | setup_results = None
173 | ACTION_LOGGER.info("Setting-up {} instances and {} volumes for '{}' in {} with activity: '{}'".format(len(all_instances), len(all_volumes), instance_name, region, setup_activity_name))
174 | try:
175 | setup_results = perform_activity(instance_name, all_instances, setup_activity_name, instance_public_ip, config, command_format_args)
176 | except:
177 |         ACTION_LOGGER.error("Failed setup: {}".format(traceback.format_exc()))
178 | return all_instances, instance_public_ip, all_volumes, setup_results
179 |
180 | def build_instance_and_setup_multi_regions_count(instance_name, config, regions, max_count, command_format_args=None, setup_activity_name="setup"):
181 | #initialize the boto command
182 | boto.Commands.set_config(**config)
183 |     # results below are keyed by region; a region that fails to
184 |     # initialize is recorded with None values so partial rollouts
185 |     # remain visible to the caller
186 | keypath = config.get('ssh_key_path', '')
187 | backup_config = config.copy()
188 |
189 | all_instances = {}
190 | instance_public_ip = {}
191 | all_volumes = {}
192 | setup_results = {}
193 |
194 | for region in regions:
195 | try:
196 | ai, ipi, av, sr = build_instance_and_setup(instance_name, config, setup_activity_name=setup_activity_name,
197 | command_format_args=command_format_args, region=region,
198 | max_count=max_count)
199 | except:
200 | ACTION_LOGGER.critical("Exception occurred when trying to initialize instances in {}".format(region))
201 | ACTION_LOGGER.critical(traceback.format_exc())
202 | all_instances[region] = None
203 | instance_public_ip[region] = None
204 | all_volumes[region] = None
205 | setup_results[region] = None
206 | continue
207 | all_instances[region] = ai
208 | instance_public_ip[region] = ipi
209 | all_volumes[region] = av
210 | setup_results[region] = sr
211 |
212 | return all_instances, instance_public_ip, all_volumes, setup_results
213 |
--------------------------------------------------------------------------------
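
perform_activity() and build_instance_and_setup() are driven entirely by the orchestration description in the boto config. A minimal sketch of the shape those functions read; the instance/action names and commands are illustrative, while the keys ('instance_descriptions', 'activities', 'steps', 'actions', 'type', 'files', 'commands', 'pre_wait'/'post_wait') are the ones consumed above. The sketch omits the credential and AMI fields that boto.Commands.set_config() and build_instance_region() additionally expect:

    boto_config = {
        "ssh_key_path": "./ssh_keys",
        "instance_descriptions": [
            {
                "name": "dockerhp",
                "username": "ubuntu",
                "command_string_parameters": [
                    {"name": "service_name", "value": "dockerhp"},
                ],
                "activities": {
                    "setup": {"steps": ["upload_service", "install_service"]},
                },
            },
        ],
        "actions": {
            "upload_service": {
                "type": "upload_files",
                "files": [{"src": "./dockerhp.service", "dst": "dockerhp.service"}],
            },
            "install_service": {
                "type": "commands",
                "pre_wait": 1.0,
                "commands": ["sudo cp {service_name}.service /lib/systemd/system/"],
            },
        },
    }

With AWS credentials configured, a multi-region rollout is then a single call such as build_instance_and_setup_multi_regions_count("dockerhp", boto_config, regions=["us-east-2"], max_count=1).
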
/src/docker_honey/simple_commands/app.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | from quart import Quart, jsonify, Response, request
21 | import os
22 | import tempfile
23 | import ssl
24 | import traceback
25 | import asyncio
26 | from hypercorn.config import Config as HyperConfig
27 | from hypercorn.asyncio import serve
28 | from .consts import *
29 | from .util import *
30 | import logging
31 |
32 | class Hypercorn(Quart):
33 | APP_TEMP_DIR = tempfile.TemporaryDirectory()
34 | CERTS_PATH = os.path.join(APP_TEMP_DIR.name, 'ssl')
35 | LOGGER = get_stream_logger(__name__ + '.Hypercorn')
38 | DEFAULT_PORT = 8000
39 |
40 | def __init__(self, name, host='0.0.0.0', port=DEFAULT_PORT,
41 | ca_crt=None, server_crt=None, server_key=None,
42 | certs_path=CERTS_PATH, debug=False):
43 | super().__init__(name)
44 | self._App_host = host
45 | self._App_port = port
46 |
47 | self._App_server_ca = None
48 | self._App_server_key = None
49 | self._App_server_crt = None
50 |
51 |
52 | if ca_crt is not None and \
53 | server_crt is not None and \
54 | server_key is not None:
55 | self._App_server_ca = os.path.join(certs_path if certs_path else '', ca_crt)
56 | self._App_server_key = os.path.join(certs_path if certs_path else '', server_key)
57 | self._App_server_crt = os.path.join(certs_path if certs_path else '', server_crt)
58 |
59 | self._App_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
60 | self._App_context.load_verify_locations(self._App_server_ca)
61 | self._App_context.load_cert_chain(self._App_server_crt, self._App_server_key)
62 | else:
63 | self._App_context = None
64 |
65 |     def add_route(self, rule, endpoint, view_func, provide_automatic_options=None, **options):
66 | self.add_url_rule(rule, endpoint, view_func, provide_automatic_options, **options)
67 |
68 | def do_run(self):
69 | try:
70 | self.run(self._App_host, self._App_port, ssl_context=self._App_context)
71 | except:
72 | self.LOGGER.error('Unable to start server on {}:{}'.format(self._App_host, self._App_port))
73 | self.LOGGER.error('{}'.format(traceback.format_exc()))
74 | raise
75 |
76 | def quart_run(self):
77 |         # adapted from the hypercorn and quart projects: serve the app
78 |         # directly through hypercorn.asyncio.serve rather than going
79 |         # through the Quart.run() API
80 | config = HyperConfig()
81 | config.debug = self.debug
82 | config.access_log_format = "%(h)s %(r)s %(s)s %(b)s %(D)s"
83 | config.accesslog = self.LOGGER
84 | config.bind = ["{host}:{port}".format(**{'host':self._App_host,
85 | 'port':self._App_port})]
86 | config.certfile = self._App_server_crt
87 | config.keyfile = self._App_server_key
88 |
89 | config.errorlog = config.accesslog
90 | config.use_reloader = True
91 | scheme = "https" if config.ssl_enabled else "http"
92 |
93 | self.LOGGER.info("Running on {}://{} (CTRL + C to quit)".format(scheme, config.bind[0]))
94 | loop = None #asyncio.get_event_loop()
95 | if loop is not None:
96 |             loop.set_debug(config.debug or False)
97 | loop.run_until_complete(serve(self, config))
98 | else:
99 | asyncio.run(serve(self, config), debug=config.debug)
100 |
101 |
--------------------------------------------------------------------------------
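
Hypercorn above is a thin Quart subclass that can serve itself. A minimal sketch of standing one up without TLS (ca_crt/server_crt/server_key left as None, so no SSL context is built); the route and port are illustrative:

    from docker_honey.simple_commands.app import Hypercorn

    app = Hypercorn(__name__, host='127.0.0.1', port=8000)

    async def health():
        return {'ok': True}

    # add_url_rule is inherited from Quart; add_route() above wraps it
    app.add_url_rule('/health', 'health', health, methods=['GET'])
    app.quart_run()
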
/src/docker_honey/simple_commands/consts.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 | HIBERNATEABLE = ["m3", "m4", "m5", "c3", "c4", "c5", "r3", "r4", "r5"]
21 | STANDARD_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
22 |
23 | DEFAULT_PORT = 8000
24 | UBUNTU = 'ubuntu'
25 |
26 | COPY_FILE = 'sudo cp {src} {dst}'
27 |
28 | INSTALL_SYSTEMCTL_COMMANDS = [
29 | 'sudo cp {service_name}.service /lib/systemd/system/',
30 | 'sudo chmod 644 /lib/systemd/system/{service_name}.service',
31 | 'sudo systemctl daemon-reload',
32 | 'sudo systemctl enable {service_name}.service',
33 | 'sudo systemctl start {service_name}.service',
34 | 'sudo systemctl status {service_name}.service'
35 | ]
36 |
37 | DOCKER_SETUP_COMMANDS = [
38 | 'sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl software-properties-common git python3-pip',
39 | 'curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -',
40 | 'sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable"',
41 | 'sudo apt update && sudo apt install -y docker-ce docker-compose',
42 | 'sudo usermod -aG docker {username}',
43 | ]
44 |
45 |
46 | IPIFY_URL = "https://api.ipify.org/?format=json"
47 |
48 | REGION_TO_AMI = {
49 | 'us-east-1':"",
50 | 'us-east-2':"",
51 | 'us-west-1':"",
52 | 'us-west-2':"",
53 | 'sa-east-1':"",
54 | # 'ap-east-1':"ami-107d3e61",
55 | 'ap-south-1':"",
56 | 'ap-southeast-1':"",
57 | 'ap-southeast-2':"",
58 | 'ap-northeast-1':"",
59 | 'ap-northeast-2':"",
60 |
61 | "eu-north-1": "",
62 | "eu-central-1": "",
63 | "eu-west-1": "",
64 | "eu-west-2": "",
65 | "eu-west-3": "",
66 |
67 | }
68 |
69 | DEFAULT_REGION = 'us-east-2'
70 | DCS = list(REGION_TO_AMI.keys())
71 |
72 | MATCH_KEYS = [
73 | 'image_architecture',
74 | 'image_owner_alias',
75 | 'image_owner_id',
76 | 'image_virtualization_type',
77 | ]
--------------------------------------------------------------------------------
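
The command lists above are str.format() templates, so a concrete run renders them with the service and user names first. A small sketch; 'dockerhp' is just an illustrative service name:

    from docker_honey.simple_commands.consts import (
        DOCKER_SETUP_COMMANDS, INSTALL_SYSTEMCTL_COMMANDS)

    install_cmds = [c.format(service_name='dockerhp') for c in INSTALL_SYSTEMCTL_COMMANDS]
    docker_cmds = [c.format(username='ubuntu') for c in DOCKER_SETUP_COMMANDS]
    # either list can then be handed to ssh.Commands.execute_commands() as-is
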
/src/docker_honey/simple_commands/ssh.py:
--------------------------------------------------------------------------------
1 | from .util import *
2 | from .consts import *
3 | import os
4 | import paramiko
5 | import scp
6 | import io
7 | import time
8 |
9 | class Commands(object):
10 | LOGGER = get_stream_logger(__name__ + '.Commands')
11 |
12 | @classmethod
13 | def get_client(cls, host, retrys=5, fail_timeout=10.0, **kargs):
14 | client = paramiko.SSHClient()
15 | client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
16 | base_timeout = fail_timeout
17 |         keys = ['port', 'username', 'key_filename', 'password']
18 | _kargs = {k:kargs.get(k) for k in keys if kargs.get(k, None)}
19 | username = kargs.get('username')
20 | # print(_kargs)
21 | success = False
22 | while retrys > 0:
23 | try:
24 | client.connect(host, **_kargs)
25 | success = True
26 | cls.LOGGER.info("Connected to {}@{}".format(username, host))
27 | break
28 | except:
29 | retrys += -1
30 | cls.LOGGER.info("Failed connection, sleeping for {} to {}@{}".format(fail_timeout, username, host))
31 | time.sleep(fail_timeout)
32 | fail_timeout = fail_timeout * 2
33 | client = paramiko.SSHClient()
34 | client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
35 | return client if success else None
36 |
37 | @classmethod
38 | def upload_file(cls, src, dst, host=None, port=22, key_filename=None, password=None, username=UBUNTU, client=None):
39 | return cls.upload_files({dst:src}, host=host, port=port, key_filename=key_filename, password=password, username=username, client=client)
40 |
41 | @classmethod
42 | def upload_files(cls, dsts_srcs, host=None, port=22, key_filename=None, password=None, username=UBUNTU, client=None):
43 | output = []
44 |         client = client or cls.get_client(host=host, port=port, key_filename=key_filename, password=password, username=username)
45 | scp_client = scp.SCPClient(client.get_transport())
46 | cls.LOGGER.info("SCP uploading {} files".format(len(dsts_srcs)))
47 | for dst, src in dsts_srcs.items():
48 | cls.LOGGER.debug("SCP uploading {} --> {}".format(src, dst))
49 | scp_client.put(src, dst)
50 | return True
51 |
52 | @classmethod
53 | def upload_bytes(cls, src_buffer:bytes, dst, host=None, port=22, key_filename=None, password=None, username=UBUNTU):
54 | return cls.upload_multi_bytes({dst:src_buffer}, host=host, port=port, key_filename=key_filename, password=password, username=username)
55 |
56 | @classmethod
57 | def upload_multi_bytes(cls, dst_src_buffer:dict, host, port=22, key_filename=None, password=None, username=UBUNTU):
58 | output = []
59 | client = cls.get_client(host=host, port=port, key_filename=key_filename, password=password, username=username)
60 | scp_client = scp.SCPClient(client.get_transport())
61 | cls.LOGGER.info("SCP uploading {} files".format(len(dst_src_buffer)))
62 | for dst, src_buffer in dst_src_buffer.items():
63 | new_file = io.BytesIO(src_buffer)
64 | cls.LOGGER.debug("SCP uploading src_buffer --> {}".format(dst))
65 | scp_client.putfo(new_file, dst)
66 | return True
67 |
68 | @classmethod
69 | def execute_commands(cls, commands, client=None, host=None, port=22, key_filename=None, password=None, username=UBUNTU, debug=False, **cmd_kargs):
70 | if client is None and host:
71 | client = cls.get_client(host, port=port, key_filename=key_filename, password=password, username=username)
72 | elif client is None:
73 | raise Exception("paramiko.SSHClient or ssh parameters required")
74 |
75 | output = []
76 | cls.LOGGER.info("Executing {} commands".format(len(commands)))
77 | for cmd in commands:
78 | _cmd = cmd.format(**cmd_kargs)
79 | cls.LOGGER.debug("SSH executing '{}' on host".format(_cmd))
80 | _, stdout, stderr = client.exec_command(_cmd)
81 | results = {'command': cmd, 'stdout': None, 'stderr': None}
82 | results['stdout'] = stdout.read()
83 | results['stderr'] = stderr.read()
84 | if debug:
85 | cls.LOGGER.info("SSH execute results '{}' on host".format(results['stdout']))
86 | output.append(results)
87 |         return output
88 |
89 | @classmethod
90 | def install_docker(cls, client=None, host=None, port=22, key_filename=None,
91 | password=None, username=UBUNTU, commands=DOCKER_SETUP_COMMANDS):
92 | output = cls.execute_commands(commands, client=client, host=host, port=port, key_filename=key_filename, password=password, username=username)
93 | client.close()
94 | return output
95 |
96 | @classmethod
97 | def sudo_copy_file(cls, src, dst, client=None, host=None, port=22,
98 | key_filename=None, password=None, username=UBUNTU):
99 |         # normalize to the list-of-{src, dst} dicts sudo_copy_files expects
100 |         src_dst = [{"src": src, 'dst': dst}]
101 | return cls.sudo_copy_files(src_dst, client=client, host=host, port=port, key_filename=key_filename, password=password, username=username)
102 |
103 |
104 | @classmethod
105 | def sudo_copy_files(cls, src_dst, client=None, host=None, port=22, key_filename=None, password=None, username=UBUNTU):
106 |         commands = [COPY_FILE.format(**i) for i in src_dst]
107 |         return cls.execute_commands(commands, client=client, host=host, port=port, key_filename=key_filename, password=password, username=username)
108 |
--------------------------------------------------------------------------------
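
Commands wraps paramiko and scp with retry-and-backoff connection handling. A minimal sketch of uploading a config file and running a couple of commands; the host and key path are placeholders, and execute_commands() returns the per-command stdout/stderr list:

    from docker_honey.simple_commands import ssh

    host = '203.0.113.10'
    key = './ssh_keys/dockerhp-key.pem'

    ssh.Commands.upload_file('./hp_config.json', 'hp_config.json',
                             host=host, key_filename=key, username='ubuntu')
    results = ssh.Commands.execute_commands(['uname -a', 'docker --version'],
                                            host=host, key_filename=key, username='ubuntu')
    for r in results:
        print(r['command'], r['stdout'])
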
/src/docker_honey/simple_commands/util.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import collections.abc
3 | import logging
4 | import os
5 | import tempfile
6 | import socket
7 | import string
8 | import json
9 | import hashlib
10 | import random
11 | from datetime import datetime
12 | import uuid
13 | import requests
14 | import netifaces
15 |
16 | from .consts import *
17 |
18 | get_server_date = lambda : datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT')
19 | get_docker_id = lambda : hashlib.sha256(get_server_date().encode('ascii')).hexdigest()
20 | get_random_data = lambda : random_string_generator()
21 | get_iso_time = lambda: datetime.now().isoformat()
22 |
23 | def create_token(iters=1):
24 | return "-".join([str(uuid.uuid4()) for i in range(0, iters)])
25 |
26 | def random_string_generator(str_size=25, allowed_chars=string.ascii_letters + string.punctuation):
27 | return ''.join(random.choice(allowed_chars) for x in range(str_size))
28 |
29 | def get_external_ip():
30 | ext_ip = ''
31 |
32 | gws = netifaces.gateways()
33 | dft = gws.get('default', {})
34 | g = sorted(dft.items(), key=lambda k: k[0])
35 | if len(g) > 0:
36 | ext_ip = g[0][1][0]
37 |
38 | try:
39 | ext_ip = requests.get(IPIFY_URL).json()['ip']
40 | except:
41 | pass
42 | return ext_ip
43 |
44 |
45 | def get_stream_logger(name, level=logging.DEBUG, lformat=STANDARD_FORMAT):
46 | # create logger
47 | logger = logging.getLogger(name)
48 | logger.setLevel(level)
49 |
50 | # create console handler and set level to debug
51 | ch = logging.StreamHandler()
52 | ch.setLevel(level)
53 |
54 | # create formatter
55 | formatter = logging.Formatter(lformat)
56 |
57 | # add formatter to ch
58 | ch.setFormatter(formatter)
59 | # add ch to logger
60 | logger.addHandler(ch)
61 | return logger
62 |
63 | def reset_logger_level(logger, level):
64 | logger.setLevel(level)
65 | for handler in logger.handlers:
66 | handler.setLevel(level)
67 | return logger
68 |
69 | def merge_secrets(base_file_json, secrets_json_file):
70 | base_dict = json.load(open(base_file_json))
71 | secrets_dict = json.load(open(secrets_json_file))
72 | return merge_dicts(base_dict, secrets_dict)
73 |
74 | # https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
75 | def merge_dicts(orig_dict, new_dict):
76 | for key, val in new_dict.items():
77 |         if isinstance(val, collections.abc.Mapping):
78 | tmp = merge_dicts(orig_dict.get(key, { }), val)
79 | orig_dict[key] = tmp
80 | elif isinstance(val, list):
81 | if orig_dict.get(key, None) is None:
82 | orig_dict[key] = []
83 | orig_dict[key] = (orig_dict.get(key, []) + val)
84 | else:
85 | orig_dict[key] = new_dict[key]
86 | return orig_dict
--------------------------------------------------------------------------------
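
merge_dicts() is what lets a checked-in sample config be overlaid with a local secrets file: nested mappings merge recursively, lists concatenate, and scalars are overwritten by the new value. A small illustration with made-up keys:

    from docker_honey.simple_commands.util import merge_dicts

    base = {'mongo': {'host': 'localhost', 'ports': [27017]}, 'debug': False}
    overlay = {'mongo': {'password': 'example', 'ports': [27018]}, 'debug': True}
    merged = merge_dicts(base, overlay)
    # -> {'mongo': {'host': 'localhost', 'ports': [27017, 27018],
    #               'password': 'example'}, 'debug': True}
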
/src/docker_honey/util.py:
--------------------------------------------------------------------------------
1 | __copyright__ = """
2 |
3 | Copyright 2020 Cisco Systems, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 |
17 | """
18 | __license__ = "Apache 2.0"
19 |
20 |
21 | import os
22 | import tempfile
23 | import socket
24 | import string
25 | import json
26 | import hashlib
27 | import random
28 | from datetime import datetime
29 | import uuid
30 | import requests
31 | import netifaces
32 | from validator_collection import validators, checkers
33 | from .consts import *
34 |
35 | get_server_date = lambda : datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT')
36 | get_docker_id = lambda : hashlib.sha256(get_server_date().encode('ascii')).hexdigest()
37 | get_random_data = lambda : random_string_generator()
38 | get_iso_time = lambda: datetime.now().isoformat()
39 |
40 | def create_token(iters=1):
41 | return "-".join([str(uuid.uuid4()) for i in range(0, iters)])
42 |
43 | def random_string_generator(str_size=25, allowed_chars=string.ascii_letters + string.punctuation):
44 | return ''.join(random.choice(allowed_chars) for x in range(str_size))
45 |
46 | def random_alphanum_string_generator(str_size=25, allowed_chars=string.ascii_letters + string.digits):
47 |     return random_string_generator(str_size=str_size, allowed_chars=allowed_chars)
48 |
49 |
50 | def get_external_ip():
51 | ext_ip = ''
52 |
53 | gws = netifaces.gateways()
54 | dft = gws.get('default', {})
55 | g = sorted(dft.items(), key=lambda k: k[0])
56 | if len(g) > 0:
57 | ext_ip = g[0][1][0]
58 |
59 | try:
60 | ext_ip = requests.get("https://api.ipify.org/?format=json").json()['ip']
61 | except:
62 | pass
63 | return ext_ip
64 |
65 | def extract_json_data(req):
66 | data = None
67 | if isinstance(req, bytes):
68 | data_l = req.split(b'\r\n\r\n')
69 |         if len(data_l) > 1:
70 | data = b"\r\n\r\n".join(data_l[1:])
71 | else:
72 | return None
73 |
74 | if isinstance(req, str):
75 | data_l = req.split('\r\n\r\n')
76 |         if len(data_l) > 1:
77 | data = "\r\n\r\n".join(data_l[1:])
78 | else:
79 | return None
80 | try:
81 | if data:
82 | return json.loads(data)
83 | except:
84 | raise
85 |
86 | return None
87 |
88 | def get_match_group(rtype, req):
89 | if not rtype in IDENTIFY:
90 | return {}
91 | r = IDENTIFY[rtype].match(req)
92 | if r is None:
93 | return {}
94 | return r.groupdict()
95 |
96 | def generate_error(error_message='server error', api=API):
97 | fmt = RESPONSES.get(ERROR, ERROR_RETURN)
98 | ed = ERROR_DATA.copy()
99 | ed['message'] = error_message
100 | data = json.dumps(ed)
101 | size = len(data)
102 | kargs = {'api': api}
103 | kargs.update({
104 | 'docker_id': get_docker_id(),
105 | 'date': get_server_date(),
106 | 'size': size,
107 | 'iso_date': get_iso_time(),
108 | })
109 | resp = fmt.decode('ascii').format(**kargs)+data
110 | return resp
111 |
112 |
113 | def create_response(rtype, req):
114 | size = 0
115 | data = b''
116 | fmt = RESPONSES.get(rtype, GET_RETURN)
117 | kargs = get_match_group(rtype, req)
118 |     kargs['api'] = API if 'api' not in kargs else kargs['api'].decode('ascii').lstrip('v')
119 |
120 | if INFO_RETURN == fmt:
121 | data = json.dumps(INFO_DATA).replace('{iso_date}', get_iso_time())
122 | size = len(data)
123 | elif WAIT_RETURN == fmt:
124 | data = json.dumps(WAIT_RETURN_DATA).replace('{random_string}', get_random_data())
125 | size = len(data)
126 | elif ERROR_RETURN == fmt:
127 | data = json.dumps(ERROR_DATA)
128 | size = len(data)
129 | elif GET_VERSION_RETURN == fmt:
130 | data = json.dumps(GET_VERSION_DATA).replace('{api}', kargs['api'])
131 | size = len(data)
132 |
133 | kargs.update({
134 | 'docker_id': get_docker_id(),
135 | 'date': get_server_date(),
136 | 'size': size,
137 | 'iso_date': get_iso_time(),
138 | })
139 | if isinstance(data, bytes):
140 | data = data.decode('ascii')
141 |
142 | resp = fmt.decode('ascii').format(**kargs)+data
143 | return resp
144 |
145 | def get_handler_type(data):
146 | rtype = UNKNOWN
147 | for name, re in IDENTIFY.items():
148 | if re.match(data):
149 | rtype = name
150 | break
151 | return rtype
152 |
153 | def get_docker_sock():
154 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
155 | sock.connect('/var/run/docker.sock')
156 | return sock
157 |
158 | def create_listener_sock(port):
159 | s = socket.socket()
160 | s.bind(('', port))
161 | s.listen(10)
162 | return s
163 |
164 |
165 | def recv_until_done(client):
166 | data = b''
167 | while True:
168 | new_data = b''
169 | try:
170 | new_data = client.recv(MAX_DATA)
171 | except:
172 | pass
173 | data = data + new_data
174 | if data.find(b'GET') == 0 and new_data.find(b'\r\n\r\n') > 3:
175 | return data
176 | elif data.find(b'GET') == 0 and new_data.find(b'\n\n') > 3:
177 | return data
178 | elif new_data == b'':
179 | break
180 | return data
181 |
182 |
183 | def create_certs(ca_name='server_ca', common_name:str=None, common_names:list=None,
184 | output_path="./ssl/"):
185 |
186 | common_names = common_names if common_names else []
187 |
188 | if common_name and common_name not in common_names:
189 | common_names.append(common_name)
190 |
191 | with tempfile.TemporaryDirectory() as tmpdirname:
192 | kargs = {
193 | "tmpdirname": os.path.join(tmpdirname, 'certstrap'),
194 | "bin_dir": os.path.join(tmpdirname, 'certstrap/bin'),
195 | "out_dir": output_path,
196 | "ca_name": ca_name,
197 | "ca_key_path": os.path.join(output_path, "{}.key".format(ca_name)),
198 | "ca_crl_path": os.path.join(output_path, "{}.crl".format(ca_name)),
199 | "ca_crt_path": os.path.join(output_path, "{}.crt".format(ca_name)),
200 | "certstrap_url": "https://github.com/square/certstrap/releases/download/v1.2.0/certstrap-1.2.0-linux-amd64",
201 | "certstrap_bin": "{}/certstrap".format(os.path.join(tmpdirname, 'certstrap/bin')),
202 | "output_path": output_path,
203 | }
204 | os.system("mkdir -p {output_path}".format(**kargs))
205 | os.system("mkdir -p {bin_dir}".format(**kargs))
206 | os.system("curl -fLs -o {certstrap_bin} {certstrap_url}".format(**kargs))
207 | os.system("chmod +x {certstrap_bin}".format(**kargs))
208 | os.system('{certstrap_bin} init --passphrase "" --common-name {ca_name} --expires "100 years"'.format(**kargs))
209 |
210 | os.system('cp ./out/{ca_name}.crt {ca_crt_path}'.format(**kargs))
211 | os.system('cp ./out/{ca_name}.crl {ca_crl_path}'.format(**kargs))
212 | os.system('cp ./out/{ca_name}.key {ca_key_path}'.format(**kargs))
213 | for common_name in common_names:
214 | kargs.update({
215 | "common_name": common_name,
216 | "cert_path": os.path.join(output_path, "{}-cert.pem".format(common_name)),
217 | "key_path": os.path.join(output_path, "{}-key.pem".format(common_name)),
218 | "combined_path": os.path.join(output_path, "{}.pem".format(common_name)),
219 | })
220 |
221 | os.system('{certstrap_bin} request-cert --passphrase "" --common-name {common_name}'.format(**kargs))
222 | os.system('{certstrap_bin} sign {common_name} --passphrase "" --CA {ca_name} --expires "100 years"'.format(**kargs))
223 | os.system('cp ./out/{common_name}.crt {cert_path}'.format(**kargs))
224 | os.system('cp ./out/{common_name}.key {key_path}'.format(**kargs))
225 | os.system('cat {key_path} {cert_path} > {combined_path}'.format(**kargs))
226 |
227 |         os.system('rm -rf ./out/')
228 |
229 | def dict_or_none(data):
230 | try:
231 | i = json.loads(data)
232 | if isinstance(i, dict):
233 | return i
234 |         elif isinstance(i, list) and all([isinstance(j, list) and len(j) == 2 for j in i]):
235 | return {str(j[0]): j[1] for j in i}
236 | elif isinstance(i, int) or isinstance(i, str):
237 | return {str(i): ''}
238 | except:
239 | return None
240 |
241 | def str_or_none(data):
242 | try:
243 | i = json.loads(data)
244 | j = str(i)
245 | if len(j) > 0:
246 | return data
247 | return None
248 | except:
249 | if len(data) > 0:
250 | return data
251 | return None
252 |
253 | def url_or_none(data):
254 | try:
255 | return validators.url(data, allow_special_ips = True)
256 | except:
257 | return None
258 |
259 |
--------------------------------------------------------------------------------
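
create_certs() shells out to certstrap to mint a throwaway CA plus per-name keypairs, so it needs curl and a Linux amd64 host (it downloads the pinned certstrap binary). A minimal sketch; the common names mirror the repo's two services and are otherwise illustrative:

    from docker_honey.util import create_certs

    create_certs(ca_name='server_ca',
                 common_names=['collector', 'dockerhp'],
                 output_path='./ssl/')
    # leaves server_ca.{key,crt,crl} plus, per common name:
    #   <name>-cert.pem, <name>-key.pem, and <name>.pem (key + cert combined)
    # under ./ssl/
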