├── .env
├── .gitignore
├── .idea
│   ├── dictionaries
│   │   └── root.xml
│   ├── inspectionProfiles
│   │   └── Project_Default.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── skiptracer.iml
│   ├── vcs.xml
│   └── workspace.xml
├── Changelog
├── Dockerfile
├── LICENSE
├── MANIFEST.in
├── README.md
├── docker-compose.yml
├── docs
│   ├── build-plugin.md
│   └── inprogress-readme.md
├── requirements.txt
├── setup.cfg
├── setup.py
├── skiptracer.py
├── src
│   └── skiptracer
│       ├── __init__.py
│       ├── __main__.py
│       ├── banner.py
│       ├── colors
│       │   └── default_colors.py
│       ├── datasaver.py
│       ├── menus
│       │   ├── default_menus.py
│       │   └── help_menu.py
│       ├── plugins
│       │   ├── __init__.py
│       │   ├── advance_background_checks
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── base.py
│       │   ├── fouroneone
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── haveibeenpwned
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── knowem
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── linkedin
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── myspace
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── namechk2
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── plate
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── proxygrabber.py
│       │   ├── tinder
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── true_people
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── truthfinder
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── twitter
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   ├── who_call_id
│       │   │   ├── __init__.py
│       │   │   └── __main__.py
│       │   └── whoismind
│       │       ├── __init__.py
│       │       └── __main__.py
│       └── skiptracer.py
├── storage
│   ├── freemail.db
│   └── user-agents.db
└── test
    ├── test_advanced_background_checks.rst
    └── test_runner.py
/.env:
--------------------------------------------------------------------------------
1 | HAVEIBEENPWNED_API_KEY=some-api-key-here
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | plugins/__pycache__/*
4 | *.py[cod]
5 | *$py.class
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | .hypothesis/
49 | .pytest_cache/
50 |
51 | # Translations
52 | *.mo
53 | *.pot
54 |
55 | # Django stuff:
56 | *.log
57 | local_settings.py
58 | db.sqlite3
59 |
60 | # Flask stuff:
61 | instance/
62 | .webassets-cache
63 |
64 | # Scrapy stuff:
65 | .scrapy
66 |
67 | # Sphinx documentation
68 | docs/_build/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # pyenv
77 | .python-version
78 |
79 | # celery beat schedule file
80 | celerybeat-schedule
81 |
82 | # SageMath parsed files
83 | *.sage.py
84 |
85 | # Environments
86 | .env
87 | .venv
88 | env/
89 | venv/
90 | ENV/
91 | env.bak/
92 | venv.bak/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | /site
103 |
104 | # mypy
105 | .mypy_cache/
106 |
107 | # .idea
108 | .idea
109 |
--------------------------------------------------------------------------------
/.idea/dictionaries/root.xml:
--------------------------------------------------------------------------------
1 | <component name="ProjectDictionaryState">
2 |   <dictionary name="root">
3 |     <words>
4 |       <w>accountr</w>
5 |       <w>advancedbackgroundchecks</w>
6 |       <w>agerange</w>
7 |       <w>apprage</w>
8 |       <w>datakey</w>
9 |       <w>datareq</w>
10 |       <w>datasets</w>
11 |       <w>datastring</w>
12 |       <w>dictkey</w>
13 |       <w>emailmenu</w>
14 |       <w>encres</w>
15 |       <w>encresdic</w>
16 |       <w>fname</w>
17 |       <w>funclist</w>
18 |       <w>googleplus</w>
19 |       <w>gselect</w>
20 |       <w>haveibeenpwned</w>
21 |       <w>helpmenu</w>
22 |       <w>instagram</w>
23 |       <w>intromenu</w>
24 |       <w>jload</w>
25 |       <w>keylist</w>
26 |       <w>knowem</w>
27 |       <w>linkedin</w>
28 |       <w>lname</w>
29 |       <w>moddict</w>
30 |       <w>myspace</w>
31 |       <w>namechk</w>
32 |       <w>namemenu</w>
33 |       <w>ncook</w>
34 |       <w>paywall</w>
35 |       <w>phonemenu</w>
36 |       <w>platemenu</w>
37 |       <w>printfun</w>
38 |       <w>reddit</w>
39 |       <w>screenname</w>
40 |       <w>sitecookie</w>
41 |       <w>snmenu</w>
42 |       <w>whoismind</w>
43 |       <w>xmod</w>
44 |       <w>xservice</w>
45 |     </words>
46 |   </dictionary>
47 | </component>
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
[content not captured: XML markup stripped]
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
[content not captured: XML markup stripped]
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
[content not captured: XML markup stripped]
--------------------------------------------------------------------------------
/.idea/skiptracer.iml:
--------------------------------------------------------------------------------
[content not captured: XML markup stripped]
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
[content not captured: XML markup stripped]
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
[IntelliJ workspace configuration; XML markup stripped. Only scattered text values survive: true, DEFINITION_ORDER, timestamps 1530203147065 and 1530203503447, AlchemyModelDependency, #skiptracer]
--------------------------------------------------------------------------------
/Changelog:
--------------------------------------------------------------------------------
1 | This file documents any additions or modifications to the project
2 | +---------+-----------------+--------------------------------------------------+
3 | | AUTHOR | DATE | DESCRIPTION |
4 | +---------+-----------------+--------------------------------------------------+
5 | WSC -: Oct. 31st, 2018 :- Fixed handling of selection menus and pausing between executed modules
6 | WSC -: Oct. 31st, 2018 :- Fixed ABC module for JSON parsing of returned datasets
7 | WSC -: Nov. 1st, 2018 :- Fixed Linkedin module to stow credential pairs for Authed Scanning
8 | * * * * | -- Module will produce a clear text file on disk (caution)
9 | WSC -: Nov. 5th, 2018 :- Added reporter.py to plugins directory for report generation
10 | WSC -: Nov. 5th, 2018 :- Added DOCX report generator to intro menu
11 | * * * * | -- Option 88 to execute module
12 | WSC -: Nov. 5th, 2018 :- Added Changelog, as requested by: 'Maxe'
13 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7-slim
2 |
3 | MAINTAINER sietekk "sietekk@gmail.com"
4 |
5 | COPY requirements.txt /app/requirements.txt
6 |
7 | RUN pip3 install --no-cache-dir -r /app/requirements.txt
8 |
9 | COPY . /app
10 |
11 | WORKDIR /app/src
12 |
13 | FROM ubuntu:latest
14 | MAINTAINER Furkan SAYIM
15 |
16 | RUN apt-get update \
17 | && apt-get install git -y \
18 | && apt-get install python -y \
19 | && apt-get install python-pip -y \
20 | && git clone https://github.com/xillwillx/skiptracer.git
21 |
22 | RUN pip install -r skiptracer/requirements.txt
23 |
24 | WORKDIR /skiptracer
25 |
26 | CMD python skiptracer.py
27 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/MANIFEST.in
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Account moved to: https://gitlab.com/illwill
2 |
3 |
4 |
5 |
6 | ## No longer supported at this time.
7 |
8 |
9 | ## Python 3 migration is in progress. Please do not report issues yet; the code and its data sources still need significant upgrading.
10 |
11 |
12 |
13 | Project moved to https://gitlab.com/illwill/skiptracer
14 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 |
3 | services:
4 | skiptracer:
5 | build:
6 | context: .
7 | dockerfile: Dockerfile
8 | command: "python3 -m skiptracer"
9 |
--------------------------------------------------------------------------------
/docs/build-plugin.md:
--------------------------------------------------------------------------------
1 | # How to build a plugin
2 |
3 |
4 | This document provides a guide to building a plugin.
5 |
6 |
7 | Skiptracer uses a plugin architecture.
8 |
9 | Plugins are added to the following directory:
10 |
11 | ```
12 | src/skiptracer/plugins/
13 | ```
14 |
15 | Each plugin lives in its own folder under this directory, named after the
16 | plugin, and should contain a `__main__.py` and an `__init__.py`.
17 |
18 |
19 | The following demonstrates the basics of the `__init__.py` file:
20 |
21 | ```
22 |
23 | from ..base import PageGrabber
24 | from ...colors.default_colors import DefaultBodyColors as bc
25 | from .. import proxygrabber
26 |
27 |
28 |
29 |
30 | import builtins as bi
31 |
32 |
33 | class MyClassGrabber(PageGrabber):
34 | """ Give your class a name """
35 |
36 |
37 | def get_info(self, lookup, category):
38 | """
39 | Each class needs this method.
40 | Pass in any params you need to process
41 | e.g. email, name etc.
42 | """
43 |
44 | return
45 |
46 | ```
47 |
48 |
49 |
50 | Once added, register your plugin in `setup.py` under the
51 | `skiptracer.plugins` section of `entry_points`.
52 |
53 | For example:
54 |
55 | ```
56 | 'skiptracer.plugins': [
57 | 'myplugin = skiptracer.plugins.myplugin:MyClassGrabber',
58 | ...
59 | ]
60 | ```
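Under the hood, plugins registered this way are discoverable through `pkg_resources` entry points. A minimal sketch of such a lookup (illustrative only, not the framework's exact loader):

```python
import pkg_resources

# Iterate every class registered under the 'skiptracer.plugins' entry-point group
for entry_point in pkg_resources.iter_entry_points('skiptracer.plugins'):
    grabber_cls = entry_point.load()  # resolves e.g. skiptracer.plugins.myplugin:MyClassGrabber
    grabber = grabber_cls()
    grabber.get_info('username@domain.tld', 'email')
```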
61 |
62 |
63 | If your plugin requires parameters, please add these to the setup.cfg. For
64 | example:
65 |
66 | ```
67 | [plugin.myplugin]
68 | homepageurl = https://www.example.com
69 | loginurl = https://www.example.com/uas/login-submit
70 | logouturl = https://www.example.com/m/logout
71 | viewbyemail = https://example.com/sales/gmail/profile/viewByEmail/
72 | sessionkey = ""
73 | sessionpassword = ""
74 | ```
75 |
76 | These values can then be set either through the command line or via the `.env` file, as shown below.
77 |
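For example, a hypothetical `.env` entry overriding the session values above (the key names are illustrative; match them to whatever your plugin actually reads):

```
MYPLUGIN_SESSIONKEY=someuser@example.com
MYPLUGIN_SESSIONPASSWORD=some-password
```
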
78 | ## Plugin Menu Configuration
79 |
80 | As noted in the main readme, the menus in Skiptracer are configurable and handled by the setup.cfg file
81 | located in the package/source code.
82 |
83 | When adding a new plugin, you will need to update the setup.cfg to configure
84 | which menus the plugin will be displayed under. A plugin can appear under one
85 | or more menus.
86 |
87 | For example, if your plugin supports both email- and phone-based scraping,
88 | then it can be added to both menus.
89 |
90 | To do this, simply edit the setup.cfg and add the plugin name, title and description to
91 | the menu you wish it to appear under, for example:
92 |
93 | ```
94 | [menu.email]
95 | myplugin = ["My Plugin","Check if user exposes information through some site"]
96 | ```
97 |
98 | It's that simple.
99 |
100 |
--------------------------------------------------------------------------------
/docs/inprogress-readme.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Python 3 migration is in progress. Please do not report issues yet; the code and its data sources still need significant upgrading.
4 |
5 |
6 | ## Skiptracer - OSINT scraping framework
7 | [Docker image details on microbadger](https://microbadger.com/images/xshuden/skiptracer)
8 |
9 |
10 |
11 | Initial attack vectors for recon usually involve paying for data/API access (Recon-NG) or paying to use transforms (Maltego) to get data-mining results. Skiptracer uses basic Python webscraping (BeautifulSoup) of PII paywall sites to compile passive information on a target on a ramen-noodle budget.
12 |
13 |
14 | Background:
15 | -----------
16 | The following recording from DEFCON 26 Recon Village provides background on Skiptracer:
17 | [DEFCON 26 Recon Village Skiptracer Talk](https://www.youtube.com/watch?v=3mEOkwrxfsU)
18 |
19 |
20 | Installation
21 | -------------
22 | There are a number of ways to install and run Skiptracer. These are as follows.
23 |
24 | ### From source
25 |
26 | ```
27 | $ git clone https://github.com/xillwillx/skiptracer.git skiptracer
28 | ```
29 | __Install source__
30 | ```
31 | $ pip3 install -e skiptracer
32 | ```
33 | __Run__
34 | ```
35 | $ python3 -m skiptracer
36 | ```
37 |
38 | ### From requirements
39 |
40 | ```
41 | $ git clone https://github.com/xillwillx/skiptracer.git skiptracer
42 | $ cd skiptracer
43 | ```
44 |
45 | __Install requirements__
46 | ```
47 | $ pip3 install -r requirements.txt
48 | ```
49 |
50 | __Run__
51 | ```
52 | $ python3 -m skiptracer
53 | ```
54 |
55 |
56 | ### From Buscador
57 |
58 | Skiptracer is included in the Buscador OS.
59 |
60 | You can obtain a copy from the IntelTechniques website:
61 |
62 | [Buscador - Intel Techniques](https://inteltechniques.com/buscador/)
63 |
64 |
65 | ### Dockerize Environment
66 |
67 | You may run this application in a dockerized environment using the command:
68 |
69 | ```bash
70 | docker-compose run --rm skiptracer
71 | ```
72 |
73 | Note: the `--rm` flag will remove the container after execution.
74 |
75 | To use the DockerHub image run:
76 |
77 | ```
78 | $ docker run -it --name skiptracer xshuden/skiptracer
79 | OR
80 | $ docker run --rm -it --name skiptracer xshuden/skiptracer # container is deleted when you're done
81 | ```
82 |
83 |
84 | Usage
85 | ----
86 |
87 | Once Skiptracer is launched, the menu system can be used to navigate between plugins and
88 | execute them, passing in parameters from the command line. An illustrative session of the top-level menu is sketched after the feature list below.
89 |
90 | Currently supported features include:
91 |
92 | * Phone
93 | * Email
94 | * Screen names
95 | * Real names
96 | * Addresses
97 | * IP
98 | * Hostname
99 | * Breach Credentials
100 |
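For illustration, the top-level lookup menu (built from the `ltypes` list in `src/skiptracer/menus/default_menus.py`) looks roughly like this:

```
$ python3 -m skiptracer
[1] -Proxy - Set a request proxy
[2] -Email - Search targets by email address
[3] -Name - Search targets by First Last name combination
[4] -Phone - Search targets by telephone number
[5] -Screen Name - Search targets by known alias
[6] -License Plate - Search targets by license plate
[7] -Profiler - A "Guess Who" Q&A interactive user interface
[8] -Help - Details the application and use cases
[9] -Exit - Terminate the application
[!] Lookup menu - Please select a number:
```
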
101 | The plugin framework will allow contributors to submit new modules for different websites to help collect as much data as possible with minimal work. This makes Skiptracer your one-stop-shop to help you collect relevant information about a target to help expand your attack surface.
102 |
103 |
104 |
105 | Extending and configuring Skiptracer
106 | ------------------------------------
107 |
108 | The following section describes how to configure and extend
109 | Skiptracer's functionality using plugins and .cfg files.
110 |
111 | ### Plugins
112 |
113 |
114 | Skiptracer uses a plugin architecture.
115 |
116 | Plugins are added to the following directory:
117 |
118 | ```
119 | src/skiptracer/plugins/
120 | ```
121 |
122 | Each plugin lives in its own folder under this directory, named after the
123 | plugin, and should contain a `__main__.py` and an `__init__.py`.
124 |
125 | Once added, register them in `setup.py` under the
126 | `skiptracer.plugins` section of `entry_points`.
127 |
128 | For example:
129 |
130 | ```
131 | 'skiptracer.plugins': [
132 | 'myplugin = skiptracer.plugins.myplugin:MyNewSiteGrabber',
133 | ...
134 | ]
135 | ```
136 |
137 |
138 | If your plugin requires parameters, please add these to the setup.cfg. For
139 | example:
140 |
141 | ```
142 | [plugin.myplugin]
143 | homepageurl = https://www.example.com
144 | loginurl = https://www.example.com/uas/login-submit
145 | logouturl = https://www.example.com/m/logout
146 | viewbyemail = https://example.com/sales/gmail/profile/viewByEmail/
147 | sessionkey = ""
148 | sessionpassword = ""
149 | ```
150 |
151 | These values can then be set either through the command line or via the `.env` file.
152 |
153 | ### Plugin Menu Configuration
154 |
155 | The menus in Skiptracer are configurable and handled by the setup.cfg file
156 | located in the package/source code.
157 |
158 | When adding a new plugin, you will need to update the setup.cfg to configure
159 | which menus the plugin will be displayed under. A plugin can appear under one
160 | or more menus.
161 |
162 | For example, if your plugin supports both email- and phone-based scraping,
163 | then it can be added to both menus.
164 |
165 | To do this, simply edit the setup.cfg and add the plugin name, title and description to
166 | the menu you wish it to appear under, for example:
167 |
168 | ```
169 | [menu.email]
170 | myplugin = ["My Plugin","Check if user exposes information through some site"]
171 | ```
172 |
173 | ### Tests
174 |
175 | Tests are handled via doctest. They can be run with:
176 |
177 | ```
178 | python3 -m doctest test_runner.py
179 | ```
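A doctest is just an interactive session embedded in a docstring, which the `doctest` module replays. A minimal sketch (this helper is illustrative, not part of the package):

```python
def normalize_phone(number):
    """Strip separators from a phone number.

    >>> normalize_phone("123-456-7890")
    '1234567890'
    >>> normalize_phone("123 456 7890")
    '1234567890'
    """
    return number.replace("-", "").replace(" ", "")
```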
180 |
181 |
182 | Full details on how to use Skiptracer are on the wiki located [here](https://github.com/xillwillx/skiptracer/wiki)
183 |
184 | To-Do
185 | ----
186 | Skiptracer is intended to be a community-driven application. If you are interested in helping out, drop us a note.
187 |
188 | * Finish converting to Python3 - Py2 EoL is 1/1/20
189 | * Add more API support
190 | * More options for other countries, so results are less U.S.-centric
191 | * Bypass some of the methods being used to block scrapers, e.g. headless Selenium
192 | * Ideas?
193 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | appnope==0.1.0
2 | backcall==0.2.0
3 | beautifulsoup4==4.9.1
4 | bs4==0.0.1
5 | certifi==2020.6.20
6 | cfscrape==2.1.1
7 | chardet==3.0.4
8 | click==7.1.2
9 | decorator==4.4.2
10 | idna==2.10
11 | ipdb==0.13.3
12 | ipython==7.16.1
13 | ipython-genutils==0.2.0
14 | jedi==0.17.1
15 | lxml==4.5.1
16 | numpy==1.19.0
17 | parso==0.7.0
18 | pexpect==4.8.0
19 | pickleshare==0.7.5
20 | pprint==0.1
21 | prompt-toolkit==3.0.5
22 | ptyprocess==0.6.0
23 | Pygments==2.6.1
24 | python-dotenv==0.13.0
25 | requests==2.24.0
26 | selenium==3.141.0
27 | simplejson==3.17.0
28 | six==1.15.0
29 | -e git+git@github.com:xillwillx/skiptracer.git@167f357e309c23f41a733cb112afcdbb9346b9bd#egg=skiptracer
30 | soupsieve==2.0.1
31 | tqdm==4.47.0
32 | traitlets==4.3.3
33 | urllib3==1.25.9
34 | wcwidth==0.2.5
35 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | # Sphinx configuration
2 | [build_sphinx]
3 | build_dir = build/doc
4 |
5 | [menu.email]
6 | linkedin = ["LinkedIn-Sales","Check if user exposes information through LinkedIn"]
7 | haveibeenpwned = ["HaveIBeenPwned", "Check email against known compromised networks"]
8 | myspace = ["Myspace", "Check if users account has a registered account"]
9 | whoismind = ["WhoisAmped", "Check email to registered domains"]
10 | advance_background_checks = ["AdvancedBackgroundChecks", "Run email through public page of paid access"]
11 |
12 | [menu.name]
13 | truthfinder = ["Truth Finder", "Run name through public page of paid access"]
14 | true_people = ["True People", "Run email through public page of paid access"]
15 | advance_background_checks = ["AdvancedBackgroundChecks", "Run name through public page of paid access"]
16 |
17 | [menu.phone]
18 | true_people = ["True People", "Reverse telephone trace on given number"]
19 | who_call_id = ["Who Called", "Reverse telephone trace on given number"]
20 | fouroneone_info = ["Four One One", "Reverse telephone trace on given number"]
21 | advance_background_checks = ["AdvancedBackgroundChecks", "Run name through public page of paid access"]
22 |
23 | [menu.screenname]
24 | twitter = ["Twitter", "Run screenname and grab tweets"]
25 | knowem = ["Knowem", "Run screenname through to determine registered sites"]
26 | namechk2 = ["NameChk", "Run screenname through to determine registered sites"]
27 | tinder = ["Tinder", "Run screenname and grab information if registered"]
28 |
29 | [menu.plate]
30 | plate = ["Plate Search", "Run known vehicle plates against a database"]
31 |
32 | # Plugin parameters
33 | # e.g. URLS
34 | [plugin.linkedin]
35 | homepageurl = https://www.linkedin.com
36 | loginurl = https://www.linkedin.com/uas/login-submit
37 | logouturl = https://www.linkedin.com/m/logout
38 | viewbyemail = https://linkedin.com/sales/gmail/profile/viewByEmail/
39 | sessionkey = ""
40 | sessionpassword = ""
41 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | from setuptools.command.install import install
3 | import io
4 |
5 |
6 | def read(*filenames, **kwargs):
7 | encoding = kwargs.get('encoding', 'utf-8')
8 | sep = kwargs.get('sep', '\n')
9 | buf = []
10 | for filename in filenames:
11 | with io.open(filename, encoding=encoding) as f:
12 | buf.append(f.read())
13 | return sep.join(buf)
14 |
15 |
16 | long_description = read('README.md')
17 |
18 | setup(
19 | name='skiptracer',
20 | version='3.0.0',
21 | description='OSINT python webscraping framework',
22 | long_description=long_description,
23 | maintainer='xillwillx',
24 | license='Apache 2.0',
25 | url='https://github.com/xillwillx/skiptracer',
26 | package_dir={'': 'src'},
27 | include_package_data=True,
28 | packages=find_packages('src'),
31 | entry_points={
32 | 'console_scripts': [
33 | 'skiptracer = skiptracer.__main__:main'
34 | ],
35 | 'skiptracer.plugins': [
36 | 'fouroneone_info = skiptracer.plugins.fouroneone:FourOneOneGrabber',
37 | 'haveibeenpwned = skiptracer.plugins.haveibeenpwned:HaveIBeenPwwnedGrabber',
38 | 'knowem = skiptracer.plugins.knowem:KnowemGrabber',
39 | 'linkedin = skiptracer.plugins.linkedin:LinkedInSalesGrabber',
40 | 'myspace = skiptracer.plugins.myspace:MySpaceGrabber',
41 | 'namechk2 = skiptracer.plugins.namechk2:NameChkGrabber',
42 | 'plate = skiptracer.plugins.plate:VinGrabber',
43 | 'tinder = skiptracer.plugins.tinder:TinderGrabber',
44 | 'true_people = skiptracer.plugins.true_people:TruePeopleGrabber',
45 | 'truthfinder = skiptracer.plugins.truthfinder:TruthFinderGrabber',
46 | 'twitter = skiptracer.plugins.twitter:TwitterGrabber',
47 | 'who_call_id = skiptracer.plugins.who_call_id:WhoCallIdGrabber',
48 | 'whoismind = skiptracer.plugins.whoismind:WhoisMindGrabber',
49 | 'advance_background_checks = skiptracer.plugins.advance_background_checks:AdvanceBackgroundGrabber'
50 | ],
51 | 'skiptracer.menus': [
52 | 'default_menus = skiptracer.menus.default_menus:DefaultMenus'
53 | ],
54 | 'skiptracer.colors': [
55 | 'default_colors = skiptracer.colors.default_colors:DefaultBodyColors'
56 | ]
57 | },
58 | install_requires=[
59 | 'bs4',
60 | 'lxml',
61 | 'requests',
62 | 'ipdb',
63 | 'pprint',
64 | 'click',
65 | 'cfscrape',
66 | 'numpy',
67 | 'simplejson',
68 | 'tqdm',
69 | 'selenium',
70 | 'python-dotenv'
71 | ]
72 | )
73 |
--------------------------------------------------------------------------------
/skiptracer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | from __future__ import print_function
4 | from plugins.menus import menus
5 | from plugins.banner import Logo
6 |
7 | import sys
8 | import signal
9 | try:
10 | import __builtin__ as bi
11 | except BaseException:
12 | import builtins as bi
13 | import ast
14 | from plugins.colors import BodyColors as bc
15 |
16 |
17 | def signal_handler(signal, frame):
18 | print("")
19 | sys.exit(0)
20 |
21 |
22 | signal.signal(signal.SIGINT, signal_handler)
23 |
24 | bi.search_string = None
25 | bi.lookup = None
26 | bi.output = None
27 | bi.outdata = dict()
28 | bi.webproxy = None
29 | bi.proxy = None
30 | bi.debug = False
31 |
32 | Logo().banner()
33 |
34 |
35 | if __name__ == "__main__": # If true, run main function of framework
36 | try:
37 | if str(bi.output).lower() == "y":
38 | bi.filename = input(
39 | "[Please provide the filename for output? (somefile.txt|somefile.json)]: ")
40 |
41 | def writeout():
42 | import json
43 | try:
44 | pg.write_file(json.dumps(bi.outdata), bi.filename)
45 | print((" [" +
46 | bc.CRED +
47 | "X" +
48 | bc.CEND +
49 | "] " +
50 | bc.CYLW +
51 | " Output written to disk: ./%s\n" +
52 | bc.CEND) %
53 | bi.filename)
54 | except Exception as nowriteJSON:
55 | if bi.debug:
56 | print((" [" +
57 | bc.CRED +
58 | "X" +
59 | bc.CEND +
60 | "] " +
61 | bc.CYLW +
62 | "Output failed to write to disk %s\n" +
63 | bc.CEND) %
64 | nowriteJSON)
65 | else:
66 | print(
67 | " [" +
68 | bc.CRED +
69 | "X" +
70 | bc.CEND +
71 | "] " +
72 | bc.CYLW +
73 | "Output failed to write to disk %s\n" +
74 | bc.CEND)
75 | menus().intromenu()
76 | except Exception as failedmenu:
77 | print("Failed menu: %s" % (failedmenu))
78 | pass
79 |
--------------------------------------------------------------------------------
/src/skiptracer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/__init__.py
--------------------------------------------------------------------------------
/src/skiptracer/__main__.py:
--------------------------------------------------------------------------------
1 | from .skiptracer import SkipTracer
2 | from .banner import Banner
3 |
4 |
5 | def main():
6 | """
7 | Start skip tracer
8 | """
9 | plugins = plugin_processor('skiptracer.plugins', 'all')
10 | banner = Banner()
11 | banner.banner()
12 | skiptracer = SkipTracer(plugins)
13 |
14 |
15 | def plugin_processor(cat, plugins):
16 | """
17 | Return a list of plugins
18 | to use
19 | """
20 | plugins_to_use = {}
21 | plugins_to_use[cat] = plugins.split(',')
22 | return plugins_to_use
23 |
24 |
25 | if __name__ == "__main__":
26 | main()
27 |
--------------------------------------------------------------------------------
/src/skiptracer/banner.py:
--------------------------------------------------------------------------------
1 | from .colors.default_colors import DefaultBodyColors as bc
2 |
3 |
4 | class Banner():
5 | """
6 | Display the SkipTracer banner
7 | using the BodyColors lib
8 | to set text color.
9 | """
10 |
11 | def banner(self):
12 | """
13 | Print the banner
14 | out to the screen
15 | """
16 | print("")
17 | print("\t\t.▄▄ · ▄ •▄ ▪ ▄▄▄·▄▄▄▄▄▄▄▄ ▄▄▄· ▄▄· ▄▄▄ .▄▄▄ ")
18 | print("\t\t▐█ ▀. █▌▄▌▪██ ▐█ ▄█•██ ▀▄ █·▐█ ▀█ ▐█ ▌▪▀▄.▀·▀▄ █·")
19 | print("\t\t▄▀▀▀█▄▐▀▀▄·▐█· ██▀· ▐█.▪▐▀▀▄ ▄█▀▀█ ██ ▄▄▐▀▀▪▄▐▀▀▄ ")
20 | print("\t\t▐█▄▪▐█▐█.█▌▐█▌▐█▪·• ▐█▌·▐█•█▌▐█ ▪▐▌▐███▌▐█▄▄▌▐█•█▌")
21 | print(
22 | ("\t\t {},.-~*´¨¯¨`*·~-.¸{}-({}by{})-{},.-~*´¨¯¨`*·~-.¸{} \n").format(
23 | bc.CRED,
24 | bc.CYLW,
25 | bc.CCYN,
26 | bc.CYLW,
27 | bc.CRED,
28 | bc.CEND))
29 | print(
30 | ("\t\t\t {}▀ █ █ █▀▄▀█ {}█▀▀█ {}█▀▀▄ {}").format(
31 | bc.CBLU,
32 | bc.CRED,
33 | bc.CBLU,
34 | bc.CEND))
35 | print(
36 | ("\t\t\t {}█ █ █ █ ▀ █ {}█ █ {}█▀▀▄{}").format(
37 | bc.CBLU,
38 | bc.CRED,
39 | bc.CBLU,
40 | bc.CEND))
41 | print(
42 | ("\t\t\t {}▀ ▀ ▀ ▀ ▀ {}▀▀▀▀ {}▀▀▀ {}").format(
43 | bc.CBLU,
44 | bc.CRED,
45 | bc.CBLU,
46 | bc.CEND))
47 | print(("\t\t\t {} https://illmob.org {}\n").format(bc.CYLW, bc.CEND))
48 |
--------------------------------------------------------------------------------
/src/skiptracer/colors/default_colors.py:
--------------------------------------------------------------------------------
1 | class DefaultBodyColors: # Sets colorization for application use
2 | def __init__(self): # Initialize the values for color
3 | pass
4 | CCYN = '\033[96m'
5 | CRED = '\033[91m'
6 | CGRN = '\033[92m'
7 | CYLW = '\033[93m'
8 | CBLU = '\033[94m'
9 | CPRP = '\033[95m'
10 | CEND = '\033[0m'
11 | CFON = '\033[5m'
12 |
--------------------------------------------------------------------------------
/src/skiptracer/datasaver.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import __builtin__ as bi
3 | except BaseException:
4 |     import builtins as bi
5 |
6 | import json
7 |
8 | from .colors.default_colors import DefaultBodyColors as bc
9 |
10 |
11 | class DataSaver():
12 |
13 |     bi.outdata = dict()
14 |     bi.output = ''
15 |
16 |     def __init__(self):
17 |         bi.webproxy = input("[Do we wish to enable proxy support? (Y/n)]: ")
18 |         bi.output = input(
19 |             "[Do we wish to save returned data to disk? (Y/n)]: ")
20 |         if str(bi.output).lower() == "y":
21 |             bi.filename = input(
22 |                 "[Please provide the filename for output? (somefile.txt|somefile.json)]: ")
23 |             self.writeout()
24 |
25 |     def writeout(self):
26 |         """
27 |         Write collected output to disk.
28 |         NOTE: assumes a page-grabber helper exposing write_file() is
29 |         available as `pg`; it is not defined in this module.
30 |         """
31 |         try:
32 |             pg.write_file(json.dumps(bi.outdata), bi.filename)
33 |             print((" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
34 |                    " Output written to disk: ./%s\n" + bc.CEND) % bi.filename)
35 |         except Exception as nowriteJSON:
36 |             if bi.debug:
37 |                 print((" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
38 |                        "Output failed to write to disk %s\n" + bc.CEND) %
39 |                       nowriteJSON)
40 |             else:
41 |                 print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
42 |                       "Output failed to write to disk %s\n" + bc.CEND)
43 |
--------------------------------------------------------------------------------
/src/skiptracer/menus/default_menus.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import print_function
3 | from pkg_resources import get_distribution
4 | from .help_menu import HelpMenu
5 | from ..colors.default_colors import DefaultBodyColors as bc
6 | from ..plugins import proxygrabber
7 |
8 | import sys
9 | import configparser
10 | import pkg_resources
11 | import ast
12 | import builtins as bi
11 |
12 |
13 | class DefaultMenus():
14 |
15 | plugin_list = {}
16 | config = []
17 | emodules = []
18 | nmodules = []
19 | pmodules = []
20 | snmodules = []
21 | plmodules = []
22 | search_string = ''
23 |
24 | default_items = [
25 | {'key': 'all', 'text': 'All - Run all modules associated with this group'},
26 | {'key': 'back', 'text': 'Back - Return to main menu'},
27 | {'key': 'exit', 'text': 'Exit - Terminate the application'}
28 | ]
29 |
30 | ltypes = [
31 | {'key': 'proxy', 'text': 'Proxy - Set a request proxy'},
32 | {'key': 'email', 'text': 'Email - Search targets by email address'},
33 | {'key': 'name', 'text': 'Name - Search targets by First Last name combination'},
34 | {'key': 'phone', 'text': 'Phone - Search targets by telephone number'},
35 | {'key': 'screen', 'text': 'Screen Name - Search targets by known alias'},
36 | {'key': 'license', 'text': 'License Plate - Search targets by license plate'},
37 | {'key': 'profiler',
38 | 'text': 'Profiler - A "Guess Who" Q&A interactive user interface'},
39 | {'key': 'help', 'text': 'Help - Details the application and use cases'},
40 | {'key': 'exit', 'text': 'Exit - Terminate the application'}
41 | ]
42 |
43 | def __init__(self, plugins):
44 | """
45 | Get a list of plugins
46 | """
47 | self.plugin_list = plugins
48 | self.config = configparser.ConfigParser()
49 | get_plugin_cats = pkg_resources.resource_filename(
50 | 'skiptracer', '../../setup.cfg')
51 | self.config.read(get_plugin_cats)
52 |
53 | def useproxy(self):
54 | """
55 | Generate a new proxy
56 | for masking requests
57 | """
58 | if str(bi.webproxy).lower() == "y":
59 | bi.proxy = proxygrabber.new_proxy()
60 | return True
61 | else:
62 | return False
63 |
64 | def helpmenu(self):
65 | """
66 | Display help text
67 | to user
68 | """
69 | HelpMenu()
70 |
71 | def intromenu(self):
72 | """
73 | Top level intro menu
74 | """
75 | self.search_string = ""
76 | bi.lookup = ''
77 | if self.useproxy():
78 | print("\t [" + bc.CRED + "::ATTENTION::" + bc.CEND + "]" +
79 | bc.CYLW + " Proxied requests are unreliable " + bc.CEND +
80 | "[" + bc.CRED + "::ATTENTION::" + bc.CEND + "]")
81 |
82 | gselect = ""
83 | for i, v in enumerate(self.ltypes):
84 | print('[' + str(i + 1) + '] -' + self.ltypes[i]['text'])
85 |
86 | try:
87 | selection = int(input("[!] Lookup menu - Please select a number:"))
88 | gselect = self.ltypes[selection - 1]['key']
89 | except Exception as failselect:
90 | print("Please use an integer value for your selection!")
91 |
92 | if gselect == "":
93 | self.intromenu()
94 | if gselect == "exit":
95 | sys.exit()
96 | if gselect == "proxy":
97 | self.proxymenu()
98 | if gselect == "email":
99 | self.emailmenu()
100 | if gselect == "name":
101 | self.namemenu()
102 | if gselect == "phone":
103 | self.phonemenu()
104 | if gselect == "screen":
105 | self.snmenu()
106 | if gselect == "license":
107 | self.platemenu()
108 | if gselect == "profiler":
109 | self.profiler()
110 | if gselect == "help":
111 | self.helpmenu()
112 |
113 | def grabplugins(self, plugin_type, plugin_list):
114 | """
115 | Grab a list of relevant plugins.
116 | plugin_type = ref to variable to store list of plugin modules
117 | plugin_list = the list from the setup.cfg to use
118 | """
119 | for i in plugin_list:
120 | tc = ast.literal_eval(plugin_list[i])
121 | plugin_type.append({'key': i, 'text': tc[0] + " - " + tc[1]})
122 |
123 | return plugin_type + self.default_items
124 |
125 | def grabuserchoice(self, plugin_type, textsub):
126 | """
127 | Function to grab user choice.
128 | plugin_type = var with list of plugin modules
129 | textsub = String to display in menu e.g. Email, Name
130 | """
131 | gselect = ""
132 |
133 | print(" [!] " + textsub + " search menu - Please select a number")
134 |
135 | for i, v in enumerate(plugin_type):
136 | print(' [' + str(i + 1) + '] -' + plugin_type[i]['text'])
137 |
138 | try:
139 | selection = int(input(" [!] Select a number to continue: "))
140 | gselect = plugin_type[selection - 1]['key']
141 | except Exception as failselect:
142 | print("Please use an integer value for your selection!")
143 |
144 | return gselect
145 |
146 | def selectchoice(self, menu, mtype, error, plugins, gselect):
147 | """
148 | Select a menu item and then
149 | action it.
150 | """
151 | print ("mtype is: "+str(mtype))
152 | if gselect == "":
153 | menu()
154 | if gselect == "exit":
155 | sys.exit()
156 | if gselect == "back":
157 | self.intromenu()
158 |
159 | if not self.search_string or self.search_string == '':
160 | self.search_string = input(error)
161 |
162 | print()
163 | self.useproxy()
164 | if gselect != "all":
165 | self.plugin_list[gselect]().get_info(self.search_string, mtype)
166 |
167 | if gselect == "all":
168 | for i in plugins:
169 | self.plugin_list[i]().get_info(self.search_string, mtype)
170 | menu()
171 |
172 | def proxymenu(self):
173 | """
174 | Set a proxy for requests
175 | """
176 |
177 | print("Proxy development in progress")
178 |
179 | def emailmenu(self):
180 | """
181 | Display the email modules to the
182 | user.
183 | """
184 |
185 | self.emodules = []
186 | self.emodules = self.grabplugins(
187 | self.emodules, self.config['menu.email'])
188 | gselect = self.grabuserchoice(self.emodules, "E-Mail")
189 |
190 | self.selectchoice(
191 | self.emailmenu,
192 | "email",
193 | "[What is the marks email address? - ex: username@domain.tld]:",
194 | self.config['menu.email'],
195 | gselect
196 | )
197 |
198 | def namemenu(self):
199 | """
200 | Print menu for
201 | name matching plugins
202 | """
203 | self.nmodules = []
204 | self.nmodules = self.grabplugins(
205 | self.nmodules, self.config['menu.name'])
206 | gselect = self.grabuserchoice(self.nmodules, "Name")
207 |
208 | self.selectchoice(
209 | self.namemenu,
210 | "name",
211 | "[What is the marks name? - ex: First Lastname]: ",
212 | self.config['menu.name'],
213 | gselect
214 | )
215 |
216 | def phonemenu(self):
217 | """
218 | Display the phone
219 | menu to the user.
220 | """
221 | self.pmodules = []
222 | self.pmodules = self.grabplugins(
223 | self.pmodules, self.config['menu.phone'])
224 | gselect = self.grabuserchoice(self.pmodules, "Phone")
225 | self.selectchoice(
226 | self.phonemenu,
227 | "phone",
228 | "[What is the marks phone number? - ex: 1234567890]: ",
229 | self.config['menu.phone'],
230 | gselect
231 | )
232 |
233 | def snmenu(self):
234 | """
235 | Screen Name grabbing tools menu
236 | """
237 | self.snmodules = self.grabplugins(
238 | self.snmodules, self.config['menu.screenname'])
239 | gselect = self.grabuserchoice(self.snmodules, "Screen Name")
240 | self.selectchoice(
241 | self.snmenu,
242 | "screenname",
243 | "[What is the marks screenname? - ex: (Ac1dBurn|Zer0Cool)]: ",
244 | self.config['menu.screenname'],
245 | gselect
246 | )
247 |
248 | def platemenu(self):
249 | """
250 | Enter a plate number
251 | """
252 | self.plmodules = self.grabplugins(
253 | self.plmodules, self.config['menu.plate'])
254 | gselect = self.grabuserchoice(self.plmodules, "Plate Number")
255 |
256 | self.selectchoice(
257 | self.platemenu,
258 | "plate",
259 | "[What is the marks vehicle plate number? - ex: (XYZ123|0U812)]: ",
260 | self.config['menu.plate'],
261 | gselect
262 | )
263 |
264 | def profiler(self):
265 | """
266 | Profiler output - guess who interactive interface
267 | """
268 | fname = input("\t[Whats the users first name? - ex: Alice]: ")
269 | lname = input("\t[Whats the users last name? - ex: Smith]: ")
270 | bi.name = fname + " " + lname
271 | bi.agerange = input("\t[Whats the marks age range? - ex: 18-100]: ")
272 | bi.apprage = input("\t[Whats the marks suspected age? - ex: 18]: ")
273 | bi.state = input(
274 | "\t[What state does the mark live in? - ex: (FL|Florida)]: ")
275 | bi.city = input(
276 | "\t[What city does the mark live in? - ex: Orlando]: ")
277 | bi.zip = input(
278 | "\t[Whats the zipcode the mark lives in? - ex: 12345]: ")
279 | bi.phone = input(
280 | "\t[What is a known phone number for the mark? - ex: 1234567890]: ")
281 | bi.screenname = input(
282 | "\t[What are the known aliasis of the mark? - ex: (Ac1dBurn|Zer0cool)]: ")
283 | bi.plate = input(
284 | "\t[Does the mark have a known license plate? - ex: (ABC1234|XYZ123)]: ")
285 | bi.email = input(
286 | "\t[What is the marks email address? - ex: username@domain.tld]: ")
287 |
288 | self.intromenu()
289 |
--------------------------------------------------------------------------------
/src/skiptracer/menus/help_menu.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | from __future__ import print_function
4 | from pkg_resources import get_distribution
5 |
6 | import sys
7 | import configparser
8 | import pkg_resources
9 | import ast
10 |
11 | try:
12 | import __builtin__ as bi
13 | except BaseException:
14 | import builtins as bi
15 |
16 |
17 | class HelpMenu():
18 | """
19 | Default help menu
20 | and text.
21 | """
22 |
23 | def __init__(self):
24 | self.display_help()
25 |
26 | def display_help(self):
27 | """
28 | Help text
29 | """
30 |
31 | print("\t[INFORMATION]::")
32 | print("""
33 | This application is designed to query and parse 3rd party services in an automated fashion,
34 | to increase productivity while conducting a background investigation. This application
35 | can be useful when trying to find hard-to-locate targets. The following represent the types
36 | of searches that can be performed.
37 | \tEmail: 'Investigate with known email address'
38 | \tName: 'Investigate with known First/Last name'
39 | \tPhone: 'Investigate with known Phone Number'
40 | \tScreenName: 'Investigate with known Screen Name'
41 | \tPlate: 'Investigate with known License Plate'
42 | Each of these categories offers different modules that request 3rd party sites after the information
43 | has been submitted by the user. For example, the application may request a target email address.
44 | Using these classifiers can reveal additional information that can be utilized within the application.
45 | These classifiers may reveal telephone, physical address, or other useful data.
46 | All modules included in the classifier may be run with the 'ALL' qualifier or individually. Additionally,
47 | users can choose to reset the query string and continue using the same interface without having to restart
48 | the application.
49 | The following section will detail specifics about the modules offered for each classifier.
50 | :: EMAIL ::
51 | Requires a user to supply a fully qualified Email address:
52 | -: Format: username@domain.tld
53 | This class of searches includes the following modules:
54 | -: LinkedIn - Check if user exposes information through LinkedIn
55 | -: HaveIBeenPwned - Check email against known compromised networks
56 | -: Myspace - Check if users account has a registered account
57 | -: AdvancedBackgroundChecks - Run email through public page of paid access
58 | :: NAME ::
59 | Requires a user to supply a First and Last name:
60 | -: Format: Alice Smith
61 | This class of searches includes the following modules:
62 | -: Truth Finder - Check a targets name using Truth Finder
63 | -: True People - Check a targets name using True People
64 | -: AdvancedBackgroundChecks - Checks targets name through ABC
65 | :: PHONE ::
66 | Requires a user to supply a US based telephone number
67 | -: Format: 123 456 7890
68 | This class of searches includes the following modules
69 | -: True People - Check a targets phone number using True People
70 | -: WhoCalled - WhoCalled reverse lookup of telephone number
71 | -: 411 - Reverse telephone lookup from 411 of telephone number
72 | -: AdvancedBackgroundChecks - Checks targets phone number through ABC
73 | :: SCNAME ::
74 | Requires a user to supply a known screenname:
75 | -: Format: crazy8s
76 | This class of searches includes the following modules:
77 | -: Knowem - Checks screen name against numerous sites for registered account
78 | -: NameChk - Checks screen name against numerous sites for registered account
79 | -: Tinder - Checks screen name against known Tinder users
80 | :: PLATE ::
81 | Requires user to supply a known plate
82 | -: Format: 123456
83 | This class of searches includes the following modules:
84 | -: Plate Search - Runs known plates through nationwide Database
85 | """)
86 |
--------------------------------------------------------------------------------
/src/skiptracer/plugins/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/__init__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/advance_background_checks/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from __future__ import absolute_import
3 |
4 | from ..base import PageGrabber
5 | from ...colors.default_colors import DefaultBodyColors as bc
6 | from .. import proxygrabber
7 | from time import sleep
8 |
9 | import re
10 | import logging
11 | import json
12 | import base64 as b64
13 | import sys
14 |
15 | try:
16 | import __builtin__ as bi
17 | except BaseException:
18 | import builtins as bi
19 |
20 |
21 | class AdvanceBackgroundGrabber(PageGrabber):
22 | """
23 | Grab data from Advanced Background
24 | site
25 | """
26 | url = ""
27 |
28 | def __init__(self):
29 | """
30 | Load up AdvanceBackgroundGrabber plugin configs
31 | """
32 | super(AdvanceBackgroundGrabber, self).__init__()
33 |
34 | def get_info(self, lookup, information):
35 | """
36 | Uniform call for framework to launch function in a way to single out the
37 | calls per URL
38 | """
39 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
40 | bc.CCYN + "AdvanceBackgroundChecks" + bc.CEND)
41 |
42 | self.abc_try(lookup, information)
43 |
44 | def check_for_captcha(self):
45 | """
46 | Check for CAPTCHA, if proxy enabled,try new proxy w/ request, else
47 | report to STDOUT about CAPTCHA
48 | """
49 | captcha = self.soup.find('div', attrs={'class': 'g-recaptcha'})
50 |
51 | if bi.webproxy and captcha is not None:
52 | try:
53 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
54 | "Switching proxy, trying again...\n" + bc.CEND)
55 | bi.proxy = proxygrabber.new_proxy()
56 |                 self.abc_try(self.information, self.lookup)
57 | return True
58 | except Exception as badproxy:
59 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
60 | "Bad proxy...\n" + bc.CEND)
61 | pass
62 | if captcha is not None:
63 | print(
64 | " [" +
65 | bc.CRED +
66 | "X" +
67 | bc.CEND +
68 | "] " +
69 | bc.CYLW +
70 |                 "CAPTCHA detected, use a proxy or complete challenge in browser\n" +
71 | bc.CEND)
72 | return True
73 | else:
74 | return False
75 |
76 | def makephone(self, information):
77 | """
78 | Format the phone number splitting on
79 | whitespace or hyphens
80 | """
81 | try:
82 | if str(information).split("-")[1]:
83 | dashphone = information
84 | return dashphone
85 | except BaseException:
86 | pass
87 |
88 | try:
89 | if str(information).split(" ")[1]:
90 |                 dashphone = '{}-{}-{}'.format(  # "123 456 7890" -> "123-456-7890"
91 |                     information[0:3], information[4:7], information[8:])
92 | return dashphone
93 | except BaseException:
94 | pass
95 |
96 | try:
97 | # If len of data is 10 and is an integer, break and format
98 | # as needed for URL
99 | if len(information) == 10:
100 | dashphone = '{}-{}-{}'.format(
101 | information[0:3], information[3:6], information[6:])
102 | return dashphone
103 | if len(information) != 10:
104 | print(
105 | " [" +
106 | bc.CRED +
107 | "X" +
108 | bc.CEND +
109 | "] " +
110 | bc.CYLW +
111 | "Check search string, should be 10 digits.\n" +
112 | bc.CEND)
113 | return
114 | except BaseException:
115 | return
116 |
117 | def grab_phone(self, information):
118 | """
119 | Create phone number format
120 | """
121 | try:
122 | self.num = self.makephone(information)
123 | if self.num is None:
124 | return
125 | self.url = "https://www.advancedbackgroundchecks.com/{}".format(
126 | self.num)
127 | except Exception as e:
128 | print(
129 | " [" +
130 | bc.CRED +
131 | "X" +
132 | bc.CEND +
133 | "] " +
134 | bc.CYLW +
135 | "Could not produce required URL.\n" +
136 | bc.CEND)
137 | return
138 |
139 | def grab_email(self, information):
140 | """
141 | Grab the targets email
142 | """
143 |
144 |         if str(information).split('@')[1]:
145 |             # base64-encode the address for the /emails/ URL rather
146 |             # than slicing the bytes repr out of a str() round-trip
147 |             email = b64.b64encode(
148 |                 information.encode('utf-8')).decode('ascii')
149 |             self.url = "https://www.advancedbackgroundchecks.com/emails/" + email
150 |
151 | def grab_name(self, information):
152 | """
153 | Grab the targets Name
154 | """
155 | if str(information).split(' ')[1]:
156 | self.url = "https://www.advancedbackgroundchecks.com/name/{}".format(
157 | str(information).replace(' ', '-'))
158 |
159 | def find_results(self, lookup):
160 | """
161 | Check if the search found any results.
162 | If so return them, otherwise return an
163 | empty string
164 | """
165 | script_html = ""
166 |
167 | if self.soup.find(
168 | 'div', {'id': 'no-result-widgets'}): # Report if there are no results to STDOUT
169 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
170 | bc.CYLW + "No results were found.\n" + bc.CEND)
171 | return script_html
172 |
173 | checkres = self.soup.findAll("h1")
174 |
175 | if lookup == "phone":
176 | for xcheck in checkres:
177 | if xcheck.text in [
178 | "We could not find any results based on your search criteria. Please review your search and try again, or try our sponsors for more information.", "Top Results for " + str(self.num)]:
179 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
180 | bc.CYLW + "No results were found.\n" + bc.CEND)
181 | return script_html
182 |
183 | script_html = self.soup.find_all(
184 | 'script', type="application/ld+json") # Scrape for JSON within DOM
185 |
186 | return script_html
187 |
188 | def grab_json_data(self, script_html):
189 | """
190 | Grab the JSON data and load it
191 | """
192 | script_html = script_html.get_text().strip() # Format data for JSON load
193 | script_html = script_html.replace("\n", "")
194 | script_html = script_html.replace("\t", "")
195 | person_list = json.loads(script_html) # Loads data as JSON
196 | return person_list
197 |
198 |     def get_person_list(self, person_list):
199 |         """
200 |         Iterate through the person list
201 |         and grab results
202 |         """
203 |         pnext = 0  # counts results already printed
204 |         for person in person_list:
205 |             addrfirst = 0
206 |             address_list = []  # addresses collected for this person
207 |             if pnext >= 1:
208 |                 print(" [" + bc.CGRN + "!" + bc.CEND + "] " +
209 |                       bc.CRED + "Next finding: " + bc.CEND)
210 | self.url2 = person['@id'] # set additional 2nd level URL
211 | self.source2 = self.get_source(self.url2) # request 2nd level url
212 | self.soup2 = self.get_dom(self.source2) # grab 2nd level DOM
213 | script_html2 = self.soup2.find_all(
214 | 'script', type="application/ld+json") # Scrape for JSON within DOM
215 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " + bc.CRED +
216 | "Name: " + bc.CEND + str(person.get("name")))
217 |
218 | if person.get("birthDate"): # Set DoB
219 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " + bc.CRED +
220 | "D.o.B: " + bc.CEND + str(person.get("birthDate")))
221 |
222 | if person.get("additionalName"): # Set additional names AKA
223 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
224 | bc.CRED + "Alias: " + bc.CEND)
225 |
226 | for xaka in person.get(
227 | "additionalName"): # For each AKA, select the name
228 | print(
229 | " [" +
230 | bc.CGRN +
231 | "=" +
232 | bc.CEND +
233 | "] " +
234 | bc.CRED +
235 | "AKA: " +
236 | bc.CEND +
237 | str(xaka))
238 |
239 | if len(script_html2) <= 1:
240 | print(
241 | " [" +
242 | bc.CRED +
243 | "X" +
244 | bc.CEND +
245 | "] " +
246 | bc.CYLW +
247 | "Unable to re-try request... Try again later...\n" +
248 | bc.CEND)
249 | return
250 | else:
251 | script_html2 = script_html2[1]
252 | script_html2 = script_html2.get_text().strip() # Format data for JSON load
253 | script_html2 = script_html2.replace("\n", "")
254 | script_html2 = script_html2.replace("\t", "")
255 |                 person_list2 = json.loads(script_html2)  # Load data as JSON
256 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
257 | bc.CRED + "Phone: " + bc.CEND)
258 |
259 | for tele in person_list2['telephone']:
260 | print(
261 | " [" +
262 | bc.CGRN +
263 | "=" +
264 | bc.CEND +
265 | "] " +
266 | bc.CRED +
267 | "#: " +
268 | bc.CEND +
269 | str(tele))
270 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
271 | bc.CRED + "Email: " + bc.CEND)
272 |
273 | for email in person_list2['email']:
274 | print(" [" + bc.CGRN + "=" + bc.CEND + "] " +
275 | bc.CRED + "Addr: " + bc.CEND + str(email))
276 |
277 | if person.get("address"): # Set Addresses
278 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
279 | bc.CRED + "Addresses.: " + bc.CEND)
280 | for addy in person.get(
281 | "address"): # For each address, select the information and store
282 | addrfirst += 1
283 | if addrfirst == 1:
284 | print(
285 | " [" +
286 | bc.CGRN +
287 | "=" +
288 | bc.CEND +
289 | "] " +
290 | bc.CRED +
291 | "Current Address: " +
292 | bc.CEND)
293 | else:
294 | print(
295 | " [" +
296 | bc.CGRN +
297 | "=" +
298 | bc.CEND +
299 | "] " +
300 | bc.CRED +
301 | "Prev. Address: " +
302 | bc.CEND)
303 | print(" [" +
304 | bc.CGRN +
305 | "-" +
306 | bc.CEND +
307 | "] " +
308 | bc.CRED +
309 | "Street: " +
310 | bc.CEND +
311 | str(addy.get("streetAddress")))
312 | print(" [" +
313 | bc.CGRN +
314 | "-" +
315 | bc.CEND +
316 | "] " +
317 | bc.CRED +
318 | "City: " +
319 | bc.CEND +
320 | str(addy.get("addressLocality")))
321 | print(" [" +
322 | bc.CGRN +
323 | "-" +
324 | bc.CEND +
325 | "] " +
326 | bc.CRED +
327 | "State: " +
328 | bc.CEND +
329 | str(addy.get("addressRegion")))
330 | print(" [" +
331 | bc.CGRN +
332 | "-" +
333 | bc.CEND +
334 | "] " +
335 | bc.CRED +
336 | "ZipCode: " +
337 | bc.CEND +
338 | str(addy.get("postalCode")))
339 | address_list.append({"city": addy.get("addressLocality"),
340 | "state": addy.get("addressRegion"),
341 | "zip_code": addy.get("postalCode"),
342 | "address": addy.get("streetAddress")})
343 |
344 | if person.get("relatedTo"): # Set Relatives
345 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
346 | bc.CRED + "Related: " + bc.CEND)
347 | for xrelate in [item.get("name") for item in person.get(
348 | "relatedTo")]: # For each relative, select the information and store
349 | print(
350 | " [" +
351 | bc.CGRN +
352 | "=" +
353 | bc.CEND +
354 | "] " +
355 | bc.CRED +
356 | "Known Relative: " +
357 | bc.CEND +
358 | str(xrelate))
359 | self.info_list.append({"name": person.get("name"),
360 | "birth_date": person.get("birthDate"),
361 | "additional_names": person.get("additionalName"),
362 | "telephone": person_list2['telephone'],
363 | "email": person_list2['email'],
364 | "address_list": address_list,
365 | "related_to": [item.get("name") for item in person.get("relatedTo")]})
366 | pnext += 1
367 |
368 | def abc_try(self, information, lookup):
369 | """
370 |         Determines the URL construct based on user-supplied data
371 | """
372 |
373 |         self.information, self.lookup = information, lookup  # stash for CAPTCHA re-try
374 | if lookup == "phone":
375 | self.grab_phone(information)
376 |
377 | if lookup == "email": # Make the URL for email lookup, set email True
378 | self.grab_email(information)
379 |
380 | if lookup == "name": # Make the URL for name lookup, set email to False
381 | self.grab_name(information)
382 |
383 | self.source = self.get_source(self.url)
384 | self.soup = self.get_dom(self.source)
385 |
386 |         if self.check_for_captcha():
387 |
388 | print((" [" + bc.CRED + "X" + bc.CEND + "] " +
389 | bc.CYLW + "Goto: {}" + bc.CEND).format(self.url)
390 | )
391 |
392 | self.iscomplete = input(
393 | " [" + bc.CRED + "!" + bc.CEND + "] " + bc.CYLW +
394 | "Have you completed the CAPTCHA? " + bc.CEND
395 | )
396 |
397 |             if str(self.iscomplete).lower() in ('no', 'n', 'false', '0'):
398 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
399 | "User has not completed the CAPTCHA\n" + bc.CEND)
400 | return False
401 |
402 | script_html = self.find_results(lookup)
403 |
404 | if len(script_html) == 2: # Check len on results
405 | # Set the desired value to iterate over
406 | script_html = script_html[1]
407 | else:
408 | print(
409 | " [" +
410 | bc.CRED +
411 | "X" +
412 | bc.CEND +
413 | "] " +
414 | bc.CYLW +
415 | "Unable to complete request... Try again later...\n" +
416 | bc.CEND)
417 | return
418 |
419 | person_list = self.grab_json_data(script_html)
420 | self.get_person_list(person_list)
421 |
422 | print()
423 | return self.info_list
424 |
--------------------------------------------------------------------------------
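
AdvanceBackgroundGrabber never parses the visible HTML for person data; it pulls the JSON-LD blocks (`script type="application/ld+json"`) that the site embeds for search engines and walks them as plain dictionaries. A self-contained sketch of that technique, using only the plugin's own bs4 and json imports (the sample HTML is made up):

    import json
    from bs4 import BeautifulSoup

    # Made-up page snippet standing in for a fetched people-search result
    html = """
    <html><body>
    <script type="application/ld+json">
    {"@type": "Person", "name": "Alice Smith", "telephone": ["555-123-4567"]}
    </script>
    </body></html>
    """

    soup = BeautifulSoup(html, "html.parser")
    for tag in soup.find_all("script", type="application/ld+json"):
        # Each matching tag's text is a JSON document describing an entity
        person = json.loads(tag.get_text().strip())
        print(person.get("name"), person.get("telephone"))
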
/src/skiptracer/plugins/advance_background_checks/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/advance_background_checks/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/base.py:
--------------------------------------------------------------------------------
1 | """Base Scraping Class"""
2 | from __future__ import print_function
3 | from __future__ import absolute_import
4 | from lxml import etree
5 | from bs4 import BeautifulSoup
6 | from . import proxygrabber
7 | from dotenv import dotenv_values
8 |
9 | import requests
10 | import random
11 | import json
12 | 
13 | # monkey patch socket to use only IPv4
14 | import socket
15 | import pkg_resources
16 | import builtins as bi
17 |
18 | og = socket.getaddrinfo
19 |
20 |
21 | def ng(*args, **kwargs):
22 | res = og(*args, **kwargs)
23 | return [r for r in res if r[0] == socket.AF_INET]
24 |
25 |
26 | socket.getaddrinfo = ng
27 |
28 |
29 | def random_line():
30 | """
31 | Gets random User-Agent string from local DB file
32 | """
33 | get_user_agents = pkg_resources.resource_filename(
34 | 'skiptracer', '../../storage/user-agents.db')
35 | afile = open(get_user_agents)
36 | line = next(afile)
37 | for num, aline in enumerate(afile):
38 | if random.randrange(num + 2):
39 | continue
40 | line = aline
41 | return line.strip()
42 |
43 |
44 | class PageGrabber:
45 | """
46 | Base PageGrabber Class
47 | Base function to import request functionality in modules
48 | """
49 |
50 | def __init__(self):
51 | """
52 | Initialize defaults as needed
53 | """
54 | self.env = dotenv_values()
55 | self.info_dict = {}
56 | self.info_list = []
57 | self.ua = random_line()
58 | self.proxy = {}
59 |
60 | def get_source(self, url):
61 | """
62 | Returns source code from given URL
63 | """
64 | headers = {"User-Agent": self.ua}
65 | reqcom = 0
66 | requests.packages.urllib3.disable_warnings()
67 | results = ""
68 |
69 | while reqcom < 5:
70 | try:
71 |                 if bi.proxy != '':
72 |                     # bi.proxy is "proto://host:port"; requests wants the
73 |                     # full proxy URL keyed by its scheme
74 |                     self.proxy = {str(bi.proxy).split(":")[0].strip(): str(bi.proxy).strip()}
75 | results = requests.get(
76 | url,
77 | headers=headers,
78 | proxies=self.proxy,
79 | timeout=10,
80 | verify=False,
81 | allow_redirects=True
82 | ).text
83 | else:
84 | results = requests.get(
85 | url,
86 | headers=headers,
87 | timeout=10,
88 | verify=False,
89 | allow_redirects=True
90 | ).text
91 | reqcom = 5
92 | except Exception as failedreq:
93 | if bi.webproxy:
94 | bi.proxy = proxygrabber.new_proxy()
95 | reqcom = reqcom + 1
96 | else:
97 | print(failedreq)
98 | reqcom = reqcom + 1
99 | return results.encode('ascii', 'ignore').decode("utf-8")
100 |
101 | def post_data(self, url, data):
102 | """
103 | Sends POST request of given DATA, URL
104 | """
105 | headers = {"User-Agent": self.ua}
106 | reqcom = 0
107 | requests.packages.urllib3.disable_warnings()
108 | while reqcom == 0:
109 | try:
110 | results = requests.post(
111 | url,
112 | headers=headers,
113 | proxies=self.proxy,
114 | timeout=10,
115 | verify=False,
116 | allow_redirects=True,
117 |                     data=data
118 | ).text
119 | reqcom = 1
120 | return results.encode('ascii', 'ignore').decode("utf-8")
121 | except Exception as failedreq:
122 | if bi.webproxy:
123 | bi.proxy = proxygrabber.new_proxy()
124 | return
125 |
126 | def get_dom(self, source):
127 | """
128 | Returns BeautifulSoup DOM
129 | """
130 | return BeautifulSoup(source, 'lxml')
131 |
132 | def get_html(self, source):
133 | """
134 | Returns BeautifulSoup DOM
135 | """
136 | return BeautifulSoup(source, 'html.parser')
137 |
--------------------------------------------------------------------------------
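
base.py is the contract every plugin builds on: get_source() handles User-Agent rotation, proxying, and retries; get_dom()/get_html() wrap BeautifulSoup; and results accumulate in info_dict/info_list. A minimal sketch of a new plugin following those conventions (ExampleGrabber and example.com are placeholders; the relative imports mean this only runs inside the plugins package, driven by the framework):

    from ..base import PageGrabber
    from ...colors.default_colors import DefaultBodyColors as bc


    class ExampleGrabber(PageGrabber):
        """Placeholder plugin sketching the PageGrabber call pattern."""

        def get_info(self, information, lookup):
            print("[" + bc.CPRP + "?" + bc.CEND + "] " + bc.CCYN + "Example" + bc.CEND)
            # 1. Fetch through the shared User-Agent/proxy/retry logic
            source = self.get_source("https://example.com/search?q=" + information)
            # 2. Parse the response into a BeautifulSoup DOM
            soup = self.get_dom(source)
            # 3. Scrape what you need and stash it for the framework
            title = soup.find("title")
            self.info_dict.update({"title": title.text if title else "Unknown"})
            return self.info_dict
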
/src/skiptracer/plugins/fouroneone/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from __future__ import absolute_import
3 | from ..base import PageGrabber
4 | from ...colors.default_colors import DefaultBodyColors as bc
5 | import re
6 | import logging
7 | try:
8 | import __builtin__ as bi
9 | except BaseException:
10 | import builtins as bi
11 |
12 |
13 | class FourOneOneGrabber(PageGrabber):
14 | """
15 | 411.com scraper for reverse telephone lookups
16 | """
17 |
18 | def get_info(self, phone_number, lookup):
19 | """
20 | returns information about given telephone number
21 | """
22 | print("[" + bc.CPRP + "?" + bc.CEND + "] " + bc.CCYN + "411" + bc.CEND)
23 | url = 'https://411.info/reverse/?r={}'.format(phone_number)
24 | source = self.get_source(url)
25 |
26 | try:
27 | soup = self.get_dom(source)
28 | name = soup.find('div', attrs={'class': 'cname'})
29 | if name:
30 | name = name.text.strip()
31 | else:
32 | name = "Unknown"
33 | except BaseException:
34 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
35 | "No source returned, try again later ...\n" + bc.CEND)
36 | return
37 |
38 | for itemText in soup.find_all(
39 | 'div', attrs={'class': re.compile('adr_.*')}):
40 | street = itemText.find('span', itemprop='streetAddress')
41 | if street:
42 | street = street.text.replace("\t", "").replace(",", "")
43 | street = street.strip()
44 | else:
45 | street = "Unknown"
46 | town = itemText.find('span', itemprop='addressLocality')
47 | if town:
48 | town = town.text.strip()
49 | else:
50 | town = "Unknown"
51 | state = itemText.find('span', itemprop='addressRegion')
52 | if state:
53 | state = state.text.strip()
54 | else:
55 | state = "Unknown"
56 | zipcode = itemText.find('span', itemprop='postalCode')
57 | if zipcode:
58 | zipcode = zipcode.text.strip()
59 | else:
60 | zipcode = "Unknown"
61 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
62 | bc.CRED + "Name: " + bc.CEND + str(name))
63 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
64 | bc.CRED + "Street: " + bc.CEND + str(street))
65 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
66 | bc.CRED + "State: " + bc.CEND + str(state))
67 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
68 | bc.CRED + "City: " + bc.CEND + str(town))
69 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
70 | bc.CRED + "Zip: " + bc.CEND + str(zipcode))
71 | self.info_dict.update({
72 | "name": name,
73 | "street": street,
74 | "town": town,
75 | "state": state,
76 | "zipcode": zipcode
77 | })
78 |
79 | if len(self.info_dict) == 0:
80 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
81 | "No source returned, try again later ...\n" + bc.CEND)
82 | return {}
83 | else:
84 | print()
85 | return self.info_dict
86 |
--------------------------------------------------------------------------------
/src/skiptracer/plugins/fouroneone/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/fouroneone/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/haveibeenpwned/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from __future__ import absolute_import
3 | from ..base import PageGrabber
4 | from ...colors.default_colors import DefaultBodyColors as bc
5 | from .. import proxygrabber
6 |
7 | import logging
8 | import json
9 | import ast
10 | import cfscrape
11 | try:
12 | import __builtin__ as bi
13 | except BaseException:
14 | import builtins as bi
15 |
16 |
17 | class HaveIBeenPwwnedGrabber(PageGrabber):
18 |     """
19 |     haveibeenpwned.com scraper for email compromise lookups
20 |     """
21 |
22 | def get_info(self, email, category):
23 | """
24 | Uniform call for framework
25 | """
26 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
27 | bc.CCYN + "HaveIbeenPwned" + bc.CEND)
28 | self.count = 0
29 | self.resurl = 0
30 | self.trymore(email)
31 |
32 | def trymore(self, email):
33 | """
34 | Actual logic for lookup and re-try
35 | """
36 | while self.resurl == 0:
37 |
38 | self.count += 1
39 | url = 'https://haveibeenpwned.com/api/v3/breachedaccount/{}'.format(
40 | email)
41 |
42 | scraper = cfscrape.create_scraper()
43 | headers = {
44 | 'user-agent': self.ua,
45 | 'hibp-api-key': self.env['HAVEIBEENPWNED_API_KEY']
46 | }
47 | self.source = scraper.get(url, headers=headers).content
48 | self.source = str(
49 | self.source).replace(
50 | "true",
51 | "True").replace(
52 | "false",
53 | "False")
54 |
55 | self.source = ast.literal_eval(self.source) # cast string to bytes
56 | self.source = self.source.decode('utf8') # decode string
57 | try:
58 | self.source = ast.literal_eval(self.source) # cast string to dict
59 | except:
60 | print("Probably being rate limited/blocked")
61 | print(self.source)
62 | return {}
63 |
64 |             self.resurl = 1
65 |             if isinstance(self.source, dict) and self.source.get('statusCode') == 401:
66 |                 print(str(self.source['statusCode']) + " : " + self.source['message'])
67 |                 return {}
68 |             self.result = None
69 |             for dataset in self.source:  # keeps only the last breach entry
70 |                 self.result = dataset
71 | 
72 | 
73 | if self.result:
74 | print (self.result)
75 | self.breach = self.result['BreachDate']
76 | self.domain = self.result['Domain']
77 | self.title = self.result['Title']
78 | self.exposes = self.result['DataClasses']
79 | self.info_dict.update(
80 | {
81 | "BreachDate": self.breach,
82 | "Domain": self.domain,
83 | "Title": self.title,
84 | "DataExposed": self.exposes})
85 | print(
86 | " [" +
87 | bc.CGRN +
88 | "+" +
89 | bc.CEND +
90 | "] " +
91 | bc.CRED +
92 | "Dump Name: " +
93 | bc.CEND +
94 | self.title)
95 | print(
96 | " [" +
97 | bc.CGRN +
98 | "=" +
99 | bc.CEND +
100 | "] " +
101 | bc.CRED +
102 | "Domain: " +
103 | bc.CEND +
104 | self.domain)
105 | print(
106 | " [" +
107 | bc.CGRN +
108 | "=" +
109 | bc.CEND +
110 | "] " +
111 | bc.CRED +
112 | "Breach: " +
113 | bc.CEND +
114 | self.breach)
115 | print(
116 | " [" +
117 | bc.CGRN +
118 | "=" +
119 | bc.CEND +
120 | "] " +
121 | bc.CRED +
122 | "Exposes: " +
123 | bc.CEND)
124 | for xpos in self.exposes:
125 | print(
126 | " [" +
127 | bc.CGRN +
128 | "-" +
129 | bc.CEND +
130 | "] " +
131 | bc.CRED +
132 | "DataSet: " +
133 | bc.CEND +
134 | xpos)
135 | else:
136 | print(
137 | " [" +
138 | bc.CRED +
139 | "X" +
140 | bc.CEND +
141 | "] " +
142 | bc.CYLW +
143 | "No results were found.\n" +
144 | bc.CEND)
145 |
146 | print()
147 | return self.info_dict
148 |
--------------------------------------------------------------------------------
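
The cfscrape round-trip and str()/literal_eval juggling above can be side-stepped: HaveIBeenPwned's v3 API is a plain authenticated GET returning JSON. A minimal sketch with requests (the address is an example; truncateResponse=false asks for full breach objects, and HIBP signals a clean account with a 404):

    import os
    import requests

    api_key = os.environ["HAVEIBEENPWNED_API_KEY"]  # as provisioned in .env

    resp = requests.get(
        "https://haveibeenpwned.com/api/v3/breachedaccount/alice@example.com",
        headers={"hibp-api-key": api_key, "user-agent": "skiptracer-example"},
        params={"truncateResponse": "false"},  # include BreachDate, Domain, DataClasses
        timeout=10,
    )
    if resp.status_code == 200:
        for breach in resp.json():  # JSON list, one object per breach
            print(breach["Title"], breach["BreachDate"], breach["DataClasses"])
    elif resp.status_code == 404:
        print("No breaches found")
    else:
        print("Error:", resp.status_code)  # e.g. 401 for a bad or missing key
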
/src/skiptracer/plugins/haveibeenpwned/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/haveibeenpwned/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/knowem/__init__.py:
--------------------------------------------------------------------------------
1 | from ..base import PageGrabber
2 | from ...colors.default_colors import DefaultBodyColors as bc
3 | import re
4 | import logging
5 | import requests
6 |
7 |
8 | class KnowemGrabber(PageGrabber):
9 | """
10 | knowem.com scraper for screenname lookups
11 | by: 0daysimpson & illwill
12 | """
13 |
14 | def get_info(self, username, type):
15 | """
16 |         returns information about given handle
17 | """
18 | try:
19 | username = username.split("@")[0]
20 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
21 | bc.CCYN + "Knowem" + bc.CEND)
22 | url = "https://knowem.com/usercheckv2.php?target="
23 | networks = ["Blogger", "BuzzFeed", "DailyMotion", "Etsy",
24 | "facebook", "foursquare", "Hubpages", "Imgur", "Issuu",
25 | "LinkedIn", "LiveJournal", "MySpace", "Photobucket", "Pinterest",
26 | "reddit", "scribd", "soundcloud", "Tumblr", "Twitter",
27 | "Typepad", "vimeo", "Wordpress", "YouTube"]
28 | for social in networks:
29 | request_url = url + social + "&username=" + username
30 | user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36"
31 | headers = {
32 | "referer": (
33 | "https://knowem.com/checkusernames.php?u=" +
34 | username),
35 | "X-Requested-With": "XMLHttpRequest",
36 | "User-Agent": user_agent}
37 | response = requests.get(url=request_url, headers=headers).text
38 |                 if (re.search(pattern='Sorry', string=response)):  # "Sorry..." means the name is taken, i.e. an account exists
39 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
40 | bc.CRED + "Account: " + bc.CEND + str(social))
41 | self.info_dict.update({
42 | "Account": social})
43 | except Exception as staging:
44 |             print((" [" + bc.CRED + "DEBUG" + bc.CEND + "] " +
45 |                    bc.CYLW + "Failed at staging: %s" + bc.CEND) % staging)
46 |
47 | if len(self.info_dict) == 0:
48 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
49 | "No source returned, try again later ...\n" + bc.CEND)
50 | return
51 | else:
52 | print("")
53 | return
54 |
--------------------------------------------------------------------------------
/src/skiptracer/plugins/knowem/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/knowem/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/linkedin/__init__.py:
--------------------------------------------------------------------------------
1 | from bs4 import BeautifulSoup
2 | from ..base import PageGrabber
3 | from ...colors.default_colors import DefaultBodyColors as bc
4 | import requests
5 | import configparser
6 | import pkg_resources
7 |
8 |
9 | class LinkedInSalesGrabber(PageGrabber):
10 | """
11 | LinkedIn.com sales scraper for email lookups
12 | """
13 |
14 | config = []
15 | soup = ""
16 | client = {}
17 | homepageurl = ""
18 | loginurl = ""
19 | logouturl = ""
20 | viewbyemail = ""
21 | login_information = {
22 | 'session_key': '',
23 | 'session_password': '',
24 | 'loginCsrfParam': '',
25 | }
26 |
27 | def __init__(self):
28 | """
29 | Load up LinkedIn plugin configs
30 | """
31 | super(LinkedInSalesGrabber, self).__init__()
32 | self.config = configparser.ConfigParser()
33 | get_plugin_cats = pkg_resources.resource_filename(
34 | 'skiptracer', '../../setup.cfg')
35 | self.config.read(get_plugin_cats)
36 | self.homepageurl = self.config['plugin.linkedin']['homepageurl']
37 | self.loginurl = self.config['plugin.linkedin']['loginurl']
38 | self.logouturl = self.config['plugin.linkedin']['logouturl']
39 | self.viewbyemail = self.config['plugin.linkedin']['viewbyemail']
40 | self.login_information['session_key'] = self.config['plugin.linkedin']['sessionkey']
41 | self.login_information['session_password'] = self.config['plugin.linkedin']['sessionpassword']
42 | self.client = requests.Session() # Establish the session()
43 | source = self.client.get(self.homepageurl).content # Request source
44 | self.soup = self.get_dom(source) # BS DOM
45 |
46 | def grab_data(self, el, attr, attrval, title, gettext):
47 | """
48 | Pass to this function the following:
49 | el = element to find e.g. div
50 | attr = attribute to find e.g. class or id
51 | attrval = attribute value to find e.g. li-profile-name
52 | title = Title to display is results e.g. Name, Phone
53 | """
54 |
55 |         try:
56 |             if gettext == False:
57 |                 val = self.grab_data_text(el, attr, attrval)
58 |             else:
59 |                 val = self.grab_data_attr(el, attr, attrval, gettext)
60 | 
61 |             print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
62 |                   bc.CRED + title + ": " + bc.CEND + str(val))
63 | except BaseException:
64 | val = ""
65 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
66 | "No " + title + " can be found.\n" + bc.CEND)
67 | pass
68 | return val
69 |
70 | def grab_data_text(self, el, attr, attrval):
71 | """
72 | Pass to this function the following:
73 | el = element to find e.g. div
74 | attr = attribute to find e.g. class or id
75 | attrval = attribute value to find e.g. li-profile-name
76 | """
77 |
78 | return self.soup.find(el, {attr: attrval}).get_text()
79 |
80 |     def grab_data_attr(self, el, attr, attrval, gettext):
81 |         """
82 |         Pass to this function the following:
83 |         el = element to find e.g. div
84 |         attr = attribute to find e.g. class or id
85 |         attrval = attribute value to find e.g. li-profile-name
86 |         gettext = attribute to grab e.g. href
87 |         """
88 | 
89 |         return self.soup.find(el, attrs={attr: attrval})[gettext]
90 |
91 | def grab_name(self):
92 | """
93 | Grabs a first + last name from LinkedIn DOM
94 | and constructs a single name string
95 | """
96 |
97 | try:
98 | fname = self.grab_data(
99 | 'span',
100 | 'id',
101 | 'li-profile-name',
102 | 'First name',
103 | 'data-fname')
104 | lname = self.grab_data(
105 | 'span',
106 | 'id',
107 | 'li-profile-name',
108 | 'Last name',
109 | 'data-lname')
110 | name = str(fname) + " " + str(lname)
111 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " + bc.CRED +
112 | "Name: " + bc.CEND + str(fname) + " " + str(lname))
113 | except BaseException:
114 | name = ""
115 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
116 | bc.CYLW + "No username can be found.\n" + bc.CEND)
117 | return name
118 |
119 | def grab_csrf(self):
120 | """
121 | Grab CSRF token
122 | """
123 | csrf = ""
124 | try:
125 | csrf = self.soup.find(id="loginCsrfParam-login")['value']
126 | except BaseException:
127 | print("No CSRF token found, skipping")
128 | finally:
129 | self.login_information['loginCsrfParam'] = csrf
130 |
131 | def get_info(self, email, category):
132 | """
133 | Requires AUTH, login and request AUTHENTICATED pages from linkedin
134 | """
135 |
136 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
137 | bc.CCYN + "LinkedIn" + bc.CEND)
138 | self.grab_csrf()
139 |
140 | if self.login_information['session_key'] == '':
141 | # If no modifications of default u/p, print error, return
142 |             if self.login_information['session_password'] == '':
143 | print(
144 | " [" +
145 | bc.CRED +
146 | "X" +
147 | bc.CEND +
148 | "] " +
149 | bc.CYLW +
150 | "This module requires authentication to use it properly.\n" +
151 | bc.CEND)
152 | return
153 |
154 | results = "None"
155 | try:
156 | self.client.post(self.loginurl, data=self.login_information)
157 |             results = self.client.get(self.viewbyemail + str(email)).text
158 | except Exception as failedlinkedinauth:
159 | print((" [" +
160 | bc.CRED +
161 | "X" +
162 | bc.CEND +
163 | "] " +
164 | bc.CYLW +
165 | "This module did not properly authenticate: %s" +
166 | bc.CEND) %
167 | failedlinkedinauth)
168 |
169 | self.soup = self.get_dom(results)
170 | self.get_source(self.logouturl) # Log out of LinkedIn, kills sessionID
171 | profile = self.grab_data('a', 'class', 'li-hover-under li-txt-black-85',
172 | 'Profile', 'href')
173 | name = self.grab_name()
174 | location = self.grab_data('div', 'class', 'li-user-location',
175 | 'Location', False)
176 | company = self.grab_data('span', 'class', 'li-user-title-company',
177 | 'Company', False)
178 | title = self.grab_data('div', 'class', 'li-user-title',
179 | 'Job Title', False)
180 | email = self.grab_data('span', 'id', 'email', 'Email', False)
181 |
182 | self.info_dict.update({
183 | "profile": profile,
184 | "name": name,
185 | "location": location,
186 | "company": company,
187 | "title": title,
188 | "email": email
189 | })
190 |
191 | print()
192 | return self.info_dict
193 |
--------------------------------------------------------------------------------
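
The LinkedIn plugin is the only one here that pulls its endpoints and credentials from configuration: __init__ resolves setup.cfg through pkg_resources and reads the [plugin.linkedin] section. A sketch of that configparser convention (the section values below are illustrative placeholders, not the ones shipped in setup.cfg):

    import configparser

    # Illustrative stand-in for the [plugin.linkedin] section read in __init__;
    # the real keys live in the repo's setup.cfg
    cfg_text = """
    [plugin.linkedin]
    homepageurl = https://www.linkedin.com/
    loginurl = https://www.linkedin.com/login
    logouturl = https://www.linkedin.com/logout
    viewbyemail = https://www.linkedin.com/sales/gmail/profile/viewByEmail/
    sessionkey = you@example.com
    sessionpassword = hunter2
    """

    config = configparser.ConfigParser()
    config.read_string(cfg_text)
    section = config["plugin.linkedin"]
    print(section["homepageurl"], section["sessionkey"])
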
/src/skiptracer/plugins/linkedin/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/linkedin/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/myspace/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | #######################################################################
3 | # myspace scraper - returns user url of email address #
4 | #######################################################################
5 |
6 | from ..base import PageGrabber
7 | from ...colors.default_colors import DefaultBodyColors as bc
8 | import re
9 | import logging
10 |
11 | try:
12 | import __builtin__ as bi
13 | except BaseException:
14 | import builtins as bi
15 |
16 |
17 | class MySpaceGrabber(PageGrabber):
18 | """
19 | Myspace.com scraper for email lookups
20 | """
21 |
22 | def __init__(self):
23 | """
24 | Load up MySpaceGrabber plugin configs
25 | """
26 | super(MySpaceGrabber, self).__init__()
27 |
28 | def get_name(self, soup):
29 | """
30 | Check if a name exists
31 | """
32 | name = False
33 | try:
34 | name = soup.select('h6')[0].text.strip()
35 | except BaseException:
36 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
37 | bc.CYLW + "No Myspace account found.\n" + bc.CEND)
38 | finally:
39 | return name
40 |
41 | def get_acct_dets(self, soup):
42 | """
43 | Get the account details
44 | """
45 | account = False
46 | try:
47 | accountr = soup.select('h6')[0].a.get('href').strip()
48 | account = "https://myspace.com{}".format(accountr)
49 | except BaseException:
50 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
51 | bc.CYLW + "No Myspace account found.\n" + bc.CEND)
52 | finally:
53 | return account
54 |
55 | def get_location_from_acct(self, account):
56 | """
57 | Get location data using the
58 | account data
59 | """
60 | location = "Unknown"
61 | try:
62 | source = self.get_source(account)
63 | soup = self.get_dom(source)
64 | location = soup.find('div', attrs={'class': 'location_white location '})[
65 | 'data-display-text']
66 | except BaseException:
67 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
68 | "Unable to find location data for " + account + ".\n" + bc.CEND)
69 | finally:
70 | return location
71 |
72 | def get_info(self, email, category):
73 | """
74 |         Looks up user accounts by the given email
75 | """
76 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
77 | bc.CCYN + "Myspace" + bc.CEND)
78 | url = 'https://myspace.com/search/people?q={}'.format(email)
79 | source = self.get_source(url)
80 | soup = self.get_dom(source)
81 | name = self.get_name(soup)
82 | location = "Unknown"
83 | account = "Not found"
84 |
85 | if name != False:
86 | account = self.get_acct_dets(soup)
87 | if account != False:
88 | location = self.get_location_from_acct(account)
89 |
90 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
91 | bc.CRED + "Acct: " + bc.CEND + str(account))
92 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
93 | bc.CRED + "Name: " + bc.CEND + str(name))
94 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
95 | bc.CRED + "Loc: " + bc.CEND + str(location) + "\n")
96 | self.info_dict.update({
97 | "name": name,
98 | "account": account,
99 | "location": location,
100 | })
101 |
102 | return self.info_dict
103 |
--------------------------------------------------------------------------------
/src/skiptracer/plugins/myspace/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/myspace/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/namechk2/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | #
3 | # NameChk scraper: no1special
4 | #
5 | from bs4 import BeautifulSoup
6 | from lxml import html
7 | from requests.utils import quote
8 | from ..base import PageGrabber
9 | from ...colors.default_colors import DefaultBodyColors as bc
10 | import json
11 | import unicodedata
12 | import requests
13 | import lxml.html
14 |
15 | try:
16 | from urllib import urlencode
17 | except ImportError:
18 | from urllib.parse import urlencode
19 |
20 |
21 | class NameChkGrabber(PageGrabber):
22 |     """
23 |     Namechk.com scraper for screen name lookups
24 |     """
25 |
26 | def get_info(self, email, type):
27 | """
28 |         Looks up accounts for the screen name portion of the given email
29 | """
30 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
31 | bc.CCYN + "NameChk" + bc.CEND)
32 | username = str(email).split("@")[0]
33 | ses = requests.Session()
34 | webproxy = False # this needs to be a setting
35 | proxy = "" # placeholder for now
36 |
37 | if webproxy:
38 | proto = proxy.split("/")[0].split(":")[0]
39 |             r = ses.get('https://namechk.com/', proxies={proto: proxy})
40 | else:
41 | r = ses.get('https://namechk.com/')
42 | cookies = r.cookies.get_dict()
43 | services = ["facebook", "youtube", "twitter", "instagram",
44 | "blogger", "googleplus", "twitch", "reddit", "ebay", "wordpress",
45 | "pinterest", "yelp", "slack", "github", "basecamp", "tumblr",
46 | "flickr", "pandora", "producthunt", "steam", "myspace",
47 | "foursquare", "okcupid", "vimeo", "ustream", "etsy",
48 | "soundcloud", "bitbucket", "meetup", "cashme", "dailymotion",
49 | "aboutme", "disqus", "medium", "behance", "photobucket", "bitly",
50 | "cafemom", "coderwall", "fanpop", "deviantart", "goodreads",
51 | "instructables", "keybase", "kongregate", "livejournal",
52 | "stumbleupon", "angellist", "lastfm", "slideshare", "tripit",
53 | "fotolog", "paypal", "dribbble", "imgur", "tracky", "flipboard",
54 | "vk", "kik", "codecademy", "roblox", "gravatar", "trip", "pastebin",
55 | "coinbase", "blipfm", "wikipedia", "ello", "streamme", "ifttt",
56 | "webcredit", "codementor", "soupio", "fiverr", "trakt", "hackernews",
57 | "five00px", "spotify", "pof", "houzz", "contently", "buzzfeed",
58 | "tripadvisor", "hubpages", "scribd", "venmo", "canva", "creativemarket",
59 | "bandcamp", "wikia", "reverbnation", "wattpad", "designspiration",
60 | "colourlovers", "eyeem", "kanoworld", "askfm", "smashcast", "badoo",
61 | "newgrounds", "younow", "patreon", "mixcloud", "gumroad", "quora"]
62 | soup = self.get_dom(r.text)
63 | try:
64 | csrf = str(soup.find_all(name="meta")[-1]).split('"')[1]
65 | except Exception as e:
66 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
67 | bc.CYLW + "Could not find CSRF token.\n" + bc.CEND)
68 | pass # return # print e
69 | tree = html.fromstring(r.text)
70 |
71 |         def get_cookie(cookies):
72 |             for x in cookies.keys():
73 |                 return '{}:{}; '.format(x, cookies[x])
74 |
75 | def get_token():
76 | return list(
77 | set(tree.xpath("//input[@name='authenticity_token']/@value")))[0]
78 | token = get_token()
79 | headers = {"authority": "namechk.com",
80 | "method": "POST",
81 | "path": "/services/checks",
82 | "scheme": "https",
83 | "accept": "*/*;q=0.5, text/javascript, application/javascript, application/ecmascript, application/x-ecmascript",
84 | "accept-encoding": "gzip, deflate, br",
85 | "accept-language": "en-US,en;q=0.9",
86 | "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
87 | "origin": "https://namechk.com",
88 | "referer": "https://namechk.com/",
89 | "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36",
90 | "x-csrf-token": csrf,
91 | "x-requested-with": "XMLHttpRequest",
92 | }
93 |         ncook = "_ga=GA1.2.1058625756.1526852807; _gid=GA1.2.371808416.1526852807; _fssid=9c20a864-551e-470f-bd74-6640f9cc9058; __qca=P0-1810536716-1526852807185; _fsuid=e091827a-8a09-4cb9-b841-4bb78b6bc579; __gads=ID=6af13fe549a859bd:T=1526852808:S=ALNI_MZI5yxUiBsOz-2qmDmok0tVeISwvw;" + str(
94 |             get_cookie(cookies))
95 | headers['cookie'] = ncook
96 | data = [
97 | ('utf8', '%E2%9C%93'),
98 | ('authenticity_token', quote(token, safe="")),
99 | ('q', username),
100 | ('m', ''),
101 | ]
102 | if webproxy:
103 | proto = proxy.split("/")[0].split(":")[0]
104 | r = ses.post(
105 | 'https://namechk.com/',
106 | headers=headers,
107 | data=data,
108 | proxies={
109 | proto: proxy})
110 | else:
111 | r = ses.post('https://namechk.com/', headers=headers, data=data)
112 | try:
113 | cookies = r.cookies.get_dict()
114 |             cooked = str(get_cookie(cookies))
115 | except Exception as e:
116 | # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"Could not locate required cookies.\n"+bc.CEND)
117 | pass
118 | try:
119 | encres = r.text.encode('ascii', 'ignore').decode('utf8')
120 | encresdic = json.loads(encres)
121 | datareq = {}
122 | except Exception as e:
123 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
124 | "Could not load results into JSON format.\n" + bc.CEND)
125 | return # print e
126 |
127 | for xservice in services:
128 | for dictkey in encresdic.keys():
129 | datareq["token"] = quote(encresdic[dictkey], safe="")
130 | datareq['fat'] = quote(csrf, safe="")
131 | datastring = ""
132 | try:
133 | for datakey in datareq.keys():
134 | datastring += "{}={}&".format(datakey, datareq[datakey])
135 | datastring += "service={}".format(xservice)
136 | except Exception as e:
137 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
138 | bc.CYLW + "Could not find CSRF token.\n" + bc.CEND)
139 | return
140 | try:
141 | response = ses.post(
142 | 'https://namechk.com/services/check',
143 | headers=headers,
144 | data=datastring)
145 | jload = json.loads(response.text)
146 | if jload['available'] == False:
147 | if jload['callback_url'] == "":
148 | pass
149 | else:
150 | print(
151 | " [" +
152 | bc.CGRN +
153 | "+" +
154 | bc.CEND +
155 | "] " +
156 | bc.CRED +
157 | "Acct Exists: " +
158 | bc.CEND +
159 | "{}".format(
160 | jload['callback_url']))
161 |
162 | except Exception as e:
163 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
164 | "Could not find required datasets.\n" + bc.CEND)
165 | return # pass
166 | print()
167 | return
168 |
--------------------------------------------------------------------------------
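
Most of the NameChk plugin is devoted to replaying Rails-style CSRF protection: a csrf-token meta tag sent back as the x-csrf-token header, plus an authenticity_token hidden input posted with the form. The code scrapes the meta tag by string-splitting the last meta element on the page; a cleaner sketch of extracting both tokens with lxml (the HTML fragment is made up, and the csrf-token name assumes the usual Rails convention):

    from lxml import html

    # Made-up page fragment with the two Rails-style CSRF artifacts
    page = html.fromstring("""
    <html><head><meta name="csrf-token" content="META-TOKEN"></head>
    <body><form>
      <input type="hidden" name="authenticity_token" value="FORM-TOKEN">
    </form></body></html>
    """)

    csrf = page.xpath("//meta[@name='csrf-token']/@content")[0]
    token = page.xpath("//input[@name='authenticity_token']/@value")[0]
    print(csrf, token)  # replayed as the x-csrf-token header and form field
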
/src/skiptracer/plugins/namechk2/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/namechk2/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/plate/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 | from ..base import PageGrabber
3 | from ...colors.default_colors import DefaultBodyColors as bc
4 | import re
5 |
6 |
7 | try:
8 | raw_input # Python 2
9 | except NameError:
10 | raw_input = input # Python 3
11 |
12 |
13 | class VinGrabber(PageGrabber):
14 | """
15 | faxvin.com scraper for plate lookups
16 | """
17 |
18 | def get_info(self, plate, type):
19 | """
20 | returns information about given plate number
21 | """
22 | print(
23 | "[" +
24 | bc.CPRP +
25 | "?" +
26 | bc.CEND +
27 | "] " +
28 | bc.CCYN +
29 | "FaxVin" +
30 | bc.CEND)
31 | state = raw_input(
32 | " [" +
33 | bc.CRED +
34 | "!" +
35 | bc.CEND +
36 | "] " +
37 | bc.CYLW +
38 | "Please enter 2 letter abbreviated state - ex: (AL=Alabama|CO=Colorado) " +
39 | bc.CEND).upper()
40 | plate = plate.upper()
41 | url = 'https://www.faxvin.com/license-plate-lookup/result?plate={}&state={}'.format(
42 | plate, state)
43 | source = self.get_source(url)
44 | soup = self.get_html(source)
45 | if soup.body.find_all(string=re.compile(
46 |                 '.*{0}.*'.format('Sorry, the plate your currently looking for is not available.')), recursive=True):  # "your" (sic) matches the site's own wording
47 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
48 | bc.CYLW + "No plate found.\n" + bc.CEND)
49 | return
50 | try:
51 | table = soup.find('table', attrs={'class': 'tableinfo'})
52 | except BaseException:
53 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
54 | "No source returned, try again later ...\n" + bc.CEND)
55 | return
56 | try:
57 | cells = table.findAll("td")
58 | except BaseException:
59 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
60 | bc.CYLW + "No results were found ...\n" + bc.CEND)
61 | return
62 | vin = cells[0].b.text
63 | make = cells[1].b.text
64 | model = cells[2].b.text
65 | year = cells[3].b.text
66 | trim = cells[4].b.text
67 | style = cells[5].b.text
68 | engine = cells[6].b.text
69 | plant = cells[7].b.text
70 | age = cells[8].b.text
71 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
72 | bc.CRED + "Plate: " + bc.CEND + str(plate))
73 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
74 | bc.CRED + "State: " + bc.CEND + str(state))
75 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
76 | bc.CRED + "VIN: " + bc.CEND + str(vin))
77 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
78 | bc.CRED + "Make: " + bc.CEND + str(make))
79 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
80 | bc.CRED + "Model: " + bc.CEND + str(model))
81 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
82 | bc.CRED + "Year: " + bc.CEND + str(year))
83 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
84 | bc.CRED + "Trim: " + bc.CEND + str(trim))
85 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
86 | bc.CRED + "Style: " + bc.CEND + str(style))
87 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
88 | bc.CRED + "Engine: " + bc.CEND + str(engine))
89 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
90 | bc.CRED + "Plant: " + bc.CEND + str(plant))
91 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
92 | bc.CRED + "Age: " + bc.CEND + str(age))
93 |
94 | self.info_dict.update({
95 | "plate": plate,
96 | "state": state,
97 | "vin": vin,
98 | "make": make,
99 | "model": model,
100 | "year": year,
101 | "trim": trim,
102 | "style": style,
103 | "engine": engine,
104 | "plant": plant,
105 | "age": age
106 | })
107 |
108 | if len(self.info_dict) == 0:
109 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
110 | "No source returned, try again later ...\n" + bc.CEND)
111 | return
112 | else:
113 | print()
114 | return
115 |
--------------------------------------------------------------------------------
/src/skiptracer/plugins/plate/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/plate/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/proxygrabber.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from __future__ import absolute_import
3 | from lxml.html import fromstring
4 | from itertools import cycle
5 | from ..colors.default_colors import DefaultBodyColors as bc
6 |
7 | import requests
8 | import traceback
9 | import os
10 | import random
11 | import time
12 | 
13 |
14 | try:
15 | import __builtin__ as bi
16 | except BaseException:
17 | import builtins as bi
18 |
19 | # props to scrapehero for proxy cycler
20 | storage_dir = os.path.abspath(
21 | os.path.join(
22 | os.path.dirname(__file__),
23 | os.pardir,
24 | 'storage'))
25 | output_file = "%s%sproxies.txt" % (storage_dir, os.sep)
26 |
27 |
28 | def remove_proxy(fn, remline):
29 | """
30 | Removes a bad proxy from proxies.txt
31 | """
32 | f = open(fn, "r+")
33 | d = f.readlines()
34 | f.seek(0)
35 | for i in d:
36 | if i != str(remline):
37 | f.write(i)
38 | f.truncate()
39 | f.close()
40 |
41 |
42 | def write_file(d, fn):
43 | """
44 | used to write out files to disk
45 | """
46 | t = open(fn, "a")
47 | t.write(d)
48 | t.close()
49 | t = None
50 |
51 |
52 | def get_proxies():
53 | """
54 | Initial request to generate proxy list
55 | """
56 | print("\n[" + bc.CPRP + "?" + bc.CEND + "] " +
57 | bc.CCYN + "Proxy List Generator" + bc.CEND)
58 | url = 'https://free-proxy-list.net/'
59 | response = requests.get(url)
60 | parser = fromstring(response.text)
61 | proxies = set()
62 | for i in parser.xpath('//tbody/tr')[::]:
63 | if i.xpath('.//td[5][contains(text(),"elite proxy")]'):
64 | proxy = ":".join([i.xpath('.//td[1]/text()')[0],
65 | i.xpath('.//td[2]/text()')[0]])
66 | proxies.add(proxy)
67 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
68 | "Testing proxies, please wait till complete..." + bc.CEND)
69 | return proxies
70 |
71 |
72 | def new_proxy():
73 | """
74 | Select random proxy form list, if no list,
75 | generate a new one and test them for connectivity (living)
76 | """
77 | cwd = os.getcwd()
78 | try:
79 | now = time.time()
80 |         if os.stat(str(cwd) + '/storage/proxies.txt').st_mtime < now - 7 * 86400:  # older than a week
81 | os.remove(str(cwd) + '/storage/proxies.txt')
82 | with open(str(cwd) + '/storage/proxies.txt', 'r') as proxies:
83 | bi.proxy = str(random.choice(proxies.readlines())).strip()
84 | proxy = bi.proxy
85 | print(
86 | "\t [" +
87 | bc.CRED +
88 | "::ATTENTION::" +
89 | bc.CEND +
90 | "]" +
91 | bc.CYLW +
92 | " Proxy: " +
93 | bi.proxy +
94 | bc.CEND +
95 | " [" +
96 | bc.CRED +
97 | "::ATTENTION::" +
98 | bc.CEND +
99 | "]")
100 | return proxy
101 | except Exception as noproxyfile: # Start generating the proxy list
102 | proxies = get_proxies() # Call to grab results, returns a list
103 |         proxy_pool = cycle(proxies)  # round-robin iterator (currently unused)
104 | url = 'https://api.ipify.org?format=json'
105 | for i in range(1, 11): # Choose random 10 proxies and test from pool
106 | proxy = random.choice(list(proxies))
107 | for xproto in ['http', 'https']:
108 | try:
109 | print((" [" +
110 | bc.CRED +
111 | "X" +
112 | bc.CEND +
113 | "] " +
114 | bc.CYLW +
115 | "Testing %s proxy: %s" +
116 | bc.CEND) %
117 | (str(xproto).strip(), str(proxy).strip()))
118 | response = requests.get(
119 | url, proxies={xproto: proxy}, timeout=2)
120 | if response:
121 | write_file(
122 | str(xproto) +
123 | "://" +
124 | str(proxy) +
125 | "\n",
126 | output_file)
127 | except BaseException:
128 | pass
129 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
130 | "Finished testing proxies, continue.\n" + bc.CEND)
131 | bi.proxy = new_proxy()
132 | return bi.proxy
133 |
--------------------------------------------------------------------------------
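
proxygrabber persists working proxies as one proto://host:port line each, and consumers must translate a line into the mapping requests expects, keyed by scheme. A minimal sketch (the proxy address is made up; api.ipify.org is the same echo service the tester above uses):

    import requests

    # A line as stored in storage/proxies.txt (address is made up)
    line = "http://203.0.113.10:8080"

    scheme = line.split("://")[0]
    proxies = {scheme: line}  # requests expects {"http": "http://host:port"}

    # Echo service returning the egress IP as JSON
    resp = requests.get("https://api.ipify.org?format=json",
                        proxies=proxies, timeout=5)
    print(resp.json())  # {"ip": "203.0.113.10"} if the proxy worked
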
/src/skiptracer/plugins/tinder/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from __future__ import absolute_import
3 | #
4 | # Tinder Module - illwill
5 | #
6 | from ..base import PageGrabber
7 | from ...colors.default_colors import DefaultBodyColors as bc
8 | import re
9 | import logging
10 | import requests
11 |
12 |
13 | class TinderGrabber(PageGrabber):
14 | """
15 | Tinder scraper for screenname lookups
16 | """
17 |
18 |     def get_info(self, username, type):  # returns information about given handle
19 | print(
20 | "[" +
21 | bc.CPRP +
22 | "?" +
23 | bc.CEND +
24 | "] " +
25 | bc.CCYN +
26 | "Tinder" +
27 | bc.CEND)
28 | url = "https://www.gotinder.com/@%s" % (username)
29 | source = self.get_source(url)
30 | soup = self.get_dom(source)
31 | print(
32 | " [" +
33 | bc.CGRN +
34 | "+" +
35 | bc.CEND +
36 | "] " +
37 | bc.CRED +
38 | "User: " +
39 | bc.CEND +
40 | "%s" %
41 | username)
42 |         if soup.body.findAll(
43 |                 text='Looking for Someone?'):  # page text shown when no profile exists
44 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
45 | bc.CYLW + "No Profile Found.\n" + bc.CEND)
46 | return
47 | try:
48 | photo = soup.find("img", id="user-photo")
49 | if photo:
50 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
51 | bc.CRED + "Photo: " + bc.CEND + (photo['src']))
52 | else:
53 | photo = "unknown"
54 | except BaseException:
55 | pass
56 | try:
57 | name = soup.find("span", id="name")
58 | if name:
59 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
60 | bc.CRED + "Name: " + bc.CEND + name.text)
61 |             else:
62 |                 name = "unknown"
63 | except BaseException:
64 | pass
65 | try:
66 | teaser = soup.find("span", id="teaser")
67 |             if teaser:
68 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
69 | bc.CRED + "Bio: " + bc.CEND + teaser.text)
70 |             else:
71 |                 teaser = "unknown"
72 | except BaseException:
73 | pass
74 | try:
75 | age = soup.find("span", id="age")
76 |             if age:
77 | age = (age.text).replace(',', '')
78 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
79 | bc.CRED + "Age: " + bc.CEND + age.strip())
80 |             else:
81 |                 age = "unknown"
82 | except BaseException:
83 | pass
84 |
85 | self.info_dict.update({
86 | "photo": photo,
87 | "name": name,
88 | "bio": teaser,
89 | "age": age
90 | })
91 |
92 | if len(self.info_dict) == 0:
93 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
94 | "No source returned, try again later ...\n" + bc.CEND)
95 | return
96 | else:
97 | print()
98 | return
99 |
--------------------------------------------------------------------------------
/src/skiptracer/plugins/tinder/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/tinder/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/true_people/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | #
4 | # TruePeopleSearch.com scraper
5 | #
6 | from ..base import PageGrabber
7 | from .. import proxygrabber
8 | from ...colors.default_colors import DefaultBodyColors as bc
9 | import re
10 |
11 | try:
12 | import __builtin__ as bi
13 | except ImportError:
14 | import builtins as bi
15 |
16 | try:
17 | raw_input # Python 2
18 | except NameError:
19 | raw_input = input # Python 3
20 |
21 | import operator
22 |
23 |
24 | class TruePeopleGrabber(PageGrabber):
25 | """
26 | Check for CAPTCHA, if proxy enabled,try new proxy w/ request, else
27 | report to STDOUT about CAPTCHA
28 | """
29 | source = ""
30 | soup = ""
31 | url = ""
32 | url2 = "" # rid URL
33 |
34 | def __init__(self):
35 | """
36 | Load up True People plugin configs
37 | """
38 | super(TruePeopleGrabber, self).__init__()
39 |
40 | def get_info(self, information, lookup):
41 | """
42 | Uniform call for framework to launch function in a way to single out the
43 | calls per URL
44 | """
45 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
46 | bc.CCYN + "TruePeopleSearch" + bc.CEND)
47 |         self.lookup, self.information = lookup, information  # stash for CAPTCHA re-try
48 |         self.true_try(lookup, information)
49 |
50 | def check_for_captcha(self):
51 | captcha = self.soup.find('div', attrs={'class': 'g-recaptcha'})
52 | if bi.webproxy and captcha is not None:
53 | try:
54 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
55 | "Switching proxy, trying again...\n" + bc.CEND)
56 | bi.proxy = proxygrabber.new_proxy()
57 |                 self.true_try(self.lookup, self.information)
58 | return True
59 | except Exception as badproxy:
60 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
61 | "Bad proxy...\n" + bc.CEND)
62 | pass
63 | if captcha is not None:
64 | print(
65 | " [" +
66 | bc.CRED +
67 | "X" +
68 | bc.CEND +
69 | "] " +
70 | bc.CYLW +
71 |                 "CAPTCHA detected, use a proxy or complete challenge in browser\n" +
72 | bc.CEND)
73 | return True
74 | else:
75 | return False
76 |
77 | def makephone(self, information):
78 | """
79 | Find user supplied data format, adjust as needed for URL
80 | """
81 | try:
82 | if str(information).split("-")[1]:
83 |             dashphone = '({})-{}-{}'.format(  # "123-456-7890" -> "(123)-456-7890"
84 |                 information[0:3], information[4:7], information[8:])
85 | return dashphone
86 | except Exception as e:
87 | pass
88 | try:
89 | if str(information).split(" ")[
90 | 1]: # Can it be split by a whitespace, if so, break and format as needed for the URL
91 |             dashphone = '({})-{}-{}'.format(  # "123 456 7890" -> "(123)-456-7890"
92 |                 information[0:3], information[4:7], information[8:])
93 | return dashphone
94 | except Exception as e:
95 | pass
96 | try:
97 | # If len of data is 10 and is an integer, break and format
98 | # as needed for URL
99 | if len(information) == 10:
100 | dashphone = '({})-{}-{}'.format(
101 | information[0:3], information[3:6], information[6:])
102 | return dashphone
103 | except Exception as e:
104 | print(
105 | " [" +
106 | bc.CRED +
107 | "X" +
108 | bc.CEND +
109 | "] " +
110 | bc.CYLW +
111 | "Did not detect a phone number\n" +
112 | bc.CEND)
113 | return
114 |
115 | def phone(self, information):
116 | """
117 | Create the URL with the phone number
118 | """
119 |         phonere = re.compile(
120 |             r'(\d{10}|\d{3}[\s.-]\d{3}[\s.-]\d{4})')
121 |
122 | if phonere.findall(information):
123 | self.url = 'https://www.truepeoplesearch.com/results?phoneno={}'.format(
124 | self.makephone(information))
125 |
126 | def name(self, information):
127 | """
128 | City state and zip lookup
129 | """
130 | agerange = raw_input(
131 | " [" + bc.CRED + "!" + bc.CEND + "] " + bc.CYLW +
132 | "Please enter an age range, ex: 18-120 " + bc.CEND)
133 | citystatezip = raw_input(
134 | " [" + bc.CRED + "!" + bc.CEND + "] " + bc.CYLW +
135 | "Please enter a city,state,or zip - ex: (AL|Alabama|12345) " +
136 | bc.CEND)
137 | if str(information).split(' ')[1]:
138 | self.url = "https://www.truepeoplesearch.com/results?name={}&agerange={}&citystatezip={}".format(
139 | str(information).replace(' ', '%20'), agerange, citystatezip)
140 |
141 | def find_all_shallow(self):
142 | """
143 | Check if any records were found
144 | """
145 |
146 |         recordcount = self.soup.findAll('div', {'class': 'card-summary'})
147 |
148 | if len(recordcount) == 0:
149 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
150 | bc.CYLW + "No results were found.\n" + bc.CEND)
151 | return False
152 |
153 | return True
154 |
155 | def get_rid(self, lookup, x):
156 | """
157 | Attempt to grab the
158 | rid.
159 | """
160 | rid = False
161 | try:
162 | if lookup == 'name':
163 | rid = str(x).split(";")[3].split('"')[0]
164 | if lookup == 'phone':
165 | rid = str(x).split(";")[1].split('"')[0]
166 | except Exception as e:
167 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
168 | bc.CYLW + "No more results were found.\n" + bc.CEND)
169 | finally:
170 | return rid
171 |
172 | def get_rid_source(self):
173 | """
174 | Grab the source of the page linked to
175 | the rid
176 | """
177 | got_source = False
178 | try:
179 | self.source2 = self.get_source(self.url2)
180 | got_source = True
181 | except Exception as e:
182 | print(e)
183 | got_source = False
184 | finally:
185 | return got_source
186 |
187 | def grab_name(self):
188 | """
189 | Grab the users name from
190 | the DOM
191 | """
192 | name = "Unknown"
193 | try:
194 | nc = self.soup2.find('span', {'class': 'h2'})
195 | nc1 = str(nc).split(">")[3]
196 | name = str(" ".join(str(nc1).split())).split("<")[0]
197 | print((" [" + bc.CGRN + "+" + bc.CEND + "] " +
198 | bc.CRED + "Name: " + bc.CEND + "%s") % (name))
199 | except Exception as e:
200 | print(e)
201 | finally:
202 | return name
203 |
204 | def grab_age(self):
205 | """
206 | Grab the user age from the
207 | DOM
208 | """
209 | age = "Unknown"
210 | try:
211 | age1 = self.soup2.find('span', {'class': 'content-value'})
212 | age2 = " ".join(str(age1).split())
213 | age = age2.split(">")[1].split("<")[0].split()[1]
214 | print((" [" + bc.CGRN + "+" + bc.CEND + "] " +
215 | bc.CRED + "Age: " + bc.CEND + "%s") % (age))
216 | except Exception as e:
217 | print(e)
218 | finally:
219 | return age
220 |
221 | def grab_akalist(self):
222 | """
223 | Grab the users AKA list from
224 | the DOM
225 | """
226 | aklist = "Unknown"
227 | try:
228 | aklist = []
229 | aka = self.soup2.find_all(
230 | 'a', {'class': 'link-to-more', 'data-link-to-more': 'aka'})
231 | if len(aka) >= 1:
232 | print(
233 | " [" +
234 | bc.CGRN +
235 | "+" +
236 | bc.CEND +
237 | "] " +
238 | bc.CRED +
239 | "Alias: " +
240 | bc.CEND)
241 | aka = set(aka)
242 | for xaka in aka:
243 | xakas = str(xaka).split('>')[1].split('<')[0]
244 | aklist.append(xakas)
245 | print((" [" +
246 | bc.CGRN +
247 | "=" +
248 | bc.CEND +
249 | "] " +
250 | bc.CRED +
251 | "AKA: " +
252 | bc.CEND +
253 | "%s") %
254 | (xakas))
255 | except Exception as e:
256 | print(e)
257 | finally:
258 | return aklist
259 |
260 | def grab_address(self):
261 | """
262 | Grab the users address
263 | from the DOM
264 | """
265 | address = "Unknown"
266 | try:
267 | address = self.soup2.find_all(
268 | 'a', {'class': 'link-to-more', 'data-link-to-more': 'address'})
269 | except Exception as e:
270 | print(e)
271 | finally:
272 | return address
273 |
274 | def grab_related(self):
275 | """
276 | Grab the related values
277 | from the DOM
278 | """
279 | rellist = "Unknown"
280 | try:
281 | related = self.soup2.find_all(
282 | 'a', {'class': 'link-to-more', 'data-link-to-more': 'relative'})
283 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
284 | bc.CRED + "Related:" + bc.CEND)
285 | related = set(related)
286 | rellist = []
287 | for xrelate in related:
288 | xrels = str(xrelate).split(">")[1].split("<")[0]
289 | rellist.append(xrels)
290 | for xrel in set(rellist):
291 | print((" [" +
292 | bc.CGRN +
293 | "=" +
294 | bc.CEND +
295 | "] " +
296 | bc.CRED +
297 | "Known Relative: " +
298 | bc.CEND +
299 | "%s") %
300 | xrel)
301 | except Exception as e:
302 | print(e)
303 | finally:
304 | return rellist
305 |
306 | def grab_associate(self):
307 | """
308 | Grab a list of associate data
309 | from the DOM
310 | """
311 | asso = "Unknown"
312 | try:
313 | associate = self.soup2.find_all(
314 | 'a', {'class': 'link-to-more', 'data-link-to-more': 'associate'})
315 | associate = set(associate)
316 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
317 | bc.CRED + "Associate(s):" + bc.CEND)
318 | asso = []
319 | for xassociate in associate:
320 | assoc = str(xassociate).split(">")[1].split("<")[0]
321 | asso.append(assoc)
322 | print((" [" +
323 | bc.CGRN +
324 | "=" +
325 | bc.CEND +
326 | "] " +
327 | bc.CRED +
328 | "Known Associate: " +
329 | bc.CEND +
330 | "%s") %
331 | assoc)
332 | except Exception as e:
333 | print(e)
334 | finally:
335 | return asso
336 |
337 | def grab_prev_addr(self, address):
338 | """
339 | Grab previous address info
340 | from the DOM
341 | """
342 | prev = "Unknown"
343 | lives = "Unknown"
344 | try:
345 | curaddr = 0
346 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
347 | bc.CRED + "Address:" + bc.CEND)
348 | prev = []
349 | for xaddr in address:
350 | adr = " ".join(str(xaddr).split())
351 | adrs = " ".join(adr.split(">")[1::])
352 | addr = adrs.replace(
[... file lines 353-397 are truncated in this dump: the remainder of grab_prev_addr() and the opening of grab_phone_list() are missing; the surviving fragment "= 1:" was presumably the tail of an `if len(phone) >= 1:` guard before the Phone printout that follows ...]
398 | print(
399 | " [" +
400 | bc.CGRN +
401 | "+" +
402 | bc.CEND +
403 | "] " +
404 | bc.CRED +
405 | "Phone: " +
406 | bc.CEND)
407 | for xnum in phone:
408 | try:
409 | xnums = str(xnum).split(">")[1].split("<")[0]
410 | plist.append(xnums)
411 | print((" [" +
412 | bc.CGRN +
413 | "=" +
414 | bc.CEND +
415 | "] " +
416 | bc.CRED +
417 | "#: " +
418 | bc.CEND +
419 | "%s") %
420 | xnums)
421 | except Exception as w:
422 | pass
423 | except Exception as e:
424 | print(e)
425 | finally:
426 | return plist
427 |
428 | def find_all_deep(self, lookup):
429 | """
430 | Deep search for records
431 | """
432 | age = ""
433 | name = ""
434 | aklist = ""
435 | rellist = ""
436 | asso = ""
437 | prev = ""
438 | lives = ""
439 | plist = ""
440 |
441 | try:
442 |
443 | deep = self.soup.find_all(
444 | 'a', {
445 | 'class': [
446 | 'btn', 'btn-success', 'btn-lg',
447 | 'detail-link', 'shadow-form'
448 | ]}
449 | )
450 |
451 | for x in set(deep):
452 | rid = self.get_rid(lookup, x)
453 | if rid is False:
454 | return False
455 |
456 | self.url2 = self.url + "&" + rid
457 | if not self.get_rid_source():
458 | return False
459 |
460 | try:
461 | self.soup2 = self.get_dom(self.source2)
462 | except Exception as e:
463 | print(e)
464 |
465 | name = self.grab_name()
466 | age = self.grab_age()
467 | aklist = self.grab_akalist()
468 | address = self.grab_address()
469 | rellist = self.grab_related()
470 | asso = self.grab_associate()
471 | prev, lives = self.grab_prev_addr(address)
472 | plist = self.grab_phone_list()
473 |
474 | self.info_dict.update({name: {
475 | "age": age,
476 | "alias": aklist,
477 | "lives": lives,
478 | "lived": prev,
479 | "phone": plist,
480 | "related": rellist,
481 | "associate": asso}
482 | })
483 | except Exception as e:
484 | print(e)
485 |
486 | def get_source_html(self):
487 | """
488 | Grab the page source and build the DOM
489 | """
490 | self.source = self.get_source(self.url)
491 | self.soup = self.get_dom(self.source)
492 |
493 | def true_try(self, lookup, information):
494 | """
495 | Determines different URL constructs based on user supplied data
496 | """
497 | address_list = []
498 | if lookup == "phone":
499 | self.phone(information)
500 |
501 | if lookup == "name":
502 | self.name(information)
503 |
504 | if lookup in ['name', 'phone']:
505 | self.get_source_html()
506 |
507 | if self.check_for_captcha():
508 | print((" [" + bc.CRED + "X" + bc.CEND + "] " +
509 | bc.CYLW + "Goto: {}" + bc.CEND).format(self.url)
510 | )
511 |
512 | self.iscomplete = raw_input(
513 | " [" + bc.CRED + "!" + bc.CEND + "] " + bc.CYLW +
514 | "Have you completed the CAPTCHA? " + bc.CEND
515 | )
516 |
517 | if str(self.iscomplete).lower() in ['no', 'n', 'false', '0']:
518 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
519 | "User has not completed the CAPTCHA\n" + bc.CEND)
520 | return
521 | else:
522 | self.get_source_html()
523 |
524 | if self.find_all_shallow():
525 | self.find_all_deep(lookup)
526 | else:
527 | return False
528 |
529 | print()
530 | return self.info_dict
531 |
--------------------------------------------------------------------------------
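A note on the makephone() chain in true_people/__init__.py above: hand-computed slice offsets are easy to get wrong across input formats. A minimal sketch of the same normalization, assuming only that the input carries ten US digits (normalize_phone is an illustrative name, not part of the plugin):

    import re

    def normalize_phone(raw):
        # Drop every non-digit (dashes, dots, spaces, parens), then re-format.
        digits = re.sub(r"\D", "", str(raw))
        if len(digits) != 10:
            return None  # not a 10-digit US number
        return "({})-{}-{}".format(digits[:3], digits[3:6], digits[6:])

    # normalize_phone("123-456-7890") == normalize_phone("123 456 7890")
    #                                 == normalize_phone("1234567890")
    #                                 == "(123)-456-7890"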
/src/skiptracer/plugins/true_people/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/true_people/__main__.py
--------------------------------------------------------------------------------
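The find_all_shallow()/find_all_deep() pair in true_people/__init__.py above implements a common two-phase scrape: list the summary cards first, then fetch one detail page per record id. A condensed sketch of that control flow, with fetch_detail standing in for the rid-based page fetch (both names here are illustrative):

    def scrape_records(soup, fetch_detail):
        # Phase 1: shallow check - are there any result cards at all?
        cards = soup.find_all('div', {'class': 'card-summary'})
        if not cards:
            return {}
        # Phase 2: deep pass - pull the detail page behind each record.
        results = {}
        for card in cards:
            detail = fetch_detail(card)  # e.g. follow the record's rid link
            if detail:
                results.update(detail)
        return results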
/src/skiptracer/plugins/truthfinder/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | #
4 | # TruthFinder.com scraper
5 | #
6 | from ..base import PageGrabber
7 | from .. import proxygrabber
8 | from ...colors.default_colors import DefaultBodyColors as bc
9 | import re
10 |
11 | try:
12 | import __builtin__ as bi
13 | except ImportError:
14 | import builtins as bi
15 |
16 | try:
17 | raw_input # Python 2
18 | except NameError:
19 | raw_input = input # Python 3
20 |
21 |
22 | class TruthFinderGrabber(PageGrabber):
23 | """
24 | Check for CAPTCHA; if a proxy is enabled, retry the request with a new proxy, else
25 | report to STDOUT about CAPTCHA
26 | """
27 | url = ""
28 |
29 | def get_info(self, lookup, information):
30 | """
31 | Uniform entry point for the framework to launch this plugin's
32 | lookups on a per-URL basis
33 | """
34 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
35 | bc.CCYN + "TruthFinder" + bc.CEND)
36 | # Actual logic to run + re-try request
37 | self.truth_try(lookup, information)
38 |
39 | def check_for_captcha(self, lookup, information):
40 | captcha = self.soup.find('div', attrs={'class': 'g-recaptcha'})
41 | if bi.webproxy and captcha is not None:
42 | try:
43 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
44 | "Switching proxy, trying again...\n" + bc.CEND)
45 | bi.proxy = proxygrabber.new_proxy()
46 | self.truth_try(lookup, information)
47 | return True
48 | except Exception as badproxy:
49 | pass
50 | if captcha is not None:
51 | print(
52 | " [" +
53 | bc.CRED +
54 | "X" +
55 | bc.CEND +
56 | "] " +
57 | bc.CYLW +
58 | "Captcha detected, use a proxy or complete challenge in browser\n" +
59 | bc.CEND)
60 | return True
61 | else:
62 | return False
63 |
64 | def makephone(self, information):
65 | """
66 | Find user supplied data format, adjust as needed for URL
67 | """
68 | try:
69 | if str(information).split(
70 | "-")[1]: # Can it be split bu a "-", everything is ok
71 | dashphone = '({})-{}-{}'.format(
72 | information[0:3], information[4:7], information[8:])
73 | return dashphone
74 | except BaseException:
75 | pass
76 | try:
77 | if str(information).split(" ")[
78 | 1]: # Can it be split by a whitespace, if so, break and format as needed for the URL
79 | dashphone = '({})-{}-{}'.format(
80 | information[0:3], information[4:7], information[8:])
81 | return dashphone
82 | except BaseException:
83 | pass
84 | try:
85 | # If len of data is 10 and is an integer, break and format
86 | # as needed for URL
87 | if len(information) == 10:
88 | dashphone = '({})-{}-{}'.format(
89 | information[0:3], information[3:6], information[6:])
90 | return dashphone
91 | except BaseException:
92 | print(
93 | " [" +
94 | bc.CRED +
95 | "X" +
96 | bc.CEND +
97 | "] " +
98 | bc.CYLW +
99 | "Did not detect a phone number\n" +
100 | bc.CEND)
101 | return
102 |
103 | def set_city_state_zip(self, citystatezip):
104 | """
105 | Set the city, state, zip
106 | value for the object.
107 | """
108 | if citystatezip:
109 | self.state = citystatezip
110 | else:
111 | self.state = "ALL"
112 |
113 | def set_age(self, age):
114 | """
115 | Set the age
116 | value for the object.
117 | """
118 | if age:
119 | self.age = "true"
120 | else:
121 | self.age = "false"
122 |
123 | def set_gender(self, gender):
124 | """
125 | Set the gender
126 | value for the object.
127 | """
128 | if gender:
129 | self.gndr = "&gender={}".format(gender)
130 | else:
131 | self.gndr = "&gender="
132 |
133 | def split_name(self, information):
134 | """
135 | Split the name down into
136 | first name and last name
137 | """
138 | try:
139 | if len(str(information).split(' ')) in [2, 3]:
140 | if len(str(information).split(' ')) == 2:
141 | self.fname = str(information).split(" ")[0]
142 | self.lname = str(information).split(" ")[1]
143 | if len(str(information).split(' ')) == 3:
144 | self.fname = str(information).split(" ")[0]
145 | self.lname = str(information).split(" ")[2]
146 | except BaseException:
147 | print(
148 | " [" +
149 | bc.CRED +
150 | "X" +
151 | bc.CEND +
152 | "] " +
153 | bc.CYLW +
154 | "Failed to parse serarch string, lookup name.\n" +
155 | bc.CEND)
156 |
157 | def truth_try(self, lookup, information):
158 | """
159 | Builds out different URL constructs based on user supplied data
160 | """
161 |
162 | address_list = []
163 |
164 | if lookup == "phone":
165 | phonere = re.compile(
166 | r'(\d\d\d\d\d\d\d\d\d\d|\d\d\d[\s.-]\d\d\d[\s.-]\d\d\d\d)')
167 |
168 | if phonere.findall(information):
169 | try:
170 | self.url = 'https://www.truepeoplesearch.com/results?phoneno={}'.format(
171 | self.makephone(information))
172 | except Exception as e:
173 | pass
174 |
175 | if lookup == "name": # Make the URL for name lookup
176 | citystatezip = input(
177 | " [" +
178 | bc.CRED +
179 | "!" +
180 | bc.CEND +
181 | "] " +
182 | bc.CYLW +
183 | "Please enter a city,state,or zip - ex: (AL=Alabama|CO=Colorado) " +
184 | bc.CEND)
185 | gender = input(
186 | " [" +
187 | bc.CRED +
188 | "!" +
189 | bc.CEND +
190 | "] " +
191 | bc.CYLW +
192 | "Please enter the persons biological sex - ex: (M|F) " +
193 | bc.CEND)
194 | age = input(
195 | " [" +
196 | bc.CRED +
197 | "!" +
198 | bc.CEND +
199 | "] " +
200 | bc.CYLW +
201 | "Is the person older than 30? - ex: (Y|n) " +
202 | bc.CEND)
203 |
204 | self.set_city_state_zip(citystatezip)
205 | self.set_age(age)
206 | self.set_gender(gender)
207 | self.split_name(information)
208 |
209 | self.url = "https://www.truthfinder.com/results/?utm_source=VOTER&traffic%5Bsource%5D=VOTER&utm_medium=pre-pop&traffic%5Bmedium%5D=pre-pop&utm_campaign=&traffic%5Bcampaign%5D=srapi%3A&utm_term=1&traffic%5Bterm%5D=1&utm_content=&traffic%5Bcontent%5D=&s1=&s2=srapi&s3=1&s4=&s5=&city=&firstName={}&lastName={}&page=r&state={}{}&qLocation=true&qRelatives=true&qOver30={}".format(
210 | self.fname,
211 | self.lname,
212 | self.state,
213 | self.gndr,
214 | self.age)
215 |
216 | self.source = self.get_source(self.url)
217 | self.soup = self.get_dom(self.source)
218 |
219 | try:
220 | ul = self.soup.findAll("ul")
221 | for xul in ul:
222 | perlen = len(str(xul).split("\n"))
223 | broken = str(xul).split("\n")
224 | if perlen >= 10:  # Process further only if len is at least 10
225 | try:
226 | # should be static to the results (searched name)
227 | name = broken[3].split("<")[0]
228 | print((" [" +
229 | bc.CGRN +
230 | "+" +
231 | bc.CEND +
232 | "] " +
233 | bc.CRED +
234 | "Name: " +
235 | bc.CEND +
236 | "%s") %
237 | (name))
238 | try:
239 | # find position + 1 space to left
240 | akaloc = broken.index('aka:') + 1
241 | aka = broken[akaloc].split("<")[0].replace(
242 | ", ", ",") # grab actual dataset
243 | print(
244 | " [" + bc.CGRN + "+" + bc.CEND + "] " + bc.CRED + "Alias: " + bc.CEND)
245 | # set sorted unique
246 | akalist = sorted(set(str(aka).split(",")))
247 | for xaka in akalist: # for each entry in sorted unique list
248 | print((" [" +
249 | bc.CGRN +
250 | "=" +
251 | bc.CEND +
252 | "] " +
253 | bc.CRED +
254 | "AKA: " +
255 | bc.CEND +
256 | "%s") %
257 | (xaka))
258 | except BaseException: # in case of failure
259 | akalist = ['unknown']
260 | print(
261 | " [" +
262 | bc.CGRN +
263 | "+" +
264 | bc.CEND +
265 | "] " +
266 | bc.CRED +
267 | "AKA: " +
268 | bc.CEND +
269 | "Unknown")
270 | pass
271 | try:
272 | ageloc = broken.index('') + 2
273 | age = broken[ageloc].split(">")[1].split("<")[0]
274 | if age:
275 | print((" [" +
276 | bc.CGRN +
277 | "+" +
278 | bc.CEND +
279 | "] " +
280 | bc.CRED +
281 | "Age: " +
282 | bc.CEND +
283 | "%s") %
284 | (age))
285 | except BaseException:
286 | age = 'unknown'
287 | print((" [" +
288 | bc.CGRN +
289 | "+" +
290 | bc.CEND +
291 | "] " +
292 | bc.CRED +
293 | "Age: " +
294 | bc.CEND +
295 | "%s") %
296 | (age))
297 | pass
298 | try:
299 | locloc = broken.index('') + 2
300 | # print(broken)  # debug output
301 | locations = broken[locloc]
302 | locations = locations.replace(
303 | ", ",
304 | ":").replace(
305 | "",
306 | ",").replace(
307 | "",
308 | " ").replace(
309 | ", ",
310 | "")
311 | print(
312 | " [" +
313 | bc.CGRN +
314 | "+" +
315 | bc.CEND +
316 | "] " +
317 | bc.CRED +
318 | "Location(s): " +
319 | bc.CEND)
320 | for xlocal in (loclist := locations.split(",")):  # keep the list for info_dict below (Py3.8+)
321 | print((" [" +
322 | bc.CGRN +
323 | "=" +
324 | bc.CEND +
325 | "] " +
326 | bc.CRED +
327 | "City:State:" +
328 | bc.CEND +
329 | "%s") %
330 | (xlocal))
331 | except BaseException:
332 | loclist = ['unknown']  # avoid shadowing the locals() builtin
333 | print(
334 | " [" +
335 | bc.CGRN +
336 | "+" +
337 | bc.CEND +
338 | "] " +
339 | bc.CRED +
340 | "Location: " +
341 | bc.CEND +
342 | "Unknown")
343 | pass
344 | try:
345 | relloc = broken.index('') + 1
346 | if broken[relloc].split('"')[1] == "No Relatives":
347 | print(
348 | " [" +
349 | bc.CGRN +
350 | "+" +
351 | bc.CEND +
352 | "] " +
353 | bc.CRED +
354 | "Relative(s): " +
355 | bc.CEND +
356 | "Unknown")
357 | else:
358 | print(
359 | " [" + bc.CGRN + "+" + bc.CEND + "] " + bc.CRED + "Relative(s): " + bc.CEND)
360 | relatives = broken[int(
361 | relloc) + 2].replace("\n", ",")
362 | relatives = relatives.replace(
363 | "",
364 | ",").replace(
365 | "",
366 | "").replace(
367 | ", ",
368 | "")
369 | relate = relatives.split(",")
370 | for xrel in sorted(set(relate)):
371 | print((" [" +
372 | bc.CGRN +
373 | "=" +
374 | bc.CEND +
375 | "] " +
376 | bc.CRED +
377 | "Related: " +
378 | bc.CEND +
379 | "%s") %
380 | (xrel))
381 | except BaseException:
382 | relate = ['unknown']
383 | pass
384 | except BaseException:
385 | pass
386 | print()
387 | self.info_dict.update({
388 | "name": name,
389 | "age": age,
390 | "aka": sorted(set(akalist)),
391 | "locations": sorted(set(locals)),
392 | "relatives": sorted(set(relate)),
393 | })
394 | bi.outdata['truthfinder'] = self.info_dict
395 | except BaseException:
396 | pass
397 |
--------------------------------------------------------------------------------
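The hand-assembled truthfinder.com query string above breaks as soon as a name contains a space or other reserved character. A sketch of the same construction with proper escaping, keeping only the parameters the code actually fills in (the tracking fields are omitted, and the exact parameter set is an assumption drawn from the format string above):

    from urllib.parse import urlencode

    def build_results_url(fname, lname, state, gender, over30):
        params = {
            "firstName": fname,
            "lastName": lname,
            "state": state,
            "gender": gender,
            "qOver30": "true" if over30 else "false",
        }
        # urlencode percent-escapes values, e.g. "O'Brien" -> "O%27Brien"
        return "https://www.truthfinder.com/results/?" + urlencode(params)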
/src/skiptracer/plugins/truthfinder/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/truthfinder/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/twitter/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Twitter Scraper: Requires users to install additional non-standard libraries
3 | #
4 | from ..base import PageGrabber
5 | from ...colors.default_colors import DefaultBodyColors as bc
6 | from sys import platform
7 |
8 | import builtins as bi
9 | import time
10 |
11 | try:
12 | from bs4 import BeautifulSoup as bs
13 | except Exception as e:
14 | print((
15 | " [" +
16 | bc.CRED +
17 | "X" +
18 | bc.CEND +
19 | "] " +
20 | bc.CYLW +
21 | "Failed at importing BeautifulSoup from bs4: {}\n" +
22 | bc.CEND).format(e))
23 | try:
24 | from selenium.webdriver import Firefox
25 | from selenium.webdriver.common.by import By
26 | from selenium.webdriver.common.keys import Keys
27 | from selenium.webdriver.support.ui import Select
28 | from selenium.webdriver.support.ui import WebDriverWait
29 | from selenium.webdriver.firefox.options import Options
30 | except Exception as e:
31 | print((
32 | " [" +
33 | bc.CRED +
34 | "X" +
35 | bc.CEND +
36 | "] " +
37 | bc.CYLW +
38 | "Failed at importing selenium requirements: {}\n" +
39 | bc.CEND).format(e))
40 | try:
41 | from tqdm import tqdm
42 | except Exception as e:
43 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
44 | "Failed at importing tqdm from tdqm: {}\n" + bc.CEND).format(e)
45 | import os
46 |
47 |
48 | class TwitterGrabber(PageGrabber):
49 | """
50 | Class for grabbing a Twitter screenname
51 | to extract OSINT
52 | """
53 |
54 | def get_info(self, screenname, type):
55 | """
56 | Grab the info from
57 | the screen name passed in
58 | """
59 |
60 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
61 | bc.CCYN + "Twitter" + bc.CEND)
62 | print(" [" + bc.CGRN + "!" + bc.CEND + "] " + bc.CRED +
63 | "Module takes some time to load, please wait!" + bc.CEND)
64 |
65 | options = Options()
66 |
67 | try:
68 | options.add_argument('--headless')
69 | b = Firefox(
70 | executable_path='/usr/bin/geckodriver',
71 | firefox_options=options)
72 | wait = WebDriverWait(b, timeout=5)
73 | b.get('https://twitter.com/{}'.format(screenname))
74 | except Exception as e:
75 | print(e)
76 | print(
77 | " [" +
78 | bc.CRED +
79 | "X" +
80 | bc.CEND +
81 | "] " +
82 | bc.CYLW +
83 | "Failed at making the initial request: {}\n" +
84 | bc.CEND) # .format(e)
85 |
86 | try:
87 | soup = bs(b.page_source, 'lxml')
88 | validname = str(soup.h1).split()[-2].split('/')[1].split('"')[0]
89 | avatar = str(
90 | soup.findAll(
91 | 'img', {
92 | 'class': ['avatar', 'js-action-profile-avatar']})[3]['src'])
93 | profnav = soup.find_all('span', {'class': 'ProfileNav-value'})
94 | except Exception as e:
95 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
96 | "Unable to make initial soup: {}\n" + bc.CEND) # .format(e)
97 | try:
98 | if len(profnav) >= 5:
99 | datal = list()
100 | for x in profnav[0:len(profnav) - 1:]:
101 | datal.append(
102 | " ".join(
103 | str(x).replace(
104 | "\n",
105 | " ").split()).split('"')[3])
106 | tcount, fgcount, fscount, likes = datal[:4]
107 | page = int(tcount) // 20 + 1  # integer page count for range() below
108 | nap = 1
109 | estt = page * nap / 60
110 | except Exception as e:
111 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
112 | "Failed at making the datalist: {}\n" + bc.CEND) # .format(e)
113 |
114 | try:
115 | scrapeall = input(
116 | " [!] Do you want to capture all tweets?\n [!] Estimated time to complete: " +
117 | str(estt) +
118 | "m (Y/n) ")
119 | if scrapeall.lower() in ['1', 'y', 'yes', 'true', 'on']:
120 | for i in tqdm(range(1, page)):
121 | b.execute_script(
122 | "window.scrollTo(0, document.body.scrollHeight);")
123 | time.sleep(nap)
124 | except Exception as e:
125 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
126 | "Failed at scrolling site: {}\n" + bc.CEND)
127 | try:
128 | h = b.page_source
129 | soup = bs(h, 'lxml')
130 | datelist = list()
131 | timelist = list()
132 | for d in soup.findAll('li', {'class': 'js-stream-item'}):
133 | print("\n [+]", "-" * 80)
134 | if 'Retweeted' in d.p.text:
135 | print(" [" + bc.CGRN + "!" + bc.CEND + "] " +
136 | bc.CRED + "Retweet: " + bc.CEND)
137 | if str(d.span).split()[3] == 'Icon--pinned':
138 | print(" [" + bc.CGRN + "!" + bc.CEND + "] " +
139 | bc.CRED + "Pinned: " + bc.CEND)
140 | try:
141 | tlist = d.findAll('a')
142 | dt = tlist[1]['title']
143 | except Exception as e:
144 | try:
145 | dt = tlist[2]['title']
146 | except Exception as e:
147 | print((
148 | " [" +
149 | bc.CRED +
150 | "X" +
151 | bc.CEND +
152 | "] " +
153 | bc.CYLW +
154 | "Unable to find datetime: {}\n" +
155 | bc.CEND).format(e))
156 | pass
157 | datelist.append(dt)
158 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
159 | bc.CRED + "Date/Time: " + bc.CEND + str(dt))
160 | try:
161 | timestamp = str(tlist[1]).split()[-3].split('"')[1]
162 | if len(timestamp) < 10:
163 | timestamp = str(tlist[2]).split()[-3].split('"')[1]
164 | if len(timestamp) < 10:
165 | timestamp = "Conversation Extension"
166 | except Exception as e:
167 | print(e)
168 | pass
169 | if timestamp:
170 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
171 | bc.CRED + "Timestamp: " + bc.CEND + str(timestamp))
172 | try:
173 | posttitle = str(d.p.a['title'])
174 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
175 | bc.CRED + "Title: " + bc.CEND + str(posttitle))
176 | except Exception as e:
177 | #print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"Unable to find title: {}\n"+bc.CEND).format(e)
178 | pass
179 | try:
180 | postdata = d.p.text
181 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
182 | bc.CRED + "Content:\n" + bc.CEND)
183 | print(postdata)
184 | except Exception as e:
185 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
186 | "Unable to find content: {}\n" + bc.CEND).format(e)
187 | pass
188 | except Exception as e:
189 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
190 | "Can not make soup, phase 2: {}\n" + bc.CEND) # .format(e)
191 |
192 | try:
193 | if platform == 'linux' or platform == 'linux2':
194 | os.popen(
195 | r'ps -A xf | grep firefox | grep marionette | sed -r "s/^[ ]{1,4}([0-9]{1,7})(.*)/\1/g" | xargs kill -9 2>/dev/null')
196 | elif platform == 'darwin':
197 | os.popen(
198 | r'ps -Axf | grep firefox | grep marionette | sed "s/^[ ]{1,4}([0-9]{1,7})(.*)/\1/g" | xargs kill -9 2>/dev/null')
199 |
200 | except Exception as e:
201 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
202 | "Unable to kill Firefox headless: {}\n" + bc.CEND).format(e)
203 |
--------------------------------------------------------------------------------
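The tweet-capture loop above works by repeatedly scrolling the headless browser so Twitter's lazy loader renders older tweets. A self-contained sketch of that pattern (it assumes geckodriver is on PATH, and the page count and nap are illustrative), using driver.quit() instead of the pkill fallback at the end of the file:

    import time
    from selenium.webdriver import Firefox
    from selenium.webdriver.firefox.options import Options

    def scroll_and_dump(url, pages=5, nap=1.0):
        opts = Options()
        opts.add_argument("--headless")
        browser = Firefox(options=opts)  # assumes geckodriver on PATH
        try:
            browser.get(url)
            for _ in range(pages):
                # Jump to the bottom so the next batch of tweets loads.
                browser.execute_script(
                    "window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(nap)  # let lazy-loaded content render
            return browser.page_source
        finally:
            browser.quit()  # releases the marionette process cleanly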
/src/skiptracer/plugins/twitter/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/twitter/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/who_call_id/__init__.py:
--------------------------------------------------------------------------------
1 | """Whocallid.com search module"""
2 | from __future__ import print_function
3 | from __future__ import absolute_import
4 | from ..base import PageGrabber
5 | from ...colors.default_colors import DefaultBodyColors as bc
6 | import re
7 | import logging
8 | try:
9 | import __builtin__ as bi
10 | except BaseException:
11 | import builtins as bi
12 |
13 |
14 | class WhoCallIdGrabber(PageGrabber):
15 | """
16 | WhoCallID sales scraper for reverse telephone lookups
17 | """
18 |
19 | def get_name(self):
20 | """
21 | Grab the users name
22 | """
23 | name = "Unknown"
24 | try:
25 | name = self.soup.find('h2', attrs={'class': 'name'})
26 | if name:
27 | name = name.text.strip()
28 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
29 | bc.CRED + "Name: " + bc.CEND + str(name))
30 | except BaseException:
31 | pass
32 | finally:
33 | return name
34 |
35 | def get_location(self):
36 | """
37 | Get the location
38 | """
39 | location = "Unknown"
40 | try:
41 | location = self.soup.find('h3', attrs={'class': 'location'})
42 | if location:
43 | location = location.text.strip()
44 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
45 | bc.CRED + "Location: " + bc.CEND + str(location))
46 | except BaseException:
47 | pass
48 | finally:
49 | return location
50 |
51 | def get_phone_type(self):
52 | """
53 | Get the phone type
54 | """
55 | phone_type = "Unknown"
56 | try:
57 | phone_type = self.soup.find("img").attrs['alt']
58 | if phone_type:
59 | phone_type = phone_type.strip()
60 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
61 | bc.CRED + "Phone Type: " + bc.CEND + str(phone_type))
62 | except BaseException:
63 | pass
64 | finally:
65 | return phone_type
66 |
67 | def get_carrier(self, phone_number):
68 | """
69 | Get the phone carrier info
70 | """
71 | carrier = ""
72 | try:
73 | self.url = "https://whocalld.com/+1{}?carrier".format(phone_number)
74 | self.source = self.get_source(self.url)
75 | self.soup = self.get_dom(self.source)
76 | carrier = self.soup.find('span', attrs={'class': 'carrier'})
77 | except BaseException:
78 | pass
79 | finally:
80 | return carrier
81 |
82 | def process_carrier(self, carrier):
83 | """
84 | Take the carrier info and process it
85 | """
86 | try:
87 | if carrier:
88 | carrier = carrier.text
89 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
90 | bc.CRED + "Carrier: " + bc.CEND + str(carrier))
91 | else:
92 | carrier = ""
93 | except BaseException:
94 | carrier = ""
95 | finally:
96 | return carrier
97 |
98 | def get_city(self):
99 | """
100 | Grab the city info
101 | """
102 | city = ""
103 | try:
104 | city = self.soup.find('span', attrs={'class': 'city'})
105 | if city:
106 | city = city.text
107 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
108 | bc.CRED + "City: " + bc.CEND + str(city))
109 | except BaseException:
110 | pass
111 | finally:
112 | return city
113 |
114 | def get_state(self):
115 | """
116 | Grab the state info
117 | """
118 | state = ""
119 | try:
120 | state = self.soup.find('span', attrs={'class': 'state'})
121 | if state:
122 | state = state.text
123 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
124 | bc.CRED + "State: " + bc.CEND + str(state))
125 | except BaseException:
126 | pass
127 | finally:
128 | return state
129 |
130 | def get_time(self):
131 | """
132 | Grab time info
133 | """
134 | time = ""
135 | try:
136 | time = self.soup.find('span', attrs={'class': 'time'})
137 | if time:
138 | time = time.text
139 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
140 | bc.CRED + "Time: " + bc.CEND + str(time))
141 | except BaseException:
142 | pass
143 | finally:
144 | return time
145 |
146 | def get_info(self, phone_number, lookup):
147 | """
148 | Request, scrape and return values found
149 | """
150 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
151 | bc.CCYN + "WhoCalld" + bc.CEND)
152 | # Get phone info
153 | self.url = 'https://whocalld.com/+1{}'.format(phone_number)
154 | self.source = self.get_source(self.url)
155 | self.soup = self.get_dom(self.source)
156 |
157 | try:
158 | if self.soup.body.find_all(string=re.compile(
159 | '.*country.*'), recursive=True):
160 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
161 | bc.CYLW + "No WhoCallID data returned\n" + bc.CEND)
162 | return
163 | except Exception:
164 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
165 | bc.CYLW + "Unable to extract data. Is the site online?\n" + bc.CEND)
166 |
167 | name = self.get_name()
168 | location = self.get_location()
169 | phone_type = self.get_phone_type()
170 | carrier = self.get_carrier(phone_number)
171 | carrier = self.process_carrier(carrier)
172 | city = self.get_city()
173 | state = self.get_state()
174 | time = self.get_time()
175 |
176 | self.info_dict.update({
177 | "carrier": carrier,
178 | "city": city,
179 | "location": location,
180 | "name": name,
181 | "phone_type": phone_type,
182 | "state": state,
183 | "time": time
184 | })
185 |
186 | print()
187 | return self.info_dict
188 |
--------------------------------------------------------------------------------
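get_name(), get_location(), get_city(), get_state() and get_time() above all repeat the same find / strip / print / default pattern. A sketch of a shared helper that would collapse them (grab_text is an illustrative name, not part of the plugin):

    def grab_text(soup, tag, css_class, default="Unknown"):
        # Return the stripped text of the first matching tag, or the default.
        node = soup.find(tag, attrs={"class": css_class})
        return node.text.strip() if node else default

    # e.g. name = grab_text(self.soup, "h2", "name")
    #      city = grab_text(self.soup, "span", "city", default="")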
/src/skiptracer/plugins/who_call_id/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/who_call_id/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/plugins/whoismind/__init__.py:
--------------------------------------------------------------------------------
1 | #######################################################################
2 | # whoismind scraper - returns domains associated with email #
3 | #######################################################################
4 |
5 | from __future__ import print_function
6 | from ..base import PageGrabber
7 | from ...colors.default_colors import DefaultBodyColors as bc
9 |
10 | try:
11 | import __builtin__ as bi
12 | except ImportError:
13 | import builtins as bi
14 |
15 |
16 | class WhoisMindGrabber(PageGrabber):
17 | """
18 | WhoisMind scraper for registered domains by email lookups
19 | """
20 |
21 | def __init__(self):
22 | """
23 | Load up WhoisMindGrabber plugin configs
24 | """
25 | super(WhoisMindGrabber, self).__init__()
26 |
27 | def get_info(self, email, category):
28 | """
29 | Request and processes results, sorted unique, remove blanks
30 | """
31 | try:
32 | print("[" + bc.CPRP + "?" + bc.CEND + "] " +
33 | bc.CCYN + "WhoisMind" + bc.CEND)
34 | url = 'https://whoisamped.com/email/{}.html'.format(email)
35 | source = self.get_source(url)
36 | soup = self.get_dom(source)
37 | href = soup.findAll('a')
38 |
39 | except Exception as urlgrabfailed:
40 | print(" [" + bc.CRED + "X" + bc.CEND + "] " + bc.CYLW +
41 | "WhoisMind failed to produce the URL" + bc.CEND)
42 | whoisdb = list()
43 |
44 | try:
45 | for hreftag in href:
46 | if hreftag.text != "" and hreftag.text in hreftag['href']:
47 | domain = hreftag.text
48 | print(" [" + bc.CGRN + "+" + bc.CEND + "] " +
49 | bc.CRED + "Domain: " + bc.CEND + domain)
50 | whoisdb.append({"domain": domain})
51 | except Exception as whoisfailed:
52 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
53 | bc.CYLW + "WhoisMind returned no results" + bc.CEND)
54 | return
55 | if len(whoisdb) == 0:
56 | print(" [" + bc.CRED + "X" + bc.CEND + "] " +
57 | bc.CYLW + "WhoisMind returned no results" + bc.CEND)
58 | else:
59 | self.info_list.append([{"domain": d} for d in sorted({x["domain"] for x in whoisdb})])  # np.unique cannot sort dicts in Py3
60 | bi.outdata['whoismind'] = self.info_list[0]
61 | print()
62 | return self.info_list
63 |
--------------------------------------------------------------------------------
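The domain de-duplication above can also be done order-preservingly, without sorting or numpy. A sketch (unique_dicts is an illustrative name):

    def unique_dicts(records, key):
        seen, out = set(), []
        for rec in records:
            if rec[key] not in seen:  # first occurrence wins
                seen.add(rec[key])
                out.append(rec)
        return out

    # unique_dicts([{"domain": "a.com"}, {"domain": "a.com"}], "domain")
    # -> [{"domain": "a.com"}]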
/src/skiptracer/plugins/whoismind/__main__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xillwillx/skiptracer/fbc1f8c88907db3014c6c64d08b7ded814a9c172/src/skiptracer/plugins/whoismind/__main__.py
--------------------------------------------------------------------------------
/src/skiptracer/skiptracer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | from __future__ import print_function
4 |
5 | import pkg_resources
6 | import sys
7 | import signal
8 | import json
9 | import ast
10 |
11 | try:
12 | import __builtin__ as bi
13 | except BaseException:
14 | import builtins as bi
15 |
16 |
17 | class SkipTracer:
18 | """
19 | Kick off the SkipTracer
20 | program
21 | """
22 | #bi.search_string = ''
23 | bi.lookup = ''
24 | bi.webproxy = ""
25 | bi.proxy = ""
26 | bi.debug = False
27 |
28 | inc_plugins = {}
29 | plugins_plugin = "skiptracer.plugins"
30 | menus_plugin = "skiptracer.menus"
31 | colors_plugin = "skiptracer.colors"
32 | loaded_plugins_plugin_dict = {}
33 | loaded_menus_plugin_dict = {}
34 | loaded_colors_plugin_dict = {}
35 |
36 | def __init__(self, plugins):
37 | """
38 | Load all the different types
39 | of plugin
40 | """
41 | self.inc_plugins = plugins
42 |
43 | self.loaded_plugins_plugin_dict = self.load_plugins(
44 | self.plugins_plugin)
45 |
46 | self.loaded_menus_plugin_dict = self.load_plugins(
47 | self.menus_plugin)
48 |
49 | self.loaded_colors_plugin_dict = self.load_plugins(
50 | self.colors_plugin)
51 |
52 | # only supporting default menu for now
53 | self.loaded_menus_plugin_dict['default_menus'](
54 | self.loaded_plugins_plugin_dict).intromenu()
55 |
56 | def load_plugins(self, plugin):
57 | """
58 | Load the plugin and store
59 | object in an array
60 | """
61 | plugin_dict = {}
62 |
63 | for p in pkg_resources.iter_entry_points(plugin):
64 | plugin_dict[p.name] = p.load()
65 | return plugin_dict
66 |
--------------------------------------------------------------------------------
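load_plugins() above discovers plugins through setuptools entry points via pkg_resources. On Python 3.10+ the stdlib offers the same lookup through importlib.metadata; a sketch, assuming the same entry-point groups declared in setup.py:

    from importlib.metadata import entry_points

    def load_plugins(group):
        # Same dict-of-loaded-objects shape as the pkg_resources loop above.
        return {ep.name: ep.load() for ep in entry_points(group=group)}

    # e.g. plugins = load_plugins("skiptracer.plugins")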
/storage/freemail.db:
--------------------------------------------------------------------------------
1 | comcast
2 | gmail
3 | yahoo
4 | aol
5 | hotmail
6 | outlook
7 |
--------------------------------------------------------------------------------
/storage/user-agents.db:
--------------------------------------------------------------------------------
1 | Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )
2 | Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)
3 | Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5
4 | Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9
5 | Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7
6 | Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14
7 | Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14
8 | Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27
9 | Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2
10 | Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7
11 | Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0
12 | Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3
13 | Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6
14 | Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6
15 | Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1
16 | Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre
17 | Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10
18 | Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)
19 | Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5
20 | Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)
21 | Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
22 | Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
23 | Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0
24 | Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2
25 | Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1
26 | Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1
27 | Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0
28 | Mozilla/5.0 (Windows NT 6.0; rv:14.0) Gecko/20100101 Firefox/14.0.1
29 | Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1
30 | Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0) Gecko/16.0 Firefox/16.0
31 | Mozilla/5.0 (Windows NT 6.2; rv:19.0) Gecko/20121129 Firefox/19.0
32 | Mozilla/5.0 (Windows NT 6.2; rv:20.0) Gecko/20121202 Firefox/20.0
33 | Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Maxthon 2.0)
34 | Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre
35 | Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 )
36 | Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)
37 | Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a
38 | Mozilla/2.02E (Win95; U)
39 | Mozilla/3.01Gold (Win95; I)
40 | Mozilla/4.8 [en] (Windows NT 5.1; U)
41 | Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)
42 | Opera/7.50 (Windows XP; U)
43 | Opera/7.50 (Windows ME; U) [en]
44 | Opera/7.51 (Windows NT 5.1; U) [en]
45 | Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.0
46 | Opera/9.25 (Windows NT 6.0; U; en)
47 | Opera/9.80 (Windows NT 5.2; U; en) Presto/2.2.15 Version/10.10
48 | Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10
49 | Opera/9.80 (Windows NT 6.1; U; en) Presto/2.7.62 Version/11.01
50 | Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00
51 | Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14
52 | Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2
53 | Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10
54 | Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18
55 | Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12
56 | Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1
57 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Camino/2.2.1
58 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre Camino/2.2a1pre
59 | Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8
60 | Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3
61 | Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13
62 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1
63 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2
64 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7
65 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3
66 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.4 (KHTML like Gecko) Chrome/22.0.1229.79 Safari/537.4
67 | Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0
68 | Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3
69 | Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5
70 | Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14
71 | Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15
72 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
73 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0
74 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0
75 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1
76 | Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20120813 Firefox/16.0
77 | Mozilla/4.0 (compatible; MSIE 5.15; Mac_PowerPC)
78 | Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.15
79 | Opera/9.0 (Macintosh; PPC Mac OS X; U; en)
80 | Opera/9.20 (Macintosh; Intel Mac OS X; U; en)
81 | Opera/9.64 (Macintosh; PPC Mac OS X; U; en) Presto/2.1.1
82 | Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.6.30 Version/10.61
83 | Opera/9.80 (Macintosh; Intel Mac OS X 10.4.11; U; en) Presto/2.7.62 Version/11.00
84 | Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/85.8
85 | Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/125.8
86 | Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr-fr) AppleWebKit/312.5 (KHTML, like Gecko) Safari/312.3
87 | Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/418.8 (KHTML, like Gecko) Safari/419.3
88 | ELinks (0.4pre5; Linux 2.6.10-ac7 i686; 80x33)
89 | ELinks/0.9.3 (textmode; Linux 2.6.9-kanotix-8 i686; 127x41)
90 | ELinks/0.12~pre5-4
91 | Links/0.9.1 (Linux 2.4.24; i386;)
92 | Links (2.1pre15; Linux 2.4.26 i686; 158x61)
93 | Links (2.3pre1; Linux 2.6.38-8-generic x86_64; 170x48)
94 | Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/0.8.12
95 | w3m/0.5.1
96 | Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian
97 | Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8
98 | Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9
99 | Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7
100 | Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0
101 | Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15
102 | Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15
103 | Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24
104 | Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1
105 | Mozilla/5.0 Slackware/13.37 (X11; U; Linux x86_64; en-US) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41
106 | Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1
107 | Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2
108 | Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5
109 | Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.5
110 | Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)
111 | Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8
112 | Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12
113 | Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11
114 | Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2
115 | Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5
116 | Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3
117 | Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9
118 | Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8
119 | Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre
120 | Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
121 | Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
122 | Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
123 | Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre
124 | Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0
125 | Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0
126 | Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1
127 | Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0
128 | Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1
129 | Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0
130 | Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0
131 | Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:14.0) Gecko/20100101 Firefox/14.0.1
132 | Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14
133 | Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)
134 | Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0
135 | Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0
136 | Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2
137 | Mozilla/5.0 (X11; Linux i686; rv:14.0) Gecko/20100101 Firefox/14.0.1 Iceweasel/14.0.1
138 | Mozilla/5.0 (X11; Linux x86_64; rv:15.0) Gecko/20120724 Debian Iceweasel/15.02
139 | Konqueror/3.0-rc4; (Konqueror/3.0-rc4; i686 Linux;;datecode)
140 | Mozilla/5.0 (compatible; Konqueror/3.3; Linux 2.6.8-gentoo-r3; X11;
141 | Mozilla/5.0 (compatible; Konqueror/3.5; Linux 2.6.30-7.dmz.1-liquorix-686; X11) KHTML/3.5.10 (like Gecko) (Debian package 4:3.5.10.dfsg.1-1 b1)
142 | Mozilla/5.0 (compatible; Konqueror/3.5; Linux; en_US) KHTML/3.5.6 (like Gecko) (Kubuntu)
143 | Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre
144 | Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7
145 | MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23
146 | Opera/9.64 (X11; Linux i686; U; Linux Mint; nb) Presto/2.1.1
147 | Opera/9.80 (X11; Linux i686; U; en) Presto/2.2.15 Version/10.10
148 | Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00
149 | Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34
150 | Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12
151 | Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)
152 | ELinks (0.4.3; NetBSD 3.0.2PATCH sparc64; 141x19)
153 | Links (2.1pre15; FreeBSD 5.3-RELEASE i386; 196x84)
154 | Lynx/2.8.7dev.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8d
155 | w3m/0.5.1
156 | Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0
157 | Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3
158 | Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16
159 | Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6
160 | Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3
161 | Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5
162 | Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8
163 | Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0
164 | Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15
165 | Mozilla/5.0 (compatible; Konqueror/3.5; NetBSD 4.0_RC3; X11) KHTML/3.5.7 (like Gecko)
166 | Mozilla/5.0 (compatible; Konqueror/3.5; SunOS) KHTML/3.5.1 (like Gecko)
167 | Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko
168 | Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)
169 | Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)
170 | Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; BOLT/2.800) AppleWebKit/534.6 (KHTML, like Gecko) Version/5.0 Safari/534.6.3
171 | Mozilla/5.0 (Linux; Android 4.4.2; SAMSUNG-SM-T537A Build/KOT49H) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.141 Safari/537.36
172 | Mozilla/5.0 (Linux; Android 8.0.0; Pixel XL Build/OPR6.170623.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.107 Mobile Safari/537.36
173 | Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; DEVICE INFO) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Mobile Safari/537.36 Edge/12.0
174 | Mozilla/5.0 (Android; Mobile; rv:35.0) Gecko/35.0 Firefox/35.0
175 | Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.12; Microsoft ZuneHD 4.3)
176 | Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 7.11)
177 | Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0) Asus;Galaxy6
178 | Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)
179 | Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)
180 | Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch)
181 | Mozilla/1.22 (compatible; MSIE 5.01; PalmOS 3.0) EudoraWeb 2.1
182 | Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
183 | Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1
184 | Mozilla/5.0 (Maemo; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1
185 | Mozilla/5.0 (Maemo; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1
186 | Mozilla/5.0 (Android 6.0.1; Mobile; rv:48.0) Gecko/48.0 Firefox/48.0
187 | Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016
188 | Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020
189 | Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025
190 | Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0
191 | Opera/9.51 Beta (Microsoft Windows; PPC; Opera Mobi/1718; U; en)
192 | Opera/9.60 (J2ME/MIDP; Opera Mini/4.1.11320/608; U; en) Presto/2.2.0
193 | Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14320/554; U; cs) Presto/2.2.0
194 | Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; ru) Presto/2.4.18 Version/10.00
195 | Opera/10.61 (J2ME/MIDP; Opera Mini/5.1.21219/19.999; en-US; rv:1.9.3a5) WebKit/534.5 Presto/2.6.30
196 | Opera/9.80 (Android; Opera Mini/7.5.33361/31.1543; U; en) Presto/2.8.119 Version/11.1010
197 | Opera/9.80 (J2ME/MIDP; Opera Mini/8.0.35626/37.8918; U; en) Presto/2.12.423 Version/12.16
198 | Mozilla/5.0 (Linux; Android 5.1.1; Nexus 7 Build/LMY47V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.78 Safari/537.36 OPR/30.0.1856.93524
199 | Opera/9.80 (Android; Opera Mini/9.0.1829/66.318; U; en) Presto/2.12.423 Version/12.16
200 | Opera/9.80 (Linux i686; Opera Mobi/1040; U; en) Presto/2.5.24 Version/10.00
201 | POLARIS/6.01 (BREW 3.1.5; U; en-us; LG; LX265; POLARIS/6.01/WAP) MMP/2.0 profile/MIDP-2.1 Configuration/CLDC-1.1
202 | Mozilla/5.0 (X11; U; Linux x86_64; en-gb) AppleWebKit/534.35 (KHTML, like Gecko) Chrome/11.0.696.65 Safari/534.35 Puffin/2.9174AP
203 | Mozilla/5.0 (X11; U; Linux x86_64; en-us) AppleWebKit/534.35 (KHTML, like Gecko) Chrome/11.0.696.65 Safari/534.35 Puffin/2.9174AT
204 | Mozilla/5.0 (iPod; U; CPU iPhone OS 6_1 like Mac OS X; en-HK) AppleWebKit/534.35 (KHTML, like Gecko) Chrome/11.0.696.65 Safari/534.35 Puffin/3.9174IP Mobile
205 | Mozilla/5.0 (X11; U; Linux x86_64; en-AU) AppleWebKit/534.35 (KHTML, like Gecko) Chrome/11.0.696.65 Safari/534.35 Puffin/3.9174IT
206 | Mozilla/5.0 (X11; U; Linux i686; en-gb) AppleWebKit/534.35 (KHTML, like Gecko) Chrome/11.0.696.65 Safari/534.35 Puffin/2.0.5603M
207 | Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.114 Safari/537.36 Puffin/4.5.0IT
208 | Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17
209 | Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5
210 | Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5
211 | Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25
212 | Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; XBLWP7; ZuneWP7) UCBrowser/2.9.0.263
213 | Mozilla/5.0 (Linux; U; Android 2.3.3; en-us ; LS670 Build/GRI40) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1/UCBrowser/8.6.1.262/145/355
214 | DoCoMo/2.0 SH901iC(c100;TB;W24H12)
215 | Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)
216 | Mozilla/4.0 (compatible; MSIE 6.0; j2me) ReqwirelessWeb/3.5
217 | Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1
218 | BlackBerry7520/4.0.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/5.0.3.3 UP.Link/5.1.2.12 (Google WAP Proxy/1.0)
219 | Nokia6100/1.0 (04.01) Profile/MIDP-1.0 Configuration/CLDC-1.0
220 | Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1
221 | Mozilla/5.0 (compatible; archive.org_bot +http://www.archive.org/details/archive.org_bot)
222 | Mozilla/5.0 (compatible; archive.org_bot; Wayback Machine Live Record; +http://archive.org/details/archive.org_bot)
223 | Mozilla/5.0 (compatible; alexa site audit/1.0; +http://www.alexa.com/help/webmasters; )
224 | EmailWolf 1.00
225 | facebookexternalhit/1.1
226 | facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)
227 | Facebot
228 | Gaisbot/3.0 (robot@gais.cs.ccu.edu.tw; http://gais.cs.ccu.edu.tw/robot.php)
229 | grub-client-1.5.3; (grub-client-1.5.3; Crawl your own stuff with http://grub.org)
230 | Gulper Web Bot 0.2.4 (www.ecsl.cs.sunysb.edu/~maxim/cgi-bin/Link/GulperBot)
231 | Screaming Frog SEO Spider/8.1
232 | TurnitinBot (https://turnitin.com/robot/crawlerinfo.html)
233 | Twitterbot/1.0
234 | Xenu Link Sleuth/1.3.8
235 | Mozilla/3.0 (compatible; NetPositive/2.1.1; BeOS)
236 | Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.9a1) Gecko/20060702 SeaMonkey/1.5a
237 | Mozilla/5.0 (OS/2; U; OS/2; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Arora/0.11.0 Safari/533.3
238 | Mozilla/5.0 (OS/2; Warp 4.5; rv:10.0.12) Gecko/20100101 Firefox/10.0.12
239 | Mozilla/5.0 (OS/2; Warp 4.5; rv:24.0) Gecko/20100101 Firefox/24.0
240 | Mozilla/5.0 (OS/2; Warp 4.5; rv:31.0) Gecko/20100101 Firefox/31.0
241 | Mozilla/5.0 (OS/2; Warp 4.5; rv:38.0) Gecko/20100101 Firefox/38.0
242 | Mozilla/5.0 (OS/2; Warp 4.5; rv:45.0) Gecko/20100101 Firefox/45.0
243 | Mozilla/5.0 (OS/2; U; OS/2; en-US) AppleWebKit/533.3 (KHTML, like Gecko) QupZilla/1.3.1 Safari/533.3
244 | Mozilla/5.0 (OS/2; Warp 4.5; rv:10.0.12) Gecko/20130108 Firefox/10.0.12 SeaMonkey/2.7.2
245 | Mozilla/5.0 (OS/2; Warp 4.5; rv:24.0) Gecko/20100101 Firefox/24.0 SeaMonkey/2.21
246 | Mozilla/5.0 (OS/2; Warp 4.5; rv:31.0) Gecko/20100101 Firefox/31.0 SeaMonkey/2.28
247 | Mozilla/5.0 (OS/2; Warp 4.5; rv:38.0) Gecko/20100101 Firefox/38.0 SeaMonkey/2.35
248 | Mozilla/5.0 (OS/2; Warp 4.5; rv:45.0) Gecko/20100101 Firefox/45.0 SeaMonkey/2.42.9esr
249 | Adobe Application Manager 2.0
250 | AndroidDownloadManager/5.1 (Linux; U; Android 5.1; Z820 Build/LMY47D)
251 | Download Demon/3.5.0.11
252 | Offline Explorer/2.5
253 | SuperBot/4.4.0.60 (Windows XP)
254 | WebCopier v4.6
255 | Web Downloader/6.9
256 | WebZIP/3.5 (http://www.spidersoft.com)
257 | Wget/1.9 cvs-stable (Red Hat modified)
258 | Wget/1.9.1
259 | Wget/1.12 (freebsd8.1)
260 | Bloglines/3.1 (http://www.bloglines.com)
261 | everyfeed-spider/2.0 (http://www.everyfeed.com)
262 | FeedFetcher-Google; ( http://www.google.com/feedfetcher.html)
263 | Gregarius/0.5.2 ( http://devlog.gregarius.net/docs/ua)
264 | Mozilla/5.0 (PLAYSTATION 3; 2.00)
265 | Mozilla/5.0 (PLAYSTATION 3; 1.10)
266 | Mozilla/4.0 (PSP (PlayStation Portable); 2.00)
267 | Opera/9.30 (Nintendo Wii; U; ; 2047-7; en)
268 | wii libnup/1.0
269 | Java/1.6.0_13
270 | libwww-perl/5.820
271 | Peach/1.01 (Ubuntu 8.04 LTS; U; en)
272 | Python-urllib/2.5
273 | HTMLParser/1.6
274 | Jigsaw/2.2.5 W3C_CSS_Validator_JFouffa/2.0
275 | W3C_Validator/1.654
276 | W3C_Validator/1.305.2.12 libwww-perl/5.64
277 | P3P Validator
278 | CSSCheck/1.2.2
279 | WDG_Validator/1.6.2
280 | facebookscraper/1.0( http://www.facebook.com/sharescraper_help.php)
281 | grub-client-1.5.3; (grub-client-1.5.3; Crawl your own stuff with http://grub.org)
282 | iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)
283 | Microsoft URL Control - 6.00.8862
284 | Roku/DVP-4.1 (024.01E01250A)
285 | Mozilla/5.0 (SMART-TV; X11; Linux armv7l) AppleWebkit/537.42 (KHTML, like Gecko) Chromium/25.0.1349.2 Chrome/25.0.1349.2 Safari/537.42
286 | SearchExpress
287 |
--------------------------------------------------------------------------------
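user-agents.db above is a flat list, one User-Agent string per line, presumably consumed when the scrapers randomize request headers. A sketch of that use (the relative path is an assumption):

    import random

    def random_user_agent(path="storage/user-agents.db"):
        # Read the flat-file list, skipping blank lines, and pick one entry.
        with open(path) as fh:
            agents = [line.strip() for line in fh if line.strip()]
        return random.choice(agents)

    # e.g. headers = {"User-Agent": random_user_agent()}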
/test/test_advanced_background_checks.rst:
--------------------------------------------------------------------------------
1 | *******************************************
2 | Advanced Background Check basic doc tests
3 | *******************************************
4 |
5 | .. contents:: Table of Contents
6 |
7 | Introduction
8 | ************
9 |
10 | Basic doc tests exercising the Advanced Background
11 | Checks plugin through Python's doctest framework.
12 |
13 | Tests are implemented via RST files.
14 |
15 | Importing Advanced Background Checks Plugin
16 | *******************************************
17 |
18 | >>> from skiptracer.plugins.advance_background_checks import AdvanceBackgroundGrabber
19 | >>> abc_obj = AdvanceBackgroundGrabber()
20 |
21 |
--------------------------------------------------------------------------------
/test/test_runner.py:
--------------------------------------------------------------------------------
1 | import doctest
2 | doctest.testfile("test_advanced_background_checks.rst")
3 |
--------------------------------------------------------------------------------
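doctest.testfile() as called in test_runner.py above is silent on success and returns a result object rather than failing the process. A sketch of a slightly stricter runner (the option flags are a matter of taste):

    import doctest
    import sys

    results = doctest.testfile(
        "test_advanced_background_checks.rst",
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    # results is a TestResults(failed, attempted) tuple;
    # exit nonzero so CI notices any failure.
    sys.exit(1 if results.failed else 0)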