├── .gitignore ├── README.md ├── ip-osint.py ├── modules ├── arin.py ├── cencys.py ├── core.py ├── hurricane.py ├── ripe.py ├── securitytrails.py └── whois.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_STORE 2 | 3 | # 4 | modules/__pycache__/ 5 | output/* 6 | 7 | 8 | 9 | #chromedriver 10 | modules/chromedriver* 11 | 12 | # Virtualenv related 13 | bin/ 14 | include/ 15 | pip-selfcheck.json 16 | static-server/media-root/status/cfe/ 17 | 18 | # Django related 19 | # src//settings/local.py 20 | # static-cdn/ # any collected static files 21 | 22 | 23 | # Byte-compiled / optimized / DLL files 24 | __pycache__/ 25 | *.py[cod] 26 | *$py.class 27 | 28 | # C extensions 29 | *.so 30 | 31 | # Distribution / packaging 32 | .Python 33 | build/ 34 | develop-eggs/ 35 | dist/ 36 | downloads/ 37 | eggs/ 38 | .eggs/ 39 | lib/ 40 | lib64/ 41 | parts/ 42 | sdist/ 43 | var/ 44 | wheels/ 45 | *.egg-info/ 46 | .installed.cfg 47 | *.egg 48 | 49 | # PyInstaller 50 | # Usually these files are written by a python script from a template 51 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 52 | *.manifest 53 | *.spec 54 | 55 | # Installer logs 56 | pip-log.txt 57 | pip-delete-this-directory.txt 58 | 59 | # Unit test / coverage reports 60 | htmlcov/ 61 | .tox/ 62 | .coverage 63 | .coverage.* 64 | .cache 65 | nosetests.xml 66 | coverage.xml 67 | *.cover 68 | .hypothesis/ 69 | 70 | # Translations 71 | *.mo 72 | *.pot 73 | 74 | # Django stuff: 75 | *.log 76 | local_settings.py 77 | 78 | # Flask stuff: 79 | instance/ 80 | .webassets-cache 81 | 82 | # Scrapy stuff: 83 | .scrapy 84 | 85 | # Sphinx documentation 86 | docs/_build/ 87 | 88 | # PyBuilder 89 | target/ 90 | 91 | # Jupyter Notebook 92 | .ipynb_checkpoints 93 | 94 | # pyenv 95 | .python-version 96 | 97 | # celery beat schedule file 98 | celerybeat-schedule 99 | 100 | # SageMath parsed files 101 | *.sage.py 102 | 103 | # Environments 104 | .env 105 | .venv 106 | env/ 107 | venv/ 108 | ENV/ 109 | 110 | # Spyder project settings 111 | .spyderproject 112 | .spyproject 113 | 114 | # Rope project settings 115 | .ropeproject 116 | 117 | # mkdocs documentation 118 | /site 119 | 120 | # mypy 121 | .mypy_cache/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ### ⚠️WARNING: This project now become part of [Metabigor](https://github.com/j3ssie/Metabigor) project ⚠️ 3 | 4 | IPOsint 5 | ============ 6 | Discovery IP Address space of the target 7 | 8 | # What is IPOsint? 9 | IPOsint allows you to discover IP Address of the target from a great resource without register or any API key 10 | 11 | # Installation 12 | ``` 13 | git clone https://github.com/j3ssie/IPOsint 14 | pip3 install -r requirements.txt 15 | ``` 16 | 17 | # How to use 18 | 19 | ``` 20 | ./ip-osint.py -t company_name 21 | ``` 22 | 23 | 24 | # Awesome resource have been used 25 | * [Whois](http://whois.domaintools.com) 26 | * [Ripe](https://apps.db.ripe.net/) 27 | * [Arin](https://whois.arin.net/ui/query.do) 28 | * [Hurricane](https://bgp.he.net/) 29 | * [Censys](https://censys.io/) 30 | * [securitytrails](https://securitytrails.com/) 31 | 32 | 33 | # Disclaimer 34 | This tool is for educational purposes only. You are responsible for your own actions. If you mess something up or break any laws while using this software, it's your fault, and your fault only. 35 | 36 | 37 | # Contact 38 | [@j3ssiejjj](https://twitter.com/j3ssiejjj) 39 | -------------------------------------------------------------------------------- /ip-osint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | import os, sys, glob 4 | import argparse 5 | from pprint import pprint 6 | 7 | # # import modules 8 | from modules import core 9 | from modules import whois 10 | from modules import ripe 11 | from modules import arin 12 | from modules import hurricane 13 | from modules import cencys 14 | from modules import securitytrails 15 | 16 | 17 | 18 | # Console colors 19 | W = '\033[1;0m' # white 20 | R = '\033[1;31m' # red 21 | G = '\033[1;32m' # green 22 | O = '\033[1;33m' # orange 23 | B = '\033[1;34m' # blue 24 | Y = '\033[1;93m' # yellow 25 | P = '\033[1;35m' # purple 26 | C = '\033[1;36m' # cyan 27 | GR = '\033[1;37m' # gray 28 | colors = [G,R,B,P,C,O,GR] 29 | 30 | 31 | ############# 32 | # IPOsint 33 | ############# 34 | 35 | __author__ = '@j3ssiejjj' 36 | __version__ = '1.0' 37 | 38 | 39 | ### Global stuff 40 | current_path = os.path.dirname(os.path.realpath(__file__)) 41 | ### 42 | 43 | options = { 44 | 'target' : '', 45 | 'verbose' : False, 46 | 'cwd' : current_path, 47 | 'cidr_regex' : "((\d){1,3}\.){3}(\d){1,3}(\/(\d){1,3})?", 48 | # gonna match these: 1.2.3.4 - 5.6.7.8 49 | 'range_ip_regex' : '((\d){1,3}\.){3}(\d){1,3}(\/(\d){1,3})?\s\-\s((\d){1,3}\.){3}(\d){1,3}(\/(\d){1,3})?' 50 | } 51 | 52 | 53 | 54 | def cowsay(): 55 | print ("""{1} 56 | ----------------------------- 57 | < You didn't say the {2}MAGIC WORD{1} > 58 | ----------------------------- 59 | \ ^__^ 60 | \ (oo)\_______ 61 | (__)\ )\/ 62 | \||----w | 63 | || || Contact: {2}{3}{1} 64 | """.format(C, G, P, __author__)) 65 | 66 | 67 | def parsing_argument(args): 68 | if args.target: 69 | options['target'] = args.target 70 | 71 | if args.verbose: 72 | options['verbose'] = True 73 | 74 | if args.output: 75 | options['output'] = args.output 76 | else: 77 | options['output'] = args.target 78 | 79 | 80 | if args.target_list: 81 | if os.path.exists(args.target_list): 82 | with open(args.target_list, 'r+') as ts: 83 | targetlist = ts.read().splitlines() 84 | 85 | for target in targetlist: 86 | options['target'] = target 87 | single_target() 88 | print("{2}>++('> >++('{1}>{2} Target done: {0} {1}<{2}')++< <')++<".format(target, P, G)) 89 | 90 | else: 91 | single_target() 92 | 93 | 94 | really_uniq() 95 | 96 | def single_target(): 97 | whois.Whois(options) 98 | ripe.Ripe(options) 99 | arin.Arin(options) 100 | hurricane.Hurricane(options) 101 | cencys.Censys(options) 102 | securitytrails.SecurityTrails(options) 103 | 104 | 105 | 106 | 107 | def really_uniq(): 108 | core.print_good("Unique the output") 109 | 110 | with open(options['output'], 'r+') as o: 111 | output = o.read().splitlines() 112 | 113 | output = core.strip_private_ip(output) 114 | with open(options['output'], 'w+') as o: 115 | for item in set(output): 116 | o.write(item + "\n") 117 | 118 | core.check_output(options['output']) 119 | 120 | def update(): 121 | os.system('git fetch --all && git reset --hard origin/master') 122 | sys.exit(0) 123 | 124 | def main(): 125 | cowsay() 126 | parser = argparse.ArgumentParser(description="Discovery IP Space of the target") 127 | parser.add_argument('-t','--target' , action='store', dest='target', help='type company name gona give you better result') 128 | parser.add_argument('-T','--target_list' , action='store', dest='target_list', help='list of target') 129 | parser.add_argument('-o','--output' , action='store', dest='output', help='output') 130 | parser.add_argument('--update', action='store_true', help='update lastest from git') 131 | parser.add_argument('-v', '--verbose', action='store_true', help='turn on verbose message') 132 | 133 | args = parser.parse_args() 134 | if len(sys.argv) == 1: 135 | # help_message() 136 | sys.exit(0) 137 | 138 | core.install_webdrive() 139 | if args.update: 140 | update() 141 | 142 | parsing_argument(args) 143 | 144 | 145 | if __name__ == '__main__': 146 | main() 147 | -------------------------------------------------------------------------------- /modules/arin.py: -------------------------------------------------------------------------------- 1 | import re, os, json 2 | import requests 3 | import urllib3 4 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 5 | 6 | from . import core 7 | 8 | 9 | class Arin(): 10 | """docstring for Whois""" 11 | def __init__(self, options): 12 | self.options = options 13 | core.print_banner("Starting scraping IP from Arin") 14 | try: 15 | self.initial() 16 | except: 17 | core.print_bad("Some thing wrong with Arin module") 18 | 19 | 20 | def initial(self): 21 | real_data = self.get_real_content() 22 | 23 | #get the raw IP as normal 24 | ips = core.grep_the_IP(real_data, self.options['cidr_regex']) 25 | core.write_to_output(ips, self.options['output']) 26 | 27 | #get the range of ip 28 | range_ips = core.grep_the_IP(real_data, self.options['range_ip_regex']) 29 | if len(range_ips) > 0: 30 | core.print_good("Range IP detect") 31 | for item in range_ips: 32 | #1.2.3.4 - 5.6.7.8 33 | start = item.split('-')[0].strip() 34 | end = item.split('-')[1].strip() 35 | 36 | ips2 = core.get_IP_from_range(start, end) 37 | core.write_to_output(ips2, self.options['output']) 38 | 39 | 40 | 41 | #doing a logic based on some web site to get the real content 42 | def get_real_content(self): 43 | target = self.options['target'] 44 | 45 | url = "https://whois.arin.net:443/ui/query.do" 46 | core.print_verbose(url, self.options) 47 | 48 | headers = {"User-Agent": "Mozilla/5.0 (X11; FreeBSD amd64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate", "Referer": "https://whois.arin.net/ui/query.do", "Content-Type": "application/x-www-form-urlencoded", "DNT": "1", "Connection": "close", "Upgrade-Insecure-Requests": "1"} 49 | data={"xslt": "https://localhost:8080/whoisrws/servlet/arin.xsl", "flushCache": "false", "queryinput": target, "whoisSubmitButton": " "} 50 | r = requests.post(url, headers=headers, data=data, verify=False) 51 | 52 | return r.text 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /modules/cencys.py: -------------------------------------------------------------------------------- 1 | import re, os, json, time 2 | import requests 3 | import ssl 4 | import socket 5 | import hashlib 6 | from bs4 import BeautifulSoup 7 | 8 | from . import core 9 | 10 | 11 | class Censys(): 12 | """docstring for Whois""" 13 | def __init__(self, options): 14 | self.options = options 15 | core.print_banner("Starting scraping IP from Censys") 16 | self.initial() 17 | 18 | def initial(self): 19 | real_data = self.get_real_content() 20 | 21 | #get the raw IP as normal 22 | ips = core.grep_the_IP(real_data, self.options['cidr_regex']) 23 | core.write_to_output(ips, self.options['output']) 24 | 25 | #get the range of ip 26 | range_ips = core.grep_the_IP(real_data, self.options['range_ip_regex']) 27 | if len(range_ips) > 0: 28 | core.print_good("Range IP detect") 29 | for item in range_ips: 30 | #1.2.3.4 - 5.6.7.8 31 | start = item.split('-')[0].strip() 32 | end = item.split('-')[1].strip() 33 | 34 | ips2 = core.get_IP_from_range(start, end) 35 | core.write_to_output(ips2, self.options['output']) 36 | 37 | 38 | 39 | #doing a logic based on some web site to get the real content 40 | def get_real_content(self): 41 | target = self.options['target'] 42 | try: 43 | cert_fin = self.get_cert_fingerprint(target) 44 | except: 45 | core.print_bad("Your target seem doesn't run on SSL") 46 | return '' 47 | 48 | url = "https://censys.io/ipv4?q={0}".format(cert_fin) 49 | core.print_verbose(url, self.options) 50 | 51 | response = core.open_with_chrome(url) 52 | 53 | false_positive = 'Censys only allows 10 search queries' 54 | if false_positive in response: 55 | core.print_bad("You're seem to be block from Censys. Please try to run this tool via Proxy") 56 | 57 | # print(cert_fin) 58 | final_res = self.get_all_page(url, response) 59 | 60 | return final_res 61 | 62 | #check if more page or not 63 | def get_all_page(self, url, response): 64 | more_response = response 65 | 66 | #parsing 67 | soup = BeautifulSoup(response, 'lxml') 68 | divs = soup.find_all('div') 69 | 70 | 71 | for div in divs: 72 | try: 73 | if 'SearchResultSectionHeader__subheading' in div['class']: 74 | raw_data = div.text 75 | except: 76 | pass 77 | 78 | 79 | #check if there is more than 1 page or not 80 | try: 81 | #should return like '1/4' 82 | num_page = raw_data.split("Page: ")[1].split("\n")[0] 83 | current = int(num_page.split('/')[0]) 84 | total = int(num_page.split('/')[1]) 85 | 86 | if current < total: 87 | for i in range(current, total): 88 | page_url = url + "&page=" + str(i + 1) 89 | core.print_verbose(page_url, self.options) 90 | more_response += core.open_with_chrome(page_url) 91 | except: 92 | return response 93 | 94 | return more_response 95 | 96 | 97 | #get cert fingerprint 98 | def get_cert_fingerprint(self, addr): 99 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 100 | sock.settimeout(2) 101 | wrappedSocket = ssl.wrap_socket(sock) 102 | 103 | try: 104 | wrappedSocket.connect((addr, 443)) 105 | except: 106 | response = False 107 | else: 108 | der_cert_bin = wrappedSocket.getpeercert(True) 109 | pem_cert = ssl.DER_cert_to_PEM_cert(wrappedSocket.getpeercert(True)) 110 | 111 | #Thumbprint 112 | thumb_md5 = hashlib.md5(der_cert_bin).hexdigest() 113 | thumb_sha1 = hashlib.sha1(der_cert_bin).hexdigest() 114 | thumb_sha256 = hashlib.sha256(der_cert_bin).hexdigest() 115 | 116 | wrappedSocket.close() 117 | return thumb_sha256 118 | 119 | 120 | 121 | 122 | -------------------------------------------------------------------------------- /modules/core.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | import time 4 | import ipaddress 5 | import platform 6 | import requests 7 | import shutil 8 | import zipfile 9 | from urllib.parse import urlparse 10 | from bs4 import BeautifulSoup 11 | from selenium import webdriver 12 | from selenium.webdriver.chrome.options import Options 13 | 14 | 15 | def get_IP_from_range(start, end): 16 | ips = [] 17 | start_ip = ipaddress.IPv4Address(start) 18 | end_ip = ipaddress.IPv4Address(end) 19 | for ip_int in range(int(start_ip), int(end_ip)): 20 | ip = str(ipaddress.IPv4Address(ip_int)) 21 | ips.append(ip) 22 | return ips 23 | 24 | #just grep the IP address 25 | 26 | 27 | def grep_the_IP(data, cird_regex): 28 | ips = [] 29 | p = re.compile(cird_regex) 30 | 31 | for m in p.finditer(data): 32 | ips.append(m.group()) 33 | print_info(m.group()) 34 | return ips 35 | 36 | #strip out the private IP 37 | 38 | 39 | def strip_private_ip(data): 40 | new_data = [] 41 | for item in data: 42 | try: 43 | if not ipaddress.ip_address(item).is_private: 44 | new_data.append(item) 45 | except: 46 | new_data.append(item) 47 | 48 | return new_data 49 | 50 | #write the list of data to a file 51 | 52 | 53 | def write_to_output(data, output_file): 54 | with open(output_file, 'a+') as o: 55 | for item in set(data): 56 | o.write(item + "\n") 57 | 58 | 59 | # just beatiful soup the html 60 | def just_soup(html): 61 | soup = BeautifulSoup(html, "lxml") 62 | return soup 63 | 64 | # Possible paths for Google Chrome on porpular OS 65 | chrome_paths = [ 66 | "/usr/bin/chromium", 67 | "/usr/bin/google-chrome-stable", 68 | "/usr/bin/google-chrome", 69 | "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome", 70 | "/Applications/Chromium.app/Contents/MacOS/Chromium", 71 | "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe", 72 | ] 73 | 74 | 75 | def get_chrome_binary(): 76 | for chrome_binary in chrome_paths: 77 | if os.path.isfile(chrome_binary): 78 | return chrome_binary 79 | 80 | print_bad("Not found Chrome binary on your system") 81 | 82 | 83 | # get version of your chrome 84 | def get_chrome_version(): 85 | chrome_binary = get_chrome_binary() 86 | chrome_version = os.popen( 87 | '"{0}" -version'.format(chrome_binary)).read().lower() 88 | chrome_app = os.path.basename(os.path.normpath(chrome_binary)).lower() 89 | # just get some main release 90 | version = chrome_version.split(chrome_app)[1].strip().split(' ')[0] 91 | relative_version = '.'.join(version.split('.')[:2]) 92 | return relative_version 93 | 94 | 95 | def install_webdrive(): 96 | current_path = os.path.dirname(os.path.realpath(__file__)) 97 | chromedrive_check = shutil.which(current_path + "/chromedriver") 98 | 99 | if chromedrive_check: 100 | return current_path + "/chromedriver" 101 | 102 | print_info("Download chromedriver") 103 | relative_version = get_chrome_version() 104 | 105 | if float(relative_version) < 73: 106 | print_info("Unsupport Chromium version support detected: {0}".format(relative_version)) 107 | print_bad("You need to update your Chromium.(e.g: sudo apt install chromium -y)") 108 | return 109 | 110 | chrome_driver_url = 'https://sites.google.com/a/chromium.org/chromedriver/downloads' 111 | # predefine download url 112 | download_url = 'https://chromedriver.storage.googleapis.com/index.html?path=74.0.3729.6/' 113 | r = requests.get(chrome_driver_url, allow_redirects=True) 114 | if r.status_code == 200: 115 | soup = just_soup(r.text) 116 | lis = soup.find_all("li") 117 | for li in lis: 118 | if 'If you are using Chrome version' in li.text: 119 | if relative_version in li.text: 120 | download_url = li.a.get('href') 121 | 122 | parsed_url = urlparse(download_url) 123 | zip_chromdriver = parsed_url.scheme + "://" + parsed_url.hostname + \ 124 | "/" + parsed_url.query.split('=')[1] 125 | 126 | os_check = platform.platform() 127 | if 'Darwin' in os_check: 128 | zip_chromdriver += "chromedriver_mac64.zip" 129 | elif 'Win' in os_check: 130 | zip_chromdriver += "chromedriver_win32.zip" 131 | elif 'Linux' in os_check: 132 | zip_chromdriver += "chromedriver_linux64.zip" 133 | else: 134 | zip_chromdriver += "chromedriver_linux64.zip" 135 | 136 | # print_info("Download: {0}".format(zip_chromdriver)) 137 | r3 = requests.get(zip_chromdriver, allow_redirects=True) 138 | 139 | open(current_path + "/chromedriver.zip", 'wb').write(r3.content) 140 | 141 | with open(current_path + '/chromedriver.zip', 'rb') as f: 142 | z = zipfile.ZipFile(f) 143 | for name in z.namelist(): 144 | z.extract(name, current_path) 145 | 146 | os.chmod(current_path + "/chromedriver", 0o775) 147 | if not shutil.which(current_path + "/chromedriver"): 148 | print_bad("Some thing wrong with chromedriver") 149 | sys.exit(-1) 150 | 151 | ##open url with chromedriver 152 | 153 | 154 | def open_with_chrome(url, delay=5): 155 | options = Options() 156 | options.add_argument("--headless") 157 | options.add_argument("--no-sandbox") 158 | options.add_argument("--ignore-certificate-errors") 159 | 160 | current_path = os.path.dirname(os.path.realpath(__file__)) 161 | chromedrive_check = os.path.isfile(current_path + "/chromedriver") 162 | if not chromedrive_check: 163 | raise ValueError("Some thing wrong with chromedriver path") 164 | 165 | chromedriver = current_path + '/chromedriver' 166 | browser = webdriver.Chrome(executable_path=chromedriver, options=options) 167 | 168 | browser.get(url) 169 | 170 | #wait for get the right response 171 | time.sleep(delay) 172 | response = browser.page_source 173 | browser.close() 174 | 175 | return response 176 | 177 | 178 | def false_positive(ip): 179 | #some IP are just example on some resouces 180 | black_list = ['141.212.120.90'] 181 | if ip in black_list: 182 | return True 183 | 184 | return False 185 | 186 | 187 | ######## print beautify 188 | # Console colors 189 | W = '\033[1;0m' # white 190 | R = '\033[1;31m' # red 191 | G = '\033[1;32m' # green 192 | O = '\033[1;33m' # orange 193 | B = '\033[1;34m' # blue 194 | Y = '\033[1;93m' # yellow 195 | P = '\033[1;35m' # purple 196 | C = '\033[1;36m' # cyan 197 | GR = '\033[1;37m' # gray 198 | colors = [G, R, B, P, C, O, GR] 199 | 200 | info = '{0}[*]{1} '.format(B, GR) 201 | ques = '{0}[?]{1} '.format(C, GR) 202 | bad = '{0}[-]{1} '.format(R, GR) 203 | good = '{0}[+]{1} '.format(G, GR) 204 | 205 | verbose = '{1}[{0}VERBOSE{1}] '.format(G, GR) 206 | 207 | 208 | def print_verbose(text, options): 209 | if options['verbose']: 210 | print(verbose + text) 211 | 212 | 213 | def print_banner(text): 214 | print('{1}--~~~=:>[ {2}{0}{1} ]>'.format(text, G, C)) 215 | 216 | 217 | def print_info(text): 218 | print(info + text) 219 | 220 | 221 | def print_ques(text): 222 | print(ques + text) 223 | 224 | 225 | def print_good(text): 226 | print(good + text) 227 | 228 | 229 | def print_bad(text): 230 | print(bad + text) 231 | 232 | 233 | def check_output(output): 234 | print('{1}--==[ Check the output: {2}{0}'.format(output, G, P)) 235 | -------------------------------------------------------------------------------- /modules/hurricane.py: -------------------------------------------------------------------------------- 1 | import re, os, json, time 2 | 3 | from selenium import webdriver 4 | from selenium.webdriver.chrome.options import Options 5 | 6 | from . import core 7 | 8 | 9 | class Hurricane(): 10 | """docstring for Whois""" 11 | def __init__(self, options): 12 | self.options = options 13 | core.print_banner("Starting scraping IP from Hurricane") 14 | try: 15 | self.initial() 16 | except: 17 | core.print_bad("Some thing wrong with Hurricane module") 18 | 19 | 20 | 21 | def initial(self): 22 | real_data = self.get_real_content() 23 | 24 | #get the raw IP as normal 25 | ips = core.grep_the_IP(real_data, self.options['cidr_regex']) 26 | core.write_to_output(ips, self.options['output']) 27 | 28 | #get the range of ip 29 | range_ips = core.grep_the_IP(real_data, self.options['range_ip_regex']) 30 | if len(range_ips) > 0: 31 | core.print_good("Range IP detect") 32 | for item in range_ips: 33 | #1.2.3.4 - 5.6.7.8 34 | start = item.split('-')[0].strip() 35 | end = item.split('-')[1].strip() 36 | 37 | ips2 = core.get_IP_from_range(start, end) 38 | core.write_to_output(ips2, self.options['output']) 39 | 40 | 41 | 42 | #doing a logic based on some web site to get the real content 43 | def get_real_content(self): 44 | target = self.options['target'] 45 | url = "https://bgp.he.net/search?search[search]={0}&commit=Search".format(target) 46 | core.print_verbose(url, self.options) 47 | response = core.open_with_chrome(url) 48 | return response 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /modules/ripe.py: -------------------------------------------------------------------------------- 1 | import re, os, json 2 | import requests 3 | import urllib3 4 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 5 | 6 | from . import core 7 | 8 | 9 | class Ripe(): 10 | """docstring for Whois""" 11 | def __init__(self, options): 12 | self.options = options 13 | core.print_banner("Starting scraping IP from Ripe") 14 | try: 15 | self.initial() 16 | except: 17 | core.print_bad("Something wrong with Ripe modules") 18 | 19 | def initial(self): 20 | real_data = self.get_real_content() 21 | 22 | #get the raw IP as normal 23 | ips = core.grep_the_IP(real_data, self.options['cidr_regex']) 24 | core.write_to_output(ips, self.options['output']) 25 | 26 | #get the range of ip 27 | range_ips = core.grep_the_IP(real_data, self.options['range_ip_regex']) 28 | if len(range_ips) > 0: 29 | core.print_good("Range IP detect") 30 | for item in range_ips: 31 | #1.2.3.4 - 5.6.7.8 32 | start = item.split('-')[0].strip() 33 | end = item.split('-')[1].strip() 34 | 35 | ips2 = core.get_IP_from_range(start, end) 36 | core.write_to_output(ips2, self.options['output']) 37 | 38 | 39 | 40 | #doing a logic based on some web site to get the real content 41 | def get_real_content(self): 42 | target = self.options['target'] 43 | 44 | url = "https://apps.db.ripe.net:443/db-web-ui/api/whois/search?abuse-contact=true&flags=B&ignore404=true&limit=100&managed-attributes=true&offset=0&query-string={0}&resource-holder=true".format(target) 45 | core.print_verbose(url, self.options) 46 | 47 | headers = {"User-Agent": "Mozilla/5.0 (X11; FreeBSD amd64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36", "Accept": "application/json, text/plain, */*", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate", "Referer": "https://apps.db.ripe.net/db-web-ui/", "X-Requested-With": "XMLHttpRequest", "DNT": "1", "Connection": "close", "Cache-Control": "max-age=0"} 48 | 49 | r = requests.get(url, headers=headers, verify=False) 50 | 51 | return r.text 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /modules/securitytrails.py: -------------------------------------------------------------------------------- 1 | import re, os, json, time 2 | 3 | from selenium import webdriver 4 | from selenium.webdriver.chrome.options import Options 5 | 6 | from . import core 7 | 8 | 9 | class SecurityTrails(): 10 | """docstring for Whois""" 11 | def __init__(self, options): 12 | self.options = options 13 | core.print_banner("Starting scraping IP from SecurityTrails") 14 | core.install_webdrive() 15 | self.initial() 16 | 17 | def initial(self): 18 | real_data = self.get_real_content() 19 | 20 | #get the raw IP as normal 21 | ips = core.grep_the_IP(real_data, self.options['cidr_regex']) 22 | core.write_to_output(ips, self.options['output']) 23 | 24 | #get the range of ip 25 | range_ips = core.grep_the_IP(real_data, self.options['range_ip_regex']) 26 | if len(range_ips) > 0: 27 | core.print_good("Range IP detect") 28 | for item in range_ips: 29 | #1.2.3.4 - 5.6.7.8 30 | start = item.split('-')[0].strip() 31 | end = item.split('-')[1].strip() 32 | 33 | ips2 = core.get_IP_from_range(start, end) 34 | core.write_to_output(ips2, self.options['output']) 35 | 36 | 37 | 38 | #doing a logic based on some web site to get the real content 39 | def get_real_content(self): 40 | target = self.options['target'] 41 | url = "https://securitytrails.com/domain/{0}/history/a".format(target) 42 | core.print_verbose(url, self.options) 43 | response = core.open_with_chrome(url) 44 | 45 | return response 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /modules/whois.py: -------------------------------------------------------------------------------- 1 | import re, os, json 2 | import requests 3 | import urllib3 4 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 5 | 6 | from . import core 7 | 8 | 9 | class Whois(): 10 | """docstring for Whois""" 11 | def __init__(self, options): 12 | self.options = options 13 | core.print_banner("Starting scraping IP from Whois") 14 | try: 15 | self.initial() 16 | except: 17 | core.print_bad("Something wrong with Whois modules") 18 | 19 | def initial(self): 20 | real_data = self.get_real_content() 21 | 22 | ips = core.grep_the_IP(real_data, self.options['cidr_regex']) 23 | 24 | core.write_to_output(ips, self.options['output']) 25 | 26 | #doing a logic based on some web site to get the real content 27 | def get_real_content(self): 28 | target = self.options['target'] 29 | url = "http://whois.domaintools.com:80/go/?q={0}&service=whois".format(target) 30 | core.print_verbose(url, self.options) 31 | 32 | headers = {"User-Agent": "Mozilla/5.0 (X11; FreeBSD amd64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate", "Referer": "http://whois.domaintools.com/", "DNT": "1", "Connection": "close", "Upgrade-Insecure-Requests": "1"} 33 | 34 | r = requests.get(url, headers=headers, verify=False) 35 | 36 | return r.text 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | lxml==4.2.4 2 | beautifulsoup4==4.6.3 3 | pycryptodome==3.7.2 4 | pyOpenSSL==17.5.0 5 | requests==2.19.1 6 | selenium-requests==1.3 7 | urllib3==1.23 8 | crypto==1.4.1 9 | cryptography==2.1.4 --------------------------------------------------------------------------------