├── APIs
├── __init__.py
├── dnsdumpster_api.py
└── utils.py
├── install
├── requirements.txt
├── install.sh
└── setup.py
├── config.py
├── .gitignore
├── README.md
└── subdoler.py
/APIs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/install/requirements.txt:
--------------------------------------------------------------------------------
progressbar
tmuxp
xlsxwriter
PySocks
requests
beautifulsoup4
aiodns
aiohttp
aiomultiprocess
aiosqlite
certifi
dnspython
netaddr
plotly
pyppeteer
PyYAML
retrying
shodan
texttable
lxml
uvloop
25 |
--------------------------------------------------------------------------------
/install/install.sh:
--------------------------------------------------------------------------------
# System packages: python3 + pip for subdoler itself; tmux for the parallel
# scan session, golang to build gobuster, tor for pwndb, snapd for amass.
apt install -y python3 python3-pip
apt install -y tmux golang tor snapd
sudo snap install amass

# Third-party tools are cloned next to subdoler under ../APIs/
# (the paths in config.py expect exactly these directory names).
git clone https://github.com/laramies/theHarvester ../APIs/theHarvester
git clone https://github.com/PaulSec/API-dnsdumpster.com ../APIs/API-dnsdumpster.com
git clone https://github.com/davidtavarez/pwndb ../APIs/pwndb
git clone https://github.com/OJ/gobuster ../APIs/gobuster
git clone https://github.com/aboul3la/Sublist3r ../APIs/Sublist3r

# Wordlist used by gobuster's DNS brute force (see gobuster_dictionary in config.py).
wget https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/bitquark-subdomains-top100000.txt -O ../APIs/bitquark-subdomains-top100000.txt
# Replace the upstream dnsdumpster example script with subdoler's wrapper.
cp ../APIs/dnsdumpster_api.py ../APIs/API-dnsdumpster.com/api.py
pip3 install -r requirements.txt
# Build the gobuster binary in place so config.py's gobuster_file path works.
cd ../APIs/gobuster && go get && go build
15 |
--------------------------------------------------------------------------------
/APIs/dnsdumpster_api.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from dnsdumpster.DNSDumpsterAPI import DNSDumpsterAPI
3 | import base64
4 | import sys
5 |
6 |
def get_args():
    """Parse the command line: input domains file (-f) and output file (-o)."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f', '--domains_file', required=True, action='store', help='Domains file')
    arg_parser.add_argument('-o', '--output_file', required=True, action='store', help='Output file')
    return arg_parser.parse_args()
13 |
14 |
def main():
    """Query DNSDumpster for every domain in the input file and append each
    discovered host name (one per line) to the output file.

    Reads the files named by the -f/-o command-line options; both files are
    now closed deterministically via `with` (the original leaked both handles).
    """
    args = get_args()
    with open(args.domains_file) as dom_file, open(args.output_file, "a") as res_file:
        domains = dom_file.read().splitlines()
        for d in domains:
            res = DNSDumpsterAPI(True).search(d)
            # A populated result dict has several keys; anything smaller
            # means the lookup returned nothing useful for this domain.
            if len(res) > 1:
                for entry in res['dns_records']['host']:
                    val = entry['domain']
                    print("%s" % val)
                    res_file.write(val + "\n")


# Guard so that importing this module (it is copied into
# API-dnsdumpster.com/api.py by install.sh) does not trigger a scan;
# subdoler always runs it as a script, so behavior there is unchanged.
if __name__ == "__main__":
    main()
29 |
30 |
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
import os

# File paths -- every third-party tool is expected under APIs/ relative to
# the directory subdoler is launched from (install.sh clones them there).
program_path = os.getcwd() + "/"
apis_folder_path = program_path + "APIs/"
dnsdumpster_script_file = apis_folder_path + "API-dnsdumpster.com/api.py"
pwndb_script_file = apis_folder_path + "pwndb/pwndb.py"
harvester_location = apis_folder_path + "theHarvester/"
gobuster_file = apis_folder_path + "gobuster/gobuster"
sublist3r_file = apis_folder_path + "Sublist3r/sublist3r.py"

# Subdomain Enumeration Setting
# *_active flags toggle each data source on/off (see README for details).
amass_active = True
gobuster_active = True
gobuster_dictionary = apis_folder_path + "bitquark-subdomains-top100000.txt"  # wordlist for the DNS brute force
gobuster_threads = 10
dnsdumpster_active = True
sublist3r_active = True
fdns_active = False  # requires the (very large) Rapid7 FDNS dataset below
fdns_file = "/media/root/Seagate Expansion Drive/fdns.json.gz"
theharvester_active = True
pwndb_active = True  # needs the tor service running (asks for root privileges)
dig_timeout = 5  # seconds to wait for each `dig +short` lookup
blacklist_words = "akamai,telefonica,microsoft"  # comma-separated filter words -- presumably drops CDN/hoster noise; usage not visible in this file
tmux_session_name = "subdoler"

# Temporary files -- intermediate per-tool outputs written into the output directory.
temp_domains_file = "subdoler_temp_domains"
temp_ranges_file = "subdoler_temp_ranges"
temp_companies_file = "subdoler_temp_companies"
amass_output_file = "subdoler_temp_amass"
dnsdumpster_output_file = "subdoler_temp_dnsdumpster"
gobuster_output_file = "subdoler_temp_gobuster"
fdns_output_file = "subdoler_temp_fdns"
pwndb_output_file = "subdoler_temp_pwndb"
harvester_output_file = "subdoler_temp_harvester"
tmuxp_yaml_file = "subdoler_temp.yaml"
sublist3r_output_file = "subdoler_temp_sublist3r"
39 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | stash.sqlite
2 | config.py
3 | config.*
4 | APIs/API-dnsdumpster.com/*
5 | APIs/*.txt
6 | APIs/*.csv
7 | APIs/pwndb/*
8 | APIs/theHarvester/*
9 | APIs/Sublist3r/*
10 | *.csv
11 | APIs/dnsdumpster_api.py
12 |
13 | APIs/API-dnsdumpster.com/
14 | APIs/gobuster/
15 | APIs/pwndb/
16 | APIs/theHarvester/
17 | res_subdoler
18 | res_subdoler/
19 |
20 | # Byte-compiled / optimized / DLL files
21 | __pycache__/
22 | *.py[cod]
23 | *$py.class
24 |
25 | # C extensions
26 | *.so
27 |
28 | # Distribution / packaging
29 | .Python
30 | build/
31 | develop-eggs/
32 | dist/
33 | downloads/
34 | eggs/
35 | .eggs/
36 | lib/
37 | lib64/
38 | parts/
39 | sdist/
40 | var/
41 | wheels/
42 | *.egg-info/
43 | .installed.cfg
44 | *.egg
45 | MANIFEST
46 |
47 | # PyInstaller
48 | # Usually these files are written by a python script from a template
49 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
50 | *.manifest
51 | *.spec
52 |
53 | # Installer logs
54 | pip-log.txt
55 | pip-delete-this-directory.txt
56 |
57 | # Unit test / coverage reports
58 | htmlcov/
59 | .tox/
60 | .coverage
61 | .coverage.*
62 | .cache
63 | nosetests.xml
64 | coverage.xml
65 | *.cover
66 | .hypothesis/
67 | .pytest_cache/
68 |
69 | # Translations
70 | *.mo
71 | *.pot
72 |
73 | # Django stuff:
74 | *.log
75 | local_settings.py
76 | db.sqlite3
77 |
78 | # Flask stuff:
79 | instance/
80 | .webassets-cache
81 |
82 | # Scrapy stuff:
83 | .scrapy
84 |
85 | # Sphinx documentation
86 | docs/_build/
87 |
88 | # PyBuilder
89 | target/
90 |
91 | # Jupyter Notebook
92 | .ipynb_checkpoints
93 |
94 | # pyenv
95 | .python-version
96 |
97 | # celery beat schedule file
98 | celerybeat-schedule
99 |
100 | # SageMath parsed files
101 | *.sage.py
102 |
103 | # Environments
104 | .env
105 | .venv
106 | env/
107 | venv/
108 | ENV/
109 | env.bak/
110 | venv.bak/
111 |
112 | # Spyder project settings
113 | .spyderproject
114 | .spyproject
115 |
116 | # Rope project settings
117 | .ropeproject
118 |
119 | # mkdocs documentation
120 | /site
121 |
122 | # mypy
123 | .mypy_cache/
124 |
--------------------------------------------------------------------------------
/install/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, Command
2 | import os
3 | import distutils.cmd
4 |
5 |
class CleanCommand(Command):
    """`setup.py clean`: best-effort removal of build artifacts."""
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # -f makes missing paths non-fatal; output is verbose (-v) on purpose.
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info stash.sqlite __pycache__')
14 |
15 |
class InstallDependencies(Command):
    """`setup.py install_dependencies`: mirrors install/install.sh.

    Installs system packages, clones the third-party tools into ../APIs/,
    fetches the gobuster wordlist, installs the Python requirements and
    builds gobuster. Paths are relative, so this must be run from the
    install/ directory, like the shell script.
    """
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        os.system("apt install -y python3 python3-pip")
        os.system("apt install -y tmux golang tor snapd")
        os.system("sudo snap install amass")
        # Tools are cloned under ../APIs/ where config.py expects them.
        os.system("git clone https://github.com/laramies/theHarvester ../APIs/theHarvester")
        os.system("git clone https://github.com/PaulSec/API-dnsdumpster.com ../APIs/API-dnsdumpster.com")
        os.system("git clone https://github.com/davidtavarez/pwndb ../APIs/pwndb")
        os.system("git clone https://github.com/OJ/gobuster ../APIs/gobuster")
        os.system("git clone https://github.com/aboul3la/Sublist3r ../APIs/Sublist3r")
        os.system("wget https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/bitquark-subdomains-top100000.txt -O ../APIs/bitquark-subdomains-top100000.txt")
        # Replace the upstream dnsdumpster example script with subdoler's wrapper.
        os.system("cp ../APIs/dnsdumpster_api.py ../APIs/API-dnsdumpster.com/api.py")
        os.system("pip3 install -r requirements.txt")
        os.system("cd ../APIs/gobuster && go get && go build")
35 |
36 |
setup(
    name='Subdoler',
    version='0.1.0',
    description='A package to list subdomains',
    # FIX: the original listed requests three times (unpinned, >=2.21.0,
    # >=2.22.0) and beautifulsoup4 twice (>=4.8.1, >=4.8.0); each package now
    # appears once with the strictest bound that was present.
    install_requires=[
        "tmuxp",
        "six>=1.12.0",
        "dnsdumpster>=0.5",
        "requests>=2.22.0",
        "beautifulsoup4>=4.8.1",
        "progressbar33>=2.4",
        "xlsxwriter>=1.2.6",
        "aiodns>=2.0.0",
        "dnspython>=1.16.0",
        "flake8>=3.7.8",
        "gevent>=1.4.0",
        "grequests>=0.4.0",
        "mypy>=0.740",
        "netaddr>=0.7.19",
        "plotly>=4.2.1",
        "pytest>=5.2.0",
        "PyYaml>=5.1.2",
        "shodan>=1.19.0",
        "texttable>=1.6.2",
        "retrying>=1.3.3",
    ],
    cmdclass={
        'clean': CleanCommand,
        'install_dependencies': InstallDependencies
    }
)
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Subdoler
2 |
3 | Subdoler is a subdomain lister which calculates:
4 |
5 | - [1. IP ranges, domains and subdomains from a list of companies](#1)
6 | - [2. Domains and subdomains from a list of IP ranges](#2)
7 | - [3. Subdomains from a list of domains](#3)
8 | - [4. IP ranges and domains (no subdomains) from a list of companies](#4)
9 | - [5. Domains (no subdomains) from a list of ranges](#5)
10 |
11 |
12 | When calculating the subdomains, it creates a TMUX session. You can wait until the programs end or process everything later [with -p](#6). Also, you can kill the tmux session [with -k](#7).
13 |
14 | --------------------------
15 |
16 | One of these arguments is necessary:
17 | - -c: File of companies. Ex: *./subdoler.py -c /tmp/companies.txt*
18 | - -C: List of companies. Ex: *./subdoler.py -C company1,company2*
19 | - -r: File of IP ranges. Ex: *./subdoler.py -r /tmp/ip_ranges.txt*
20 | - -R: List of IP ranges. Ex: *./subdoler.py -R 10.20.30.40/24,11.21.31.41/22*
21 | - -d: File of domains. Ex: *./subdoler.py -d /tmp/domains.txt*
- -D: List of domains. Ex: *./subdoler.py -D company1.com,company2.es*
23 | - -k: Kill tmux session. Ex: *./subdoler.py -k*
24 |
25 | Optional arguments:
26 | - -o: Output directory. Ex: *./subdoler.py -c /tmp/companies.txt -o /tmp/subdoler_results*
27 | - -cf: Country filter for IP range extraction from IPv4info. Ex: *./subdoler.py -c /tmp/companies.txt -cf ES,IT,US*
28 | - -ns: No subdomain calculation. Ex: *./subdoler.py -r /tmp/ip_ranges.txt -ns*
29 | - -p: Process results (useful for closing everything except the tmux session and process the resulting files some hours later). Ex: *./subdoler.py -o /tmp/subdoler_results -p*
30 |
31 | --------------------------
32 |
33 | You can decide which programs are used for subdomain calculation setting the value of these options to *True* in the [config.py](https://github.com/ricardojoserf/subdoler/blob/master/config.py) file:
34 |
35 | * Options to enumerate subdomains:
36 |
37 | * **amass_active** - Use [Amass](https://github.com/OWASP/Amass) in passive scan mode
38 |
39 | * **gobuster_active** - Use [Gobuster](https://github.com/OJ/gobuster) in bruteforce mode with a custom dictionary (using [this](https://github.com/danielmiessler/SecLists) by default)
40 |
41 | * **sublist3r_active** - Use [Sublist3r](https://github.com/aboul3la/Sublist3r)
42 |
43 | * **dnsdumpster_active** - Use the [DNSDumpster unofficial API](https://github.com/PaulSec/API-dnsdumpster.com)
44 |
45 | * **fdns_active** - Use [FDNS](https://opendata.rapid7.com/sonar.fdns_v2/). For this, [download this file](https://opendata.rapid7.com/sonar.fdns_v2/) and set its path in [config.py](https://github.com/ricardojoserf/subdoler/blob/master/config.py)
46 |
47 | * Options to enumerate leaked information:
48 |
49 | * **theharvester_active** - Use [theHarvester](https://github.com/laramies/theHarvester) to search leaked email addresses
50 |
51 | * **pwndb_active** - Use [PwnDB](https://github.com/davidtavarez/pwndb) to search leaked credentials (the service *tor* needs to get started, it asks for root privileges)
52 |
53 | ---------------------------------------------
54 |
55 | ## Installation
56 |
57 | ```
58 | git clone https://github.com/ricardojoserf/subdoler
59 | cd subdoler/install
60 | sh install.sh
61 | ```
62 |
63 | ---------------------------------------------
64 |
65 | ## 1. IP ranges, domains and subdomains from a list of companies (**-c** or **-C**)
66 |
67 | It calculates the IP ranges of the companies in IPv4info, extracts the domains in these IPs and then the subdomains:
68 |
69 | From a file:
70 |
71 | ```
72 | python3 subdoler.py -c COMPANIES_FILE -o OUTPUT_DIRECTORY
73 | ```
74 |
75 | From a comma separated list:
76 |
77 | ```
78 | python3 subdoler.py -C company1,company2 -o OUTPUT_DIRECTORY
79 | ```
80 |
81 | First, the IP ranges of each company are calculated:
82 |
83 | 
84 |
85 | 
86 |
87 | Second, the domains in these IP ranges:
88 |
89 | 
90 |
91 | Third, the subdomains of these domains are calculated using a Tmux session:
92 |
93 | 
94 |
95 | Then, the program will wait until the user enters a key:
96 |
97 | - If it is **'q'**, it will quit and you can calculate the data later using the option **'-p' (--process)**
98 |
99 | - If it is not 'q', it will calculate the data in the files.
100 |
101 | 
102 |
103 |
Finally, the unique subdomains and the leaked information are listed and the output is stored in different files in the output directory:
105 |
106 | 
107 |
108 |
109 | 
110 |
111 |
112 | Different files are created in the specified output directory:
113 |
114 | - **main_domains.txt**: It contains the domains (hostnames) from the IP ranges calculated
115 |
116 | - **subdomain_by_source.csv**: It contains the subdomains with the program which discovered them, the reverse lookup IP and which range it is part of
117 |
118 | - **ranges_information.csv**: It contains information about the ranges
119 |
120 | - **leaked_information.txt**: It contains the leaked email accounts and credentials
121 |
122 | - **results.xlsx**: It contains all the information in an Excel file with different sheets
123 |
124 |
125 | 
126 |
127 | 
128 |
129 |
130 | ---------------------------------------------
131 |
132 | ## 2. Domains and subdomains from a list of IP ranges (**-r** or **-R**)
133 |
134 |
135 | It skips the step of calculating the ranges of the companies, working with the IP ranges directly.
136 |
137 | From a file:
138 |
139 | ```
140 | python3 subdoler.py -r RANGES_FILE -o OUTPUT_DIRECTORY
141 | ```
142 |
143 | 
144 |
145 |
146 | From a comma separated list:
147 |
148 | ```
149 | python3 subdoler.py -R companyrange1,companyrange2 -o OUTPUT_DIRECTORY
150 | ```
151 |
152 | 
153 |
154 | ---------------------------------------------
155 |
156 | ## 3. Subdomains from a list of domains (**-d** or **-D**)
157 |
158 |
159 | It skips the steps of calculating the ranges of the companies and the domains in the IP ranges, extracting the subdomains from the domains list directly:
160 |
161 | From a file:
162 |
163 | ```
164 | python3 subdoler.py -d DOMAINS_FILE -o OUTPUT_DIRECTORY
165 | ```
166 |
167 | 
168 |
169 |
170 | From a comma separated list:
171 |
172 | ```
173 | python3 subdoler.py -D domain1,domain2,domain3 -o OUTPUT_DIRECTORY
174 | ```
175 |
176 | 
177 |
178 |
179 | ----------------------------------------------
180 |
181 | ## 4. IP ranges and domains (no subdomains) from a list of companies (**-c** or **-C** and **-ns**)
182 |
183 | Using the option **--no_subdomains** (-ns), the step of calculating the subdomains is skipped, calculating just the IP ranges of the companies and the domains in them:
184 |
185 | ```
186 | python3 subdoler.py -ns -c COMPANIES_FILE -o OUTPUT_DIRECTORY
187 | ```
188 |
189 | 
190 |
191 | 
192 |
193 | ---------------------------------------------
194 |
195 | ## 5. Domains (no subdomains) from a list of ranges (**-r** or **-R** and **-ns**)
196 |
197 | ```
198 | python3 subdoler.py -ns -r RANGES_FILE -o OUTPUT_DIRECTORY
199 | ```
200 |
201 | 
202 |
203 | 
204 |
205 | ----------------------------------------------
206 |
207 | ## 6. Process files (**-p**)
208 |
209 | ```
210 | python3 subdoler.py -o OUTPUT_DIRECTORY --process
211 | ```
212 |
213 | 
214 |
215 |
216 | ----------------------------------------------
217 |
## 7. Kill the tmux session (**-k**)
219 |
220 | ```
221 | python3 subdoler.py -k
222 | ```
223 |
--------------------------------------------------------------------------------
/APIs/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import requests
4 | from bs4 import BeautifulSoup
5 | import distutils.spawn
6 |
ipv4_base_url = "http://ipv4info.com"  # base URL of the IPv4info lookup service (plain HTTP)
8 |
9 |
10 | # Get url from IPv4info
def get_info_url(company_name):
    """Return the IPv4info results-page path for a company name.

    The search endpoint answers with a redirect whose Location header points
    at the company's info page; exits the process if no redirect happened.
    """
    response = requests.get(ipv4_base_url + "/?act=check&ip=" + company_name)
    if not response.history:
        print("Failed to get IPv4info page for that company")
        sys.exit(1)
    return response.history[0].headers['Location']
20 |
21 |
22 | # Get range in slash notation
def get_ranges(company_name):
    """Scrape the IPv4info page for *company_name*.

    Returns a pair (calc_ranges, ranges_info):
    calc_ranges -- [{'name', 'range' (CIDR string), 'country'}, ...] for each
                   table row whose size could be translated to slash notation
    ranges_info -- raw per-row details; rows that were translated appear a
                   second time with an extra 'range' key.
    """
    # Lookup table: block size in addresses ('val') -> smallest prefix length
    # ('range') that covers it, ordered /32 (1 address) up to /1 (2**31).
    array_aux = [{'range': 32, 'val': 1}, {'range': 31, 'val': 2}, {'range': 30, 'val': 4}, {'range': 29, 'val': 8}, {'range': 28, 'val': 16}, {'range': 27, 'val': 32}, {'range': 26, 'val': 64}, {'range': 25, 'val': 128}, {'range': 24, 'val': 256}, {'range': 23, 'val': 512}, {'range': 22, 'val': 1024}, {'range': 21, 'val': 2048}, {'range': 20, 'val': 4096}, {'range': 19, 'val': 8192}, {'range': 18, 'val': 16384}, {'range': 17, 'val': 32768}, {'range': 16, 'val': 65536}, {'range': 15, 'val': 131072}, {'range': 14, 'val': 262144}, {'range': 13, 'val': 524288}, {'range': 12, 'val': 1048576}, {'range': 11, 'val': 2097152}, {'range': 10, 'val': 4194304}, {'range': 9, 'val': 8388608}, {'range': 8, 'val': 16777216}, {'range': 7, 'val': 33554432}, {'range': 6, 'val': 67108864}, {'range': 5, 'val': 134217728}, {'range': 4, 'val': 268435456}, {'range': 3, 'val': 536870912}, {'range': 2, 'val': 1073741824}, {'range': 1, 'val': 2147483648}]
    calc_ranges = []
    ranges_info = []
    info_url = ipv4_base_url + get_info_url(company_name)
    r = requests.get(info_url)
    soup = BeautifulSoup(r.content, 'html.parser')
    for i in soup.findAll('tr'):
        vals = i.findAll('td')
        # Data rows of the results table have exactly 10 cells; assumed column
        # layout: 2=first IP, 3=last IP, 4=size, 6=ASN, 7=block name,
        # 8=organization, 9=country links -- TODO confirm against a live page.
        if len(vals) == 10:
            first_ip = vals[2].getText()
            last_ip = vals[3].getText()
            range_size = vals[4].getText()
            asn = vals[6].getText().replace("\n", " ")
            block_name = vals[7].getText()
            organization = vals[8].getText()
            country = ""
            for e in vals[9].findAll('a'):
                country += e.getText() + " "
            ranges_info.append({'organization': organization, 'block_name': block_name, 'first_ip': first_ip, 'last_ip': last_ip, 'range_size': range_size, 'asn': asn, 'country': country})
            # Range translation to slash notation
            # ("Size" in the text marks the header row, which is skipped).
            if "Size" not in range_size:
                # Pick the first (largest) prefix whose block covers range_size.
                for j in array_aux:
                    if (int(range_size)-int(j['val'])) <=0:
                        range_val = first_ip+"/"+str(j['range'])
                        calc_ranges.append({'name':vals[8].getText(), 'range': range_val, 'country': country})
                        ranges_info.append({'organization': organization, 'block_name': block_name, 'first_ip': first_ip, 'last_ip': last_ip, 'range_size': range_size, 'asn': asn, 'country': country, 'range': range_val})
                        break
    return calc_ranges, ranges_info
52 |
53 |
54 | # Get base
def get_base(val, index):
    """Keep only the `index` most-significant bits of the 8-bit value *val*,
    zeroing the rest (i.e. align an octet down to a prefix boundary)."""
    result = 0
    for bit in range(7, 7 - index, -1):
        weight = 2 ** bit
        if val >= weight:
            result += weight
            val -= weight
    return result
62 |
63 |
64 | # IP resolution
def resolve_ip(ip_addr, output_file):
    """Reverse-resolve *ip_addr* with nslookup; on a hit, print "ip - name"
    and append each resolved name (trailing dot stripped) to *output_file*.

    NOTE(review): the command is assembled by string concatenation and run
    through os.system, so ip_addr/output_file must be shell-safe. That holds
    for the numeric IPs generated by analyze_range, but never pass untrusted
    input here.
    """
    comando = "a=$(nslookup " + ip_addr +" | grep name | awk '{print $4}'); if [ ${#a} -ge 1 ]; then echo "+ ip_addr +" - $a | sed -e 's/.$//'; echo $a | tr ' ' '\n' | sed -e 's/.$//' >> "+output_file+"; fi;"
    os.system(comando)
68 |
69 |
70 | # Subdomain ordering
def order_subdomains(output_file):
    """Print the hostnames of *output_file* grouped under their base domains.

    Reads one hostname per line, derives the set of base domains (keeping
    three labels when the second-level label looks like a generic extension,
    e.g. "foo.com.es" -> "foo.com.es"), prints each group, and returns the
    raw list of lines.

    Fixes vs. the original: the file handle is closed (`with`), and
    two-label names whose first label is a generic extension (e.g. "com.es")
    no longer index a third label that does not exist.
    """
    with open(output_file) as handle:
        f = handle.read().splitlines()
    common_extensions = ["com","co","es","net","org","us"]
    possible_domains = []
    print("\n"+"-"*25+"\n"+"Domains list"+"\n"+"-"*25)
    for i in f:
        if len(i)>2:
            splitted = i.split(".")
            if len(splitted) < 2:
                continue  # no dot at all: cannot derive a base domain
            if splitted[-2] not in common_extensions or len(splitted) < 3:
                pd = splitted[-2]+"."+splitted[-1]
            else:
                # Second-level label is a generic extension ("foo.com.es"):
                # keep three labels as the base domain.
                pd = splitted[-3]+"."+splitted[-2]+"."+splitted[-1]
            if pd not in possible_domains:
                print("- "+ pd)
                possible_domains.append(pd)

    print("\n"+"-"*25+"\n"+"Subdomains list"+"\n"+"-"*25)
    # Pair every hostname with each base domain it contains, then print
    # the pairs grouped by base domain.
    aux_arr = []
    for i in f:
        for p in possible_domains:
            if p in i:
                aux_arr.append({'dom':p,'subdom':i})
    for p in possible_domains:
        print("Domain "+ p)
        for i in aux_arr:
            if i['dom'] == p:
                print("- "+ i['subdom'])
    return f
102 |
103 |
104 | # IP address analysis
105 | # Source: https://medium.com/@sadatnazrul/checking-if-ipv4-address-in-network-python-af61a54d714d
def ip_to_binary(ip):
    """Return the 32-character binary string of a dotted-quad IPv4 address."""
    return "".join(format(int(octet), '08b') for octet in ip.split("."))
111 |
112 |
def get_addr_network(address, net_size):
    """Return the network part (first *net_size* bits) of *address*.

    Note: the original's slice [0:32-(32-net_size)] simplifies to [:net_size].
    """
    return ip_to_binary(address)[:net_size]
117 |
118 |
def ip_in_prefix(ip_address, prefix):
    """True iff *ip_address* falls inside the CIDR *prefix* ("a.b.c.d/n")."""
    prefix_address, size_str = prefix.split("/")
    bits = int(size_str)
    # Two addresses share a prefix iff their first `bits` bits agree.
    return get_addr_network(ip_address, bits) == get_addr_network(prefix_address, bits)
125 |
126 |
127 | # Binaries path calculation
def bin_path(name1, name2):
    """Return *name1* if it is on PATH, else *name2* if it is, else "notfound".

    Uses shutil.which instead of the deprecated distutils.spawn.find_executable
    (distutils is removed in Python 3.12, PEP 632).
    """
    import shutil  # local import so the module keeps working without edits elsewhere
    for candidate in (name1, name2):
        if shutil.which(candidate) is not None:
            return candidate
    return "notfound"
135 |
136 |
137 | # Range Analysis
def analyze_range(arr_points, length_, output_file, counter, len_ranges):
    """Reverse-resolve every IPv4 address in a CIDR block via resolve_ip.

    arr_points  -- the four dotted-quad octets of the network address (strings)
    length_     -- prefix length (<=32); anything larger aborts the process
    output_file -- file that resolve_ip appends resolved names to
    counter / len_ranges -- progress numbers for the banner line

    BUG FIX vs. the original: the loops used range(first_, last_) and
    range(0, 255), which skipped the last block and every .255 octet even
    though the printed banner advertised an inclusive range. All loops are
    now inclusive of their upper bound.
    """
    if length_ < 8:
        aux1 = 8 - length_
        first_ = get_base(int(arr_points[0]), int(8 - aux1))
        last_ = first_ + 2**aux1 - 1
        first_ip = str(first_) + ".0.0.0"
        last_ip = str(last_) + ".255.255.255"
        print("\n"+"["+str(counter)+"/"+str(len_ranges)+"] "+"Range: "+first_ip+"-"+last_ip+"\n")
        for j in range(first_, last_ + 1):
            for i in range(0, 256):
                for h in range(0, 256):
                    for g in range(0, 256):
                        resolve_ip(str(j) + "." + str(i) + "." + str(h) + "." + str(g), output_file)
    elif length_ < 16:
        aux1 = 16 - length_
        first_ = get_base(int(arr_points[1]), int(8 - aux1))
        last_ = first_ + 2**aux1 - 1
        first_ip = arr_points[0] + "." + str(first_) + ".0.0"
        last_ip = arr_points[0] + "." + str(last_) + ".255.255"
        print("\n"+"["+str(counter)+"/"+str(len_ranges)+"] "+"Range: "+first_ip+"-"+last_ip+"\n")
        for j in range(first_, last_ + 1):
            for i in range(0, 256):
                for h in range(0, 256):
                    resolve_ip(arr_points[0]+"."+ str(j) + "." + str(i) + "." + str(h), output_file)
    elif length_ < 24:
        aux1 = 24 - length_
        first_ = get_base(int(arr_points[2]), int(8 - aux1))
        last_ = first_ + 2**aux1 - 1
        first_ip = arr_points[0] + "." + arr_points[1] + "." + str(first_) + ".0"
        last_ip = arr_points[0] + "." + arr_points[1] + "." + str(last_) + ".255"
        print("\n"+"["+str(counter)+"/"+str(len_ranges)+"] "+"Range: "+first_ip+"-"+last_ip+"\n")
        for j in range(first_, last_ + 1):
            for i in range(0, 256):
                resolve_ip(arr_points[0]+"."+arr_points[1] + "." + str(j) + "." + str(i), output_file)
    elif length_ < 32:
        aux1 = 32 - length_
        first_ = get_base(int(arr_points[3]), int(8 - aux1))
        last_ = first_ + 2**aux1 - 1
        first_ip = arr_points[0] + "." + arr_points[1] + "." + arr_points[2] + "." + str(first_)
        last_ip = arr_points[0] + "." + arr_points[1] + "." + arr_points[2] + "." + str(last_)
        print("\n"+"["+str(counter)+"/"+str(len_ranges)+"] "+"Range: "+first_ip+"-"+last_ip+"\n")
        for j in range(first_, last_ + 1):
            resolve_ip(arr_points[0] + "." + arr_points[1] + "." + arr_points[2] + "." + str(j), output_file)
    elif length_ == 32:
        # A /32 is a single host.
        resolve_ip(arr_points[0] + "." + arr_points[1] + "." + arr_points[2] + "." + arr_points[3], output_file)
    else:
        print("Wrong IP format")
        sys.exit(1)
186 |
187 |
188 | # Range Processing and Calculation
def range_extractor(ranges_file, companies_file, output_file, country_filter):
    """Collect CIDR ranges (from a file and/or per-company IPv4info lookups),
    reverse-resolve every address in them, and group the results.

    ranges_file    -- optional file with one CIDR range per line
    companies_file -- optional file with one company name per line
    output_file    -- file resolve_ip appends discovered names to
    country_filter -- optional comma-separated country codes; when given,
                      only IPv4info ranges matching one of them are kept
    Returns (output_file, ranges, ranges_info) where ranges_info is the raw
    data of the last company looked up (None if no company lookup ran).

    Fixes vs. the original: the country loop no longer shadows the company
    variable, a range matching several filter countries is appended only
    once, input files are closed, and the bare `except: pass` around range
    parsing/scanning now reports what was skipped instead of hiding it.
    """
    target_countries = None
    if country_filter is not None:
        target_countries = country_filter.split(",")
    ranges = []
    ranges_info = None
    if ranges_file is not None:
        with open(ranges_file) as rf:
            ranges = rf.read().splitlines()
    if companies_file is not None:
        with open(companies_file) as cf:
            companies = cf.read().splitlines()
        for company in companies:
            calc_ranges, ranges_info = get_ranges(company)
            print("\nCompany: "+company+"\n")
            for r in calc_ranges:
                print("- Range: %s \tName: %s "%(r['range'], r['name']))
                # Keep the range if any filter country matches (-cf option),
                # or unconditionally when no filter was given.
                if target_countries is not None:
                    if any(tc in r['country'] for tc in target_countries):
                        ranges.append(r['range'])
                else:
                    ranges.append(r['range'])
            if len(calc_ranges) == 0:
                print(" - No data found")
    counter = 0
    len_ranges = len(ranges)
    for r in ranges:
        counter += 1
        try:
            length_ = int(r.split("/")[1])
            arr_points = r.split("/")[0].split(".")
            analyze_range(arr_points, length_, output_file, counter, len_ranges)
        except Exception as exc:
            # Best-effort like the original, but say what went wrong.
            print("Skipping range %s (%s)" % (r, exc))
    if os.path.isfile(output_file):
        order_subdomains(output_file)
    return output_file, ranges, ranges_info
--------------------------------------------------------------------------------
/subdoler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | from APIs.utils import bin_path, ip_in_prefix, range_extractor
3 | from config import *
4 | from six.moves import input
5 | import progressbar
6 | import subprocess
7 | import xlsxwriter
8 | import argparse
9 | import time
10 | import sys
11 | import csv
12 | import six
13 | import os
14 |
15 |
unique_subdomains = {}  # module-level accumulator later passed to calculate_subdomain_info; exact value schema not visible in this chunk -- TODO confirm
17 |
18 |
def get_args():
    """Build and parse subdoler's command-line interface."""
    cli = argparse.ArgumentParser()
    # Input selectors: companies, domains or IP ranges, each as file or list.
    cli.add_argument('-c', '--companies_file', required=False, default=None, action='store', help='File with ranges to analyze')
    cli.add_argument('-C', '--companies_list', required=False, default=None, action='store', help='Comma separated list of companies')
    cli.add_argument('-d', '--domains_file', required=False, default=None, action='store', help='File with a list of domains')
    cli.add_argument('-D', '--domains_list', required=False, default=None, action='store', help='Comma separated list of domains')
    cli.add_argument('-r', '--ranges_file', required=False, default=None, action='store', help='File with ranges to analyze')
    cli.add_argument('-R', '--ranges_list', required=False, default=None, action='store', help='Comma separated list of ranges')
    # Output and behavior switches.
    cli.add_argument('-o', '--output_directory', required=False, default="res_subdoler", action='store', help='Output directory')
    cli.add_argument('-cf', '--country_filter', required=False, action='store', help='Country filter for the list of IP ranges calculated in IPv4info')
    cli.add_argument('-ns', '--no_subdomains', required=False, action='store_true', help='Do not list subdomains (just ranges and domains)')
    cli.add_argument('-p', '--process', required=False, action='store_true', help='Process files in the folder')
    cli.add_argument('-k', '--kill', required=False, action='store_true', help='Kill subdoler')
    return cli.parse_args()
34 |
35 |
def get_commands(domains_file, output_directory):
    """Assemble the shell command line for each enumeration tool.

    Reads the domains from *domains_file* (exits if it does not exist) and
    returns a list of {'title', 'command', 'active'} dicts -- one per tool,
    with 'active' taken from the config.py toggles. Each command ends with
    an "echo Finished" marker so the tmux panes show completion.
    """
    python_bin = bin_path("python3", "python")
    if not os.path.isfile(domains_file):
        print("\n"+"No domains calculated. Exiting...")
        sys.exit(1)
    domains = open(domains_file).read().splitlines()
    # Single-shot tools: one invocation covering every domain at once.
    amass_cmd = "amass enum --passive -d "+",".join(domains)+" -o "+output_directory+"/"+amass_output_file + "; echo Finished"
    dnsdumpster_cmd = python_bin+" "+dnsdumpster_script_file+" -f "+domains_file+" -o "+output_directory+"/"+dnsdumpster_output_file +"; echo Finished"
    fdns_cmd = "zcat '"+fdns_file+"' | egrep '(" + "|\\.".join(domains) + ")' | cut -d ',' -f 2 | cut -d '\"' -f 4 | tee "+output_directory+"/"+fdns_output_file
    # Per-domain tools: one command segment per domain, each prefixed with a
    # progress echo ("i/N domain").
    gobuster_cmd = ""
    theharvester_cmd = ""
    sublist3r_cmd = ""
    pwndb_cmd = "service tor start; "  # pwndb is reached over tor
    for d in range(0, len(domains)):
        domain = domains[d]
        gobuster_cmd += "echo "+str(d+1)+"/"+str(len(domains))+" "+domain+"; "+gobuster_file+" dns -t "+str(gobuster_threads)+" -w "+gobuster_dictionary+" -d "+domain+" -o "+output_directory+"/"+gobuster_output_file+"_"+domain+"; "
        sublist3r_cmd += "echo "+str(d+1)+"/"+str(len(domains))+" "+domain+"; " + python_bin + " " + sublist3r_file + " -d " + domain +" -o "+output_directory+"/"+sublist3r_output_file+"_"+domain+"; "
        current_location = os.getcwd() + "/"
        # theHarvester must run from its own directory, so the output path is absolute.
        theharvester_cmd += "echo "+str(d+1)+"/"+str(len(domains))+" "+domain+"; cd "+harvester_location+" && "+python_bin+" theHarvester.py -d " + domain + " -b google | grep -v cmartorella | grep '@' >> "+current_location+output_directory+"/"+harvester_output_file+"; "
        pwndb_cmd += "echo "+str(d+1)+"/"+str(len(domains))+" "+domain+"; " + python_bin + " " + pwndb_script_file + " --target @" + domain + " | grep '@' | grep -v donate | awk '{print $2}' >> "+output_directory+"/"+pwndb_output_file+"; "
    gobuster_cmd += "echo Finished"
    theharvester_cmd += "echo Finished"
    pwndb_cmd += "echo Finished"
    sublist3r_cmd += "echo Finished"
    commands = []
    commands.append({"title":"Amass - Passive Scan Mode", "command": amass_cmd, "active": amass_active})
    commands.append({"title":"DNSDumpster - Subdomains", "command": dnsdumpster_cmd, "active": dnsdumpster_active})
    commands.append({"title":"FDNS - Subdomain lister", "command": fdns_cmd, "active": fdns_active})
    commands.append({"title":"Gobuster - Subdomain bruteforce", "command": gobuster_cmd, "active": gobuster_active})
    commands.append({"title":"Sublist3r", "command": sublist3r_cmd, "active": sublist3r_active})
    commands.append({"title":"TheHarvester", "command": theharvester_cmd, "active": theharvester_active})
    commands.append({"title":"Pwndb", "command": pwndb_cmd, "active": pwndb_active})
    return commands
69 |
70 |
def create_tmux_file(commands, output_directory):
    """Write the tmuxp YAML session file with one pane per active tool.

    BUG FIX: the file is now closed (via `with`) so its contents are flushed
    to disk before create_tmux_session runs `tmuxp load`; the original left
    the handle open, so buffered YAML could still be missing when tmuxp read
    the file.
    """
    # Drop any leftover session from a previous run before writing a new file.
    os.system("tmux kill-session -t subdoler 2>/dev/null")
    with open(output_directory+"/"+tmuxp_yaml_file, "w") as f:
        f.write("session_name: "+tmux_session_name+"\n")
        f.write("windows:"+"\n")
        f.write("- window_name: dev window"+"\n")
        f.write(" layout: tiled"+"\n")
        f.write(" panes:"+"\n")
        for i in commands:
            if i["active"]:
                f.write(' - shell_command:\n ')
                # Turn "cmd1; cmd2; ..." into a YAML list of shell_command entries.
                cmd_ = i["command"].replace(";", "\n -")
                f.write(' - echo {0} \n'.format(i["title"]))
                f.write(' - {0} \n'.format(cmd_))
85 |
86 |
def create_tmux_session(output_directory):
    """Ask tmuxp to load the generated YAML session file.

    *output_directory* is expected to end with "/" (normalized in main()).
    """
    load_cmd = "tmuxp load " + output_directory + tmuxp_yaml_file + ";"
    os.system(load_cmd)
89 |
90 |
def write_ip_list(ip_list, workbook):
    """Write the unique IP addresses, sorted, into their own worksheet.

    Note: sorts *ip_list* in place (same side effect as before).
    """
    worksheet = workbook.add_worksheet("Unique IP addresses")
    ip_list.sort()
    for row, ip in enumerate(ip_list):
        worksheet.write(row, 0, ip)
99 |
100 |
def check_ip(string_):
    """Return True if *string_* parses as an IPv4 address via inet_aton."""
    import socket
    try:
        socket.inet_aton(string_)
    except socket.error:
        return False
    return True
108 |
109 |
def dig_short(val_):
    """Resolve *val_* with `dig +short` and return the answer tokens.

    :param val_: hostname or IP address to look up
    :return: list of non-empty answer strings, or '' when the lookup
             fails, times out or yields no answer (callers test == '').
    """
    try:
        output = subprocess.Popen(["dig", "+short", val_], stdout=subprocess.PIPE, encoding='utf8').communicate(timeout=dig_timeout)[0]
        # Drop every empty token (trailing newline, blank lines) instead of
        # list.remove(''), which deleted only one occurrence and raised an
        # uncaught ValueError when no empty token was present.
        calculated_ips = [tok for tok in output.replace("\n", " ").split(" ") if tok]
    except Exception:
        # dig missing, timeout, resolution error, ... -> treat as no answer.
        calculated_ips = []
    if not calculated_ips:
        return ''
    return calculated_ips
120 |
121 |
def get_range(calculated_ip, ranges):
    """Return the first prefix in *ranges* containing *calculated_ip*.

    Falls back to '' when *ranges* is None or nothing matches.
    """
    if ranges is None:
        return ''
    for prefix in ranges:
        # Keep the strict `is True` test: non-bool truthy returns from
        # ip_in_prefix were treated as "no match" by the original.
        if ip_in_prefix(calculated_ip, prefix) is True:
            return prefix
    return ''
129 |
130 |
def write_to_files(worksheet, writer, row, col, data_array):
    """Append *data_array* as one CSV row and one worksheet row.

    Values are written starting at column *col*; the returned state is
    (worksheet, writer, row + 1, 0), ready for the next call.
    """
    writer.writerow(data_array)
    for offset, value in enumerate(data_array):
        worksheet.write(row, col + offset, value)
    return worksheet, writer, row + 1, 0
139 |
140 |
def calculate_subdomain_info(output_directory, workbook, ranges, unique_subdomains):
    """Resolve every unique subdomain and record it in CSV and XLSX.

    For each subdomain: forward-resolve it with dig, reverse-resolve each
    resulting IP, and match the IP against the known ranges.

    :param unique_subdomains: dict subdomain -> comma-separated sources
    :return: list of unique IP addresses observed while resolving.
    """
    row = 0
    col = 0
    worksheet = workbook.add_worksheet("Subdomain by source")
    for i in ["Subdomain", "Source", "IP", "Reversed IP", "IP in range"]:
        worksheet.write(row, col, i)
        col += 1
    col = 0
    row += 1
    ip_list = []
    print("\nCalculating data from "+str(len(unique_subdomains))+" total entries")
    # Context manager guarantees the CSV is flushed and closed even if a
    # lookup raises; the original never closed this handle.
    with open(output_directory+"subdomain_by_source.csv", "w+") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(["Subdomain", "Source", "IP", "Reversed IP", "IP in range"])
        bar = progressbar.ProgressBar(maxval=len(unique_subdomains), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
        bar.start()
        bar_counter = 0
        for subdomain in unique_subdomains.keys():
            bar_counter += 1
            bar.update(bar_counter)
            calculated_ips = dig_short(subdomain)
            if calculated_ips == '':
                # No DNS answer: still record the subdomain and its sources.
                data_array = [subdomain, unique_subdomains[subdomain], '', '', '']
                worksheet, writer, row, col = write_to_files(worksheet, writer, row, col, data_array)
            else:
                for calculated_ip in calculated_ips:
                    if (check_ip(calculated_ip)) and (calculated_ip not in ip_list):
                        ip_list.append(calculated_ip)
                    if calculated_ip == ";;":
                        # ";;" marks a dig error banner - stop parsing answers.
                        break
                    reverse_dns = dig_short(calculated_ip)
                    reverse_dns = ','.join(reverse_dns)
                    ip_in_range = get_range(calculated_ip, ranges)
                    data_array = [subdomain, unique_subdomains[subdomain], calculated_ip, reverse_dns, ip_in_range]
                    worksheet, writer, row, col = write_to_files(worksheet, writer, row, col, data_array)
        bar.finish()
    return ip_list
178 |
179 |
def get_subdomain_info(res_files, workbook):
    """Merge every tool's output file into the global unique_subdomains map.

    unique_subdomains maps subdomain -> comma-separated string of sources
    (tool names) that reported it.

    :param res_files: list of dicts with keys "name" (file path) and
                      "code" (tool name)
    :param workbook: unused here; kept for call-site compatibility
    :return: the updated unique_subdomains dict
    """
    for f in res_files:
        f_name = f['name']
        if os.path.isfile(f_name):
            file_values = open(f_name).read().splitlines()
            blacklist_words_list = blacklist_words.split(",")
            for bw in blacklist_words_list:
                # Iterate over a snapshot: the original removed items from
                # the list it was iterating, which silently skipped the
                # element immediately after each removal.
                for fv in list(file_values):
                    if bw in fv:
                        file_values.remove(fv)
                        print ("Not analyzing %s, %s"%(fv,str(len(file_values))))
            file_values.sort()
            print("Calculating data from "+str(len(file_values))+" entries from "+f['code'])
            for v in file_values:
                if len(v) > 2:
                    source_ = f['code']
                    if v not in unique_subdomains.keys():
                        unique_subdomains[v] = source_
                    elif source_ not in unique_subdomains[v]:
                        unique_subdomains[v] += ", " + source_
    return unique_subdomains
203 |
204 |
def get_unique_subdomains(output_dir, workbook):
    """Print the unique subdomains and write them to txt file + worksheet.

    Entries are ordered by their source string (unique_subdomains.get), so
    subdomains found by the same tool combination are grouped together.
    """
    row = 0
    col = 0
    # Close the file deterministically; the original handle was never
    # closed, so buffered rows could be lost.
    with open(output_dir+"unique_subdomains.txt", "w+") as csv_file:
        writer = csv.writer(csv_file)
        print("\n"+"-"*25+"\n"+"Subdomains (total: "+str(len(unique_subdomains))+")\n"+"-"*25)
        worksheet = workbook.add_worksheet("Unique subdomains")
        for u in sorted(unique_subdomains, key=unique_subdomains.get):
            print("- %s" % u)
            worksheet.write(row, col, u)
            writer.writerow([u])
            row += 1
217 |
218 |
def get_leaked_information(output_directory, workbook):
    """Dump theHarvester / pwndb findings to console, txt file and XLSX.

    Both result files are optional; a worksheet is still created for each
    tool (matching the original behavior) even when its file is missing.
    """
    # The original left this handle open; `with` guarantees the flush.
    with open(output_directory+"leaked_information.txt", "w+") as csv_file:
        writer = csv.writer(csv_file)
        print("\n"+"-"*25+"\n"+"Leaked information"+"\n"+"-"*25)
        _write_leaked_section(output_directory+"/"+harvester_output_file,
                              workbook.add_worksheet("Leaked emails (theHarvester)"),
                              writer, "Leaked emails: ")
        _write_leaked_section(output_directory+"/"+pwndb_output_file,
                              workbook.add_worksheet("Leaked credentials (Pwndb)"),
                              writer, "Leaked credentials: ")


def _write_leaked_section(path, worksheet, writer, title):
    """Write one tool's leak file (if present) to console, CSV and sheet."""
    if os.path.isfile(path):
        print("\n"+"-"*25+title+"-"*25+"\n")
        file_values = open(path).read().splitlines()
        file_values.sort()
        for row, v in enumerate(file_values):
            print(v)
            worksheet.write(row, 0, v)
            writer.writerow([v])
247 |
248 |
def get_range_info(output_directory, workbook, ranges_info):
    """Write the IP-range metadata table to CSV and to its own worksheet.

    :param ranges_info: list of dicts with keys organization, block_name,
                        first_ip, last_ip, range_size, asn, country — or
                        None when no range extraction was performed.
    """
    if ranges_info is None:
        return
    row = 0
    col = 0
    # Context manager guarantees the CSV is flushed and closed (the
    # original handle was never closed).
    with open(output_directory+"/ranges_information.csv", "w+") as csv_file:
        writer = csv.writer(csv_file)
        worksheet = workbook.add_worksheet("Ranges information")
        heading = ["Organization", "Block name", "First IP", "Last IP", "Range size", "ASN", "Country"]
        writer.writerow(heading)
        for i in heading:
            worksheet.write(row, col, i)
            col += 1
        col = 0
        row += 1
        for r in ranges_info:
            file_values = [r['organization'], r['block_name'], r['first_ip'], r['last_ip'], r['range_size'], r['asn'], r['country']]
            # Skip entries that repeat the header row.
            if r['organization'] != "Organization":
                writer.writerow(file_values)
                for val in file_values:
                    worksheet.write(row, col, val)
                    col += 1
                col = 0
                row += 1
272 |
273 |
274 | def get_domains(output_directory, workbook, domains_file):
275 | row = 0
276 | col = 0
277 | domains_ = open(domains_file).read().splitlines()
278 | domains_.sort()
279 | csv_file = open(output_directory+"main_domains.txt","w+")
280 | writer = csv.writer(csv_file)
281 | worksheet = workbook.add_worksheet("Main domains")
282 | print("-------------------------\nDomains (total: "+str(len(domains_))+")\n-------------------------")
283 | for d in domains_:
284 | if d != "":
285 | print("- %s" % d)
286 | worksheet.write(row, col, d)
287 | writer.writerow([d])
288 | row += 1
289 | print(" ")
290 |
291 |
def parse_files(output_directory, final_file):
    """Merge per-domain tool output files into a single *final_file*.

    Gobuster/Sublist3r write one file per domain, each name prefixed with
    *final_file*; lines containing "Found" are reduced to their second
    whitespace-separated field (presumably the bare subdomain — matches
    the original parsing).
    """
    list_domains = []
    for f in os.listdir(output_directory):
        if f.startswith(final_file):
            # Close each input file deterministically (the original
            # leaked the handles).
            with open(output_directory+f) as fh:
                file_values = fh.read().splitlines()
            for v in file_values:
                if "Found" in v:
                    list_domains.append(v.split(" ")[1])
                else:
                    list_domains.append(v)
    with open(output_directory+final_file, 'w') as txt_file:
        for dom in list_domains:
            txt_file.write(dom + "\n")
305 |
306 |
def analyze(output_directory, ranges, ranges_info, domains_file, dont_list_subdomains):
    """Post-process every tool's output into results.xlsx plus CSV/txt files."""
    res_files = [
        {'name': output_directory+amass_output_file, 'code': 'Amass'},
        {'name': output_directory+dnsdumpster_output_file, 'code': 'DNSDumpster API'},
        {'name': output_directory+sublist3r_output_file, 'code': 'Sublist3r'},
        {'name': output_directory+gobuster_output_file, 'code': 'Gobuster'},
        {'name': output_directory+fdns_output_file, 'code': 'FDNS'},
    ]
    workbook = xlsxwriter.Workbook(output_directory+"results.xlsx")
    # Main domains sheet (only when a domains file is available).
    if domains_file is not None:
        get_domains(output_directory, workbook, domains_file)
    if not dont_list_subdomains:
        # Normalize Gobuster / Sublist3r per-domain files into single lists.
        parse_files(output_directory, gobuster_output_file)
        parse_files(output_directory, sublist3r_output_file)
        # Subdomains by source -> resolved IPs -> unique IP/subdomain lists.
        unique_subdomains = get_subdomain_info(res_files, workbook)
        ip_list = calculate_subdomain_info(output_directory, workbook, ranges, unique_subdomains)
        write_ip_list(ip_list, workbook)
        get_unique_subdomains(output_directory, workbook)
    # Leaked emails/credentials and IP-range metadata.
    get_leaked_information(output_directory, workbook)
    get_range_info(output_directory, workbook, ranges_info)
    workbook.close()
    print("\nCleaning temporary files...")
    # 'touch' guarantees the glob matches at least one file so rm never errors.
    clean_cmd = "touch "+output_directory+"subdoler_temp_; rm "+output_directory+"/*subdoler_temp_*;"
    os.system(clean_cmd)
    print("Done! Output saved in "+output_directory)
334 |
335 |
def print_banner():
    """Print the subdoler ASCII-art banner."""
    banner_lines = (
        "",
        " .d8888b. 888 888 888",
        "d88P Y88b 888 888 888 ",
        "Y88b. 888 888 888 ",
        " *Y888b. 888 888 88888b. .d88888 .d88b. 888 .d88b. 888d888 ",
        " *Y88b. 888 888 888 *88b d88* 888 d88**88b 888 d8P Y8b 888P",
        " *888 888 888 888 888 888 888 888 888 888 88888888 888 ",
        "Y88b d88P Y88b 888 888 d88P Y88b 888 Y88..88P 888 Y8b. 888 ",
        " *Y8888P* *Y88888 88888P* *Y88888 *Y88P* 888 *Y8888 888",
        "",
        " - A (hopefully) less painful way to list subdomains - ",
        "",
    )
    for line in banner_lines:
        print(line)
349 |
350 |
def print_usage():
    """Print argument help to stdout, then exit with status 1."""
    usage_lines = (
        "Error: Domains, ranges or company file or comma separated list is necessary.",
        "\nOne of these arguments is necessary:",
        " + -c: File of companies. Ex: ./subdoler.py -c /tmp/companies.txt",
        " + -C: List of companies. Ex: ./subdoler.py -C company1,company2",
        " + -r: File of IP ranges. Ex: ./subdoler.py -r /tmp/ip_ranges.txt",
        " + -R: List of IP ranges. Ex: ./subdoler.py -R 10.20.30.40/24,11.21.31.41/22",
        " + -d: File of domains. Ex: ./subdoler.py -d /tmp/domains.txt",
        " + -D: List of domains. Ex: ./subdoler.py -D company1.com,company2.es",
        " + -k: Kill tmux session. Ex: ./subdoler.py -k",
        "\nOptional arguments:",
        " + -o: Output directory. Ex: ./subdoler.py -c /tmp/companies.txt -o /tmp/subdoler_results",
        " + -cf: Country filter for IP range extraction from IPv4info. Ex: ./subdoler.py -c /tmp/companies.txt -cf ES,IT,US",
        " + -ns: No subdomain calculation. Ex: ./subdoler.py -r /tmp/ip_ranges.txt -ns",
        " + -p: Process results (useful for closing everything except the tmux session and process the resulting files some hours later). Ex: ./subdoler.py -o /tmp/subdoler_results -p",
        "",
    )
    for line in usage_lines:
        print(line)
    sys.exit(1)
368 |
369 |
def check_python_version():
    """Abort with exit code 1 when running under Python 2."""
    if sys.version_info <= (3, 0):
        print("Sorry, Python 2 is not supported!")
        sys.exit(1)
374 |
375 |
def create_directory(output_directory):
    """Create *output_directory* (and missing parents) if it does not exist.

    exist_ok avoids the check-then-create race of the original
    os.path.exists() guard and is a no-op when the directory exists.
    """
    os.makedirs(output_directory, exist_ok=True)
379 |
380 |
def create_file_from_list(list_, fname_, output_directory):
    """Write each comma-separated item of *list_* on its own line.

    :return: the path of the file that was written.
    """
    fname_ = output_directory + "/" + fname_
    with open(fname_, 'w') as handle:
        handle.write("".join("%s\n" % item for item in list_.split(",")))
    return fname_
388 |
389 |
def delete_blacklisted_terms(output_directory, domains_file):
    """Drop domains containing any term from the config blacklist_words.

    Lines of length <= 2 (empty/noise lines) are dropped too. The filtered
    content replaces *domains_file* via a temporary dummy file + rename.
    """
    with open(domains_file, "r") as src:
        lines = src.readlines()
    blacklisted = blacklist_words.split(",")
    dummy_domains_file = output_directory + "/" + temp_domains_file + "_dummy"
    with open(dummy_domains_file, "w") as dst:
        for line in lines:
            if len(line) > 2 and not any(bw in line for bw in blacklisted):
                dst.write(line)
    os.rename(dummy_domains_file, domains_file)
405 |
406 |
def kill():
    """Terminate the subdoler tmux session, then quit with status 1."""
    session_cmd = "tmux kill-session -t " + tmux_session_name
    os.system(session_cmd)
    sys.exit(1)
410 |
411 |
def main():
    """Entry point: parse arguments, launch the enumeration tools in a tmux
    session and/or post-process the collected results.
    """
    check_python_version()
    args = get_args()
    domains_file = args.domains_file
    ranges_file = args.ranges_file
    companies_file = args.companies_file
    dont_list_subdomains = args.no_subdomains
    process = args.process
    output_directory = args.output_directory
    # Normalize so later string concatenations can assume a trailing "/".
    output_directory = output_directory + "/" if not output_directory.endswith("/") else output_directory
    create_directory(output_directory)
    kill_flag = args.kill
    # Comma-separated CLI lists are materialized as temp files so the rest
    # of the pipeline only ever deals with files.
    if args.domains_list is not None:
        domains_file = create_file_from_list(args.domains_list, temp_domains_file, output_directory)
    if args.ranges_list is not None:
        ranges_file = create_file_from_list(args.ranges_list, temp_ranges_file, output_directory)
    if args.companies_list is not None:
        companies_file = create_file_from_list(args.companies_list, temp_companies_file, output_directory)
    # Print usage if there is not enough information
    if (domains_file is None) and (ranges_file is None) and (companies_file is None) and (process is False) and (kill_flag is False):
        print_usage()
    if kill_flag:
        kill()
    ranges = None
    ranges_info = None
    if not process:
        if domains_file is None:
            # No domains given: derive domains (plus IP ranges and their
            # metadata) from the company names / ranges input.
            try:
                country_filter = args.country_filter
                domains_file, ranges, ranges_info = range_extractor(ranges_file, companies_file, (output_directory+"/"+temp_domains_file), country_filter)
                if len(ranges) >= 1:
                    delete_blacklisted_terms(output_directory, domains_file)
            except Exception as e:
                print("There was an error, maybe too many connections to IPv4info? \nError %s"%(str(e)))
                sys.exit(1)
        if not dont_list_subdomains:
            # Launch all enumeration tools inside a tmux session.
            commands = get_commands(domains_file, output_directory)
            create_tmux_file(commands, output_directory)
            create_tmux_session(output_directory)
        print("Options:\n\n - 'p': Process the files now\n - 'k': Kill the TMUX session \n - Other: Quit and process the results later with parameter '-p'\n")
        user_input = input("Press a key: ")
        if user_input == 'p' or user_input == 'P':
            print("\nAnalyzing files...\n")
            analyze(output_directory, ranges, ranges_info, domains_file, dont_list_subdomains)
        elif user_input == 'k' or user_input == 'K':
            print("\nKilling TMUX session...")
            kill()
        else:
            print("\nExiting...")
            sys.exit(1)
    else:
        # -p: skip enumeration, just post-process an existing output dir.
        analyze(output_directory, ranges, ranges_info, domains_file, dont_list_subdomains)
464 |
465 |
# Run only when executed as a script, not when imported as a module.
if __name__== "__main__":
    main()
468 |
--------------------------------------------------------------------------------