├── README.md
├── autorecon.py
├── csvspider.py
├── dns_listener.py
├── ftpscanner.py
├── hurricaneElectricLookup.py
├── ipListToCidr.py
├── parse_ntds.py
├── parse_wdigest.py
├── rbackup.py
├── s3-acl.py
└── subDomainBruteforcer.rb

/README.md:
--------------------------------------------------------------------------------
scripts
=======

Scripts that I've written that others may find useful
--------------------------------------------------------------------------------
/autorecon.py:
--------------------------------------------------------------------------------
import os
import sys
import argparse
import dns.resolver
import dns.reversename
import pyasn
import signal
import netaddr
import logging
import multiprocessing
import time
from threading import Event
import queue


def domain_worker(resolver, recursive, subdomains, wild_cards, domain_queue, asn_queue,
                  network_queue, address_queue, domain_results, done):
    """
    Domain worker: resolves each queued domain to its A records and pushes the
    resulting IP addresses onto the address queue.
    Args:
        resolver: dns.resolver.Resolver instance used for lookups.
        recursive: if True, subdomains of newly found domains are queued as well.
        subdomains: list of subdomain labels to prepend when recursing.
        wild_cards: shared list of wildcard addresses to ignore.
        domain_queue: queue of domains to resolve.
        asn_queue: queue of ASNs (shared with the other workers).
        network_queue: queue of network ranges (shared with the other workers).
        address_queue: queue that resolved IP addresses are pushed onto.
        domain_results: shared list of domains already processed.
        done: event used to tell the worker to stop.
    """
    while not done.is_set():
        try:
            domain = domain_queue.get(timeout=1)
            logging.debug("Domain popped from the queue: {}".format(domain))

            if domain in domain_results:
                logging.debug("Domain already in results: {}".format(domain))
                continue

            domain_ip_addresses = resolve_domain(resolver, domain, wild_cards)

            if domain_ip_addresses is not None:
                for ip_addr in domain_ip_addresses:
                    if ip_addr in wild_cards:
                        continue
                    address_queue.put(ip_addr)

                if domain not in domain_results:
                    domain_queue.put(domain)
                    domain_results.append(domain)
                    if recursive:
                        for sub in subdomains:
                            fqdn = '.'.join([sub, domain])
                            domain_queue.put(fqdn)
                    for ip_addr in domain_ip_addresses:
                        address_queue.put(ip_addr)

            domain_queue.task_done()
        except queue.Empty:
            time.sleep(1)
            continue
        except Exception as e:
            logging.error(e, exc_info=True)
            pass


def address_worker(asndb_file, domain_queue, asn_queue, network_queue,
                   address_queue, address_results, rdns_results, done):
    """
    Address worker: records each unique IP address, looks up its PTR record,
    and pushes the address's ASN onto the ASN queue.
    Args:
        asndb_file: path to the pyasn database file used for ASN lookups.
        domain_queue: queue of domains (shared with the other workers).
        asn_queue: queue that discovered ASNs are pushed onto.
        network_queue: queue of network ranges (shared with the other workers).
        address_queue: queue of IP addresses to process.
        address_results: shared list of unique IP addresses seen so far.
        rdns_results: shared list of unique PTR records seen so far.
        done: event used to tell the worker to stop.
    """
    while not done.is_set():
        try:
            ip_addr = address_queue.get(timeout=1)
            logging.debug("IP Address popped from the queue: {}".format(ip_addr))

            if ip_addr is not None and ip_addr not in address_results:
                address_results.append(ip_addr)
                rdns = get_rdns(ip_addr)

                if rdns is not None and rdns not in rdns_results:
                    rdns_results.append(rdns)

                ip_asn = get_asn(ip_addr, asndb_file)

                if ip_asn is not None:
                    asn_queue.put(ip_asn)

            address_queue.task_done()

        except queue.Empty:
            time.sleep(1)
            continue
        except Exception as e:
            logging.error(e, exc_info=True)
            pass
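

# Hypothetical sketch (defined but never called): the hand-off between the four
# queues used by the workers in this script -- domain_worker() feeds the address
# queue, address_worker() feeds the ASN queue, asn_worker() feeds the network
# queue, and network_worker() only records results.
def _queue_flow_example():
    domain_queue = multiprocessing.JoinableQueue()     # seeded by main() with the target domains
    address_queue = multiprocessing.JoinableQueue()    # resolved A records land here
    asn_queue = multiprocessing.JoinableQueue()        # ASNs of unique addresses land here
    network_queue = multiprocessing.JoinableQueue()    # announced prefixes of each ASN land here
    domain_queue.put("example.com")
    return domain_queue, address_queue, asn_queue, network_queue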


def asn_worker(asndb_file, domain_queue, asn_queue, network_queue, address_queue, asn_results, done):
    while not done.is_set():
        try:
            asn = asn_queue.get(timeout=1)
            logging.debug("ASN popped from the queue: {}".format(asn))

            if asn is not None and asn in asn_results:
                logging.debug("ASN already in results: {}".format(asn))
                continue

            if asn is not None:
                asn_results.append(asn)

                asn_networks = get_networks(asn, asndb_file)

                if asn_networks is not None:
                    for network in asn_networks:
                        network_queue.put(network)

            asn_queue.task_done()

        except queue.Empty:
            time.sleep(1)
            continue
        except Exception as e:
            logging.error(e, exc_info=True)
            pass


def network_worker(asndb_file, domain_queue, asn_queue, network_queue, address_queue, network_results, done):
    while not done.is_set():
        try:
            network = network_queue.get(timeout=1)
            logging.debug("Network popped from the queue: {}".format(network))

            if network is not None and network in network_results:
                logging.debug("Network already in results: {}".format(network))
                continue

            if network is not None:
                network_results.append(network)

            network_queue.task_done()

        except queue.Empty:
            time.sleep(1)
            continue

        except Exception as e:
            logging.error(e, exc_info=True)
            pass


def signal_handler(signal, frame):
    """
    Handles SIGINT so the script exits cleanly on Ctrl-C.
    """
    print("Caught signal, exiting...")
    sys.exit(signal)


def get_asn(ip_addr, asndb_file):
    """
    Returns the Autonomous System number of a network IP address.
    Args:
        ip_addr: A network IP address.
        asndb_file: The ASN database file to perform lookups against.
    Returns:
        String: The Autonomous System number.
    """
    logging.debug("Attempting to find ASN for: {}".format(ip_addr))

    try:
        asndb = pyasn.pyasn(asndb_file)
        result = asndb.lookup(ip_addr)[0]
        logging.info("Found ASN: ASN{} - {}".format(result, ip_addr))
        return result
    except Exception as e:
        logging.error(e, exc_info=True)
        pass


def get_networks(asn, asndb_file):
    """
    Returns the network blocks announced by an Autonomous System number.
    Args:
        asn: An Autonomous System number.
        asndb_file: The ASN database file to perform lookups against.
    Returns:
        List: Network blocks in CIDR format.
    """
    logging.debug("Attempting to find networks for ASN: {}".format(asn))

    try:
        asndb = pyasn.pyasn(asndb_file)
        return asndb.get_as_prefixes(asn)
    except Exception as e:
        logging.error(e, exc_info=True)
        pass
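

# Hypothetical sketch (defined but never called): how the two pyasn helpers above
# are expected to behave, assuming a database file named "ipasn.dat" built with
# pyasn's pyasn_util_download.py / pyasn_util_convert.py utilities.
def _asn_lookup_example():
    asn = get_asn("8.8.8.8", "ipasn.dat")        # e.g. 15169 for Google's public resolver
    networks = get_networks(asn, "ipasn.dat")    # prefixes announced by that ASN, e.g. "8.8.8.0/24"
    return asn, networks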


def get_wildcard(resolver, domain):
    """
    Checks to see if a wildcard record exists for a domain by resolving a
    random label underneath it.
    Args:
        resolver: dns.resolver.Resolver instance used for lookups.
        domain: Domain to check for a wildcard record.
    Returns:
        String or None: The wildcard address if one exists, otherwise None.
    """
    import random
    import string

    random_len = random.randint(7, 12)
    random_str = ''.join(random.choice(string.ascii_lowercase) for i in range(random_len))
    fqdn = '.'.join([random_str, domain])

    ip_addr = None

    try:
        records = resolver.query(fqdn, "A")
        for record in records:
            ip_addr = record.address
            logging.debug("Found Wildcard Address: {}".format(ip_addr))
        return ip_addr
    except Exception:
        pass


def resolve_domain(resolver, domain, wild_cards):
    """
    Resolves the A records for a given domain, ignoring wildcard answers.
    Args:
        resolver: dns.resolver.Resolver instance used for lookups.
        domain: Domain to resolve.
        wild_cards: shared list of known wildcard addresses.
    Returns:
        List: IP addresses from valid A records.
    """
    logging.debug("Attempting to resolve: {}".format(domain))
    wildcard_addr = get_wildcard(resolver, domain)
    if wildcard_addr not in wild_cards:
        wild_cards.append(wildcard_addr)
    valid = []

    try:
        records = resolver.query(domain)
        for record in records:
            if record.address not in wild_cards:
                logging.info("Found subdomain: {} - {}".format(domain, record.address))
                valid.append(record.address)
            else:
                logging.debug("Wildcard found in list: {}".format(wild_cards))
        return valid
    except Exception:
        pass


def get_rdns(ip):
    """
    Checks to see if a PTR record exists for a given IP address.
    Args:
        ip: IP address.
    Returns:
        String or None: PTR record if it exists.
    """
    logging.debug("Attempting to get RDNS for: {}".format(ip))

    try:
        address = dns.reversename.from_address(ip)
        records = dns.resolver.query(address, "PTR")

        for record in records:
            rdns = record.target
            logging.info("Found RDNS: {} - {}".format(str(rdns), ip))
            return str(rdns)
    except Exception:
        pass


def get_ips(network):
    """
    Gets the individual IP addresses in a given network range.
    Args:
        network: network range in CIDR notation.
    Returns:
        List of IP addresses.
    """
    logging.debug("Attempting to get IPs for network: {}".format(network))
    ip_addresses = []

    for ip in netaddr.IPNetwork(network):
        ip_addresses.append(str(ip))

    return ip_addresses
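

# Hypothetical sketch (defined but never called): the intended wildcard handling --
# get_wildcard() resolves a random label first, and resolve_domain() then discards
# any answers that match a known wildcard address.
def _wildcard_check_example():
    resolver = dns.resolver.Resolver()
    resolver.nameservers = ["8.8.8.8", "8.8.4.4"]
    wild_cards = []
    addresses = resolve_domain(resolver, "www.example.com", wild_cards)
    return addresses, wild_cards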


def report(domains, asns=None, networks=None, addresses=None, rdns=None):
    """
    Prints the sets of gathered domains, autonomous system numbers, networks,
    IP addresses, and PTR records.
    Args:
        domains: set of domains gathered.
        asns: set of autonomous system numbers gathered.
        networks: set of network ranges gathered.
        addresses: set of IP addresses gathered.
        rdns: set of PTR records gathered.
    """
    if domains is not None:
        print_border("DOMAINS ({})".format(len(domains)))
        print("{}".format("\n".join(str(x) for x in domains)))

    if asns is not None:
        print_border("AUTONOMOUS SYSTEM NUMBERS ({})".format(len(asns)))
        print(*asns, sep="\n")

    if networks is not None:
        networks = netaddr.cidr_merge(list(networks))
        print_border("NETWORK RANGES ({})".format(len(networks)))
        print(*networks, sep="\n")

    if addresses is not None:
        print_border("IP ADDRESSES ({})".format(len(addresses)))
        print(*addresses, sep="\n")

    if rdns is not None:
        print_border("RDNS RECORDS ({})".format(len(rdns)))
        print(*rdns, sep="\n")


def print_border(text):
    """
    Prints a border around a given string.
    Args:
        text: A string of text to put a border around.
    """
    border = "*" * len(text)
    print(border)
    print(text)
    print(border)


def main():
    # parse the arguments provided to us
    parser = argparse.ArgumentParser()
    parser.add_argument("--domain", required=True, help="domain or file with a list of domains")
    parser.add_argument("--asndb", required=True, help="latest asndb file")
    parser.add_argument("--subdomains", required=False, help="file of subdomains separated by newlines")
    parser.add_argument("--threads", required=False, type=int, help="number of threads to use")
    parser.add_argument("--nameservers", required=False, help="file of specific nameservers to use")
    parser.add_argument("--debug", type=int, required=False,
                        help="levels of verbosity: 1 - Informational, 2 - Debugging, 3 - Errors")
    parser.add_argument("--recursive", required=False, action="store_true",
                        help="if set, valid subdomains will be added to the domain queue")

    args = parser.parse_args()

    domain = args.domain
    asndb_file = args.asndb
    subdomains_file = args.subdomains
    threads = args.threads
    nameservers_file = args.nameservers
    debug = args.debug
    recursive = args.recursive

    signal.signal(signal.SIGINT, signal_handler)

    # initialize our work queues
    domain_queue = multiprocessing.JoinableQueue()
    asn_queue = multiprocessing.JoinableQueue()
    network_queue = multiprocessing.JoinableQueue()
    address_queue = multiprocessing.JoinableQueue()

    # initialize our result sets
    manager = multiprocessing.Manager()
    domain_results = manager.list()
    asn_results = manager.list()
    network_results = manager.list()
    address_results = manager.list()
    rdns_results = manager.list()
    wild_cards = manager.list()

    if threads is None:
        threads = 10

    if debug == 1:
        logging.basicConfig(level=logging.INFO)
    elif debug == 2:
        logging.basicConfig(level=logging.DEBUG)
    elif debug == 3:
        logging.basicConfig(level=logging.ERROR)

    # check to see if the argument is a file; if so, read it line by line and add to the domains list
    if os.path.exists(domain):
        try:
            domains = [line.strip() for line in open(domain)]
        except IOError:
            logging.error("Failed to open {}".format(domain))
            sys.exit(1)
    else:
        domains = []
        domains.append(domain)
    # if a subdomains file was specified, attempt to open it and add it to the subdomains list
    if subdomains_file is not None:
        if os.path.exists(subdomains_file):
            try:
                subdomains = [line.strip() for line in open(subdomains_file)]
            except IOError:
                logging.error("Failed to open {}".format(subdomains_file))
                sys.exit(1)
        else:
            logging.error("File {} does not exist".format(subdomains_file))
            sys.exit(1)
    else:
        subdomains = []

    # check to see if the asndb file exists
    if asndb_file is not None:
        if os.path.exists(asndb_file):
            pass
        else:
            logging.error("File {} does not exist".format(asndb_file))
            sys.exit(1)

    # if a nameservers file was specified, attempt to open it and add it to the nameservers list
    if nameservers_file is not None:
        if os.path.exists(nameservers_file):
            try:
                nameservers = [line.strip() for line in open(nameservers_file)]
            except IOError:
                logging.error("Failed to open {}".format(nameservers_file))
                sys.exit(1)
        else:
            logging.error("File {} does not exist".format(nameservers_file))
            sys.exit(1)
    else:
        # no nameservers file specified, let's use Google's
        nameservers = ["8.8.8.8", "8.8.4.4"]

    resolver = dns.resolver.Resolver()
    resolver.nameservers = nameservers

    domain_worker_threads = []
    asn_worker_threads = []
    network_worker_threads = []
    address_worker_threads = []

    done = manager.Event()
    done.clear()

    for domain in domains:
        domain_queue.put(domain)
        for sub in subdomains:
            fqdn = ".".join([sub, domain])
            domain_queue.put(fqdn)

    for domainWorker in range(threads):
        p = multiprocessing.Process(
            target=domain_worker,
            args=(resolver, recursive, subdomains, wild_cards, domain_queue,
                  asn_queue, network_queue, address_queue, domain_results, done))
        domain_worker_threads.append(p)
        p.start()

    for asnWorker in range(1, 2):
        p = multiprocessing.Process(
            target=asn_worker,
            args=(asndb_file, domain_queue, asn_queue, network_queue, address_queue, asn_results, done))
        asn_worker_threads.append(p)
        p.start()

    for networkWorker in range(1, 2):
        p = multiprocessing.Process(
            target=network_worker,
            args=(asndb_file, domain_queue, asn_queue, network_queue, address_queue, network_results, done))
        network_worker_threads.append(p)
        p.start()

    for addressWorker in range(1, 2):
        p = multiprocessing.Process(
            target=address_worker,
            args=(asndb_file, domain_queue, asn_queue, network_queue, address_queue,
                  address_results, rdns_results, done))
        address_worker_threads.append(p)
        p.start()

    while domain_queue.qsize() > 0 or asn_queue.qsize() > 0 or address_queue.qsize() > 0 or network_queue.qsize() > 0:
        time.sleep(2)

    done.set()

    for p in domain_worker_threads:
        p.join(timeout=5)
    for p in asn_worker_threads:
        p.join(timeout=5)
    for p in network_worker_threads:
        p.join(timeout=5)
    for p in address_worker_threads:
        p.join(timeout=5)

    report(domains=domain_results,
           asns=asn_results,
           networks=network_results,
           addresses=address_results,
           rdns=rdns_results)


if __name__ == '__main__':
    main()
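

# Example invocation (hypothetical file names):
#   python autorecon.py --domain example.com --asndb ipasn.dat \
#       --subdomains subdomains.txt --threads 20 --recursive --debug 1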
--------------------------------------------------------------------------------
/csvspider.py:
--------------------------------------------------------------------------------
#!/usr/bin/python3

import urllib.request
import urllib.parse
import ssl
import sys

roots = []
suffix = "/CVS/Entries"

ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE


def parse_entries(entries, target):
    for line in entries:
        line = line.decode("utf-8").rstrip()

        # CVS Entries rows starting with "D/" are directories, rows starting with "/" are files
        if line.startswith('D/'):
            print("{}{}".format(target, line.split("/")[1]))
            roots.append("{}{}/".format(target, line.split("/")[1]))

        elif line.startswith('/'):
            print("{}{}".format(target, line.split("/")[1]))
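

# Hypothetical sketch (defined but never called): what parse_entries() does with a
# couple of representative CVS Entries rows -- the directory row is printed and
# queued for further spidering, the file row is just printed.
def _parse_entries_example():
    sample = [b"D/docs////", b"/index.php/1.4/Mon Jan  1 00:00:00 2024//"]
    parse_entries(sample, "http://www.example.com/")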


def main():
    if len(sys.argv) != 2:
        print("Usage: python3 {} <start url>".format(sys.argv[0]))
        sys.exit(1)

    start = sys.argv[1]

    if not start.endswith("/"):
        start += "/"

    roots.append(start)

    while roots:
        target = roots.pop()

        if target.startswith("https://"):
            try:
                r = urllib.request.urlopen("{}{}".format(target, suffix), context=ctx)
            except Exception as e:
                print("Exception: {} - {}".format(e, target))
                continue
        elif target.startswith("http://"):
            try:
                r = urllib.request.urlopen("{}{}".format(target, suffix))
            except Exception as e:
                print("Exception: {} - {}".format(e, target))
                continue
        else:
            print("Target should start with http or https")
            sys.exit(1)

        data = r.readlines()
        parse_entries(data, target)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/dns_listener.py:
--------------------------------------------------------------------------------
#!/usr/bin/python

"""
-------------------------------------------------------------------------------
Name: dns_listener.py
Purpose: Listens for incoming DNS requests and prints the query name when the
         given domain string appears in it.
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
"""

import sys
import argparse
import signal
from scapy.all import *


def pkt_callback(pkt, domain):
    if DNSQR in pkt:
        if domain in pkt[DNS].qd.qname:
            print pkt[DNS].qd.qname


def signal_handler(signal, frame):
    print "Caught signal, exiting!"
    sys.exit(signal)


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--interface", help="Interface to sniff on")
    parser.add_argument("--domain", help="String to match on")

    if len(sys.argv) != 5:
        parser.print_help()
        sys.exit(1)

    signal.signal(signal.SIGINT, signal_handler)

    args = parser.parse_args()

    interface = args.interface
    domain = args.domain
    filter = 'udp and port 53'

    sniff(iface=interface, filter=filter, prn=lambda x: pkt_callback(x, domain))


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/ftpscanner.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

"""
-------------------------------------------------------------------------------
Name: ftpscanner.py
Purpose: Threaded anonymous ftp scanner
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
"""

import Queue
import threading
import iptools
import sys
import os
from ftplib import FTP

# Constant Variables
MAX_THREADS = 100
TIMEOUT = 2


class ThreadFTP(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            host = self.queue.get()
            try:
                ftp = FTP(host, timeout=TIMEOUT)
                if ftp:
                    if ftp.login("anonymous", "jsmith@aol.com"):
                        ls = ftp.nlst()
                        print "Success: %s %s" % (host, ls)
            except:
                pass
            self.queue.task_done()
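

# Hypothetical sketch (defined but never called): the anonymous-login check that
# ThreadFTP.run() performs for each queued host.
def _anon_ftp_example(host):
    ftp = FTP(host, timeout=TIMEOUT)
    ftp.login("anonymous", "jsmith@aol.com")   # same throwaway e-mail address the scanner uses
    return ftp.nlst()                          # directory listing if the login succeeded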


def usage():
    print 'Usage: python %s <ip range or file of hosts>' % sys.argv[0]


def main():
    if len(sys.argv) != 2:
        usage()
        sys.exit()

    queue = Queue.Queue()

    if os.path.exists(sys.argv[1]):
        hosts = [line.strip() for line in open(sys.argv[1])]
    else:
        hosts = iptools.IpRangeList(sys.argv[1])

    for host in hosts:
        queue.put(host)

    for thr in range(MAX_THREADS):
        t = ThreadFTP(queue)
        t.setDaemon(True)
        t.start()

    queue.join()


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/hurricaneElectricLookup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

"""
-------------------------------------------------------------------------------
Name: hurricaneElectricLookup.py
Purpose: Look up target network ranges using bgp.he.net
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
"""

from bs4 import BeautifulSoup
import requests
import sys


def usage():
    print "Usage: %s <search term>" % sys.argv[0]


def main():
    if len(sys.argv) != 2:
        usage()
        sys.exit()

    search = sys.argv[1]

    headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36"}
    url = "http://bgp.he.net/search?search%5Bsearch%5D=" + search + "&commit=Search"
    browser = requests.get(url, headers=headers)

    soup = BeautifulSoup(browser.text, "html.parser")
    table = soup.find("table")

    try:
        rows = table.findAll("tr")
    except:
        print "No results found"
        sys.exit()

    for row in rows:
        tds = row.findAll("td")
        try:
            a = str(tds[0].get_text())
            b = str(tds[1].get_text())
            print "%s, %s" % (a, b)
        except:
            continue


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/ipListToCidr.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

"""
-------------------------------------------------------------------------------
Name: ipListToCidr.py
Purpose: Condense an ip list to cidr ranges
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
"""

import netaddr
import sys


def usage():
    print "Usage:", sys.argv[0], "<ip list file>"


def main():
    if len(sys.argv) < 2:
        usage()
        sys.exit()

    ipFile = open(sys.argv[1])
    ipAddresses = [line.strip() for line in ipFile.readlines()]
    ipAddresses = sorted(ipAddresses)
    cidrs = netaddr.cidr_merge(ipAddresses)
    for cidr in cidrs:
        print cidr
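

# Hypothetical sketch (defined but never called): what netaddr.cidr_merge() does
# with a handful of adjacent addresses.
def _cidr_merge_example():
    ips = ["192.168.0.0", "192.168.0.1", "192.168.0.2", "192.168.0.3"]
    return netaddr.cidr_merge(ips)   # -> [IPNetwork('192.168.0.0/30')]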


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/parse_ntds.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

"""
-------------------------------------------------------------------------------
Name: parse_ntds.py
Purpose: Takes the output of dsusers.py, output of hashcat, and prints
         the usernames, hashes, and cracked passwords together
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
"""

import sys, re, argparse


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--ntds", help="Output file from dsusers.py")
    parser.add_argument("--cracked", help="Output file from cracking")

    args = parser.parse_args()

    if len(sys.argv) != 5:
        parser.print_help()
        sys.exit(1)

    users = {}
    pw_hashes = {}

    with open(args.ntds) as ntds_file:
        for line in ntds_file:
            if ':::' in line:
                line = ''.join(line.split())
                username = re.split(":", line)[0]
                pw_hash = re.findall(r"([a-z0-9]{32})", line)[0]
                users[username] = pw_hash

    with open(args.cracked) as cracked_file:
        for line in cracked_file:
            line = ''.join(line.split())
            pw_hash = re.split(":", line)[0]
            password = re.split(":", line)[1:][0]
            pw_hashes[pw_hash] = password

    for username in users:
        if users[username] in pw_hashes:
            print "{}, {}, {}".format(username, users[username], pw_hashes[users[username]])
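

# Hypothetical sketch (defined but never called): the two input line formats the
# loops above expect -- a pwdump-style line from dsusers.py (user:rid:LM:NT:::)
# and a hash:password line from the cracking output. The hash values below are
# the well-known empty LM hash and the NT hash of "password".
def _input_format_example():
    ntds_line = "jsmith:1104:aad3b435b51404eeaad3b435b51404ee:8846f7eaee8fb117ad06bdd830b7586c:::"
    cracked_line = "8846f7eaee8fb117ad06bdd830b7586c:password"
    return ntds_line, cracked_line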


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/parse_wdigest.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

"""
-------------------------------------------------------------------------------
Name: parse_wdigest.py
Purpose: Takes the output of mimikatz's wdigest and outputs it cleaner
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
"""

import sys, re


def usage():
    print 'Usage: python {} <mimikatz wdigest output file>'.format(sys.argv[0])


def main():
    if len(sys.argv) != 2:
        usage()
        exit()

    username_regex = re.compile(r'\*\s+Username\s+:\s+(.*)')
    domain_regex = re.compile(r'\*\sDomain\s+:\s(.*)')
    password_regex = re.compile(r'\*\s+Password\s+:\s+(.*)')

    with open(sys.argv[1], 'r') as infile:
        data = infile.read()

    usernames = re.findall(username_regex, data)
    domains = re.findall(domain_regex, data)
    passwords = re.findall(password_regex, data)

    for i in range(0, len(usernames)):
        print '{}\\{} - {}'.format(domains[i], usernames[i], passwords[i])


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/rbackup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

"""
-------------------------------------------------------------------------------
Name: rbackup.py
Purpose: Remote cisco configuration backup
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
"""

import sys
import datetime
import pexpect


def login(host, username, password):
    """ Prompt Types """
    newkey = 'yes/no'
    passprompt = 'password:'

    cisco = pexpect.spawn('ssh %s@%s' % (username, host))
    response = cisco.expect([newkey, passprompt, pexpect.EOF])

    if response == 0:
        cisco.sendline('yes')
        response = cisco.expect([newkey, passprompt, pexpect.EOF])
    if response == 1:
        cisco.sendline(password)
        cisco.expect('.*>')
        return cisco
    elif response == 2:
        print "Unable to login to:", host
        pass


def enlogin(child, enpass):
    child.sendline('enable')
    child.expect('Password: ')
    child.sendline(enpass)
    child.expect('.*')


def get_prompt(child):
    return ''.join(child.after).strip()


def get_enprompt(child):
    return ''.join(child.after).strip()


def backup_runconf(child, enprompt, log, hostname):
    child.sendline('terminal length 0')
    child.expect(enprompt)
    child.sendline('show run')
    child.expect(enprompt)

    fh = open(log, 'w')
    fh.write(child.before)
    fh.close()

    print "[+] Backup Successful on:", hostname


def usage():
    print 'Usage: python %s <hosts file>' % sys.argv[0]


def main():
    if len(sys.argv) != 2:
        usage()
        sys.exit()

    username = 'admin'
    password = ''
    enpass = ''
    hosts = open(sys.argv[1])

    for host in hosts:
        host = host.strip()
        cisco = login(host, username, password)

        if cisco:
            prompt = get_prompt(cisco)
            enlogin(cisco, enpass)
            enprompt = get_enprompt(cisco)
            hostname = enprompt[:len(enprompt) - 1]
            log = str(hostname) + "-" + str(datetime.date.today())

            backup_runconf(cisco, enprompt, log, hostname)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/s3-acl.py:
--------------------------------------------------------------------------------
import boto3
import argparse
import sys
from collections import deque
from threading import Thread, Lock
from queue import Queue


lock = Lock()


def safe_print(*args, **kwargs):
    with lock:
        print(*args, **kwargs)


def worker(q):
    while True:
        bucket_name, object_name = q.get()
        check_permissions(bucket_name, object_name)
        q.task_done()


def check_permissions(bucket, key):
    s3 = boto3.resource("s3")

    try:
        object_acl = s3.ObjectAcl(bucket, key)

        for grantee in object_acl.grants:
            if grantee['Grantee']['Type'] != "Group":
                continue
            if 'AllUsers' in grantee['Grantee']['URI']:
                safe_print(f"s3://{bucket}/{key} - AllUsers - {grantee['Permission']}")
                continue
            if 'AuthenticatedUsers' in grantee['Grantee']['URI']:
                safe_print(f"s3://{bucket}/{key} - AuthenticatedUsers - {grantee['Permission']}")
                continue
    except Exception:
        pass


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("-a", "--all-buckets", action='store_true', help="get and check all buckets")
    parser.add_argument("-b", "--bucket", help="bucket to check")
    parser.add_argument("-t", "--threads", type=int, default=10, help="number of threads to use")
    args = parser.parse_args()

    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(-1)

    if not (args.all_buckets or args.bucket):
        print("Either --all-buckets or --bucket is required")
        sys.exit(-1)

    '''
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html:
    The mechanism in which Boto3 looks for credentials is to search through a list of possible locations and stop as
    soon as it finds credentials. The order in which Boto3 searches for credentials is:

    - Passing credentials as parameters in the boto.client() method
    - Passing credentials as parameters when creating a Session object
    - Environment variables
    - Shared credential file (~/.aws/credentials)
    - AWS config file (~/.aws/config)
    - Assume Role provider
    - Boto2 config file (/etc/boto.cfg and ~/.boto)
    - Instance metadata service on an Amazon EC2 instance that has an IAM role configured.
    '''
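    # Hypothetical alternative (not used here): pin an explicit profile/region with a
    # boto3 Session instead of relying on the default credential chain described above.
    #   session = boto3.Session(profile_name="default", region_name="us-east-1")
    #   s3resource = session.resource("s3")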
    # set up the S3 resource, the bucket list, and the object work queue
    s3resource = boto3.resource("s3")

    buckets = deque()
    object_queue = Queue()

    if args.all_buckets:
        s3client = boto3.client("s3")
        response = s3client.list_buckets()

        for bucket in response['Buckets']:
            buckets.append(bucket['Name'])

    if args.bucket:
        if args.bucket not in buckets:
            buckets.append(args.bucket)

    for i in range(args.threads):
        t = Thread(target=worker, args=(object_queue,))
        t.daemon = True
        t.start()

    for bucket in buckets:
        for s3_object in s3resource.Bucket(bucket).objects.all():
            if not s3_object.key.endswith("/"):
                object_queue.put((s3_object.bucket_name, s3_object.key))

    object_queue.join()


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/subDomainBruteforcer.rb:
--------------------------------------------------------------------------------
#!/usr/bin/env ruby

=begin
-------------------------------------------------------------------------------
Name: subDomainBruteforcer.rb
Purpose: Script to quickly resolve subdomains to network addresses
Author: Justin Kennedy (@jstnkndy)
-------------------------------------------------------------------------------
=end

require 'resolv'

def resolve_dns(subdomain, wildcard_address)
  begin
    subdomain_addresses = Resolv.getaddresses(subdomain)
    subdomain_addresses.each do |subdomain_address|
      if subdomain_address != wildcard_address
        puts "#{subdomain_address} - #{subdomain}"
      end
    end
  rescue StandardError => error
  end
end

def check_for_wildcard(wildcard_domain)
  begin
    wildcard_address = Resolv.getaddress(wildcard_domain)
  rescue StandardError => error
  end
end

def usage
  puts "Usage: #{$0} <domains file> <subdomains file>"
  exit
end

domains_wordlist, subdomains_wordlist = ARGV
usage unless ARGV.length == 2

domains = []
subs = []

File.read(domains_wordlist).split("\n").each { |domain| domains << domain }
File.read(subdomains_wordlist).split("\n").each { |sub| subs << sub }

domains.each do |domain|
  wildcard_address = check_for_wildcard("thisshouldneverexist.#{domain}")
  subs.each do |sub|
    subdomain = "#{sub}.#{domain}"
    resolve_dns(subdomain, wildcard_address)
  end
end
--------------------------------------------------------------------------------