├── .gitignore
├── README
├── tshark.commands
├── bruteForceAuthLogParser.py
├── filemoving.py
├── tsharkIPcount.py
├── findWords.py
├── weblogAnalysis.py
├── IPaddressHistogram.py
├── pandasWebLogAnalysis.py
├── NMAPparser.py
├── serverFilesHTTP.py
├── gephiGraphingTest.py
├── geolocationOSX.py
├── wordlistCreator.py
├── chartmaker.py
├── pcap2csv.py
├── trace.py
├── d3jsBarChart.ipynb
├── plottingCandleSticks.py
└── pandasAuthLogAnalysis.ipynb

/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
This is a collection of useful tools I found on the Interwebs or created out of need. Feel free to use and reuse.
--------------------------------------------------------------------------------
/tshark.commands:
--------------------------------------------------------------------------------
tshark -o 'column.format:"Source", "%s", "Destination", "%d"' -z conv,tcp -r inFile.pcap > outFile.txt

tshark -V -r inFile.pcap > outFile.txt

tshark -o 'column.format:"No.", "%m", "Time", "%Yt", "Time", "%Tt", "Time", "%Rt", "Source", "%s", "Destination", "%d", "Protocol", "%p", "Info", "%i", "Length", "%L"' -z conv,tcp -r inFile.cap > outFile.txt
--------------------------------------------------------------------------------
/bruteForceAuthLogParser.py:
--------------------------------------------------------------------------------
import re

# matches a dotted-quad IPv4 address
ip = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}')


def IPsearch(data):
    IpHitListing = {}
    for line in data:
        if "Failed password for root from" in line:
            find_match = ip.search(line)
            if find_match:
                Ip = find_match.group()
                IpHitListing[Ip] = IpHitListing.get(Ip, 0) + 1
    return IpHitListing
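
# A usage sketch, not part of the original script ('auth.log' is an assumed
# input path): print the offending IPs, worst offenders first.
if __name__ == '__main__':
    hits = IPsearch(open('auth.log').readlines())
    for addr, count in sorted(hits.items(), key=lambda kv: kv[1], reverse=True):
        print addr, count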
--------------------------------------------------------------------------------
/filemoving.py:
--------------------------------------------------------------------------------
import re
import string
import os
import shutil

alphabet = string.lowercase
path = "/Users/antigen/Downloads/"
destination = '/Users/antigen/Downloads/files'

# create one folder per letter; skip any that already exist
for letter in alphabet:
    try:
        os.makedirs(path + letter, 0755)
    except OSError:
        pass

# copy every file under destination into the folder matching its first letter
for dirpath, dirnames, filenames in os.walk(destination):
    for item in filenames:
        for letter in alphabet:
            if re.search('^' + letter, item):
                shutil.copyfile(os.path.join(dirpath, item), path + letter + "/" + item)

--------------------------------------------------------------------------------
/tsharkIPcount.py:
--------------------------------------------------------------------------------
#! /usr/bin/python

'''
This program counts up the packets sent to each IP address found in a PCAP.
Replaces the commands below. The lines beginning with ! are IPython shell
escapes, so run this from an IPython session rather than plain Python.
'''
##########
# bash way
##########
# cat test2.csv | awk {'print $5'} | sort | uniq -c

! tshark -r test.pcap -T fields -e frame.number -e eth.src -e eth.dst -e ip.src -e ip.dst -e frame.len -E header=y -E separator=" " > test2.csv
data = ! cat test2.csv


IpHitListing = {}

# field 5 is ip.dst; the length check filters out malformed fields
for line in data:
    Ip = line.split(' ')[4]
    if 6 < len(Ip) <= 15:
        IpHitListing[Ip] = IpHitListing.get(Ip, 0) + 1
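
# A reporting sketch, not in the original: print the ten busiest
# destination IPs from the dict built above.
for Ip, count in sorted(IpHitListing.items(), key=lambda kv: kv[1], reverse=True)[:10]:
    print count, Ip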
--------------------------------------------------------------------------------
/findWords.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
'''
This program finds words in lists or books that end or begin with certain
letters, numbers, or basic patterns
'''

data = open("namesFoundOnInternet.txt").readlines()

# process each line, strip return characters, and
# collect every word ending in "s" into mylist
mylist = []
for line in data:
    if line.strip().endswith("s"):
        mylist.append(line)

fout = open('names.txt', 'w')
for line in mylist:
    fout.write(line)
fout.close()

# from the words ending in "s", collect every
# word beginning with "c" into mylist2
mylist2 = []
for line in mylist:
    if line.strip().startswith("c"):
        mylist2.append(line)

fout = open('names2.txt', 'w')
for line in mylist2:
    fout.write(line)
fout.close()
--------------------------------------------------------------------------------
/weblogAnalysis.py:
--------------------------------------------------------------------------------
#! /usr/bin/python
'''
This program takes in an Apache www-media.log and prints a basic report
'''

from collections import Counter

ipAddressList = []
methodList = []
requestedList = []
referalList = []

data = open('www-media.log').readlines()
for line in data:
    fields = line.split()
    ipAddressList.append(fields[0])
    methodList.append(fields[5])
    requestedList.append(fields[6])
    referalList.append(fields[10])

count_ip = Counter(ipAddressList)
count_requested = Counter(requestedList)
count_method = Counter(methodList)
count_referal = Counter(referalList)

print count_ip.most_common()
print count_requested.most_common()
print count_method.most_common()
print count_referal.most_common()

'''
This is how you do the same thing in pandas!

import pandas
data = open('www-media.log').readlines()
frame = pandas.DataFrame([x.split() for x in data])

countIP = frame[0].value_counts()
countRequested = frame[6].value_counts()
countReferal = frame[10].value_counts()

print countIP
print countRequested
print countReferal

'''
--------------------------------------------------------------------------------
/IPaddressHistogram.py:
--------------------------------------------------------------------------------
#! /usr/bin/python
'''
This program reads a target file and counts up IP address hits into a dict

>>> CalculateApacheIpHits('www-access.log')
{
 '10.0.1.14': 18,
 '10.0.1.2': 241,
}

Isolate a specific IP address with this query

>>> HitsDictionary = CalculateApacheIpHits("www-access.log")
>>> print HitsDictionary['10.0.1.2']
241

'''


def CalculateApacheIpHits(logfile_pathname):
    # Make a dictionary to store IP addresses and their hit counts
    # and read the contents of the log file line by line
    IpHitListing = {}
    Contents = open(logfile_pathname, "r").xreadlines()
    # You can use .readlines in old Python, but if the log is huge...

    # Go through each line of the logfile
    for line in Contents:
        # Split the string to isolate the IP address
        Ip = line.split(" ")[0]

        # Ensure length of the IP address is proper (see discussion)
        if 6 < len(Ip) <= 15:
            # Increase by 1 if IP exists; else set hit count = 1
            IpHitListing[Ip] = IpHitListing.get(Ip, 0) + 1

    return IpHitListing
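
# A small report sketch, not in the original: show the ten busiest client IPs.
if __name__ == '__main__':
    hits = CalculateApacheIpHits('www-access.log')
    for Ip, count in sorted(hits.items(), key=lambda kv: kv[1], reverse=True)[:10]:
        print Ip, count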
--------------------------------------------------------------------------------
/pandasWebLogAnalysis.py:
--------------------------------------------------------------------------------
#! /usr/bin/python
'''
This program uses pandas to load logs into a DataFrame for analysis
'''
import pandas

data = open('www-media.log').readlines()
frame = pandas.DataFrame([x.split() for x in data])

countIP = frame[0].value_counts()
countRequested = frame[6].value_counts()
countReferal = frame[10].value_counts()

print countIP
print countRequested
print countReferal


def get_bruteRoot(data):
    mylist = []
    for line in data:
        if 'Failed password for root' in line:
            mylist.append(line)
    frame = pandas.DataFrame([x.split() for x in mylist])
    return frame[10].value_counts()


def get_brute2(data, searchTerm):
    mylist = []
    for line in data:
        if 'Failed password for ' + searchTerm in line:
            mylist.append(line)
    frame = pandas.DataFrame([x.split() for x in mylist])
    return frame[10].value_counts()


def get_failedPasswordInvalidUser(data):
    mylist = []
    for line in data:
        if 'Failed password for invalid user' in line:
            mylist.append(line)
    failedframe = pandas.DataFrame([x.split() for x in mylist])
    return failedframe[10].value_counts()


def get_loginHistory(data):
    mylist = []
    for line in data:
        if 'Failed password for' in line:
            mylist.append(line)
    frame = pandas.DataFrame([x.split() for x in mylist])
    return frame[8].value_counts()
--------------------------------------------------------------------------------
/NMAPparser.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
'''
This program rips through text dumps of NMAP output and puts the results into
a pickled datafile for pandas analysis. The ! line below is an IPython shell
escape, so run this from an IPython session.
'''

import re
import cPickle as pickle
import datetime

data = ! cat *

newDict = {}
newList = []
splitLine = ['0.0.0.0', '0.0.0.0']
pattern = re.compile('[0-9]{1,5}/tcp')
nowTime = datetime.datetime.now().strftime('%Y-%m-%d')

for line in data:
    if 'Nmap scan report for ' in line:
        cutLine = line[21:]
        splitLine = cutLine.split(' ')
        if len(splitLine) == 1:  # no reverse DNS name; reuse the IP
            splitLine.insert(1, splitLine[0])
        if len(splitLine) == 2:  # strip the parentheses off the IP
            bareIp = splitLine[1].lstrip('(').rstrip(')')
            splitLine.insert(1, bareIp)
            splitLine.pop()

    match = pattern.search(line)
    if match:  # looking for listings of open ports
        newList.append(match.group())

    if len(line) == 0:  # end of one scan object; record it in the dict
        newDict[splitLine[0]] = [splitLine[1], newList]
        newList = []

# saving pickled file to disk
pickle.dump(newDict, open("saveNewDict." + nowTime, "wb"), True)

# printing output
for key, value in newDict.items():
    a, b = value
    if len(b) > 0:
        print
        print key, a,
        for item in b:
            print item,
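
# A reload sketch, not in the original: read the pickle back and unpack each
# host record (the date-suffixed filename must match the dump above).
loaded = pickle.load(open("saveNewDict." + nowTime, "rb"))
for host, (hostIp, openPorts) in loaded.items():
    print host, hostIp, openPorts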
--------------------------------------------------------------------------------
/serverFilesHTTP.py:
--------------------------------------------------------------------------------
import os
import posixpath
import urllib
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler

# modify this to add additional routes
ROUTES = (
    # [url_prefix , directory_path]
    ['/media', '/var/www/media'],
    ['', '/var/www/site']  # empty string for the 'default' match
)


class RequestHandler(SimpleHTTPRequestHandler):

    def translate_path(self, path):
        """translate path given routes"""

        # set default root to cwd
        root = os.getcwd()

        # look up routes and set root directory accordingly
        for pattern, rootdir in ROUTES:
            if path.startswith(pattern):
                # found match!
                path = path[len(pattern):]  # consume path up to pattern len
                root = rootdir
                break

        # normalize path and prepend root directory
        path = path.split('?', 1)[0]
        path = path.split('#', 1)[0]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)

        path = root
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir):
                continue
            path = os.path.join(path, word)

        return path


if __name__ == '__main__':
    BaseHTTPServer.test(RequestHandler, BaseHTTPServer.HTTPServer)
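
# Usage sketch (example file is hypothetical): with the ROUTES above, requests
# under /media are served from /var/www/media and everything else from
# /var/www/site. BaseHTTPServer.test listens on port 8000 unless a port is
# given on the command line:
#   python serverFilesHTTP.py
#   curl http://localhost:8000/media/example.png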
--------------------------------------------------------------------------------
/gephiGraphingTest.py:
--------------------------------------------------------------------------------
#some imports
import org.gephi.graph.api as graph_api

#we do not need to init a project

#Get a graph model - it exists because gephi has created the workspace
graphModel = gephi.getLookup().lookup(graph_api.GraphController).getModel()

#Create three nodes
n0 = graphModel.factory().newNode("n0") #we just remove the type Node and the ;
n0.getNodeData().setLabel("Node 0")
n1 = graphModel.factory().newNode("n1")
n1.getNodeData().setLabel("Node 1")
n2 = graphModel.factory().newNode("n2")
n2.getNodeData().setLabel("Node 2")

#Create three edges
e1 = graphModel.factory().newEdge(n1, n2, 1., True) #we remove Edge, true -> True and 1f -> 1.
#in Java it was: Edge e1 = graphModel.factory().newEdge(n1, n2, 1f, true);
e2 = graphModel.factory().newEdge(n0, n2, 2., True)
e3 = graphModel.factory().newEdge(n2, n0, 2., True) #This is e2's mutual edge

#Append as a Directed Graph
directedGraph = graphModel.getDirectedGraph()
directedGraph.addNode(n0)
directedGraph.addNode(n1)
directedGraph.addNode(n2)
directedGraph.addEdge(e1)
directedGraph.addEdge(e2)
directedGraph.addEdge(e3)

#Count nodes and edges
#Python does not transform objects into str, so we use a more pythonic way to present output
print "Nodes: ", directedGraph.getNodeCount(), " Edges: ", directedGraph.getEdgeCount()

#Get an UndirectedGraph now and count edges
undirectedGraph = graphModel.getUndirectedGraph()
print "Edges: ", undirectedGraph.getEdgeCount() #The mutual edge is automatically merged

#Iterate over nodes
for n in directedGraph.getNodes():
    neighbors = directedGraph.getNeighbors(n).toArray()
    print n.getNodeData().getLabel(), "has", len(neighbors), "neighbors"

#Iterate over edges
for e in directedGraph.getEdges():
    print e.getSource().getNodeData().getId(), " -> ", e.getTarget().getNodeData().getId()

#Find node by id
node2 = directedGraph.getNode("n2")

#Get degree
print "Node2 degree: ", directedGraph.getDegree(node2)
--------------------------------------------------------------------------------
/geolocationOSX.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# Julien Deudon (initbrain) - 20/03/2012 15h35
# modified to English version by Dan Gleebits 20/06/2012
# modified to run on OS X by James Armitage 25/06/2012
# modified to process in python Dan Gleebits 26/06/2012
# parsing xml Vincent Ohprecio 01/10/2012

from commands import getoutput
import re, urllib2, webbrowser
import json as simplejson
import xml.etree.ElementTree as ET

airport_scan_xml = '/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport --scan -x'
address_match = '([a-fA-F0-9]{1,2}[:|\-]?){6}'

def get_signal_strengths():
    signal_by_address = {}
    root = ET.fromstring(getoutput(airport_scan_xml))
    networks = root.getchildren()[0]

    for network in networks:
        # First "string" child is MAC address
        address = network.find("string").text
        # Eighth "integer" is signal strength
        strength = abs(int(network.findall("integer")[7].text))
        signal_by_address[address] = strength

    return signal_by_address

def convert_dict_to_json(signal_by_address):
    location_request = {
        "version": "1.1.0",
        "request_address": False,
        "wifi_towers": [],
    }

    for address, signal in signal_by_address.items():
        tower = {"mac_address": address, "signal_strength": signal}
        location_request["wifi_towers"].append(tower)

    return simplejson.JSONEncoder().encode(location_request)

def post_json_and_get_lat_long(json):
    output = simplejson.loads(urllib2.urlopen('https://www.google.com/loc/json', json).read())

    return output["location"]["latitude"], output["location"]["longitude"]


if __name__ == "__main__":
    print "[+] Scanning network"
    signal_by_address = get_signal_strengths()

    json = convert_dict_to_json(signal_by_address)

    print "[+] Sending the request to Google"
    loc = post_json_and_get_lat_long(json)

    map_url = "http://maps.google.com/maps?q=%s,%s" % loc
    print "[+] Google Map"
    print map_url

    webbrowser.open(map_url)
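
# For reference, the request body built by convert_dict_to_json looks roughly
# like this (MAC address and signal values are illustrative):
# {"version": "1.1.0", "request_address": false,
#  "wifi_towers": [{"mac_address": "00:11:22:33:44:55", "signal_strength": 70}]}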
--------------------------------------------------------------------------------
/wordlistCreator.py:
--------------------------------------------------------------------------------
alphabet = 'abcdefghijklmnopqrstuvwxyz'
numbers = '0123456789'
upperAlpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

# write every lowercase candidate from 1 to 8 characters, one per line
with open("test2.txt", "a") as myfile:

    for l1 in alphabet:
        final1 = l1 + '\n'
        myfile.write(final1)

        for l2 in alphabet:
            final2 = l1 + l2 + '\n'
            myfile.write(final2)

            for l3 in alphabet:
                final3 = l1 + l2 + l3 + '\n'
                myfile.write(final3)

                for l4 in alphabet:
                    final4 = l1 + l2 + l3 + l4 + '\n'
                    myfile.write(final4)

                    for l5 in alphabet:
                        final5 = l1 + l2 + l3 + l4 + l5 + '\n'
                        myfile.write(final5)

                        for l6 in alphabet:
                            final6 = l1 + l2 + l3 + l4 + l5 + l6 + '\n'
                            myfile.write(final6)

                            for l7 in alphabet:
                                final7 = l1 + l2 + l3 + l4 + l5 + l6 + l7 + '\n'
                                myfile.write(final7)

                                for l8 in alphabet:
                                    final8 = l1 + l2 + l3 + l4 + l5 + l6 + l7 + l8 + '\n'
                                    myfile.write(final8)

# hash the first 10000 candidates and store password,sha1 pairs
import hashlib
fh = open('test2.txt')
fw = open('sha1Hashed.txt', 'w')
for i in range(10000):
    password = fh.readline().strip()
    hex = hashlib.sha1(password).hexdigest()
    fw.write(password + ',' + hex + '\n')
fh.close()
fw.close()
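
# An equivalent sketch of the nested loops above using itertools.product
# ('test3.txt' is a hypothetical output name; lengths capped at 4 here just
# to keep the run small):
import itertools
with open('test3.txt', 'w') as out:
    for length in range(1, 5):
        for combo in itertools.product(alphabet, repeat=length):
            out.write(''.join(combo) + '\n')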
'''
# terrible code to generate an 8 char wordlist


alphabet = 'abcdefghijklmnopqrstuvwxyz'
numbers = '0123456789'

for l1 in alphabet:
    for l2 in alphabet:
        for l3 in alphabet:
            for l4 in alphabet:
                for l5 in alphabet:
                    for l6 in alphabet:
                        for l7 in alphabet:
                            for l8 in alphabet:
                                final1 = l1+'\n'
                                final2 = l1+l2+'\n'
                                final3 = l1+l2+l3+'\n'
                                final4 = l1+l2+l3+l4+'\n'
                                final5 = l1+l2+l3+l4+l5+'\n'
                                final6 = l1+l2+l3+l4+l5+l6+'\n'
                                final7 = l1+l2+l3+l4+l5+l6+l7+'\n'
                                final8 = l1+l2+l3+l4+l5+l6+l7+l8+'\n'
                                with open("test.txt", "a") as myfile:
                                    myfile.write(final1)
                                    myfile.write(final2)
                                    myfile.write(final3)
                                    myfile.write(final4)
                                    myfile.write(final5)
                                    myfile.write(final6)
                                    myfile.write(final7)
                                    myfile.write(final8)

'''
--------------------------------------------------------------------------------
/chartmaker.py:
--------------------------------------------------------------------------------
#! /usr/bin/python
'''
Program loads pickled data of NMAP scans and parses it into a Google Charts API
HTML webpage for visualization. Writes both pie.html and bar.html to disk.
'''


import cPickle as pickle

webcounter = 0
SSLwebcounter = 0
Web8080 = 0
Windows135 = 0
Windows139 = 0
Windows445 = 0
Windows3389 = 0

dump = pickle.load(open('saveNewDict.2012-05-10'))

for item in dump.items():
    if len(item[1][1]) > 0:
        print item

for item in dump.items():
    dnsName = item[0]
    ipPorts = item[1]
    ipaddress = item[1][0]
    ports = item[1][1]
    if len(ports) > 0:
        print
        print dnsName, ipaddress
        for i in range(len(ports)):
            print '*' * 100
            print ipaddress, ports
        if '80/tcp' in ports:
            webcounter += 1
        if '443/tcp' in ports:
            SSLwebcounter += 1
        if '8080/tcp' in ports:
            Web8080 += 1
        if '135/tcp' in ports:
            Windows135 += 1
        if '139/tcp' in ports:
            Windows139 += 1
        if '445/tcp' in ports:
            Windows445 += 1
        if '3389/tcp' in ports:
            Windows3389 += 1

wList = ['Webservers', webcounter]
sslList = ['SSL Boxes', SSLwebcounter]
web8080List = ['Web8080', Web8080]
W135List = ['Windows135', Windows135]
W139List = ['Windows139', Windows139]
W445List = ['Windows445', Windows445]
W3389List = ['RDP3389', Windows3389]

htmlCodeStart = '''
<!-- HTML head and Google Charts pie-chart setup stripped from this dump -->
'''


htmlCodeBarChart = '''
]);
var options = {'title':'Distribution External