├── pastebin
│   ├── test.test
│   └── downloaded
│       └── test.test
├── rss
│   ├── news
│   │   └── test.test
│   ├── filter.dat
│   └── feeds.dat
├── config
│   ├── ports.txt
│   ├── rssfilter.dat
│   ├── webscrape.dat
│   └── rssfeeds.dat
├── plugins
│   ├── __init__.py
│   ├── domainip.py
│   ├── instag.py
│   ├── portlook.py
│   ├── webscrape.py
│   ├── ipinfo.py
│   ├── sslscan.py
│   ├── pyscrape.py
│   ├── newsfeed.py
│   ├── asciis.py
│   ├── oshodan.py
│   ├── fblookup.py
│   ├── linked.py
│   └── oscrtwitter.py
├── .gitignore
├── setup
│   ├── requirements.txt
│   ├── DEPENDENCY_CHECK.py
│   ├── README.md
│   └── setup.py
├── README.md
└── OSCARf.py
/pastebin/test.test:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rss/news/test.test:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/pastebin/downloaded/test.test:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/config/ports.txt:
--------------------------------------------------------------------------------
1 | 443
2 | 8443
3 | 4443
4 | 1443
5 | 8888
6 | 8080
--------------------------------------------------------------------------------
/rss/filter.dat:
--------------------------------------------------------------------------------
1 | ddos
2 | hacked
3 | leak
4 | owned
5 | shooting
6 | bombing
7 | attack
8 |
--------------------------------------------------------------------------------
/config/rssfilter.dat:
--------------------------------------------------------------------------------
1 | ddos
2 | hacked
3 | leak
4 | owned
5 | shooting
6 | bombing
7 | attack
8 |
--------------------------------------------------------------------------------
/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | modules = glob.glob(os.path.dirname(__file__)+"/*.py")
4 | __all__ = [ os.path.basename(f)[:-3] for f in modules]
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | plugins/db.py
3 | auth/
4 | *.pyc
5 | *.jpg
6 | *.csv
7 | .swp
8 | .DS_Store
9 | .idea
10 | pastebin/seen-pastes.txt
11 |
12 | HOST-RESULTS.txt
13 |
14 | NET-RESULTS.txt
15 |
16 | config/pSearch.dat
17 |
18 | config/pSearch.old.dat
19 |
20 | config/pSearch.dat
21 |
22 | config/pSearch.dat
23 |
--------------------------------------------------------------------------------
/config/webscrape.dat:
--------------------------------------------------------------------------------
1 | (([a-z]+:\/){0,1}\/.*.zip)
2 | (([a-z]+:\/){0,1}\/.*.pdf)
3 | (([a-z]+:\/){0,1}\/.*.rar)
4 | (([a-z]+:\/){0,1}\/.*.doc)
5 | (([a-z]+:\/){0,1}\/.*.docx)
6 | (([a-z]+:\/){0,1}\/.*.xls)
7 | (([a-z]+:\/){0,1}\/.*.xlsx)
8 | (([a-z]+:\/){0,1}\/.*.sql)
9 | (([a-z]+:\/){0,1}\/.*.pem)
10 | (([a-z]+:\/){0,1}\/.*.key)
11 | (([a-z]+:\/){0,1}\/.*.txt)
12 | (([a-z]+:\/){0,1}\/.*.7z)
13 | (([a-z]+:\/){0,1}\/.*.ppt)
14 | (([a-z]+:\/){0,1}\/.*.pptx)
15 |
--------------------------------------------------------------------------------
/setup/requirements.txt:
--------------------------------------------------------------------------------
1 | Pillow==2.6.1
2 | beautifulsoup4==4.3.2
3 | click==3.3
4 | colorama==0.3.2
5 | mechanize==0.2.5
6 | readline==6.2.4.1
7 | requests==2.4.3
8 | shodan==1.1.2
9 | simplejson==3.6.4
10 | tweepy==2.3.0
11 | twitter==1.15.0
12 | wsgiref==0.1.2
13 | cryptography==0.8
14 | dnspython==1.12.0
15 | enum34==1.0.4
16 | ipaddr==2.1.11
17 | netaddr==0.7.13
18 | netifaces==0.10.4
19 | pyasn1==0.1.7
20 | pyOpenSSL==0.14
21 | timeout==0.1.2
22 | ws4py==0.3.4
--------------------------------------------------------------------------------
/plugins/domainip.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Module to get the ip address of a given domain."""
3 | #import socket :P -- Now that is how you comment in code.
4 | import socket
5 |
6 | def c():
7 | """Get ipaddress of domain..GO DNS!"""
8 | domain = raw_input("Domain: ")
9 | #try to resolve.
10 | try:
11 | chk = socket.gethostbyname_ex(domain)
12 | except Exception:
13 | print "[+]ERROR: could not get hostname!"
14 | raise
15 | print "\nIP Address of", domain, ":", chk[2], "\n"
16 | return
17 |
--------------------------------------------------------------------------------
/plugins/instag.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Module to check if a username corresponds to a valid instagram user"""
3 | import urllib2
4 |
5 |
6 | def checker(usernom):
7 | """Check username. Send data to instagram and check status code."""
8 | #use username from FB profile and try it against instagram
9 | inst = "http://instagram.com/"+usernom
10 | try:
11 | data = urllib2.urlopen(inst)
12 | valid = True
13 | #If we get a 404/not found, then set valid to 0
14 | except urllib2.HTTPError:
15 | valid = False
16 | if valid:
17 | print
18 | print "---------"
19 | print "Found "+usernom+" on instagram!"
20 | print "Profile link: http://instagram.com/"+usernom
21 | print "---------\n"
22 | elif not valid:
23 | return
24 |
--------------------------------------------------------------------------------
/plugins/portlook.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from bs4 import BeautifulSoup as bsoup
5 | import urllib
6 |
7 |
8 | def lookup():
9 | while True:
10 | try:
11 | url = 'https://isc.sans.edu/api/port/'
12 | port = raw_input('Enter port: ')
13 | fullUrl = url + port
14 | try:
15 | data = urllib.urlopen(fullUrl)
16 | except:
17 | print 'Could not connect to SANS website.'
18 | raise
19 | i = bsoup(data)
20 |
21 | # print i
22 |
23 | serviceUDP = i.html.body.services.udp.service
24 | serviceTCP = i.html.body.services.tcp.service
25 | udpName = i.html.body.services.udp.findAll('name')[0]
26 | tcpName = i.html.body.services.tcp.findAll('name')[0]
27 | try:
28 | print
29 | print '----------UDP-----------'
30 | print 'UDP Service Name: ' + serviceUDP.string
31 | print 'UDP Name: ' + udpName.string
32 | print
33 | except:
34 | pass
35 | try:
36 | print '----------TCP-----------'
37 | print 'TCP Service Name: ' + serviceTCP.string
38 | print 'TCP Name: ' + tcpName.string
39 | print
40 | print
41 | except:
42 | pass
43 | print '----------END-----------\n'
44 | except KeyboardInterrupt:
45 | raise
46 |
47 |
48 | # return
49 |
--------------------------------------------------------------------------------
/setup/DEPENDENCY_CHECK.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | from time import sleep
6 |
7 | print "\nOSCAR-F requires various lib/modules to properly work."
8 | print "This script serves to check if you have those libs and",
9 | print 'will create the "auth" dir.\n'
10 |
11 | sleep(2)
12 |
13 | try:
14 | import readline
15 | print '[+]FOUND: readline'
16 | except:
17 | print '[+]MISSING: readline'
18 |
19 | try:
20 | import tweepy
21 | print '[+]FOUND: tweepy'
22 | except:
23 | print '[+]MISSING: tweepy'
24 |
25 | try:
26 | import twitter
27 | print '[+]FOUND: Twitter'
28 | except:
29 | print '[+]MISSING: Twitter'
30 |
31 | try:
32 | import feedparser
33 | print '[+]FOUND: feedparser'
34 | except:
35 | print '[+]MISSING: feedparser'
36 |
37 | try:
38 | import shodan
39 | print '[+]FOUND: shodan'
40 | except:
41 | print '[+]MISSING: shodan'
42 |
43 | try:
44 | import bs4
45 | print '[+]FOUND: bs4 (beautifulsoup 4)'
46 | except:
47 | print '[+]MISSING: bs4 (beautifulsoup 4)'
48 |
49 | try:
50 | from PIL import Image
51 | print '[+]FOUND: PIL/Pillow'
52 | except:
53 | print '[+]MISSING: PIL/Pillow module'
54 |
55 | try:
56 | import mechanize
57 | print "[+]FOUND: mechanize"
58 | except:
59 | print "[+]MISSING: mechanize"
60 |
61 | if not os.path.exists('../auth'):
62 | print "Adding the 'auth' directory. Be sure to run the setup",
63 | print "script after this one!!!"
64 | os.mkdir('../auth')
65 | print "To install dependencies: pip install -r requirements.txt"
66 |
--------------------------------------------------------------------------------
/plugins/webscrape.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import urllib2
4 | import re
5 | # not all systems have readline...if not, just pass and continue.
6 | try:
7 | import readline # nice when you need to use arrow keys and backspace
8 | except:
9 | pass
10 | import sys
11 |
12 |
13 | def scrape():
14 | site = raw_input("Enter page: ")
15 |
16 | #open site. read so we can read in a string context
17 | #test for valid and complete URL
18 | try:
19 | data = urllib2.urlopen(site).read()
20 | except ValueError:
21 | print "INVALID URL: Be sure to include protocol (e.g. HTTP)"
22 | return
23 |
24 | #print data
25 |
26 |     #try and open the pattern file.
27 | try:
28 | patternFile = open('config/webscrape.dat', 'r').read().splitlines()
29 | except:
30 | print "There was an error opening the webscrape.dat file"
31 | raise
32 | #create counter for counting regex expressions from webscrape.dat
33 | counter = 0
34 | #for each loop so we can process each specified regex
35 | for pattern in patternFile:
36 | m = re.findall(pattern, data)
37 | #m will return as true/false. Just need an if m:
38 | if m:
39 | for i in m:
40 | #open output/results file...append because we are cool
41 | outfile = open('scrape-RESULTS.txt', 'a')
42 | #print m
43 | outfile.write(str(i))
44 | outfile.write("\n") # may be needed. can always be removed.
45 |
46 | #close the file..or else
47 | outfile.close()
48 | counter+=1
49 |             print "Scrape item " + str(counter) + " successful. Data output to scrape-RESULTS.txt."
50 | else: # only need an else because m is boolean
51 | counter+=1
52 | print "No match for item " + str(counter) + ". Continuing."
53 | # Continue the loop if not a match so it can go on to the next
54 | # sequence
55 | # NOTE: you don't *really* need an else here...
56 | continue
57 |
--------------------------------------------------------------------------------
/plugins/ipinfo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | #import urllib2
5 | #import json
6 | import time
7 | import webbrowser
8 | import requests
9 |
10 |
11 | def lookup(ip):
12 |
13 | ipUrl = 'http://ip-api.com/json/'
14 | ip_query = ipUrl + ip
15 | try:
16 | #data = urllib2.urlopen(ip_query)
17 | #jsonResponse = json.load(data)
18 | data = requests.get(ip_query)
19 | jsonResponse = data.json()
20 | print
21 | print '-- IP Results --'
22 | print 'IP: ', jsonResponse['query']
23 | print 'ISP: ', jsonResponse['isp']
24 | print 'ASN: ', jsonResponse['as']
25 | print 'Country: ', jsonResponse['country']
26 | print 'City: ', jsonResponse['city']
27 | print 'Region: ', jsonResponse['regionName']
28 | print 'Longitude: ', jsonResponse['lon']
29 | print 'Latitude: ', jsonResponse['lat']
30 | print '-- END --'
31 | print
32 | except:
33 | print '[+]ERROR: Could not connect to IP lookup site. Trying another!'
34 | try:
35 | ipUrl = 'https://freegeoip.net/json/'
36 | ip_query = ipUrl + ip
37 | #data = urllib2.urlopen(ip_query)
38 | #jsonResponse = json.load(data)
39 | data = requests.get(ip_query)
40 | jsonResponse = data.json()
41 | print
42 | print '-- IP Results --'
43 | print 'IP: ', jsonResponse['ip']
44 | print 'Country: ', jsonResponse['country_name']
45 | print 'City: ', jsonResponse['city']
46 | print 'Region: ', jsonResponse['region_name']
47 | print 'Longitude: ', jsonResponse['longitude']
48 | print 'Latitude: ', jsonResponse['latitude']
49 | print '-- END --'
50 | print
51 | except:
52 | print 'Could not connect to second lookup site!'
53 | raise
54 |
55 | # wait 3 seconds until going back to the main menu
56 |
57 | time.sleep(3)
58 | avcheck = \
59 | raw_input('Check IP with AlienVault (opens in browser) [y/n]: ')
60 | if avcheck == 'y' or avcheck == 'Y':
61 | alienvault(ip)
62 | else:
63 | pass
64 | return
65 |
66 |
67 | def alienvault(ip):
68 | n = 2
69 | url = 'http://www.alienvault.com/apps/rep_monitor/ip/' + ip
70 | webbrowser.open(url, new=n)
71 | return
72 |
--------------------------------------------------------------------------------
/rss/feeds.dat:
--------------------------------------------------------------------------------
1 | http://rss.cnn.com/rss/cnn_world.rss
2 | http://rss.cnn.com/rss/cnn_us.rss
3 | http://rss.cnn.com/rss/cnn_allpolitics.rss
4 | http://rss.cnn.com/rss/cnn_crime.rss
5 | http://rss.cnn.com/rss/cnn_tech.rss
6 | http://rss.cnn.com/rss/cnn_health.rss
7 | http://feeds.reuters.com/reuters/healthNews
8 | http://feeds.reuters.com/Reuters/PoliticsNews
9 | http://feeds.reuters.com/reuters/scienceNews
10 | http://feeds.reuters.com/reuters/technologyNews
11 | http://feeds.reuters.com/reuters/topNews
12 | http://feeds.reuters.com/Reuters/domesticNews
13 | http://feeds.reuters.com/Reuters/worldNews
14 | http://feeds.foxnews.com/foxnews/health
15 | http://feeds.foxnews.com/foxnews/politics
16 | http://feeds.foxnews.com/foxnews/science
17 | http://feeds.foxnews.com/foxnews/tech
18 | http://feeds.foxnews.com/foxnews/national
19 | http://feeds.foxnews.com/foxnews/world
20 | http://feeds.bbci.co.uk/news/world/rss.xml
21 | http://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml
22 | http://rt.com/rss/news/
23 | http://www.exploit-db.com/rss.xml
24 | http://www.f-secure.com/weblog/weblog.rss
25 | http://nakedsecurity.sophos.com/feed/
26 | http://www.reddit.com/r/netsec/.rss
27 | http://www.reddit.com/r/blackhat/.rss
28 | http://feeds.feedburner.com/theHackerNews_com
29 | http://www.nytimes.com/services/xml/rss/nyt/Science.xml
30 | http://rss.nytimes.com/services/xml/rss/nyt/Technology.xml
31 | http://www.nytimes.com/services/xml/rss/nyt/US.xml
32 | http://www.nytimes.com/services/xml/rss/nyt/World.xml
33 | http://rssfeeds.usatoday.com/usatoday-NewsTopStories
34 | http://rssfeeds.usatoday.com/UsatodaycomWorld-TopStories
35 | http://rssfeeds.usatoday.com/UsatodaycomNation-TopStories
36 | http://rssfeeds.usatoday.com/UsatodaycomHealth-TopStories
37 | http://rssfeeds.usatoday.com/TP-OnPolitics
38 | http://rssfeeds.usatoday.com/usatoday-TechTopStories
39 | http://publicintelligence.net/category/news/feed/rss/
40 | http://publicintelligence.net/category/documents/feed/rss/
41 | http://feeds.feedburner.com/NationalTerrorAlertResourceCenter
42 | http://feeds.wired.com/wired/index
43 | http://www.pcworld.com/index.rss
44 | https://www.cia.gov/news-information/your-news/cia-newsroom/RSS.xml
45 | http://www.us-cert.gov/ncas/current-activity.xml
46 | http://www.darpa.mil/Rss.aspx
47 | http://hosted.ap.org/lineups/TECHHEADS.rss
48 | http://hosted.ap.org/lineups/WORLDHEADS.rss
49 | http://hosted.ap.org/lineups/POLITICSHEADS.rss
50 | http://hosted.ap.org/lineups/SCIENCEHEADS.rss
51 | http://hosted.ap.org/lineups/TOPHEADS.rss
--------------------------------------------------------------------------------
/config/rssfeeds.dat:
--------------------------------------------------------------------------------
1 | http://rss.cnn.com/rss/cnn_world.rss
2 | http://rss.cnn.com/rss/cnn_us.rss
3 | http://rss.cnn.com/rss/cnn_allpolitics.rss
4 | http://rss.cnn.com/rss/cnn_crime.rss
5 | http://rss.cnn.com/rss/cnn_tech.rss
6 | http://rss.cnn.com/rss/cnn_health.rss
7 | http://feeds.reuters.com/reuters/healthNews
8 | http://feeds.reuters.com/Reuters/PoliticsNews
9 | http://feeds.reuters.com/reuters/scienceNews
10 | http://feeds.reuters.com/reuters/technologyNews
11 | http://feeds.reuters.com/reuters/topNews
12 | http://feeds.reuters.com/Reuters/domesticNews
13 | http://feeds.reuters.com/Reuters/worldNews
14 | http://feeds.foxnews.com/foxnews/health
15 | http://feeds.foxnews.com/foxnews/politics
16 | http://feeds.foxnews.com/foxnews/science
17 | http://feeds.foxnews.com/foxnews/tech
18 | http://feeds.foxnews.com/foxnews/national
19 | http://feeds.foxnews.com/foxnews/world
20 | http://feeds.bbci.co.uk/news/world/rss.xml
21 | http://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml
22 | http://rt.com/rss/news/
23 | http://www.exploit-db.com/rss.xml
24 | http://www.f-secure.com/weblog/weblog.rss
25 | http://nakedsecurity.sophos.com/feed/
26 | http://www.reddit.com/r/netsec/.rss
27 | http://www.reddit.com/r/blackhat/.rss
28 | http://feeds.feedburner.com/theHackerNews_com
29 | http://www.nytimes.com/services/xml/rss/nyt/Science.xml
30 | http://rss.nytimes.com/services/xml/rss/nyt/Technology.xml
31 | http://www.nytimes.com/services/xml/rss/nyt/US.xml
32 | http://www.nytimes.com/services/xml/rss/nyt/World.xml
33 | http://rssfeeds.usatoday.com/usatoday-NewsTopStories
34 | http://rssfeeds.usatoday.com/UsatodaycomWorld-TopStories
35 | http://rssfeeds.usatoday.com/UsatodaycomNation-TopStories
36 | http://rssfeeds.usatoday.com/UsatodaycomHealth-TopStories
37 | http://rssfeeds.usatoday.com/TP-OnPolitics
38 | http://rssfeeds.usatoday.com/usatoday-TechTopStories
39 | http://publicintelligence.net/category/news/feed/rss/
40 | http://publicintelligence.net/category/documents/feed/rss/
41 | http://feeds.feedburner.com/NationalTerrorAlertResourceCenter
42 | http://feeds.wired.com/wired/index
43 | http://www.pcworld.com/index.rss
44 | https://www.cia.gov/news-information/your-news/cia-newsroom/RSS.xml
45 | http://www.us-cert.gov/ncas/current-activity.xml
46 | http://www.darpa.mil/Rss.aspx
47 | http://hosted.ap.org/lineups/TECHHEADS.rss
48 | http://hosted.ap.org/lineups/WORLDHEADS.rss
49 | http://hosted.ap.org/lineups/POLITICSHEADS.rss
50 | http://hosted.ap.org/lineups/SCIENCEHEADS.rss
51 | http://hosted.ap.org/lineups/TOPHEADS.rss
--------------------------------------------------------------------------------
/setup/README.md:
--------------------------------------------------------------------------------
1 | OSCAR-F
2 | =======
3 |
4 | Python OSINT Platform
5 |
6 | **OSCARf was coded/designed in Linux and Mac. Don't complain if you are running Windows and you cannot get it working**
7 | Windows support is something that I am going to address at a later date.
8 |
9 | OSCAR-F is designed to aid in the process of information gathering. It was formed with the idea of not having to open
10 | so many tabs in a browser.
11 |
12 | There are a few bugs in OSCAR-F, however, we are slowly working on crushing them and working on features.
13 |
14 | ## Installing
15 |
16 | **All setup files are located within the setup directory!**
17 |
18 | OSCAR uses a few libraries. These include:
19 |
20 | - Twitter
21 | - tweepy
22 | - feedparser
23 | - shodan
24 | - readline
25 | - pillow
26 |
27 | These can be installed via pip: `pip install -r requirements.txt`
28 |
29 | **Please note that you will need to setup ONE twitter app for you/your business.**
30 |
31 | ~**You will probably need to use sudo to run the setup script. This is because it creates files and directories.**~
32 |
33 | The readline feature is completely optional.
34 |
35 | Please be sure to run the `DEPENDENCY_CHECK` script first! Additionally, as noted above, dependencies can be installed via `pip install -r requirements.txt`
36 |
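`DEPENDENCY_CHECK.py` creates the `auth` directory one level up from where it runs (it uses the relative path `../auth`), so run it from inside the `setup` directory, for example:

```
cd setup
python DEPENDENCY_CHECK.py
```
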
37 | After running the dependency check, run the setup.py script. This will allow you to setup all necessary auth files/data.
38 | **PLEASE NOTE THAT THE SETUP SCRIPT WILL NOT INSTALL MISSING LIBRARIES! Please use pip.**
39 |
40 | ## To setup Twitter Application
41 |
42 | Navigate to: https://apps.twitter.com/ and set up a new application. Please note the name and keys associated with it.
43 | You should only need a read-only application!
44 |
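OSCARf.py expects the app name and keys in `auth/twitter_app.dat` and reads only the 2nd, 4th and 6th lines of that file (app name, consumer key, consumer secret). Roughly, the file looks like this (placeholder values; the label lines are only there for readability):

```
APP NAME
YourOSCARApp
CONSUMER KEY
xxxxxxxxxxxxxxxxxxxxxxxxx
CONSUMER SECRET
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
```
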
45 | ## Shodan API KEY
46 |
47 | Once you have a ShodanHQ account or login, go to: http://www.shodanhq.com/api_doc and copy the API key. Please note that if you want to use ALL of the shodan functionality of OSCAR, you will need to purchase an "Unlocked API"
48 |
49 | View API KEY: http://www.shodanhq.com/api_doc
50 |
51 | You can purchase the "Unlocked API" addon here: http://www.shodanhq.com/data/addons
52 |
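Paste the key into `auth/shodankey.txt`; plugins/oshodan.py reads only the second line of that file, so a minimal layout looks something like this (the first line can be any label):

```
SHODAN API KEY
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
```
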
53 | ## To scrape pastebin
54 |
55 | To scrape pastebin, add regex strings to /config/pSearch.dat located in the root directory. After this, proceed to use oscar.
56 |
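Each line of `pSearch.dat` is treated as a regular expression to look for in new pastes; matching pastes are saved under `pastebin/downloaded/`. A few illustrative entries (adjust them to whatever you are actually hunting for):

```
[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-z]{2,4}
BEGIN RSA PRIVATE KEY
yourcompany\.com
```
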
57 | ## To edit rss filter options
58 |
59 | Edit the keywords in /config/rssfilter.dat
60 |
61 | ## To add/remove rss feeds
62 |
63 | Edit rss links in /config/rssfeeds.dat
64 |
65 | ## To scrape web source code
66 |
67 | Edit regex info in /config/webscrape.dat in the root. The path will change soon.
68 |
--------------------------------------------------------------------------------
/plugins/sslscan.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | try:
4 | from time import sleep
5 | import os
6 | import sys
7 | from netaddr import *
8 | from socket import socket
9 | import OpenSSL
10 | import ssl
11 | from timeout import timeout
12 | import thread
13 | except Exception, e:
14 | print e
15 | raise
16 |
17 | ips = []
18 | out = {}
19 |
20 | def starter():
21 | init(ips)
22 | def init(ips):
23 | multi = raw_input("Are we looking up multiple IP's/Networks? [y/n]: ")
24 | if multi == "Y" or multi == "y":
25 | multiNetwork = True
26 | elif multi == "n" or multi == "N":
27 | multiNetwork = False
28 | else:
29 | print "Invalid input!"
30 | starter()
31 | if multiNetwork:
32 | try:
33 | while True:
34 | cpy = raw_input("IP or IP/CIDR: ")
35 | if "/" in cpy:
36 | i = cpy.rstrip()
37 | ip_list = list(IPNetwork(i))
38 | for e in sorted(ip_list):
39 | st = str(e)
40 | ips.append(st)
41 | else:
42 | cpy = cpy.rstrip()
43 | ips.append(cpy)
44 | except KeyboardInterrupt:
45 | raise
46 | else:
47 | cpy = raw_input("IP or IP/CIDR: ")
48 | cpy = cpy.rstrip()
49 | ips.append(cpy)
50 | trigger(ips,out)
51 |
52 | def getcert(a):
53 | """Get SSL Cert CN"""
54 | refPorts = open('config/ports.txt', 'r').readlines()
55 | for port in refPorts:
56 | # Make sure we don't have any extra characters like \n or \r
57 | port = port.rstrip()
58 | try:
59 | # time to connect!
60 | cert = ssl.get_server_certificate((a, port))
61 | except Exception, e:
62 | # If it can't connect go to the next iteration so we don't waste time
63 | continue
64 | try:
65 | # use openssl to pull cert information
66 | c = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
67 | subj = c.get_subject()
68 | comp = subj.get_components()
69 | for data in comp:
70 | if 'CN' in data:
71 | out[a] = a,data[1]
72 | elif 'CN' not in data:
73 | continue
74 | else:
75 | continue
76 | except Exception,e:
77 | # if openssl fails to get information, return nothing
78 | continue
79 |
80 |
81 | def trigger(ips,out):
82 | """Start our SSL search/thread"""
83 | for ip in ips:
84 | thread.start_new_thread(getcert, (ip,))
85 | # Sleep so our threads don't get out of control
86 | sleep(2)
87 | sleep(3)
88 | for val in out:
89 | print out[val]
90 |
91 |
92 |
93 |
94 |
95 |
--------------------------------------------------------------------------------
/plugins/pyscrape.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import urllib
4 | import time
5 | import thread
6 | import sys
7 | import os
8 | import re
9 | import datetime
10 |
11 |
12 | def downloader(url, filename):
13 | patternFile = open('config/pSearch.dat', 'r').read().splitlines()
14 | data = ""
15 | try:
16 | try:
17 | data = urllib.urlopen(url).read()
18 | except:
19 | print "Error connecting to pastebin!"
20 | raise
21 | for pattern in patternFile:
22 | m = re.findall("'"+pattern+"'", data)
23 | if m:
24 | print ""
25 | print "Found Match"
26 | print "Expr:"+pattern
27 | print ""
28 | now = datetime.datetime.now()
29 | stime = str(time.ctime())
30 |                 hardFile = "pastebin/downloaded/{0}-{1}.txt".format(filename,stime)
31 | file = open(hardFile, 'w')
32 | file.write(data)
33 | file.close
34 | else:
35 | continue
36 | except:
37 | #print "There was a file IO error"
38 | pass
39 |
40 |
41 | def starter():
42 | if not os.path.exists("pastebin"):
43 | os.mkdir("pastebin")
44 | os.mkdir("pastebin/downloaded")
45 | if not os.path.exists("pastebin/seen-pastes.txt"):
46 | chkf = open("pastebin/seen-pastes.txt", 'w')
47 | chkf.close()
48 | print "Scraping for pastes...\n"
49 | while True:
50 | try:
51 | time.sleep(1)
52 | #open the pastebin archive
53 | data = urllib.urlopen("http://pastebin.com/archive").read()
54 |             data = data.split('…')
55 |             data = data[1]
56 |             data = data.split('…')
57 |             data = data[0]
58 |             data = data.replace(' ', "!!HTML!!")
60 |             data = data.split("!!HTML!!")
61 |             for i in data:
62 |                 i = i.split("\">")
63 |                 i = i[0]
64 |                 if not ((" | " in i) or ("…" in i)):
--------------------------------------------------------------------------------
/plugins/newsfeed.py:
--------------------------------------------------------------------------------
65 |     filtered_dictionary = {… if … >= score_threshold}
66 | sorted_dictionary = sorted(filtered_dictionary.iteritems(),
67 | key=lambda x: x[1],
68 | reverse=True)
69 | #start thread for file IO performance
70 | thread.start_new_thread(newsthread, (sorted_dictionary,))
71 | #for lines in sorted_dictionary:
72 | # saveFile = open('rss/newsraw_news.csv','a')
73 | # print lines,"\n"
74 | # saveFile.write(str(lines))
75 | # saveFile.write('\n')
76 | # saveFile.close()
77 | #f1 = csv.reader(open('rss/news/raw_news.csv', 'rb'))
78 | #writer = csv.writer(open("rss/news/NEWS-OUT.csv", "wb"))
79 | #feedOut = set()
80 | #for row in f1:
81 | # if row[0] not in feedOut:
82 | # writer.writerow(row)
83 | # feedOut.add( row[0] )
84 | #return
85 |
--------------------------------------------------------------------------------
/plugins/asciis.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import random
4 |
5 |
6 | def asciiart():
7 | r1 = """
8 | ####### ###### ###### ### ######## ########
9 | ## ## ## ## ## ## ## ## ## ## ##
10 | ## ## ## ## ## ## ## ## ##
11 | ## ## ###### ## ## ## ######## ####### ######
12 | ## ## ## ## ######### ## ## ##
13 | ## ## ## ## ## ## ## ## ## ## ##
14 | ####### ###### ###### ## ## ## ## ##
15 |
16 | """
17 | r2 = """
18 | ..#######...######...######.....###....########..........########
19 | .##.....##.##....##.##....##...##.##...##.....##.........##......
20 | .##.....##.##.......##........##...##..##.....##.........##......
21 | .##.....##..######..##.......##.....##.########..#######.######..
22 | .##.....##.......##.##.......#########.##...##...........##......
23 | .##.....##.##....##.##....##.##.....##.##....##..........##......
24 | ..#######...######...######..##.....##.##.....##.........##......
25 | """
26 | r3 = """
27 | ======================================================================
28 | === ===== ===== ====== ===== ============= =
29 | == == === ==== === === ==== ==== ==== ============ =======
30 | = ==== == ==== == ========= == === ==== ============ =======
31 | = ==== === ======= ======== ==== == === ============ =======
32 | = ==== ===== ===== ======== ==== == ==== == ===
33 | = ==== ======= === ======== == ==== ============ =======
34 | = ==== == ==== == ======== ==== == ==== ============ =======
35 | == == === ==== === === == ==== == ==== ============ =======
36 | === ===== ===== === ==== == ==== ============ =======
37 | ======================================================================
38 | """
39 |
40 | r4 = """
41 | .------..------..------..------..------..------..------.
42 | |O.--. ||S.--. ||C.--. ||A.--. ||R.--. ||-.--. ||F.--. |
43 | | :/\: || :/\: || :/\: || (\/) || :(): || (\/) || :(): |
44 | | :\/: || :\/: || :\/: || :\/: || ()() || :\/: || ()() |
45 | | '--'O|| '--'S|| '--'C|| '--'A|| '--'R|| '--'-|| '--'F|
46 | `------'`------'`------'`------'`------'`------'`------'
47 | """
48 | r5 = """
49 | .-') ('-. _ .-')
50 | ( OO ). ( OO ).-.( \( -O )
51 | .-'),-----. (_)---\_) .-----. / . --. / ,------. ,------.
52 | ( OO' .-. '/ _ | ' .--./ | \-. \ | /`. ' .-') ('-| _.---'
53 | / | | | |\ :` `. | |('-..-'-' | | | / | | _( OO) (OO|(_/
54 | \_) | |\| | '..`''.) /_) |OO )\| |_.' | | |_.' |(,------./ | '--.
55 | \ | | | |.-._) \ || |`-'| | .-. | | . '.' '------'\_)| .--'
56 | `' '-' '\ /(_' '--'\ | | | | | |\ \ \| |_)
57 | `-----' `-----' `-----' `--' `--' `--' '--' `--'
58 |
59 | """
60 |
61 | r6 = """
62 | _______ _______ _______ _______ ______ _______
63 | | || || || _ || _ | | |
64 | | _ || _____|| || |_| || | || ____ | ___|
65 | | | | || |_____ | || || |_||_ |____| | |___
66 | | |_| ||_____ || _|| || __ | | ___|
67 | | | _____| || |_ | _ || | | | | |
68 | |_______||_______||_______||__| |__||___| |_| |___|
69 | """
70 | r7 = """
71 | OOO SSS CCC AA RRRR FFFF
72 | O O S C A A R R F
73 | O O SSS C AAAA RRRR --- FFF
74 | O O S C A A R R F
75 | OOO SSSS CCC A A R RR F
76 |
77 | """
78 |
79 | r8 = """
80 | 01001111 01010011 01000011 01000001 01010010 00101101 01000110
81 | """
82 |
83 | r9 = """
84 |
85 | ### #### # # # #### #
86 | # # # # # # # # # ###
87 | # # # ### ##### #### ### # # #
88 | # # # # # # # # ###
89 | ### #### # # # # # #
90 | """
91 | pick = [r1, r2, r3, r4, r5, r6, r7, r8, r9]
92 | print(random.choice(pick))
93 |
--------------------------------------------------------------------------------
/plugins/oshodan.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import thread
5 | import shodan
6 | try:
7 | shodan_key_file = open('auth/shodankey.txt', 'r')
8 | shodan_key_line = shodan_key_file.readlines()
9 | SHODAN_API_KEY = shodan_key_line[1].rstrip()
10 | sho_api = shodan.Shodan(SHODAN_API_KEY)
11 | shodan_key_file.close()
12 | except:
13 | sho_api = None
14 |
15 |
16 | def menu():
17 | print '**************************************'
18 | print '* Shodan Search -- NinjaSl0th *'
19 | print '**************************************'
20 | print
21 | print '[1] Search by subnet (add cidr value) ex:/24'
22 | print '[2] Search by hostname (ex: computing.site.com)'
23 | print '[3] Search for printers by organization (ex: Microsoft)'
24 | print '[4] Search for ports by organization (supported ports: ',
25 | print 'http://www.shodanhq.com/help/filters#port)'
26 | print '[5] Stream for SSL certs *REQUIRES STREAMING API ACCESS*'
27 | print '[0] Exit'
28 | print
29 | select_mod()
30 |
31 |
32 | def downloader(res_out, search_query):
33 |
34 | # search_query = sho_api.search(data)
35 |
36 | outfile = open(res_out, 'a')
37 | for result in search_query['matches']:
38 | outfile.write('--START--\n')
39 | outfile.write('IP: %s \n' % result['ip_str'])
40 | encout = result['data'].encode('UTF-8')
41 | #outfile.write(result['data'])
42 | outfile.write(encout)
43 |
44 | # print result['hostnames'][0]
45 |
46 | try:
47 | outfile.write(result['hostnames'][0])
48 | except:
49 | pass
50 | outfile.write('''''')
51 | outfile.write('\n')
52 | a = result['port']
53 | b = result['os']
54 | outfile.write('Port:')
55 | outfile.write(str(a))
56 | outfile.write('\n')
57 | outfile.write('Detected OS:')
58 | outfile.write(str(b))
59 | outfile.write('\n')
60 | outfile.write('--END--\n\n')
61 | outfile.write('''''')
62 | outfile.close()
63 |
64 |
65 | def select_mod():
66 | if sho_api is None:
67 |         print "Missing key; please install a Shodan API key in",
68 |         print "./auth/shodankey.txt"
69 | return
70 | menu_select = raw_input('Please enter an option: ')
71 | if menu_select == '1':
72 | sub_search(sho_api)
73 | elif menu_select == '2':
74 | host_search(sho_api)
75 | elif menu_select == '3':
76 | print_search(sho_api)
77 | elif menu_select == '4':
78 | port_search(sho_api)
79 | elif menu_select == '5':
80 | cert_search(sho_api)
81 | elif menu_select == '0':
82 | return
83 | else:
84 | print 'please enter a valid choice'
85 | menu()
86 |
87 |
88 | def sub_search(sho_api):
89 | res_out = 'NET-RESULTS.txt'
90 | prefix = 'net:'
91 | query = raw_input('Enter Subnet (with cidr): ')
92 | data = prefix + query
93 | try:
94 | search_query = sho_api.search(data)
95 | print 'Results found: %s' % search_query['total']
96 | thread.start_new_thread(downloader, (res_out, search_query))
97 |
98 | print 'Results have been exported to: NET-RESULTS.txt'
99 | menu()
100 | except Exception, e:
101 | print 'Error: %s' % e
102 | menu()
103 |
104 |
105 | def host_search(sho_api):
106 | res_out = 'HOST-RESULTS.txt'
107 | prefix = 'hostname:'
108 | query = raw_input('Please enter hostname: ')
109 | data = prefix + query
110 | try:
111 | search_query = sho_api.search(data)
112 | print 'Results found: %s' % search_query['total']
113 | thread.start_new_thread(downloader, (res_out, search_query))
114 |
115 | print 'Results have been exported to: HOST-RESULTS.txt'
116 | menu()
117 | except Exception, e:
118 | print 'Error: %s' % e
119 | menu()
120 |
121 |
122 | def print_search(sho_api):
123 | res_out = 'PRINTER-RESULTS.txt'
124 | prefix = 'org:'
125 | query = raw_input('Please enter company/org: ')
126 | data = prefix + '"' + query + '"' + ' print'
127 | try:
128 | search_query = sho_api.search(data)
129 | print 'Results found: %s' % search_query['total']
130 | thread.start_new_thread(downloader, (res_out, search_query))
131 |
132 | print 'Results have been exported to: PRINTER-RESULTS.txt'
133 | menu()
134 | except Exception, e:
135 | print 'Error: %s' % e
136 | menu()
137 |
138 |
139 | def port_search(sho_api):
140 | res_out = 'PORT-RESULTS.txt'
141 | prefix = 'org:'
142 | query = raw_input('Please enter company/org: ')
143 | port_prefix = 'port:'
144 | search_port = raw_input('Please enter the port: ')
145 | data = prefix + '"' + query + '"' + port_prefix + search_port
146 | try:
147 | search_query = sho_api.search(data)
148 | print 'Results found: %s' % search_query['total']
149 | thread.start_new_thread(downloader, (res_out, search_query))
150 |
151 | print 'Results have been exported to: PORT-RESULTS.txt'
152 | menu()
153 | except Exception, e:
154 | print 'Error: %s' % e
155 | menu()
156 |
157 | def cert_search(sho_api):
158 | #res_out = 'CERTS.txt'
159 | try:
160 | for banner in sho_api.stream.ports(['443']):
161 | if 'opts' in banner and 'pem' in banner['opts']:
162 | print banner['opts']['pem']
163 | except Exception, e:
164 | print 'Error: %s' % e
165 | menu()
166 |
--------------------------------------------------------------------------------
/plugins/fblookup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | #import json
5 | import urllib2
6 | import urllib
7 | import os
8 | import thread
9 | from time import sleep
10 | import requests
11 |
12 | # PIL is used to open/process images with python
13 |
14 | try:
15 | from PIL import Image
16 | except:
17 | print 'PIL/Pillow module is not installed'
18 | print 'sudo pip install pillow'
19 |
20 |
21 | # instagram lookup gunction. Very basic for now.
22 |
23 | def instacheck(usernom):
24 |
25 | # use username from FB profile and try it against instagram
26 |
27 | inst = 'http://instagram.com/' + usernom
28 | try:
29 | data = urllib2.urlopen(inst)
30 | valid = 1
31 | except urllib2.HTTPError:
32 |
33 | # If we get a 404/not found, then set valid to 0
34 |
35 | valid = 0
36 | if valid:
37 | print
38 | print '---------'
39 | print 'Found ' + usernom + ' on instagram too!'
40 | print 'Profile link: http://instagram.com/' + usernom
41 | print '---------\n'
42 | elif not valid:
43 | pass
44 |
45 |
46 | # download FB profile as a function being called via new thread.
47 |
48 | def downloader(a):
49 | if not os.path.exists('FBpics'):
50 | os.mkdir('FBpics')
51 |
52 | # full url for FB image
53 |
54 | data = 'https://graph.facebook.com/' + a + '/picture?type=large'
55 |
56 | # open the image using urllib
57 |
58 | pic = urllib.urlopen(data)
59 |
60 | # open and write bytes as a jpg. Save image as the user id + .jpg
61 |
62 | f = open('FBpics/' + a + '.jpg', 'wb')
63 | f.write(pic.read())
64 | f.close()
65 | sleep(1)
66 |
67 |
68 | def FBInfo():
69 | FBurl = 'https://graph.facebook.com/'
70 | print 'Please enter the username or ID of the target'
71 | print 'EX: bobsmith3'
72 | userName = raw_input(': ')
73 |
74 | # construct full url for graph lookup
75 |
76 | fullURL = FBurl + userName
77 | try:
78 | #data = urllib2.urlopen(fullURL)
79 | data = requests.get(fullURL)
80 | #jsonResponse = json.load(data)
81 | #print jsonResponse
82 | print data.json()
83 | print '\n'
84 | except:
85 |
86 | pass
87 | return
88 |
89 |
90 | def FBUsr():
91 | FBurl = 'https://graph.facebook.com/'
92 | peopleInput = raw_input('How many people would you like to lookup?: ')
93 | people = 0 # set to 0 to skip if user input is invalid
94 | myCounter = 1
95 |
96 | # validate user input
97 | try:
98 | people = int(peopleInput)
99 | except ValueError:
100 | print "INVALID INPUT: Enter a number..."
101 |
102 | while myCounter <= people:
103 | print '\n'
104 |         print 'Please enter the username of the person - type "exit!" to quit'
105 | print 'EX: bobsmith3'
106 | userName = raw_input(': ')
107 | fullURL = FBurl + userName
108 |
109 | # bail out early if the user chooses to exit
110 | if userName == "exit!":
111 | return
112 |
113 | try:
114 | try:
115 | #data = urllib2.urlopen(fullURL)
116 | data = requests.get(fullURL)
117 | #except urllib2.HTTPError:
118 | except:
119 | print 'There was an error connecting to Facebook!'
120 | return
121 |
122 | # load the JSON response from FB
123 |
124 | #jsonResponse = json.load(data)
125 | res = data.json()
126 | print '\n', '\n', '\n'
127 |
128 | # Try and set the f_link var to the link section of json data..
129 | # If not found, then the link will just be something null or
130 | # a message
131 |
132 | try:
133 | #f_link = jsonResponse['link']
134 | #fblink1 = 'https://facebook.com/' + jsonResponse['id']
135 | fblink1 = 'https://facebook.com/' + res['id']
136 | req = requests.get(fblink1)
137 | cookieref = req.cookies['reg_fb_ref']
138 | decoded = urllib.unquote(cookieref)
139 | f_link = decoded
140 | except:
141 |
142 | # can still generate link, just will not get username.
143 |
144 | #f_link = 'https://facebook.com/' + jsonResponse['id']
145 | f_link = 'https://facebook.com/' + res['id']
146 |
147 | try:
148 | #gender = jsonResponse['gender']
149 | #locale = jsonResponse['locale']
150 | gender = res['gender']
151 | locale = res['locale']
152 | except:
153 | gender = ""
154 | locale = ""
155 | print '---------------Results-------------------'
156 | #print jsonResponse['id'], '\n', jsonResponse['name'], '\n', \
157 | #gender, '\n', locale, \
158 | #'\n', f_link, '\n'
159 | print res['id'], '\n', res['name'], '\n', \
160 | gender, '\n', locale, \
161 | '\n', f_link, '\n'
162 | print '---------------Results-------------------\n'
163 | #a = jsonResponse['id']
164 | a = res['id']
165 | dlprof = raw_input('Download Profile Picture?[y/n]: ')
166 | if dlprof == 'y' or dlprof == 'Y':
167 |
168 | # start a thread to download the image
169 |
170 | thread.start_new_thread(downloader, (a, ))
171 | view = raw_input('View downloaded image?[y/n]: ')
172 | if view == 'y' or view == 'Y':
173 | try:
174 | img = Image.open('FBpics/' + a + '.jpg')
175 | img.show()
176 | except:
177 | print 'There was an error opening the file'
178 | pass
179 | else:
180 | pass
181 | else:
182 | pass
183 | #if jsonResponse['username']:
184 | if res['username']:
185 | #usernom = jsonResponse['username']
186 | usernom = res['username']
187 |
188 | # thread.start_new_thread(instacheck,(usernom,))
189 |
190 | # Check for the username on instagram-check function
191 |
192 | instacheck(usernom)
193 |
194 | # sleep(5)
195 |
196 | myCounter += 1
197 | sleep(2)
198 | except:
199 |
200 | pass
201 | return
202 |
--------------------------------------------------------------------------------
/plugins/linked.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from bs4 import BeautifulSoup as bsoup
5 | #import requests as req
6 | import mechanize
7 | #import urllib
8 | #import urllib2
9 | #import re
10 | import csv
11 | #import sys
12 | #import json
13 | #import os
14 | import time
15 | #import random
16 | import cookielib
17 | import thread
18 |
19 |
20 | def start():
21 | pos = raw_input('Enter position: ')
22 | pos = pos.lower()
23 | com = raw_input('Enter company: ')
24 | com = com.lower()
25 | useout = raw_input('Save output? [y/n]: ')
26 | global saveout
27 | if useout == 'y':
28 | saveout = True
29 | else:
30 | saveout = False
31 | bing(pos, com, saveout)
32 |
33 |
34 | def csvwrite(p1, c1, plink):
35 | saveFile = open('OUTPUT.csv', 'a')
36 | saveFile.write(p1 + '+' + c1 + '+' + plink)
37 | saveFile.write('\n')
38 | saveFile.close()
39 |
40 |
41 | def bing(pos, com, saveout):
42 |
43 | # Setup browser using mechanize.
44 |
45 | br = mechanize.Browser()
46 |
47 | # Accept cookies...we need a cookiejar. :)
48 |
49 | cj = cookielib.LWPCookieJar()
50 | br.set_cookiejar(cj)
51 | br.set_handle_equiv(True)
52 |
53 | # br.set_handle_gzip(True)
54 |
55 | br.set_handle_redirect(True)
56 | br.set_handle_referer(True)
57 | br.set_handle_robots(False)
58 |
59 | br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),
60 | max_time=1)
61 |
62 | # user agent. This can always be changed or read from a file later
63 |
64 | br.addheaders = [('User-agent',
65 | 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2)'
66 | 'AppleWebKit/537.17 (KHTML, like Gecko)'
67 | 'Chrome/24.0.1309.1 Safari/537.17'
68 | )]
69 |
70 | # craft url for bing :D
71 |
72 | burl = 'http://www.bing.com/search?q=site:linkedin.com ' + '"' \
73 | + pos + '"' + ' ' + '"' + com + '"'
74 |
75 | # replace any spaces in our query with url encoded values
76 |
77 | r = br.open(burl.replace(' ', '%20'))
78 |
79 | # read html
80 |
81 | html = r.read()
82 |
83 | # parse html through beautifulsoup
84 |
85 | soup = bsoup(html)
86 |
87 | # result = soup.find_all('div', {"class" : "sb_tlst"})
88 |
89 | result = (soup.find_all('li',
90 | {'class': 'b_algo'
91 | })
92 | if soup.find_all('li',
93 | {'class': 'b_algo'
94 | })
95 | else soup.find_all('div',
96 | {'class': 'sb_tlst'}))
97 | refs = []
98 | for i in result:
99 | link = i.a['href'] # Get Hrefs from bing's source
100 |
101 | # Quick validation to ensure we are actually getting linkedin
102 |
103 | if '/dir/' in link \
104 | or 'groupItem' in link or not 'linkedin.com' in link:
105 | continue
106 | else:
107 | refs.append(link)
108 |
109 | # use set to help remove possible duplicate links
110 |
111 | nodupLinks = set(refs) # try and remove duplicates
112 |
113 | # Add the company name to an array. This is for a future idea
114 |
115 | comp = []
116 | comp.append(com)
117 |
118 | for links in nodupLinks:
119 |
120 | # open a link from the links array.
121 |
122 | r = br.open(links)
123 | html = r.read()
124 | soup = bsoup(html) # Have bs4 read the html from linkedin
125 |
126 | # look for the title element within linkedin
127 |
128 | result_name = soup.find('title')
129 | result_name = result_name.string.strip().split(' | ')
130 |
131 | # Some validation to ensure that we get position info if
132 | # there is/isn't a headline
133 |
134 | result_title = (soup.find('p',
135 | {'class': 'headline-title title'
136 | })
137 | if soup.find('p',
138 | {'class': 'headline-title title'
139 | })
140 | else soup.find('ul', {'class': 'current'})
141 | or soup.find('p', {'class': 'title'}))
142 | try:
143 |
144 | # Validation to ensure that the company name IS a current
145 | # position...not a past one
146 |
147 | if not comp[0] in result_title.string.lower():
148 | continue
149 | else:
150 |
151 | # results of the query
152 |
153 | print
154 | print '-----------START--------------'
155 | print result_name[0].encode('utf-8'), \
156 | result_title.string.encode('utf-8')
157 | print links
158 | print '------------END---------------'
159 | print
160 |
161 | # check for boolean val on saveout.
162 |
163 | if saveout:
164 | result_title.string = result_title.string.strip('\n')
165 | restitle = result_title.string.encode('utf-8')
166 | resnom = result_name[0].encode('utf-8')
167 | thread.start_new_thread(csvwrite,
168 | (resnom, restitle, links))
169 | else:
170 |
171 |                     # skip the DL process if saveout is set to no/0
172 |
173 | pass
174 | except:
175 | pass
176 | # sleep real quick before crushing linkedin with requests
177 | time.sleep(2)
178 | refs = [] # clear temporary array after running
179 | comp = [] # clear temporary array after running
180 |
181 | # now that search is done, do another or return to main
182 | print """
183 | End of results...
184 | 1. Search again
185 | 0. Return
186 | """
187 | retCheck = raw_input("Choose an option:")
188 | if retCheck == "1":
189 | start()
190 | elif retCheck == "0":
191 | return
192 |
193 | # function to try and remove duplicates from the lists.
194 |
195 | def dupr(saveout):
196 |
197 | f1 = csv.reader(open('OUTPUT.csv', 'r'), delimiter='+')
198 | f2 = csv.writer(open('LINKEDIN_OUT.csv', 'w'), delimiter='+')
199 |
200 | links = set()
201 | for row in f1:
202 | if row[2] not in links:
203 | f2.writerow(row)
204 | links.add(row[2])
205 | return
206 |
--------------------------------------------------------------------------------
/OSCARf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Python script to aid in the collection of OSINT data"""
3 |
4 | """
5 | ____ ________________ ________
6 | \ \ / /_ \_____ \ \_____ \ ______ ______
7 | \ Y / | |/ ____/ / | \\____ \/ ___/
8 | \ / | / \ / | \ |_> >___ \
9 | \___/ |___\_______ \ \_______ / __/____ >
10 | \/ \/|__| \/
11 | v12 operations
12 | """
13 |
14 | #External libraries needed to use oscar:
15 | #twitter
16 | #tweepy
17 | #feedparser
18 | #shodan
19 | #readline
20 | #beautifulsoup
21 |
22 | #oscar will automatically try to import first.
23 | #On exception it will alert the user
24 | import urllib2
25 |
26 | import sys
27 | import os
28 | import time
29 |
30 |
31 | try:
32 | import readline
33 | except:
34 | pass
35 |
36 | #################
37 | # LOCAL IMPORTS #
38 | #################
39 | from plugins import *
40 |
41 | #----Why 2 twitter libs?----#
42 | #The auth for the twitter lib is nicer as it can create an auth file
43 | #to read from. the twitter auth will also open a browser window where
44 | # you will accept the app to use your twitter account. Read and Write
45 | #is what it requires. When accepted, you will get a pin to enter into
46 | # the application. You will not be prompted for a pin after getting a
47 | #token.
48 | #----END----#
49 | # imports for the streaming lib
50 |
51 | try:
52 | import tweepy
53 | #from tweepy import *
54 | from tweepy.streaming import *
55 | except:
56 |     print "[+]ERROR: Unable to import the tweepy library. Is it installed?"
57 | print "You will not be able to use the twitter collection side of oscar!"
58 |
59 | #Twitter lib for AUTH
60 | try:
61 | import twitter
62 | from twitter.oauth import write_token_file, read_token_file
63 | from twitter.oauth_dance import oauth_dance
64 | except:
65 |     print "[+]ERROR: Unable to import the twitter library. Is it installed?"
66 | print "You will not be able to use the twitter collection side of oscar!"
67 |
68 |
69 | try:
70 | #Open file for twitter app auth
71 | tappfile = open('auth/'+'twitter_app.dat', 'r')
72 | tappline = tappfile.readlines()
73 | APP_NAME = tappline[1].rstrip()
74 | CONSUMER_KEY = tappline[3].rstrip()
75 | CONSUMER_SECRET = tappline[5].rstrip()
76 | tappfile.close()
77 |
78 | #file that Oauth data is stored
79 | TOKEN_FILE = 'auth/'+'token.txt'
80 |
81 | try:
82 | (oauth_token, oauth_token_secret) = read_token_file(TOKEN_FILE)
83 | except IOError, e:
84 | print "Please run the setup.py file to get your token file!"
85 | exit()
86 |
87 | t_auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
88 | t_auth.set_access_token(oauth_token, oauth_token_secret)
89 | t_api = tweepy.API(t_auth)
90 | except:
91 | t_auth = None
92 | t_api = None
93 |
94 | __author__ = "NinjaSl0th - @ninjasl0th"
95 |
96 | def main():
97 | """Main Function"""
98 | time.sleep(3)
99 | try:
100 | os.system('clear')
101 | except Exception:
102 | os.system('cls')
103 | finally:
104 | asciis.asciiart()
105 | try:
106 | print """
107 | OSCAR (Open Source Collection And Recon) Framework
108 | (CTRL+C returns back to main menu)
109 | -------------
110 | 1. Social Networking
111 | -------------
112 | 2. Shodan
113 | -------------
114 | 3. News
115 | -------------
116 | 4. Network Info
117 | -------------
118 | 5. Pastebin Scraper
119 | -------------
120 | 6. Web Tools
121 | -------------
122 |
123 | 0. Exit OSCAR
124 | """
125 | opt = raw_input("Enter an option: ")
126 | if opt == "1":
127 | socialMenu()
128 | elif opt == "2":
129 | oscrShodan()
130 | main()
131 | elif opt == "3":
132 | news()
133 | elif opt == "4":
134 | networkMod()
135 | elif opt == "5":
136 | pasteScrape()
137 | elif opt == "6":
138 | webtools()
139 | #wscrape()
140 | elif opt == "0":
141 | print "Thanks for using OSCAR!"
142 | sys.exit(0)
143 | else:
144 | print "You entered an invalid option!"
145 | main()
146 | except (KeyboardInterrupt):
147 | main()
148 |
149 |
150 | ###########################
151 | #-- Social Media Menu --#
152 | ###########################
153 | def socialMenu():
154 | """Select Social Media Source"""
155 |
156 | print """
157 | 1. Twitter
158 | 2. FaceBook
159 | 3. LinkedIn
160 | 4. Check username on instagram
161 | 0. Return
162 | """
163 | opt = raw_input("Enter an option: ")
164 | if opt == "1":
165 | twitMenu()
166 | elif opt == "2":
167 | fbMenu()
168 | elif opt == "3":
169 | linkedin()
170 | elif opt == "4":
171 | instachek()
172 | elif opt == "0":
173 | main()
174 | else:
175 | print "You entered an invalid choice!"
176 | socialMenu()
177 |
178 |
179 | ###########################
180 | #-- Twitter Collection -- #
181 | ###########################
182 |
183 |
184 | def twitMenu():
185 | """Menu for twitter"""
186 | if t_auth is None or t_api is None:
187 | print "Twitter is disabled; please install an API key for twitter"
188 | return
189 | print """
190 | 1. Live stream twitter (saved as csv)
191 | 2. Live stream NO LOGGING!
192 | 3. Gather last X tweets from user
193 | 4. View recent follows
194 | 5. View recent followers
195 | 6. Get count of mentions of another user (last 200 tweets)
196 | 7. Search for tweet
197 | 8. Add user to sqlite db
198 | 9. Delete all your tweets.
199 | 10. Delete all favorites
200 | 0. Return
201 | """
202 | opt = raw_input("Enter an option: ")
203 | if opt == "1":
204 | oscrtwitter.lv_stream(t_auth)
205 | elif opt == "2":
206 | oscrtwitter.lv_streamno(t_auth)
207 | elif opt == "3":
208 | oscrtwitter.hist_tweet(t_api)
209 | twitMenu()
210 | elif opt == "4":
211 | oscrtwitter.rcntFllw(t_api)
212 | twitMenu()
213 | elif opt == "5":
214 | oscrtwitter.rcntFllwrs(t_api)
215 | twitMenu()
216 | elif opt == "6":
217 | oscrtwitter.mentionCount(t_api)
218 | twitMenu()
219 | elif opt == "7":
220 | oscrtwitter.twitSearch(t_api)
221 | twitMenu()
222 | elif opt == "8":
223 | oscrtwitter.twitlookup(t_api)
224 | twitMenu()
225 | elif opt == "9":
226 | oscrtwitter.batch_delete(t_api)
227 | twitMenu()
228 | elif opt == "10":
229 | oscrtwitter.favdelete(t_api)
230 | twitMenu()
231 | elif opt == "0":
232 | main()
233 | else:
234 | print "[+]ERROR: You entered an invalid option!"
235 | twitMenu()
236 |
237 |
238 | ########################
239 | ## --- FB Analysis -- ##
240 | ########################
241 |
242 | def fbMenu():
243 | """Facebook Menu"""
244 | print """
245 | 1. Get user info - Raw JSON Dump/Not Formatted
246 | 2. Get user info - Formatted, Lookup multiple users
247 | 0. Return
248 | """
249 | opt = raw_input("Enter an input: ")
250 | if opt == "1":
251 | fblookup.FBInfo()
252 | fbMenu()
253 | elif opt == "2":
254 | fblookup.FBUsr()
255 | fbMenu()
256 | elif opt == "0":
257 | main()
258 | else:
259 | print "You entered an invalid option"
260 | fbMenu()
261 |
262 |
263 | def instachek():
264 | """Initiate Instagram username checker"""
265 | usernom = raw_input("Enter username: ")
266 | instag.checker(usernom)
267 | socialMenu()
268 |
269 | #############################
270 | #-- News Feed Integration --#
271 | #############################
272 |
273 |
274 | def news():
275 | """Launch the newsfeed reader"""
276 | newsfeed.newsStart()
277 | main()
278 |
279 |
280 | ###############
281 | #-- IP Info --#
282 | ###############
283 | def ipInfo():
284 | """IP Address lookup function"""
285 | ip = raw_input("Enter IP: ")
286 | ip = ip.rstrip()
287 | ipinfo.lookup(ip)
288 | networkMod()
289 |
290 |
291 | def prtLook():
292 | """Function to call the portlookup lib"""
293 | portlook.lookup()
294 | networkMod()
295 |
296 |
297 | def networkMod():
298 | """Function to choose what network lookup tool to use"""
299 | print """
300 | 1. Lookup IP Address
301 | 2. Port Lookup (SANS website)
302 | 3. Domain to IP
303 | 0. Return
304 | """
305 | opt = raw_input('Enter an option: ')
306 | if opt == "1":
307 | ipInfo()
308 | elif opt == "2":
309 | prtLook()
310 | elif opt == "3":
311 | domainip.c()
312 | elif opt == "0":
313 | main()
314 | else:
315 | print "Invalid option!"
316 | networkMod()
317 | networkMod()
318 |
319 |
320 | ######################
321 | #- Pastebin Scraper -#
322 | ######################
323 | def pasteScrape():
324 | """Initiate pastebin scraper"""
325 | try:
326 | pyscrape.starter()
327 | except KeyboardInterrupt:
328 | pyscrape.stopper()
329 | main()
330 |
331 |
332 | def linkedin():
333 | """Start linkedin search tool"""
334 | linked.start()
335 | time.sleep(5)
336 | if linked.saveout:
337 | linked.dupr(linked.saveout)
338 | main()
339 |
340 |
341 | def oscrShodan():
342 | """Call/launch the Shodan module"""
343 | oshodan.menu()
344 | main()
345 |
346 |
347 | def wscrape():
348 | """Call/launch the web scraper module"""
349 | webscrape.scrape()
350 | main()
351 |
352 | def getcn():
353 | sslscan.starter()
354 |
355 | def webtools():
356 | """Menu for web tools"""
357 | print """
358 | 1. Web Source Scraper
359 | 2. SSL CN grabber
360 |
361 | 0. Back
362 | """
363 | opt = raw_input("Enter an option: ")
364 | if opt == "1":
365 | wscrape()
366 | elif opt == "2":
367 | getcn()
368 | elif opt == "0":
369 | main()
370 | else:
371 | print "Invalid Option!"
372 | webtools()
373 | webtools()
374 |
375 |
376 | if __name__ == "__main__":
377 | # users may wish to import part of this...
378 | main()
379 |
--------------------------------------------------------------------------------
/plugins/oscrtwitter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | # import sqlite3 for DB operations
5 |
6 | try:
7 | import sqlite3 as db
8 | except:
9 | print "[+]ERROR: could not import sqlite3. You won't be able to add",
10 | print "users to a sqliteDB!"
11 | pass
12 |
13 | try:
14 | import tweepy
15 | from tweepy import *
16 | from tweepy.streaming import *
17 | except:
18 |     print '[+]ERROR: Unable to import the tweepy library. Is it installed?'
19 | print 'You will not be able to use the twitter collection side of oscar!'
20 |
21 | import sys, os, re, time
22 | from time import sleep
23 | import thread
24 |
25 | def hist_tweet(t_api):
26 | con = ''
27 | try:
28 |         while con not in ('n', 'N', 'no', 'No'):
29 | num_tweets = input('How many tweets (200 maximum): ')
30 | targetUsr = raw_input('Enter target user: ')
31 | user = t_api.get_user(targetUsr)
32 | print '\n'
33 | print 'Username: ', user.screen_name
34 | print 'Follower count: ', user.followers_count
35 | print '\n'
36 | target_tweets = t_api.user_timeline(targetUsr,count=num_tweets)
37 | counter = 0
38 | for tweet in target_tweets:
39 | counter += 1
40 | print counter, '-', tweet.text
41 | con = raw_input('Run again?(y/n): ')
42 | if con == 'no' or con == 'n' or con == 'N' or con == 'No':
43 | return
44 | except KeyboardInterrupt:
45 | return
46 |
47 |
48 | # streaming class for the tweepy lib
49 |
50 | def lv_stream(t_auth):
51 |
52 | # ask for the specified filter. Supports standard string. Hashtags too :P
53 |
54 | filt = raw_input('Enter filter option: ')
55 |
56 | class listen(StreamListener):
57 |
58 | def on_status(self, status):
59 | try:
60 |
61 | # print the status....use utf, or else.
62 |
63 | print status.author.screen_name.encode('utf-8') + ': ' \
64 | + status.text.encode('utf-8')
65 | print '\n'
66 | saveFile = open('STREAM_%s.csv' % filt, 'a')
67 | saveFile.write(str(time.ctime()) + ',,'+ status.author.screen_name.encode('utf-8') +',,' + status.text.encode('utf-8'))
68 | saveFile.write('\n')
69 | saveFile.close()
70 | return True
71 | except (BaseException, KeyboardInterrupt, SystemExit):
72 | #print 'failed on data, ', str(e)
73 | sleep(3)
74 | return
75 |
76 | def on_error(self, status):
77 | return
78 |
79 | twitterStream = Stream(t_auth, listen())
80 | twitterStream.filter(track=[filt])
81 |
82 |
83 | def lv_streamno(t_auth):
84 |
85 | # ask for the specified filter. Supports standard string. Hashtags too :P
86 |
87 | filt = raw_input('Enter filter option: ')
88 |
89 | class listen(StreamListener):
90 |
91 | def on_status(self, status):
92 | try:
93 |
94 | # print the status....use utf, or else.
95 |
96 | print status.author.screen_name.encode('utf-8') + ': ' \
97 | + status.text.encode('utf-8')
98 | print '\n'
99 | return True
100 | except (BaseException, KeyboardInterrupt, SystemExit):
101 | #print 'failed on data, ', str(e)
102 | sleep(3)
103 | return
104 |
105 | def on_error(self, status):
106 | return
107 |
108 | twitterStream = Stream(t_auth, listen())
109 | twitterStream.filter(track=[filt])
110 |
111 |
112 | # This can get confusing. Friends are people that are mutual followers.
113 | # That is, those users who target follows and is followed by
114 |
115 | def rcntFllw(t_api):
116 | targetUsr = raw_input('Enter target user: ')
117 | user = t_api.get_user(targetUsr)
118 | for friend in user.friends():
119 | print friend.screen_name
120 | print '\n'
121 | return
122 |
123 |
124 | def rcntFllwrs(t_api):
125 | targetUsr = raw_input('Enter target user: ')
126 | user = t_api.get_user(targetUsr)
127 | for friends in user.followers(count=100):
128 | print friends.screen_name
129 | print '\n'
130 | return
131 |
132 |
133 | def mentionCount(t_api):
134 | names = []
135 | print """
136 | 1. Retrieve details
137 | 2. Count specific mentions
138 | """
139 | opt = raw_input('Enter an option: ')
140 | if opt == '1':
141 | targetUsr = raw_input('Please enter a username: ')
142 | user = t_api.get_user(targetUsr)
143 | # 200 is the api limit per query currently?
144 | target_tweets = t_api.user_timeline(targetUsr, count=200)
145 | saveFile = open('tweets.txt', 'a')
146 | for tweet in target_tweets:
147 |
148 | # print tweet.text
149 |
150 | saveFile.write(tweet.text.encode('utf-8'))
151 | saveFile.write('\n')
152 | saveFile.close()
153 |         mentionCount(t_api)
154 | if opt == '2':
155 | try:
156 | tweetedUsrs = open('tweets.txt').read().splitlines()
157 | except:
158 | print 'There was an error opening the tweets file.',
159 | print 'Did you run the first option?'
160 | return
161 |
162 | for lines in tweetedUsrs:
163 | tos = re.findall('@([A-Za-z0-9_]{1,15})', lines)
164 | for twitUsr in tos:
165 | names.append(twitUsr)
166 | while True:
167 | try:
168 | targetUsr2 = \
169 | raw_input('Enter a twitter handle (without the @)'
170 | 'Ctrl+C to return: ')
171 | print 'Number of times mentioned: ', \
172 | names.count(targetUsr2)
173 | except KeyboardInterrupt:
174 | print '\n'
175 | return
176 |
177 |
178 | # twitMenu()
179 |
180 | def twitSearch(t_api):
181 | t_query = raw_input('Enter search: ')
182 | t_res = tweepy.Cursor(t_api.search, q=t_query, count=10,
183 | result='recent',
184 | include_entities=True).items()
185 | while True:
186 | try:
187 | tweet = t_res.next()
188 | print tweet.user.screen_name.encode('utf-8'), ':', \
189 | tweet.created_at, ':', tweet.text.encode('utf-8')
190 | print # print an extra line...just for readability
191 | sleep(5) # sleep so it is human readable
192 | except tweepy.TweepError:
193 |
194 | # if tweepy encounters an error, sleep for fifteen minutes..this will
195 | # help against API bans.
196 |
197 | sleep(60 * 15)
198 | except KeyboardInterrupt:
199 | return
200 |
201 | def twitdelete(t_api, stat):
202 | try:
203 | t_api.destroy_status(stat)
204 | print "Deleted:", stat
205 | except:
206 | print "Failed to delete:", stat
207 |
208 | """Copied from https://gist.github.com/davej/113241 <- credit where credit is due"""
209 | def batch_delete(t_api):
210 | print "You are about to Delete all tweets from the account @%s." % t_api.verify_credentials().screen_name
211 | print "Does this sound ok? There is no undo! Type yes to carry out this action."
212 | do_delete = raw_input("> ")
213 | if do_delete.lower() == 'yes':
214 | for status in tweepy.Cursor(t_api.user_timeline).items():
215 | try:
216 | #t_api.destroy_status(status.id)
217 | thread.start_new_thread(twitdelete, (t_api, status.id,))
218 | #print "Deleted:", status.id
219 | except:
220 | print "Failed to delete:", status.id
221 | sleep(.5)
222 | return
223 | """
224 | def fdelete(t_api, stat):
225 | try:
226 | t_api.destroy_favorite(stat)
227 | print "Deleted:", stat
228 | except:
229 | print "Failed to delete:", stat
230 | """
231 |
232 | def favdelete(t_api):
233 | print "You are about to Delete all favorites from the account @%s." % t_api.verify_credentials().screen_name
234 | print "Does this sound ok? There is no undo! Type yes to carry out this action."
235 | do_delete = raw_input("> ")
236 | if do_delete.lower() == 'yes':
237 | for status in tweepy.Cursor(t_api.favorites).items():
238 | try:
239 | t_api.destroy_favorite(status.id)
240 | #thread.start_new_thread(fdelete, (t_api, status.id,))
241 | print "Deleted:", status.id
242 | except:
243 | print "Failed to delete:", status.id
244 | sleep(3)
245 | return
246 |
247 | def twitlookup(t_api):
248 | try:
249 | conn = db.connect('test.db')
250 | c = conn.cursor()
251 | except:
252 | print '[+]ERROR: Could not connect to db'
253 | return
254 | targetUsr = raw_input('Please enter a username: ')
255 |
256 | # check if user is in the SQLite db or not
257 |
258 | c.execute('SELECT count(*) FROM twitter WHERE username = (?)',(targetUsr, ))
259 | data = c.fetchone()[0]
260 | if data == 0:
261 | try:
262 | user = t_api.get_user(targetUsr)
263 | except:
264 | print 'User does not exist on twitter!'
265 | return
266 | print 'Username: ', user.screen_name
267 | followers = user.followers_count
268 | print 'Followers: ', followers
269 | tweets = t_api.user_timeline(targetUsr, count=1)
270 | for tweet in tweets:
271 | lastTweet = tweet.text
272 | print 'Latest tweet: \n', lastTweet
273 | try:
274 | conn.execute("INSERT INTO twitter (username, followers, lasttweet)"
275 | "VALUES (?, ?, ?)",
276 | (targetUsr, followers, lastTweet))
277 | conn.commit()
278 | except:
279 |             print '[+]ERROR: Could not update database'
280 | conn.close()
281 | return
282 | else:
283 | print targetUsr, ' is already in the database.'
284 | print 'Updating user information....'
285 | user = t_api.get_user(targetUsr)
286 | print 'Username: ', user.screen_name
287 | followers = user.followers_count
288 | print 'Followers: ', followers
289 | tweets = t_api.user_timeline(targetUsr, count=1)
290 | for tweet in tweets:
291 | lastTweet = tweet.text
292 | print 'Latest tweet: \n', lastTweet
293 | try:
294 | c.execute('UPDATE twitter set followers = ?, lasttweet ='
295 | '? where username = ?',
296 | (followers, lastTweet, targetUsr))
297 | conn.commit()
298 | conn.close()
299 | except:
300 |             print '[+]ERROR: Could not update database'
301 | return
302 | return
303 |
--------------------------------------------------------------------------------