├── Red ├── nodos.txt ├── robert.vsSocket.sh ├── WinClient.ps1 ├── WinClientJSON.ps1 ├── data.json ├── escaneo.sh ├── apps.txt ├── WinWrapper.py ├── 192.168.1.26.txt ├── robert.vs.sh └── jsonconverter.sh ├── Scrapy └── tutorial │ ├── tutorial │ ├── __init__.py │ ├── items.pyc │ ├── __init__.pyc │ ├── settings.pyc │ ├── spiders │ │ ├── __init__.pyc │ │ ├── dmoz_spider.pyc │ │ ├── patchespider.pyc │ │ ├── exploit_spider.pyc │ │ ├── __init__.py │ │ ├── patchespider.py │ │ ├── exploit_spider.py │ │ └── dmoz_spider.py │ ├── pipelines.py │ ├── items.py │ └── settings.py │ └── scrapy.cfg ├── Doc └── foto.png ├── data.json ├── jsonparser.py ├── README.md ├── queries.sql ├── queries.py └── tables.sql /Red/nodos.txt: -------------------------------------------------------------------------------- 1 | 192.168.1.26 2 | -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Red/robert.vsSocket.sh: -------------------------------------------------------------------------------- 1 | while true; do (vs.sh | nc -l 9000 ); done; -------------------------------------------------------------------------------- /Doc/foto.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Doc/foto.png -------------------------------------------------------------------------------- /Red/WinClient.ps1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Red/WinClient.ps1 -------------------------------------------------------------------------------- /Red/WinClientJSON.ps1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Red/WinClientJSON.ps1 -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/items.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Scrapy/tutorial/tutorial/items.pyc -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Scrapy/tutorial/tutorial/__init__.pyc -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/settings.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Scrapy/tutorial/tutorial/settings.pyc -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Scrapy/tutorial/tutorial/spiders/__init__.pyc -------------------------------------------------------------------------------- /Red/data.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "Products":[ 4 | {"product":"Microsoft 2003 Server", "version":""}, 5 | {"product":"Firefox", "version":"34.0"} 6 | ] 7 | } 8 | 
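Red/data.json above is the kind of per-node report the client scripts emit and serve on TCP port 9000 (robert.vsSocket.sh on Linux, WinWrapper.py on Windows). A minimal server-side collector could look like the sketch below, assuming each agent returns a single JSON document shaped like Red/data.json; the helper name collect_products is illustrative and not defined anywhere in the repo, while nodos.txt is the same node list that escaneo.sh iterates over.

import json
import socket

def collect_products(host, port=9000, timeout=10):
    """Connect to a node agent (robert.vsSocket.sh / WinWrapper.py listen on 9000),
    read the full response and parse it as a Red/data.json-style document."""
    sock = socket.create_connection((host, port), timeout)
    chunks = []
    try:
        while True:
            data = sock.recv(4096)
            if not data:
                break
            chunks.append(data)
    finally:
        sock.close()
    return json.loads(b"".join(chunks).decode("utf-8"))

if __name__ == "__main__":
    # Same node list that escaneo.sh reads
    with open("nodos.txt") as f:
        nodes = f.read().split()
    for node in nodes:
        report = collect_products(node)
        for product in report["Products"]:
            print("%s: %s %s" % (node, product["product"], product["version"]))

Unlike escaneo.sh, which dumps the raw response into <ip>.txt, this parses the JSON so the detected products can later be matched against the scraped CVE data.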
-------------------------------------------------------------------------------- /Red/escaneo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | PORT=9000 5 | for i in $(cat nodos.txt) 6 | do 7 | echo $i 8 | nc $i $PORT > $i.txt 9 | 10 | done 11 | -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/dmoz_spider.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Scrapy/tutorial/tutorial/spiders/dmoz_spider.pyc -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/patchespider.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Scrapy/tutorial/tutorial/spiders/patchespider.pyc -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/exploit_spider.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adon90/vulsecure/master/Scrapy/tutorial/tutorial/spiders/exploit_spider.pyc -------------------------------------------------------------------------------- /data.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "Products":[ 4 | {"vendor": "Microsoft" ,"product":"Windows Server 2003", "version":"R2"}, 5 | {"vendor": "Mozilla", "product":"Firefox", "version":"39.0"} 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /Red/apps.txt: -------------------------------------------------------------------------------- 1 | IIS 2 | Apache 3 | Flash 4 | Adobe reader 5 | Firefox 6 | Chrome 7 | iExplorer 8 | MySQL 9 | (SO) https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833%28v=vs.85%29.aspx 10 | Java -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/__init__.py: -------------------------------------------------------------------------------- 1 | # This package will contain the spiders of your Scrapy project 2 | # 3 | # Please refer to the documentation for information on how to create and manage 4 | # your spiders. 
 5 | 
--------------------------------------------------------------------------------
/jsonparser.py:
--------------------------------------------------------------------------------
 1 | import json
 2 | 
 3 | 
 4 | with open('data.json') as data_file:
 5 |     data = json.load(data_file)
 6 | 
 7 | for product in data['Products']:
 8 |     print product['vendor']
 9 |     print product['product']
10 |     print product['version']
11 | 
12 | # print(data['Products'][0]['product'])
13 | 
14 | 
15 | 
--------------------------------------------------------------------------------
/Scrapy/tutorial/scrapy.cfg:
--------------------------------------------------------------------------------
 1 | # Automatically created by: scrapy startproject
 2 | #
 3 | # For more information about the [deploy] section see:
 4 | # https://scrapyd.readthedocs.org/en/latest/deploy.html
 5 | 
 6 | [settings]
 7 | default = tutorial.settings
 8 | 
 9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = tutorial
12 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | Project Name: Vulsecure
 2 | Description: Detects the software installed on each node and scrapes the vulnerabilities, exploits and patches that affect it.
 3 | Dependencies: PHP, MySQL, Python, Scrapy
 4 | Basic Usage: Client agents wait for the server to request information about the software installed on their node, then collect it and send it back. The server periodically scrapes the vulnerability, exploit and patch sources.
 5 | 
--------------------------------------------------------------------------------
/Scrapy/tutorial/tutorial/pipelines.py:
--------------------------------------------------------------------------------
 1 | 
 2 | # -*- coding: utf-8 -*-
 3 | 
 4 | # Define your item pipelines here
 5 | #
 6 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
 7 | # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
 8 | 
 9 | 
10 | class TutorialPipeline(object):
11 |     def process_item(self, item, spider):
12 |         return item
13 | 
--------------------------------------------------------------------------------
/Red/WinWrapper.py:
--------------------------------------------------------------------------------
 1 | import socket
 2 | import subprocess
 3 | 
 4 | s = socket.socket()
 5 | s.bind(('localhost', 9000))
 6 | s.listen(1)
 7 | 
 8 | while True:
 9 |     sc, addr = s.accept()
10 |     p = subprocess.check_output(["powershell.exe", "-ExecutionPolicy", "Unrestricted", r"C:\Users\kaiser\Documents\GitWorkspace\vulsecure\Red\WinClientJSON.ps1"])
11 |     sc.send(p)
12 |     sc.close()
13 | 
14 | s.close()
15 | 
--------------------------------------------------------------------------------
/queries.sql:
--------------------------------------------------------------------------------
 1 | select * from Vulnerabilities_cve inner join Products on Vulnerabilities_cve.CVE = Products.CVE where Product="Windows 2003 Server"\G
 2 | 
 3 | select * from Vulnerabilities_cve inner join Products on Vulnerabilities_cve.CVE = Products.CVE inner join Exploits on Products.CVE=Exploits.CVE where Product="Windows 2003 Server"\G
 4 | 
 5 | select * from Patches inner join Patch_CVE on Patches.patch_id = Patch_CVE.patch_id inner join Products on Products.CVE=Patch_CVE.cve where Product="Windows 2003 Server"\G
 6 | 
 7 | select * from Vulnerabilities_cve inner join Products on Vulnerabilities_cve.CVE = Products.CVE inner join Exploits on Products.CVE=Exploits.CVE where Product="Windows 2003 Server" and Vulnerabilities_cve.CVE="CVE-2008-2245"\G
 8 | 
 9 | 
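The queries above join Products directly on a CVE column, whereas tables.sql links Products and VulnerabilitiesCVE through the Product_cve junction table. A sketch of the same lookup written against the tables.sql schema, using MySQLdb in the style of queries.py; the helper name cves_for_product is illustrative and not part of the repo.

#!/usr/bin/python
# -*- coding: utf-8 -*-

import MySQLdb as mdb

# Connection parameters as in queries.py
servername, username, password, dbname = "localhost", "root", "root", "vulsecure"

def cves_for_product(product_name):
    """Return the CVE rows for a product, joining through the Product_cve
    junction table defined in tables.sql."""
    con = mdb.connect(servername, username, password, dbname)
    with con:
        cur = con.cursor()
        cur.execute(
            "SELECT v.* "
            "FROM VulnerabilitiesCVE v "
            "INNER JOIN Product_cve pc ON pc.cve = v.cve "
            "INNER JOIN Products p ON p.product_id = pc.product_id "
            "WHERE p.product = %s",
            (product_name,))
        return cur.fetchall()

if __name__ == "__main__":
    for row in cves_for_product("Windows 2003 Server"):
        print(row)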
-------------------------------------------------------------------------------- /queries.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import MySQLdb as mdb 5 | import sys 6 | import os 7 | servername = "localhost" 8 | username = "root" 9 | password = "root" 10 | dbname = "vulsecure" 11 | 12 | def connection(): 13 | 14 | 15 | global servername, username, password, dbname 16 | 17 | con = mdb.connect(servername, username, password, dbname) 18 | 19 | return con; 20 | 21 | 22 | def insert_product (vendor,product,version): 23 | 24 | 25 | 26 | con = connection() 27 | 28 | 29 | 30 | with con: 31 | 32 | cur = con.cursor() 33 | 34 | 35 | 36 | cur.execute("INSERT INTO Products(vendor,product,version) VALUES(%s,%s,%s)",(vendor,product,version)) 37 | con.commit() 38 | -------------------------------------------------------------------------------- /Red/192.168.1.26.txt: -------------------------------------------------------------------------------- 1 | -------:::VulSecure 0.1 client:::-------- 2 | 3 | NODE IP: 192.168.1.26 4 | 5 | HARDWARE INFO: 6 | 7 | Architecture: x86_64 8 | CPU op-mode(s): 32-bit, 64-bit CPU(s): 8 On-line CPU(s) list: 0-7 CPU family: 6 CPU MHz: 800.000 NUMA node0 CPU(s): 0-7 9 | 10 | 11 | OPERATING SYSTEM INFO: 12 | 13 | Distributor ID: Ubuntu Description: Ubuntu 14.04.2 LTS Release: 14.04 Codename: trusty 14 | 15 | 16 | SOFTWARE PRODUCTS INFO: 17 | 18 | Product: IIS 19 | Not installed on this node 20 | 21 | Product: apache2 22 | Version: 2.4.7-1ubuntu4.8 23 | 24 | Product: Flash 25 | Not installed on this node 26 | 27 | Product: acroread 28 | Version: 9.5.5-1precise1 29 | 30 | Product: Firefox 31 | Version: 39.0+build5-0ubuntu0.14.04.1 32 | 33 | Product: Chrome 34 | Not installed on this node 35 | 36 | -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/items.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Define here the models for your scraped items 4 | # 5 | # See documentation in: 6 | # http://doc.scrapy.org/en/latest/topics/items.html 7 | 8 | import scrapy 9 | 10 | 11 | class TutorialItem(scrapy.Item): 12 | # define the fields for your item here like: 13 | # name = scrapy.Field() 14 | pass 15 | 16 | class VulnerabilityItem(scrapy.Item): 17 | 18 | CVE = scrapy.Field() 19 | Description = scrapy.Field() 20 | P_Date = scrapy.Field() 21 | U_Date = scrapy.Field() 22 | Score = scrapy.Field() 23 | Type = scrapy.Field() 24 | 25 | class ProductItem(scrapy.Item): 26 | 27 | Vendor = scrapy.Field() 28 | Product = scrapy.Field() 29 | Version = scrapy.Field() 30 | CVE = scrapy.Field() 31 | 32 | class ExploitItem(scrapy.Item): 33 | 34 | ID = scrapy.Field() 35 | CVE = scrapy.Field() 36 | Date = scrapy.Field() 37 | Exploit = scrapy.Field() -------------------------------------------------------------------------------- /Red/robert.vs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################# 4 | # # 5 | # vulSecure client 0.1 # 6 | # # 7 | ################################# 8 | 9 | # Scanning from apps.txt 10 | 11 | name=""; 12 | version=""; 13 | 14 | echo -e "-------:::VulSecure 0.1 client:::-------- \n" 15 | 16 | echo -e "NODE IP: 192.168.1.26 \n" 17 | 18 | echo -e "HARDWARE INFO: \n" 19 | 20 | echo $(lscpu | grep Architecture) 21 | 22 | echo $(lscpu | grep CPU) 23 | 24 | echo -e "\n" 
25 | 26 | echo -e "OPERATING SYSTEM INFO: \n" 27 | 28 | echo $(lsb_release -a 2> /dev/null) 29 | 30 | echo -e "\n" 31 | 32 | echo -e "SOFTWARE PRODUCTS INFO: \n" 33 | { 34 | while read softwareProduct 35 | do 36 | 37 | name=$softwareProduct 38 | 39 | echo Product: $name 40 | 41 | version=$(dpkg-query --status $softwareProduct |grep Version) 42 | 43 | if [ ! -z "${version}" ]; then 44 | echo -e "$version \n" 45 | else 46 | echo -e "Not installed on this node\n" 47 | fi 48 | 49 | done < apps.txt; 50 | } 2> /dev/null 51 | -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/patchespider.py: -------------------------------------------------------------------------------- 1 | import scrapy 2 | from scrapy.contrib.spiders import CrawlSpider, Rule 3 | from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor 4 | from tutorial.items import ProductItem 5 | from tutorial.items import VulnerabilityItem 6 | #from queries import * 7 | 8 | 9 | 10 | class patchSpider(CrawlSpider): 11 | name = "patch" 12 | allowed_domains = ["itsecdb.com"] 13 | start_urls = [ 14 | "http://www.itsecdb.com/oval/definitions/class-4-Patch/4/" 15 | ] 16 | rules = [ 17 | Rule(SgmlLinkExtractor(restrict_xpaths=('//table[@class="listtable"]//tr[position()=2]/td/a')), callback='parse_item')] 18 | 19 | def parse_item(self, response): 20 | 21 | 22 | id = response.xpath('//td[@id="contenttd"]/h1/a/text()').extract()[0] 23 | #patch 24 | print id 25 | description = response.xpath('//div[@class="ovaldescription"]/text()').extract()[0].strip() 26 | print description 27 | references = response.xpath('//ul[@class="ovalreferencesul"]/li/a/text()').extract() 28 | for c in references: 29 | 30 | if "CVE" in c: 31 | print c 32 | print response.url 33 | #print cves 34 | -------------------------------------------------------------------------------- /Red/jsonconverter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################# 4 | # # 5 | # vulSecure client 0.1 # 6 | # # 7 | ################################# 8 | 9 | # Scanning from apps.txt 10 | 11 | name=""; 12 | version=""; 13 | 14 | # NODE INFO 15 | 16 | 17 | echo -e "{\"Node\":[{\"MAC\":\""mac1"\", \"cores\":\""8"\", \"ram\":\""16"\"}]}" 18 | 19 | echo -e "\n" 20 | 21 | # OS INFO 22 | 23 | name=$(lsb_release -a 2> /dev/null | grep Description | awk ' {print $2} ') 24 | 25 | version=$(lsb_release -a 2> /dev/null | grep Description | awk ' {print $3} ') 26 | 27 | echo -e "{\"OS\":[{\"name\":\"$name"\", \"version\":\"$version"\"}]}" 28 | 29 | echo -e "\n" 30 | 31 | # SOFTWARE PRODUCTS INFO 32 | 33 | 34 | echo -e "{\n\"Products\":[" 35 | 36 | 37 | { 38 | while read softwareProduct 39 | do 40 | 41 | name=$softwareProduct 42 | 43 | version=$(dpkg-query --status $softwareProduct |grep Version | awk '{print $2}') 44 | 45 | if [ ! 
-z "${version}" ]; then 46 | echo -e {\"product\":\""$name"\", \"version\":\"$version\"}, | sed 's/\},\n\]/\}\n\[/g' 47 | 48 | else 49 | echo -e {\"product\":\""$name"\", \"version\":\"Not installed\"}, | sed 's/\},\n\]/\}\n\]/g' 50 | fi 51 | 52 | done < apps.txt; 53 | 54 | echo -e "]\n}" 55 | } 2> /dev/null 56 | 57 | -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/exploit_spider.py: -------------------------------------------------------------------------------- 1 | import scrapy 2 | from scrapy.contrib.spiders import CrawlSpider, Rule 3 | from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor 4 | from tutorial.items import ExploitItem 5 | from scrapy.selector import HtmlXPathSelector 6 | #from queries import * 7 | import re 8 | 9 | 10 | #count = 0 11 | class exploitSpider(CrawlSpider): 12 | name = "exploitSpider" 13 | allowed_domains = ["www.exploit-db.com"] 14 | start_urls = [ 15 | "https://www.exploit-db.com/search/?order_by=date&order=desc&pg="+str(i)+"&action=search" for i in range(1,2000) 16 | ] 17 | 18 | rules = [Rule(SgmlLinkExtractor(restrict_xpaths=('//td[@class="description"]/a')), callback='parse_item')] 19 | def parse_item(self, response): 20 | exploit = ExploitItem() 21 | 22 | #print "parsing........." 23 | 24 | cve_null = response.xpath("//table[@class='exploit_list']//tr[position()=1]/td[position()=2]/text()").extract()[0] 25 | #print cve_null 26 | if cve_null.strip(" ") != "N/A": 27 | 28 | #print "CVE Detected!!!!!!!!!!" 29 | exploit['ID'] = response.xpath("//table[@class='exploit_list']//tr[position()=1]/td[position()=1]/text()").extract()[0] 30 | exploit['CVE']=response.xpath("//table[@class='exploit_list']//tr[position()=1]/td[position()=2]/a/text()").extract()[0] 31 | exploit['Date'] = response.xpath("//table[@class='exploit_list']//tr[position()=2]/td[position()=3]/text()").extract()[0] 32 | exploit['Exploit'] = response.xpath("//div[@id='container']/pre/text()").extract()[0] 33 | #print "CVE----------------->"+exploit['CVE'] 34 | #print "ID----------------->"+exploit['ID'] 35 | #print "Date----------------->"+exploit['Date'] 36 | #print "Exploit----------------->"+exploit['Exploit'] 37 | #insert_exploit(exploit['ID'],"CVE-"+exploit['CVE'],exploit['Date'],exploit['Exploit']) 38 | 39 | 40 | -------------------------------------------------------------------------------- /tables.sql: -------------------------------------------------------------------------------- 1 | create table Products( 2 | 3 | product_id int not null auto_increment, 4 | vendor varchar(60) not null, 5 | product varchar(60), 6 | version varchar(30), 7 | linux_command varchar(30), 8 | window_command varchar(30), 9 | primary key (product_id) 10 | 11 | ); 12 | 13 | 14 | create table Patches( 15 | 16 | patch_id varchar(60) not null, 17 | description varchar(1000), 18 | url varchar(60), 19 | pub_date date, 20 | up_date date, 21 | primary key (patch_id) 22 | 23 | ); 24 | 25 | create table Patch_cve( 26 | 27 | patch_id varchar(60), 28 | cve varchar(14), 29 | foreign key (patch_id) references Patches(patch_id), 30 | foreign key (cve) references VulnerabilitiesCVE(cve) 31 | ); 32 | 33 | create table VulnerabilitiesCVE 34 | ( 35 | cve varchar(14) not null, 36 | description varchar(2000), 37 | pub_date date, 38 | up_date date, 39 | score decimal(3,1), 40 | type varchar(100), 41 | url varchar(65), 42 | primary key (cve) 43 | ); 44 | 45 | 46 | 47 | 48 | create table Product_cve( 49 | 50 | product_id int, 51 | cve varchar(14), 52 | 
foreign key (product_id) references Products(product_id), 53 | foreign key (cve) references VulnerabilitiesCVE(cve) 54 | ); 55 | 56 | 57 | create table Exploits ( 58 | exploit_id varchar(9) not null, 59 | cve varchar(30) not null, 60 | date date, 61 | exploit varchar (10000) not null, 62 | primary key (exploit_id), 63 | foreign key (cve) references VulnerabilitiesCVE(cve) 64 | 65 | ); 66 | 67 | 68 | 69 | create table Nodes ( 70 | 71 | mac varchar(20) not null, 72 | cores int, 73 | ram int, 74 | primary key (mac) 75 | 76 | ); 77 | 78 | create table OS ( 79 | 80 | os_id int not null auto_increment, 81 | name varchar(30), 82 | version varchar(30), 83 | primary key (os_id) 84 | 85 | ); 86 | 87 | create table Nodes_OS_Product ( 88 | 89 | mac varchar(20) not null, 90 | product_id int, 91 | os_id int, 92 | ip varchar (15), 93 | foreign key (mac) references Nodes(mac), 94 | foreign key (product_id) references Products(product_id), 95 | foreign key (os_id) references OS(os_id) 96 | 97 | ); 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/spiders/dmoz_spider.py: -------------------------------------------------------------------------------- 1 | import scrapy 2 | from scrapy.contrib.spiders import CrawlSpider, Rule 3 | from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor 4 | from tutorial.items import ProductItem 5 | from tutorial.items import VulnerabilityItem 6 | #from queries import * 7 | 8 | 9 | 10 | class CVESpider(CrawlSpider): 11 | name = "cve" 12 | allowed_domains = ["cvedetails.com"] 13 | start_urls = [ 14 | "http://www.cvedetails.com/vulnerability-list/" 15 | ] 16 | rules = [Rule(SgmlLinkExtractor(restrict_xpaths=('//div[@class="paging"]/a[position()=1]')), follow=True), 17 | Rule(SgmlLinkExtractor(restrict_xpaths=('//tr/td[@nowrap]/a')), callback='parse_item')] 18 | 19 | def parse_item(self, response): 20 | #hxs = HtmlXPathSelector(response) 21 | 22 | vulnerability = VulnerabilityItem() 23 | product = ProductItem() 24 | datenote = response.xpath("//span[@class='datenote']/text()").extract()[0] 25 | date = datenote.split('\t') 26 | date_pub_aux = date[1] 27 | date_pub_list = date_pub_aux.split() 28 | date_pub = date_pub_list[3] 29 | date_upd_aux = date[2] 30 | date_upd_list = date_upd_aux.split() 31 | date_upd = date_upd_list[4] 32 | # Extract links 33 | cve = response.xpath("//h1/a[@title][position() = 1]/text()").extract()[0] # Xpath selector for tag(s) 34 | description = response.xpath("//td/div[@class='cvedetailssummary']/text()").extract()[0] 35 | vulnerability['CVE'] = cve 36 | vulnerability['Description'] = description.split('\t')[1] 37 | vulnerability['P_Date'] = date_pub 38 | vulnerability['U_Date'] = date_upd 39 | vulnerability['Score'] = response.xpath("//td/div[@class='cvssbox']/text()").extract()[0] 40 | 41 | type_aux = response.xpath("//table//tr[position()=8]/td/span/text()").extract() 42 | typez = ' '.join(type_aux) 43 | vulnerability['Type'] = typez 44 | 45 | 46 | url = response.url 47 | 48 | print cve 49 | 50 | if (vulnerability['Score'] != '0.0'): 51 | 52 | # insert_vulnerabilities (vulnerability['CVE'],vulnerability['Description'],vulnerability['P_Date']\ 53 | # ,vulnerability['U_Date'], vulnerability['Score'],vulnerability['Type'],url) 54 | pass 55 | ############PRODUCT########### 56 | 57 | product['Vendor'] = response.xpath("//table[@id='vulnprodstable']//tr/td[position() = 3]/a/text()").extract() 58 | product['Product'] = 
response.xpath("//table[@id='vulnprodstable']//td[position() = 4]/a/text()").extract() 59 | product['Version'] = response.xpath("//table[@id='vulnprodstable']//td[position() = 5]/text()").extract() 60 | 61 | 62 | 63 | for x in range(len(product['Vendor'])): 64 | #print cve 65 | # print product['Vendor'][x] 66 | # print product['Product'][x] 67 | # print product['Version'][x].strip() 68 | #insert_product (product['Vendor'][x],product['Product'][x],product['Version'][x].strip(),cve) 69 | pass 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /Scrapy/tutorial/tutorial/settings.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Scrapy settings for tutorial project 4 | # 5 | # For simplicity, this file contains only settings considered important or 6 | # commonly used. You can find more settings consulting the documentation: 7 | # 8 | # http://doc.scrapy.org/en/latest/topics/settings.html 9 | # http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html 10 | # http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html 11 | 12 | BOT_NAME = 'tutorial' 13 | 14 | SPIDER_MODULES = ['tutorial.spiders'] 15 | NEWSPIDER_MODULE = 'tutorial.spiders' 16 | 17 | 18 | # Crawl responsibly by identifying yourself (and your website) on the user-agent 19 | #USER_AGENT = 'tutorial (+http://www.yourdomain.com)' 20 | 21 | # Configure maximum concurrent requests performed by Scrapy (default: 16) 22 | CONCURRENT_REQUESTS=16 23 | 24 | # Configure a delay for requests for the same website (default: 0) 25 | # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay 26 | # See also autothrottle settings and docs 27 | #DOWNLOAD_DELAY=3 28 | # The download delay setting will honor only one of: 29 | #CONCURRENT_REQUESTS_PER_DOMAIN=16 30 | #CONCURRENT_REQUESTS_PER_IP=16 31 | 32 | # Disable cookies (enabled by default) 33 | #COOKIES_ENABLED=False 34 | 35 | # Disable Telnet Console (enabled by default) 36 | #TELNETCONSOLE_ENABLED=False 37 | 38 | # Override the default request headers: 39 | #DEFAULT_REQUEST_HEADERS = { 40 | # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 41 | # 'Accept-Language': 'en', 42 | #} 43 | 44 | # Enable or disable spider middlewares 45 | # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html 46 | #SPIDER_MIDDLEWARES = { 47 | # 'tutorial.middlewares.MyCustomSpiderMiddleware': 543, 48 | #} 49 | 50 | # Enable or disable downloader middlewares 51 | # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html 52 | #DOWNLOADER_MIDDLEWARES = { 53 | # 'tutorial.middlewares.MyCustomDownloaderMiddleware': 543, 54 | #} 55 | 56 | # Enable or disable extensions 57 | # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html 58 | #EXTENSIONS = { 59 | # 'scrapy.telnet.TelnetConsole': None, 60 | #} 61 | 62 | # Configure item pipelines 63 | # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html 64 | #ITEM_PIPELINES = { 65 | # 'tutorial.pipelines.SomePipeline': 300, 66 | #} 67 | 68 | # Enable and configure the AutoThrottle extension (disabled by default) 69 | # See http://doc.scrapy.org/en/latest/topics/autothrottle.html 70 | # NOTE: AutoThrottle will honour the standard settings for concurrency and delay 71 | #AUTOTHROTTLE_ENABLED=True 72 | # The initial download delay 73 | #AUTOTHROTTLE_START_DELAY=5 74 | # The maximum download delay to be set 
in case of high latencies 75 | #AUTOTHROTTLE_MAX_DELAY=60 76 | # Enable showing throttling stats for every response received: 77 | #AUTOTHROTTLE_DEBUG=False 78 | 79 | # Enable and configure HTTP caching (disabled by default) 80 | # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings 81 | #HTTPCACHE_ENABLED=True 82 | #HTTPCACHE_EXPIRATION_SECS=0 83 | #HTTPCACHE_DIR='httpcache' 84 | #HTTPCACHE_IGNORE_HTTP_CODES=[] 85 | #HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage' 86 | 87 | ####CRAWL IN ORDER#### 88 | #DEPTH_PRIORITY = 1 89 | #SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue' 90 | #SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue' 91 | --------------------------------------------------------------------------------
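pipelines.py is still the stub generated by scrapy startproject and ITEM_PIPELINES is commented out in settings.py above, so the spiders currently print what they scrape (or call the commented-out insert_* helpers) instead of yielding items. One possible way to wire the pieces together is an item pipeline that writes to the same vulsecure database used by queries.py. This is a sketch only, assuming the spiders are changed to yield single-valued ProductItem objects; the class name MySQLPipeline and the setting shown in the comment are not part of the repo.

# Possible addition to tutorial/pipelines.py; enable it in settings.py with:
# ITEM_PIPELINES = {'tutorial.pipelines.MySQLPipeline': 300}

import MySQLdb as mdb

class MySQLPipeline(object):
    """Store scraped products in the vulsecure database used by queries.py."""

    def open_spider(self, spider):
        self.con = mdb.connect("localhost", "root", "root", "vulsecure")

    def close_spider(self, spider):
        self.con.close()

    def process_item(self, item, spider):
        # Only handle items that declare the ProductItem fields (see items.py)
        if all(f in item.fields for f in ('Vendor', 'Product', 'Version')):
            cur = self.con.cursor()
            cur.execute(
                "INSERT INTO Products(vendor,product,version) VALUES(%s,%s,%s)",
                (item.get('Vendor'), item.get('Product'), item.get('Version')))
            self.con.commit()
        return item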