├── .DS_Store
├── .gitattributes
├── LICENSE
├── geckodriver.log
├── getMainDomain.py
├── getTargets.py
├── ghostdriver.log
├── nbscan
│   ├── root_domain.py
│   ├── scan_start.py
│   └── subdomain.py
├── ok.txt
└── target.txt
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mstxq17/eduSrcToolSet/d8123a5824ffb1d547e56f7188ab93f21d46884e/.DS_Store
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 mstxq17
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/geckodriver.log:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mstxq17/eduSrcToolSet/d8123a5824ffb1d547e56f7188ab93f21d46884e/geckodriver.log
--------------------------------------------------------------------------------
/getMainDomain.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | # -*- coding:utf-8 -*-
3 | # 用于获取一级域名
4 |
5 | import tldextract
6 | from urllib.parse import urlparse
7 |
8 |
def Test():
    """Demo: split a hostname into subdomain / registered domain / suffix.

    Uses tldextract, which understands the public-suffix list (so
    'edu.cn' style suffixes are handled correctly).
    """
    url = 'm.windowscentral.com'
    # Parse once and reuse the result instead of re-parsing the same
    # URL three times (the original called tldextract.extract() per field).
    parts = tldextract.extract(url)
    # Registered ("first-level") domain, e.g. 'windowscentral'.
    domain = parts.domain
    # Subdomain labels, e.g. 'm'.
    subdomain = parts.subdomain
    # Public suffix, e.g. 'com'.
    suffix = parts.suffix
    print("获取到的一级域名:{}".format(domain))
    print("获取到二级域名:{}".format(subdomain))
    print("获取到的url后缀:{}".format(suffix))
20 |
def main():
    """Read hostnames from target.txt, reduce each to its registered
    domain + suffix, and write the results to ok.txt (one per line)."""
    filename = "target.txt"
    with open(filename, 'r') as f1, open("ok.txt", 'w') as f2:
        for url in f1:
            # Prepend a scheme so urlparse populates netloc; this also
            # strips any path/port noise from the input line.
            u = urlparse("http://" + url.strip() + '/').netloc
            # BUG FIX: the suffix was previously extracted from the raw
            # line `url` (still carrying its trailing newline) instead of
            # the normalized host `u`; parse `u` once and use it for both.
            parts = tldextract.extract(u)
            domain = parts.domain + '.' + parts.suffix
            f2.write(domain + '\n')
    print("Done!")
# Script entry point.
if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/getTargets.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 |
4 | import requests
5 | import json
6 | import os
7 | from urllib.parse import urlparse
8 | from selenium import webdriver
9 |
# ---------------------------------------------------------------------------
# Step 1: page through the eol.cn "gaokao" school-list API and collect
# school_id -> school_name for province_id=43 (Hunan).
# NOTE(review): `host` and `query_url` appear unused — the request below is
# issued through the external `curl` command instead. Verify before removing.
# ---------------------------------------------------------------------------
host = "https://api.eol.cn:443/"
query_url = "gkcx/api/?access_token=&admissions=¢ral=&department=&dual_class=&f211=&f985=&is_doublehigh=&is_dual_class=&keyword=&nature=&page={page}&province_id=&request_type=1&school_type=&signsafe=&size={size}&sort=view_total&type=&uri=apidata/api/gk/school/lists"

# Raw curl request template; {page} and {size} are filled via str.format below
# (the doubled braces in the JSON body escape .format's placeholders).
# NOTE(review): "¢ral=" looks like mojibake of "&central=" (the "&cent;" HTML
# entity) — confirm against the original API query string before relying on it.
command = """curl -i -s -k -X $'POST' \
    -H $'Host: api.eol.cn' -H $'Connection: close' -H $'Content-Length: 299' -H $'Accept: application/json, text/plain, */*' -H $'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36' -H $'Content-Type: application/json;charset=UTF-8' -H $'Origin: https://gkcx.eol.cn' -H $'Sec-Fetch-Site: same-site' -H $'Sec-Fetch-Mode: cors' -H $'Sec-Fetch-Dest: empty' -H $'Accept-Encoding: gzip, deflate' -H $'Accept-Language: zh-CN,zh;q=0.9,ja;q=0.8' \
    --data-binary $'{{\"access_token\":\"\",\"admissions\":\"\",\"central\":\"\",\"department\":\"\",\"dual_class\":\"\",\"f211\":\"\",\"f985\":\"\",\"is_doublehigh\":\"\",\"is_dual_class\":\"\",\"keyword\":\"\",\"nature\":\"\",\"page\":1,\"province_id\":43,\"request_type\":1,\"school_type\":\"\",\"size\":15,\"sort\":\"view_total\",\"type\":\"\",\"uri\":\"apidata/api/gk/school/lists\"}}' \
    $'https://api.eol.cn/gkcx/api/?access_token=&admissions=¢ral=&department=&dual_class=&f211=&f985=&is_doublehigh=&is_dual_class=&keyword=&nature=&page={page}&province_id=43&request_type=1&school_type=&signsafe=&size={size}&sort=view_total&type=&uri=apidata/api/gk/school/lists'"""
school_dict = {}
for p in range(0, 10):
    # Run curl and split the raw HTTP response on newlines; the JSON body is
    # the last line (status line and headers come first).
    response_text = os.popen(command.format(page=p, size=15)).read().split("\n")
    body_text = response_text[-1]
    school_list = json.loads(body_text)['data']['item']
    for sl in school_list:
        school_name,school_id = sl['name'], sl['school_id']
        # Duplicate ids across pages simply overwrite — the dict dedupes them.
        school_dict[school_id] = school_name
print(len(school_dict))
print(school_dict)

# Get the main domain
# Step 2: build the gkcx.eol.cn profile-page URL for every collected school id.
shool_url = []
_host = "https://gkcx.eol.cn/school/"
for key in school_dict.keys():
    shool_url.append(_host + str(key))
print(shool_url)
# url = "c2479"
# browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver2.3')
# browser.get(url)
# # print(browser.page_source)
# # soup = BeautifulSoup(browser.page_source, 'html.parser')
# # print(soup.find)
# elem = browser.find_element_by_xpath('//*[@id="root"]/div/div/div/div/div/div/div[2]/div/div/div[3]/div[2]/div[4]/div[1]/span[2]/a[1]')
# print(elem.get_attribute("href"))
# browser.close()

# url = "http://nyzsjy.hynu.cn/"
# netloc = urlparse(url).netloc

# Step 3: open each profile page in Chrome and scrape the admissions-site link
# via a fixed XPath; keep only the host (netloc) part of the href.
browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver2.3')
netlocs = []
for url in shool_url:
    try:
        print("handling this ——"+str(url))
        browser.get(url)
        elem = browser.find_element_by_xpath('//*[@id="root"]/div/div/div/div/div/div/div[2]/div/div/div[3]/div[2]/div[4]/div[1]/span[2]/a[1]')
        netlocs.append(urlparse(elem.get_attribute("href")).netloc)
    except Exception as e:
        # Best effort: log the failure and continue with the next school.
        print(e)
        print(f"fail---{url}")
browser.close()
# Write results to file
with open("target.txt", 'w') as f:
    for domain in netlocs:
        try:
            f.write(domain.strip() + '\n')
        except Exception as e:
            print(e)
            print(domain.strip())
print("Done!")
68 |
69 |
70 |
71 |
--------------------------------------------------------------------------------
/ghostdriver.log:
--------------------------------------------------------------------------------
1 | [INFO - 2021-02-25T17:38:36.426Z] GhostDriver - Main - running on port 59602
2 | [INFO - 2021-02-25T17:38:36.568Z] Session [4a02de30-7790-11eb-a811-89d8f5dcba50] - page.settings - {"XSSAuditingEnabled":false,"javascriptCanCloseWindows":true,"javascriptCanOpenWindows":true,"javascriptEnabled":true,"loadImages":true,"localToRemoteUrlAccessEnabled":false,"userAgent":"Mozilla/5.0 (Macintosh; Intel Mac OS X) AppleWebKit/538.1 (KHTML, like Gecko) PhantomJS/2.1.1 Safari/538.1","webSecurityEnabled":true}
3 | [INFO - 2021-02-25T17:38:36.568Z] Session [4a02de30-7790-11eb-a811-89d8f5dcba50] - page.customHeaders: - {}
4 | [INFO - 2021-02-25T17:38:36.568Z] Session [4a02de30-7790-11eb-a811-89d8f5dcba50] - Session.negotiatedCapabilities - {"browserName":"phantomjs","version":"2.1.1","driverName":"ghostdriver","driverVersion":"1.2.0","platform":"mac-unknown-64bit","javascriptEnabled":true,"takesScreenshot":true,"handlesAlerts":false,"databaseEnabled":false,"locationContextEnabled":false,"applicationCacheEnabled":false,"browserConnectionEnabled":false,"cssSelectorsEnabled":true,"webStorageEnabled":false,"rotatable":false,"acceptSslCerts":false,"nativeEvents":true,"proxy":{"proxyType":"direct"}}
5 | [INFO - 2021-02-25T17:38:36.568Z] SessionManagerReqHand - _postNewSessionCommand - New Session Created: 4a02de30-7790-11eb-a811-89d8f5dcba50
6 | [INFO - 2021-02-25T17:43:36.426Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
7 | [INFO - 2021-02-25T17:48:36.429Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
8 | [INFO - 2021-02-25T18:02:53.627Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
9 | [INFO - 2021-02-25T18:54:25.665Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
10 | [INFO - 2021-02-25T19:55:16.440Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
11 | [INFO - 2021-02-25T20:46:48.498Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
12 | [INFO - 2021-02-25T21:38:21.359Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
13 | [INFO - 2021-02-25T22:39:11.972Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
14 | [INFO - 2021-02-25T23:30:44.667Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
15 | [INFO - 2021-02-26T00:16:00.294Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
16 | [INFO - 2021-02-26T00:21:00.295Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
17 | [INFO - 2021-02-26T00:26:00.299Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
18 | [INFO - 2021-02-26T00:31:00.300Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
19 | [INFO - 2021-02-26T00:52:59.171Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
20 | [INFO - 2021-02-26T00:57:59.172Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
21 | [INFO - 2021-02-26T01:02:59.180Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
22 | [INFO - 2021-02-26T01:07:59.185Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
23 | [INFO - 2021-02-26T01:12:59.191Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
24 | [INFO - 2021-02-26T01:17:59.195Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
25 | [INFO - 2021-02-26T01:22:59.201Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
26 | [INFO - 2021-02-26T01:27:59.209Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
27 | [INFO - 2021-02-26T01:32:59.216Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
28 | [INFO - 2021-02-26T01:37:59.221Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
29 | [INFO - 2021-02-26T01:42:59.225Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
30 | [INFO - 2021-02-26T01:47:59.226Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
31 | [INFO - 2021-02-26T01:52:59.232Z] SessionManagerReqHand - _cleanupWindowlessSessions - Asynchronous Sessions clean-up phase starting NOW
32 |
--------------------------------------------------------------------------------
/nbscan/root_domain.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding:utf-8 -*-
3 | # 通过接口去反查注册域名获取根资产
4 |
5 | import urllib2,requests
6 | import time
7 | from bs4 import BeautifulSoup
8 |
class root(object):
    """Collect root (registered) domains related to a target domain via
    reverse-lookup web services: aizhan ICP record / email / registrant
    reverse-whois, plus bugscaner email reverse lookup.

    NOTE(review): this module is Python 2 code (urllib2, print statements).
    """
    def __init__(self):
        # One result set per lookup source; unioned in get_root_url().
        self.set_aizhan = set()
        self.set_aizhan2 = set()
        self.set_aizhan3 = set()
        self.set_bugscaner = set()

    # Unified entry point for all lookup interfaces
    def get_root_url(self,url):
        """Run every reverse-lookup source for `url` and return the union."""
        self.aizhan(url)
        self.aizhan2(url)
        self.aizhan3(url)
        self.bugscaner(url)
        # Union of all sources
        total_rootdomain = self.set_aizhan | self.set_aizhan2 | self.set_aizhan3 | self.set_bugscaner
        # Return the combined result
        return total_rootdomain


    def aizhan(self,url):
        """ICP record-number reverse lookup via icp.aizhan.com."""
        print '[+]Record number reverse checking'
        self.set_aizhan.add(url)
        url1 = 'https://icp.aizhan.com/%s/'%url
        try:
            r = urllib2.urlopen(url1).read()
            b = BeautifulSoup(r,'lxml')
            for i in b.find_all('span',class_='blue'):
                # NOTE(review): the string literals in the next two lines
                # contain raw control characters (probably a literal '\r')
                # and an empty '' that look mangled by extraction — confirm
                # against the original source before editing.
                # NOTE(review): .strip('www.') strips any of the characters
                # {'w', '.'} from both ends, not the literal prefix "www." —
                # probable bug for domains starting/ending in those chars.
                if '
' in str(i):
                    a = str(i).replace('\t','').replace('
','\n').replace('\n','').replace('','').split()
                    for i in a:
                        self.set_aizhan.add(i.strip('www.'))
                else:
                    try:
                        self.set_aizhan.add(i.string.strip().strip('www.'))
                    except:pass
                    continue
            return self.set_aizhan
        except:print '[-]aizhan:error'

    def aizhan2(self,url):
        """Email reverse-whois lookup via aizhan.com (follows the
        reverse-whois links found on the domain's overview page)."""
        try:
            print '[+]Email reverse checking'
            url = 'https://www.aizhan.com/cha/%s/'%url
            r = urllib2.urlopen(url).read()
            b = BeautifulSoup(r,'lxml')
            for i in b.find_all('a', target='_blank'):
                if 'reverse-whois' in str(i) and 'emailCode' in str(i):
                    urla = i['href']
                    r = urllib2.urlopen(urla).read()
                    b1 = BeautifulSoup(r,'lxml')
                    # Collect result links, skipping the ICP-filing footer link.
                    for a in b1.find_all('a', rel='nofollow'):
                        if 'www.miibeian.gov.cn' not in str(a):
                            self.set_aizhan2.add(a.string)
                    # Follow pagination links to further reverse-whois pages.
                    for x in b1.find_all('a'):
                        if 'whois.aizhan.com/reverse-whois' in str(x):
                            url1 = x['href']
                            url = urllib2.urlopen(url1)
                            # Throttle between paginated requests.
                            time.sleep(1)
                            b = BeautifulSoup(url.read(), 'lxml')
                            for q in b.find_all('a', rel='nofollow'):
                                if 'www.miibeian.gov.cn' not in str(q):
                                    self.set_aizhan2.add(q.string)
        except:print '[-]aizhan2:error'
        return self.set_aizhan2

    def aizhan3(self,url):
        """Registrant-name reverse-whois lookup via aizhan.com."""
        print '[+]registrant reverse checking'
        try:
            url = 'https://www.aizhan.com/cha/%s/'%url
            r = urllib2.urlopen(url).read()
            b = BeautifulSoup(r,'lxml')
            for i in b.find_all('a',target='_blank'):
                if 'reverse-whois' in str(i) and 'registrant' in str(i):
                    url1 = i['href']
                    r = requests.get(url1).text
                    b1 = BeautifulSoup(r,'lxml')
                    for a in b1.find_all('a',rel='nofollow'):
                        if 'www.miibeian.gov.cn' not in str(a):
                            self.set_aizhan3.add(a.string)
        except:print '[-]aizhan3:error'
        return self.set_aizhan3

    def bugscaner(self,url):
        """Email reverse lookup via whois.bugscaner.com."""
        print '[+]bugscaner Email reverse checking'
        try:
            rurl = 'http://whois.bugscaner.com/'
            r = requests.get(url=rurl + url).content
            b = BeautifulSoup(r, 'lxml')
            for i in b.find_all('a', class_='btn btn-success'):
                if 'email' in i['href']:
                    emailurl = rurl + i['href']
                    r1 = requests.get(emailurl).content
                    b1 = BeautifulSoup(r1, 'lxml')
                    tbody = b1.find_all('tbody')
                    # Keep only real domain links, not register/email nav links.
                    for url in BeautifulSoup(str(tbody), 'lxml').find_all('a'):
                        if '/register/' not in url['href'] and '/email/' not in url['href']:
                            self.set_bugscaner.add(url.string)
        except:print '[-]bugscaner error'
        return self.set_bugscaner
--------------------------------------------------------------------------------
/nbscan/scan_start.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding:utf-8 -*-
3 |
4 | import threading
5 | import Queue
6 | from root_domain import root
7 | from subdomain import subdomain
8 |
# Build the task queue shared by all worker threads (host:port probe tasks).
queue_ = Queue.Queue()
# Resolve a hostname to its IP address via the ip138.com lookup page.
def get_ip(url):
    """Return the IP string reported by ip138.com for `url`.

    Returns None when the page contains no matching <font> element.
    May raise on network errors (callers wrap this in try/except).

    BUG FIX: `requests` and `BeautifulSoup` were used without being
    imported anywhere in this module — imported locally here so the
    module's import-time dependencies are unchanged.
    """
    import requests
    from bs4 import BeautifulSoup
    url1 = "http://www.ip138.com/ips138.asp?ip=%s&action=2" % url
    r = requests.get(url1,timeout=1).content
    b = BeautifulSoup(r, 'lxml')
    for i in b.find_all('font'):
        if url in str(i):
            # The IP appears after a '>>' marker in the element's text.
            return i.string.split('>>')[1].strip()
# Worker-thread wrapper.
class MyThread(threading.Thread):
    """Thread that invokes a zero-argument callable when started."""

    def __init__(self, func):
        # Initialise the base Thread first, then remember the task to run.
        threading.Thread.__init__(self)
        self.func = func

    def run(self):
        # Called in the new thread by Thread.start().
        self.func()
26 |
def worker():
    """Drain host:port tasks from the shared queue and probe each one.

    Fetches the page title and resolves the host's IP as a liveness check.

    BUG FIXES: the original read from an undefined name `queue`
    (NameError) instead of `queue_`, and the empty()/get() pair could
    block forever when another thread grabbed the last task between the
    two calls — get_nowait() closes that race.

    NOTE(review): `title` and `ip` are computed but never stored —
    the persistence step mentioned in scan_start() is not implemented.
    """
    import requests
    from bs4 import BeautifulSoup
    global queue_
    while not queue_.empty():
        try:
            task = queue_.get_nowait()
        except Queue.Empty:
            # Another worker won the race for the last task; we are done.
            break
        url = task.split('+')[0]
        try:
            title = BeautifulSoup(requests.get('http://' + url, timeout=1).content, 'lxml').title
            ip = get_ip(url.split(':')[0])
            if title is None:
                title = 'None'
        except:
            # Best-effort probe: unreachable hosts are silently skipped.
            pass
39 |
def thread_start(subdomain_total):
    """Enqueue one probe task per (domain, port) pair and run 100 worker
    threads until the queue is drained.

    BUG FIX: tasks were built from an undefined name `url` (NameError);
    the loop variable is `domain`.
    """
    global queue_
    thread_count = 100
    threads = []
    # Common web ports to probe on every subdomain.
    ports = [80, 8080, 8000, 8081, 7001, 8089]
    for domain in list(subdomain_total):
        for port in ports:
            # One host:port task per combination.
            queue_.put(domain + ':' + str(port))
    for i in range(thread_count):
        # Spawn the worker pool.
        thread = MyThread(worker)
        thread.start()
        threads.append(thread)
    for thread in threads:
        # Block until every probe has finished before returning.
        thread.join()
57 |
58 |
def scan_start(url):
    """Full pipeline: find root assets for `url`, expand each root domain
    into subdomains, then probe every subdomain for liveness."""
    # Root assets discovered via the reverse-lookup interfaces.
    total_rootdomain = root().get_root_url(url)
    # Accumulate the subdomains of every root domain into a single set.
    subdomain_total = set()
    for item in list(total_rootdomain):
        subdomain_total |= subdomain(str(item)).get_subdomain()
    # Liveness probing is queue-driven and multithreaded for throughput.
    thread_start(subdomain_total)
    # TODO: persist the probe results to a database inside the workers.
71 |
72 |
73 |
# Script entry point: scan a sample target.
if __name__ == '__main__':
    url = 'uc.cn'
    scan_start(url)
--------------------------------------------------------------------------------
/nbscan/subdomain.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding:utf-8 -*-
3 | # 遍历根资产的子域名
4 |
5 | import sys
6 | import requests
7 | import threading
8 | import time,json,re
9 | from bs4 import BeautifulSoup
10 |
11 | class subdomain(object):
12 | def __init__(self,url):
13 | self.url = url
14 | self.set_dns = set()
15 | self.set_ip138 = set()
16 | self.set_crt = set()
17 | self.set_baidu = set()
18 | self.set_find = set()
19 |
20 | # 统一调用接口
21 | def get_subdomain(self):
22 | # 调用5个线程
23 | threads = []
24 | threads.append(threading.Thread(target=self.dns))
25 | threads.append(threading.Thread(target=self.ip138))
26 | threads.append(threading.Thread(target=self.crt))
27 | threads.append(threading.Thread(target=self.baidu))
28 | threads.append(threading.Thread(target=self.find))
29 | for i in threads:
30 | i.start()
31 | for i in threads:
32 | i.join()
33 | subdomain_total = self.set_dns | self.set_ip138 | self.set_crt | self.set_baidu | self.set_find
34 | return subdomain_total
35 |
36 | def dns(self):
37 | url = 'https://www.virustotal.com/vtapi/v2/domain/report?apikey=0ad3c01b1ff7952bc8cbb4370ef4a0c53201d2daffe113efb1d2fef484e16e58&domain=' + self.url
38 | try:
39 | r = requests.get(url)
40 | time.sleep(10)
41 | r_dict = json.loads(r.text)
42 | for i in r_dict['subdomains']:
43 | set_dns.add(i)
44 | print '[!]subdomain:'+str(len(dns_set))
45 | return set_dns
46 | except:
47 | print '[-]subdomains:error'
48 | return
49 | #virustotal dns
50 |
51 | def ip138(self):
52 | url1 = 'http://site.ip138.com/%s/domain.htm'%self.url
53 | try :
54 | r = requests.get(url1)
55 | b = BeautifulSoup(r.content,'lxml')
56 | for i in b.find_all('a',href=re.compile('%s'%self.url),target='_blank',rel=''):
57 | self.set_ip138.add(i.string)
58 | print '[!]ip138:'+ str(len(ip138_set))
59 | return self.set_ip138
60 | except:
61 | print '[-]IP137 interface failed'
62 | return
63 |
64 | #ip137 interface
65 |
66 | def crt(self):
67 | url1 = 'https://crt.sh/?q=%25.' + self.url
68 | try:
69 | r = requests.get(url1).content
70 | b = BeautifulSoup(r,'lxml')
71 | for i in b.find_all('td',class_='',style=''):
72 | if '' not in str(i) and '*.' not in str(i):
73 | self.set_crt.add(i.string)
74 | print '[!]crt:' + str(len(crt_set))
75 | return self.set_crt
76 | except:
77 | print '[-]crt interface failed'
78 | return
79 |
80 | def baidu(self):
81 | url_r = 'http://ce.baidu.com/index/getRelatedSites?site_address=%s' % self.url
82 | try:
83 | r = requests.get(url_r).content
84 | jr = json.loads(r)
85 | urls = jr['data']
86 | for url in urls:
87 | url = url['domain']
88 | self.set_baidu.add(url)
89 | print '[!]baidu:%s' % str(len(baidu_set))
90 | return self.set_baidu
91 | except:
92 | print 'Baidu interface failed'
93 | return
94 | def find(self):
95 | url = 'https://findsubdomains.com/subdomains-of/%s'%self.url
96 | try:
97 | r = requests.get(url).content
98 | b = BeautifulSoup(r, 'lxml')
99 | for c in b.find_all(attrs={'class': 'js-domain-name domains', 'class': 'domains js-domain-name'}):
100 | self.set_find.add(c.string.strip())
101 | print '[!]find:' + str(len(find_set))
102 | return self.set_find
103 | except:
104 | print '[-]find interface failed'
105 | return
--------------------------------------------------------------------------------
/ok.txt:
--------------------------------------------------------------------------------
1 | csu.edu.cn
2 | hnu.edu.cn
3 | usc.edu.cn
4 | csust.edu.cn
5 | xtu.edu.cn
6 | hnust.cn
7 | hunnu.edu.cn
8 | hut.edu.cn
9 | gotonudt.cn
10 | csuft.edu.cn
11 | hnist.cn
12 | jysd.com
13 | hnie.edu.cn
14 | hutb.edu.cn
15 | hnucm.edu.cn
16 | hunau.edu.cn
17 | csmu.edu.cn
18 | xnu.edu.cn
19 | ccsu.cn
20 | hnsyu.net
21 | huse.cn
22 | huas.cn
23 | hnit.edu.cn
24 | hnfnu.edu.cn
25 | bibibi.net
26 | hynu.cn
27 | hncu.net
28 | hieu.edu.cn
29 | hnmu.com.cn
30 | hhtc.edu.cn
31 | hnwu.edu.cn
32 | university-hr.cn
33 | hnjtzy.com.cn
34 | xtu.edu.cn
35 | csust.edu.cn
36 | xzyesf.cn
37 | csmzxy.com
38 | csuft.edu.cn
39 | cssf.cn
40 | hnpolice.com
41 | hnrpc.com
42 | hnswxy.com
43 | hnhgzy.com
44 | 58.20.171.67.
45 | hnucc.com
46 | bibibi.net
47 | nfdx.net
48 | hnmmc.cn
49 | hunnu.edu.cn
50 | hntky.com
51 | hnust.cn
52 | hnkjxy.net.cn
53 | cavtc.cn
54 | usc.edu.cn
55 | good-edu.cn
56 | cszyedu.cn
57 | hniit.edu.cn
58 | hunau.edu.cn
59 | hnjt.edu.cn
60 | zjjxy-zhxy.net
61 | hnist.cn
62 | hnucm.edu.cn
63 | hunangy.com
64 | huas.cn
65 | hntcmc.net
66 | hynu.cn
67 | hniu.cn
68 | hnsfjy.cn
69 | hnebp.edu.cn
70 | hnieyy.cn
71 | cdzy.cn
72 | hnyzzy.com
73 | hnyyyz.com
74 | hncpu.com
75 | cshbxy.com
76 | hnwmxy.com
77 | hycgy.com
78 | hnvc.net.cn
79 | university-hr.cn
80 | hnyyjsxy.com
81 | zjjhy.net
82 | hnyesf.com
83 | hnjdzy.net
84 | hnflc.cn
85 | cswszy.com
86 | cseptc.net
87 | arthn.com
88 | hnmeida.com.cn
89 | university-hr.cn
90 | hnlrzy.net
91 | hnlgzy.net
92 | yvtc.edu.cn
93 | hntyxy.net
94 | bibibi.net
95 | bxxy.com
96 | hngfxy.com
97 | hnslsdxy.com
98 | zzptc.com
99 | hnupc.com
100 | hnvist.cn
101 | hnydxy.com
102 | hnsoftedu.com
103 | hnyszy.com.cn
104 | hhvtc.com.cn
105 | hnshzy.cn
106 | dzkjxy.cn
107 | czzy-edu.com
108 | hnevc.com
109 | hnyzy.cn
110 | good-edu.cn
111 | hngsxy.com
112 | syzyedu.com
113 | jysd.com
114 | hnjd.net.cn
115 | sanyedu.com
116 | hnxxc.com
117 | 2823333.com
118 | xxmzy.org.cn
119 | hnxxjsxy.com
120 | hhsfgz.com
121 | bysjy.com.cn
122 | csysgz.com
123 | hyyesf.com
124 | hnyznc.com
125 | hngeelyedu.cn
126 | csttc.cn
127 |
--------------------------------------------------------------------------------
/target.txt:
--------------------------------------------------------------------------------
1 | zhaosheng.csu.edu.cn
2 | admi.hnu.edu.cn
3 | zsw.usc.edu.cn
4 | zs.csust.edu.cn
5 | zhaosheng.xtu.edu.cn
6 | zs.hnust.cn
7 | zsb.hunnu.edu.cn
8 | zsb.hut.edu.cn
9 | www.gotonudt.cn
10 | zs.csuft.edu.cn
11 | zjc.hnist.cn
12 | jsuzs.jysd.com
13 | www.hnie.edu.cn
14 | admi.hutb.edu.cn
15 | zhaosheng.hnucm.edu.cn
16 | zs.hunau.edu.cn
17 | www.csmu.edu.cn
18 | zs.xnu.edu.cn
19 | zsjy.ccsu.cn
20 | zsjy.hnsyu.net
21 | zs.huse.cn
22 | zj.huas.cn
23 | zs.hnit.edu.cn
24 | zhaosheng.hnfnu.edu.cn
25 | hncyzs.bibibi.net
26 | zs.hynu.cn
27 | zj.hncu.net
28 | zs.hieu.edu.cn
29 | zs.hnmu.com.cn
30 | zsb.hhtc.edu.cn
31 | zsc.hnwu.edu.cn
32 | hnrkuzs.university-hr.cn
33 | zsw.hnjtzy.com.cn
34 | xxxy.xtu.edu.cn
35 | www.csust.edu.cn
36 | www.xzyesf.cn
37 | zs.csmzxy.com
38 | swxy.csuft.edu.cn
39 | zsxx.cssf.cn
40 | www.hnpolice.com
41 | zs.hnrpc.com
42 | www.hnswxy.com
43 | www.hnhgzy.com
44 | 58.20.171.67:8164
45 | www.hnucc.com
46 | hngczyzs.bibibi.net
47 | www.nfdx.net
48 | www.hnmmc.cn:85
49 | sdw.hunnu.edu.cn
50 | zsw.hntky.com
51 | xxzs.hnust.cn
52 | zsb.hnkjxy.net.cn
53 | zs.cavtc.cn
54 | csxy.usc.edu.cn
55 | hnutdzs.good-edu.cn
56 | csjyc.cszyedu.cn
57 | zsb.hniit.edu.cn
58 | dfyzs.hunau.edu.cn
59 | zs.hnjt.edu.cn
60 | zs.zjjxy-zhxy.net
61 | zjc.hnist.cn
62 | xxxy.hnucm.edu.cn
63 | www.hunangy.com
64 | furzj.huas.cn
65 | zsxx.hntcmc.net
66 | nyzsjy.hynu.cn
67 | zs.hniu.cn
68 | zs.hnsfjy.cn
69 | zsw.hnebp.edu.cn
70 | www.hnieyy.cn
71 | www.cdzy.cn
72 | hnyzzy.com
73 | zsjy.hnyyyz.com
74 | zjc.hncpu.com
75 | zsxx.cshbxy.com
76 | www.hnwmxy.com
77 | zs.hycgy.com
78 | zsw.hnvc.net.cn
79 | hnbemczs.university-hr.cn
80 | zs.hnyyjsxy.com
81 | zs.zjjhy.net
82 | zsjyc.hnyesf.com
83 | zs.hnjdzy.net
84 | www.hnflc.cn
85 | zsw.cswszy.com
86 | zsxx.cseptc.net
87 | www.arthn.com
88 | zsjy.hnmeida.com.cn
89 | 56eduzs.university-hr.cn
90 | www.hnlrzy.net
91 | www.hnlgzy.net
92 | zsjy.yvtc.edu.cn
93 | www.hntyxy.net
94 | xtywzyzs.bibibi.net
95 | www.bxxy.com
96 | www.hngfxy.com
97 | www.hnslsdxy.com
98 | zs.zzptc.com
99 | www.hnupc.com
100 | zs.hnvist.cn
101 | www.hnydxy.com
102 | www.hnsoftedu.com:85
103 | zsxxw.hnyszy.com.cn
104 | zs.hhvtc.com.cn
105 | www.hnshzy.cn
106 | cn.dzkjxy.cn
107 | zs.czzy-edu.com
108 | www2.hnevc.com
109 | www.hnyzy.cn
110 | ldzy.good-edu.cn
111 | www.hngsxy.com
112 | www.syzyedu.com
113 | yyvtczs.jysd.com
114 | zsxx.hnjd.net.cn
115 | www.sanyedu.com
116 | www.hnxxc.com
117 | www.2823333.com
118 | zsc.xxmzy.org.cn
119 | www.hnxxjsxy.com
120 | hhsfgz.com
121 | xnyesf.bysjy.com.cn
122 | www.csysgz.com
123 | www.hyyesf.com
124 | zsjyzdc.hnyznc.com
125 | www.hngeelyedu.cn
126 | www.csttc.cn
127 |
--------------------------------------------------------------------------------