├── README.md
└── X-Fofa.py


/README.md:
--------------------------------------------------------------------------------
# X-Fofa
Requires a Fofa membership. Calls the Fofa API; fetching more than 10,000 results does not consume F-coins.


![image](https://user-images.githubusercontent.com/55974091/174513563-3e0592e3-7abc-4262-8075-cf808e7fca33.png)


### Usage
* python3 X-Fofa.py -q 'app = xxx' -o result.txt
* python3 X-Fofa.py -q 'app = xxx' -all 1 -o result.txt
--------------------------------------------------------------------------------
/X-Fofa.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import argparse
import base64
import datetime
import os
import sys

import requests

email = ''  # Fofa account email
key = ''    # Fofa API key

full = 'false'  # whether results older than one year are included; Fofa defaults to data from the past year


def remove_duplicates(output):
    print("[+] Removing duplicates...")
    seen = set()
    # read the raw results collected in log.log and write unique entries to the output file
    with open('log.log', 'r', encoding='utf-8') as f_read, open(output, 'w', encoding='utf-8') as f_write:
        for line in f_read:
            line = line.strip('\n')
            if line not in seen:
                f_write.write(line + '\n')
                seen.add(line)
    os.remove('log.log')
    print("[+] Duplicates removed")


def getFofa(page, size, search, output, fetch_all):
    with open('log.log', 'a+') as w:
        if fetch_all != '1':
            # single query: fetch one page of results
            search64 = base64.b64encode(search.encode('utf-8')).decode('utf-8')
            url = "https://fofa.info/api/v1/search/all?email={}&key={}&qbase64={}&page={}&size={}&full={}".format(email, key, search64, page, size, full)
            print("[+] Query: {}".format(search))
            print(url)
            res = requests.get(url).json()

            for entry in res["results"]:
                host = entry[0]
                if not host.startswith('http'):
                    host = 'http://{}'.format(host)
                w.write('{}\n'.format(host))
        else:
            # fetch everything: probe the total size first, then split by date if needed
            size = 10000
            search64 = base64.b64encode(search.encode('utf-8')).decode('utf-8')
            url = "https://fofa.info/api/v1/search/all?email={}&key={}&qbase64={}&page={}&size={}&full={}".format(email, key, search64, page, size, full)
            res = requests.get(url).json()

            if res["size"] > 10000:
                if 'after' in search or 'before' in search:
                    print("[!] The all option is set and the query matches more than 10,000 results; do not use the after/before date keywords in the query")
                    sys.exit()
                print("[+] More than 10,000 results; to avoid spending F-coins, fetching the last 90 days in daily batches using the date syntax:")

                now_time = datetime.datetime.now()

                for i in range(1, 92):
                    # walk backwards one day at a time; duplicates across batches are removed at the end
                    before_time = now_time + datetime.timedelta(days=-i + 2)
                    before_time_nyr = before_time.strftime('%Y-%m-%d')

                    searchTime = search + ' && before = "{}"'.format(before_time_nyr)
                    search64 = base64.b64encode(searchTime.encode('utf-8')).decode('utf-8')
                    url = "https://fofa.info/api/v1/search/all?email={}&key={}&qbase64={}&page={}&size={}&full={}".format(email, key, search64, page, size, full)
                    print("[+] before: {}".format(before_time_nyr))
                    print("[+] Query: {}".format(searchTime))
                    print(url)
                    res = requests.get(url).json()
                    for entry in res["results"]:
                        host = entry[0]
                        if not host.startswith('http'):
                            host = 'http://{}'.format(host)
                        w.write('{}\n'.format(host))
            else:
                # 10,000 results or fewer: the probe query already returned everything
                print("[+] Query: {}".format(search))
                print(url)
                for entry in res["results"]:
                    host = entry[0]
                    if not host.startswith('http'):
                        host = 'http://{}'.format(host)
                    w.write('{}\n'.format(host))

    remove_duplicates(output)

    print("File saved as {}/{}".format(os.getcwd(), output))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="python3 X-Fofa.py -q 'app = xxx' -p 3 -o result.txt")
    parser.add_argument('-q', '--query', default='', help="Fofa search query, e.g. 'app = xxx'")
    parser.add_argument('-p', '--page', default='1', help="page number (default: 1)")
    parser.add_argument('-s', '--size', default='10000', help="results per page (default: 10000)")
    parser.add_argument('-all', '--all', default='False', help="set to 1 to fetch all results; queries over 10,000 hits are split by date")
    parser.add_argument('-o', '--output', default='result.txt', help="output file (default: result.txt)")
    args = parser.parse_args()

    getFofa(args.page, args.size, args.query, args.output, args.all)
--------------------------------------------------------------------------------
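For reference, everything X-Fofa.py does reduces to one HTTP call: base64-encode the Fofa query and GET `https://fofa.info/api/v1/search/all` with the `email`, `key`, `qbase64`, `page`, `size`, and `full` parameters, then read the host out of each result row. The sketch below shows that call in isolation. It is a minimal sketch, not part of the tool: the credentials are placeholders, error handling is intentionally omitted, and it assumes (as X-Fofa.py does) that the first column of each result row is the host.

```python
# Minimal sketch of the single API call X-Fofa.py builds.
# EMAIL/KEY are placeholder credentials you must replace.
import base64
import requests

EMAIL = "you@example.com"
KEY = "your-fofa-api-key"

def fofa_search(query, page=1, size=100, full="false"):
    """Run one query against Fofa's search endpoint and return the parsed JSON."""
    qbase64 = base64.b64encode(query.encode("utf-8")).decode("utf-8")
    url = ("https://fofa.info/api/v1/search/all"
           "?email={}&key={}&qbase64={}&page={}&size={}&full={}").format(
               EMAIL, KEY, qbase64, page, size, full)
    return requests.get(url).json()

if __name__ == "__main__":
    res = fofa_search('app = "xxx"', size=10)
    # As in X-Fofa.py, the first column of each result row is treated as the host.
    for row in res.get("results", []):
        host = row[0]
        if not host.startswith("http"):
            host = "http://" + host
        print(host)
```

The `-all 1` mode simply repeats this call day by day with `&& before = "YYYY-MM-DD"` appended to the query and relies on remove_duplicates() to collapse the overlap between batches.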