├── email
│   ├── requirements.txt
│   └── README.md
├── infect.sh
├── doc
│   └── README.md
├── web
│   ├── httpscan
│   │   ├── requirements.txt
│   │   ├── signatures
│   │   │   ├── example.sig
│   │   │   └── gophish.sig
│   │   └── README.md
│   ├── fun_files.txt
│   ├── plot_response_time.sh
│   ├── headers.py
│   ├── check_fastest_cache_version.py
│   ├── README.md
│   ├── cmsdetect.py
│   ├── is_wp.py
│   ├── http_test_methods.py
│   ├── webscan.py
│   ├── waf
│   │   └── waf_test_keywords.py
│   ├── http_test.py
│   ├── watch_response_time.py
│   ├── check_fun_files.py
│   ├── proxychecker.py
│   └── phishingkits.txt
├── visualization
│   ├── README.md
│   ├── heatmap.py
│   └── timeline_bar.py
├── ghidra_scripts
│   ├── README.md
│   └── yaracrypto.py
├── shorturls
│   └── README.md
├── censys
│   ├── censyslib.py
│   ├── censysdiff.py
│   ├── censysipentries.py
│   ├── censyssearch.py
│   └── censysip.py
├── pt
│   ├── README.md
│   ├── get_quota.py
│   ├── get_project_iocs.py
│   ├── get_subdomains.py
│   ├── get_ip_domains.py
│   ├── get_hashes.py
│   ├── get_osint.py
│   └── domains_on_iplist.py
├── macos
│   ├── macho_print_lief.py
│   ├── README.md
│   ├── extract_kext_kk.py
│   ├── check_kext_kk.py
│   ├── macho_rename_section.py
│   ├── symhash.py
│   └── macho_print.py
├── shodan
│   ├── README.md
│   ├── extractips.py
│   ├── shodan_cobaltstrike.py
│   ├── rawshodan.py
│   ├── shodan_ssh_history.py
│   └── shodanhistory.py
├── format
│   ├── iprange.py
│   ├── ipconvert.py
│   ├── punycode.py
│   ├── README.md
│   ├── countbytes.py
│   ├── extract_ttld.py
│   ├── unxor.py
│   ├── csv2md.py
│   ├── csvcut.py
│   ├── parsezip.py
│   ├── fixtar.py
│   └── parsetar.py
├── network
│   ├── cidr_range.py
│   ├── README.md
│   ├── test_website.py
│   ├── check_umbrella.py
│   ├── check_ripe_last_route.py
│   ├── cidr_reduce.py
│   ├── list_mullvad_ips.py
│   ├── dns_resolve.py
│   ├── checkpoint_banner.py
│   └── dns_resolve_mx.py
├── osint
│   ├── README.md
│   ├── protonkey.py
│   ├── truecaller.py
│   └── google_doc_info.py
├── ooni
│   ├── README.md
│   ├── download_measurements.py
│   └── get_ooni_telegram.py
├── resources
│   └── README.md
├── iocs
│   └── extract_hashes.py
├── elf
│   ├── miasm_sandbox.py
│   └── unxor.py
├── twilio
│   └── read_sms.py
├── threats
│   ├── README.md
│   ├── quad9.py
│   ├── pithus_send.py
│   ├── cobaltstrike_decode.py
│   └── urlscan.py
├── pe
│   ├── count_zero.py
│   ├── unxor.py
│   ├── build_shellcode_pe_elfesteem.py
│   ├── update_ep.py
│   ├── build_shellcode_pe.py
│   ├── get_imphash.py
│   ├── extract_sig.py
│   ├── pesearch.py
│   ├── extract_sections_python.py
│   ├── common_strings.py
│   ├── README.md
│   ├── print_signature.py
│   ├── checkpesize.py
│   ├── petimeline.py
│   └── get_richheaderhash.py
├── unxor.py
├── csv_extract.py
├── misp
│   ├── misplib.py
│   ├── misp2sig.py
│   ├── xsearch_misp.py
│   ├── yaraxcheck.py
│   ├── README.md
│   └── mispcopy.py
├── forensic
│   ├── README.md
│   ├── filetimeline.py
│   ├── extract_chrome_history.py
│   └── ios_unpack.py
├── mqtt-get.py
├── disassemble.py
├── .gitignore
├── android
│   ├── dexofuzzy2gephi.py
│   ├── yaradex.py
│   ├── README.md
│   ├── extract_rsrc_strings.py
│   ├── koodous_search.py
│   ├── get_dex.py
│   ├── is_obfuscated.py
│   ├── extract_firebase.py
│   ├── get_package_name.py
│   ├── print_frosting.py
│   ├── get_method_code.py
│   ├── get_certificate.py
│   ├── koodous_tag.py
│   └── download_androguard_report.py
├── certs
│   ├── get_crtsh_subdomains.py
│   └── listcerts.py
├── hostnametoips.py
├── goo.gl
│   ├── README.md
│   └── api.py
├── harpoon-extra
│   └── domain_location.py
├── miasm
│   ├── simu_sc_linux64.py
│   └── simu_sc_linux.py
├── twitter
│   └── graph-followers.py
├── cloudfare_certs.py
└── cloudcidrs.py
/email/requirements.txt:
--------------------------------------------------------------------------------
1 | eml_parser[filemagic]
2 |
--------------------------------------------------------------------------------
/infect.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | name=$(md5sum "$1" | cut -d ' ' -f 1)
3 | 7z a "$name.zip" "$1" -pinfected
4 |
--------------------------------------------------------------------------------
/doc/README.md:
--------------------------------------------------------------------------------
1 | # doc
2 |
3 | * `check_docx.py` : check for weird stuff in docx files (see the sketch below)
4 |
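5 | Docx files are zip archives, so a quick check for suspicious content can be done with the standard library alone. A minimal sketch of the idea (hypothetical, not the actual `check_docx.py`):
6 |
7 | ```python
8 | import sys
9 | import zipfile
10 |
11 | # VBA macros live in word/vbaProject.bin; external template/URL
12 | # references show up in the .rels relationship files
13 | with zipfile.ZipFile(sys.argv[1]) as z:
14 |     names = z.namelist()
15 |     if "word/vbaProject.bin" in names:
16 |         print("Contains VBA macros (word/vbaProject.bin)")
17 |     for name in names:
18 |         if name.endswith(".rels") and b"http" in z.read(name):
19 |             print("{} references an external URL".format(name))
20 | ```
21 |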
--------------------------------------------------------------------------------
/web/httpscan/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | PyYAML
3 | cryptography
4 | beautifulsoup4
5 | lxml
6 |
--------------------------------------------------------------------------------
/visualization/README.md:
--------------------------------------------------------------------------------
1 | # Visualizations
2 |
3 | * `heatmap.py` : Create a calendar heatmap of events from a csv file
4 | * `ip_map.py` : Create a world map of origin of IP addresses
5 |
--------------------------------------------------------------------------------
/email/README.md:
--------------------------------------------------------------------------------
1 | # Email stuff
2 |
3 | * `parseeml_old.py` : extract parts of an email (not parsing headers yet)
4 | * `parseeml.py` : extract parts of an email (see the sketch below)
5 |
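6 | For reference, part extraction can be done with the standard library alone. A minimal sketch (not the actual `parseeml.py`):
7 |
8 | ```python
9 | import sys
10 | import email
11 | from email import policy
12 |
13 | # Parse an .eml file and dump every non-multipart MIME part to disk
14 | with open(sys.argv[1], "rb") as f:
15 |     msg = email.message_from_binary_file(f, policy=policy.default)
16 |
17 | for i, part in enumerate(msg.walk()):
18 |     if part.get_content_maintype() == "multipart":
19 |         continue
20 |     name = part.get_filename() or "part{}.bin".format(i)
21 |     payload = part.get_payload(decode=True)
22 |     if payload:
23 |         with open(name, "wb") as out:
24 |             out.write(payload)
25 |         print("{} ({})".format(name, part.get_content_type()))
26 | ```
27 |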
--------------------------------------------------------------------------------
/web/httpscan/signatures/example.sig:
--------------------------------------------------------------------------------
1 | ---
2 | # Each signature lists HTTP tests (path to request, expected status
3 | # code and/or content) and a condition ("all" = every test must match)
4 | examplesig:
5 |   tests:
6 |     - name: index
7 |       path: index.html
8 |       code: 200
9 |       content: ""
10 |   condition: all
11 |
--------------------------------------------------------------------------------
/ghidra_scripts/README.md:
--------------------------------------------------------------------------------
1 | # Ghidra scripts
2 |
3 | * `yaracrypto.py` : identifies cryptographic constants using yara, version taken from [ghidraninja](https://github.com/ghidraninja/ghidra_scripts) and fixed. Requires yara in the path and `yara-crypto.yar` in the same folder
4 |
--------------------------------------------------------------------------------
/shorturls/README.md:
--------------------------------------------------------------------------------
1 | # Short Urls
2 |
3 | * `bitly.py` : Tool to request the Bitly API (see documentation [here](https://dev.bitly.com/api.html)). Works with python 2 and python 3. (API is deprecated)
4 | * `urlteamdl.py` : download archives from [URL Team](https://archive.org/details/UrlteamWebCrawls?&sort=-publicdate&page=3)
5 |
6 |
--------------------------------------------------------------------------------
/censys/censyslib.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import configparser
3 | import os
4 |
5 | def get_apikey():
6 |     """Parse the configuration file, return the (id, secret) API key pair"""
7 |     config = configparser.ConfigParser()
8 |     config.read(os.path.join(os.path.expanduser("~"), ".censys"))
9 |     return (config.get('Censys', 'id'), config.get('Censys', 'secret'))
10 |
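11 | # Expected ~/.censys configuration file (assumed format, matching the
12 | # config.get() calls above):
13 | #
14 | # [Censys]
15 | # id = YOUR_API_ID
16 | # secret = YOUR_API_SECRET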
--------------------------------------------------------------------------------
/pt/README.md:
--------------------------------------------------------------------------------
1 | # PassiveTotal scripts
2 |
3 | * `domains_on_iplist.py` : Get all domains for a list of IPs and confirm that they still resolve on these IPs
4 | * `get_ip_domains.py` : extract all the domains for an IP address
5 | * `get_project_iocs.py` : list the indicators stored in a PassiveTotal project
6 | * `get_subdomains.py` : list subdomains of a domain
7 | * `get_quota.py` : get PT quota
8 | * `get_hashes.py` : get hashes for a list of domains
9 |
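10 | All these scripts read credentials from `~/.config/passivetotal/api_config.json`, a JSON file of the form (keys as read by the scripts):
11 |
12 | ```json
13 | {
14 |     "username": "EMAIL",
15 |     "api_key": "APIKEY"
16 | }
17 | ```
18 |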
--------------------------------------------------------------------------------
/macos/macho_print_lief.py:
--------------------------------------------------------------------------------
1 | import lief
2 | import argparse
3 |
4 |
5 | if __name__ == '__main__':
6 |     parser = argparse.ArgumentParser(description='Print Mach-O information')
7 |     parser.add_argument('MACHO', help='Mach-O file')
8 |     args = parser.parse_args()
9 |
10 |     binary = lief.parse(args.MACHO)
11 |     print(binary)
12 |
--------------------------------------------------------------------------------
/shodan/README.md:
--------------------------------------------------------------------------------
1 | # Shodan scripts
2 |
3 | https://www.shodan.io/
4 |
5 | * `rawshodan.py` : Show summary or full raw shodan data
6 | * `shodanhistory.py` : Show shodan historical data for ports 22, 80 and 443.
7 | * `shodan_ssh_history.py` : build a history of the IP based on ssh fingerprint
8 | * `shodan_cobaltstrike.py` : download CobaltStrike servers using JARM fingerprint
9 |
--------------------------------------------------------------------------------
/format/iprange.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import ipaddress
3 |
4 |
5 | if __name__ == "__main__":
6 |     parser = argparse.ArgumentParser(description='Convert IP range to IPs')
7 |     parser.add_argument('IPRANGE', help='IP range like 192.168.1.0/24')
8 |     args = parser.parse_args()
9 |
10 |     for ip in ipaddress.ip_network(args.IPRANGE, False).hosts():
11 |         print(ip)
12 |
--------------------------------------------------------------------------------
/macos/README.md:
--------------------------------------------------------------------------------
1 | # Mac OS stuff
2 |
3 | * `check_kext_kk.py` : analyse kext and KnockKnock files
4 | * `extract_kext_kk.py`: extract SHA1 from kext and KnockKnock files
5 | * `macho_print_lief.py` : print raw data from LIEF
6 | * `macho_print.py` : print information on a Mach-O file
7 | * `macho_rename_section.py` : rename a Mach-O section
8 | * `symhash.py` : compute symhash of a Mach-O file (see the sketch below)
9 |
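10 | As background, a symhash is a hash of a Mach-O's imported symbol names (the Mach-O equivalent of imphash). A rough sketch of the idea with LIEF (details may differ from `symhash.py`):
11 |
12 | ```python
13 | import hashlib
14 | import sys
15 | import lief
16 |
17 | # Hash the comma-joined list of imported symbol names
18 | binary = lief.parse(sys.argv[1])
19 | names = [s.name for s in binary.imported_symbols]
20 | print(hashlib.md5(",".join(names).encode()).hexdigest())
21 | ```
22 |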
--------------------------------------------------------------------------------
/web/fun_files.txt:
--------------------------------------------------------------------------------
1 | humans.txt
2 | robots.txt
3 | sitemap.xml
4 | clientaccesspolicy.xml
5 | admin
6 | .DS_Store
7 | .git
8 | .svn
9 | .htaccess
10 | .htaccess~
11 | .htpasswd
12 | htpasswd
13 | .htpasswd~
14 | htaccess
15 | .gitignore
16 | log
17 | logs
18 | log.txt
19 | logs.txt
20 | index.html
21 | index.html~
22 | index.php
23 | index.php~
24 | .well-known
25 | .sftpConfig.json
26 |
--------------------------------------------------------------------------------
/network/cidr_range.py:
--------------------------------------------------------------------------------
1 | import ipaddress
2 | import argparse
3 |
4 | if __name__ == "__main__":
5 |     parser = argparse.ArgumentParser(description="Show first and last IP address in an IP range")
6 |     parser.add_argument("IPRANGE", help="IP range")
7 |     args = parser.parse_args()
8 |
9 |     l = list(ipaddress.ip_network(args.IPRANGE, False))
10 |     print(l[0])
11 |     print(l[-1])
12 |
--------------------------------------------------------------------------------
/osint/README.md:
--------------------------------------------------------------------------------
1 | # OSINT scripts
2 |
3 | * `google_doc_info.py`: Get information about a Google document
4 | * `protonkey.py`: Get information on the protonmail key and creation date
5 | * `waybackimages.py` : Download images from the wayback machine and extract exif information. Script written by Justin Seitz for [AutomatingOSINT.com](http://www.automatingosint.com/blog/2016/12/vacuuming-image-metadata-from-the-wayback-machine/)
6 |
--------------------------------------------------------------------------------
/web/httpscan/signatures/gophish.sig:
--------------------------------------------------------------------------------
1 | ---
2 | gophish:
3 |   tests:
4 |     - name: homepage
5 |       path: /
6 |       content: "404 page not found"
7 |       code: 404
8 |     - name: static
9 |       path: /static/
10 |       code: 200
11 |     - name: gitignore
12 |       path: /static/.gitignore
13 |       code: 200
14 |       content: "!.gitignore"
15 |   condition: all
16 |
--------------------------------------------------------------------------------
/ooni/README.md:
--------------------------------------------------------------------------------
1 | # OONI scripts
2 |
3 | * `download_measurements.py`: download raw measurements from OONI (maybe buggy, [oonidata](https://github.com/ooni/data) is an alternative that is likely more reliable); see the example below
4 | * `get_ooni_signal_status.py`: provides data on recent Signal measurements
5 | * `get_ooni_telegram.py`: provides data on recent Telegram measurements
6 | * `get_ooni_website_status.py`: provides data on recent website measurements
7 |
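8 | The OONI API can also be queried directly; a minimal sketch of listing recent measurements (endpoint from api.ooni.io, parameters may have changed):
9 |
10 | ```python
11 | import requests
12 |
13 | # List recent OONI measurements for a domain
14 | r = requests.get(
15 |     "https://api.ooni.io/api/v1/measurements",
16 |     params={"domain": "telegram.org", "limit": 10},
17 | )
18 | for m in r.json()["results"]:
19 |     print(m["measurement_start_time"], m["probe_cc"], m["anomaly"])
20 | ```
21 |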
--------------------------------------------------------------------------------
/format/ipconvert.py:
--------------------------------------------------------------------------------
1 | import ipaddress
2 | import argparse
3 |
4 | if __name__ == '__main__':
5 |     parser = argparse.ArgumentParser(description='Process an IP')
6 |     parser.add_argument('IP')
7 |     args = parser.parse_args()
8 |
9 |     if "." in args.IP:
10 |         ip = ipaddress.IPv4Address(args.IP)
11 |         print(int(ip))
12 |     else:
13 |         ip = ipaddress.IPv4Address(int(args.IP))
14 |         print(str(ip))
15 |
--------------------------------------------------------------------------------
/censys/censysdiff.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import argparse
4 | import json
5 | import requests
6 | from censyslib import *
7 |
8 |
9 | def get_view(key, ip):
10 |     # Legacy Censys v1 view endpoint (assumed, matching the era of this script)
11 |     r = requests.get('https://censys.io/api/v1/view/ipv4/{}'.format(ip), auth=key)
12 |     return r.json()
13 |
14 |
15 | if __name__ == '__main__':
16 |     parser = argparse.ArgumentParser(description='Diff two IPs in Censys')
17 |     parser.add_argument('IP1', help='IP1')
18 |     parser.add_argument('IP2', help='IP2')
19 |     args = parser.parse_args()
20 |
21 |     key = get_apikey()
22 |     data1 = get_view(key, args.IP1)
23 |     data2 = get_view(key, args.IP2)
24 |
25 |     # Naive diff: compare top-level keys of the two records
26 |     for k in sorted(set(data1) | set(data2)):
27 |         if k not in data1:
28 |             print("{}: only in {}".format(k, args.IP2))
29 |         elif k not in data2:
30 |             print("{}: only in {}".format(k, args.IP1))
31 |         elif data1[k] != data2[k]:
32 |             print("{}: differs".format(k))
33 |
--------------------------------------------------------------------------------
/format/punycode.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 |
4 |
5 | if __name__ == "__main__":
6 |     parser = argparse.ArgumentParser(description='Convert punycode domains')
7 |     parser.add_argument('DOMAIN', help='DOMAIN to be converted')
8 |     args = parser.parse_args()
9 |
10 |     if args.DOMAIN.startswith("xn--"):
11 |         print(args.DOMAIN.encode('ascii').decode('idna'))
12 |     else:
13 |         print(args.DOMAIN.encode('idna').decode('ascii'))
14 |
--------------------------------------------------------------------------------
/resources/README.md:
--------------------------------------------------------------------------------
1 | # Resources
2 |
3 | Interesting resources for analysts :
4 |
5 | * drivers.csv : List of legitimate Windows drivers taken from [NSA ShadowBrokers files](https://www.randhome.io/blog/2017/04/14/shadowbrokers-easter-gift/)
6 | * processes.csv : List of process names for a lot of programs from [NSA ShadowBrokers files](https://www.randhome.io/blog/2017/04/14/shadowbrokers-easter-gift/) (including hash of NSA tools renamed from "SAFE" to "NSA_TOOLS")
7 |
--------------------------------------------------------------------------------
/format/README.md:
--------------------------------------------------------------------------------
1 | # Format
2 |
3 | * `countbytes.py` : count number of bytes per value in a file
4 | * `csv2md.py` : Convert csv to markdown files (see the example below)
5 | * `csvcut.py` : Get a column from a CSV file
6 | * `extract_ttld.py` : extract (badly /!\) TLDs from a list of domains
7 | * `fixtar.py` : tries to repair a broken TAR archive
8 | * `ipconvert.py` : convert an IPv4 address to and from its integer form
9 | * `iprange.py` : list IPs in an IP range
10 | * `parsetar.py` : parse a TAR file
11 | * `punycode.py` : encode or decode punycode domains
12 | * `unxor.py` : XOR a string with all 256 single-byte keys
13 |
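14 | For example, `csv2md.py` turns each CSV row into a Markdown table row, with a separator line after the header:
15 |
16 | ```
17 | |name|value|
18 | |:---------------------|:---------------------|
19 | |foo|1|
20 | |bar|2|
21 | ```
22 |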
--------------------------------------------------------------------------------
/format/countbytes.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | if __name__ == '__main__':
4 |     parser = argparse.ArgumentParser(description='Count number of bytes per values in a file')
5 |     parser.add_argument('FILE', help='A file, any file')
6 |     args = parser.parse_args()
7 |
8 |     values = [0]*256
9 |
10 |     with open(args.FILE, 'rb') as f:
11 |         data = f.read()
12 |         for d in data:
13 |             values[d] += 1
14 |
15 |     for i, d in enumerate(values, start=0):
16 |         print("0x{:02x} - {}".format(i, d))
17 |
--------------------------------------------------------------------------------
/censys/censysipentries.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import censys.query
4 | from censyslib import *
5 |
6 | def get_ipv4records(cc):
7 |     series = cc.get_series_details("ipv4")
8 |     return sorted(series["tables"])[::-1]
9 |
10 |
11 | if __name__ == '__main__':
12 |     key = get_apikey()
13 |
14 |     cc = censys.query.CensysQuery(api_id=key[0], api_secret=key[1])
15 |
16 |     for table in get_ipv4records(cc):
17 |         print(table)
18 |
--------------------------------------------------------------------------------
/web/plot_response_time.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | if [ $# -eq 0 ]
3 | then
4 |     echo "No file given"
5 |     exit 1
6 | fi
7 |
8 | if [ ! -f "$1" ]
9 | then
10 |     echo "Bad file"
11 |     exit 1
12 | fi
13 |
14 | file=$1
15 | name=${file%.*}
16 |
17 | cat << __EOF | gnuplot
18 | set xdata time
19 | set key box
20 | set xlabel 'Time'
21 | set ylabel 'Response Time' font 'Arial,12'
22 | set autoscale
23 | set timefmt "%Y-%m-%d %H:%M:%S"
24 | set term png
25 | set offsets 0, 0, 1, 0
26 | set output 'plot1.png'
27 | plot '$file' using 1:4 title '$name' with linespoints
28 | __EOF
29 |
--------------------------------------------------------------------------------
/iocs/extract_hashes.py:
--------------------------------------------------------------------------------
1 | import re
2 | import argparse
3 |
4 |
5 | if __name__ == "__main__":
6 |     parser = argparse.ArgumentParser(description='Extract SHA256 hashes from a file')
7 |     parser.add_argument('FILE', help="File to search for hashes")
8 |     args = parser.parse_args()
9 |
10 |     with open(args.FILE) as f:
11 |         data = f.read().split("\n")
12 |
13 |     hashes = set()
14 |
15 |     for d in data:
16 |         # SHA256 hashes: 64 hex characters
17 |         for r in re.findall("[0-9a-fA-F]{64}", d):
18 |             hashes.add(r)
19 |
20 |     for h in hashes:
21 |         print(h)
22 |
--------------------------------------------------------------------------------
/elf/miasm_sandbox.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pdb import pm
3 | from miasm.analysis.sandbox import Sandbox_Linux_x86_64
4 | from miasm.jitter.jitload import log_func
5 |
6 | # Insert here user defined methods
7 |
8 | # Parse arguments
9 | parser = Sandbox_Linux_x86_64.parser(description="ELF sandboxer")
10 | parser.add_argument("filename", help="ELF Filename")
11 | options = parser.parse_args()
12 |
13 | # Create sandbox
14 | sb = Sandbox_Linux_x86_64(options.filename, options, globals())
15 |
16 | log_func.setLevel(logging.ERROR)
17 |
18 | # Run
19 | sb.run()
20 |
21 | assert(sb.jitter.run is False)
22 |
--------------------------------------------------------------------------------
/twilio/read_sms.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from twilio.rest import Client
3 |
4 |
5 | if __name__ == '__main__':
6 |     parser = argparse.ArgumentParser()
7 |     parser.add_argument("ACCOUNT_SID")
8 |     parser.add_argument("AUTH_TOKEN")
9 |     args = parser.parse_args()
10 |
11 |     client = Client(args.ACCOUNT_SID, args.AUTH_TOKEN)
12 |
13 |     messages = client.messages.list(limit=20)
14 |
15 |     print("{} messages retrieved".format(len(messages)))
16 |     for record in messages:
17 |         print("{} -> {} : {}".format(
18 |             record.from_,
19 |             record.to,
20 |             record.body
21 |         ))
22 |
--------------------------------------------------------------------------------
/pt/get_quota.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import os
4 | import sys
5 | import json
6 |
7 | if __name__ == '__main__':
8 |     conf_file = os.path.join(os.path.expanduser("~"), ".config/passivetotal/api_config.json")
9 |     if os.path.isfile(conf_file):
10 |         with open(conf_file, 'r') as f:
11 |             conf = json.loads(f.read())
12 |     else:
13 |         print('No config file')
14 |         sys.exit(1)
15 |
16 |     auth = (conf['username'], conf['api_key'])
17 |     r = requests.get('https://api.passivetotal.org/v2/account/quota', auth=auth)
18 |     print(json.dumps(r.json(), indent=4, sort_keys=True))
19 |
--------------------------------------------------------------------------------
/threats/README.md:
--------------------------------------------------------------------------------
1 | # Threat stuff
2 |
3 | * `quad9.py` : Check if a domain is blocked by [Quad9](https://quad9.net/) (you can test with `isitblocked.org`)
4 | * `urlscan.py` : Query [urlscan.io](https://urlscan.io/) (see the example below)
5 | * `cobaltstrike_config.py` : extract the configuration of a Cobalt Strike payload (from [Amnesty repo](https://github.com/AmnestyTech/investigations/tree/master/2020-09-25_finfisher))
6 | * `cobaltstrike_decode.py` : decode an obfuscated Cobalt Strike payload (from [Amnesty repo](https://github.com/AmnestyTech/investigations/tree/master/2020-09-25_finfisher))
7 | * `pithus_send.py`: send an APK to [Pithus](https://beta.pithus.org/)
8 |
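9 | For context, the urlscan.io search API is a simple unauthenticated GET; a minimal sketch of the kind of query `urlscan.py` wraps (endpoint from urlscan.io docs):
10 |
11 | ```python
12 | import requests
13 |
14 | # Search urlscan.io for scans of a domain
15 | r = requests.get(
16 |     "https://urlscan.io/api/v1/search/",
17 |     params={"q": "domain:example.org"},
18 | )
19 | for res in r.json()["results"]:
20 |     print(res["task"]["url"], res["task"]["time"])
21 | ```
22 |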
--------------------------------------------------------------------------------
/format/extract_ttld.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 |
4 | # Extract TTLD from a list of domains
5 |
6 | if __name__ == '__main__':
7 |     parser = argparse.ArgumentParser(description='Extract TTLDs from a list of domains')
8 |     parser.add_argument('FILE', help='File containing a list of domains')
9 |     args = parser.parse_args()
10 |
11 |     with open(args.FILE, 'r') as f:
12 |         data = f.read().split("\n")
13 |
14 |     ttlds = set()
15 |
16 |     for d in data:
17 |         if d.strip() != "":
18 |             ttlds.add(".".join(d.strip().split(".")[-2:]))
19 |
20 |     for ttld in ttlds:
21 |         print(ttld)
22 |
--------------------------------------------------------------------------------
/pe/count_zero.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import lief
3 |
4 |
5 | if __name__ == '__main__':
6 |     parser = argparse.ArgumentParser(description='Count zero bytes in each section of a PE file')
7 |     parser.add_argument('PE', help='PE file')
8 |     args = parser.parse_args()
9 |
10 |     binary = lief.parse(args.PE)
11 |
12 |     for i, s in enumerate(binary.sections):
13 |         c = s.content.count(0)
14 |         print("{} - {} - {} zeros (total {} - {:.2f}%)".format(
15 |             i,
16 |             s.name,
17 |             c,
18 |             s.size,
19 |             (c/s.size)*100
20 |         ))
21 |
--------------------------------------------------------------------------------
/unxor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | if __name__ == '__main__':
5 |     parser = argparse.ArgumentParser(description='Xor a file')
6 |     parser.add_argument('FILE', help='A file')
7 |     parser.add_argument('VALUE', help='Xor value')
8 |     args = parser.parse_args()
9 |
10 |     with open(args.FILE, "rb") as f:
11 |         data = f.read()
12 |
13 |     if args.VALUE.startswith("0x"):
14 |         value = int(args.VALUE, 16)
15 |     else:
16 |         value = int(args.VALUE)
17 |
18 |     res = bytearray()
19 |     for d in data:
20 |         res.append(d ^ value)
21 |
22 |     with open("a.out", "wb+") as f:
23 |         f.write(res)
24 |
--------------------------------------------------------------------------------
/macos/extract_kext_kk.py:
--------------------------------------------------------------------------------
1 | import json
2 | import argparse
3 |
4 | if __name__ == "__main__":
5 |     parser = argparse.ArgumentParser(description='Extract SHA1 from kext and KnockKnock files')
6 |     parser.add_argument('JSONFILE', help='JSON File saved by kext or knock knock')
7 |     args = parser.parse_args()
8 |
9 |     with open(args.JSONFILE) as f:
10 |         data = json.loads(f.read())
11 |
12 |     hashes = set()
13 |     for k in data.keys():
14 |         for l in data[k]:
15 |             if "hashes" in l.keys():
16 |                 if 'sha1' in l['hashes']:
17 |                     hashes.add(l['hashes']['sha1'])
18 |
19 |     for l in hashes:
20 |         print(l)
21 |
--------------------------------------------------------------------------------
/format/unxor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import base64
3 |
4 |
5 | if __name__ == "__main__":
6 |     parser = argparse.ArgumentParser(description='XOR a string with all 256 possibilities')
7 |     parser.add_argument('STRING', help="string to xor")
8 |     parser.add_argument('--base64', '-b', action="store_true", help="Base64 decode the string first")
9 |     args = parser.parse_args()
10 |
11 |     if args.base64:
12 |         entry = base64.b64decode(args.STRING)
13 |     else:
14 |         entry = args.STRING.encode("utf-8")
15 |
16 |     for i in range(256):
17 |         bb = bytearray()
18 |         for c in entry:
19 |             bb.append(c ^ i)
20 |         print(bb.decode('utf-8', errors="replace"))
21 |
--------------------------------------------------------------------------------
/csv_extract.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import argparse
3 |
4 | if __name__ == "__main__":
5 |     parser = argparse.ArgumentParser(description='Extract a column from a CSV file')
6 |     parser.add_argument('COLUMN', type=int, default=0,
7 |         help='Column of the file')
8 |     parser.add_argument('FILE', help='CSV file')
9 |     parser.add_argument('--delimiter', '-d', default=',', help='Delimiter')
10 |     parser.add_argument('--quotechar', '-q', default='"', help='Quote char')
11 |     args = parser.parse_args()
12 |
13 |     with open(args.FILE) as csvfile:
14 |         reader = csv.reader(csvfile, delimiter=args.delimiter, quotechar=args.quotechar)
15 |         for row in reader:
16 |             print(row[args.COLUMN])
17 |
--------------------------------------------------------------------------------
/shodan/extractips.py:
--------------------------------------------------------------------------------
1 | import json
2 | import argparse
3 |
4 | if __name__ == "__main__":
5 |     parser = argparse.ArgumentParser(description='Extract IP addresses from Shodan results')
6 |     parser.add_argument('JSONFILE', help='JSON File')
7 |     args = parser.parse_args()
8 |
9 |     line = "{}"
10 |     with open(args.JSONFILE, 'r') as f:
11 |         while line != "":
12 |             data = json.loads(line)
13 |             if "ip" in data:
14 |                 ip = data["ip"]
15 |                 print("{}.{}.{}.{}".format(
16 |                     (ip >> 24) & 0xff,
17 |                     (ip >> 16) & 0xff,
18 |                     (ip >> 8) & 0xff,
19 |                     ip & 0xff
20 |                 ))
21 |             line = f.readline()
22 |
--------------------------------------------------------------------------------
/pe/unxor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | PEHEADER = b'MZ\x90\x00\x03\x00\x00\x00'
5 |
6 | if __name__ == '__main__':
7 |     parser = argparse.ArgumentParser(description='Unxor a PE file')
8 |     parser.add_argument('FILE', help='Xor encoded PE file')
9 |     args = parser.parse_args()
10 |
11 |     with open(args.FILE, 'rb') as f:
12 |         data = f.read()
13 |
14 |     # Xor the known PE header with the start of the file; a single-byte
15 |     # key shows up as the same value repeated
16 |     res = [a^b for a, b in zip(PEHEADER, data[0:len(PEHEADER)])]
17 |     if res.count(res[0]) == len(res):
18 |         # Xor key found
19 |         print("Key identified {}".format(hex(res[0])))
20 |         with open("a.out", "wb+") as f:
21 |             f.write(bytearray([a^res[0] for a in data]))
22 |         print("Decoded payload written in a.out")
23 |     else:
24 |         print("Key not found")
25 |
--------------------------------------------------------------------------------
/elf/unxor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | ELFHEADER = b'\x7fELF\x02\x01\x01\x00\x00\x00'
5 |
6 | if __name__ == '__main__':
7 |     parser = argparse.ArgumentParser(description='Unxor an ELF file')
8 |     parser.add_argument('FILE', help='Xor encoded ELF file')
9 |     args = parser.parse_args()
10 |
11 |     with open(args.FILE, 'rb') as f:
12 |         data = f.read()
13 |
14 |     res = [a^b for a, b in zip(ELFHEADER, data[0:len(ELFHEADER)])]
15 |     if res.count(res[0]) == len(res):
16 |         # Xor key found
17 |         print("Key identified {}".format(hex(res[0])))
18 |         with open("a.out", "wb+") as f:
19 |             f.write(bytearray([a^res[0] for a in data]))
20 |         print("Decoded payload written in a.out")
21 |     else:
22 |         print("Key not found")
23 |
--------------------------------------------------------------------------------
/osint/protonkey.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import argparse
3 | from datetime import datetime
4 |
5 |
6 | if __name__ == "__main__":
7 |     parser = argparse.ArgumentParser(
8 |         description='Check PGP key of a protonmail account')
9 |     parser.add_argument('EMAIL', help="Protonmail email")
10 |     args = parser.parse_args()
11 |
12 |     r = requests.get(
13 |         'https://api.protonmail.ch/pks/lookup?op=index&search={}'.format(
14 |             args.EMAIL))
15 |     res = r.text
16 |     if res.startswith("info:1:0"):
17 |         print("This email address doesn't exist")
18 |     else:
19 |         print(res)
20 |         creation = res.split("\r\n")[1].split(":")[4]
21 |         d = datetime.fromtimestamp(int(creation))
22 |         print("Creation date: {}".format(d))
23 |
--------------------------------------------------------------------------------
/pe/build_shellcode_pe_elfesteem.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from elfesteem import pe_init
3 |
4 | if __name__ == '__main__':
5 |     parser = argparse.ArgumentParser(description='Build a PE from a shellcode')
6 |     parser.add_argument('SHELLCODE', help='Shellcode')
7 |     args = parser.parse_args()
8 |
9 |     # Get the shellcode
10 |     with open(args.SHELLCODE, "rb") as f:
11 |         data = f.read()
12 |     # Generate a PE
13 |     pe = pe_init.PE(wsize=32)
14 |     # Add a ".text" section containing the shellcode to the PE
15 |     s_text = pe.SHList.add_section(name=".text", addr=0x1000, data=data)
16 |     # Set the entrypoint to the shellcode's address
17 |     pe.Opthdr.AddressOfEntryPoint = s_text.addr
18 |     # Write the PE to "sc_pe.exe"
19 |     open('sc_pe.exe', 'w').write(str(pe))
20 |
--------------------------------------------------------------------------------
/pe/update_ep.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import struct
3 |
4 | if __name__ == "__main__":
5 |     parser = argparse.ArgumentParser(description='Replace the entry point of an exe')
6 |     parser.add_argument('EXEFILE', help='Exe file')
7 |     parser.add_argument('ADDR', help='Address in hexadecimal')
8 |     args = parser.parse_args()
9 |
10 |     ep = int(args.ADDR, 16)
11 |     # File offset of AddressOfEntryPoint (e_lfanew + 0x28); hardcoded here,
12 |     # so it only fits binaries where e_lfanew == 0x110
13 |     ep_address = 0x138
14 |
15 |     with open(args.EXEFILE, 'rb') as f:
16 |         data = f.read()
17 |
18 |     print("Current entry point: {}".format(hex(struct.unpack('I', data[ep_address:ep_address+4])[0])))
19 |     new = bytearray(data)
20 |     new[ep_address:ep_address+4] = struct.pack('I', ep)
21 |
22 |     with open(args.EXEFILE + '.patch', 'wb+') as f:
23 |         f.write(new)
24 |     print("Patched in {}".format(args.EXEFILE + ".patch"))
25 |
--------------------------------------------------------------------------------
/macos/check_kext_kk.py:
--------------------------------------------------------------------------------
1 | import json
2 | import argparse
3 |
4 | if __name__ == "__main__":
5 |     parser = argparse.ArgumentParser(description='Analyse kext and KnockKnock files')
6 |     parser.add_argument('JSONFILE', help='JSON File saved by kext or knock knock')
7 |     args = parser.parse_args()
8 |
9 |     with open(args.JSONFILE) as f:
10 |         data = json.loads(f.read())
11 |
12 |     for k in data.keys():
13 |         print("Checking {}".format(k))
14 |         for l in data[k]:
15 |             if "VT detection" in l:
16 |                 if not l["VT detection"].startswith("0/"):
17 |                     print("Suspicious detection in VT:")
18 |                     print(json.dumps(l, indent=4))
19 |             else:
20 |                 print("No VT detection information:")
21 |                 print(json.dumps(l, indent=4))
22 |
--------------------------------------------------------------------------------
/misp/misplib.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | try:
3 |     import ConfigParser
4 | except ImportError:
5 |     # Python 3
6 |     import configparser as ConfigParser
7 | import os
8 |
9 | def parse_config():
10 |     """Parse the configuration file, return a dict of MISP servers"""
11 |     config = ConfigParser.ConfigParser()
12 |     config.read(os.path.join(os.path.expanduser("~"), ".misp"))
13 |     servers = {}
14 |     for s in config.sections():
15 |         try:
16 |             info = {
17 |                 'url': config.get(s, 'url'),
18 |                 'key': config.get(s, 'key')
19 |             }
20 |             servers[s.lower()] = info
21 |             if config.get(s, 'default').lower() == 'true':
22 |                 servers['default'] = info
23 |         except ConfigParser.NoOptionError:
24 |             pass
25 |
26 |     return servers
27 |
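28 | # Expected ~/.misp configuration file (assumed format, matching the
29 | # options read above):
30 | #
31 | # [ServerName]
32 | # url = https://misp.example.org
33 | # key = YOUR_API_KEY
34 | # default = true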
--------------------------------------------------------------------------------
/macos/macho_rename_section.py:
--------------------------------------------------------------------------------
1 | import lief
2 | import argparse
3 |
4 |
5 | if __name__ == "__main__":
6 |     parser = argparse.ArgumentParser(description='Rename a section of a Mach-O binary')
7 |     parser.add_argument('MACHO', help='Mach-O binary')
8 |     parser.add_argument('NAME', help='Name of the section')
9 |     parser.add_argument('NEWNAME', help='New name of the section')
10 |     args = parser.parse_args()
11 |
12 |     binary = lief.parse(args.MACHO)
13 |     found = False
14 |     for s in binary.sections:
15 |         if s.name == args.NAME:
16 |             s.name = args.NEWNAME
17 |             print("Section found")
18 |             found = True
19 |             break
20 |
21 |     if not found:
22 |         print("This section was not found in this binary")
23 |     else:
24 |         binary.write(args.MACHO + "_renamed")
25 |
--------------------------------------------------------------------------------
/forensic/README.md:
--------------------------------------------------------------------------------
1 | # Forensic scripts
2 |
3 | Two scripts here to help create a timeline on live Linux systems :
4 | * `filetimeline.py` : get a list of files in a folder with their change time, modification time and birth time using stat (which does not give the creation time even if the file system has it); see the sketch below
5 | * `mactime.py` : convert this list of files into a csv timeline
6 |
7 | Misc :
8 | * `extract_chrome_history.py`: extract history from a Chrome History Sqlite file
9 |
10 |     * On Windows, stored in `C:\Users\<user>\AppData\Local\Google\Chrome\User Data\Default`
11 |     * On Mac OS, stored in `/Users/<user>/Library/Application Support/Google/Chrome/Default`
12 |     * On Linux, stored in `/home/<user>/.config/google-chrome/Default`
13 | * `ios_unpack.py` : unpack iOS backup folder from iTunes or [libimobiledevice](https://www.libimobiledevice.org/)
14 |
15 |
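16 | A rough sketch of the stat-based collection step (hypothetical, not the actual `filetimeline.py`):
17 |
18 | ```python
19 | import os
20 | import sys
21 |
22 | # Walk a folder and print ctime/mtime/atime per file as CSV lines
23 | # (on Linux st_ctime is the inode change time, not creation time)
24 | for root, dirs, files in os.walk(sys.argv[1]):
25 |     for name in files:
26 |         path = os.path.join(root, name)
27 |         try:
28 |             st = os.stat(path)
29 |         except OSError:
30 |             continue
31 |         print("{};{};{};{}".format(path, st.st_ctime, st.st_mtime, st.st_atime))
32 | ```
33 |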
--------------------------------------------------------------------------------
/osint/truecaller.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import requests
4 | import json
5 |
6 | def get_info(phone):
7 |     r = requests.get(
8 |         "https://search5.truecaller.com/v2/search",
9 |         params={
10 |             "q": phone,
11 |             "countryCode": "",
12 |             "type": 4,
13 |             "locAddr": "",
14 |             "placement": "SEARCHRESULTS,HISTORY,DETAILS",
15 |             "adId": "",
16 |             "clientId": 1,
17 |             "myNumber": "lS59d72f4d1aefae62ba0c1979l_Dl7_DEj9CPstICL1dRnD",
18 |             "registerId": "645710775"
19 |         }
20 |     )
21 |     return r.json()
22 |
23 | if __name__ == "__main__":
24 |     parser = argparse.ArgumentParser()
25 |     parser.add_argument("PHONE")
26 |     args = parser.parse_args()
27 |
28 |     print(json.dumps(get_info(args.PHONE), indent=4, sort_keys=True))
29 |
--------------------------------------------------------------------------------
/visualization/heatmap.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import numpy as np
3 | import pandas as pd
4 | import calmap  # Use my fork https://github.com/Te-k/calmap
5 | import matplotlib.pyplot as plt
6 | import argparse
7 |
8 | # references
9 | # https://pythonhosted.org/calmap/
10 |
11 | if __name__ == '__main__':
12 |     parser = argparse.ArgumentParser(description='Create a heatmap based on csv file')
13 |     parser.add_argument('FILE', help='Csv file, like 2018-07-01;1 for 1 incident that day')
14 |     parser.add_argument('--sep', '-s', default=';',
15 |         help='Separator for the csv file (default is ;)')
16 |     args = parser.parse_args()
17 |
18 |     df = pd.read_csv(args.FILE, sep=args.sep, header=None)
19 |     dates = pd.to_datetime(df[0])
20 |     events = pd.Series(np.array(df[1]), index=dates)
21 |     calmap.yearplot(events, year=min(dates).year)
22 |     plt.show()
23 |
--------------------------------------------------------------------------------
/format/csv2md.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python2
2 | import csv
3 | import argparse
4 |
5 | if __name__ == "__main__":
6 |     parser = argparse.ArgumentParser(description='Convert a csv to a Markdown file')
7 |     parser.add_argument('CSVFILE', help='CSV file to be converted')
8 |     parser.add_argument('-n', '--no-header', help="No header in the CSV file", action="store_true")
9 |     parser.add_argument('-d', '--delimiter', default=",",
10 |         help="Delimiter of the CSV file")
11 |     args = parser.parse_args()
12 |
13 |     firstline = not args.no_header
14 |     with open(args.CSVFILE, 'r') as csvfile:
15 |         reader = csv.reader(csvfile, delimiter=args.delimiter)
16 |         for row in reader:
17 |             print("|%s|" % "|".join(row))
18 |             if firstline:
19 |                 print(("|:---------------------" * len(row)) + "|")
20 |                 firstline = False
21 |
--------------------------------------------------------------------------------
/pt/get_project_iocs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import os
4 | import sys
5 | import json
6 | import argparse
7 |
8 |
9 | if __name__ == '__main__':
10 |     parser = argparse.ArgumentParser(description='List indicators from a PassiveTotal project')
11 |     parser.add_argument('PROJECT', help='Passive Total project')
12 |     args = parser.parse_args()
13 |
14 |     conf_file = os.path.join(os.path.expanduser("~"), ".config/passivetotal/api_config.json")
15 |     if os.path.isfile(conf_file):
16 |         with open(conf_file, 'r') as f:
17 |             conf = json.loads(f.read())
18 |     else:
19 |         print('No config file')
20 |         sys.exit(1)
21 |
22 |     auth = (conf['username'], conf['api_key'])
23 |     r = requests.get('https://api.passivetotal.org/v2/artifact',
24 |         params={'project': args.PROJECT}, auth=auth)
25 |     for a in r.json()['artifacts']:
26 |         print(a['query'])
27 |
--------------------------------------------------------------------------------
/web/headers.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python2
2 | import argparse
3 | import socket
4 | from urlparse import urlparse
5 |
6 |
7 | if __name__ == "__main__":
8 |     parser = argparse.ArgumentParser(description='Get HTTP headers')
9 |     parser.add_argument('host', metavar='HOST', help='Host targeted')
10 |     args = parser.parse_args()
11 |
12 |     # validate the host
13 |     hosturl = urlparse(args.host)
14 |     if hosturl.netloc == '':
15 |         # XXX: remove path after hostname
16 |         host = hosturl.path
17 |     else:
18 |         host = hosturl.netloc
19 |
20 |     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
21 |     # FIXME : does not support HTTPs
22 |     s.connect((host, 80))
23 |     s.sendall("GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % host)
24 |     data = s.recv(100000)
25 |     s.close()
26 |
27 |     sep = data.find("\r\n\r\n")
28 |     headers = data[:sep]
29 |     content = data[sep+4:]
30 |
31 |     print(headers)
32 |
--------------------------------------------------------------------------------
/pe/build_shellcode_pe.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from lief import PE
3 |
4 |
5 | if __name__ == '__main__':
6 |     parser = argparse.ArgumentParser(description='Build a PE from a shellcode')
7 |     parser.add_argument('SHELLCODE', help='Shellcode')
8 |     args = parser.parse_args()
9 |
10 |     # Get the shellcode
11 |     with open(args.SHELLCODE, "rb") as f:
12 |         data = f.read()
13 |
14 |     binary32 = PE.Binary("pe_from_scratch", PE.PE_TYPE.PE32)
15 |
16 |     section_text = PE.Section(".text")
17 |     section_text.content = [c for c in data]  # Takes a list(int)
18 |     section_text.virtual_address = 0x1000
19 |
20 |     section_text = binary32.add_section(section_text, PE.SECTION_TYPES.TEXT)
21 |
22 |     binary32.optional_header.addressof_entrypoint = section_text.virtual_address
23 |     builder = PE.Builder(binary32)
24 |     builder.build_imports(True)
25 |     builder.build()
26 |     builder.write("sc_pe.exe")
27 |
--------------------------------------------------------------------------------
/web/check_fastest_cache_version.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import requests
3 |
4 |
5 | if __name__ == "__main__":
6 |     parser = argparse.ArgumentParser(description='Check if WP Fastest cache is installed and the version')
7 |     parser.add_argument('DOMAIN', help="Domain")
8 |     args = parser.parse_args()
9 |
10 |     if args.DOMAIN.startswith("http"):
11 |         domain = args.DOMAIN.rstrip("/") + "/"
12 |     else:
13 |         domain = "http://" + args.DOMAIN.rstrip("/") + "/"
14 |
15 |     r = requests.get(domain + "wp-content/plugins/wp-fastest-cache/readme.txt")
16 |     if r.status_code != 200:
17 |         print("WP Fastest cache not found (HTTP {})".format(r.status_code))
18 |     else:
19 |         text = r.text
20 |         version = text.split("\n")[6].split(" ")[2]
21 |         if version != "1.2.2":
22 |             print("/!\\ Insecure Fastest Cache version: {}".format(version))
23 |         else:
24 |             print("Latest Fastest Cache version 1.2.2")
25 |
--------------------------------------------------------------------------------
/censys/censyssearch.py:
--------------------------------------------------------------------------------
1 | import configparser
2 | import os
3 | from pathlib import Path
4 | from censys.search import CensysHosts
5 | import argparse
6 |
7 | config_path = os.path.join(str(Path.home()), ".config", "censys", "censys.cfg")
8 |
9 | def get_config() -> configparser.ConfigParser:
10 |     """Reads and returns config.
11 |
12 |     Returns:
13 |         configparser.ConfigParser: Config for Censys.
14 |     """
15 |     config = configparser.ConfigParser()
16 |     if os.path.isfile(config_path):
17 |         config.read(config_path)
18 |     else:
19 |         print("Config not found")
20 |     return config
21 |
22 |
23 | if __name__ == "__main__":
24 |     parser = argparse.ArgumentParser(description='Make censys IP search')
25 |     parser.add_argument('QUERY', help="Censys query")
26 |     args = parser.parse_args()
27 |
28 |     h = CensysHosts()
29 |
30 |     query = h.search(args.QUERY, per_page=100)
31 |     for r in query():
32 |         print(r['ip'])
33 |
--------------------------------------------------------------------------------
/network/README.md:
--------------------------------------------------------------------------------
1 | # Network scripts
2 |
3 | * `check_umbrella.py` : check if domains are in [Cisco Umbrella Top million websites](https://umbrella.cisco.com/blog/cisco-umbrella-1-million)
4 | * `check_ripe_last_route.py` : check last time a BGP route was advertised by an AS using RIPE API
5 | * `checkpoint_banner.py` : get the hostname from a checkpoint firewall admin service
6 | * `cidr_range.py` : print first and last IP address of a CIDR range
7 | * `cidr_reduce.py`: reduce list of IPs in CIDR ranges (IPv4 only so far; see the sketch below)
8 | * `extract_iocs.py` : extract potential network indicators from a PCAP file using tshark
9 | * `dns_resolve.py` : resolve domains, results in a CSV file
10 | * `dns_resolve_mx.py` : resolve MX entries from a list of domains
11 | * `domains_timeline.py`: makes a timeline of domains registration based on Whois data
12 | * `list_mullvad_ips.py`: list IPs of mullvad servers
13 | * `test_website.py` : check a domain list and remove those not having a valid website
14 |
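15 | The core of `cidr_reduce.py` can be approximated with the standard library (a sketch, not the actual script):
16 |
17 | ```python
18 | import ipaddress
19 | import sys
20 |
21 | # Collapse a list of IPv4 addresses/networks into minimal CIDR ranges
22 | with open(sys.argv[1]) as f:
23 |     nets = [ipaddress.IPv4Network(line.strip()) for line in f if line.strip()]
24 |
25 | for net in ipaddress.collapse_addresses(nets):
26 |     print(net)
27 | ```
28 |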
--------------------------------------------------------------------------------
/mqtt-get.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import paho.mqtt.client as mqtt
3 | import argparse
4 |
5 |
6 | def on_connect(client, userdata, flags, rc):
7 |     print("[+] Connection successful")
8 |     client.subscribe('#', qos=1)  # Subscribe to all topics
9 |     client.subscribe('$SYS/#')  # Broker Status (Mosquitto)
10 |
11 | def on_message(client, userdata, msg):
12 |     print('[+] Topic: %s - Message: %s' % (msg.topic, msg.payload))
13 |
14 | if __name__ == "__main__":
15 |     parser = argparse.ArgumentParser(description='Try to subscribe to some MQTT servers')
16 |     parser.add_argument('--port', '-p', type=int, default=1883, help="port")
17 |     parser.add_argument('SERVER', help="Server IP address")
18 |     args = parser.parse_args()
19 |
20 |     client = mqtt.Client(client_id="MqttClient")
21 |     client.on_connect = on_connect
22 |     client.on_message = on_message
23 |     client.connect(args.SERVER, args.port, 60)
24 |     client.loop_forever()
25 |
--------------------------------------------------------------------------------
/shodan/shodan_cobaltstrike.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import shodan
3 | import argparse
4 | import os
5 | import sys
6 |
7 |
8 | if __name__ == '__main__':
9 |     parser = argparse.ArgumentParser(description='Extract list of Cobalt Strike servers from Shodan')
10 |     parser.add_argument('--key', '-k', help='Shodan API key')
11 |     args = parser.parse_args()
12 |
13 |     # Deal with the key first
14 |     if args.key:
15 |         key = args.key
16 |     else:
17 |         cpath = os.path.expanduser('~/.shodan/api_key')
18 |         if os.path.isfile(cpath):
19 |             with open(cpath, 'r') as f:
20 |                 key = f.read().strip()
21 |         else:
22 |             print("No API key found")
23 |             sys.exit(1)
24 |
25 |     api = shodan.Shodan(key)
26 |     # Cobalt Strike JARM signature
27 |     res = api.search("ssl.jarm:07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1")
28 |     for ip in res['matches']:
29 |         print(ip['ip_str'])
30 |
--------------------------------------------------------------------------------
/format/csvcut.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import argparse
3 |
4 | if __name__ == '__main__':
5 |     parser = argparse.ArgumentParser(description='Cut columns from a CSV file')
6 |     parser.add_argument('FILE', help='CSV file')
7 |     parser.add_argument('--delimiter', '-d', default=',', help='Delimiter')
8 |     parser.add_argument('--quotechar', '-q', default='"', help="Quote char")
9 |     parser.add_argument('--cut', '-c', type=int, default=0, help="Column to get")
10 |     parser.add_argument('--uniq', '-u', action='store_true', help="Only print uniq values")
11 |     args = parser.parse_args()
12 |
13 |     lines = set()
14 |
15 |     with open(args.FILE) as csvfile:
16 |         reader = csv.reader(csvfile, delimiter=args.delimiter, quotechar=args.quotechar)
17 |         for row in reader:
18 |             if args.uniq:
19 |                 lines.add(row[args.cut])
20 |             else:
21 |                 print(row[args.cut])
22 |
23 |     if args.uniq:
24 |         for d in lines:
25 |             print(d)
26 |
--------------------------------------------------------------------------------
/disassemble.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | from capstone import *
4 |
5 |
6 | if __name__ == '__main__':
7 |     parser = argparse.ArgumentParser(description='Disassemble')
8 |     parser.add_argument('--type', '-t', choices=['x86', 'x86-64', 'ARM', 'ARM64'],
9 |         help='Type of architecture')
10 |     parser.add_argument('FILE', help='binary file')
11 |     args = parser.parse_args()
12 |
13 |     with open(args.FILE, 'rb') as f:
14 |         code = f.read()
15 |
16 |     if args.type == 'x86':
17 |         md = Cs(CS_ARCH_X86, CS_MODE_32)
18 |     elif args.type == 'x86-64':
19 |         md = Cs(CS_ARCH_X86, CS_MODE_64)
20 |     elif args.type == 'ARM':
21 |         md = Cs(CS_ARCH_ARM, CS_MODE_ARM)
22 |     elif args.type == 'ARM64':
23 |         md = Cs(CS_ARCH_ARM64, CS_MODE_ARM)
24 |     else:
25 |         md = Cs(CS_ARCH_X86, CS_MODE_32)
26 |
27 |     for (address, size, mnemonic, op_str) in md.disasm_lite(code, 0x0):
28 |         print("0x%x:\t%s\t%s" % (address, mnemonic, op_str))
29 |
--------------------------------------------------------------------------------
/network/test_website.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import requests
3 |
4 |
5 | if __name__ == '__main__':
6 |     parser = argparse.ArgumentParser(description='Parse a list of websites and remove those not working')
7 |     parser.add_argument('WEBSITEFILE', help='Text file with a list of websites')
8 |     args = parser.parse_args()
9 |
10 |     with open(args.WEBSITEFILE) as f:
11 |         data = [a.strip() for a in f.read().split('\n')]
12 |
13 |     for d in data:
14 |         if d != '':
15 |             try:
16 |                 if d.startswith('http'):
17 |                     r = requests.get(d, timeout=5)
18 |                 else:
19 |                     r = requests.get("http://{}/".format(d), timeout=5)
20 |                 if r.status_code == 200:
21 |                     print(d)
22 |             except requests.exceptions.ConnectionError:
23 |                 pass
24 |             except requests.exceptions.ReadTimeout:
25 |                 pass
26 |             except requests.exceptions.TooManyRedirects:
27 |                 pass
28 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 | *.swp
9 |
10 | # Distribution / packaging
11 | .Python
12 | env/
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *,cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 |
56 | # Sphinx documentation
57 | docs/_build/
58 |
59 | # PyBuilder
60 | target/
61 |
62 | #Ipython Notebook
63 | .ipynb_checkpoints
64 |
--------------------------------------------------------------------------------
/threats/quad9.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | import sys
3 | import argparse
4 | import json
5 | import requests
6 |
7 |
8 | if __name__ == '__main__':
9 |     parser = argparse.ArgumentParser(description='Check if a domain is blocked by Quad9')
10 |     parser.add_argument('DOMAIN', help='Domain to be checked')
11 |     parser.add_argument('--type', '-t', default='A', help='DNS Type')
12 |     parser.add_argument('--verbose', '-v', action='store_true', help='Display results')
13 |     args = parser.parse_args()
14 |
15 |     params = {
16 |         'name': args.DOMAIN,
17 |         'type': args.type,
18 |         'ct': 'application/dns-json',
19 |     }
20 |     r = requests.get("https://dns.quad9.net:5053/dns-query", params=params)
21 |     if r.status_code != 200:
22 |         print('Problem querying quad9 :(')
23 |         sys.exit(1)
24 |     # DNS status 3 (NXDOMAIN) is how Quad9 blocks a domain
25 |     if r.json()['Status'] == 3:
26 |         print("{} - BLOCKED".format(args.DOMAIN))
27 |     else:
28 |         print("{} - NOT BLOCKED".format(args.DOMAIN))
29 |     if args.verbose:
30 |         print(json.dumps(r.json(), indent=4))
31 |
--------------------------------------------------------------------------------
/android/dexofuzzy2gephi.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import networkx as nx
4 |
5 |
6 | if __name__ == '__main__':
7 |     parser = argparse.ArgumentParser(description='Convert dexofuzzy output to gephi graph')
8 |     parser.add_argument('JSONFILE', help='Json file')
9 |     args = parser.parse_args()
10 |
11 |     with open(args.JSONFILE, 'r') as f:
12 |         data = json.loads(f.read())
13 |
14 |     G = nx.Graph()
15 |     for s in data:
16 |         # Add node
17 |         if not G.has_node(s['file_sha256']):
18 |             G.add_node(s['file_sha256'])
19 |         # Add cluster
20 |         for c in s['clustering']:
21 |             if s['file_sha256'] != c['file_sha256']:
22 |                 # Add node
23 |                 if not G.has_node(c['file_sha256']):
24 |                     G.add_node(c['file_sha256'])
25 |                 if not G.has_edge(s['file_sha256'], c['file_sha256']):
26 |                     G.add_edge(s['file_sha256'], c['file_sha256'])
27 |
28 |     nx.write_gexf(G, 'output.gexf')
29 |     print("Gephi file written : output.gexf")
30 |
--------------------------------------------------------------------------------
/network/check_umbrella.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | if __name__ == "__main__":
5 |     parser = argparse.ArgumentParser(description='Check if domains are in the Umbrella top 1 million list')
6 |     parser.add_argument('DOMAINLIST', help='List of domains')
7 |     parser.add_argument('UMBRELLALIST', help='Cisco Umbrella top 1million list')
8 |     args = parser.parse_args()
9 |
10 |     umbrella = {}
11 |     with open(args.UMBRELLALIST) as f:
12 |         for l in f.read().split('\n'):
13 |             if l.strip() == '':
14 |                 continue
15 |             ll = l.strip().split(',')
16 |             umbrella[ll[1]] = ll[0]
17 |
18 |     with open(args.DOMAINLIST) as f:
19 |         data = f.read().split('\n')
20 |
21 |     for d in data:
22 |         if d.strip() == "":
23 |             continue
24 |         if d.strip() in umbrella.keys():
25 |             print("{} in the umbrella list at {} position".format(
26 |                 d.strip(),
27 |                 umbrella[d.strip()]
28 |             ))
29 |
--------------------------------------------------------------------------------
/pe/get_imphash.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import pefile
3 | import os
4 |
5 | def get_imphash(exe_path):
6 |     try:
7 |         pe = pefile.PE(exe_path)
8 |         return pe.get_imphash()
9 |     except pefile.PEFormatError:
10 |         return None
11 |
12 | if __name__ == '__main__':
13 |     parser = argparse.ArgumentParser(description='Compute the imphash of PE files')
14 |     parser.add_argument('TARGET', help='Target file or folder')
15 |     args = parser.parse_args()
16 |
17 |     if os.path.isfile(args.TARGET):
18 |         res = get_imphash(args.TARGET)
19 |         if res:
20 |             print("{} - {}".format(args.TARGET, res))
21 |         else:
22 |             print("{} - Not a PE file".format(args.TARGET))
23 |     elif os.path.isdir(args.TARGET):
24 |         for r, d, f in os.walk(args.TARGET):
25 |             for file in f:
26 |                 res = get_imphash(os.path.join(r, file))
27 |                 if res:
28 |                     print("{} - {}".format(file, res))
29 |                 else:
30 |                     print("{} - Not a PE file".format(file))
31 |
--------------------------------------------------------------------------------
/pt/get_subdomains.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 | import json
5 | import argparse
6 | from passivetotal.libs.enrichment import EnrichmentRequest
7 |
8 | def get_config():
9 |     conf_file = os.path.join(os.path.expanduser("~"), ".config/passivetotal/api_config.json")
10 |     if os.path.isfile(conf_file):
11 |         with open(conf_file, 'r') as f:
12 |             conf = json.loads(f.read())
13 |     else:
14 |         print('No config file')
15 |         sys.exit(1)
16 |     return conf
17 |
18 |
19 | if __name__ == '__main__':
20 |     parser = argparse.ArgumentParser(description='List subdomains for a domain')
21 |     parser.add_argument('DOMAIN', help='Domain')
22 |     args = parser.parse_args()
23 |
24 |     conf = get_config()
25 |
26 |     client = EnrichmentRequest(conf['username'], conf['api_key'])
27 |     raw_results = client.get_subdomains(query=args.DOMAIN)
28 |     for s in raw_results['subdomains']:
29 |         print(s + '.' + raw_results['primaryDomain'])
30 |
--------------------------------------------------------------------------------
/android/yaradex.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import yara
3 | import os
4 | import sys
5 | import argparse
6 | from androguard.core.bytecodes.apk import APK
7 | from androguard.core import androconf
8 |
9 |
10 | if __name__ == '__main__':
11 |     parser = argparse.ArgumentParser()
12 |     parser.add_argument("YARARULE", help="Path of the yara rule file")
13 |     parser.add_argument("PATH", help="Path of the executable to check")
14 |     args = parser.parse_args()
15 |
16 |     if not os.path.isfile(args.PATH):
17 |         print("Invalid path for the APK")
18 |         sys.exit(-1)
19 |     if not os.path.isfile(args.YARARULE):
20 |         print("Invalid path for yara rule")
21 |         sys.exit(-1)
22 |
23 |     if androconf.is_android(args.PATH) != "APK":
24 |         print("This is not an APK file")
25 |         sys.exit(-1)
26 |
27 |     rules = yara.compile(filepath=args.YARARULE)
28 |
29 |     apk = APK(args.PATH)
30 |     dex = apk.get_dex()
31 |     res = rules.match(data=dex)
32 |     if len(res) > 0:
33 |         print("Matches: {}".format(", ".join([r.rule for r in res])))
34 |
--------------------------------------------------------------------------------
/certs/get_crtsh_subdomains.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import psycopg2
4 |
5 | if __name__ == "__main__":
6 | parser = argparse.ArgumentParser(description='Get subdomains of a domain from crt.sh certificates')
7 | parser.add_argument('DOMAIN', help='Domain')
8 | args = parser.parse_args()
9 |
10 | subdomains = []
11 | conn = psycopg2.connect("dbname=certwatch user=guest host=crt.sh")
12 | conn.set_session(autocommit=True)
13 | cur = conn.cursor()
14 | # Tips from Randorisec https://www.randori.com/blog/enumerating-subdomains-with-crt-sh/
15 | cur.execute("""
16 | select distinct(lower(name_value))
17 | FROM certificate_and_identities cai
18 | WHERE plainto_tsquery('{}') @@ identities(cai.CERTIFICATE) AND
19 | lower(cai.NAME_VALUE) LIKE ('%.{}')
20 | """.format(args.DOMAIN, args.DOMAIN))
21 | for entry in cur.fetchall():
22 |         if entry[0].startswith("*."):  # skip wildcard entries
23 | continue
24 | subdomains.append(entry[0])
25 |
26 | for p in subdomains:
27 | print(p)
28 |
--------------------------------------------------------------------------------
/pe/extract_sig.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | import pefile
4 | import os
5 | import sys
6 |
7 |
8 | if __name__ == '__main__':
9 | parser = argparse.ArgumentParser(description='Extract PE Signature')
10 | parser.add_argument('PEFILE', help='PE File')
11 | parser.add_argument('--output', '-o', help='Output file')
12 | args = parser.parse_args()
13 |
14 | if not os.path.isfile(args.PEFILE):
15 | print("Invalid path")
16 | sys.exit(-1)
17 |
18 | if args.output:
19 | output = args.output
20 | else:
21 | output = args.PEFILE + '.sig'
22 |
23 | pe = pefile.PE(args.PEFILE)
24 |
25 | address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress
26 | size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size
27 | if address == 0:
28 | print('Source file not signed')
29 | sys.exit(0)
30 |
31 | signature = pe.write()[address+8:]
32 | f = open(output, 'wb+')
33 | f.write(signature)
34 | f.close()
35 |
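36 | # Note on the layout: for IMAGE_DIRECTORY_ENTRY_SECURITY, VirtualAddress is a
37 | # raw file offset (not an RVA) and the first 8 bytes at that offset are the
38 | # WIN_CERTIFICATE header (dwLength, wRevision, wCertificateType), hence the
39 | # address+8 skip above to keep only the PKCS#7 signature blob.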
--------------------------------------------------------------------------------
/hostnametoips.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import dns.resolver
3 | import argparse
4 |
5 | if __name__ == '__main__':
6 | parser = argparse.ArgumentParser(description='Resolve domains and get list of IPs')
7 | parser.add_argument('FILE', help='File containing list of domains')
8 | parser.add_argument('--verbose', '-v', action='store_true',
9 |                         help='Show details of each resolution')
10 | args = parser.parse_args()
11 |
12 | with open(args.FILE) as f:
13 | data = f.read().split('\n')
14 |
15 | res = dns.resolver.Resolver()
16 |
17 | ips = set()
18 | for d in data:
19 |         if d.strip() != '':
20 | try:
21 | ans = res.query(d.strip(), "A")
22 | for ip in ans:
23 | if args.verbose:
24 | print("%s - %s" % (d.strip(), ip.to_text()))
25 | ips.add(ip.to_text())
26 | except dns.resolver.NXDOMAIN:
27 | if args.verbose:
28 | print("%s - no domain" % d.strip())
29 |
30 | for ip in ips:
31 | print(ip)
32 |
--------------------------------------------------------------------------------
/web/httpscan/README.md:
--------------------------------------------------------------------------------
1 | # HTTP Scanner
2 |
3 | ## HOWTO
4 |
5 | ## Phishing kit fingerprint
6 |
7 | ### Example
8 |
9 | ```
10 | $ python httpscan.py -s example.com -F
11 | Fingerprinting example.com
12 | -> match on examplesig
13 | ```
14 |
15 | ### Signature format
16 |
17 | Signatures use the YAML format and are close to yara signatures: a list of tests and a condition
18 |
19 | Example:
20 | ```yaml
21 | ---
22 | examplesig:
23 | tests:
24 | - name: index
25 | path: index.html
26 | code: 200
27 | content: ""
28 | condition: all
29 | ```
30 |
31 | Each test should contain a name, a path, and at least one of the following criteria:
32 | * code : expected HTTP status code
33 | * content : string to look for in the response body
34 |
35 | The condition can be "any" (at least one test matches) or "all" (every test must match)
36 |
37 |
38 | ## HTTP Fingerprint notes
39 |
40 | ### Header field ordering
41 |
42 | nginx:
43 | ```
44 | Server: nginx
45 | Date: Sat, 21 Jan 2017 03:57:35 GMT
46 | Content-Type: text/html
47 | Last-Modified: Sun, 02 Oct 2016 05:00:32 GMT
48 | Transfer-Encoding: chunked
49 | Connection: keep-alive
50 | Content-Encoding: gzip
51 |
52 | ```
53 |
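54 | To compare header ordering across servers, the headers can be grabbed in the
55 | order the server sends them; a minimal sketch using the standard library
56 | (illustration only, not part of `httpscan.py`):
57 | 
58 | ```python
59 | import http.client
60 | 
61 | # Fetch / and print the header names in the order the server sent them
62 | conn = http.client.HTTPConnection("example.com", 80, timeout=10)
63 | conn.request("GET", "/")
64 | resp = conn.getresponse()
65 | print([name for name, _ in resp.getheaders()])
66 | conn.close()
67 | ```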
--------------------------------------------------------------------------------
/goo.gl/README.md:
--------------------------------------------------------------------------------
1 | # Goo.gl URL shortener tools
2 |
3 | Tools to get information about goo.gl shortened URLs through the API. To create a key, check [here](https://developers.google.com/url-shortener/v1/getting_started#APIKey). Works in Python 2 and Python 3.
4 |
5 | Key should be stored in `~/.goo.gl`:
6 | ```
7 | [API]
8 | key: KEYHERE
9 | ```
10 |
11 | **Help** :
12 | ```bash
13 | $ python api.py -h
14 | usage: api.py [-h] [--hash HASH] [--file FILE]
15 |
16 | Check goo.gl infos through the API
17 |
18 | optional arguments:
19 | -h, --help show this help message and exit
20 | --hash HASH, -H HASH HASH of a link
21 | --file FILE, -f FILE Get hashes from a file
22 | ```
23 |
24 | **Check a hash**:
25 | ```bash
26 | $ python api.py -H fbsS
27 | {
28 | "analytics":{
29 | "allTime":{
30 | "browsers":[
31 | {
32 | "count":"6607390",
33 | "id":"Chrome"
34 | },
35 | [SNIP]
36 | "created":"2009-12-13T07:22:55.000+00:00",
37 | "id":"http://goo.gl/fbsS",
38 | "kind":"urlshortener#url",
39 | "longUrl":"http://www.google.com/",
40 | "status":"OK"
41 | }
42 | ```
43 |
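44 | Under the hood such a lookup only needs the key from `~/.goo.gl` and one GET
45 | request; a minimal sketch, assuming the v1 endpoint documented in the link
46 | above:
47 | 
48 | ```python
49 | import configparser
50 | import os
51 | import requests
52 | 
53 | # Read the API key from ~/.goo.gl
54 | config = configparser.ConfigParser()
55 | config.read(os.path.expanduser("~/.goo.gl"))
56 | key = config["API"]["key"]
57 | 
58 | # Ask for the full analytics of a short URL hash
59 | r = requests.get(
60 |     "https://www.googleapis.com/urlshortener/v1/url",
61 |     params={"shortUrl": "http://goo.gl/fbsS", "projection": "FULL", "key": key},
62 | )
63 | print(r.json())
64 | ```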
--------------------------------------------------------------------------------
/web/README.md:
--------------------------------------------------------------------------------
1 | # HTTP Tests
2 |
3 | Bunch of random scripts to test stuff on web servers:
4 | * `check_fastest_cache_version.py`: check if the WordPress plugin WP Fastest Cache is installed and up-to-date
5 | * `check_fun_files.py` : test if the files from a list exist on the server (python2/3, minimal imports)
6 | * `fun_files.txt` : handmade list of interesting files (used by default, quite limited ~20 tests)
7 | * `phishingkits.txt` : list of phishing kit names imported from https://github.com/0xd34db33f/scriptsaw/blob/master/ruby/phish_kit_finder.rb
8 | * `http_test_methods.py`: test different HTTP methods on the server (python2 only)
9 | * `http_test.py` : test different HTTP headers (python2)
10 | * `headers.py` : show server's headers (python2)
11 | * `watch_response_time.py` : download and log server's response time (python2)
12 | * `server_image_fingerprint.py` : fingerprint the web server based on images accessible (python2)
13 | * `plot_response_time.sh` : show server's response time based on log generated by `watch_response_time.py` (uses gnuplot)
14 | * `waf` : script to bypass Web Application Firewalls (python2, old)
15 | * `proxychecker.py` : check if an IP is running an open proxy (see the sketch below)
16 |
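17 | The open proxy check boils down to routing a request through the target and
18 | seeing whether it succeeds; a minimal sketch of the idea (hypothetical
19 | `CHECK_URL`, not the exact logic of `proxychecker.py`):
20 | 
21 | ```python
22 | import requests
23 | 
24 | CHECK_URL = "http://example.com/"
25 | 
26 | def is_open_proxy(ip, port=8080, timeout=5):
27 |     """Return True if a request routed through ip:port succeeds."""
28 |     proxies = {"http": "http://{}:{}".format(ip, port)}
29 |     try:
30 |         r = requests.get(CHECK_URL, proxies=proxies, timeout=timeout)
31 |         return r.status_code == 200
32 |     except requests.exceptions.RequestException:
33 |         return False
34 | 
35 | print(is_open_proxy("198.51.100.12"))
36 | ```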
--------------------------------------------------------------------------------
/certs/listcerts.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import operator
4 | from pycrtsh import Crtsh
5 | from collections import Counter
6 |
7 | if __name__ == "__main__":
8 | parser = argparse.ArgumentParser(description='List certificates for a domain')
9 |     parser.add_argument('DOMAIN', help='Domain')
10 | args = parser.parse_args()
11 |
12 | crt = Crtsh()
13 | index = crt.search(args.DOMAIN)
14 | domains = []
15 | print("Certificates")
16 | for c in index:
17 | data = crt.get(c["id"], type="id")
18 | print("%s\t%s\t%s\t%s" % (
19 | data["subject"]["commonName"],
20 | data["not_before"].isoformat(),
21 | data["not_after"].isoformat(),
22 | data["sha1"]
23 | )
24 | )
25 | if "alternative_names" in data["extensions"]:
26 | domains += list(set([a[2:] if a.startswith("*.") else a for a in data["extensions"]["alternative_names"]]))
27 |
28 | print("\nDomains")
29 | count = Counter(domains)
30 | for d in sorted(count.items(), key=operator.itemgetter(1), reverse=True):
31 |         print("-%s: %i occurrences" % (d[0], d[1]))
32 |
--------------------------------------------------------------------------------
/pe/pesearch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import pefile
4 | import argparse
5 | import sys
6 | import hashlib
7 | import datetime
8 |
9 | """Search a string in a PE file
10 | Author : Tek
11 | Date : 05/24/2017
12 | """
13 |
14 | if __name__ == "__main__":
15 | parser = argparse.ArgumentParser(description='Search string in a PE file')
16 | parser.add_argument('STRING', help='a string')
17 | parser.add_argument('FILE', help='a PE file')
18 | args = parser.parse_args()
19 |
20 | fin = open(args.FILE, 'rb')
21 | data = fin.read()
22 | fin.close()
23 |
24 | # Search for physical location
25 |     pos = data.find(args.STRING.encode())
26 | if pos == -1:
27 | print("String not found...")
28 | sys.exit(1)
29 |
30 | print('Position in the file : 0x%x' % pos)
31 |
32 | # Search position in the PE
33 | pe = pefile.PE(data=data)
34 | # Check in sections first
35 | for s in pe.sections:
36 | if (pos >= s.PointerToRawData) and (pos <= s.PointerToRawData + s.SizeOfRawData):
37 | vaddr = pe.OPTIONAL_HEADER.ImageBase + pos - s.PointerToRawData + s.VirtualAddress
38 |             print("In section %s at address 0x%x" % (s.Name.decode('utf-8', 'replace').strip('\x00'), vaddr))
39 |
--------------------------------------------------------------------------------
/visualization/timeline_bar.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import numpy as np
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 | import argparse
6 |
7 | if __name__ == '__main__':
8 | parser = argparse.ArgumentParser(description='Process a timeline')
9 | parser.add_argument('FILE', help='CSV file')
10 | args = parser.parse_args()
11 |
12 |
13 | # Read file
14 | df = pd.read_csv(args.FILE)
15 | c1_date = df.columns[0]
16 | c2_type = df.columns[1]
17 | # List the different types
18 | types = df[c2_type].unique()
19 |
20 | # Convert the first column to datetime
21 | df['day'] = pd.to_datetime(df[c1_date])
22 | mint = df['day'].min()
23 | maxt = df['day'].max()
24 | df = df.set_index('day')
25 |
26 | #dg = df.groupby([df.day.dt.year, df.day.dt.month, c2_type]).count()
27 | data = {'months': pd.period_range(mint, maxt, freq='M')}
28 | for d in types:
29 | dg = df[(df[c2_type] == d)]
30 | dg2 = dg.groupby(dg.index.to_period('M')).count()
31 | data[d] = dg2.reindex(pd.period_range(mint, maxt, freq='M'))[c2_type].values
32 |
33 |
34 | dff = pd.DataFrame(data)
35 |
36 | ax = dff.set_index('months').plot(kind='bar')
37 | ax.set_xticklabels(dff['months'].dt.strftime('%b, %Y'))
38 | plt.xticks(rotation=70)
39 | plt.show()
40 |
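41 | # Expected input (hypothetical example): a CSV whose first column is a date
42 | # and whose second column is a category, e.g.
43 | #   date,type
44 | #   2019-01-03,phishing
45 | #   2019-02-17,malware
46 | #   2019-02-20,phishing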
--------------------------------------------------------------------------------
/pt/get_ip_domains.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import os
4 | import sys
5 | import json
6 | import argparse
7 | from passivetotal.libs.dns import DnsRequest
8 |
9 | def get_config():
10 | conf_file = os.path.join(os.path.expanduser("~"), ".config/passivetotal/api_config.json")
11 | if os.path.isfile(conf_file):
12 | with open(conf_file, 'r') as f:
13 | conf = json.loads(f.read())
14 | else:
15 | print('No config file')
16 | sys.exit(1)
17 | return conf
18 |
19 |
20 | if __name__ == '__main__':
21 | parser = argparse.ArgumentParser(description='Extract all domains from an IP address')
22 | parser.add_argument('IP', help='an IP address')
23 | args = parser.parse_args()
24 |
25 | conf = get_config()
26 |
27 | client = DnsRequest(conf['username'], conf['api_key'])
28 | raw_results = client.get_passive_dns(query=args.IP)
29 | print("{} domains identified".format(len(raw_results["results"])))
30 |
31 | csvout = open("csv.out", "w+")
32 | csvout.write("Domain,First,Last,Type\n")
33 | for r in raw_results["results"]:
34 | csvout.write("{},{},{},{}\n".format(
35 | r['resolve'],
36 | r['firstSeen'],
37 | r['lastSeen'],
38 | r['recordType']
39 | ))
40 | print("extracted in csv.out")
41 |
--------------------------------------------------------------------------------
/censys/censysip.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # -*- coding: utf-8 -*-
3 | import censys.ipv4
4 | import os
5 | import argparse
6 | import json
7 | from censyslib import *
8 |
9 |
10 | if __name__ == '__main__':
11 | parser = argparse.ArgumentParser(description='Request censys IPv4 database')
12 | parser.add_argument('--search', '-s', help='Search term in Censys database')
13 | parser.add_argument('--ip', '-i', help='Check info on the given IP')
14 | parser.add_argument('--max-results', '-m', default=100, type=int, help='Max number of results')
15 | parser.add_argument('--verbose', '-v', action='store_true', help='Verbose mode')
16 | args = parser.parse_args()
17 |
18 | key = get_apikey()
19 |
20 | cc = censys.ipv4.CensysIPv4(api_id=key[0], api_secret=key[1])
21 |
22 | if args.search is not None:
23 | it = cc.search(args.search)
24 | results = []
25 | try:
26 | for i in range(args.max_results):
27 | results.append(it.next())
28 | except StopIteration:
29 | pass
30 |
31 | # print IP list
32 | for ip in results:
33 | print(ip['ip'])
34 | elif args.ip is not None:
35 | ip = cc.view(args.ip)
36 | print(json.dumps(ip, sort_keys=True, indent=4, separators=(',', ': ')))
37 | else:
38 | parser.print_help()
39 |
40 |
--------------------------------------------------------------------------------
/network/check_ripe_last_route.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import argparse
3 | import sys
4 | from dateutil.parser import parse
5 |
6 | if __name__ == "__main__":
7 |     parser = argparse.ArgumentParser(description='List IP prefixes advertised by an AS with the last date each was advertised')
8 | parser.add_argument('ASN', help="AS Number")
9 | args = parser.parse_args()
10 |
11 | try:
12 | asn = int(args.ASN)
13 | except ValueError:
14 | try:
15 | asn = int(args.ASN[2:])
16 | except ValueError:
17 | print("Invalid AS number")
18 | sys.exit(-1)
19 |
20 | r = requests.get("https://stat.ripe.net/data/routing-history/data.json?min_peers=0&resource=AS{}".format(asn))
21 | if r.status_code != 200:
22 | print("Request failed : HTTP {}".format(r.status_code))
23 | sys.exit(-1)
24 |
25 | data = r.json()
26 |
27 | last_route = None
28 |
29 | for prefix in data["data"]['by_origin'][0]['prefixes']:
30 | print(prefix["prefix"] + " - " + prefix["timelines"][-1]["endtime"])
31 | dd = parse(prefix["timelines"][-1]["endtime"])
32 | if last_route is None:
33 | last_route = dd
34 | else:
35 | if dd > last_route:
36 | last_route = dd
37 |
38 | print("")
39 | print("Last route advertised: {}".format(last_route))
40 |
--------------------------------------------------------------------------------
/threats/pithus_send.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import requests
3 | import os
4 |
5 | def get_key():
6 | path = os.path.expanduser("~/.pithus")
7 | if os.path.isfile(path):
8 | with open(path) as f:
9 | return f.read().strip()
10 | return None
11 |
12 |
13 | class Pithus(object):
14 | def __init__(self, api_key):
15 | self.api_key = api_key
16 | self.base_url = "https://beta.pithus.org/api/"
17 |
18 | def send(self, data):
19 | files = {"file": data}
20 | headers = {"Authorization": "Token " + self.api_key}
21 | r = requests.post(self.base_url + "upload", files=files, headers=headers)
22 | if r.status_code != 200:
23 | raise Exception("Booo, that didn't work, HTTP code {}".format(r.status_code))
24 | return r.json()
25 |
26 |
27 | if __name__ == "__main__":
28 | parser = argparse.ArgumentParser(description="Upload an APK to Pithus")
29 | parser.add_argument("FILEPATH", help="File path")
30 | args = parser.parse_args()
31 |
32 | pithus = Pithus(get_key())
33 | with open(args.FILEPATH, "rb") as f:
34 | data = f.read()
35 |
36 | try:
37 | r = pithus.send(data)
38 |     except Exception as e:
39 | print("Upload failed")
40 | print(e)
41 | else:
42 | print("Upload success")
43 | print("https://beta.pithus.org/report/" + r["file_sha256"])
44 |
--------------------------------------------------------------------------------
/macos/symhash.py:
--------------------------------------------------------------------------------
1 | import lief
2 | import os
3 | import argparse
4 | from hashlib import md5
5 |
6 |
7 | def symhash(path):
8 | """
9 | Compute symhash
10 | Based on https://github.com/threatstream/symhash
11 | https://www.anomali.com/blog/symhash
12 | """
13 | sym_list = []
14 | binary = lief.parse(path)
15 | if isinstance(binary, lief.MachO.Binary):
16 | for s in binary.imported_symbols:
17 | sym_list.append(s.name)
18 | return md5(','.join(sorted(sym_list)).encode()).hexdigest()
19 | return None
20 |
21 |
22 | if __name__ == "__main__":
23 | parser = argparse.ArgumentParser(description='Compute Symhash on Mach-O files')
24 | parser.add_argument('PATH', help="file or folder")
25 | args = parser.parse_args()
26 |
27 | if os.path.isdir(args.PATH):
28 | for r, d, f in os.walk(args.PATH):
29 | for file in f:
30 | res = symhash(os.path.join(r, file))
31 | if res:
32 | print("{:40} - {}".format(file, res))
33 | else:
34 | print("{:40} - Not a Mach-O file".format(file))
35 | elif os.path.isfile(args.PATH):
36 | res = symhash(args.PATH)
37 | if res:
38 | print("{} - {}".format(args.PATH, res))
39 | else:
40 | print("{} - Not a Mach-O file".format(args.PATH))
41 | else:
42 | print("Invalid Path")
43 |
44 |
--------------------------------------------------------------------------------
/pe/extract_sections_python.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import struct
4 |
5 |
6 | def get_sections_from_pe(fpath):
7 | """
8 | This is a dirty hack
9 | """
10 | with open(fpath, 'rb') as f:
11 | data = f.read()
12 | pe_addr = struct.unpack('I', data[0x3c:0x40])[0]
13 | nb_sections = struct.unpack('H', data[pe_addr+6:pe_addr+8])[0]
14 | optional_header_size = struct.unpack('H', data[pe_addr+20:pe_addr+22])[0]
15 | section_addr = pe_addr + 24 + optional_header_size
16 | image_base = struct.unpack('I', data[pe_addr+24+28:pe_addr+24+32])[0]
17 | i = section_addr
18 | sections = []
19 | for j in range(nb_sections):
20 | sections.append([
21 | data[i:i+8].decode('utf-8').strip('\x00'),
22 | struct.unpack('I', data[i+8:i+12])[0], #VirtSize
23 | struct.unpack('I', data[i+12:i+16])[0], #VirtAddress
24 | struct.unpack('I', data[i+16:i+20])[0], #RawSize
25 | struct.unpack('I', data[i+20:i+24])[0], #RawData
26 | ])
27 | i += 40
28 | return sections, image_base
29 |
30 |
31 | if __name__ == '__main__':
32 | parser = argparse.ArgumentParser(description='Parse sections from a PE file in pure python')
33 | parser.add_argument('PE', help='PE file')
34 | args = parser.parse_args()
35 |
36 | sections, image_base = get_sections_from_pe(args.PE)
37 | for s in sections:
38 | print(s)
39 |
--------------------------------------------------------------------------------
/harpoon-extra/domain_location.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import sys
4 | import os
5 | from dns import resolver, reversename
6 | from harpoon.commands.ip import CommandIp
7 |
8 |
9 | if __name__ == '__main__':
10 | parser = argparse.ArgumentParser(description='Give information on domain location')
11 | parser.add_argument('FILE', help='File containing domain list')
12 | args = parser.parse_args()
13 |
14 | if not os.path.isfile(args.FILE):
15 | print('File does not exist')
16 | sys.exit(1)
17 |
18 | with open(args.FILE, 'r') as f:
19 | data = f.read().split('\n')
20 |
21 | cip = CommandIp()
22 |
23 | print('Domain;IP;ASN;AS Name;Country;City')
24 | for domain in data:
25 | try:
26 | answers = resolver.query(domain, 'A')
27 | except (resolver.NoAnswer, resolver.NXDOMAIN):
28 | print("%s;;;;;" % domain)
29 | except resolver.NoNameservers:
30 | print("%s;;;;;" % domain)
31 | else:
32 | for rdata in answers:
33 | info = cip.ipinfo(rdata.address)
34 | print("%s;%s;%i;%s;%s;%s" % (
35 | domain,
36 | rdata.address,
37 | info['asn'],
38 | info['asn_name'],
39 | info['country'],
40 | info['city']
41 | )
42 | )
43 |
--------------------------------------------------------------------------------
/android/README.md:
--------------------------------------------------------------------------------
1 | # Android
2 |
3 | Scripts relying mostly on [androguard](https://github.com/androguard/androguard); a minimal usage sketch follows the list
4 |
5 | * `androguard_json.py` : generate a JSON with information about the APK like Koodous does
6 | * `get_package_name.py` : extract package name
7 | * `get_dex.py` : extract the classes.dex file from APKs
8 | * `yaradex.py` : run a yara rule over the dex classes of an APK file
9 | * `koodous_tag.py` : comment on some Koodous samples
10 | * `koodous_search.py` : search in Koodous
11 | * `download_androguard_report.py` : download androguard report from Koodous, copy of [this script](https://github.com/Koodous/androguard-yara/blob/master/download_androguard_report.py) updated for Python 3
12 | * `dump_android_backup.py`: decrypt and decompress an Android backup file (modified version of [this script](https://github.com/FloatingOctothorpe/dump_android_backup))
13 | * `extract_firebase.py` : check if a Firebase address is present in the resources
14 | * `extract_rsrc_strings.py` : list all strings in resources
15 | * `get_method_code.py` : extract code, byte code or hex code from a method
16 | * `get_certificate.py` : extract certificate information
17 | * `print_frosting.py` : check if an APK contains Google Play Metadata (also called frosting) ([ref](https://stackoverflow.com/questions/48090841/security-metadata-in-android-apk/51857027#51857027))
18 | * `snoopdroid_vt_check.py` : check snoopdroid results on VT
19 | * `is_obfuscated.py` : check if class names are obfuscated or not
20 |
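21 | Most of these scripts follow the same androguard pattern: check the file type
22 | with `androconf.is_android`, then load it with `APK`. A minimal sketch (with a
23 | hypothetical `sample.apk`):
24 | 
25 | ```python
26 | from androguard.core.bytecodes.apk import APK
27 | from androguard.core import androconf
28 | 
29 | if androconf.is_android("sample.apk") == "APK":
30 |     a = APK("sample.apk")
31 |     print(a.get_package())   # package name
32 |     dex = a.get_dex()        # raw classes.dex bytes
33 | ```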
--------------------------------------------------------------------------------
/network/cidr_reduce.py:
--------------------------------------------------------------------------------
1 | import ipaddress
2 | import argparse
3 |
4 | if __name__ == "__main__":
5 | parser = argparse.ArgumentParser(
6 | description="Reduce IPs into IP ranges from a file"
7 | )
8 | parser.add_argument("FILE", help="File containing list of IP addresses")
9 | args = parser.parse_args()
10 |
11 | with open(args.FILE, "r") as f:
12 | data = list(set([d.strip() for d in f.read().split()]))
13 |
14 | res = []
15 | entries = sorted(data)
16 | while len(entries) != 0:
17 | ip = entries.pop()
18 | ipp = ipaddress.ip_address(ip)
19 | if ipp.version != 4:
20 | print("{} is not an IPv4 address, skipping".format(ip))
21 | continue
22 | cidr = 32
23 | cidr_found = False
24 | while not cidr_found:
25 |             potential_net = ipaddress.ip_network("{}/{}".format(ip, cidr), False)
26 |             for host in potential_net.hosts():
27 |                 if str(host) != ip and str(host) not in entries:
28 |                     cidr_found = True
29 |                     cidr += 1  # revert to the last fully-covered prefix
30 |                     break
31 |             else:
32 |                 cidr -= 1  # every host is in the list, try a larger prefix
33 | network = ipaddress.ip_network("{}/{}".format(ip, cidr), False)
34 | for host in network.hosts():
35 | if str(host) in entries:
36 | entries.remove(str(host))
37 |
38 | res.append(str(network))
39 |
40 | for entry in sorted(res):
41 | print(entry)
42 |
--------------------------------------------------------------------------------
/pt/get_hashes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import os
4 | import sys
5 | import json
6 | import argparse
7 | from passivetotal.libs.dns import DnsRequest
8 | from passivetotal.libs.enrichment import EnrichmentRequest
9 |
10 | def get_config():
11 | conf_file = os.path.join(os.path.expanduser("~"), ".config/passivetotal/api_config.json")
12 | if os.path.isfile(conf_file):
13 | with open(conf_file, 'r') as f:
14 | conf = json.loads(f.read())
15 | else:
16 | print('No config file')
17 | sys.exit(1)
18 | return conf
19 |
20 |
21 | if __name__ == '__main__':
22 | parser = argparse.ArgumentParser(description='Get hashes from PT for domains')
23 | parser.add_argument('FILE', help='File with list of domains')
24 | args = parser.parse_args()
25 |
26 | conf = get_config()
27 |
28 | with open(args.FILE, 'r') as f:
29 | domains = list(set([d.strip() for d in f.read().split()]))
30 |
31 | client = EnrichmentRequest(conf['username'], conf['api_key'])
32 |
33 | for domain in domains:
34 | if domain == '':
35 | continue
36 | print(f"################ {domain}")
37 | try:
38 | raw_results = client.get_malware(query=domain)
39 | if raw_results['success']:
40 | for s in raw_results['results']:
41 | print(s)
42 | else:
43 | print("Request failed")
44 |         except Exception as e:
45 |             print("Something failed: {}".format(e))
46 |
--------------------------------------------------------------------------------
/pt/get_osint.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import os
4 | import sys
5 | import json
6 | import argparse
7 | from passivetotal.libs.dns import DnsRequest
8 | from passivetotal.libs.enrichment import EnrichmentRequest
9 |
10 | def get_config():
11 | conf_file = os.path.join(os.path.expanduser("~"), ".config/passivetotal/api_config.json")
12 | if os.path.isfile(conf_file):
13 | with open(conf_file, 'r') as f:
14 | conf = json.loads(f.read())
15 | else:
16 | print('No config file')
17 | sys.exit(1)
18 | return conf
19 |
20 |
21 | if __name__ == '__main__':
22 | parser = argparse.ArgumentParser(description='Get OSINT from PT for domains')
23 | parser.add_argument('FILE', help='File with list of domains')
24 | args = parser.parse_args()
25 |
26 | conf = get_config()
27 |
28 | with open(args.FILE, 'r') as f:
29 | domains = list(set([d.strip() for d in f.read().split()]))
30 |
31 | client = EnrichmentRequest(conf['username'], conf['api_key'])
32 |
33 | for domain in domains:
34 | if domain == '':
35 | continue
36 | print(f"################ {domain}")
37 | try:
38 | raw_results = client.get_osint(query=domain)
39 | if raw_results['success']:
40 | for s in raw_results['results']:
41 | print(s)
42 | else:
43 | print("Request failed")
44 |         except Exception as e:
45 |             print("Something failed: {}".format(e))
46 |
--------------------------------------------------------------------------------
/threats/cobaltstrike_decode.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import struct
3 | import sys
4 |
5 | """
6 | Decrypt a Cobalt Strike encrypted beacon
7 |
8 | Author: Etienne Maynier, Amnesty Tech
9 | Date: March 2020
10 | """
11 |
12 | def xor(a, b):
13 | return bytearray([a[0]^b[0], a[1]^b[1], a[2]^b[2], a[3]^b[3]])
14 |
15 |
16 | if __name__ == '__main__':
17 | parser = argparse.ArgumentParser(description='Decode an encoded Cobalt Strike beacon')
18 |     parser.add_argument('PAYLOAD', help='Encoded beacon file')
19 | args = parser.parse_args()
20 |
21 | with open(args.PAYLOAD, "rb") as f:
22 | data = f.read()
23 |
24 | # The base address of the sample change depending on the code
25 | ba = data.find(b"\xe8\xd4\xff\xff\xff")
26 | if ba == -1:
27 | ba = data.find(b"\xe8\xd0\xff\xff\xff")
28 | if ba == -1:
29 | print("Base Address not found")
30 | sys.exit(1)
31 | ba += 5
32 |
33 | key = data[ba:ba+4]
34 | print("Key : {}".format(key))
35 | size = struct.unpack("I", xor(key, data[ba+4:ba+8]))[0]
36 | print("Size : {}".format(size))
37 |
38 | res = bytearray()
39 | i = ba+8
40 | while i < (len(data) - ba - 8):
41 | d = data[i:i+4]
42 | res += xor(d, key)
43 | key = d
44 | i += 4
45 |
46 | if not res.startswith(b"MZ"):
47 | print("Invalid decoding, no PE header")
48 |
49 | with open("a.out", "wb+") as f:
50 | f.write(res)
51 | print("PE file extracted in a.out")
52 |
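53 | # Layout handled above: [loader stub][key:4][size XOR key:4][encoded payload]
54 | # The payload is a 4-byte rolling XOR: each plaintext block is
55 | # cipher[i] XOR cipher[i-1], seeded with the 4-byte key.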
--------------------------------------------------------------------------------
/android/extract_rsrc_strings.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python3
2 | import sys
3 | import argparse
4 | from androguard.core.bytecodes import apk
5 | from androguard.core import androconf
6 | from androguard.core.bytecodes.axml import ARSCParser
7 | from lxml import etree
8 |
9 |
10 | if __name__ == '__main__':
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument("PATH", help="Path of the resource file")
13 | args = parser.parse_args()
14 |
15 | ret_type = androconf.is_android(args.PATH)
16 | if ret_type == "APK":
17 | a = apk.APK(args.PATH)
18 | arscobj = a.get_android_resources()
19 | if not arscobj:
20 | print("The APK does not contain a resources file!", file=sys.stderr)
21 | sys.exit(0)
22 | elif ret_type == "ARSC":
23 | with open(args.PATH, 'rb') as fp:
24 | arscobj = ARSCParser(fp.read())
25 | if not arscobj:
26 | print("The resources file seems to be invalid!", file=sys.stderr)
27 | sys.exit(1)
28 | else:
29 | print("Unknown file type!", file=sys.stderr)
30 | sys.exit(1)
31 |
32 | xmltree = arscobj.get_public_resources(arscobj.get_packages_names()[0])
33 | x = etree.fromstring(xmltree)
34 | for elt in x:
35 | if elt.get('type') == 'string':
36 | val = arscobj.get_resolved_res_configs(int(elt.get('id')[2:], 16))[0][1]
37 | print('{}\t{}\t{}'.format(
38 | elt.get('id'),
39 | elt.get('name'),
40 | val
41 | ))
42 |
--------------------------------------------------------------------------------
/android/koodous_search.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import os
4 | import sys
5 | import requests
6 |
7 |
8 | def search(key, query):
9 | """
10 | Search in Koodous
11 | """
12 | url = "https://api.koodous.com/apks"
13 | headers = {"Authorization":"Token {}".format(key)}
14 | params = {'search':query}
15 | results = []
16 | finished = False
17 | next = None
18 | while not finished:
19 | if next:
20 | r = requests.get(url=next, headers=headers)
21 | else:
22 | r = requests.get(url=url, headers=headers, params=params)
23 | if r.status_code != 200:
24 | return results
25 | data = r.json()
26 | results += data['results']
27 | if data.get('next', None):
28 | next = data['next']
29 | else:
30 | finished = True
31 | return results
32 |
33 |
34 | """
35 | Search a query on Koodous and print the SHA256 of each result
36 | """
37 | if __name__ == '__main__':
38 | parser = argparse.ArgumentParser()
39 | parser.add_argument("QUERY", help="Query to be done on Koodous")
40 | args = parser.parse_args()
41 |
42 | koodous_conf = os.path.expanduser("~/.koodous")
43 | if not os.path.isfile(koodous_conf):
44 | print("Please add your Koodous key to ~/.koodous")
45 | sys.exit(-1)
46 |
47 | with open(koodous_conf, 'r') as f:
48 | key = f.read().strip()
49 |
50 | apks = search(key, args.QUERY)
51 | for app in apks:
52 | print(app['sha256'])
53 |
--------------------------------------------------------------------------------
/pe/common_strings.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import re
4 | import string
5 |
6 | def strings(data):
7 | # Inspired by https://github.com/Neo23x0/yarGen/blob/master/yarGen.py
8 | strings_full = re.findall(b"[\x1f-\x7e]{6,}", data)
9 | strings_wide = re.findall(b"(?:[\x1f-\x7e][\x00]){6,}", data)
10 | return strings_full, strings_wide
11 |
12 | if __name__ == '__main__':
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument('FILE', nargs='+',
15 | help='File to be processed')
16 | args = parser.parse_args()
17 |
18 | if len(args.FILE) < 2:
19 | print("You need at least 2 files")
20 | else:
21 | ascii_strings = None
22 | wide_strings = None
23 | for f in args.FILE:
24 | with open(f, 'rb') as fin:
25 | print("Reading {}".format(f))
26 | asciii, wide = strings(fin.read())
27 | if ascii_strings:
28 | ascii_strings = ascii_strings.intersection(asciii)
29 | else:
30 | ascii_strings = set(asciii)
31 | if wide_strings:
32 | wide_strings = wide_strings.intersection(wide)
33 | else:
34 | wide_strings = set(wide)
35 | i = 0
36 | for s in ascii_strings:
37 | print("$string{} = \"{}\" ascii".format(i, s.decode('utf-8')))
38 | i += 1
39 | for s in wide_strings:
40 | print("$string{} = \"{}\" wide".format(i, s.decode('utf-16')))
41 | i += 1
42 |
--------------------------------------------------------------------------------
/forensic/filetimeline.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import sys
4 | import argparse
5 |
6 |
7 | def get_stat(file_path):
8 | """
9 | Get stat information from a filepath
10 | Returns (PATH, SIZE, Access Time, Modification Time, Change Time, uid, gid, access rights)
11 | """
12 | stat = os.stat(file_path)
13 | return [
14 | file_path,
15 | stat.st_size,
16 | stat.st_atime,
17 | stat.st_mtime,
18 | stat.st_ctime,
19 | stat.st_uid,
20 | stat.st_gid,
21 | oct(stat.st_mode)
22 | ]
23 |
24 |
25 | if __name__ == '__main__':
26 | parser = argparse.ArgumentParser(description='Create a timeline of files')
27 | parser.add_argument('PATH', help='Path of the folder to create the timeline')
28 |     parser.add_argument('--output', '-o', required=True, help='Output file path')
29 | args = parser.parse_args()
30 |
31 | if not os.path.exists(args.PATH):
32 | print("Directory does not exist")
33 | sys.exit(1)
34 |
35 | fout = open(args.output, "a+")
36 | fout.write("|".join(["Path", "Size", "Access Time", "Modification Time", "Change Time", "uid", "gid", "access rights"]) + "\n")
37 |
38 | count = 0
39 | for root, dirs, files in os.walk(args.PATH):
40 | for name in files:
41 | infos = get_stat(os.path.join(root, name))
42 | fout.write("|".join([str(a) for a in infos]) + "\n")
43 | count += 1
44 |
45 | fout.close()
46 |
47 | print("Information on %i files stored in %s" % (count, args.output))
48 |
--------------------------------------------------------------------------------
/android/get_dex.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python3
2 | import os
3 | import sys
4 | import argparse
5 | import hashlib
6 | from androguard.core.bytecodes.apk import APK
7 | from androguard.core import androconf
8 |
9 |
10 | def get_dex(apk_path):
11 | """
12 |     Extract the classes.dex file from an APK
13 | """
14 | a = APK(apk_path)
15 | return a.get_dex()
16 |
17 |
18 | if __name__ == '__main__':
19 | parser = argparse.ArgumentParser()
20 | parser.add_argument("PATH", help="Path to a file or folder")
21 | args = parser.parse_args()
22 |
23 | if os.path.isdir(args.PATH):
24 | for f in os.listdir(args.PATH):
25 | apk_path = os.path.join(args.PATH, f)
26 | if os.path.isfile(apk_path):
27 | if androconf.is_android(apk_path) == 'APK':
28 | dex_filename = os.path.splitext(apk_path)[0] + '.classes.dex'
29 | if not os.path.exists(dex_filename):
30 | with open(dex_filename, 'wb') as f:
31 | f.write(get_dex(apk_path))
32 | print("Dex file {} created".format(dex_filename))
33 | elif os.path.isfile(args.PATH):
34 | dex_filename = os.path.splitext(args.PATH)[0] + '.classes.dex'
35 | if os.path.exists(dex_filename):
36 | print("{} already exist".format(dex_filename))
37 | else:
38 | with open(dex_filename, 'wb') as f:
39 | f.write(get_dex(args.PATH))
40 | print("Dex file {} created".format(dex_filename))
41 | else:
42 | print("Invalid path")
43 | sys.exit(-1)
44 |
--------------------------------------------------------------------------------
/android/is_obfuscated.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python3
2 | import os
3 | import sys
4 | import argparse
5 | import hashlib
6 | from androguard.misc import AnalyzeAPK
7 | from androguard.core import androconf
8 |
9 |
10 | def has_classnames_obfuscated(dx):
11 | """
12 | Check if the APK has the class names obfuscated
13 | Count the number of classes with a name of one character
14 |     Returns True if more than 50% of classes have names of 1 char
15 | """
16 | cn = [c.name[1:-1].split('/') for c in dx.get_classes()]
17 | cnn = [len(a.split('$')[0]) for b in cn for a in b]
18 | return (cnn.count(1) / len(cnn)) > 0.5
19 |
20 |
21 | if __name__ == '__main__':
22 | parser = argparse.ArgumentParser()
23 | parser.add_argument("PATH", help="Path to a file or folder")
24 | args = parser.parse_args()
25 |
26 | if os.path.isdir(args.PATH):
27 | for f in os.listdir(args.PATH):
28 |             if os.path.isfile(os.path.join(args.PATH, f)):
29 | if androconf.is_android(os.path.join(args.PATH, f)) == 'APK':
30 | a, d, dx = AnalyzeAPK(os.path.join(args.PATH, f))
31 | if has_classnames_obfuscated(dx):
32 | print('{:45} - OBFUSCATED'.format(f))
33 | else:
34 | print('{:45} - NOT OBFUSCATED'.format(f))
35 | elif os.path.isfile(args.PATH):
36 | a, d, dx = AnalyzeAPK(args.PATH)
37 | if has_classnames_obfuscated(dx):
38 | print("Obfuscated")
39 | else:
40 | print("Not obfuscated")
41 | else:
42 | print("Invalid path")
43 | sys.exit(-1)
44 |
--------------------------------------------------------------------------------
/android/extract_firebase.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python3
2 | import os
3 | import sys
4 | import argparse
5 | from androguard.core.bytecodes import apk
6 | from androguard.core import androconf
7 | from androguard.core.bytecodes.axml import ARSCParser
8 | from lxml import etree
9 |
10 |
11 | def get_firebase(fpath):
12 | a = apk.APK(fpath)
13 | arscobj = a.get_android_resources()
14 | if not arscobj:
15 | return None
16 | xmltree = arscobj.get_public_resources(arscobj.get_packages_names()[0])
17 | x = etree.fromstring(xmltree)
18 | for elt in x:
19 | if elt.get('type') == 'string':
20 | val = arscobj.get_resolved_res_configs(int(elt.get('id')[2:], 16))[0][1]
21 | if val.endswith('firebaseio.com'):
22 | return val
23 | return None
24 |
25 |
26 | if __name__ == '__main__':
27 | parser = argparse.ArgumentParser()
28 | parser.add_argument("PATH", help="Path of a folder")
29 | args = parser.parse_args()
30 |
31 |
32 | if os.path.isdir(args.PATH):
33 | for f in os.listdir(args.PATH):
34 | fpath = os.path.join(args.PATH, f)
35 | if os.path.isfile(fpath):
36 | if androconf.is_android(fpath) == 'APK':
37 | r = get_firebase(fpath)
38 | if r:
39 | print("{} : {}".format(fpath, r))
40 | elif os.path.isfile(args.PATH):
41 | if androconf.is_android(args.PATH) == 'APK':
42 | r = get_firebase(args.PATH)
43 | if r:
44 | print(r)
45 | else:
46 | print("Please give an APK file or a folder")
47 |
--------------------------------------------------------------------------------
/network/list_mullvad_ips.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import requests
4 | from IPy import IP
5 |
6 | URL = "https://mullvad.net/en/servers/__data.json"
7 |
8 |
9 | def is_ip(target):
10 | """
11 | Test if a string is an IP address
12 | """
13 | if isinstance(target, str):
14 | try:
15 | IP(target)
16 | return True
17 | except ValueError:
18 | return False
19 | else:
20 | return False
21 |
22 |
23 | if __name__ == "__main__":
24 | r = requests.get(URL, stream=True)
25 | print("Hostname,ipv4,ipv6")
26 | for line in r.iter_lines():
27 | data = json.loads(line)
28 |
29 | for index in range(len(data.get("data", []))):
30 | if not isinstance(data["data"][index], str):
31 | continue
32 | if data["data"][index].endswith(".relays.mullvad.net"):
33 | try:
34 | if is_ip(data["data"][index+1]):
35 | ipv4 = data["data"][index+1]
36 | ipv6 = data["data"][index+2]
37 | elif is_ip(data["data"][index+4]):
38 | ipv4 = data["data"][index+4]
39 | ipv6 = data["data"][index+5]
40 | elif is_ip(data["data"][index+2]):
41 | ipv4 = data["data"][index+2]
42 | ipv6 = data["data"][index+3]
43 | else:
44 | continue
45 | except IndexError:
46 | continue
47 | print("{},{},{}".format(data["data"][index], ipv4, ipv6))
48 |
--------------------------------------------------------------------------------
/pe/README.md:
--------------------------------------------------------------------------------
1 | # PE
2 |
3 | * build_shellcode_pe.py : build a PE file from a shellcode
4 | * checkpesize.py : Check that the size of a PE file is correct
5 | * common_strings.py : identify strings in common between several files
6 | * disitool.py : python program to extract PE signatures by [Didier Stevens](https://blog.didierstevens.com/programs/disitool/)
7 | * extract_sig.py : extract the digital signature from a PE file
8 | * extract_sections_python.py : extract information on sections in pure python
9 | * getnetguids.py : Script from [Cylance](https://github.com/cylance/GetNETGUIDs/blob/master/getnetguids.py), see [this blog post](https://medium.com/@seifreed/hunting-net-malware-40235e11dc05), updated for python 3
10 | * get_imphash.py : extract imp hash of PE files
11 | * get_richheaderhash.py : extract the RichPE hash of PE files
12 | * pecheck.py : pecheck developed by [Didier Stevens](https://blog.didierstevens.com/)
13 | * pe.py : display information about a PE file (python2)
14 | * pescanner.py : display information about PE files, script by Michael Ligh and published in the [Malware Analysts Cookbook](https://www.amazon.fr/Malware-Analysts-Cookbook-DVD-Techniques/dp/0470613033) (python 2)
15 | * pesearch.py : search for a string in a PE file
16 | * petimeline.py : Create a timeline of PE/DLL timestamp
17 | * print_signature.py : check if PE files are signed
18 | * py2exe_unpack.py : extract and decompyle py2exe payloads (mostly copied from [unpy2exe](https://github.com/matiasb/unpy2exe))
19 | * pyinstxtractor.py : extract the contents of a PyInstaller generated Windows executable file by Extreme Coders ([source](https://sourceforge.net/projects/pyinstallerextractor/))
20 | * unxor.py : check if the file is a XORed PE file and if so decode it (single-byte key only; see the sketch below)
21 |
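22 | For the single-byte XOR case the key can be guessed directly from the `MZ`
23 | magic; a minimal sketch of the idea (hypothetical `sample.bin`, not the exact
24 | logic of `unxor.py`):
25 | 
26 | ```python
27 | def guess_xor_key(data):
28 |     """Guess a 1-byte XOR key assuming the file starts with 'MZ'."""
29 |     key = data[0] ^ ord("M")
30 |     if data[1] ^ key == ord("Z"):
31 |         return key
32 |     return None
33 | 
34 | with open("sample.bin", "rb") as f:
35 |     data = f.read()
36 | key = guess_xor_key(data)
37 | if key is not None:
38 |     with open("sample_decoded.bin", "wb") as f:
39 |         f.write(bytes(b ^ key for b in data))
40 | ```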
--------------------------------------------------------------------------------
/android/get_package_name.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python3
2 | import os
3 | import sys
4 | import argparse
5 | import hashlib
6 | from androguard.core.bytecodes.apk import APK
7 | from androguard.core import androconf
8 |
9 |
10 | def get_package_name(apk_path):
11 | """
12 | Extract the package name of an APK
13 | """
14 | a = APK(apk_path)
15 | return a.get_package(), a.get_androidversion_code()
16 |
17 |
18 | def get_sha256(path):
19 | """
20 | Get SHA256 hash of the given file
21 | """
22 | m = hashlib.sha256()
23 | with open(path, 'rb') as fin:
24 | m.update(fin.read())
25 | return m.hexdigest()
26 |
27 |
28 | if __name__ == '__main__':
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("PATH", help="Path to a file or folder")
31 | args = parser.parse_args()
32 |
33 | if os.path.isdir(args.PATH):
34 | for f in os.listdir(args.PATH):
35 |             if os.path.isfile(os.path.join(args.PATH, f)):
36 | if androconf.is_android(os.path.join(args.PATH, f)) == 'APK':
37 | pkg_name, pkg_version = get_package_name(os.path.join(args.PATH, f))
38 | print('{:45} - {:20} - {}'.format(
39 | f,
40 | pkg_name,
41 | pkg_version
42 | )
43 | )
44 | elif os.path.isfile(args.PATH):
45 | print("File:\t {}".format(os.path.basename(args.PATH)))
46 | print("SHA256:\t {}".format(get_sha256(args.PATH)))
47 | pkg_name, pkg_version = get_package_name(args.PATH)
48 | print("Package: {}".format(pkg_name))
49 | print("Version: {}".format(pkg_version))
50 | else:
51 | print("Invalid path")
52 | sys.exit(-1)
53 |
--------------------------------------------------------------------------------
/osint/google_doc_info.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import requests
3 | import json
4 |
5 | # Inspired by https://github.com/Malfrats/xeuledoc/tree/master
6 |
7 |
8 | def get_info(_id: str):
9 | """
10 |     Fetch the Google Drive metadata for a shared document ID
11 | """
12 | headers = {
13 | "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
14 | "X-Origin": "https://drive.google.com"
15 | }
16 | r = requests.get(
17 | "https://clients6.google.com/drive/v2beta/files/{}?fields=alternateLink%2CcopyRequiresWriterPermission%2CcreatedDate%2Cdescription%2CdriveId%2CfileSize%2CiconLink%2Cid%2Clabels(starred%2C%20trashed)%2ClastViewedByMeDate%2CmodifiedDate%2Cshared%2CteamDriveId%2CuserPermission(id%2Cname%2CemailAddress%2Cdomain%2Crole%2CadditionalRoles%2CphotoLink%2Ctype%2CwithLink)%2Cpermissions(id%2Cname%2CemailAddress%2Cdomain%2Crole%2CadditionalRoles%2CphotoLink%2Ctype%2CwithLink)%2Cparents(id)%2Ccapabilities(canMoveItemWithinDrive%2CcanMoveItemOutOfDrive%2CcanMoveItemOutOfTeamDrive%2CcanAddChildren%2CcanEdit%2CcanDownload%2CcanComment%2CcanMoveChildrenWithinDrive%2CcanRename%2CcanRemoveChildren%2CcanMoveItemIntoTeamDrive)%2Ckind&supportsTeamDrives=true&enforceSingleParent=true&key=AIzaSyC1eQ1xj69IdTMeii5r7brs3R90eck-m7k".format(_id),
18 | headers=headers
19 | )
20 | if r.status_code != 200:
21 | print("Invalid answer: {}".format(r.status_code))
22 | print(r.text)
23 | return {}
24 | return r.json()
25 |
26 |
27 | if __name__ == "__main__":
28 | parser = argparse.ArgumentParser(description='Get information about a shared document')
29 | parser.add_argument("DOCID", help="ID of the doc")
30 | args = parser.parse_args()
31 |
32 | print(json.dumps(get_info(args.DOCID), indent=4))
33 |
--------------------------------------------------------------------------------
/miasm/simu_sc_linux64.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 | from miasm.jitter.csts import PAGE_READ, PAGE_WRITE, EXCEPT_SYSCALL
3 | from miasm.analysis.machine import Machine
4 | from pdb import pm
5 |
6 | SOCKETCALL = {
7 | 1: "SYS_SOCKET",
8 | 2: "SYS_BIND",
9 | 3: "SYS_CONNECT",
10 | 4: "SYS_LISTEN",
11 | 5: "SYS_ACCEPT",
12 | 6: "SYS_GETSOCKNAME",
13 | 7: "SYS_GETPEERNAME",
14 | 8: "SYS_SOCKETPAIR",
15 | 9: "SYS_SEND",
16 | 10: "SYS_RECV",
17 | 11: "SYS_SENDTO",
18 | 12: "SYS_RECVFROM",
19 | 13: "SYS_SHUTDOWN",
20 | 14: "SYS_SETSOCKOPT",
21 | 15: "SYS_GETSOCKOPT",
22 | 16: "SYS_SENDMSG",
23 | 17: "SYS_RECVMSG",
24 | 18: "SYS_ACCEPT4",
25 | 19: "SYS_RECVMMSG",
26 | 20: "SYS_SENDMMSG"
27 | }
28 |
29 |
30 | def get_str(jit, addr):
31 | data = jit.vm.get_mem(addr, 10)
32 | return data[:data.find(b'\x00')].decode('utf-8')
33 |
34 |
35 | def exception_int(jitter):
36 | print("SYSCALL {}".format(jitter.cpu.EAX))
37 | jitter.cpu.set_exception(0)
38 | return True
39 |
40 | if __name__ == '__main__':
41 | parser = ArgumentParser(description="x86 64 basic Jitter")
42 | parser.add_argument("filename", help="x86 64 shellcode filename")
43 | parser.add_argument("-j", "--jitter",
44 | help="Jitter engine",
45 | default="python")
46 | args = parser.parse_args()
47 |
48 | myjit = Machine("x86_64").jitter(args.jitter)
49 | myjit.init_stack()
50 |
51 | data = open(args.filename, 'rb').read()
52 | run_addr = 0x40000000
53 | myjit.vm.add_memory_page(run_addr, PAGE_READ | PAGE_WRITE, data)
54 | #myjit.set_trace_log()
55 | myjit.add_exception_handler(EXCEPT_SYSCALL, exception_int)
56 | myjit.run(run_addr)
57 |
--------------------------------------------------------------------------------
/shodan/rawshodan.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import shodan
3 | import argparse
4 | import os
5 | import sys
6 | import json
7 | from dateutil.parser import parse
8 | from datetime import datetime, timedelta
9 |
10 |
11 | if __name__ == '__main__':
12 | parser = argparse.ArgumentParser(description='Fingerprint a system based on Shodan information')
13 | parser.add_argument('IP', help='IP')
14 |     parser.add_argument('--history', '-H', action='store_true', help='Show historical data')
15 | parser.add_argument('--key', '-k', help='Shodan API key')
16 | parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
17 |
18 | args = parser.parse_args()
19 |
20 | # Deal with the key first
21 | if args.key:
22 | key = args.key
23 | else:
24 | cpath = os.path.expanduser('~/.shodan/api_key')
25 | if os.path.isfile(cpath):
26 | with open(cpath, 'r') as f:
27 | key = f.read().strip()
28 | else:
29 | print("No API key found")
30 | sys.exit(1)
31 |
32 |
33 | api = shodan.Shodan(key)
34 | try:
35 | res = api.host(args.IP, history=args.history)
36 | except shodan.exception.APIError:
37 | print("IP not found in Shodan")
38 | else:
39 | if args.verbose:
40 | print(json.dumps(res, sort_keys=False, indent=4))
41 | else:
42 | print("%i entries:" % len(res['data']))
43 | i = 0
44 | for d in res['data']:
45 | print(d['timestamp'])
46 | print(d['_shodan']['module'])
47 | print("%s/%i" % (d['transport'], d['port']))
48 | print(d['data'])
49 | if 'html' in d:
50 | print(d['html'])
51 | if 'http' in d:
52 | print(json.dumps(d['http']))
53 | print('')
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/format/parsezip.py:
--------------------------------------------------------------------------------
1 | from struct import unpack
2 | import argparse
3 | import sys
4 |
5 |
6 | class Buffer(object):
7 | """
8 | Buffer that handles bytes objects
9 | """
10 | def __init__(self, data: bytes):
11 | self._data = data
12 | self._index = 0
13 |
14 | def __len__(self) -> int:
15 | return len(self._data)
16 |
17 | @property
18 | def index(self) -> int:
19 | return self._index
20 |
21 | @index.setter
22 | def index(self, value: int):
23 |         if value is not None:
24 |             self._index = value
25 |
26 | def read(self, size):
27 | data = self._data[self._index:self._index + size]
28 | self._index += size
29 | return data
30 |
31 |     def read_int(self, little: bool = False):
32 |         data, = unpack("<I" if little else ">I", self.read(4))
33 |         return data
34 |
--------------------------------------------------------------------------------
/network/dns_resolve.py:
--------------------------------------------------------------------------------
20 |         if len(dd) > 0:
21 | try:
22 | res = resolver.query(dd, "A")
23 | except (resolver.NoAnswer, resolver.NXDOMAIN):
24 |                 results[dd] = [True, []]
25 | if args.verbose:
26 | print("{}: NXDOMAIN".format(dd))
27 | except resolver.NoNameservers:
28 |                 results[dd] = [False, ["SERVFAIL"]]
29 | if args.verbose:
30 | print("{}: SERVFAIL".format(dd))
31 | except exception.Timeout:
32 |                 results[dd] = [False, ["Timeout"]]
33 | if args.verbose:
34 | print("{}: Timeout".format(dd))
35 | else:
36 | addr = [r.address for r in res]
37 | results[dd] = [True, addr]
38 | if args.verbose:
39 | print("{}: {}".format(dd, addr))
40 | with open("resolutions.csv", "w+") as f:
41 | f.write("Domain,Success,Resolution\n")
42 | for domain in results.keys():
43 | f.write("{},{},{}\n".format(
44 | domain,
45 | results[domain][0],
46 | ";".join(results[domain][1])
47 | ))
48 |
49 | print("Results written in resolutions.csv")
50 |
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/web/cmsdetect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import random
4 | import string
5 | import requests
6 | from urllib.parse import urljoin
7 |
8 | SIGNATURES = {
9 | 'wordpress': ['wp-admin', 'wp-login.php'],
10 | 'drupal': ['CHANGELOG.txt', '/user/login', '/user/register', '/node/'],
11 | # 'opencart': ['index.php?route']
12 | # 'Joomla': ['option=com_']
13 | }
14 |
15 | USERAGENT = 'CMS-Detect/v0.1'
16 | headers = {'User-Agent': USERAGENT}
17 |
18 |
19 | def detect(domain, page):
20 | r = requests.get(urljoin(domain, page), headers=headers)
21 | return r.status_code
22 |
23 |
24 | def check_random_page(domain):
25 | random_string = ''.join(
26 | random.choice(string.ascii_lowercase + string.digits)
27 | for _ in range(15)
28 | )
29 | r = requests.get(urljoin(domain, random_string), headers=headers)
30 | return r.status_code
31 |
32 |
33 | if __name__ == '__main__':
34 | parser = argparse.ArgumentParser(description='Detect a CMS')
35 | parser.add_argument('DOMAIN', help='domain')
36 | parser.add_argument(
37 | '--test', '-t', choices=['drupal', 'wordpress', 'all'],
38 | default='all', help='List of CMS to be tested'
39 | )
40 | args = parser.parse_args()
41 |
42 | if not args.DOMAIN.startswith('http'):
43 | target = 'http://' + args.DOMAIN
44 | else:
45 | target = args.DOMAIN
46 |
47 | s = check_random_page(target)
48 | if s == 200:
49 | print("Returns 200 for non-existing pages, this script is useless")
50 |
51 | if args.test == 'all':
52 | for cms in SIGNATURES:
53 | print(cms.capitalize())
54 | for i in SIGNATURES[cms]:
55 | print("\t%s - %i" % (urljoin(target, i), detect(target, i)))
56 | else:
57 | for i in SIGNATURES[args.test]:
58 | print("%s - %i" % (urljoin(target, i), detect(target, i)))
59 |
--------------------------------------------------------------------------------
/pe/print_signature.py:
--------------------------------------------------------------------------------
1 | import lief
2 | import argparse
3 | import os
4 | import sys
5 |
6 |
7 | if __name__ == '__main__':
8 | parser = argparse.ArgumentParser(description='Process some PE Files')
9 | parser.add_argument('FILE', help='PE File')
10 |     parser.add_argument('--verbose', '-v', action="store_true", help='Verbose mode')
11 | args = parser.parse_args()
12 |
13 | if os.path.isdir(args.FILE):
14 | # Directory, check all
15 |         for f in os.listdir(args.FILE):
16 | if os.path.isfile(os.path.join(args.FILE, f)):
17 | binary = lief.parse(os.path.join(args.FILE, f))
18 | if binary:
19 | if binary.has_signature:
20 | print("{} - SIGNED".format(f))
21 | else:
22 | if args.verbose:
23 | print("{} - NOT SIGNED".format(f))
24 | else:
25 | if args.verbose:
26 | print("{} - NOT A PE FILE".format(f))
27 | elif os.path.isdir(os.path.join(args.FILE, f)):
28 | if args.verbose:
29 | print("{} - Directory".format(f))
30 | elif os.path.isfile(args.FILE):
31 | binary = lief.parse(args.FILE)
32 | if binary.has_signature:
33 | if args.verbose:
34 | for c in binary.signature.certificates:
35 | print(c)
36 | print("")
37 | else:
38 | issuer_serial = ":".join(map(lambda e : "{:02x}".format(e), binary.signature.signer_info.issuer[1]))
39 | for c in binary.signature.certificates:
40 | serial = ":".join(map(lambda e : "{:02x}".format(e), c.serial_number))
41 | if serial == issuer_serial:
42 | print(c)
43 | else:
44 | print("This binary is not signed")
45 | else:
46 | print("Invalid file path")
47 |
--------------------------------------------------------------------------------
/web/is_wp.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import argparse
4 | import sys
5 |
6 |
7 | def is_wp(domain):
8 | if not domain.startswith('http'):
9 | domain = 'http://' + domain + '/'
10 |
11 | try:
12 | r = requests.get(domain + '/wp-login.php')
13 | if r.status_code == 200:
14 | return True
15 | else:
16 | return False
17 | except requests.exceptions.ConnectionError:
18 | return False
19 |
20 | if __name__ == '__main__':
21 | parser = argparse.ArgumentParser(description='Check if a website is done with WordPress')
22 | subparsers = parser.add_subparsers(help='Subcommand')
23 | parser_a = subparsers.add_parser('domain', help='Information on a domain')
24 | parser_a.add_argument('DOMAIN', help='Domain')
25 | parser_a.set_defaults(subcommand='domain')
26 | parser_b = subparsers.add_parser('file', help='List of domains')
27 | parser_b.add_argument('FILE', help='File path')
28 | parser_b.set_defaults(subcommand='file')
29 |
30 | args = parser.parse_args()
31 |
32 | if 'subcommand' in args:
33 | if args.subcommand == 'domain':
34 | if is_wp(args.DOMAIN):
35 |                 print('Definitely a WordPress website')
36 | else:
37 | print('NOPE')
38 | elif args.subcommand == 'file':
39 | try:
40 | with open(args.FILE) as f:
41 | data = f.read().split('\n')
42 | except FileNotFoundError:
43 | print('File does not exist')
44 | sys.exit(1)
45 | for d in data:
46 | if d.strip() == '':
47 | continue
48 | if is_wp(d.strip()):
49 | print('%s;Yes' % d.strip())
50 | else:
51 | print('%s;No' % d.strip())
52 | else:
53 | parser.print_help()
54 | else:
55 | parser.print_help()
56 |
--------------------------------------------------------------------------------
/android/print_frosting.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | from androguard.core import androconf
5 | from androguard.misc import AnalyzeAPK
6 |
7 |
8 | # Ref : https://stackoverflow.com/questions/48090841/security-metadata-in-android-apk/51857027#51857027
9 | BLOCK_TYPES = {
10 | 0x7109871a: 'SIGNv2',
11 | 0xf05368c0: 'SIGNv3',
12 | 0x2146444e: 'Google Metadata',
13 | 0x42726577: 'Padding'
14 | }
15 |
16 |
17 | if __name__ == '__main__':
18 | parser = argparse.ArgumentParser(description='Process some apks')
19 | parser.add_argument('APK', help='APK')
20 | args = parser.parse_args()
21 |
22 |
23 | if os.path.isdir(args.APK):
24 | for f in os.listdir(args.APK):
25 | apk_path = os.path.join(args.APK, f)
26 | if os.path.isfile(apk_path):
27 | if androconf.is_android(apk_path) == 'APK':
28 | a, d, dx = AnalyzeAPK(apk_path)
29 | a.parse_v2_v3_signature()
30 | if 0x2146444e in a._v2_blocks:
31 | print("{} : FROSTING".format(f))
32 | else:
33 | print("{} : NOPE".format(f))
34 | else:
35 | print("{} not an APK".format(f))
36 | else:
37 | if androconf.is_android(args.APK) == 'APK':
38 | a, d, dx = AnalyzeAPK(args.APK)
39 | if a.is_signed_v1():
40 | print("V1 Signature")
41 | if a.is_signed_v2():
42 | print("V2 Signature")
43 | if a.is_signed_v3():
44 | print("V3 Signature")
45 | print("")
46 | print("Signing Blocks:")
47 | for b in a._v2_blocks:
48 | if b in BLOCK_TYPES.keys():
49 | print("\t{}".format(BLOCK_TYPES[b]))
50 | else:
51 | print("\tUnknown block {}".format(hex(b)))
52 | else:
53 | print("Not an APK file")
54 |
--------------------------------------------------------------------------------
/twitter/graph-followers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import json
3 | import argparse
4 | import networkx as nx
5 |
6 | if __name__ == "__main__":
7 | parser = argparse.ArgumentParser(description='Generate a Gephi graph based on a json file')
8 | parser.add_argument('INPUT', help='INPUT JSON FILE')
9 | parser.add_argument('OUTPUT', help='Output Gephi file')
10 | parser.add_argument('--type', '-t', default="all", choices=["all", "followers", "following"],
11 | help="Type of graph (all, followers, following)")
12 | args = parser.parse_args()
13 |
14 | # read json file
15 | f = open(args.INPUT, 'r')
16 | data = json.load(f)
17 | f.close()
18 |
19 | # Create graph
20 | G = nx.DiGraph()
21 | for user in data:
22 | ui = data[user]
23 | # Create user if it does not exist
24 | if ui["id"] not in G.nodes():
25 | G.add_node(ui["id"], label=user, name=user)
26 | G.nodes[ui["id"]]['viz'] = {'color': {'r': 255, 'g': 0, 'b': 0, 'a': 0}, 'size': 50}
27 | if args.type in ["all", "followers"]:
28 | # For each follower
29 | for f in ui["followers"]:
30 | if f not in G.nodes():
31 | G.add_node(f, label=str(f), name=str(f))
32 | G.nodes[f]['viz'] = {'size': 1}
33 | G.add_edge(f, ui["id"])
34 | else:
35 | G.add_edge(f, ui["id"])
36 | G.nodes[f]['viz']['size'] += 1
37 | if args.type in ["all", "following"]:
38 | # Following
39 | for f in ui["followings"]:
40 | if f not in G.nodes():
41 | G.add_node(f, label=str(f), name=str(f))
42 | G.nodes[f]['viz'] = {'size': 1}
43 | G.add_edge(ui["id"], f)
44 | else:
45 | G.add_edge(ui["id"], f)
46 | G.nodes[f]['viz']['size'] += 1
47 |
48 | nx.write_gexf(G, args.OUTPUT)
49 |
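50 | # Expected input shape, inferred from how the loop above reads the JSON (the key
51 | # names are assumptions based on this script, not a documented format):
52 | # {
53 | #     "some_user":  {"id": 1, "followers": [2, 3], "followings": [4]},
54 | #     "other_user": {"id": 2, "followers": [1],    "followings": [1, 3]}
55 | # }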
--------------------------------------------------------------------------------
/misp/misp2sig.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # -*- coding: utf-8 -*-
3 | import argparse
4 | import sys
5 | import ConfigParser
6 | import urllib
7 | from collections import Counter
8 | from misp import MispServer, MispEvent
9 | from misplib import parse_config
10 |
11 | """Tool to create signatures from MISP events
12 | Author : Tek
13 | Date : 01/02/2017
14 | """
15 |
16 | if __name__ == "__main__":
17 | parser = argparse.ArgumentParser(description='Command line interface to MISP servers')
18 | parser.add_argument('--server', '-s', help='Server used for the request', required=True)
19 | parser.add_argument('--event', '-e', help='Event infos', type=int, required=True)
20 | parser.add_argument('--dst', '-d', choices=['gmailsearch'], required=True,
21 | help='Search for attributes of this type')
22 | args = parser.parse_args()
23 |
24 | config = parse_config()
25 | if args.server is not None:
26 | if args.server.lower() in config.keys():
27 | server = MispServer(url=config[args.server.lower()]['url'],
28 | apikey=config[args.server.lower()]['key'],
29 | ssl_chain=False)
30 | else:
31 | print("Server not found, quitting...")
32 | sys.exit(1)
33 |
34 | else:
35 | if 'default' not in config.keys():
36 | print("No default server in MISP conf, quitting...")
37 | sys.exit(1)
38 | else:
39 | server = MispServer(url=config['default']['url'],
40 | apikey=config['default']['key'],
41 | ssl_chain=False)
42 |
43 | if args.dst == "gmailsearch":
44 | event = server.events.get(args.event)
45 | attributes = filter(
46 | lambda x:x.type in ['domain', 'email-src', 'email-subject'] and x.to_ids,
47 | event.attributes
48 | )
49 | sig = " OR ".join(map(lambda x: '"' + x.value + '"', attributes))
50 | print(sig)
51 | print("\n")
52 | print("https://mail.google.com/mail/u/0/#search/" + urllib.quote(sig))
53 |
54 |
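55 | # Example of the generated signature, assuming an event whose to_ids attributes
56 | # are the domain "evil.com" and the email-subject "Invoice 2017":
57 | #   "evil.com" OR "Invoice 2017"
58 | # which is then URL-encoded into a Gmail search link.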
--------------------------------------------------------------------------------
/web/http_test_methods.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python2
2 | import argparse
3 | import os
4 | import socket
5 | from urlparse import urlparse
6 | import re
7 | import httplib
8 |
9 | def send_request(method, host, path):
10 | conn = httplib.HTTPConnection(host)
11 | conn.request(method, path)
12 | return conn.getresponse()
13 |
14 | def print_response(res):
15 | print "HTTP/1.1 %i %s" % (res.status, res.reason)
16 | for header in res.getheaders():
17 | print "%s: %s" % (header[0].capitalize(), header[1])
18 | print ""
19 |
20 |
21 | if __name__ == "__main__":
22 | parser = argparse.ArgumentParser(description='Identify the options supported by the web server')
23 | parser.add_argument('-t', '--test', help='Tests all the methods', action='store_true')
24 | parser.add_argument('-v', '--verbose', help='verbose mode', action="count", default=0)
25 | parser.add_argument('host', metavar='HOST', help='Host targeted')
26 | args = parser.parse_args()
27 |
28 | # validate the host
29 | hosturl = urlparse(args.host)
30 | if hosturl.netloc == '':
31 | host = hosturl.path
32 | else:
33 | host = hosturl.netloc
34 |
35 |
36 | if args.test:
37 | print "Testing all HTTP methods"
38 | for method in ["GET", "OPTIONS", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"]:
39 | res = send_request(method, host, "/")
40 | if args.verbose > 0:
41 | print "%s /" % method
42 | print_response(res)
43 | #print "%s : %i %s" % (method, res.status, res.reason)
44 | #print "%s\n" % repr(res.getheaders())
45 | else:
46 | if res.status == 404 or res.status == 400 or res.status == 405:
47 | print "%s: BLOCKED" % method
48 | else:
49 | print "%s: AUTHORIZED" % method
50 | else:
51 | res = send_request("OPTIONS", host, "/")
52 | if res.getheader('allow'):
53 | print "Methods allowed: %s" % res.getheader('allow')
54 | else:
55 | print "No response from the server to OPTIONS method"
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/misp/xsearch_misp.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # -*- coding: utf-8 -*-
3 | import argparse
4 | import sys
5 | from collections import Counter
6 | from misp import MispServer, MispEvent, MispTransportError, MispAttribute
7 | from misplib import parse_config
8 |
9 | if __name__ == "__main__":
10 | parser = argparse.ArgumentParser(description='Search for MISP attributes in another MISP instance')
11 | parser.add_argument('SERVER_SOURCE', help='Server having the IOCs')
12 | parser.add_argument('EVENT_SOURCE', help='Event having new IOCs', type=int)
13 | parser.add_argument('SERVER_DEST', help='Server for the research')
14 | parser.add_argument('-v', '--verbose', action='count', default=0)
15 | args = parser.parse_args()
16 |
17 | config = parse_config()
18 |
19 | if args.SERVER_SOURCE.lower() not in config.keys():
20 | print("Unknown source server, quitting...")
21 | sys.exit(1)
22 | else:
23 | source_server = MispServer(url=config[args.SERVER_SOURCE.lower()]['url'],
24 | apikey=config[args.SERVER_SOURCE.lower()]['key'],
25 | ssl_chain=False)
26 |
27 | if args.SERVER_DEST.lower() not in config.keys():
28 | print("Unknown destination server, quitting...")
29 | sys.exit(1)
30 | else:
31 | dest_server = MispServer(url=config[args.SERVER_DEST.lower()]['url'],
32 | apikey=config[args.SERVER_DEST.lower()]['key'],
33 | ssl_chain=False)
34 |
35 | try:
36 | source_event = source_server.events.get(args.EVENT_SOURCE)
37 | except MispTransportError:
38 | print("Could not find the source event, quitting")
39 | sys.exit(1)
40 |
41 | for attr in source_event.attributes:
42 | if attr.category != 'Internal reference':
43 | res = dest_server.attributes.search(value=attr.value)
44 | if len(res) == 0:
45 | if args.verbose > 0:
46 | print("Attr %s: no results" % attr.value)
47 | else:
48 | print("Attr %s: results found" % attr.value)
49 | for event in res:
50 | print("\t-> %i - %s" % (event.id, event.info))
51 |
--------------------------------------------------------------------------------
/network/checkpoint_banner.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import argparse
3 | import os
4 |
5 | def check_ip(ip, port):
6 | try:
7 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
8 | s.settimeout(10)
9 | s.connect((ip, port))
10 | s.sendall(b"\x51\x00\x00\x00\x00\x00\x00\x21")
11 | data = s.recv(4)
12 | if data and data == b"Y\x00\x00\x00":
13 | #print("Checkpoint Firewall")
14 | s.sendall(b"\x00\x00\x00\x0bsecuremote\x00")
15 | data = s.recv(200)
16 | return data[4:-8].strip(b"\x00").decode('utf-8').split(",")
17 | except socket.timeout:
18 | return None
19 | except ConnectionRefusedError:
20 | return None
21 | except OSError:
22 | return None
23 | return None
24 |
25 | if __name__ == "__main__":
26 | parser = argparse.ArgumentParser(description='Extract Checkpoint Banner')
27 | subparsers = parser.add_subparsers(help='subcommand')
28 | parser_a = subparsers.add_parser("ip")
29 | parser_a.add_argument('IP', help='IP address')
30 | parser_a.add_argument('--port', '-p', type=int, default=264, help='Port')
31 | parser_a.set_defaults(subcommand='ip')
32 | parser_b = subparsers.add_parser("list")
33 | parser_b.add_argument('FILE', help='List of IP addresses')
34 | parser_b.add_argument('--port', '-p', type=int, default=264, help='Port')
35 | parser_b.set_defaults(subcommand='list')
36 | args = parser.parse_args()
37 |
38 | if 'subcommand' in args:
39 | if args.subcommand == 'ip':
40 | print(check_ip(args.IP, args.port))
41 | elif args.subcommand == 'list':
42 | with open(args.FILE) as f:
43 | data = f.read().split('\n')
44 | for ip in data:
45 | if ip.strip() == "":
46 | continue
47 | info = check_ip(ip, args.port)
48 | if info:
49 | print("{} - {} - {}".format(ip, info[0], info[1]))
50 | else:
51 | print("{} - No data".format(ip))
52 | else:
53 | parser.print_help()
54 | else:
55 | parser.print_help()
56 |
57 |
58 |
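59 | # Usage sketch (192.0.2.1 is a documentation address used as a placeholder):
60 | #   python3 checkpoint_banner.py ip 192.0.2.1
61 | # Port 264/tcp is Check Point's SecuRemote/topology service; the probe above
62 | # replays its handshake and parses the hostname banner out of the reply.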
--------------------------------------------------------------------------------
/forensic/extract_chrome_history.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sqlite3
3 | import csv
4 | from datetime import datetime
5 |
6 | """
7 | Schema
8 | CREATE TABLE urls(id INTEGER PRIMARY KEY AUTOINCREMENT,url LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL);
9 | CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration INTEGER DEFAULT 0 NOT NULL,incremented_omnibox_typed_score BOOLEAN DEFAULT FALSE NOT NULL);
10 | """
11 |
12 | def convert_timestamp(tmstp):
13 | return datetime.fromtimestamp(int(tmstp) / 1000000 - 11644473600)
14 |
15 |
16 | if __name__ == '__main__':
17 | parser = argparse.ArgumentParser(description='Extract Chrome browsing history as CSV')
18 | parser.add_argument('FILE', help='History file')
19 | parser.add_argument('--filter', '-f', help='Filter on the url')
20 | args = parser.parse_args()
21 |
22 |
23 | query = "SELECT urls.id, urls.url, urls.title, urls.visit_count, urls.typed_count, urls.last_visit_time, urls.hidden, visits.visit_time, visits.from_visit, visits.visit_duration, visits.transition, visit_source.source FROM urls JOIN visits ON urls.id = visits.url LEFT JOIN visit_source ON visits.id = visit_source.id"
24 | if args.filter:
25 | query += ' WHERE urls.url like "%{}%"'.format(args.filter)
26 | query += " ORDER BY visits.visit_time;"
27 |
28 | conn = sqlite3.connect(args.FILE)
29 | c = conn.cursor()
30 |
31 |
32 | print("url_id,url,title,#visits,typed_count,last_visit_time,hidden,visit_time,from_visit,visit_duration,transition,source")
33 | for row in c.execute(query):
34 | print("{},{},\"{}\",{},{},{},{},{},{},{},{},{}".format(
35 | row[0],
36 | row[1],
37 | row[2].replace('"', '""'),
38 | row[3],
39 | row[4],
40 | convert_timestamp(row[5]).strftime("%Y-%m-%d %H:%M:%S:%f"),
41 | row[6],
42 | convert_timestamp(row[7]).strftime("%Y-%m-%d %H:%M:%S:%f"),
43 | row[8],
44 | row[9],
45 | row[10],
46 | row[11]
47 | ))
48 |
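49 | # Worked example for convert_timestamp: Chrome stores times as microseconds since
50 | # 1601-01-01 (the Windows epoch), and 11644473600 s is the 1601 -> 1970 offset.
51 | # So 13100000000000000 -> 13100000000 - 11644473600 = 1455526400 s, i.e. roughly
52 | # 2016-02-15 (the exact wall time depends on the local timezone, since
53 | # datetime.fromtimestamp() converts to local time).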
--------------------------------------------------------------------------------
/android/get_method_code.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import argparse
4 | from androguard.core import androconf
5 | from androguard.misc import AnalyzeAPK
6 |
7 | if __name__ == '__main__':
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument("APK")
10 | parser.add_argument("CLASS", help="Class name, for instance google.a.b.c")
11 | parser.add_argument("METHOD", help="Method name")
12 | parser.add_argument("--verbose", "-v", help="Verbose mode", action="store_true")
13 | parser.add_argument("--bytecode", "-b", help="Give bytecode", action="store_true")
14 | parser.add_argument("--hex", "-H", help="Give hex string of the bytecode", action="store_true")
15 | args = parser.parse_args()
16 |
17 | if not os.path.isfile(args.APK):
18 | print("This file does not exist")
19 | sys.exit(-1)
20 |
21 | if androconf.is_android(args.APK) != 'APK':
22 | print("This is not an APK file :o")
23 | sys.exit(-1)
24 | else:
25 | a, d, dx = AnalyzeAPK(args.APK)
26 | class_name = args.CLASS.replace('.', '/')
27 | if args.verbose:
28 | print("Searching for {}".format(class_name))
29 | cc = [d for d in dx.get_classes() if class_name in d.name]
30 | if len(cc) == 0:
31 | print("Class not found")
32 | else:
33 | for c in cc:
34 | methods = [m for m in c.get_methods() if m.get_method().name == args.METHOD]
35 | print("{} methods found in {}".format(len(methods), c.name))
36 | for m in methods:
37 | m.get_method().show_info()
38 | if args.bytecode:
39 | for i in m.get_method().get_instructions():
40 | print("{:>24} {:20} {:12}".format(
41 | i.get_hex(),
42 | i.get_name(),
43 | i.get_output()
44 | ))
45 | else:
46 | m.get_method().source()
47 | if args.hex:
48 | print("{{ {} }}".format(
49 | ' '.join([k.get_hex() for k in m.get_method().get_instructions()])
50 | ))
51 |
--------------------------------------------------------------------------------
/forensic/ios_unpack.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import sys
3 | import os
4 | import argparse
5 | import sqlite3
6 | from shutil import copyfile
7 |
8 |
9 | if __name__ == "__main__":
10 | parser = argparse.ArgumentParser(description='Convert iOS Backup to flat files')
11 | parser.add_argument("INPUT_FOLDER", help="Folder of the iOS backup extracted")
12 | parser.add_argument("OUTPUT_FOLDER", help="Output folder")
13 | parser.add_argument("--verbose", "-v", action="store_true",
14 | help="Verbose mode")
15 | args = parser.parse_args()
16 |
17 | if not os.path.isdir(args.INPUT_FOLDER):
18 | print("Invalid input folder")
19 | sys.exit(-1)
20 | if not os.path.isdir(args.OUTPUT_FOLDER):
21 | os.mkdir(args.OUTPUT_FOLDER)
22 |
23 | # Check if there is the Manifest.db file
24 | manifest = os.path.join(args.INPUT_FOLDER, "Manifest.db")
25 | if not os.path.isfile(manifest):
26 | if os.path.isfile(os.path.join(args.INPUT_FOLDER, "Manifest.mbdb")):
27 | print("Manifest.mbdb not implemented yet, sorry")
28 | sys.exit(-1)
29 | else:
30 | print("Manifest file not found, something is wrong")
31 | sys.exit(-1)
32 |
33 | conn = sqlite3.connect(manifest)
34 | c = conn.cursor()
35 | copied = 0
36 | not_found = 0
37 | for row in c.execute('select * from Files'):
38 | # Test if file exists
39 | infile = os.path.join(args.INPUT_FOLDER, row[0][0:2], row[0])
40 | outfile = os.path.join(args.OUTPUT_FOLDER, row[1], row[2])
41 | if os.path.isfile(infile):
42 | if row[2]:
43 | # Make directories
44 | dirpath = os.path.join(args.OUTPUT_FOLDER, row[1], os.path.dirname(row[2]))
45 | if not os.path.isdir(dirpath):
46 | os.makedirs(dirpath)
47 | copyfile(infile, outfile)
48 | copied += 1
49 | if args.verbose:
50 | print("Copied {} to {}".format(row[0], outfile))
51 | else:
52 | if args.verbose:
53 | print("File {} not found".format(row[0]))
54 | not_found += 1
55 |
56 | print("{} files not found".format(not_found))
57 | print("{} files copied".format(copied))
58 |
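59 | # Note on the Manifest.db rows used above: in the Files table, row[0] is the
60 | # fileID (a SHA1 hash, also the on-disk file name inside a two-hex-char
61 | # subfolder), row[1] is the domain (e.g. HomeDomain) and row[2] the relative
62 | # path, which is why the output tree is laid out as OUTPUT_FOLDER/<domain>/<relativePath>.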
--------------------------------------------------------------------------------
/ghidra_scripts/yaracrypto.py:
--------------------------------------------------------------------------------
1 | # Run Yara with crypto patterns on the current file and create bookmarks and EOL comments for findings.
2 | #@author Thomas Roth code@stacksmashing.net
3 | #@category Ghidra Ninja
4 | #@keybinding
5 | #@menupath
6 | #@toolbar
7 |
8 | import ghidra_ninja_helpers as gn
9 | import subprocess
10 | import tempfile
11 | import os, sys  # sys is needed for the sys.exit() calls below
12 | from ghidra.program.model.listing import CodeUnit
13 |
14 |
15 | def convert_phys_addr(addr):
16 | # Convert physical address to logical address
17 | for section in currentProgram.getMemory().getBlocks():
18 | pointer_to_rawdata = section.getSourceInfos()[0].fileBytesOffset
19 | rawdata_size = section.getSourceInfos()[0].length
20 | if (addr >= pointer_to_rawdata) and (addr <= pointer_to_rawdata + rawdata_size):
21 | # This is it
22 | return section.start.offset + addr - pointer_to_rawdata
23 | return None
24 |
25 |
26 | def add_bookmark_comment(addr, text):
27 | gaddr = currentProgram.getAddressFactory().getDefaultAddressSpace().getAddress(addr)
28 | createBookmark(gaddr, "yara", text)
29 | cu = currentProgram.getListing().getCodeUnitAt(gaddr)
30 | cu.setComment(CodeUnit.EOL_COMMENT, text)
31 |
32 |
33 | # Start
34 | file_location = currentProgram.getExecutablePath()
35 | rule_location = os.path.join(gn.PATH, "yara-crypto.yar")
36 |
37 | if not os.path.isfile(file_location):
38 | print("File not found at {}".format(file_location))
39 | sys.exit(-1)
40 | if not os.path.isfile(rule_location):
41 | print("yara rules not found at {}".format(rule_location))
42 | sys.exit(-1)
43 |
44 | current_rule = None
45 | output = subprocess.check_output(["yara", "--print-string-length", rule_location, file_location], stderr=None)
46 | for line in output.splitlines():
47 | if line.startswith("0x"):
48 | if current_rule:
49 | addr_int = int(line.split(":")[0][2:], 16)
50 | vaddr = convert_phys_addr(addr_int)
51 | if vaddr:
52 | print("Found : {} - {} - {}".format(current_rule, hex(addr_int), hex(vaddr)))
53 | add_bookmark_comment(vaddr, current_rule)
54 | else:
55 | print("Physical address {} cannot be converted".format(hex(addr_int)))
56 | else:
57 | current_rule = line.split(" ")[0]
58 |
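59 | # Worked example for convert_phys_addr: if a section was loaded from file offset
60 | # 0x400 (fileBytesOffset) and starts at address 0x401000, a yara match at file
61 | # offset 0x650 maps to 0x401000 + 0x650 - 0x400 = 0x401250, which is where the
62 | # bookmark and EOL comment are placed.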
--------------------------------------------------------------------------------
/threats/urlscan.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | import json
3 | import argparse
4 | import requests
5 |
6 |
7 | class UrlScan(object):
8 | def __init__(self):
9 | self.url = "https://urlscan.io/api/v1/"
10 |
11 | def search(self, query, size=100, offset=0):
12 | params = {
13 | 'q': query,
14 | 'size': size,
15 | 'offset': offset
16 | }
17 | r = requests.get(self.url + "search/", params=params)
18 | return r.json()
19 |
20 | def view(self, uid):
21 | r = requests.get(self.url + 'result/' + uid)
22 | return r.json()
23 |
24 |
25 | if __name__ == '__main__':
26 | parser = argparse.ArgumentParser(description='Query urlscan')
27 | subparsers = parser.add_subparsers(help='Subcommand')
28 | parser_a = subparsers.add_parser('search', help='Search in urlscan')
29 | parser_a.add_argument('QUERY', help='Query to run against urlscan')
30 | parser_a.add_argument('--raw', '-r', action='store_true', help='Shows raw results')
31 | parser_a.set_defaults(subcommand='search')
32 | parser_c = subparsers.add_parser('view', help='View urlscan analysis')
33 | parser_c.add_argument('UID', help='UID of the analysis')
34 | parser_c.set_defaults(subcommand='view')
35 | args = parser.parse_args()
36 |
37 | if 'subcommand' in args:
38 | us = UrlScan()
39 | if args.subcommand == 'search':
40 | # Search
41 | res = us.search(args.QUERY)
42 | if args.raw:
43 | print(json.dumps(res, sort_keys=True, indent=4))
44 | else:
45 | if len(res['results']) > 0:
46 | for r in res['results']:
47 | print("{} - {} - {} - https://urlscan.io/result/{}".format(
48 | r["task"]["time"],
49 | r["page"]["url"],
50 | r["page"]["ip"],
51 | r["_id"]
52 | )
53 | )
54 | else:
55 | print("No results for this query")
56 | elif args.subcommand == 'view':
57 | print(json.dumps(us.view(args.UID), sort_keys=True, indent=4))
58 | else:
59 | parser.print_help()
60 | else:
61 | parser.print_help()
62 |
--------------------------------------------------------------------------------
/shodan/shodan_ssh_history.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import shodan
3 | import argparse
4 | import os
5 | import sys
6 | from dateutil.parser import parse
7 |
8 |
9 |
10 | if __name__ == '__main__':
11 | # Arguments with argparse
12 | parser = argparse.ArgumentParser(description='Fingerprint a system based on Shodan information')
13 | parser.add_argument('IP', help='IP')
14 | parser.add_argument('--key', '-k', help='Shodan API key')
15 | parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
16 | args = parser.parse_args()
17 |
18 | # Deal with the key first, either from the arguments or from the
19 | # standard file used by the shodan tool
20 | if args.key:
21 | key = args.key
22 | else:
23 | cpath = os.path.expanduser('~/.shodan/api_key')
24 | if os.path.isfile(cpath):
25 | with open(cpath, 'r') as f:
26 | key = f.read().strip()
27 | else:
28 | print("No API key found")
29 | sys.exit(1)
30 |
31 | # creates the API object with the key as parameter
32 | api = shodan.Shodan(key)
33 | data = {}
34 | try:
35 | # Get all the data on this host
36 | res = api.host(args.IP, history=True)
37 | except shodan.exception.APIError:
38 | # Raises an exception if the IP has no data
39 | print("IP not found in Shodan")
40 | sys.exit(0)
41 |
42 | # Go through the data
43 | for event in res['data']:
44 | if event['_shodan']['module'] != 'ssh':
45 | continue
46 |
47 | fingerprint = event['ssh']['fingerprint']
48 | date = parse(event['timestamp'])
49 | if fingerprint not in data:
50 | data[fingerprint] = {'first': date, 'last': date, 'fingerprint': fingerprint}
51 | else:
52 | if data[fingerprint]['first'] > date:
53 | data[fingerprint]['first'] = date
54 | if data[fingerprint]['last'] < date:
55 | data[fingerprint]['last'] = date
56 |
57 | # Print the result
58 | for val in sorted(data.values(), key=lambda x:x['first']):
59 | print('%s - %s -> %s' % (
60 | val['fingerprint'],
61 | val['first'].strftime('%Y-%m-%d'),
62 | val['last'].strftime('%Y-%m-%d')
63 | )
64 | )
65 |
--------------------------------------------------------------------------------
/android/get_certificate.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python3
2 | import os
3 | import sys
4 | import argparse
5 | from androguard.core.bytecodes.apk import APK
6 | from androguard.core import androconf
7 |
8 |
9 | def convert_x509_name(name):
10 | """
11 | Convert x509 name to a string
12 | """
13 | types = {
14 | 'country_name': 'C',
15 | 'state_or_province_name': 'ST',
16 | 'locality_name': 'L',
17 | 'organization_name': 'O',
18 | 'organizational_unit_name': 'OU',
19 | 'common_name': 'CN',
20 | 'email_address': 'emailAddress'
21 | }
22 |
23 | return '/'.join(['{}={}'.format(types[attr], name.native[attr]) for attr in name.native])
24 |
25 |
26 | if __name__ == '__main__':
27 | parser = argparse.ArgumentParser()
28 | parser.add_argument("APK", help="Path to an APK file")
29 | args = parser.parse_args()
30 |
31 | if os.path.isdir(args.APK):
32 | for f in os.listdir(args.APK):
33 | if os.path.isfile(os.path.join(args.APK, f)):
34 | if androconf.is_android(os.path.join(args.APK, f)) == 'APK':
35 | apk = APK(os.path.join(args.APK, f))
36 | if len(apk.get_certificates()) > 0:
37 | cert = apk.get_certificates()[0]
38 | print("{} : {} - {}".format(os.path.join(args.APK, f), cert.sha1_fingerprint.replace(' ', ''), convert_x509_name(cert.issuer)))
39 | else:
40 | print("{} : no certificate".format(os.path.join(args.APK, f)))
41 | elif os.path.isfile(args.APK):
42 | apk = APK(args.APK)
43 |
44 | if len(apk.get_certificates()) > 0:
45 | cert = apk.get_certificates()[0]
46 | print("SHA1: {}".format(cert.sha1_fingerprint.replace(' ', '')))
47 | print('Serial: {:X}'.format(cert.serial_number))
48 | print("Issuer: {}".format(convert_x509_name(cert.issuer)))
49 | print("Subject: {}".format(convert_x509_name(cert.subject)))
50 | print("Not Before: {}".format(cert['tbs_certificate']['validity']['not_before'].native.strftime('%b %-d %X %Y %Z')))
51 | print("Not After: {}".format(cert['tbs_certificate']['validity']['not_after'].native.strftime('%b %-d %X %Y %Z')))
52 | else:
53 | print("No certificate here, weird")
54 | else:
55 | print("Invalid file path")
56 | sys.exit(-1)
57 |
--------------------------------------------------------------------------------
/web/webscan.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import requests
3 | import yara
4 | from io import StringIO
5 | from urllib.parse import urljoin, urlparse
6 | from lxml import etree
7 |
8 |
9 | def extract_suburls(webpage, url):
10 | """
11 | Extract javascript links from every page
12 | """
13 | if webpage.strip() == "":
14 | return set()
15 | urlp = urlparse(url)
16 | parser = etree.HTMLParser()
17 | tree = etree.parse(StringIO(webpage), parser)
18 | res = set()
19 | for s in tree.xpath('//script[@src]/@src'):
20 | if s.startswith('http'):
21 | parsed = urlparse(s)
22 | if parsed.netloc == urlp.netloc:
23 | res.add(s)
24 | else:
25 | res.add(urljoin(url, s))
26 | return res
27 |
28 |
29 | if __name__ == "__main__":
30 | parser = argparse.ArgumentParser(description='Scan a website for a specific yara rule')
31 | parser.add_argument('URL', help='URL of the website to scan')
32 | parser.add_argument('YARARULE', help='Yara rule')
33 | parser.add_argument('--verbose', '-v', action='store_true', help='Verbose mode')
34 | args = parser.parse_args()
35 |
36 | # TODO : split the url likely
37 |
38 | todo = set([args.URL])
39 | done = set()
40 |
41 | rules = yara.compile(filepath=args.YARARULE)
42 | found = []
43 |
44 | while len(todo) > 0:
45 | url = todo.pop()
46 | if args.verbose:
47 | print("Scanning {}".format(url))
48 | r = requests.get(url)
49 | if r.status_code != 200:
50 | if args.verbose:
51 | print("{} : HTTP code {}".format(url, r.status_code))
52 | continue
53 | webpage = r.text
54 | done.add(url)
55 | sublinks = extract_suburls(webpage, url)
56 | for s in sublinks:
57 | if s not in done:
58 | todo.add(s)
59 |
60 | # Yara scan
61 | res = rules.match(data=webpage)
62 | if len(res) > 0:
63 | print("{} matches {}".format(url, ", ".join([r.rule for r in res])))
64 | found.append([url, ", ".join([r.rule for r in res])])
65 |
66 | if args.verbose:
67 | print("\n")
68 | if len(found) > 0:
69 | print("FOUND !")
70 | for f in found:
71 | print("{} - {}".format(f[0], f[1]))
72 | else:
73 | print("Nothing found")
74 | print("")
75 |
--------------------------------------------------------------------------------
/shodan/shodanhistory.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import shodan
3 | import argparse
4 | import os
5 | import sys
6 | import json
7 | from dateutil.parser import parse
8 | from datetime import datetime, timedelta
9 |
10 |
11 | if __name__ == '__main__':
12 | parser = argparse.ArgumentParser(description='Fingerprint a system based on Shodan information')
13 | parser.add_argument('IP', help='IP')
14 | parser.add_argument('--key', '-k', help='Shodan API key')
15 |
16 | args = parser.parse_args()
17 |
18 | # Deal with the key first
19 | if args.key:
20 | key = args.key
21 | else:
22 | cpath = os.path.expanduser('~/.shodan/api_key')
23 | if os.path.isfile(cpath):
24 | with open(cpath, 'r') as f:
25 | key = f.read().strip()
26 | else:
27 | print("No API key found")
28 | sys.exit(1)
29 |
30 |
31 | api = shodan.Shodan(key)
32 | try:
33 | res = api.host(args.IP, history=True)
34 | except shodan.exception.APIError:
35 | print("IP not found in Shodan")
36 | else:
37 | for d in res['data']:
38 | if d['port'] == 22:
39 | print("%s - port 22 ssh - %s\n" % (
40 | d['timestamp'],
41 | d['data'].split("\n")[0]
42 | )
43 | )
44 | elif d['port'] == 80:
45 | print("%s - port 80 http - Server \"%s\"\n" % (
46 | d['timestamp'],
47 | d['http']['server']
48 | )
49 | )
50 | elif d['port'] == 443:
51 | if 'cert' in d['ssl']:
52 | print("%s - port 443 https - Cert \"%s\" \"%s\" %s - Server \"%s\"\n" % (
53 | d['timestamp'],
54 | d['ssl']['cert']['subject']['CN'],
55 | d['ssl']['cert']['issuer']['CN'],
56 | d['ssl']['cert']['fingerprint']['sha1'],
57 | d['http']['server']
58 | )
59 | )
60 | else:
61 | print("%s - port 443 https - Cert Unknown - Server \"%s\"\n" % (
62 | d['timestamp'],
63 | d['http']['server']
64 | )
65 | )
66 |
--------------------------------------------------------------------------------
/miasm/simu_sc_linux.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 | from pdb import pm
3 | from miasm.jitter.csts import PAGE_READ, PAGE_WRITE, EXCEPT_INT_XX, EXCEPT_ACCESS_VIOL, EXCEPT_PRIV_INSN
4 | from miasm.analysis.machine import Machine
5 | from miasm.os_dep.linux import environment, syscall
6 | import logging
7 |
8 |
9 | def code_sentinelle(jitter):
10 | print("Done")
11 | jitter.run = False
12 | jitter.pc = 0
13 | return True
14 |
15 | def priv(jitter):
16 | print("Privilege Exception")
17 | return False
18 |
19 |
20 | if __name__ == '__main__':
21 | parser = ArgumentParser(description="Linux shellcode")
22 | parser.add_argument("filename", help="Shellcode filename")
23 | parser.add_argument("-j", "--jitter",
24 | help="Jitter engine",
25 | default="python")
26 | parser.add_argument("-a", "--arch", help="Architecture (x86_32, \
27 | x86_64, arml)", choices=["x86_32", "x86_64", "arml"],
28 | default="x86_32")
29 | parser.add_argument("--verbose", "-v", action="store_true",
30 | help="Verbose mode")
31 | args = parser.parse_args()
32 |
33 | myjit = Machine(args.arch).jitter(args.jitter)
34 | myjit.init_stack()
35 |
36 |
37 | data = open(args.filename, 'rb').read()
38 | run_addr = 0x40000000
39 | myjit.vm.add_memory_page(run_addr, PAGE_READ | PAGE_WRITE, data)
40 | if args.verbose:
41 | myjit.set_trace_log()
42 | myjit.add_exception_handler(EXCEPT_PRIV_INSN, priv)
43 | myjit.add_exception_handler(EXCEPT_ACCESS_VIOL, code_sentinelle)
44 |
45 | # Log syscalls
46 | log = logging.getLogger('syscalls')
47 | log.setLevel(logging.DEBUG)
48 |
49 | # Enable syscall handling
50 | if args.arch == "x86_32":
51 | myjit.push_uint32_t(0x1337beef)
52 | myjit.add_breakpoint(0x1337beef, code_sentinelle)
53 | env = environment.LinuxEnvironment_x86_32()
54 | syscall.enable_syscall_handling(myjit, env, syscall.syscall_callbacks_x86_32)
55 | elif args.arch == "x86_64":
56 | myjit.push_uint64_t(0x1337beef)
57 | myjit.add_breakpoint(0x1337beef, code_sentinelle)
58 | env = environment.LinuxEnvironment_x86_64()
59 | syscall.enable_syscall_handling(myjit, env, syscall.syscall_callbacks_x86_64)
60 | else:
61 | env = environment.LinuxEnvironment_arml()
62 | syscall.enable_syscall_handling(myjit, env, syscall.syscall_callbacks_arml)
63 | myjit.run(run_addr)
64 |
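65 | # Usage sketch (shellcode.bin is a placeholder for a raw shellcode blob):
66 | #   python simu_sc_linux.py shellcode.bin -a x86_64 -v
67 | # The blob is mapped at 0x40000000 and run with miasm's Linux syscall emulation;
68 | # on the x86 targets, execution stops at the 0x1337beef sentinel pushed on the
69 | # stack as the return address.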
--------------------------------------------------------------------------------
/web/waf/waf_test_keywords.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | import httplib
3 | import argparse
4 | import urlparse
5 | import random
6 | import urllib2
7 |
8 | def is_blocked(res):
9 | if res.status == 403:
10 | return True
11 | else:
12 | return False
13 |
14 | def send_request(host, req, get="GET", display=False):
15 | conn = httplib.HTTPConnection(host)
16 | conn.request(get, req)
17 | res = conn.getresponse()
18 | if display:
19 | print "Request: %s" % (get+ " " + host+req)+"\t/\t",
20 | print "Response: %i" % res.status
21 | return is_blocked(res)
22 |
23 | if __name__ == "__main__":
24 | parser = argparse.ArgumentParser(description='Test if common sqli keywords are blocked by the site')
25 | parser.add_argument('-f', '--file', help='File containing the list of SQLi keywords (default is sqli_keywords.txt)', default='sqli_keywords.txt')
26 | parser.add_argument('-v', '--verbose', help='verbose mode', action="count", default=0)
27 | parser.add_argument('-r', '--random', help='If the keyword is blocked test with random uppercase', action="store_true")
28 | parser.add_argument('host', metavar='HOST', help='Host targeted')
29 |
30 | args = parser.parse_args()
31 |
32 | if not args.host.startswith("http"):
33 | host = urlparse.urlparse("http://" + args.host)
34 | else:
35 | host = urlparse.urlparse(args.host)
36 | req = urllib2.Request(host.geturl())
37 | try:
38 | r = urllib2.urlopen(req)
39 | if r.getcode() != 200:
40 | print "Host unavailable!"
41 | exit(1)
42 | except urllib2.URLError as e:
43 | print "Host unavailable!"
44 | exit(1)
45 |
46 | fl = open(args.file, "r")
47 | kw = fl.readline().strip()
48 | while kw != "":
49 | if send_request(host.netloc, host.path+"?"+host.query+'+'+kw, display=(args.verbose > 1)):
50 | if args.random:
51 | kw2 = "".join(random.choice([k.upper(), k]) for k in kw.lower())
52 | if send_request(host.netloc, host.path+"?"+host.query+'+'+kw2, display=(args.verbose>1)):
53 | print "BLOCKED: %s (even with randomized char)" % kw
54 | else:
55 | print "BLOCKED: %s (but PASS with %s)" % (kw, kw2)
56 | else:
57 | print "BLOCKED: %s" % kw
58 | else:
59 | if args.verbose > 0:
60 | print "PASS: %s" % kw
61 | kw = fl.readline().strip()
62 |
--------------------------------------------------------------------------------
/pe/checkpesize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # -*- coding: utf-8 -*-
3 | import pefile
4 | import argparse
5 | import sys
6 |
7 | """Check if the size of a PE file is correct
8 | Author : Tek
9 | Date : 4/10/2016
10 | """
11 |
12 | def get_pe_size(pe, verbose=True):
13 | """Return the PE size obtained from the file itself"""
14 | return max(map(lambda x: x.PointerToRawData + x.SizeOfRawData, pe.sections))
15 |
16 |
17 | def display_sections(pe):
18 | """Display information about the PE sections"""
19 | print("Name\tVirtualSize\tVirtualAddress\tRawSize\t\tRawAddress")
20 | for section in pe.sections:
21 | print("%s\t%s\t\t%s\t\t%s\t\t%s" % (
22 | section.Name,
23 | hex(section.Misc_VirtualSize),
24 | hex(section.VirtualAddress),
25 | hex(section.PointerToRawData),
26 | hex(section.SizeOfRawData)
27 | ))
28 | print("")
29 |
30 | if __name__ == "__main__":
31 | parser = argparse.ArgumentParser(description='Check PE size')
32 | parser.add_argument('FILE', help='a PE file')
33 | parser.add_argument('--quiet', '-q', action='store_true', help='Quiet output')
34 | parser.add_argument('--extra', '-e', help='Dump extra data in another file')
35 | parser.add_argument('--write', '-w', help='Copy the file with the right size')
36 | args = parser.parse_args()
37 |
38 | fin = open(args.FILE, 'rb')
39 | data = fin.read()
40 | fin.close()
41 | pe = pefile.PE(data=data)
42 |
43 | if not args.quiet:
44 | display_sections(pe)
45 |
46 | size = get_pe_size(pe)
47 | if len(data) > size:
48 | print("%i bytes of extra data (%i while it should be %i)" % (
49 | len(data) - size,
50 | len(data),
51 | size
52 | ))
53 | if args.write is not None:
54 | fout = open(args.write, 'wb')
55 | fout.write(data[:size])
56 | fout.close()
57 | print('Correct PE dumped in %s' % args.write)
58 | if args.extra is not None:
59 | fout = open(args.extra, 'wb')
60 | fout.write(data[size:])
61 | fout.close()
62 | print('Dumped extra data in %s' % args.extra)
63 | else:
64 | if len(data) == size:
65 | print('Correct size')
66 | else:
67 | print("File too short (%i while it should be %i)" % (len(data), size))
68 |
69 | if args.write is not None or args.extra is not None:
70 | print('No extradata, can\'t do anything for you, sorry!')
71 |
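72 | # Worked example for get_pe_size: if the last section has PointerToRawData 0x4000
73 | # and SizeOfRawData 0x1000, the PE should be 0x5000 bytes long; anything beyond
74 | # that is overlay data, which --extra dumps to a file and --write strips from a copy.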
--------------------------------------------------------------------------------
/android/koodous_tag.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import os
4 | import sys
5 | import requests
6 |
7 |
8 | class Koodous(object):
9 | def __init__(self, key):
10 | self.key = key
11 | self.url = "https://api.koodous.com/"
12 | self.headers = {"Authorization": "Token {}".format(key)}
13 |
14 | def search(self, query):
15 | """
16 | Search in Koodous
17 | """
18 | url = self.url + "apks"
19 | params = {'search':query}
20 | results = []
21 | finished = False
22 | next_url = None
23 | while not finished:
24 | if next_url:
25 | r = requests.get(url=next_url, headers=self.headers)
26 | else:
27 | r = requests.get(url=url, headers=self.headers, params=params)
28 | if r.status_code != 200:
29 | return results
30 | data = r.json()
31 | results += data['results']
32 | if data.get('next', None):
33 | next_url = data['next']
34 | else:
35 | finished = True
36 | return results
37 |
38 | def downvote(self, sha256):
39 | """
40 | Downvote a sample
41 | """
42 |
43 | url = '%sapks/%s/votes' % (self.url, sha256)
44 | res = requests.post(url, data={'kind': 'negative'}, headers=self.headers)
45 | return res.json()
46 |
47 | def comment(self, sha256, text):
48 | url = '%sapks/%s/comments' % (self.url, sha256)
49 | payload = {'text': text}
50 | response = requests.post(url=url, headers=self.headers, data=payload)
51 | return response.json()
52 |
53 | """
54 | Query a list of apps on Koodous, downvote and comment on all of them
55 | """
56 | if __name__ == '__main__':
57 | parser = argparse.ArgumentParser()
58 | parser.add_argument("QUERY", help="Query to be done on Koodous")
59 | parser.add_argument("--comment", "-c", help="Comment to be added to the samples")
60 | parser.add_argument("--negative", "-n",
61 | help="Vote negative", action="store_true")
62 | args = parser.parse_args()
63 |
64 | koodous_conf = os.path.expanduser("~/.koodous")
65 | if not os.path.isfile(koodous_conf):
66 | print("Please add your Koodous key to ~/.koodous")
67 | sys.exit(-1)
68 |
69 | with open(koodous_conf, 'r') as f:
70 | key = f.read().strip()
71 |
72 | koodous_obj = Koodous(key)
73 | apks = koodous_obj.search(args.QUERY)
74 | for app in apks:
75 | if args.comment:
76 | koodous_obj.comment(app['sha256'], args.comment)
77 | if args.negative:
78 | koodous_obj.downvote(app['sha256'])
79 | print(app['sha256'])
80 |
--------------------------------------------------------------------------------
/goo.gl/api.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import json
4 | import requests
5 | import os
6 | import sys
7 | try:
8 | import configparser as cp
9 | except ImportError:
10 | # python2
11 | import ConfigParser as cp
12 |
13 |
14 | class GoogleShortener(object):
15 | def __init__(self, config):
16 | self.host = 'https://www.googleapis.com/urlshortener/v1/url'
17 | self.token = config["key"]
18 |
19 | def get_analytics(self, hash):
20 | params = {'key': self.token, 'shortUrl': 'http://goo.gl/' + hash, 'projection': 'FULL'}
21 | r = requests.get(self.host, params=params)
22 | return r.json()
23 |
24 | def expand(self, hash):
25 | params = {'key': self.token, 'shortUrl': 'http://goo.gl/' + hash}
26 | r = requests.get(self.host, params=params)
27 | return r.json()
28 |
29 | def shorten(self, long_url):
30 | params = {'key': self.token, 'longUrl': long_url}
31 | r = requests.post(self.host, data=params)
32 | return r.json()
33 |
34 |
35 | def load_config():
36 | config = cp.ConfigParser()
37 | if os.path.isfile(os.path.join(os.path.expanduser("~"), ".goo.gl")):
38 | conffile = os.path.join(os.path.expanduser("~"), ".goo.gl")
39 | else:
40 | print("Couldn't find the config file")
41 | sys.exit(1)
42 | config.read(conffile)
43 | return {"key": config.get("API", "key")}
44 |
45 | if __name__ == "__main__":
46 | parser = argparse.ArgumentParser(description='Check goo.gl infos through the API')
47 | parser.add_argument('--hash', '-H', help='HASH of a link')
48 | parser.add_argument('--file', '-f', help='Get hashes from a file')
49 | args = parser.parse_args()
50 |
51 | config = load_config()
52 | go = GoogleShortener(config)
53 | if args.hash:
54 | print(json.dumps(go.get_analytics(args.hash), sort_keys=True, indent=4, separators=(',', ':')))
55 | elif args.file:
56 | f = open(args.file, 'r')
57 | data = f.read().split()
58 | f.close()
59 | print("Date;Short URL;Long URL;Analytics;Short URL Clicks;Long URL Clicks")
60 | for d in data:
61 | res = go.get_analytics(d.strip())
62 | print("%s;%s;%s;https://goo.gl/#analytics/goo.gl/%s/all_time;%s;%s" %
63 | (
64 | res["created"],
65 | res["id"],
66 | res["longUrl"],
67 | res["id"][-6:],
68 | res["analytics"]["allTime"]["shortUrlClicks"],
69 | res["analytics"]["allTime"]["longUrlClicks"]
70 | )
71 | )
72 | else:
73 | print("Please provide either a hash or a file")
74 | parser.print_help()
75 |
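76 | # Expected ~/.goo.gl config, inferred from load_config() above (the key value is
77 | # a placeholder):
78 | # [API]
79 | # key = YOUR_API_KEY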
--------------------------------------------------------------------------------
/web/http_test.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python2
2 | import argparse
3 | import os
4 | import socket
5 | from urlparse import urlparse
6 |
7 | def get_request(num, host):
8 | if num == 0:
9 | # Default request
10 | return "GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % host
11 | elif num == 1:
12 | # HTTP 1.0 without HOST HEADER
13 | return "GET / HTTP/1.0\r\n\r\n"
14 | elif num == 2:
15 | return "GET / HTTP/1.0\r\nHost: %s\r\n\r\n" % host
16 | elif num == 3:
17 | return "OPTIONS / HTTP/1.1\r\nHost: %s\r\n\r\n" % host
18 | elif num == 4:
19 | return "TRACE / HTTP/1.1\r\nHost: %s\r\n\r\n" % host
20 | elif num == 5:
21 | return "FOOBAR / HTTP/1.1\r\nHost: %s\r\n\r\n" % host
22 | else:
23 | return ""
24 |
25 | def send_request(num, host, args):
26 | req = get_request(num, host)
27 | if req == "":
28 | print "Bad number"
29 | exit(1)
30 | else:
31 | print "========================================"
32 | print "Request #%i to %s\n" % (num, host)
33 | print req
34 |
35 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
36 | s.connect((host, 80))
37 | s.sendall(req)
38 | #FIXME : does not get all
39 | data = s.recv(100000)
40 | s.close()
41 |
42 | sep = data.find("\r\n\r\n")
43 | headers = data[:sep]
44 | content = data[sep+4:]
45 |
46 | if args.content:
47 | print data
48 | else:
49 | if len(content) < 700:
50 | print data
51 | else:
52 | print headers
53 | print "\nContent avoided (length %i)" % len(content)
54 | print ""
55 |
56 |
57 | if __name__ == "__main__":
58 | parser = argparse.ArgumentParser(description='Make some weird raw HTTP requests and print outputs')
59 | parser.add_argument('-a', '--all', help='Send all requests', action='store_true')
60 | parser.add_argument('-n', '--num', help='Select only one request to send [0-5]', type=int)
61 | parser.add_argument('-c', '--content', help='Always show the content', action="store_true")
62 | parser.add_argument('host', metavar='HOST', help='Host targeted')
63 | args = parser.parse_args()
64 |
65 | # validate the host
66 | hosturl = urlparse(args.host)
67 | if hosturl.netloc == '':
68 | # XXX: remove path after hostname
69 | host = hosturl.path
70 | else:
71 | host = hosturl.netloc
72 |
73 | DEFAULT = [0, 1]
74 | ALL = [0, 1, 2, 3, 4, 5]
75 |
76 | if args.num != None:
77 | # Only one request
78 | send_request(args.num, host, args)
79 | else:
80 | if args.all:
81 | requests = ALL
82 | else:
83 | requests = DEFAULT
84 |
85 | for i in requests:
86 | send_request(i, host, args)
87 |
--------------------------------------------------------------------------------
/pt/domains_on_iplist.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import sys
4 | import json
5 | import dns.resolver
6 | import requests
7 | from datetime import datetime, timezone, timedelta
8 | from dns import resolver, reversename, exception
9 |
10 | PT_BASE_URL = "https://api.passivetotal.org"
11 | DEFAULT_TIMEFRAME = 365
12 |
13 | def get_unique_dns(config, ip_address, start=None):
14 | if not start:
15 | start_date = datetime.now() - timedelta(days=DEFAULT_TIMEFRAME)
16 | start = start_date.strftime("%Y-%m-%d %H:%M:%S")
17 |
18 | path = "/v2/dns/passive/unique"
19 | results = passivetotal_get(config, path, ip_address, start)
20 |
21 | domains = []
22 | if "results" in results:
23 | for domain in results["results"]:
24 | if domain not in domains:
25 | domains.append(domain)
26 |
27 | return domains
28 |
29 |
30 | def passivetotal_get(conf, path, query, start):
31 | url = PT_BASE_URL + path
32 | data = {"query": query, "start": start}
33 | PT_AUTH = (conf['username'], conf['api_key'])
34 | response = requests.get(url, auth=PT_AUTH, json=data)
35 | return response.json()
36 |
37 |
38 | def resolve_domain(domain):
39 | resolutions = []
40 | try:
41 | answer = resolver.query(domain, "A")
42 | for ip in answer:
43 | resolutions.append(ip.address)
44 | except (resolver.NoAnswer, resolver.NXDOMAIN, resolver.NoNameservers, exception.Timeout):
45 | pass
46 |
47 | return resolutions
48 |
49 | def get_config():
50 | conf_file = os.path.join(os.path.expanduser("~"), ".config/passivetotal/api_config.json")
51 | if os.path.isfile(conf_file):
52 | with open(conf_file, 'r') as f:
53 | conf = json.loads(f.read())
54 | else:
55 | print('No config file')
56 | sys.exit(1)
57 | return conf
58 |
59 |
60 | if __name__ == "__main__":
61 | parser = argparse.ArgumentParser(description='Get list of domains on a list of IPs')
62 | parser.add_argument('IPFILE', help='File with a list of IPs')
63 | args = parser.parse_args()
64 |
65 | config = get_config()
66 |
67 | with open(args.IPFILE) as f:
68 | ips = f.read().split('\n')
69 | ips.remove('')
70 |
71 | blocked_domains = set()
72 | for ip in ips:
73 | print("Checking {}".format(ip))
74 | domains = get_unique_dns(config, ip)
75 | for d in domains:
76 | sips = resolve_domain(d)
77 | if ip in sips:
78 | print("{} still on {}".format(d, ip))
79 | blocked_domains.add(d)
80 | else:
81 | print("{} not anymore on {}".format(d, ip))
82 |
83 | with open("a.txt", "w+") as f:
84 | for d in blocked_domains:
85 | f.write("{}\n".format(d))
86 |
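87 | # Expected ~/.config/passivetotal/api_config.json, inferred from get_config() and
88 | # passivetotal_get() above (values are placeholders):
89 | # {"username": "you@example.org", "api_key": "YOUR_API_KEY"}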
--------------------------------------------------------------------------------
/cloudfare_certs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from sslyze.server_connectivity_tester import ServerConnectivityError
3 | from sslyze.server_connectivity_tester import ServerConnectivityTester
4 | from sslyze.ssl_settings import TlsWrappedProtocolEnum
5 | from sslyze.plugins.certificate_info_plugin import CertificateInfoScanCommand
6 | from sslyze.synchronous_scanner import SynchronousScanner
7 | from cryptography.x509 import DNSName, ExtensionOID
8 | from dns import resolver, exception
9 | import argparse
10 | import sys
11 |
12 |
13 | def get_cert_alt_names(host, port=443):
14 | try:
15 | server_tester = ServerConnectivityTester(hostname=host, port=port,
16 | tls_wrapped_protocol=TlsWrappedProtocolEnum.HTTPS)
17 | server_info = server_tester.perform()
18 | except ServerConnectivityError:
19 | print("Impossible to connect")
20 | sys.exit(1)
21 |
22 | command = CertificateInfoScanCommand()
23 | synchronous_scanner = SynchronousScanner()
24 | scan_result = synchronous_scanner.run_scan_command(server_info, command)
25 | cert = scan_result.verified_certificate_chain[0]
26 | subj_alt_names = []
27 | san_ext = cert.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
28 | subj_alt_names = san_ext.value.get_values_for_type(DNSName)
29 | return subj_alt_names
30 |
31 |
32 | def get_ns(domain):
33 | res = {}
34 | try:
35 | answers = resolver.query(domain, 'NS')
36 | except (resolver.NoAnswer, resolver.NXDOMAIN, resolver.NoNameservers):
37 | res['error'] = True
38 | res['msg'] = "No NS entry configured"
39 | except exception.Timeout:
40 | res['error'] = True
41 | res['msg'] = "Timeout"
42 | else:
43 | res['error'] = False
44 | res['ns'] = [str(b.target) for b in answers]
45 | return res
46 |
47 |
48 | if __name__ == "__main__":
49 | parser = argparse.ArgumentParser(description='Analyze Cloudflare certs')
50 | parser.add_argument('HOST', help='host')
51 | args = parser.parse_args()
52 |
53 | res = get_ns(args.HOST)
54 | if res['error']:
55 | print(res['msg'])
56 | sys.exit(1)
57 | domain_ns = res['ns']
58 | print('NS: {}'.format(','.join(domain_ns)))
59 |
60 | alt_names = get_cert_alt_names(args.HOST)
61 | final_list = [b for b in alt_names if not b.startswith('*.') and 'cloudflaressl.com' not in b]
62 | for d in final_list:
63 | if d != args.HOST:
64 | res = get_ns(d)
65 | if res['error']:
66 | print('-{} - {}'.format(d, res['msg']))
67 | else:
68 | if res['ns'] == domain_ns:
69 | print('-{} - SAME NS'.format(d))
70 | else:
71 | print('-{} - different NS'.format(d))
72 |
--------------------------------------------------------------------------------
/format/fixtar.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | import math
5 | import re
6 |
7 | def testheader(hd):
8 | r = re.match(rb".{100}[0-9]{6} \x00[0-9\x00]{7}\x00[0-9\x00]{7}\x00[0-9]{11}\x00[0-9\x00]{11}\x00\d{6}\x00 \w\x00{100}", hd)
9 | return (r is not None)
10 |
11 |
12 | if __name__ == "__main__":
13 | parser = argparse.ArgumentParser(description='Repair a TAR archive')
14 | parser.add_argument('FILE', help='TAR File')
15 | parser.add_argument('OUTPUT', help='Output TAR file')
16 | args = parser.parse_args()
17 |
18 | size = os.path.getsize(args.FILE)
19 | fin = open(args.FILE, 'rb')
20 | fout = open(args.OUTPUT, 'wb+')
21 |
22 | i = 0
23 | prev = 0
24 | data = b""
25 | while (i + 512) < size:
26 | header = fin.read(512)
27 | if not testheader(header):
28 | if header == b'\x00'*len(header):
29 | print("Final empty header, it should be good, fingers crossed")
30 | fout.write(data)
31 | fout.write(fin.read())
32 | sys.exit(1)
33 | print("Broken file entry at {} (wrong header at {})".format(prev, i))
34 | fin.seek(prev)
35 | hd = fin.read(512)
36 | fs = int(hd[124:124+12].strip(b"\x00").decode('utf-8'), 8)
37 | nb = math.ceil(fs / 512)
38 | data = fin.read(3000000)
39 | r = re.search(rb".{100}[0-9]{6} \x00[0-9\x00]{7}\x00[0-9\x00]{7}\x00[0-9]{11}\x00[0-9\x00]{11}\x00\d{6}\x00 \w\x00{100}", data)
40 | if r:
41 | j = r.span()[0]
42 | print("Next header found")
43 | print(data[j:j+200])
44 | if j > (nb * 512):
45 | # There is some extra data we should remove
46 | print("Too much data")
47 | data = data[:nb*512]
48 | else:
49 | # There is not enough data
50 | print("Not enough data")
51 | data = data[:j] + b"\x00"*((nb*512) - j)
52 | fin.seek(prev)
53 | fin.read(512 + j)
54 | i = prev + 512 + j
55 | else:
56 | # broken
57 | print("No header found, quitting")
58 | fout.write(data)
59 | fout.write(fin.read())
60 | sys.exit(1)
61 | else:
62 | fout.write(data)
63 | fout.write(header)
64 | fs = int(header[124:124+12].strip(b"\x00").decode('utf-8'), 8)
65 | nb = math.ceil(fs / 512)
66 | data = fin.read(512*nb)
67 | # impossible to add without being sure the next header is good,
68 | # so saving here and adding later
69 | #fout.write(data)
70 | prev = i
71 | i += 512 + 512*nb
72 |
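73 | # Note on the 512-byte ustar header matched by testheader(): the fields used here
74 | # are the name (offset 0, 100 bytes), mode/uid/gid (octal), and the entry size at
75 | # offset 124 (12 bytes, octal), which is why the length is read as
76 | # int(header[124:124+12], 8) after stripping NULs and rounded up to whole
77 | # 512-byte blocks.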
--------------------------------------------------------------------------------
/web/watch_response_time.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python2
2 |
3 | import argparse
4 | from urlparse import urlparse
5 | import requests
6 | from datetime import datetime
7 | import time
8 |
9 |
10 | def get_response_time(host):
11 | now = datetime.now()
12 | try:
13 | response = requests.get(host.geturl())
14 | if response.status_code == 200:
15 | print "%s - %s : Response time: %f" % (str(now.time()), host.netloc, response.elapsed.total_seconds())
16 | return (True, response.elapsed)
17 | else:
18 | print "%s - %s : Error server not available" % (str(now.time()), host.netloc)
19 | return (False, 0.0)
20 | except requests.exceptions.ConnectionError:
21 | print "%s - %s : Fail to reach the server!" % (str(now.time()), host.netloc)
22 | return (False, 0.0)
23 |
24 | if __name__ == "__main__":
25 | parser = argparse.ArgumentParser(description='Watch web server response time')
26 | parser.add_argument('-l', '--log', help='Log response times over time', action='store_true')
27 | parser.add_argument('-o', '--once', help='Do not watch, just print response time and quit', action="store_true")
28 | parser.add_argument('-d', '--delay', help='Delay between request in seconds(default is 60)', type=int, default=60)
29 | parser.add_argument('-a', '--alerts', help='Display alerts when response time goes over 4s', action="store_true")
30 | parser.add_argument('host', metavar='HOST', help='Host targeted')
31 | args = parser.parse_args()
32 |
33 | # valid the host
34 | if args.host.startswith("http"):
35 | host = urlparse(args.host)
36 | else:
37 | host = urlparse("http://" + args.host)
38 |
39 | if args.log:
40 | flog = open(host.netloc + ".log", "a")
41 |
42 |
43 | #print args
44 | if args.alerts:
45 | import pynotify
46 | pynotify.init("icon-summary-body")
47 |
48 | if args.once:
49 | get_response_time(host)
50 | else:
51 | while True:
52 | (res, t) = get_response_time(host)
53 | if args.alerts and res and (t.total_seconds() > 4):
54 | notification = pynotify.Notification(
55 | host.netloc,
56 | "Response time : %f !!!!!!!!!!!!!!!" % t.total_seconds(),
57 | "notification-message-im")
58 | notification.show()
59 | del notification
60 | if args.log:
61 | # the log file handle appends each result
62 | now = datetime.now()
63 | if res:
64 | flog.write(now.strftime("%Y-%m-%d %H:%M:%S\t->\t") + str(t.total_seconds())+ "\n")
65 | else:
66 | flog.write(now.strftime("%Y-%m-%d %H:%M:%S\t->\t") + "0.0\n")
67 | flog.flush()
68 | time.sleep(args.delay)
69 |
--------------------------------------------------------------------------------
/web/check_fun_files.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python2
2 | try:
3 | import urlparse
4 | except ImportError:
5 | # python 3
6 | import urllib.parse as urlparse
7 | import argparse
8 | import requests
9 | import os
10 |
11 | if __name__ == "__main__":
12 | parser = argparse.ArgumentParser(description='Check fun files on the web server')
13 | parser.add_argument('-f', '--file', help='File which contain the list of common files (default is fun_files.txt)', default='fun_files.txt')
14 | parser.add_argument('-s', '--save', help='Save file found', action='store_true')
15 | parser.add_argument('-o', '--outdir', help='Output directory (default is files)', default='files')
16 | parser.add_argument('-v', '--verbose', help='verbose mode', action="count", default=0)
17 | parser.add_argument('host', metavar='HOST', help='Host targeted')
18 | args = parser.parse_args()
19 |
20 |     # Disable insecure request warnings
21 | try:
22 | import requests.packages.urllib3
23 | requests.packages.urllib3.disable_warnings()
24 |     except Exception:
25 | pass
26 |
27 | if not args.host.startswith("http"):
28 | host = "http://" + args.host
29 | hostname = args.host
30 | else:
31 | host = args.host
32 | hostname = urlparse.urlparse(args.host).netloc
33 |
34 | headers = {
35 | "User-Agent": "Baiduspider+(+http://www.baidu.com/search/spider.htm)"
36 | }
37 |
38 | # Check host validity
39 | try:
40 | r = requests.get(host, headers=headers, verify=False)
41 | except requests.ConnectionError:
42 | print("Host unavailable!")
43 | exit(1)
44 | if r.status_code != 200:
45 | print("Bad HTTP code when requesting / (%i), quitting" % r.status_code)
46 | exit(1)
47 |
48 | if args.save:
49 | # Check dirs
50 | if not os.path.exists(args.outdir):
51 | os.makedirs(args.outdir)
52 | if not os.path.exists(args.outdir + "/" + hostname):
53 | os.makedirs(args.outdir + "/" + hostname)
54 |
55 |
56 | # Loop on the file
57 | ffile = open(args.file, "r")
58 | fname = ffile.readline().strip()
59 | while fname != "":
60 | try:
61 | r = requests.get(urlparse.urljoin(host, fname), headers=headers, verify=False)
62 |             if r.status_code == 200:
63 |                 if args.save:
64 |                     print("%s found! (saved)" % fname)
65 |                     fout = open(args.outdir + "/" + hostname + "/" + fname, "a+")
66 |                     fout.write(r.text)
67 |                     fout.close()
68 |                 else:
69 |                     print("%s found!" % fname)
70 |             else:
71 |                 print("%s not found (%i)" % (fname, r.status_code))
72 |         except requests.ConnectionError:
73 |             print("%s unreachable while requesting %s" % (host, fname))
74 |
75 | fname = ffile.readline().strip()
76 |
--------------------------------------------------------------------------------
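One caveat in the save branch above: if an entry in fun_files.txt contains a subdirectory (for instance `.git/config`), the plain string concatenation fails because the intermediate directory does not exist. A hedged sketch of a safer path construction; all names here are illustrative:

```python
# Sketch: build the output path with os.path and create intermediate dirs
import os

outdir, hostname, fname = "files", "example.org", ".git/config"  # illustrative
dest = os.path.join(outdir, hostname, fname)
os.makedirs(os.path.dirname(dest), exist_ok=True)  # no-op if it already exists
with open(dest, "a+") as fout:
    fout.write("...")
```
--------------------------------------------------------------------------------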
/misp/yaraxcheck.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # -*- coding: utf-8 -*-
3 | import argparse
4 | import sys
5 | import yara
6 | from misp import MispServer, MispEvent, MispAttribute
7 | from misplib import parse_config
8 |
9 | """Check yara rules against malware samples from a MISP event
10 | Author : Tek
11 | Date : 17/01/2017
12 | Requires the yara-python library, see https://github.com/VirusTotal/yara-python
13 | """
14 |
15 | def check_yara(rules, data, name, verbose):
16 | res = rules.match(data=data)
17 | if len(res) > 0:
18 | print("%s: MATCH %s" % (
19 | name,
20 | ",".join(map(lambda x: x.rule, res))
21 | )
22 | )
23 | else:
24 | if verbose > 0:
25 | print('%s: no match' % name)
26 |
27 | return res
28 |
29 |
30 | if __name__ == "__main__":
31 | parser = argparse.ArgumentParser(description='Check yara rules on samples in MISP')
32 | parser.add_argument('--server', '-s', help='Server used for the request')
33 | parser.add_argument('--event', '-e', help='Event infos', type=int)
34 | parser.add_argument('--rules', '-r', help='Yara rule file')
35 | parser.add_argument('-v', '--verbose', action='count', default=0)
36 | args = parser.parse_args()
37 |
38 | config = parse_config()
39 | if args.server is not None:
40 | if args.server.lower() in config.keys():
41 | server = MispServer(url=config[args.server.lower()]['url'],
42 | apikey=config[args.server.lower()]['key'],
43 | ssl_chain=False)
44 | else:
45 | print("Server not found, quitting...")
46 | sys.exit(1)
47 |
48 | else:
49 | if 'default' not in config.keys():
50 |             print("No default server in the MISP config, quitting...")
51 | sys.exit(1)
52 | else:
53 | server = MispServer(url=config['default']['url'],
54 | apikey=config['default']['key'],
55 | ssl_chain=False)
56 |
57 | event = server.events.get(args.event)
58 | rules = yara.compile(filepath=args.rules)
59 |
60 | for attr in event.attributes:
61 | if attr.type == 'malware-sample':
62 | # Ignore zip files
63 | value = str(attr.value)
64 | if "|" in value:
65 | fname = value.split("|")[0]
66 | if not fname.endswith(".zip"):
67 | data = server.download(attr)
68 | check_yara(rules, data, attr.value, args.verbose)
69 | else:
70 | if args.verbose > 1:
71 | print("%s not considered (.zip file)" % (attr.value))
72 | else:
73 | data = server.download(attr)
74 | check_yara(rules, data, attr.value, args.verbose)
75 | else:
76 | if args.verbose > 1:
77 | print("%s not considered (type %s)" % (attr.value, attr.type))
78 |
--------------------------------------------------------------------------------
/misp/README.md:
--------------------------------------------------------------------------------
1 | # analyst-scripts : MISP scripts
2 |
3 | Scripts for using MISP
4 |
5 | * mispcli.py : Command line interface for MISP
6 | * mispcopy.py : script to copy attributes from one event to another
7 | * vtxcheck.py : search for hashes in VT and upload the other hashes to the MISP event
8 | * xsearch_misp.py : Search all IOCs from an event in another MISP instance (hint: if you use this on the same server it is useless)
9 |
10 | libs:
11 | * misp.py : copy of [Nicolas Bareil python-misp library](https://github.com/nbareil/python-misp)
12 | * misplib.py : shared code between the tools
13 |
14 | ## Configuration
15 |
16 | All these scripts use the same configuration file, which should be in ~/.misp with the following format:
17 | ```
18 | [Server1]
19 | url: URL
20 | key: key
21 | default: true
22 |
23 | [Server2]
24 | url: url
25 | key: key
26 | ```
27 |
28 | ## mispcli
29 |
30 | List events:
31 | ```bash
32 | $ python mispcli.py -l
33 | 1 - Event with stuff in it
34 | 2 - another event
35 | ...
36 | ```
37 |
38 | Information about an event:
39 | ```
40 | $ python mispcli.py -e 42
41 | Event 42 : Title
42 | Tags : TLP:RED
43 | 10 Attributes including:
44 | - 1 comment (0 for detection)
45 | - 7 domain (7 for detection)
46 | - 1 hostname (1 for detection)
47 | - 1 ip-dst (1 for detection)
48 |
49 | ```
50 |
51 | Disable for IDS all the md5 in event 48 from Server2:
52 | ```
53 | $ python mispcli.py -s server2 -e 48 -t md5 --no-ids
54 | Attr df85d882ac37e278c9995dbbbfae7173 already not for IDS detection
55 | Attr 044eadff537f21814b923291f9614cab already not for IDS detection
56 | Attr 21a1ee58e4b543d7f2fa3b4022506029 already not for IDS detection
57 | Attr 36d2f0228c1c4f60bd1dad94977e5a5a already not for IDS detection
58 | Attr 1088a1d71916c674daae730642d77eda already not for IDS detection
59 | Attr 5cea24fb20763d255c67efe2b3fc9cc6 already not for IDS detection
60 | Attr 46d030b4253fa7911c3748f04420d1c4 already not for IDS detection
61 | Attr 7a368bf665bf601b679d079bea2923ae already not for IDS detection
62 | Attr 9ef1fadd764489b4161cce4a43929f9f already not for IDS detection
63 | Attr a13af624b690e1ead49bdf0d24560654 already not for IDS detection
64 | ...
65 | ```
66 |
67 | ## mispcopy
68 |
69 | Simple script to copy IOCs from one event to another:
70 | ```
71 | usage: mispcopy.py [-h] [--no-cleaning]
72 | SERVER_SOURCE EVENT_SOURCE SERVER_DEST EVENT_DEST
73 |
74 | Command line interface to MISP servers
75 |
76 | positional arguments:
77 | SERVER_SOURCE Server source for the copy
78 | EVENT_SOURCE Event source
79 | SERVER_DEST Server destination
80 | EVENT_DEST Event destination
81 |
82 | optional arguments:
83 | -h, --help show this help message and exit
84 | --no-cleaning, -c Do not clean attributes (personal rules)
85 | ```
86 |
87 | Example:
88 | ```
89 | $ python mispcopy.py server1 41 server2 468
90 | Uploaded Network activity / domain / www.google.com
91 | ...
92 | ```
93 |
94 |
--------------------------------------------------------------------------------
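The README above states that every script reads its servers from ~/.misp. A minimal sketch of how such a file can be parsed with Python's configparser; this illustrates the format only and is not the actual misplib.parse_config implementation:

```python
# Sketch: parse the ~/.misp configuration described in the README above
import configparser
import os

def parse_config(path=os.path.expanduser("~/.misp")):
    config = configparser.ConfigParser()
    config.read(path)
    servers = {}
    for section in config.sections():
        servers[section.lower()] = {
            "url": config.get(section, "url"),
            "key": config.get(section, "key"),
        }
        # a section flagged "default: true" is also exposed as 'default'
        if config.has_option(section, "default"):
            if config.getboolean(section, "default"):
                servers["default"] = servers[section.lower()]
    return servers
```
--------------------------------------------------------------------------------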
/macos/macho_print.py:
--------------------------------------------------------------------------------
1 | import lief
2 | import argparse
3 | import hashlib
4 |
5 |
6 | if __name__ == '__main__':
7 | parser = argparse.ArgumentParser(description='Print Mach-O information')
8 | parser.add_argument('MACHO', help='Mach-o file')
9 | args = parser.parse_args()
10 |
11 |
12 | binary = lief.parse(args.MACHO)
13 |
14 | with open(args.MACHO, 'rb') as f:
15 | data = f.read()
16 |
17 | # General information -> CPU Type
18 | # Hash, CPU Type, Size
19 | print("General Information")
20 | print("=" * 80)
21 | for algo in ["md5", "sha1", "sha256"]:
22 | m = getattr(hashlib, algo)()
23 | m.update(data)
24 | print("{:15} {}".format(algo.upper()+":", m.hexdigest()))
25 | print("{:15} {} bytes".format("Size:", len(data)))
26 | print("{:15} {}".format("Type:", binary.header.cpu_type.name))
27 | print("Entry point:\t0x%x" % binary.entrypoint)
28 | print("")
29 |
30 | # Commands
31 | print("Commands")
32 | print("=" * 80)
33 | for c in binary.commands:
34 | if c.command.name == "SEGMENT_64":
35 | print("{:20} {:10} {:5} {:14} {}".format(
36 | c.command.name,
37 | c.name if hasattr(c, 'name') else '',
38 | c.size,
39 | hex(c.virtual_address) if hasattr(c, 'virtual_address') else "",
40 | hex(c.file_offset) if hasattr(c, 'file_offset') else "",
41 | ))
42 | elif c.command.name in ["LOAD_DYLIB", "LOAD_WEAK_DYLIB"]:
43 | print("{:20} {} (version {})".format(
44 | c.command.name,
45 | c.name,
46 | ".".join([str(a) for a in c.current_version])
47 | ))
48 | elif c.command.name == "UUID":
49 | print("{:20} {}".format(
50 | c.command.name,
51 | ''.join('{:02x}'.format(x) for x in c.uuid)
52 | ))
53 | else:
54 | print("{:20} {:20}".format(
55 | c.command.name,
56 | c.name if hasattr(c, 'name') else ''
57 | ))
58 | print("")
59 |
60 | # Sections
61 | print("Sections")
62 | print("=" * 80)
63 |     print("%-16s %-9s %-12s %-9s %-9s %-25s %s" % ("Name", "Segname", "VirtAddr", "RawAddr", "Size", "Type", "MD5"))
64 | for s in binary.sections:
65 | m = hashlib.md5()
66 | m.update(bytearray(s.content))
67 | print("%-16s %-9s %-12s %-9s %-9s %-25s %s" % (
68 | s.name,
69 | s.segment.name,
70 | hex(s.virtual_address),
71 | hex(s.offset),
72 | s.size,
73 | str(s.type).replace("SECTION_TYPES.", ""),
74 | m.hexdigest()
75 | ))
76 | print("")
77 |
78 | # Imports (binding infos)
79 | print("Imports")
80 | print("=" * 80)
81 | for f in binary.imported_symbols:
82 | try:
83 | print("{:35s} {}".format(f.name, f.binding_info.library.name))
84 | except lief.not_found:
85 | print(f.name)
86 |
87 |
88 |
--------------------------------------------------------------------------------
/pe/petimeline.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python2
2 | import argparse
3 | import lief
4 | import os
5 | import datetime
6 | import magic
7 | import pefile
8 | import hashlib
9 |
10 |
11 | def extract_datetime(pe):
12 | """
13 | Extract PE timestamp using lief
14 | """
15 | if pe.header.time_date_stamps:
16 | return datetime.datetime.fromtimestamp(pe.header.time_date_stamps)
17 | else:
18 | return None
19 |
20 |
21 | def extract_sig_startdate(pe):
22 | if pe.has_signature:
23 | issuer_serial = ":".join(map(lambda e : "{:02x}".format(e), pe.signature.signer_info.issuer[1]))
24 | for c in pe.signature.certificates:
25 | serial = ":".join(map(lambda e : "{:02x}".format(e), c.serial_number))
26 | if serial == issuer_serial:
27 | d = c.valid_from
28 | return datetime.datetime(year=d[0], month=d[1], day=d[2], hour=d[3], minute=d[4], second=d[5])
29 | else:
30 | return None
31 |
32 |
33 | def get_sha256(fpath):
34 | with open(fpath,"rb") as f:
35 | res = hashlib.sha256(f.read()).hexdigest()
36 | return res
37 |
38 | if __name__ == "__main__":
39 | parser = argparse.ArgumentParser(description='Create a timeline of PE/DLL timestamp')
40 |     parser.add_argument('DIRECTORY', help='Directory containing the PE files')
41 |     parser.add_argument('--recursive', '-r', action='store_true', help='Walk the directory recursively')
42 |
43 | args = parser.parse_args()
44 |
45 | allfiles = {}
46 | mime = magic.Magic(mime=True)
47 |
48 | if args.recursive:
49 | for root, dirs, files in os.walk(args.DIRECTORY):
50 | for f in files:
51 | fpath = os.path.join(root, f)
52 | if mime.from_file(fpath) == "application/x-dosexec":
53 | pe = lief.parse(fpath)
54 | timestamp = extract_datetime(pe)
55 | if timestamp is not None:
56 | allfiles[timestamp] = ("TIMESTAMP", fpath, get_sha256(fpath))
57 | timestamp = extract_sig_startdate(pe)
58 | if timestamp is not None:
59 | allfiles[timestamp] = ("SIGNATURE", fpath, get_sha256(fpath))
60 | else:
61 | for f in os.listdir(args.DIRECTORY):
62 | fpath = os.path.join(args.DIRECTORY, f)
63 | if os.path.isfile(fpath):
64 | if mime.from_file(fpath) == "application/x-dosexec":
65 | pe = lief.parse(fpath)
66 | timestamp = extract_datetime(pe)
67 | if timestamp is not None:
68 | allfiles[timestamp] = ("TIMESTAMP", fpath, get_sha256(fpath))
69 | timestamp = extract_sig_startdate(pe)
70 | if timestamp is not None:
71 | allfiles[timestamp] = ("SIGNATURE", fpath, get_sha256(fpath))
72 |
73 |
74 | dates = sorted(allfiles.keys())
75 | for d in dates:
76 | print("{} - {} - {} - {}".format(d.strftime("%Y-%m-%d %H:%M:%S"), allfiles[d][0], allfiles[d][2], allfiles[d][1]))
77 |
--------------------------------------------------------------------------------
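The recursive and non-recursive branches of petimeline.py duplicate the PE-processing logic. A sketch of a generator that would let both branches share one loop; the helper name is illustrative:

```python
# Sketch: one file iterator covering both the recursive and flat cases
import os

def iter_files(directory, recursive=False):
    """Yield file paths from directory, optionally walking subdirectories."""
    if recursive:
        for root, dirs, files in os.walk(directory):
            for f in files:
                yield os.path.join(root, f)
    else:
        for f in os.listdir(directory):
            fpath = os.path.join(directory, f)
            if os.path.isfile(fpath):
                yield fpath
```
--------------------------------------------------------------------------------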
/web/proxychecker.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import argparse
4 | import sys
5 |
6 | TEST_PAGE = 'http://192.241.164.26/tools/open_proxy_check.txt'
7 |
8 |
9 | def test_proxy(proxy, port):
10 | # Test HTTP first
11 | proxies = {'http': 'http://%s:%i' % (proxy, port)}
12 | try:
13 | r = requests.get(TEST_PAGE, proxies=proxies)
14 | except requests.exceptions.ProxyError:
15 | return False
16 | else:
17 | if r.status_code == 200:
18 | if 'If you are seeing this while running the open proxy text, your server is an open proxy.' in r.text:
19 | return True
20 |         # Non-200 response or unexpected content: not an open proxy
21 | return False
22 |
23 |
24 | if __name__ == '__main__':
25 | parser = argparse.ArgumentParser(description='Test an IP to check if it is an open proxy')
26 | subparsers = parser.add_subparsers(help='Subcommand')
27 | parser_a = subparsers.add_parser('ip', help='Check if an IP is an open proxy')
28 | parser_a.add_argument('IP', help='IP to be checked')
29 | parser_a.add_argument('--port', '-p', type=int, default=8080, help='Port')
30 | parser_a.set_defaults(subcommand='ip')
31 | parser_b = subparsers.add_parser('list', help='Check a list of IPs')
32 | parser_b.add_argument('FILE', help='File with a list of IP:port')
33 | parser_b.set_defaults(subcommand='list')
34 | parser_c = subparsers.add_parser('test', help='Test that the remote page is still up')
35 | parser_c.set_defaults(subcommand='test')
36 |
37 | args = parser.parse_args()
38 |
39 | if 'subcommand' in args:
40 | if args.subcommand == 'test':
41 | r = requests.get(TEST_PAGE)
42 | if r.status_code == 200:
43 | if 'If you are seeing this while running the open proxy text, your server is an open proxy.' in r.text:
44 | print("It works!")
45 | else:
46 | print("Bad bad bad: content has changed")
47 | else:
48 | print("Bad bad bad: not available")
49 | elif args.subcommand == 'list':
50 | try:
51 | with open(args.FILE) as f:
52 | data = f.read().split('\n')
53 | except FileNotFoundError:
54 | print('This file does not exist')
55 | sys.exit(1)
56 |
57 | for proxy in data:
58 | if proxy.strip() == '':
59 | continue
60 | try:
61 | p = proxy.split(':')
62 | port = int(p[1])
63 | except (ValueError, IndexError):
64 | print('%s - Invalid Value' % proxy)
65 | continue
66 | if test_proxy(p[0].strip(), port):
67 | print('%s - Yes' % proxy)
68 | else:
69 | print('%s - No' % proxy)
70 | elif args.subcommand == 'ip':
71 | if test_proxy(args.IP, args.port):
72 | print('YES')
73 | else:
74 | print('NO')
75 | else:
76 | parser.print_help()
77 | else:
78 | parser.print_help()
79 |
80 |
81 |
--------------------------------------------------------------------------------
/format/parsetar.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import math
4 | import re
5 | from datetime import datetime
6 |
7 | def testheader(hd):
8 | r = re.match(b".{100}[0-9]{6} \x00[0-9\x00]{7}\x00[0-9\x00]{7}\x00[0-9]{11}\x00[0-9\x00]{11}\x00\d{6}\x00 \d\x00{100}", hd)
9 | return (r is not None)
10 |
11 |
12 | if __name__ == "__main__":
13 | parser = argparse.ArgumentParser(description='Process a TAR archive')
14 | parser.add_argument('FILE', help='TAR File')
15 | parser.add_argument('--verbose', '-v', action="store_true", help="Verbose mode")
16 | args = parser.parse_args()
17 |
18 | size = os.path.getsize(args.FILE)
19 | fin = open(args.FILE, 'rb')
20 |
21 | i = 0
22 | nextheader = False
23 | while (i + 512) < size:
24 | header = fin.read(512)
25 | if len(header) < 512:
26 | print("Done")
27 | break
28 | if header == b"\x00"*512:
29 | # Sometimes a tar archive ends with an empty header
30 | print("{} - empty header".format(i))
31 | i += 512
32 | continue
33 |
34 | if header[345] != 0:
35 | name = header[345:500].decode('utf-8').strip("\x00") + "/" + header[0:100].decode('utf-8').strip("\x00")
36 | else:
37 | name = header[0:100].decode('utf-8').strip("\x00")
38 |
39 | fs = int(header[124:124+12].strip(b"\x00").decode('utf-8'), 8)
40 | mtime = datetime.fromtimestamp(int(header[136:136+12], 8))
41 | flag = header[156]
42 | nb = math.ceil(fs / 512)
43 | if flag == 120:
44 | # extension header for next file
45 | data = fin.read(512*nb)
46 | nextheader = {}
47 | j = 0
48 | headone = False
49 | while not headone:
50 | if data[j] == 0:
51 | headone = True
52 | break
53 |                 length = int(data[j:j+3])  # record length in decimal ("NN key=value\n")
54 |                 if length < 100:
55 |                     entry = data[j+3:j+length-1]  # skip "NN " prefix, drop trailing \n
56 |                 else:
57 |                     entry = data[j+4:j+length-1]  # three-digit length prefix "NNN "
58 | entry = entry.decode("utf-8", errors="ignore")
59 | j += length
60 | entry = entry.split("=", 1)
61 | if entry[0].endswith("time"):
62 | nextheader[entry[0]] = datetime.fromtimestamp(float(entry[1]))
63 | else:
64 | nextheader[entry[0]] = entry[1]
65 | else:
66 | #if testheader(header):
67 | #print("{:10d} - {} - {} bytes (OK)".format(i, name, fs))
68 | #else:
69 | #print("{:10d} - {} - {} bytes (NOPE)".format(i, name, fs))
70 | print("{:10d} - {} - {} bytes".format(i, name, fs))
71 |             if nextheader is not False:
72 | if args.verbose:
73 | for entry in nextheader:
74 | print("-{} : {}".format(entry, nextheader[entry]))
75 | else:
76 | if "name" in nextheader:
77 | print("-name: {}".format(nextheader["name"]))
78 | nextheader = False
79 | data = fin.read(512*nb)
80 | i += 512 + 512*nb
81 |
82 |
--------------------------------------------------------------------------------
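parsetar.py walks the 512-byte header blocks by hand. As a cross-check of its output, the standard library's tarfile module can list the same entries (the file name is illustrative):

```python
# Cross-check the manual parsing above with Python's tarfile module
import tarfile

with tarfile.open("archive.tar") as tar:  # illustrative file name
    for member in tar.getmembers():
        print("{:10d} - {} - {} bytes".format(
            member.offset,  # offset of the member's header block
            member.name,
            member.size,
        ))
```

Since a manual parser is mostly useful on archives that tarfile may refuse, this cross-check only applies to intact files.
--------------------------------------------------------------------------------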
/network/dns_resolve_mx.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from dns import resolver, reversename, exception
4 | from IPy import IP
5 |
6 | def is_ip(target):
7 | """
8 | Test if a string is an IP address
9 | """
10 | if isinstance(target, str):
11 | try:
12 | IP(target)
13 | return True
14 | except ValueError:
15 | return False
16 | else:
17 | return False
18 |
19 |
20 | if __name__ == "__main__":
21 | parser = argparse.ArgumentParser(description='Resolve domains')
22 | parser.add_argument('TXTFILE', help='Text files with domains')
23 | parser.add_argument('--verbose', '-v', action='store_true',
24 | help='verbose mode')
25 | args = parser.parse_args()
26 |
27 | results = {}
28 |
29 | with open(args.TXTFILE) as f:
30 | data = f.read().split("\n")
31 |
32 | for d in data:
33 | dd = d.strip()
34 | if dd not in results and len(dd) > 0:
35 | try:
36 | res = resolver.resolve(dd, "MX")
37 | except (resolver.NoAnswer, resolver.NXDOMAIN):
38 | results[dd] = [True, "", ""]
39 | if args.verbose:
40 |                 print("{}: no MX record or NXDOMAIN".format(dd))
41 | except resolver.NoNameservers:
42 | results[dd] = [False, "SERVFAIL", ""]
43 | if args.verbose:
44 | print("{}: SERVFAIL".format(dd))
45 | except exception.Timeout:
46 | results[dd] = [False, "Timeout", ""]
47 | if args.verbose:
48 | print("{}: Timeout".format(dd))
49 | else:
50 | for rdata in res:
51 | if is_ip(rdata.exchange.to_text()):
52 | # IP directly
53 | results[dd] = [True, "", rdata.exchange.to_text()]
54 | if args.verbose:
55 | print("{}: {}".format(dd, rdata.exchange.to_text()))
56 | else:
57 | # Domain
58 | try:
59 | ip = [b.address for b in resolver.resolve(rdata.exchange, 'A')][0]
60 | except (resolver.NoAnswer, resolver.NXDOMAIN):
61 | # Hostname without IPv4
62 | results[dd] = [True, rdata.exchange.to_text(), ""]
63 | if args.verbose:
64 | print("{}: {}".format(dd, rdata.exchange.to_text()))
65 | else:
66 | results[dd] = [True, rdata.exchange.to_text(), ip]
67 | if args.verbose:
68 | print("{}: {} - {}".format(dd, rdata.exchange.to_text(), ip))
69 | with open("resolutions.csv", "w+") as f:
70 |         f.write("Domain,Success,MX,IP\n")
71 | for domain in results.keys():
72 | f.write("{},{},{},{}\n".format(
73 | domain,
74 | results[domain][0],
75 | results[domain][1],
76 | results[domain][2]
77 | ))
78 |
79 | print("Results written in resolutions.csv")
80 |
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
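The results CSV above is written with manual string formatting. A sketch of the same output via the csv module, which handles quoting and escaping automatically:

```python
# Sketch: write the same results dict with the csv module
import csv

with open("resolutions.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["Domain", "Success", "MX", "IP"])
    for domain, (success, mx, ip) in results.items():
        writer.writerow([domain, success, mx, ip])
```
--------------------------------------------------------------------------------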
/pe/get_richheaderhash.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import pefile
3 | import os
4 | import hashlib
5 | import struct
6 |
7 |
8 | def get_richpe_hash(exe_path):
9 | """Computes the RichPE hash given a file path or data.
10 | If the RichPE hash is unable to be computed, returns None.
11 | Otherwise, returns the computed RichPE hash.
12 | If both file_path and data are provided, file_path is used by default.
13 | Source : https://github.com/RichHeaderResearch/RichPE
14 | """
15 | try:
16 | pe = pefile.PE(exe_path)
17 | except pefile.PEFormatError:
18 | return None
19 |
20 | if pe.RICH_HEADER is None:
21 | return None
22 |
23 | # Get list of @Comp.IDs and counts from Rich header
24 | # Elements in rich_fields at even indices are @Comp.IDs
25 | # Elements in rich_fields at odd indices are counts
26 | rich_fields = pe.RICH_HEADER.values
27 | if len(rich_fields) % 2 != 0:
28 | return None
29 |
30 | # The RichPE hash of a file is computed by computing the md5 of specific
31 | # metadata within the Rich header and the PE header
32 | md5 = hashlib.md5()
33 |
34 | # Update hash using @Comp.IDs and masked counts from Rich header
35 | while len(rich_fields):
36 | compid = rich_fields.pop(0)
37 | count = rich_fields.pop(0)
38 | mask = 2 ** (count.bit_length() // 2 + 1) - 1
39 | count |= mask
40 |         md5.update(struct.pack("<L", compid))
41 |         md5.update(struct.pack("<L", count))
--------------------------------------------------------------------------------
/android/download_androguard_report.py:
--------------------------------------------------------------------------------
67 |
68 | args = parser.parse_args()
69 |
70 | koodous_conf = os.path.expanduser("~/.koodous")
71 | if not os.path.isfile(koodous_conf):
72 | print("Please add your Koodous key to ~/.koodous")
73 | sys.exit(-1)
74 |
75 | with open(koodous_conf, 'r') as f:
76 | key = f.read().strip()
77 |
78 |
79 | if not args.sha256:
80 | print("I need at least a SHA256 hash!")
81 | parser.print_help()
82 | return
83 |
84 | report_name = "{}-report.json".format(args.sha256)
85 | if args.filename:
86 | report_name = args.filename
87 |
88 | success = download_report(sha256=args.sha256, dst=report_name, key=key)
89 | if success:
90 | print("Androguard report saved in {}".format(report_name))
91 |
92 |
93 | if __name__ == '__main__':
94 | main()
95 |
--------------------------------------------------------------------------------
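The docstring of get_richpe_hash above explains that the hash is fed with @Comp.ID / count pairs from the Rich header. A small sketch of inspecting those pairs directly with pefile; the sample path is illustrative:

```python
# Sketch: print the @Comp.ID / count pairs that feed the RichPE hash above
import pefile

pe = pefile.PE("sample.exe")  # illustrative file name
if pe.RICH_HEADER is not None:
    values = list(pe.RICH_HEADER.values)
    # even indices are @Comp.IDs, odd indices are counts
    for compid, count in zip(values[::2], values[1::2]):
        print("compid=0x%08x count=%d" % (compid, count))
```
--------------------------------------------------------------------------------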
/ooni/download_measurements.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import argparse
4 | import os
5 | import sys
6 |
7 | OONI_API_BASE_URL = 'https://api.ooni.io/api/v1/'
8 | MEASUREMENTS_PER_PAGE = 100000
9 | TESTS = ["web_connectivity", "http_requests", "dns_consistency", "http_invalid_request_line", "bridge_reachability", "tcp_connect", "http_header_field_manipulation", "http_host", "multi_protocol_traceroute", "meek_fronted_requests_test", "whatsapp", "vanilla_tor", "facebook_messenger", "ndt", "dash", "telegram"]
10 |
11 |
12 | def list_measurements(params, limit=10000, page_size=100):
13 |     """
14 |     Query a list of measurements, paginating until the API returns
15 |     no more results or the limit is reached
16 |     """
17 |     res = []
18 |     params["offset"] = 0
19 |     params["limit"] = page_size
20 |     while len(res) < limit:
21 |         r = requests.get(
22 |             OONI_API_BASE_URL+'measurements',
23 |             params=params
24 |         )
25 |         rr = r.json()
26 |         if len(rr["results"]) == 0:
27 |             break
28 |         res.extend(rr["results"])
29 |         params["offset"] += page_size
30 |     # cap at limit in case the last page overshot
31 |     return res[:limit]
32 |
33 |
34 | if __name__ == '__main__':
35 | parser = argparse.ArgumentParser(description='Download some OONI measurements')
36 | parser.add_argument('--input', '-i', help="The input (for example a URL or IP address) to search measurements for")
37 | parser.add_argument('--test', '-t', help="Type of test")
38 | parser.add_argument('--country', '-c', help="Country code")
39 | parser.add_argument('--output', '-o', help="Output folder")
40 | parser.add_argument('--since', '-s', help='The start date of when measurements were run (ex. "2016-10-20T10:30:00")')
41 | parser.add_argument('--until', '-u', help='The end date of when measurement were run (ex. "2016-10-20T10:30:00")')
42 | parser.add_argument('--verbose', '-v', action='store_true',
43 | help="Verbose mode")
44 | parser.add_argument('--limit', '-l', default=10000, type=int,
45 | help="Maximum number of files downloaded")
46 | args = parser.parse_args()
47 |
48 | if args.output:
49 | if not os.path.isdir(args.output):
50 | print("Invalid folder")
51 | sys.exit(-1)
52 | else:
53 | args.output = "."
54 |
55 |     # build the query from the command-line arguments
56 | query = {}
57 | if args.input:
58 | query["input"] = args.input
59 | if args.country:
60 | query["probe_cc"] = args.country
61 | if args.test:
62 | if args.test not in TESTS:
63 | print("Invalid test name")
64 | sys.exit(-1)
65 | query["test_name"] = args.test
66 |
67 | if args.since:
68 | query["since"] = args.since
69 |     if args.until: query["until"] = args.until
70 |     res = list_measurements(query, limit=args.limit)
71 | if len(res) == args.limit:
72 | print("{} files identified, there are likely more files".format(len(res)))
73 | else:
74 | print("{} files identified".format(len(res)))
75 |
76 | for f in res:
77 | r = requests.get(f["measurement_url"])
78 | with open(os.path.join(args.output, f["measurement_id"]+ ".json"),"w") as fout:
79 | fout.write(r.text)
80 | print("Downloaded {}".format(f["measurement_id"] + ".json"))
81 | print("")
82 | print("{} files downloaded in {}".format(len(res), args.output))
83 |
--------------------------------------------------------------------------------
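A short usage sketch of list_measurements as defined above; the query values are illustrative and follow the same API fields the script already uses:

```python
# Sketch: fetch up to 200 web_connectivity measurements for one country
query = {"probe_cc": "IR", "test_name": "web_connectivity"}
res = list_measurements(query, limit=200)
print("%d measurements found" % len(res))
```
--------------------------------------------------------------------------------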
/ooni/get_ooni_telegram.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import requests
4 | from datetime import datetime, timedelta
5 | from colored import fg, bg, attr
6 |
7 | class OoniError(Exception):
8 | pass
9 |
10 | class OoniRepository(object):
11 | def __init__(self):
12 | self.base_url = "https://api.ooni.io/api/v1/"
13 |
14 | def list_tests(self, since, until, test, country):
15 | # TODO: handle pagination
16 | r = requests.get(
17 | self.base_url + "files",
18 | params={
19 | 'probe_cc':country,
20 | 'since': since.strftime("%Y-%m-%dT%H:%M:%S"),
21 | 'until': until.strftime("%Y-%m-%dT%H:%M:%S"),
22 | 'test_name': test
23 | }
24 | )
25 | if r.status_code == 200:
26 | return r.json()
27 | else:
28 | raise OoniError()
29 |
30 | def download_file(self, url):
31 | """
32 | Download a given OONI file
33 | """
34 | r = requests.get(url)
35 | if r.status_code == 200:
36 | return r.json()
37 | else:
38 | raise OoniError()
39 |
40 |
41 | if __name__ == "__main__":
42 | parser = argparse.ArgumentParser(description='Check OONI data')
43 | parser.add_argument('--country', '-c', default="IR",
44 | help="Country to check")
45 | parser.add_argument('--day', '-d', nargs='?',
46 | help='Day to consider (format YYYYMMDD)')
47 | args = parser.parse_args()
48 |
49 | if args.day:
50 | since = datetime.strptime(args.day, "%Y%m%d")
51 | else:
52 | now = datetime.now()
53 | since = datetime(now.year, now.month, now.day) - timedelta(days=1)
54 |
55 | until = since + timedelta(days=1)
56 |
57 | ooni = OoniRepository()
58 | results = ooni.list_tests(since, until, "telegram", args.country)
59 |
60 | # Organize per ASN
61 | asns = {}
62 | for r in results['results']:
63 | if r['probe_asn'] in asns:
64 | asns[r['probe_asn']].append(r)
65 | else:
66 | asns[r['probe_asn']] = [r]
67 |
68 |     # Print per-ASN blocking status
69 | for asn in asns:
70 | print("%s%s# %s%s" % (fg('white'), bg('yellow'), asn.upper(), attr(0)))
71 | for r in sorted(asns[asn], key=lambda x: x['test_start_time']):
72 | data = ooni.download_file(r['download_url'])
73 | colors = {'KO': 'red', 'OK': 'green', 'None': 249}
74 | if data['test_keys']['telegram_http_blocking'] is None:
75 | http_blocking = "None"
76 | else:
77 | if data['test_keys']['telegram_http_blocking']:
78 | http_blocking = 'KO'
79 | else:
80 | http_blocking = 'OK'
81 | print("%s\t %sHTTP: %s\t%sTCP: %s\t\t%sWeb: %s%s" % (
82 | r['test_start_time'],
83 | fg(colors[http_blocking]),
84 | http_blocking,
85 | fg('red') if data['test_keys']['telegram_tcp_blocking'] else fg('green'),
86 | "KO" if data['test_keys']['telegram_tcp_blocking'] else "OK",
87 | fg('red') if data['test_keys']['telegram_web_status'] != 'ok' else fg('green'),
88 | data['test_keys']['telegram_web_status'],
89 | attr(0)
90 | )
91 | )
92 | print("")
93 |
94 |
--------------------------------------------------------------------------------
/web/phishingkits.txt:
--------------------------------------------------------------------------------
1 | dropbox.zip
2 | sparskss.zip
3 | dpbx.zip
4 | wells3x.zip
5 | secureLogin_3.zip
6 | administrator.zip
7 | ipaad.zip
8 | msn.zip
9 | wellsfargo.zip
10 | bookmark.zip
11 | Dropbox.zip
12 | www.zip
13 | hotmail.zip
14 | update.zip
15 | xnowxoffnowxnowoffhd.zip
16 | global.zip
17 | docx.zip
18 | support-Verification.zip
19 | estatspark.zip
20 | login.zip
21 | ipad.zip
22 | scampage.zip
23 | s.p.zip
24 | Arch.zip
25 | filez.zip
26 | irs.zip
27 | gdoc.zip
28 | phone.zip
29 | nD.zip
30 | db.zip
31 | adobe.zip
32 | FOX.zip
33 | usaa.zip
34 | GD.zip
35 | itunes.appel.com.zip
36 | DROPBOX%20MEN..zip
37 | BDB.zip
38 | yahoo.zip
39 | update_info-paypal-tema-login-update_info-paypal-tema-login-update_info-paypal-tema-loginhome.zip
40 | outlook.zip
41 | icscards%3anl.zip
42 | googledocs.zip
43 | alibaba.zip
44 | www.kantonalbank.ch.zip
45 | wes.zip
46 | google.zip
47 | Zone1.zip
48 | BDBB.zip
49 | Aol-Login.zip
50 | live.com.zip
51 | gmail.zip
52 | drpbx%20-%20Copy.zip
53 | Google.zip
54 | GD1.zip
55 | BiyiBlaze.zip
56 | BDBBB4.zip
57 | Aolnew.zip
58 | wells.zip
59 | web.zip
60 | validation.zip
61 | usaa_com.zip
62 | servelet_usaa.zip
63 | order.zip
64 | home.zip
65 | document.zip
66 | chase.zip
67 | app.zip
68 | BOBI.zip
69 | maxe.zip
70 | max.zip
71 | googledrive.zip
72 | googledoc.zip
73 | general.zip
74 | filedrop.zip
75 | dr.zip
76 | doc.zip
77 | access.zip
78 | Yahoo.zip
79 | Yahoo-2014.zip
80 | DropBoxDocument.zip
81 | www.hypovereinsbank.de.zip
82 | www.citibank.com.my.zip
83 | undoo.zip
84 | tesco.zip
85 | spass.zip
86 | outlook%20True..zip
87 | myposte.zip
88 | hvsf.zip
89 | gmez.zip
90 | global2.zip
91 | dpp.zip
92 | Usaa.zip
93 | R-viewdoc.zip
94 | Pamilerinayooluwa.zip
95 | Ourtime.zip
96 | Hotmail-New.zip
97 | DHL.zip
98 | Adobe.zip
99 | wp-admin.zip
100 | westpac.zip
101 | wellsfargo.com.zip
102 | welcome.zip
103 | suite.zip
104 | spaskas.zip
105 | signontax.zip
106 | share.zip
107 | script1.zip
108 | santander.zip
109 | rr.zip
110 | online.zip
111 | new.zip
112 | new%20google%20doc..zip
113 | dropboxLanre.zip
114 | drive.zip
115 | docs.zip
116 | db2.zip
117 | christain_mingle.zip
118 | aol.zip
119 | Investor.zip
120 | G6.zip
121 | BILLIONS%20PAGE..zip
122 | yahoo.com.zip
123 | ww.zip
124 | ups.zip
125 | outlooknew.zip
126 | finance.zip
127 | files.zip
128 | dropbox1..zip
129 | dropbox%20LoginVerification%20-prntscr.com-9sjlf0.zip
130 | dhl.zip
131 | db2016.zip
132 | css.zip
133 | commbankonlineau.zip
134 | box.zip
135 | bof.zip
136 | bbooffaa.zip
137 | auth.inet.ent_Logon-redirectjsp.true.zip
138 | art.zip
139 | admin.zip
140 | accounts.zip
141 | LIFEVERIFY.zip
142 | IRS.zip
143 | GOG.zip
144 | Dropbox1..zip
145 | Doc.zip
146 | DROPBOX
147 | Business.zip
148 | 8-login-form.zip
149 | 1.zip
150 | wllxzccc.zip
151 | webmail.zip
152 | vivt.zip
153 | validate.zip
154 | spar.zip
155 | royalbank.zip
156 | review.zip
157 | rebuilt.gdoc.zip
158 | obiora.zip
159 | news.zip
160 | match2.zip
161 | maildoc.zip
162 | google%20dariver%202015.zip
163 | good.zip
164 | gee.zip
165 | dropelv.%20-%20Copy.zip
166 | dropbox2016.zip
167 | dropbl.zip
168 | dpx.zip
169 | dm.zip
170 | db2011.zip
171 | class.zip
172 | ch.zip
173 | capitalone360.zip
174 | apple.zip
175 | aoljunior.zip
176 | PDP..zip
177 | Nuvo.zip
178 | Newdropbox15-1.zip
179 | Gouv_lmpouts.zip
180 | Gmail.zip
181 | gmail.zip
182 | Gdoc.zip
183 | Fresh.zip
184 | Ed.zip
185 | DROPBOX.zip
186 | 3.0.zip
187 | gdocs.zip
188 | gdocs1.zip
189 | GD.zip
190 | art3..zip
191 | AppleID.zip
192 |
--------------------------------------------------------------------------------
/cloudcidrs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import dns.resolver
4 | import ipaddress
5 | import sys
6 | import requests
7 |
8 |
9 | #------------------------- Google Cloud IPs -----------------------------------
10 | # See _cloud-netblocks.googleusercontent.com
11 | def google_dns_query(domain):
12 | """
13 |     Recursively resolve an SPF-style TXT record and return its CIDRs
14 | """
15 | cidrs = []
16 | domains = []
17 | answers = dns.resolver.query(domain, 'TXT')
18 | for a in answers:
19 | for entry in a.to_text().split(" "):
20 | if entry.startswith('include:'):
21 | domains.append(entry[8:])
22 | elif entry.startswith('ip4:'):
23 | cidrs.append(ipaddress.ip_network(entry[4:]))
24 | elif entry.startswith('ip6:'):
25 | cidrs.append(ipaddress.ip_network(entry[4:]))
26 | for d in domains:
27 | cidrs.extend(google_dns_query(d))
28 | return cidrs
29 |
30 | def google_ranges():
31 | """
32 | Return google cloud CIDRs
33 | """
34 | return google_dns_query('_cloud-netblocks.googleusercontent.com')
35 |
36 | def aws_ranges():
37 | """
38 | Return AWS ranges
39 | https://ip-ranges.amazonaws.com/ip-ranges.json
40 | """
41 | r = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json')
42 | data = r.json()
43 | ranges = []
44 | for d in data['prefixes']:
45 | ranges.append(ipaddress.ip_network(d['ip_prefix']))
46 | return ranges
47 |
48 |
49 | if __name__ == '__main__':
50 | parser = argparse.ArgumentParser(description='Process some Cloud IP ranges')
51 | parser.add_argument('--ip', '-i', help='Check if an IP is in a cloud IP range')
52 | parser.add_argument('--list', '-l', help='Check if IPs from a file are in a Cloud IP range')
53 | parser.add_argument('--show', '-s', action='store_true', help='Print the list of IP ranges')
54 | args = parser.parse_args()
55 |
56 | providers = {
57 | 'Google Cloud': google_ranges(),
58 | 'Amazon AWS': aws_ranges()
59 | }
60 |
61 | if args.show:
62 | for p in providers:
63 | print('### %s' % p)
64 | for d in providers[p]:
65 | print(d)
66 | elif args.ip:
67 | ip = ipaddress.ip_address(args.ip)
68 | for p in providers:
69 | for d in providers[p]:
70 | if ip in d:
71 | print("%s - %s (%s)" % (args.ip, p, d))
72 | sys.exit(0)
73 | print('IP not found')
74 | elif args.list:
75 | with open(args.list, 'r') as f:
76 | ips = f.read().split("\n")
77 | for ip in ips:
78 | if ip.strip() != '':
79 | try:
80 | ipp = ipaddress.ip_address(ip)
81 | found = False
82 | for p in providers:
83 | for iprange in providers[p]:
84 | if ipp in iprange:
85 | print("%s ; %s" % (ip, p))
86 | found = True
87 | break
88 | if not found:
89 | print("%s ; Not found" % ip)
90 | except ValueError:
91 | print("%s ; Not an IP address" % ip)
92 | else:
93 | print("Please give either an IP (-i), a list (-l) or show the full list (-s)")
94 | parser.print_help()
95 |
--------------------------------------------------------------------------------