├── .gitignore ├── Makefile ├── README.md ├── common.hh ├── dshell-defcon ├── .dshellrc ├── .gitignore ├── LICENSE.txt ├── Makefile ├── README.md ├── bin │ ├── decode │ ├── decode.py │ ├── generate-dshellrc.py │ └── pcapanon.py ├── context.py ├── decoders │ ├── __init__.py │ ├── dhcp │ │ ├── __init__.py │ │ └── dhcp.py │ ├── dns │ │ ├── __init__.py │ │ ├── dns-asn.py │ │ ├── dns-cc.py │ │ ├── dns.py │ │ ├── innuendo-dns.py │ │ └── reservedips.py │ ├── filter │ │ ├── __init__.py │ │ ├── asn-filter.py │ │ ├── country.py │ │ ├── snort.py │ │ └── track.py │ ├── flows │ │ ├── __init__.py │ │ ├── large-flows.py │ │ ├── long-flows.py │ │ └── netflow.py │ ├── ftp │ │ ├── __init__.py │ │ └── ftp.py │ ├── http │ │ ├── __init__.py │ │ ├── flash-detect.py │ │ ├── httpdump.py │ │ ├── ms15-034.py │ │ ├── rip-http.py │ │ └── web.py │ ├── misc │ │ ├── __init__.py │ │ ├── followstream.py │ │ ├── grep.py │ │ ├── merge.py │ │ ├── stream2dump.py │ │ ├── synrst.py │ │ ├── writer.py │ │ └── xor.py │ ├── protocol │ │ ├── __init__.py │ │ ├── ether.py │ │ ├── ip.py │ │ └── protocol.py │ ├── smb │ │ ├── __init__.py │ │ ├── psexec.py │ │ ├── rip-smb-uploads.py │ │ └── smbfiles.py │ ├── templates │ │ ├── PacketDecoder.py │ │ ├── SessionDecoder.py │ │ └── __init__.py │ └── tftp │ │ ├── __init__.py │ │ └── tftp.py ├── doc │ └── generate-doc.sh ├── docker │ ├── Dockerfile │ └── README.md ├── dshell ├── dshell-decode ├── install-ubuntu.py ├── lib │ ├── dfile.py │ ├── dnsdecoder.py │ ├── dshell.py │ ├── httpdecoder.py │ ├── output │ │ ├── colorout.py │ │ ├── csvout.py │ │ ├── jsonout.py │ │ ├── netflowout.py │ │ ├── output.py │ │ └── xmlout.py │ ├── smbdecoder.py │ └── util.py ├── offset2stream.py ├── share │ └── GeoIP │ │ └── readme.txt └── tester.py ├── indexer.cc ├── pcap2ap ├── split-flow.cc └── web ├── Makefile ├── bower.json ├── css └── style.sass ├── html └── search.slim ├── js └── search.coffee ├── static ├── index.html ├── jquery.min.js ├── riot.min.js ├── semantic.min.css ├── 
semantic.min.js └── themes │ └── default │ └── assets │ └── fonts │ └── icons.woff2 └── web.rb /.gitignore: -------------------------------------------------------------------------------- 1 | bower_components 2 | node_modules 3 | /indexer 4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | CXXFLAGS += -g3 -march=native -std=c++11 -Wno-deprecated-declarations -pthread 2 | 3 | all: indexer 4 | 5 | clean: 6 | $(RM) indexer 7 | 8 | .PHONY: all clean 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PCAP Search 2 | 3 | PCAP Search is a full-text substring search engine based on FM-index and other 4 | succinct data structures. 5 | 6 | ## Installation 7 | 8 | ``` 9 | # dependencies of dshell 10 | pip2 install --user dpkt pypcap 11 | 12 | # dependencies of pcap2ap 13 | # bc, inotify-tools 14 | 15 | # dependencies of web/web.rb 16 | gem install --user-install sinatra sinatra-contrib tilt sass slim coffee-script 17 | # nodejs 18 | ``` 19 | 20 | ## Usage 21 | 22 | ```zsh 23 | mkdir -p /tmp/pcap/{all,eliza,wdub} 24 | 25 | # create /tmp/pcap/all/a.cap with tcpdump/tshark 26 | # create /tmp/pcap/wdub/a.cap with tcpdump/tshark 27 | 28 | # Transform .cap files into .cap.ap files 29 | # This intermediate format removes redundant metadata in PCAP/PCAPNG 30 | # and is used for locating a specific packet in PCAP/PCAPNG files. 31 | ./pcap2ap -r /tmp/pcap & 32 | 33 | # Transform .cap.ap files into .cap.ap.fm files 34 | ./indexer -r /tmp/pcap & 35 | ``` 36 | 37 | `indexer` search for `.ap` files in a directory, index them, and listen on a 38 | unix socket (`/tmp/search.sock` by default) to serve search queries. 39 | 40 | Two types of search queries are provided: search and autocomplete. 
41 | 42 | ### Search 43 | 44 | The simplest query is constructed with `$'\0\0\0haystack'` (zsh's quoting 45 | notation). `indexer` will print all the occurrences of `haystack` in all 46 | its indexed `.ap` files. You can restrict `.ap` files to be searched 47 | by providing the filename range: `\0 $filename_begin \0 $filename_end 48 | \0 $query`. 49 | 50 | `print` is a builtin command in zsh. 51 | 52 | ```zsh 53 | query: \0 filename_begin \0 filename_end \0 query 54 | result: filename \t offset \t context 55 | 56 | # pattern is haystack 57 | print -rn -- $'\0\0\0haystack' | socat -t 60 - unix:/tmp/search.sock 58 | ``` 59 | 60 | ### Autocomplete 61 | 62 | A search query can be turned into an autocomplete query by supplying an offset 63 | number before the first `\0`. 64 | 65 | ```zsh 66 | query: offset \0 filename_begin \0 filename_end \0 query 67 | result: filename \t offset \t context 68 | 69 | # search, skip first 3 matches 70 | print -rn -- $'3\0\0\0haystack' | socat -t 60 - unix:/tmp/search.sock 71 | 72 | # search filenames F satisfying ("a" <= F <= "b"), skip first 5, pattern is "stack\0\0\1". \-escape is allowed 73 | print -rn -- $'5\0a\0b\0ha\0stack\0\\0\\1' | socat -t 60 - unix:/tmp/search.sock 74 | ``` 75 | 76 | ### Web frontend 77 | 78 | ```zsh 79 | # change `PCAP_DIR = File.expand_path '/tmp/pcap'` in `web/web.rb` 80 | web/web.rb 81 | ``` 82 | 83 | The web server will listen on 127.0.0.1:4568. 84 | 85 | ## Internals 86 | 87 | ### `pcap2ap`: extract TCP/UDP streams from `.cap` to `.cap.ap` 88 | 89 | Implement a [Dshell] plugin `dshell-defcon/dshell-decode` to split a `.cap` to several streams and reassemble them into a `.cap.ap` file. 90 | A `.cap.ap` file is a concatenation of its streams, where each stream is composed of packets laid out in order. This format makes searching across packet boundary easier. 91 | 92 | See [./dshell-defcon/README.md](./dshell-defcon/README.md) for detail. 93 | 94 | `pcap2ap` is a shell wrapper of `dshell-decode`. 
It watches (inotify) `.cap` files in one or multiple directories and transforms them into `.cap.ap` files. 95 | 96 | ### `indexer`: build a full-text index `.cap.ap.fm` for each `.cap.ap` and serve requests 97 | 98 | `indexer` watches `.fm` indices in one or more directories and acts as a unix socket server supporing auto complete and search. For both types of queries, it scans watched `.fm` indices and locates the needle in the data files. 99 | 100 | ### `web`: integrate `indexer` and the Dshell plugin 101 | 102 | `web/web.rb` is a web application built upon Sinatra. 103 | 104 | ### `web/web.rb` 105 | 106 | [Dshell]: https://github.com/USArmyResearchLab/Dshell 107 | 108 | ## `.ap` file specification 109 | 110 | ```c 111 | struct Ap { 112 | int32_t n_sessions; 113 | Session sessions[]; 114 | }; 115 | 116 | struct Session { 117 | int32_t n_packets; 118 | int32_t server_ip; 119 | int32_t server_port; 120 | int32_t client_ip; 121 | int32_t client_port; 122 | int32_t first_timestamp; 123 | int32_t last_timestamp; 124 | Packet packets[]; 125 | }; 126 | 127 | struct Packet { 128 | bool from_server; 129 | int32_t len; 130 | }; 131 | ``` 132 | 133 | ### `.fm` file specification 134 | 135 | ```c 136 | struct FM { 137 | char magic[8]; // GOODMEOW 138 | off_t len; 139 | // serialization of struct FMIndex 140 | }; 141 | ``` 142 | -------------------------------------------------------------------------------- /dshell-defcon/.dshellrc: -------------------------------------------------------------------------------- 1 | export PS1="`whoami`@`hostname`:\w Dshell> " 2 | dir=$(dirname "$0") 3 | export BINPATH=$dir/bin 4 | export DSHELL=$dir 5 | export DATAPATH=$dir/share 6 | export DECODERPATH=$dir/decoders 7 | export LIBPATH=$dir/lib 8 | export PATH=$BINPATH:$PATH 9 | export LD_LIBRARY_PATH=$LIBPATH:$LD_LIBRARY_PATH 10 | export PYTHONPATH=$DSHELL:$LIBPATH:$LIBPATH/output:$LIBPATH/python2.7/site-packages:$PYTHONPATH 11 | 
-------------------------------------------------------------------------------- /dshell-defcon/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swp 3 | -------------------------------------------------------------------------------- /dshell-defcon/LICENSE.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/LICENSE.txt -------------------------------------------------------------------------------- /dshell-defcon/Makefile: -------------------------------------------------------------------------------- 1 | default: all 2 | 3 | all: rc dshell 4 | 5 | dshell: rc initpy 6 | 7 | rc: 8 | # Generating .dshellrc and dshell files 9 | python2 $(PWD)/bin/generate-dshellrc.py $(PWD) 10 | chmod 755 $(PWD)/dshell 11 | chmod 755 $(PWD)/dshell-decode 12 | chmod 755 $(PWD)/bin/decode.py 13 | ln -s $(PWD)/bin/decode.py $(PWD)/bin/decode 14 | 15 | initpy: 16 | find $(PWD)/decoders -type d -not -path \*.svn\* -print -exec touch {}/__init__.py \; 17 | 18 | pydoc: 19 | (cd $(PWD)/doc && ./generate-doc.sh $(PWD) ) 20 | 21 | clean: clean_pyc 22 | 23 | distclean: clean clean_py clean_pydoc clean_rc 24 | 25 | clean_rc: 26 | rm -fv $(PWD)/dshell 27 | rm -fv $(PWD)/dshell-decode 28 | rm -fv $(PWD)/.dshellrc 29 | rm -fv $(PWD)/bin/decode 30 | 31 | clean_py: 32 | find $(PWD)/decoders -name '__init__.py' -exec rm -v {} \; 33 | 34 | clean_pyc: 35 | find $(PWD)/decoders -name '*.pyc' -exec rm -v {} \; 36 | find $(PWD)/lib -name '*.pyc' -exec rm -v {} \; 37 | 38 | clean_pydoc: 39 | find $(PWD)/doc -name '*.htm*' -exec rm -v {} \; 40 | -------------------------------------------------------------------------------- /dshell-defcon/README.md: -------------------------------------------------------------------------------- 1 | # Dshell 2 | 3 | 4 | ## DefCon Usage 5 | 6 | `./dshell-decode -d stream2dump 
--stream2dump_outfiles=out /mnt/ctf/DEF\ CON\ 22\ ctf\ teams/blue-lotus/blue-lotus_00100_20140808181530.cap` 7 | 8 | `./offset2stream.py {str|repr|hex|pcap} original.pcap outputfile` 9 | 10 | An extensible network forensic analysis framework. Enables rapid development of plugins to support the dissection of network packet captures. 11 | 12 | Key features: 13 | 14 | 15 | * Robust stream reassembly 16 | * IPv4 and IPv6 support 17 | * Custom output handlers 18 | * Chainable decoders 19 | 20 | ## Prerequisites 21 | 22 | * Linux (developed on Ubuntu 12.04) 23 | * Python 2.7 24 | * [pygeoip](https://github.com/appliedsec/pygeoip), GNU Lesser GPL 25 | * [MaxMind GeoIP Legacy datasets](http://dev.maxmind.com/geoip/legacy/geolite/) 26 | * [PyCrypto](https://pypi.python.org/pypi/pycrypto), custom license 27 | * [dpkt](https://code.google.com/p/dpkt/), New BSD License 28 | * [IPy](https://github.com/haypo/python-ipy), BSD 2-Clause License 29 | * [pypcap](https://code.google.com/p/pypcap/), New BSD License 30 | 31 | ## Installation 32 | 33 | 1. Install all of the necessary Python modules listed above. Many of them are available via pip and/or apt-get. Pygeoip is not yet available as a package and must be installed with pip or manually. 34 | 35 | 1. `sudo apt-get install python-crypto python-dpkt python-ipy python-pypcap` 36 | 37 | 2. `sudo pip install pygeoip` 38 | 39 | 2. Configure pygeoip by moving the MaxMind data files (GeoIP.dat, GeoIPv6.dat, GeoIPASNum.dat, GeoIPASNumv6.dat) to <install-location>/share/GeoIP/ 40 | 41 | 2. Run `make`. This will build Dshell. 42 | 43 | 3. Run `./dshell`. This is Dshell. If you get a Dshell> prompt, you're good to go! 
44 | 45 | ## Basic usage 46 | 47 | * `decode -l` 48 | * This will list all available decoders alongside basic information about them 49 | * `decode -h` 50 | * Show generic command-line flags available to most decoders 51 | * `decode -d ` 52 | * Display information about a decoder, including available command-line flags 53 | * `decode -d ` 54 | * Run the selected decoder on a pcap file 55 | 56 | ## Usage Examples 57 | 58 | Showing DNS lookups in [sample traffic](http://wiki.wireshark.org/SampleCaptures#General_.2F_Unsorted) 59 | 60 | ``` 61 | Dshell> decode -d dns ~/pcap/dns.cap 62 | dns 2005-03-30 03:47:46 192.168.170.8:32795 -> 192.168.170.20:53 ** 39867 PTR? 66.192.9.104 / PTR: 66-192-9-104.gen.twtelecom.net ** 63 | dns 2005-03-30 03:47:46 192.168.170.8:32795 -> 192.168.170.20:53 ** 30144 A? www.netbsd.org / A: 204.152.190.12 (ttl 82159s) ** 64 | dns 2005-03-30 03:47:46 192.168.170.8:32795 -> 192.168.170.20:53 ** 61652 AAAA? www.netbsd.org / AAAA: 2001:4f8:4:7:2e0:81ff:fe52:9a6b (ttl 86400s) ** 65 | dns 2005-03-30 03:47:46 192.168.170.8:32795 -> 192.168.170.20:53 ** 32569 AAAA? www.netbsd.org / AAAA: 2001:4f8:4:7:2e0:81ff:fe52:9a6b (ttl 86340s) ** 66 | dns 2005-03-30 03:47:46 192.168.170.8:32795 -> 192.168.170.20:53 ** 36275 AAAA? www.google.com / CNAME: www.l.google.com ** 67 | dns 2005-03-30 03:47:46 192.168.170.8:32795 -> 192.168.170.20:53 ** 9837 AAAA? www.example.notginh / NXDOMAIN ** 68 | dns 2005-03-30 03:52:17 192.168.170.8:32796 <- 192.168.170.20:53 ** 23123 PTR? 127.0.0.1 / PTR: localhost ** 69 | dns 2005-03-30 03:52:25 192.168.170.56:1711 <- 217.13.4.24:53 ** 30307 A? GRIMM.utelsystems.local / NXDOMAIN ** 70 | dns 2005-03-30 03:52:17 192.168.170.56:1710 <- 217.13.4.24:53 ** 53344 A? 
GRIMM.utelsystems.local / NXDOMAIN ** 71 | ``` 72 | 73 | Following and reassembling a stream in [sample traffic](http://wiki.wireshark.org/SampleCaptures#General_.2F_Unsorted) 74 | 75 | ``` 76 | Dshell> decode -d followstream ~/pcap/v6-http.cap 77 | Connection 1 (TCP) 78 | Start: 2007-08-05 19:16:44.189852 UTC 79 | End: 2007-08-05 19:16:44.204687 UTC 80 | 2001:6f8:102d:0:2d0:9ff:fee3:e8de:59201 -> 2001:6f8:900:7c0::2:80 (240 bytes) 81 | 2001:6f8:900:7c0::2:80 -> 2001:6f8:102d:0:2d0:9ff:fee3:e8de:59201 (2259 bytes) 82 | 83 | GET / HTTP/1.0 84 | Host: cl-1985.ham-01.de.sixxs.net 85 | Accept: text/html, text/plain, text/css, text/sgml, */*;q=0.01 86 | Accept-Encoding: gzip, bzip2 87 | Accept-Language: en 88 | User-Agent: Lynx/2.8.6rel.2 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8b 89 | 90 | HTTP/1.1 200 OK 91 | Date: Sun, 05 Aug 2007 19:16:44 GMT 92 | Server: Apache 93 | Content-Length: 2121 94 | Connection: close 95 | Content-Type: text/html 96 | 97 | 98 | 99 | 100 | Index of / 101 | 102 | 103 |

Index of /

104 |
Icon  Name                    Last modified      Size  Description
[DIR] 202-vorbereitung/ 06-Jul-2007 14:31 - 105 | [   ] Efficient_Video_on_d..> 19-Dec-2006 03:17 291K 106 | [   ] Welcome Stranger!!! 28-Dec-2006 03:46 0 107 | [TXT] barschel.htm 31-Jul-2007 02:21 44K 108 | [DIR] bnd/ 30-Dec-2006 08:59 - 109 | [DIR] cia/ 28-Jun-2007 00:04 - 110 | [   ] cisco_ccna_640-801_c..> 28-Dec-2006 03:48 236K 111 | [DIR] doc/ 19-Sep-2006 01:43 - 112 | [DIR] freenetproto/ 06-Dec-2006 09:00 - 113 | [DIR] korrupt/ 03-Jul-2007 11:57 - 114 | [DIR] mp3_technosets/ 04-Jul-2007 08:56 - 115 | [TXT] neues_von_rainald_go..> 21-Mar-2007 23:27 31K 116 | [TXT] neues_von_rainald_go..> 21-Mar-2007 23:29 36K 117 | [   ] pruef.pdf 28-Dec-2006 07:48 88K 118 |
119 | 120 | ``` 121 | 122 | Chaining decoders to view flow data for a specific country code in [sample traffic](http://wiki.wireshark.org/SampleCaptures#General_.2F_Unsorted) (note: TCP handshakes are not included in the packet count) 123 | 124 | ``` 125 | Dshell> decode -d country+netflow --country_code=JP ~/pcap/SkypeIRC.cap 126 | 2006-08-25 19:32:20.651502 192.168.1.2 -> 202.232.205.123 (-- -> JP) UDP 60583 33436 1 0 36 0 0.0000s 127 | 2006-08-25 19:32:20.766761 192.168.1.2 -> 202.232.205.123 (-- -> JP) UDP 60583 33438 1 0 36 0 0.0000s 128 | 2006-08-25 19:32:20.634046 192.168.1.2 -> 202.232.205.123 (-- -> JP) UDP 60583 33435 1 0 36 0 0.0000s 129 | 2006-08-25 19:32:20.747503 192.168.1.2 -> 202.232.205.123 (-- -> JP) UDP 60583 33437 1 0 36 0 0.0000s 130 | ``` 131 | 132 | Collecting netflow data for [sample traffic](http://wiki.wireshark.org/SampleCaptures#General_.2F_Unsorted) with vlan headers, then tracking the connection to a specific IP address 133 | 134 | ``` 135 | Dshell> decode -d netflow ~/pcap/vlan.cap 136 | 1999-11-05 18:20:43.170500 131.151.20.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 137 | 1999-11-05 18:20:42.063074 131.151.32.71 -> 131.151.32.255 (US -> US) UDP 138 138 1 0 201 0 0.0000s 138 | 1999-11-05 18:20:43.096540 131.151.1.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 139 | 1999-11-05 18:20:43.079765 131.151.5.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 140 | 1999-11-05 18:20:41.521798 131.151.104.96 -> 131.151.107.255 (US -> US) UDP 137 137 3 0 150 0 1.5020s 141 | 1999-11-05 18:20:43.087010 131.151.6.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 142 | 1999-11-05 18:20:43.368210 131.151.111.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 143 | 1999-11-05 18:20:43.250410 131.151.32.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 144 | 1999-11-05 18:20:43.115330 131.151.10.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 145 | 
1999-11-05 18:20:43.375145 131.151.115.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 146 | 1999-11-05 18:20:43.363348 131.151.107.254 -> 255.255.255.255 (US -> --) UDP 520 520 1 0 24 0 0.0000s 147 | 1999-11-05 18:20:40.112031 131.151.5.55 -> 131.151.5.255 (US -> US) UDP 138 138 1 0 201 0 0.0000s 148 | 1999-11-05 18:20:43.183825 131.151.32.79 -> 131.151.32.255 (US -> US) UDP 138 138 1 0 201 0 0.0000s 149 | ``` 150 | -------------------------------------------------------------------------------- /dshell-defcon/bin/decode: -------------------------------------------------------------------------------- 1 | decode.py -------------------------------------------------------------------------------- /dshell-defcon/bin/generate-dshellrc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import os 4 | import sys 5 | 6 | if __name__ == '__main__': 7 | cwd = sys.argv[1] 8 | 9 | # environment variables used by shell and modules 10 | envvars = { 11 | 'DSHELL': '%s' % (cwd), 12 | 'DECODERPATH': '%s/decoders' % (cwd), 13 | 'BINPATH': '%s/bin' % (cwd), 14 | 'LIBPATH': '%s/lib' % (cwd), 15 | 'DATAPATH': '%s/share' % (cwd), 16 | } 17 | # further shell environment setup 18 | envsetup = { 19 | 'LD_LIBRARY_PATH': '$LIBPATH:$LD_LIBRARY_PATH', 20 | 'PATH': '$BINPATH:$PATH', 21 | 'PYTHONPATH': '$DSHELL:$LIBPATH:$LIBPATH/output:' + os.path.join('$LIBPATH', 'python' + '.'.join(sys.version.split('.', 3)[:2]).split(' ')[0], 'site-packages') + ':$PYTHONPATH'} 22 | 23 | try: 24 | os.mkdir(os.path.join( 25 | cwd, 'lib', 'python' + '.'.join(sys.version.split('.', 3)[:2]).split(' ')[0])) 26 | os.mkdir(os.path.join(cwd, 'lib', 'python' + 27 | '.'.join(sys.version.split('.', 3)[:2]).split(' ')[0], 'site-packages')) 28 | except Exception, e: 29 | print e 30 | 31 | envdict = {} 32 | envdict.update(envvars) 33 | envdict.update(envsetup) 34 | 35 | #.dshellrc text 36 | env = ['export PS1="`whoami`@`hostname`:\w Dshell> "'] 
+ ['export %s=%s' % 37 | (k, v) for k, v in envvars.items()] + ['export %s=%s' % (k, v) for k, v in envsetup.items()] 38 | outfd = open('.dshellrc', 'w') 39 | outfd.write("\n".join(env)) 40 | if len(sys.argv) > 2 and sys.argv[2] == 'with_bash_completion': 41 | outfd.write(''' 42 | 43 | 44 | if [ `echo $BASH_VERSION | cut -d'.' -f1` -ge '4' ]; then 45 | if [ -f ~/.bash_aliases ]; then 46 | . ~/.bash_aliases 47 | fi 48 | 49 | if [ -f /etc/bash_completion ]; then 50 | . /etc/bash_completion 51 | fi 52 | 53 | find_decoder() 54 | { 55 | local IFS="+" 56 | for (( i=0; i<${#COMP_WORDS[@]}; i++ )); 57 | do 58 | if [ "${COMP_WORDS[$i]}" == '-d' ] ; then 59 | decoders=(${COMP_WORDS[$i+1]}) 60 | fi 61 | done 62 | } 63 | 64 | get_decoders() 65 | { 66 | decoders=$(for x in `find $DECODERPATH -iname '*.py' | grep -v '__init__'`; do basename ${x} .py; done) 67 | } 68 | 69 | _decode() 70 | { 71 | local dashdashcommands=' --ebpf --output --outfile --logfile' 72 | 73 | local cur prev xspec decoders 74 | COMPREPLY=() 75 | cur=`_get_cword` 76 | _expand || return 0 77 | prev="${COMP_WORDS[COMP_CWORD-1]}" 78 | 79 | case "${cur}" in 80 | --*) 81 | find_decoder 82 | local options="" 83 | # if [ -n "$decoders" ]; then 84 | # for decoder in "${decoders[@]}" 85 | # do 86 | # options+=`/usr/bin/python $BINPATH/gen_decoder_options.py $decoder` 87 | # options+=" " 88 | # done 89 | # fi 90 | 91 | options+=$dashdashcommands 92 | COMPREPLY=( $(compgen -W "${options}" -- ${cur}) ) 93 | return 0 94 | ;; 95 | 96 | *+*) 97 | get_decoders 98 | firstdecoder=${cur%+*}"+" 99 | COMPREPLY=( $(compgen -W "${decoders}" -P $firstdecoder -- ${cur//*+}) ) 100 | return 0 101 | ;; 102 | 103 | esac 104 | 105 | xspec="*.@(cap|pcap)" 106 | xspec="!"$xspec 107 | case "${prev}" in 108 | -d) 109 | get_decoders 110 | COMPREPLY=( $(compgen -W "${decoders[0]}" -- ${cur}) ) 111 | return 0 112 | ;; 113 | 114 | --output) 115 | local outputs=$(for x in `find $DSHELL/lib/output -iname '*.py' | grep -v 'output.py'`; do basename 
${x} .py; done) 116 | 117 | COMPREPLY=( $(compgen -W "${outputs}" -- ${cur}) ) 118 | return 0 119 | ;; 120 | 121 | -F | -o | --outfile | -L | --logfile) 122 | xspec= 123 | ;; 124 | 125 | esac 126 | 127 | COMPREPLY=( $( compgen -f -X "$xspec" -- "$cur" ) \ 128 | $( compgen -d -- "$cur" ) ) 129 | } 130 | complete -F _decode -o filenames decode 131 | complete -F _decode -o filenames decode.py 132 | fi 133 | ''') 134 | outfd.close() 135 | 136 | # dshell text 137 | outfd = open('dshell', 'w') 138 | outfd.write('#!/bin/bash\n') 139 | outfd.write('/bin/bash --rcfile %s/.dshellrc\n' % (cwd)) 140 | outfd.close() 141 | 142 | # dshell-decode text 143 | outfd = open('dshell-decode', 'w') 144 | outfd.write('#!/bin/bash\n') 145 | outfd.write('source %s/.dshellrc\n' % (cwd)) 146 | outfd.write('decode "$@"') 147 | outfd.close() 148 | -------------------------------------------------------------------------------- /dshell-defcon/bin/pcapanon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ''' 3 | Created on Feb 6, 2012 4 | 5 | @author: tparker 6 | ''' 7 | 8 | import sys 9 | import dpkt 10 | import struct 11 | import pcap 12 | import socket 13 | import time 14 | from Crypto.Random import random 15 | from Crypto.Hash import SHA 16 | from output import PCAPWriter 17 | from util import getopts 18 | 19 | 20 | def hashaddr(addr, *extra): 21 | # hash key+address plus any extra data (ports if flow) 22 | global key, ip_range, ip_mask 23 | sha = SHA.new(key + addr) 24 | for e in extra: 25 | sha.update(str(extra)) 26 | # take len(addr) octets of digest as address, to int, mask, or with range, 27 | # back to octets 28 | return inttoip((iptoint(sha.digest()[0:len(addr)]) & ip_mask) | ip_range) 29 | 30 | 31 | def mangleMAC(addr): 32 | global zero_mac 33 | if zero_mac: 34 | return "\x00\x00\x00\x00\x00\x00" 35 | if addr in emap: 36 | return emap[addr] 37 | haddr = None 38 | if addr == "\x00\x00\x00\x00\x00\x00": 39 | haddr = addr 
# return null MAC 40 | if ord(addr[0]) & 0x01: 41 | haddr = addr # mac&0x800000000000 == broadcast addr, do not touch 42 | if not haddr: 43 | haddr = hashaddr(addr) 44 | # return hash bytes with first byte set to xxxxxx10 (LAA unicast) 45 | haddr = chr(ord(haddr[0]) & 0xfc | 0x2) + haddr[1:6] 46 | emap[addr] = haddr 47 | return haddr 48 | 49 | 50 | def mangleIP(addr, *ports): # addr,extra=our_port,other_port 51 | global exclude, exclude_port, anon_all, by_flow 52 | haddr = None 53 | intip = iptoint(addr) 54 | if len(addr) == 4 and intip >= 0xE0000000: 55 | haddr = addr # pass multicast 224.x.x.x and higher 56 | ip = iptoa(addr) 57 | # pass 127.x.x.x, IANA reserved, and autoconfig ranges 58 | if not anon_all and (ip.startswith('127.') 59 | or ip.startswith('10.') 60 | or ip.startswith('172.16.') 61 | or ip.startswith('192.168.') 62 | or ip.startswith('169.254.')): 63 | haddr = addr 64 | # pass ips matching exclude 65 | for x in exclude: 66 | if ip.startswith(x): 67 | haddr = addr 68 | if ports and ports[0] in exclude_port: 69 | haddr = addr # if our port is exclude 70 | if not haddr: 71 | if by_flow: 72 | # use ports if by flow, else just use ip 73 | haddr = hashaddr(addr, *ports) 74 | else: 75 | haddr = hashaddr(addr) 76 | return haddr 77 | 78 | 79 | def mangleIPs(src, dst, sport, dport): 80 | if by_flow: # if by flow, hash addresses with s/d ports 81 | if (src, sport, dst, dport) in ipmap: 82 | src, dst = ipmap[(src, sport, dst, dport)] 83 | elif (dst, dport, src, sport) in ipmap: 84 | # make sure reverse flow maps same 85 | dst, src = ipmap[(dst, dport, src, sport)] 86 | else: 87 | src, dst = ipmap.setdefault( 88 | (src, sport, dst, dport), (mangleIP(src, sport, dport), mangleIP(dst, dport, sport))) 89 | else: 90 | if src in ipmap: 91 | src = ipmap[src] 92 | else: 93 | src = ipmap.setdefault(src, mangleIP(src, sport)) 94 | if dst in ipmap: 95 | dst = ipmap[dst] 96 | else: 97 | dst = ipmap.setdefault(dst, mangleIP(dst, dport)) 98 | return src, dst 99 | 100 | 101 | 
def mactoa(addr): 102 | return ':'.join(['%02x' % b for b in struct.unpack('6B', addr)]) 103 | 104 | 105 | def iptoa(addr): 106 | if len(addr) is 16: 107 | return socket.inet_ntop(socket.AF_INET6, addr) 108 | else: 109 | return socket.inet_ntop(socket.AF_INET, addr) 110 | 111 | 112 | def iptoint(addr): 113 | if len(addr) is 16: # ipv6 to long 114 | ip = struct.unpack('!IIII', addr) 115 | return ip[0] << 96 | ip[1] << 64 | ip[2] << 32 | ip[3] 116 | else: 117 | return struct.unpack('!I', addr)[0] # ip to int 118 | 119 | 120 | def inttoip(l): 121 | if l > 0xffffffff: # ipv6 122 | return struct.pack('!IIII', l >> 96, l >> 64 & 0xffffffff, l >> 32 & 0xffffffff, l & 0xffffffff) 123 | else: 124 | return struct.pack('!I', l) 125 | 126 | 127 | def pcap_handler(ts, pktdata): 128 | global init_ts, start_ts, replace_ts, by_flow, anon_mac, zero_mac 129 | if not init_ts: 130 | init_ts = ts 131 | if replace_ts: 132 | ts = start_ts + (ts - init_ts) # replace timestamps 133 | try: 134 | pkt = dpkt.ethernet.Ethernet(pktdata) 135 | if anon_mac or zero_mac: 136 | pkt.src = mangleMAC(pkt.src) 137 | pkt.dst = mangleMAC(pkt.dst) 138 | if pkt.type == dpkt.ethernet.ETH_TYPE_IP: 139 | try: 140 | # TCP or UDP? 
141 | sport, dport = pkt.data.data.sport, pkt.data.data.dport 142 | except: 143 | sport = dport = None # nope 144 | pkt.data.src, pkt.data.dst = mangleIPs( 145 | pkt.data.src, pkt.data.dst, sport, dport) 146 | pktdata = str(pkt) 147 | except Exception, e: 148 | print e 149 | out.write(len(pktdata), pktdata, ts) 150 | 151 | if __name__ == '__main__': 152 | 153 | global key, init_ts, start_ts, replace_ts, by_flow, anon_mac, zero_mac, exclude, exclude_port, anon_all, ip_range, ip_mask 154 | opts, args = getopts(sys.argv[1:], 'i:aezftx:p:rk:', [ 155 | 'ip=', 'all', 'ether', 'zero', 'flow', 'ts', 'exclude=', 'random', 'key=', 'port='], ['-x', '--exclude', '-p', '--port']) 156 | 157 | if '-r' in opts or '--random' in opts: 158 | key = random.long_to_bytes(random.getrandbits(64), 8) 159 | else: 160 | key = '' 161 | key = opts.get('-k', opts.get('--key', key)) 162 | 163 | ip_range = opts.get('-i', opts.get('--ip', '0.0.0.0')) 164 | ip_mask = 0 # bitmask for hashed address 165 | ipr = '' 166 | for o in map(int, ip_range.split('.')): 167 | ipr += chr(o) 168 | ip_mask <<= 8 # shift by 8 bits 169 | if not o: 170 | ip_mask |= 0xff # set octet mask to 0xff if ip_range octet is zero 171 | ip_range = iptoint(ipr) # convert to int value for hash&mask|ip_range 172 | 173 | replace_ts = '-t' in opts or '--ts' in opts 174 | by_flow = '-f' in opts or '--flow' in opts 175 | anon_mac = '-e' in opts or '--ether' in opts 176 | zero_mac = '-z' in opts or '--zero' in opts 177 | anon_all = '-a' in opts or '--all' in opts 178 | 179 | start_ts = time.time() 180 | init_ts = None 181 | 182 | exclude = opts.get('-x', []) 183 | exclude.extend(opts.get('--exclude', [])) 184 | 185 | exclude_port = map(int, opts.get('-p', [])) 186 | exclude_port.extend(map(int, opts.get('--port', []))) 187 | 188 | emap = {} 189 | ipmap = {} 190 | 191 | if len(args) < 2: 192 | print "usage: pcapanon.py [options] > mapping.csv\nOptions:\n\t[-i/--ip range]\n\t[-r/--random | -k/--key 'salt' ]\n\t[-a/--all] [-t/--ts] 
[-f/--flow]\n\t[-e/--ether | -z/--zero]\n\t[-x/--exclude pattern...]\n\t[-p/--port list...]" 193 | print "Will anonymize all non-reserved IPs to be in range specified by -i/--ip option," 194 | print "\tnonzero range octets are copied to anonymized address,\n\t(default range is 0.0.0.0 for fully random IPs)" 195 | print "CSV output maps original to anonymized addresses" 196 | print "By default anonymization will use a straight SHA1 hash of the address" 197 | print "\t***this is crackable as mapping is always the same***".upper() 198 | print "Use -r/--random to generate a random salt (cannot easily reverse without knowing map)" 199 | print "\tor use -k/--key 'salt' (will generate same mapping given same salt)," 200 | print "-f/--flows will anonymize by flow (per source:port<->dest:port tuples)" 201 | print "-a/--all will also anonymize reserved IPs" 202 | print "-x/--exclude will leave IPs starting with pattern unchanged" 203 | print "-p/--port port will leave IP unchanged if port is in list" 204 | print "-t/--ts will replace timestamp of first packet with time pcapanon was run,\n\tsubsequent packets will preserve delta from initial ts" 205 | print "-e/--ether will also anonymize non-broadcast MAC addresses" 206 | print "-z/--zero will zero all MAC addresses" 207 | sys.exit(0) 208 | 209 | out = PCAPWriter(args[-1]) 210 | print '#file, packets' 211 | for f in args[0:-1]: 212 | p = 0 213 | cap = pcap.pcap(f) 214 | while cap.dispatch(1, pcap_handler): 215 | p += 1 # process whole file 216 | del cap 217 | print '%s,%s' % (f, p) 218 | out.close() 219 | 220 | print "#type,is-anonymized, original, anonymized" 221 | for ia, oa in sorted(emap.items()): 222 | print 'ether,%d, %s, %s' % (int(not ia == oa), mactoa(ia), mactoa(oa)) 223 | for ia, oa in sorted(ipmap.items()): 224 | if by_flow: 225 | sip, sp, dip, dp = ia 226 | osip, odip = oa 227 | print "flow,%d, %s:%s,%s:%s, %s:%s,%s:%s" % (int(sip != osip or dip != odip), iptoa(sip), sp, iptoa(dip), dp, iptoa(osip), sp, 
iptoa(odip), dp) 228 | else: 229 | print 'ip,%d, %s, %s' % (int(ia != oa), iptoa(ia), iptoa(oa)) 230 | -------------------------------------------------------------------------------- /dshell-defcon/context.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | 3 | import re 4 | import sys 5 | import struct 6 | import socket 7 | import time 8 | 9 | 10 | lcontext = 50 11 | rcontext = 30 12 | 13 | 14 | def enc(s): 15 | ss = '' 16 | for i in s: 17 | if 32 <= ord(i) <= 126 and i != "\\": 18 | ss += i 19 | else: 20 | ss += "\\x" + i.encode('hex') 21 | return ss 22 | 23 | dir2arrow = { 24 | 'c': u"\u2190".encode('utf-8'), 25 | 's': u"\u2192".encode('utf-8'), 26 | '': '' 27 | } 28 | 29 | 30 | def context(fname, offset, len_body): 31 | ff = open(fname, 'rb') 32 | 33 | ff.seek(-4, 2) 34 | total_conns = struct.unpack('I', ff.read(4))[0] 35 | ff.seek(-4 * (1 + total_conns), 2) 36 | len_conns = list(struct.unpack('I' * total_conns, ff.read(4 * total_conns))) 37 | 38 | current_offset = 0 39 | for i in len_conns: 40 | if current_offset <= offset < current_offset + i: 41 | ff.seek(current_offset, 0) 42 | len_pkt, cliip, servip, cliport, servport, timestamp = struct.unpack('IIIHHI', ff.read(20)) 43 | cnt_pkt = struct.unpack('I', ff.read(4))[0] 44 | pkts_id = struct.unpack('I' * cnt_pkt, ff.read(4 * cnt_pkt)) 45 | last_blob = '' 46 | last_dir = '' 47 | outputed = -1 48 | output_data = "" 49 | for i in xrange(len_pkt): 50 | direction = ff.read(1) 51 | len_data = struct.unpack('I', ff.read(4))[0] 52 | data_begin = ff.tell() 53 | data = ff.read(len_data) 54 | data_end = ff.tell() 55 | if outputed >= 0 and outputed < rcontext: 56 | output_data += dir2arrow[direction] + enc(data[:rcontext - outputed]) 57 | break 58 | if data_begin <= offset < data_end: 59 | output_data = data[offset - data_begin: offset - data_begin + len_body] 60 | blobr = data[offset - data_begin + len_body: ] 61 | blobl = data[: offset - data_begin] 62 | 
output_data = dir2arrow[direction] + enc(blobl[-lcontext: ] + output_data + blobr[: rcontext]) 63 | if len(blobl) < lcontext: 64 | output_data = dir2arrow[last_dir] + enc(last_blob[-(lcontext - len(blobl)): ]) + output_data 65 | outputed = len(blobr) 66 | last_blob = data 67 | last_dir = direction 68 | ff.close() 69 | return timestamp, servport, cliport, output_data 70 | current_offset += i 71 | 72 | ff.close() 73 | return -1, -1, -1, '' 74 | 75 | while True: 76 | try: 77 | fname, offset, len_body = raw_input().split('\t') 78 | timestamp, servport, cliport, ret = context(fname, int(offset), int(len_body)) 79 | print "%s\t%d\t%d\t%d\t%d\t%s" % (fname, int(offset), timestamp, servport, cliport, ret) 80 | sys.stdout.flush() 81 | except EOFError: 82 | break 83 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/dhcp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/dhcp/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/dhcp/dhcp.py: -------------------------------------------------------------------------------- 1 | import dpkt 2 | import dshell 3 | import util 4 | from struct import unpack 5 | import binascii 6 | 7 | class DshellDecoder(dshell.UDPDecoder): 8 | 9 | def __init__(self): 10 | dshell.UDPDecoder.__init__(self, 11 | name='dhcp', 12 | description='extract client information from DHCP messages', 13 | longdescription=""" 14 | The dhcp decoder will extract the 
Transaction ID, Client Hostname, and 15 | Client MAC address from every UDP DHCP packet found in the given pcap 16 | using port 67. DHCP uses BOOTP as its transport protocol. 17 | BOOTP traffic generally uses ports 67 and 68 for outgoing and incoming traffic. 18 | This filter pulls DHCP Inform packets. 19 | 20 | Examples: 21 | 22 | General usage: 23 | 24 | decode -d dhcp 25 | 26 | This will display the connection info including the timestamp, 27 | the source IP : source port, destination IP : destination port, 28 | Transaction ID, Client Hostname, and the Client MAC address 29 | in a tabular format. 30 | 31 | 32 | Malware Traffic Analysis Exercise Traffic from 2015-03-03 where a user was hit with an Angler exploit kit: 33 | 34 | We want to find out more about the infected machine, and some of this information can be pulled from DHCP traffic 35 | 36 | decode -d dhcp /2015-03-03-traffic-analysis-exercise.pcap 37 | 38 | OUTPUT: 39 | dhcp 2015-03-03 14:05:10 172.16.101.196:68 -- 172.16.101.1:67 ** Transaction ID: 0xba5a2cfe Client Hostname: Gregory-PC Client MAC: 38:2c:4a:3d:ef:01 ** 40 | dhcp 2015-03-03 14:08:40 172.16.101.196:68 -- 255.255.255.255:67 ** Transaction ID: 0x6a482406 Client Hostname: Gregory-PC Client MAC: 38:2c:4a:3d:ef:01 ** 41 | dhcp 2015-03-03 14:10:11 172.16.101.196:68 -- 172.16.101.1:67 ** Transaction ID: 0xe74b17fe Client Hostname: Gregory-PC Client MAC: 38:2c:4a:3d:ef:01 ** 42 | dhcp 2015-03-03 14:12:50 172.16.101.196:68 -- 255.255.255.255:67 ** Transaction ID: 0xd62614a0 Client Hostname: Gregory-PC Client MAC: 38:2c:4a:3d:ef:01 ** 43 | """, 44 | filter='(udp and port 67)', 45 | author='dek', 46 | ) 47 | self.mac_address = None 48 | self.client_hostname = None 49 | self.xid = None 50 | 51 | 52 | # A packetHandler is used to ensure that every DHCP packet in the traffic is parsed 53 | def packetHandler(self, udp, data): 54 | try: 55 | dhcp_packet = dpkt.dhcp.DHCP(data) 56 | except dpkt.NeedData as e: 57 | self.warn('{} dpkt could not parse session 
data (DHCP packet not found)'.format(str(e))) 58 | return 59 | 60 | # Pull the transaction ID from the packet 61 | self.xid = hex(dhcp_packet.xid) 62 | 63 | # if we have a DHCP INFORM PACKET 64 | if dhcp_packet.op == dpkt.dhcp.DHCP_OP_REQUEST: 65 | self.debug(dhcp_packet.op) 66 | for option_code, msg_value in dhcp_packet.opts: 67 | 68 | # if opt is CLIENT_ID (61) 69 | # unpack the msg_value and reformat the MAC address 70 | if option_code == dpkt.dhcp.DHCP_OPT_CLIENT_ID: 71 | hardware_type, mac = unpack('B6s', msg_value) 72 | mac = binascii.hexlify(mac) 73 | self.mac_address = ':'.join([mac[i:i+2] for i in range(0, len(mac), 2)]) 74 | 75 | # if opt is HOSTNAME (12) 76 | elif option_code == dpkt.dhcp.DHCP_OPT_HOSTNAME: 77 | self.client_hostname = msg_value 78 | 79 | 80 | if self.xid and self.client_hostname and self.mac_address: 81 | self.alert('Transaction ID: {0:<12} Client Hostname: {1:<15} Client MAC: {2:<20}'.format( 82 | self.xid, self.client_hostname, self.mac_address), **udp.info()) 83 | 84 | 85 | if __name__ == '__main__': 86 | dObj = DshellDecoder() 87 | print dObj 88 | else: 89 | dObj = DshellDecoder() 90 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/dns/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/dns/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/dns/dns-asn.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import dpkt 3 | import socket 4 | from dnsdecoder import DNSDecoder 5 | 6 | 7 | class DshellDecoder(DNSDecoder): 8 | 9 | def __init__(self): 10 | DNSDecoder.__init__(self, 11 | name='dns-asn', 12 | description='identify AS of DNS A/AAAA record responses', 13 | filter='(port 53)', 14 | author='bg', 15 | 
cleanupinterval=10, 16 | maxblobs=2, 17 | ) 18 | 19 | def decode_q(self, dns): 20 | queried = "" 21 | if dns.qd[0].type == dpkt.dns.DNS_A: 22 | queried = queried + "A? %s" % (dns.qd[0].name) 23 | if dns.qd[0].type == dpkt.dns.DNS_AAAA: 24 | queried = queried + "AAAA? %s" % (dns.qd[0].name) 25 | return queried 26 | 27 | def DNSHandler(self, conn, request, response, **kwargs): 28 | anstext = '' 29 | queried = '' 30 | id = None 31 | for dns in request, response: 32 | if dns is None: 33 | continue 34 | id = dns.id 35 | # DNS Question, update connection info with query 36 | if dns.qr == dpkt.dns.DNS_Q: 37 | conn.info(query=self.decode_q(dns)) 38 | 39 | # DNS Answer with data and no errors 40 | elif (dns.qr == dpkt.dns.DNS_A and dns.rcode == dpkt.dns.DNS_RCODE_NOERR and len(dns.an) > 0): 41 | 42 | queried = self.decode_q(dns) 43 | 44 | answers = [] 45 | for an in dns.an: 46 | if an.type == dpkt.dns.DNS_A: 47 | try: 48 | cc = self.getASN(socket.inet_ntoa(an.ip)) 49 | answers.append( 50 | 'A: %s (%s) (ttl %s)' % (socket.inet_ntoa(an.ip), cc, an.ttl)) 51 | except: 52 | continue 53 | elif an.type == dpkt.dns.DNS_AAAA: 54 | try: 55 | cc = self.getASN( 56 | socket.inet_ntop(socket.AF_INET6, an.ip6)) 57 | answers.append('AAAA: %s (%s) (ttl %s)' % ( 58 | socket.inet_ntop(socket.AF_INET6, an.ip6), cc, an.ttl)) 59 | except: 60 | continue 61 | else: 62 | # un-handled type 63 | continue 64 | if queried != '': 65 | anstext = ", ".join(answers) 66 | 67 | if anstext: # did we get an answer? 
68 | self.alert( 69 | str(id) + ' ' + queried + ' / ' + anstext, **conn.info(response=anstext)) 70 | 71 | 72 | if __name__ == '__main__': 73 | dObj = DshellDecoder() 74 | print dObj 75 | else: 76 | dObj = DshellDecoder() 77 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/dns/dns-cc.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import dpkt 3 | import socket 4 | from dnsdecoder import DNSDecoder 5 | 6 | 7 | class DshellDecoder(DNSDecoder): 8 | 9 | def __init__(self): 10 | DNSDecoder.__init__(self, 11 | name='dns-cc', 12 | description='identify country code of DNS A/AAAA record responses', 13 | filter='(port 53)', 14 | author='bg', 15 | cleanupinterval=10, 16 | maxblobs=2, 17 | optiondict={'foreign': {'action': 'store_true', 'help': 'report responses in foreign countries'}, 18 | 'code': {'type': 'string', 'help': 'filter on a specific country code (ex. US)'}} 19 | ) 20 | 21 | def decode_q(self, dns): 22 | queried = "" 23 | if dns.qd[0].type == dpkt.dns.DNS_A: 24 | queried = queried + "A? %s" % (dns.qd[0].name) 25 | if dns.qd[0].type == dpkt.dns.DNS_AAAA: 26 | queried = queried + "AAAA? 
%s" % (dns.qd[0].name) 27 | return queried 28 | 29 | def DNSHandler(self, conn, request, response, **kwargs): 30 | anstext = '' 31 | queried = '' 32 | id = None 33 | for dns in request, response: 34 | if dns is None: 35 | continue 36 | id = dns.id 37 | # DNS Question, update connection info with query 38 | if dns.qr == dpkt.dns.DNS_Q: 39 | conn.info(query=self.decode_q(dns)) 40 | 41 | # DNS Answer with data and no errors 42 | elif (dns.qr == dpkt.dns.DNS_A and dns.rcode == dpkt.dns.DNS_RCODE_NOERR and len(dns.an) > 0): 43 | 44 | queried = self.decode_q(dns) 45 | 46 | answers = [] 47 | for an in dns.an: 48 | if an.type == dpkt.dns.DNS_A: 49 | try: 50 | cc = self.getGeoIP(socket.inet_ntoa(an.ip)) 51 | if self.foreign and (cc == 'US' or cc == '--'): 52 | continue 53 | elif self.code != None and cc != self.code: 54 | continue 55 | answers.append( 56 | 'A: %s (%s) (ttl %ss)' % (socket.inet_ntoa(an.ip), cc, an.ttl)) 57 | except: 58 | continue 59 | elif an.type == dpkt.dns.DNS_AAAA: 60 | try: 61 | cc = self.getGeoIP( 62 | socket.inet_ntop(socket.AF_INET6, an.ip6)) 63 | if self.foreign and (cc == 'US' or cc == '--'): 64 | continue 65 | elif self.code != None and cc != self.code: 66 | continue 67 | answers.append('AAAA: %s (%s) (ttl %ss)' % ( 68 | socket.inet_ntop(socket.AF_INET6, an.ip6), cc, an.ttl)) 69 | except: 70 | continue 71 | else: 72 | # un-handled type 73 | continue 74 | if queried != '': 75 | anstext = ", ".join(answers) 76 | 77 | if anstext: # did we get an answer? 
78 | self.alert( 79 | str(id) + ' ' + queried + ' / ' + anstext, **conn.info(response=anstext)) 80 | 81 | 82 | if __name__ == '__main__': 83 | dObj = DshellDecoder() 84 | print dObj 85 | else: 86 | dObj = DshellDecoder() 87 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/dns/dns.py: -------------------------------------------------------------------------------- 1 | import dpkt 2 | import socket 3 | from dnsdecoder import DNSDecoder 4 | 5 | 6 | class DshellDecoder(DNSDecoder): 7 | 8 | def __init__(self): 9 | DNSDecoder.__init__(self, 10 | name='dns', 11 | description='extract and summarize DNS queries/responses (defaults: A,AAAA,CNAME,PTR records)', 12 | filter='(udp and port 53)', 13 | author='bg/twp', 14 | optiondict={'show_noanswer': {'action': 'store_true', 'help': 'report unanswered queries alongside other queries'}, 15 | 'show_norequest': {'action': 'store_true', 'help': 'report unsolicited responses alongside other responses'}, 16 | 'only_noanswer': {'action': 'store_true', 'help': 'report only unanswered queries'}, 17 | 'only_norequest': {'action': 'store_true', 'help': 'report only unsolicited responses'}, 18 | 'showall': {'action': 'store_true', 'help': 'show all answered queries/responses'}} 19 | ) 20 | 21 | def decode_q(self, dns): 22 | queried = "" 23 | if dns.qd[0].type == dpkt.dns.DNS_A: 24 | queried = queried + "A? %s" % (dns.qd[0].name) 25 | if dns.qd[0].type == dpkt.dns.DNS_CNAME: 26 | queried = queried + "CNAME? %s" % (dns.qd[0].name) 27 | if dns.qd[0].type == dpkt.dns.DNS_AAAA: 28 | queried = queried + "AAAA? %s" % (dns.qd[0].name) 29 | if dns.qd[0].type == dpkt.dns.DNS_PTR: 30 | if dns.qd[0].name.endswith('.in-addr.arpa'): 31 | query_name = '.'.join( 32 | reversed(dns.qd[0].name.split('.in-addr.arpa')[0].split('.'))) 33 | else: 34 | query_name = dns.qd[0].name 35 | queried = queried + "PTR? 
%s" % (query_name) 36 | 37 | if not self.showall: 38 | return queried 39 | 40 | if dns.qd[0].type == dpkt.dns.DNS_NS: 41 | queried = queried + "NS? %s" % (dns.qd[0].name) 42 | if dns.qd[0].type == dpkt.dns.DNS_MX: 43 | queried = queried + "MX? %s" % (dns.qd[0].name) 44 | if dns.qd[0].type == dpkt.dns.DNS_TXT: 45 | queried = queried + "TXT? %s" % (dns.qd[0].name) 46 | if dns.qd[0].type == dpkt.dns.DNS_SRV: 47 | queried = queried + "SRV? %s" % (dns.qd[0].name) 48 | 49 | return queried 50 | 51 | def DNSHandler(self, conn, request, response, **kwargs): 52 | if self.only_norequest and request is not None: 53 | return 54 | if not self.show_norequest and request is None: 55 | return 56 | anstext = '' 57 | queried = '' 58 | id = None 59 | for dns in request, response: 60 | if dns is None: 61 | continue 62 | id = dns.id 63 | # DNS Question, update connection info with query 64 | if dns.qr == dpkt.dns.DNS_Q: 65 | conn.info(query=self.decode_q(dns)) 66 | 67 | # DNS Answer with data and no errors 68 | elif (dns.qr == dpkt.dns.DNS_A and dns.rcode == dpkt.dns.DNS_RCODE_NOERR and len(dns.an) > 0): 69 | 70 | queried = self.decode_q(dns) 71 | 72 | answers = [] 73 | for an in dns.an: 74 | if an.type == dpkt.dns.DNS_A: 75 | try: 76 | answers.append( 77 | 'A: %s (ttl %ss)' % (socket.inet_ntoa(an.ip), str(an.ttl))) 78 | except: 79 | continue 80 | elif an.type == dpkt.dns.DNS_AAAA: 81 | try: 82 | answers.append('AAAA: %s (ttl %ss)' % ( 83 | socket.inet_ntop(socket.AF_INET6, an.ip6), str(an.ttl))) 84 | except: 85 | continue 86 | elif an.type == dpkt.dns.DNS_CNAME: 87 | answers.append('CNAME: ' + an.cname) 88 | elif an.type == dpkt.dns.DNS_PTR: 89 | answers.append('PTR: ' + an.ptrname) 90 | elif an.type == dpkt.dns.DNS_NS: 91 | answers.append('NS: ' + an.nsname) 92 | elif an.type == dpkt.dns.DNS_MX: 93 | answers.append('MX: ' + an.mxname) 94 | elif an.type == dpkt.dns.DNS_TXT: 95 | answers.append('TXT: ' + ' '.join(an.text)) 96 | elif an.type == dpkt.dns.DNS_SRV: 97 | answers.append('SRV: 
' + an.srvname) 98 | else: 99 | # un-handled type 100 | continue 101 | if queried != '': 102 | anstext = ", ".join(answers) 103 | 104 | #NXDOMAIN in response 105 | elif dns.qr == dpkt.dns.DNS_A and dns.rcode == dpkt.dns.DNS_RCODE_NXDOMAIN: 106 | queried = self.decode_q(dns) # decode query part 107 | 108 | if queried != '': 109 | anstext = 'NXDOMAIN' 110 | 111 | # did we get an answer? 112 | if anstext and not self.only_noanswer and not self.only_norequest: 113 | self.alert( 114 | str(id) + ' ' + queried + ' / ' + anstext, **conn.info(response=anstext)) 115 | elif not anstext and (self.show_noanswer or self.only_noanswer): 116 | self.alert( 117 | str(id) + ' ' + conn.query + ' / (no answer)', **conn.info()) 118 | 119 | if __name__ == '__main__': 120 | dObj = DshellDecoder() 121 | print dObj 122 | else: 123 | dObj = DshellDecoder() 124 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/dns/innuendo-dns.py: -------------------------------------------------------------------------------- 1 | import dpkt 2 | from dnsdecoder import DNSDecoder 3 | import base64 4 | 5 | 6 | class DshellDecoder(DNSDecoder): 7 | 8 | """ 9 | Proof-of-concept Dshell decoder to detect INNUENDO DNS Channel 10 | 11 | Based on the short marketing video [http://vimeo.com/115206626] the INNUENDO 12 | DNS Channel relies on DNS to communicate with an authoritative name server. 13 | The name server will respond with a base64 encoded TXT answer. This decoder 14 | will analyze DNS TXT queries and responses to determine if it matches the 15 | network traffic described in the video. There are multiple assumptions (*very 16 | poor*) in this detection plugin but serves as a proof-of-concept detector. This 17 | detector has not been tested against authentic INNUENDO DNS Channel traffic. 
18 | 19 | Usage: decode -d innuendo-dns *.pcap 20 | 21 | """ 22 | 23 | def __init__(self): 24 | DNSDecoder.__init__(self, 25 | name='innuendo-dns', 26 | description='proof-of-concept detector for INNUENDO DNS channel', 27 | filter='(port 53)', 28 | author='primalsec', 29 | ) 30 | self.whitelist = [] # probably be necessary to whitelist A/V domains 31 | 32 | def in_whitelist(self, domain): 33 | # add logic 34 | return False 35 | 36 | def decrypt_payload(payload): pass 37 | 38 | def DNSHandler(self, conn, request, response, **kwargs): 39 | query = '' 40 | answers = [] 41 | 42 | for dns in request, response: 43 | 44 | if dns is None: 45 | continue 46 | 47 | id = dns.id 48 | 49 | # DNS Question, extract query name if it is a TXT record request 50 | if dns.qr == dpkt.dns.DNS_Q and dns.qd[0].type == dpkt.dns.DNS_TXT: 51 | query = dns.qd[0].name 52 | 53 | # DNS Answer with data and no errors 54 | elif (dns.qr == dpkt.dns.DNS_A and dns.rcode == dpkt.dns.DNS_RCODE_NOERR and len(dns.an) > 0): 55 | 56 | for an in dns.an: 57 | if an.type == dpkt.dns.DNS_TXT: 58 | answers.append(an.text[0]) 59 | 60 | if query != '' and len(answers) > 0: 61 | # add check here to see if the second level domain and top level 62 | # domain are not in a white list 63 | if self.in_whitelist(query): 64 | return 65 | 66 | # assumption: INNUENDO will use the lowest level domain for C2 67 | # example: AAAABBBBCCCC.foo.bar.com -> AAAABBBBCCCC is the INNUENDO 68 | # data 69 | subdomain = query.split('.')[0] 70 | 71 | # weak test based on video observation *very poor assumption* 72 | if subdomain.isupper(): 73 | # check each answer in the TXT response 74 | for answer in answers: 75 | try: 76 | # INNUENDO DNS channel base64 encodes the response, check to see if 77 | # it contains a valid base64 string *poor assumption* 78 | dummy = base64.b64decode(answer) 79 | 80 | self.alert( 81 | 'INNUENDO DNS Channel', query, '/', answer, **conn.info()) 82 | 83 | # here would be a good place to decrypt the payload (if 
you have the keys) 84 | # decrypt_payload( answer ) 85 | except: 86 | pass 87 | 88 | 89 | if __name__ == '__main__': 90 | dObj = DshellDecoder() 91 | print dObj 92 | else: 93 | dObj = DshellDecoder() 94 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/dns/reservedips.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import dpkt 3 | import socket 4 | from dnsdecoder import DNSDecoder 5 | import IPy 6 | 7 | 8 | class DshellDecoder(DNSDecoder): 9 | 10 | def __init__(self): 11 | DNSDecoder.__init__(self, 12 | name='reservedips', 13 | description='identify DNS resolutions that fall into reserved ip space', 14 | filter='(port 53)', 15 | author='bg', 16 | cleanupinterval=10, 17 | maxblobs=2, 18 | ) 19 | 20 | # source: https://en.wikipedia.org/wiki/Reserved_IP_addresses 21 | nets = ['0.0.0.0/8', # Used for broadcast messages to the current ("this") network as specified by RFC 1700, page 4. 22 | # Used for local communications within a private network as 23 | # specified by RFC 1918. 24 | '10.0.0.0/8', 25 | # Used for communications between a service provider and its 26 | # subscribers when using a Carrier-grade NAT, as specified by 27 | # RFC 6598. 28 | '100.64.0.0/10', 29 | # Used for loopback addresses to the local host, as specified 30 | # by RFC 990. 
31 | '127.0.0.0/8', 32 | # Used for autoconfiguration between two hosts on a single 33 | # link when no IP address is otherwise specified 34 | '169.254.0.0/16', 35 | # Used for local communications within a private network as 36 | # specified by RFC 1918 37 | '172.16.0.0/12', 38 | # Used for the DS-Lite transition mechanism as specified by 39 | # RFC 6333 40 | '192.0.0.0/29', 41 | # Assigned as "TEST-NET" in RFC 5737 for use solely in 42 | # documentation and example source code and should not be used 43 | # publicly 44 | '192.0.2.0/24', 45 | # Used by 6to4 anycast relays as specified by RFC 3068 46 | '192.88.99.0/24', 47 | # Used for local communications within a private network as 48 | # specified by RFC 1918 49 | '192.168.0.0/16', 50 | # Used for testing of inter-network communications between two 51 | # separate subnets as specified in RFC 2544 52 | '198.18.0.0/15', 53 | # Assigned as "TEST-NET-2" in RFC 5737 for use solely in 54 | # documentation and example source code and should not be used 55 | # publicly 56 | '198.51.100.0/24', 57 | # Assigned as "TEST-NET-3" in RFC 5737 for use solely in 58 | # documentation and example source code and should not be used 59 | # publicly 60 | '203.0.113.0/24', 61 | # Reserved for multicast assignments as specified in RFC 5771 62 | '224.0.0.0/4', 63 | # Reserved for future use, as specified by RFC 6890 64 | '240.0.0.0/4', 65 | # Reserved for the "limited broadcast" destination address, as 66 | # specified by RFC 6890 67 | '255.255.255.255/32', 68 | 69 | '::/128', # Unspecified address 70 | '::1/128', # loopback address to the local host. 
71 | '::ffff:0:0/96', # IPv4 mapped addresses 72 | '100::/64', # Discard Prefix RFC 6666 73 | '64:ff9b::/96', # IPv4/IPv6 translation (RFC 6052) 74 | '2001::/32', # Teredo tunneling 75 | # Overlay Routable Cryptographic Hash Identifiers (ORCHID) 76 | '2001:10::/28', 77 | '2001:db8::/32', # Addresses used in documentation 78 | '2002::/16', # 6to4 79 | 'fc00::/7', # Unique local address 80 | 'fe80::/10', # Link-local address 81 | 'ff00::/8', # Multicast 82 | ] 83 | 84 | self.reservednets = [] 85 | for net in nets: 86 | self.reservednets.append(IPy.IP(net)) 87 | self.domains = [] # list for known domains 88 | 89 | def inReservedSpace(self, ipaddress): 90 | for net in self.reservednets: 91 | if ipaddress in net: 92 | return True 93 | return False 94 | 95 | def decode_q(self, dns): 96 | queried = "" 97 | if dns.qd[0].type == dpkt.dns.DNS_A: 98 | queried = queried + "A? %s" % (dns.qd[0].name) 99 | if dns.qd[0].type == dpkt.dns.DNS_AAAA: 100 | queried = queried + "AAAA? %s" % (dns.qd[0].name) 101 | return queried 102 | 103 | def DNSHandler(self, conn, request, response, **kwargs): 104 | anstext = '' 105 | queried = '' 106 | id = None 107 | for dns in request, response: 108 | if dns is None: 109 | continue 110 | id = dns.id 111 | # DNS Question, update connection info with query 112 | if dns.qr == dpkt.dns.DNS_Q: 113 | conn.info(query=self.decode_q(dns)) 114 | 115 | # DNS Answer with data and no errors 116 | elif (dns.qr == dpkt.dns.DNS_A and dns.rcode == dpkt.dns.DNS_RCODE_NOERR and len(dns.an) > 0): 117 | 118 | queried = self.decode_q(dns) 119 | 120 | answers = [] 121 | for an in dns.an: 122 | if an.type == dpkt.dns.DNS_A: 123 | try: 124 | if self.inReservedSpace(socket.inet_ntoa(an.ip)): 125 | answers.append( 126 | 'A: ' + socket.inet_ntoa(an.ip) + ' (ttl ' + str(an.ttl) + 's)') 127 | except: 128 | continue 129 | elif an.type == dpkt.dns.DNS_AAAA: 130 | try: 131 | if self.inReservedSpace(socket.inet_ntop(socket.AF_INET6, an.ip6)): 132 | answers.append( 133 | 'AAAA: ' + 
socket.inet_ntop(socket.AF_INET6, an.ip6) + ' (ttl ' + str(an.ttl) + 's)') 134 | except: 135 | continue 136 | else: 137 | # un-handled type 138 | continue 139 | if queried != '': 140 | anstext = ", ".join(answers) 141 | 142 | if anstext: # did we get an answer? 143 | self.alert( 144 | str(id) + ' ' + queried + ' / ' + anstext, **conn.info(response=anstext)) 145 | 146 | 147 | if __name__ == '__main__': 148 | dObj = DshellDecoder() 149 | print dObj 150 | else: 151 | dObj = DshellDecoder() 152 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/filter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/filter/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/filter/asn-filter.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import util 3 | import netflowout 4 | 5 | 6 | class DshellDecoder(dshell.TCPDecoder): 7 | 8 | def __init__(self, **kwargs): 9 | self.sessions = {} 10 | self.alerts = False 11 | self.file = None 12 | dshell.TCPDecoder.__init__(self, 13 | name='asn-filter', 14 | description='filter connections on autonomous system number (ASN)', 15 | longdescription=""" 16 | This decoder filters connections by autonomous system numbers/names (ASN). 17 | 18 | Chainable decoder used to filter TCP/UDP streams by ASNs. If no 19 | downstream (+) decoder is used the netflow data will be printed to 20 | the screen (when using --asn-filter_alerts). If used without specifying 21 | a asn string, the asn-filter will filter nothing out and pass 22 | everything onto the next decoder or print it. 
23 | 24 | Examples: 25 | 26 | decode -d asn-filter --asn-filter_asn AS8075 --asn-filter_alerts 27 | 28 | This will print the connection info for all connections where 29 | AS8075 is the ASN for either the server of client. 30 | 31 | decode -d asn-filter --asn-filter_asn Google --asn-filter_alerts 32 | 33 | This will print the connection info for all connections where 34 | "Google" appeared in the ASN information. 35 | 36 | decode -d asn-filter+followstream --asn-filter_asn AS8075 37 | 38 | This will filter the streams by ASN and feed them into the 39 | followstream decoder. 40 | """, 41 | filter="ip or ip6", 42 | author='twp/nl', 43 | optiondict={ 44 | 'asn': {'type': 'string', 'help': 'asn for client or server'}, 45 | 'alerts': {'action': 'store_true'}}) 46 | '''instantiate an decoder that will call back to us once the IP decoding is done''' 47 | self.__decoder = dshell.IPDecoder() 48 | self.out = netflowout.NetflowOutput() 49 | self.chainable = True 50 | 51 | def decode(self, *args): 52 | if len(args) is 3: 53 | pktlen, pktdata, ts = args # orig_len,packet,ts format (pylibpcap) 54 | else: # ts,pktdata (pypcap) 55 | ts, pktdata = args 56 | pktlen = len(pktdata) 57 | '''do normal decoder stack to track session ''' 58 | dshell.TCPDecoder.decode(self, pktlen, pktdata, ts) 59 | '''our hook to decode the ip/ip6 addrs, then dump the addrs and raw packet to our callback''' 60 | self.__decoder.IPHandler = self.__callback # set private decoder to our callback 61 | self.__decoder.decode(pktlen, pktdata, ts, raw=pktdata) 62 | 63 | def __callback(self, addr, pkt, ts, raw=None, **kw): 64 | '''substitute IPhandler for forwarding packets to subdecoders''' 65 | if addr in self.sessions or (addr[1], addr[0]) in self.sessions: # if we are not passing this session, drop the packet 66 | if self.subDecoder: 67 | # make it look like a capture 68 | self.subDecoder.decode(len(raw), str(raw), ts) 69 | else: 70 | self.dump(raw, ts) 71 | 72 | def connectionInitHandler(self, conn): 73 | 
'''
@author: tparker
'''

import dshell
import util
import netflowout


class DshellDecoder(dshell.TCPDecoder):

    '''activity tracker '''

    def __init__(self, **kwargs):
        '''
        Constructor
        '''
        # addr tuple -> match-description string for sessions passing the filter
        self.sessions = {}
        self.alerts = False
        self.file = None
        dshell.TCPDecoder.__init__(self,
                                   name='country',
                                   description='filter connections on geolocation (country code)',
                                   longdescription="""
country: filter connections on geolocation (country code)

Chainable decoder to filter TCP/UDP streams on geolocation data. If no
downstream (+) decoders are specified, netflow data will be printed to
the screen.

Mandatory option:

  --country_code: specify (2 character) country code to filter on

Default behavior:

  If either the client or server IP address matches the specified country,
  the stream will be included.

Modifier options:

  --country_neither: Include only streams where neither the client nor the
                     server IP address matches the specified country.

  --country_both: Include only streams where both the client AND the server
                  IP addresses match the specified country.

  --country_notboth: Include streams where the specified country is NOT BOTH
                     the client and server IP. Streams where it is one or
                     the other may be included.


Example:

  decode -d country traffic.pcap -W USonly.pcap --country_code US
  decode -d country+followstream traffic.pcap --country_code US --country_notboth
""",
                                   filter="ip or ip6",
                                   author='twp',
                                   optiondict={
                                       'code': {'type': 'string', 'help': 'two-char country code'},
                                       'neither': {'action': 'store_true', 'help': 'neither (client/server) is in specified country'},
                                       'both': {'action': 'store_true', 'help': 'both (client/server) ARE in specified country'},
                                       'notboth': {'action': 'store_true', 'help': 'specified country is not both client and server'},
                                       'alerts': {'action': 'store_true'}})
        # Private IP decoder whose IPHandler we redirect so we receive the
        # addresses plus the raw frame once IP decoding is done.
        self.__decoder = dshell.IPDecoder()
        self.out = netflowout.NetflowOutput()
        self.chainable = True

    def decode(self, *args):
        """Capture entry point; accepts both callback signatures:
        (pktlen, pktdata, ts) from pylibpcap and (ts, pktdata) from pypcap.
        """
        # FIX: was `len(args) is 3` -- identity comparison on an int only
        # works by CPython's small-int caching accident; use equality.
        if len(args) == 3:
            pktlen, pktdata, ts = args  # orig_len,packet,ts format (pylibpcap)
        else:  # ts,pktdata (pypcap)
            ts, pktdata = args
            pktlen = len(pktdata)
        # do normal decoder stack to track session
        dshell.TCPDecoder.decode(self, pktlen, pktdata, ts)
        # our hook to decode the ip/ip6 addrs, then dump the addrs and raw
        # packet to our callback
        self.__decoder.IPHandler = self.__callback  # set private decoder to our callback
        self.__decoder.decode(pktlen, pktdata, ts, raw=pktdata)

    def __callback(self, addr, pkt, ts, raw=None, **kw):
        '''substitute IPhandler for forwarding packets to subdecoders'''
        # If we are not passing this session, drop the packet.
        if addr in self.sessions or (addr[1], addr[0]) in self.sessions:
            if self.subDecoder:
                # make it look like a capture
                self.subDecoder.decode(len(raw), str(raw), ts)
            else:
                self.dump(raw, ts)

    def connectionInitHandler(self, conn):
        '''see if we have a country match and if so, flag this session for forwarding or dumping'''
        m = self.__countryTest(conn)
        if m:
            self.sessions[conn.addr] = m

    def __countryTest(self, conn):
        """Return a match-description string when conn passes the filter,
        True when no code is configured (pass everything), None otherwise."""
        # If no country code specified, pass all traffic through
        if self.code is None or not len(self.code):
            return True
        # check criteria
        if self.neither and conn.clientcountrycode != self.code and conn.servercountrycode != self.code:
            return 'neither ' + self.code
        if self.both and conn.clientcountrycode == self.code and conn.servercountrycode == self.code:
            return 'both ' + self.code
        if self.notboth and (conn.clientcountrycode != self.code or conn.servercountrycode != self.code):
            return 'not both ' + self.code
        if conn.clientcountrycode == self.code:
            return 'client ' + self.code
        if conn.servercountrycode == self.code:
            return 'server ' + self.code
        # no match
        return None

    def connectionHandler(self, conn):
        # Emit netflow-style output for flagged sessions when requested.
        if conn.addr in self.sessions and self.alerts:
            self.alert(self.sessions[conn.addr], **conn.info())

    def connectionCloseHandler(self, conn):
        # Forget the session once the connection closes.
        if conn.addr in self.sessions:
            del self.sessions[conn.addr]

dObj = DshellDecoder()
import dshell


class DshellDecoder(dshell.IPDecoder):

    """Chainable decoder that filters individual packets against parsed snort rules.

    Rules are compiled by parseSnortRule() into (predicate, msg) pairs; each
    packet is tested in IPHandler and forwarded/dumped on a match.
    """

    def __init__(self):
        dshell.IPDecoder.__init__(self,
                                  name='snort',
                                  description='filter packets by snort rule',
                                  longdescription="""Chainable decoder to filter TCP/UDP streams by snort rule
rule is parsed by dshell, a limited number of options are supported:
currently supported rule options:
        content
        nocase
        depth
        offset
        within
        distance

Mandatory option:

  --snort_rule: snort rule to filter by

    or

  -snort_conf: snort.conf formatted file to read for multiple rules

Modifier options:

  --snort_all: Pass only if all rules pass
  --snort_none: Pass only if no rules pass
  --snort_alert: Alert if rule matches?

Example:
  decode -d snort+followstream traffic.pcap --snort_rule 'alert tcp any any -> any any (content:"....."; nocase; depth ....)'

""",
                                  filter='ip or ip6',
                                  author='twp',
                                  optiondict={'rule': {'type': 'string', 'help': 'snort rule to filter packets'},
                                              'conf': {'type': 'string', 'help': 'snort.conf file to read'},
                                              'alerts': {'action': 'store_true', 'help': 'alert if rule matched'},
                                              'none': {'action': 'store_true', 'help': 'pass if NO rules matched'},
                                              'all': {'action': 'store_true', 'help': 'all rules must match to pass'}
                                              }
                                  )
        self.chainable = True

    def preModule(self):
        """Compile --snort_rule / --snort_conf input into self.rules.

        Each entry of self.rules is a (predicate, msg) tuple produced by
        parseSnortRule(). Bad rules are reported via self.error().
        """
        rules = []
        if self.conf:
            # open() instead of the deprecated file() builtin; the context
            # manager guarantees the handle is closed even if reading fails
            with open(self.conf) as fh:
                rules = [r for r in (r.strip() for r in fh.readlines()) if len(r)]
        else:
            if not self.rule or not len(self.rule):
                self.warn("No rule specified (--%s_rule)" % self.name)
            else:
                rules = [self.rule]
        self.rules = []
        for r in rules:
            try:
                self.rules.append(self.parseSnortRule(r))
            except Exception as e:
                self.error('bad snort rule "%s": %s' % (r, e))
                if self._DEBUG:
                    self._exc(e)
        if self.subDecoder:
            # we filter individual packets so session-based subdecoders will
            # need this set
            self.subDecoder.ignore_handshake = True
        dshell.IPDecoder.preModule(self)

    def rawHandler(self, pktlen, pkt, ts, **kwargs):
        """Stash the raw frame in kwargs so IPHandler can forward or dump it."""
        kwargs['raw'] = pkt  # put the raw frame in the kwargs
        # continue decoding
        return dshell.IPDecoder.rawHandler(self, pktlen, pkt, ts, **kwargs)

    def IPHandler(self, addr, pkt, ts, **kwargs):
        '''check packets using filterfn here'''
        raw = str(
            kwargs['raw'])  # get the raw frame for forwarding if we match
        p = dshell.Packet(self, addr, pkt=str(pkt), ts=ts, **kwargs)
        a = []      # messages of matched rules, used for alerting
        match = None  # None until at least one rule was evaluated
        for r, msg in self.rules:
            if r(p):  # if this rule matched
                match = True
                if msg:
                    a.append(msg)  # append rule message to alerts
                if self.none or not self.all:
                    break  # unless matching all, one match does it
            else:  # last rule did not match
                match = False
                if self.all:
                    break  # stop once no match if all

        # all rules processed, match = state of last rule match;
        # pass the packet when matched (or, with --snort_none, when nothing matched)
        if (match is not None) and ((match and not self.none) or (self.none and not match)):
            self.decodedbytes += len(str(pkt))
            self.count += 1
            if self.alerts:
                self.alert(*a, **p.info())
            if self.subDecoder:
                # decode or dump packet
                self.subDecoder.decode(len(raw), raw, ts)
            else:
                self.dump(len(raw), raw, ts)

    def parseSnortRule(self, ruletext):
        '''returns a lambda function that can be used to filter traffic and the alert message
        this function will expect a Packet() object and return True or False'''
        KEYWORDS = (
            'msg', 'content')  # rule start, signal when we process all seen keywords
        msg = ''
        f = []  # list of condition strings, ANDed together at the end
        rule = ruletext.split(' ', 7)
        (a, proto, sip, sp, arrow, dip, dp) = rule[:7]
        if len(rule) > 7:
            rule = rule[7]  # rule body: the "(option; option; ...)" part
        else:
            rule = None
        if a != 'alert':
            raise Exception('Must be alert rule')
        # header conditions: protocol plus any non-wildcard addr/port
        f.append('p.proto == "' + proto.upper() + '"')
        if sip != 'any':
            f.append('p.sip == "' + sip + '"')
        if dip != 'any':
            f.append('p.dip == "' + dip + '"')
        if sp != 'any':
            f.append('p.sport == ' + sp)
        if dp != 'any':
            f.append('p.dport == ' + dp)
        f = ['(' + (' and '.join(f)) + ')']  # create header condition
        if rule:
            # split between () and split on ;
            rule = rule.strip('()').split(';')
            last = None  # expression string of the previous content match
            while rule:
                try:
                    k, v = rule.pop(0).strip().split(':', 1)
                except:
                    continue
                if k.lower() == 'content':  # reset content match
                    content = v.strip().strip('"')
                    # hex bytes?
                    if content.startswith('|') and content.endswith('|'):
                        content = ''.join(
                            '\\x' + c for c in content.strip('|').split())
                    nocase = depth = offset = distance = within = None
                    # consume this content's modifier options until the next keyword
                    while rule:
                        r = rule[0].strip()
                        if ':' in r:
                            k, v = r.split(':', 1)
                        else:
                            k, v = r, None
                        k = k.lower()
                        if k in KEYWORDS:
                            break  # next rule part
                        elif k == 'nocase':
                            nocase = True
                        elif k == 'depth':
                            depth = int(v)
                        elif k == 'offset':
                            offset = int(v)
                        elif k == 'distance':
                            distance = int(v)
                        elif k == 'within':
                            within = int(v)
                        rule.pop(0)  # remove this keyword:value
                    # add coerce to lower if nocase
                    if nocase:
                        nocase = '.lower()'
                    else:
                        nocase = ''
                    # start,end offsets of find(), maybe number or result of
                    # another find()
                    st, end = offset, depth
                    # if we have a last content match, use the distance/within kws
                    if last:
                        # within means this match has to be within X from
                        # previous+distance, so use previous as offset and within
                        # as depth
                        if within:
                            # set to last match and X from last match
                            st, end = last, last + '+' + str(within)
                        # distance means the next match must be AT LEAST X from the
                        # last
                        if distance:
                            # set start to last match+distance
                            st = last + '+' + str(distance)
                    # else use the offset/depth values as given
                    last = 'p.pkt' + nocase + \
                        '.find(' + "'" + content + "'" + nocase + ',' + \
                        str(st) + ',' + str(end) + ') != -1'
                # NOTE(review): k may have been reassigned by the inner loop above,
                # so a msg option immediately following content modifiers is picked
                # up here — preserved as-is, confirm against original behavior.
                if k.lower() == 'msg':
                    msg = v.strip().strip('"')  # get alert message
            if last:
                f.append('(' + last + ')')
        f = ' and '.join(f)
        self.debug('%s\t%s\t"%s"' % (ruletext, f, msg))
        # 'lambda p:' replaces the Python-2-only 'lambda(p):' tuple-parameter
        # form; identical semantics for a single argument
        return eval('lambda p: ' + f), msg  # return fn and msg


if __name__ == '__main__':
    dObj = DshellDecoder()
    print(dObj)
else:
    dObj = DshellDecoder()
'''
@author: tparker
'''

import dshell
import util


class DshellDecoder(dshell.TCPDecoder):

    '''activity tracker '''

    def __init__(self, **kwargs):
        '''
        Constructor
        '''
        self.sources = []   # allowed source IPs (empty = any)
        self.targets = []   # (ip, port) tuples to watch; either may be None
        self.sessions = {}  # dest ip -> {conn.addr: conn} of live tracked sessions
        self.alerts = False
        self.file = None
        dshell.TCPDecoder.__init__(self,
                                   name='track',
                                   description='tracked activity recorder',
                                   longdescription='''captures all traffic to/from target while a specific connection to the target is up
specify target(s) ip and/or port as --track_target=ip:port,ip...
--track_source=ip,ip.. can be used to limit to specified sources
--track_alerts will turn on alerts for session start/end''',
                                   filter="ip",
                                   author='twp',
                                   optiondict={'target': {'action': 'append'},
                                               'source': {'action': 'append'},
                                               'alerts': {'action': 'store_true'}})
        self.chainable = True

        # instantiate an IPDecoder and replace its IPHandler to decode the
        # ip/ip6 addrs, then pass the packet to our callback, which writes
        # the packet if the addr is in a tracked session
        self.__decoder = dshell.IPDecoder()

    def preModule(self):
        '''parse the source and target lists'''
        if self.target:
            for tstr in self.target:
                targets = util.strtok(tstr, as_list=True)[0]
                for t in targets:
                    try:
                        parts = t.split(':')
                        if len(parts) == 2:
                            ip, port = parts  # IP:port
                        else:
                            ip, port = t, None  # IPv6 addr
                    except:
                        ip, port = t, None  # IP
                    if ip == '':
                        ip = None  # :port
                    self.targets.append((ip, port))
        if self.source:
            for sstr in self.source:
                sources = util.strtok(sstr, as_list=True)[0]
                for ip in sources:
                    self.sources.append(ip)
        dshell.TCPDecoder.preModule(self)

    def decode(self, *args):
        """Accept both capture-library argument orders and fan the packet out."""
        # was "len(args) is 3": identity comparison on an int works only by
        # CPython small-int caching accident; equality is the correct test
        if len(args) == 3:
            pktlen, pktdata, ts = args  # orig_len,packet,ts format (pylibpcap)
        else:  # ts,pktdata (pypcap)
            ts, pktdata = args
            pktlen = len(pktdata)
        # do normal decoder stack to track session
        dshell.TCPDecoder.decode(self, pktlen, pktdata, ts)
        # our hook to decode the ip/ip6 addrs, then dump the addrs and raw
        # packet to our session check routine
        self.__decoder.IPHandler = self.__callback  # set private decoder to our callback
        self.__decoder.decode(pktlen, pktdata, ts, raw=pktdata)

    def __callback(self, addr, pkt, ts, raw=None, **kw):
        '''check to see if this packet is to/from an IP in a session,
        if so write it. the packet will be passed in the 'raw' kwarg'''
        if addr[0][0] in self.sessions:
            ip = addr[0][0]  # source ip
        elif addr[1][0] in self.sessions:
            ip = addr[1][0]  # dest ip
        else:
            return  # not tracked
        # account this packet against every live session for that IP
        for s in self.sessions[ip].values():
            s.sessionpackets += 1
            s.sessionbytes += len(raw)  # actual captured data len
        # dump the packet or sub-decode it
        if self.subDecoder:
            # make it look like a capture
            self.subDecoder.decode(len(raw), str(raw), ts)
        else:
            self.dump(raw, ts)

    def connectionInitHandler(self, conn):
        '''see if dest ip and/or port is in target list and (if a source list)
        source ip is in source list
        if so, put the connection in the tracked-session list by dest ip
        if a new connection to the target comes in from an allowed source,
        the existing connection will still be tracked'''
        ((sip, sport), (dip, dport)) = conn.addr
        sport, dport = str(sport), str(dport)
        if ((dip, dport) in self.targets) or ((dip, None) in self.targets) or ((None, dport) in self.targets):
            if not self.sources or (sip in self.sources):
                s = self.sessions.setdefault(dip, {})
                s[conn.addr] = conn
                if self.alerts:
                    self.alert('session started', **conn.info())
                conn.info(sessionpackets=0, sessionbytes=0)

    def connectionHandler(self, conn):
        '''if a connection to a tracked-session host, alert and write if no subdecoder'''
        if self.alerts:
            if conn.serverip in self.sessions:
                self.alert('inbound', **conn.info())
            if conn.clientip in self.sessions:
                self.alert('outbound', **conn.info())
        if conn.serverip in self.sessions or conn.clientip in self.sessions:
            if not self.subDecoder:
                self.write(conn)

    def connectionCloseHandler(self, conn):
        '''close the tracked session if the initiating connection is closing
        make sure the conn in the session list matches,
        as we may have had more incoming connections to the same ip during the session'''
        if conn.serverip in self.sessions and conn.addr in self.sessions[conn.serverip]:
            if self.alerts:
                self.alert('session ended', **conn.info())
            del self.sessions[conn.serverip][conn.addr]
            # drop the per-IP dict once its last session is gone
            if not self.sessions[conn.serverip]:
                del self.sessions[conn.serverip]

dObj = DshellDecoder()
import dshell
import netflowout


class DshellDecoder(dshell.TCPDecoder):

    """Reports TCP/UDP flows whose duration meets a configurable minimum
    (in minutes, --long-flows_len, default 5)."""

    def __init__(self):
        self.len = 5
        dshell.TCPDecoder.__init__(self,
                                   name='long-flows',
                                   description='display netflows that have a duration of at least 5mins',
                                   filter='(tcp or udp)',
                                   author='bg',
                                   optiondict={
                                       'len': {'type': 'int', 'default': 5, 'help': 'set minimum connection time to alert on, in minutes [default: 5 mins]'},
                                   }
                                   )
        self.out = netflowout.NetflowOutput()

    def connectionHandler(self, conn):
        # Duration in seconds; emit a netflow record once it reaches the
        # configured threshold.
        duration = conn.endtime - conn.starttime
        if duration >= self.len * 60:
            self.alert(**conn.info())


if __name__ == '__main__':
    dObj = DshellDecoder()
    print(dObj)
else:
    dObj = DshellDecoder()
import dshell
import netflowout


class DshellDecoder(dshell.TCPDecoder):

    """Emits one netflow record per completed TCP/UDP connection, with
    optional output grouping via --netflow_group."""

    def __init__(self):
        dshell.TCPDecoder.__init__(self,
                                   name='netflow',
                                   description='generate netflow information from pcap',
                                   longdescription='generate netflow information from pcap',
                                   filter='(tcp or udp)',
                                   author='bg',
                                   # grouping for output module
                                   optiondict={'group': {}}
                                   )
        self.out = netflowout.NetflowOutput()

    def preModule(self):
        # Hand the comma-separated grouping spec to the output module
        # before processing starts.
        group_spec = self.group
        if group_spec:
            self.out.group = group_spec.split(',')
        dshell.TCPDecoder.preModule(self)

    def connectionHandler(self, conn):
        # One alert (netflow line) per connection.
        self.alert(**conn.info())

    def postModule(self):
        # Flush grouped flows, then run the normal teardown.
        self.out.close()
        dshell.TCPDecoder.postModule(self)


if __name__ == '__main__':
    dObj = DshellDecoder()
    print(dObj)
else:
    dObj = DshellDecoder()
import re

from httpdecoder import HTTPDecoder


class DshellDecoder(HTTPDecoder):
    # Dumps per-transaction HTTP details: request line, URL and POST
    # parameters, client/server cookies, and (optionally) the response body.

    def __init__(self):
        HTTPDecoder.__init__(self,
                             name='httpdump',
                             description='Dump useful information about HTTP sessions',
                             filter='tcp and (port 80 or port 8080 or port 8000)',
                             filterfn=lambda ((sip, sp), (dip, dp)): sp in (
                                 80, 8000, 8080) or dp in (80, 8000, 8080),
                             author='amm',
                             optiondict={
                                 'maxurilen': {'type': 'int', 'default': 30, 'help': 'Truncate URLs longer than max len. Set to 0 for no truncating. (default: 30)'},
                                 'maxpost': {'type': 'int', 'default': 1000, 'help': 'Truncate POST body longer than max chars. Set to 0 for no truncating. (default: 1000)'},
                                 'showcontent': {'action': 'store_true', 'help': 'Display response BODY.'},
                                 'showhtml': {'action': 'store_true', 'help': 'Display response BODY only if HTML.'},
                                 'urlfilter': {'type': 'string', 'default': None, 'help': 'Filter to URLs matching this regex'},
                             },
                             )
        self.output = 'colorout'
        # Disable auto-gunzip as we want to indicate content that was
        # compressed in the output
        self.gunzip = False

    def HTTPHandler(self, conn, request, response, requesttime, responsetime):
        # Called once per request/response pair; builds an alert line and
        # then writes the detail sections to self.out.
        host = ''
        loc = ''
        uri = ''
        lastmodified = ''

        #request_time, request, response = self.httpDict[conn.addr]

        # extract method,uri,host from response
        host = util.getHeader(request, 'host')
        if host == '':
            # no Host header: fall back to the server IP of the connection
            host = conn.serverip

        # response may be None (no server reply); read status/reason defensively
        try:
            status = response.status
        except:
            status = ''
        try:
            reason = response.reason
        except:
            reason = ''

        if self.urlfilter:
            # skip transactions whose host+URI doesn't match the regex filter
            if not re.search(self.urlfilter, host + request.uri):
                return

        # split the URI into location and query-string parts
        if '?' in request.uri:
            [uri_location, uri_data] = request.uri.split('?', 1)
        else:
            uri_location = request.uri
            uri_data = ''

        # truncate long URLs unless maxurilen is 0 (= unlimited)
        if self.maxurilen > 0 and len(uri_location) > self.maxurilen:
            uri_location = uri_location[:self.maxurilen] + '[truncated]'
        else:
            uri_location = uri_location

        if response == None:
            response_message = "%s (%s) %s%s" % (
                request.method, 'NO RESPONSE', host, uri_location)
        else:
            response_message = "%s (%s) %s%s (%s)" % (
                request.method, response.status, host, uri_location, util.getHeader(response, 'content-type'))
        # decode query-string and POST body into parameter dicts
        urlParams = util.URLDataToParameterDict(uri_data)
        postParams = util.URLDataToParameterDict(request.body)

        clientCookies = self._parseCookies(util.getHeader(request, 'cookie'))
        serverCookies = self._parseCookies(
            util.getHeader(response, 'set-cookie'))

        self.alert(response_message,
                   urlParams=urlParams, postParams=postParams, clientCookies=clientCookies, serverCookies=serverCookies,
                   **conn.info()
                   )

        referer = util.getHeader(request, 'referer')
        if len(referer):
            self.out.write(' Referer: %s\n' % referer)

        # detail sections below use direction 'cs' (client->server) or
        # 'sc' (server->client) for the color output module
        if clientCookies:
            self.out.write(' Client Transmitted Cookies:\n', direction='cs')
            for key in clientCookies:
                self.out.write(' %s -> %s\n' % (util.printableUnicode(key),
                                                util.printableUnicode(clientCookies[key])), direction='cs')
        if serverCookies:
            self.out.write(' Server Set Cookies:\n', direction='sc')
            for key in serverCookies:
                self.out.write(' %s -> %s\n' % (util.printableUnicode(key),
                                                util.printableUnicode(serverCookies[key])), direction='sc')

        if urlParams:
            self.out.write(' URLParameters:\n', direction='cs')
            for key in urlParams:
                self.out.write(' %s -> %s\n' % (util.printableUnicode(key),
                                                util.printableUnicode(urlParams[key])), direction='cs')
        if postParams:
            self.out.write(' POSTParameters:\n', direction='cs')
            for key in postParams:
                self.out.write(' %s -> %s\n' % (util.printableUnicode(key),
                                                util.printableUnicode(postParams[key])), direction='cs')
        elif len(request.body):
            # body present but not parseable as parameters: dump it raw,
            # truncated at maxpost chars (0 = unlimited)
            self.out.write(' POST Body:\n', direction='cs')
            if len(request.body) > self.maxpost and self.maxpost > 0:
                self.out.write('%s[truncated]\n' % util.printableUnicode(
                    request.body[:self.maxpost]), direction='cs')
            else:
                self.out.write(
                    util.printableUnicode(request.body) + u"\n", direction='cs')

        if self.showcontent or self.showhtml:

            # --showhtml limits body display to HTML content types
            if self.showhtml and 'html' not in util.getHeader(response, 'content-type'):
                return

            # gunzip was disabled in __init__, so decompress here and label
            # the output accordingly
            if 'gzip' in util.getHeader(response, 'content-encoding'):
                content = self.decompressGzipContent(response.body)
                if content == None:
                    content = '(gunzip failed)\n' + response.body
                else:
                    content = '(gzip encoded)\n' + content
            else:
                content = response.body

            self.out.write("Body Content:\n", direction='sc')
            self.out.write(
                util.printableUnicode(content) + u"\n", direction='sc')

    def _parseCookies(self, data):
        # Parse a Cookie/Set-Cookie header value into a dict of
        # URL-unquoted name -> value pairs.
        p, kwp = util.strtok(data, sep='; ')
        return dict((urllib.unquote(k), urllib.unquote(kwp[k]))for k in kwp.keys())


if __name__ == '__main__':
    dObj = DshellDecoder()
    print dObj
else:
    dObj = DshellDecoder()
Each event will generate an 11 | alert that prints out the HTTP Request method and the range value contained 12 | with the HTTP stream. 13 | 14 | Usage: 15 | decode -d ms15-034 -q *.pcap 16 | decode -d ms15-034 -i -q 17 | 18 | References: 19 | https://technet.microsoft.com/library/security/ms15-034 20 | https://ma.ttias.be/remote-code-execution-via-http-request-in-iis-on-windows/ 21 | ''' 22 | def __init__(self): 23 | HTTPDecoder.__init__(self, 24 | name='ms15-034', 25 | description='detect attempts to enumerate MS15-034 vulnerable IIS servers', 26 | longdescription=''' 27 | Proof-of-concept code to detect attempts to enumerate MS15-034 vulnerable 28 | IIS servers and/or cause a denial of service. Each event will generate an 29 | alert that prints out the HTTP Request method and the range value contained 30 | with the HTTP stream. 31 | 32 | Usage: 33 | decode -d ms15-034 -q *.pcap 34 | decode -d ms15-034 -i -q 35 | ''', 36 | filter='tcp and (port 80 or port 8080 or port 8000)', 37 | filterfn=lambda ((sip, sp), (dip, dp)): sp in ( 38 | 80, 8000, 8080) or dp in (80, 8000, 8080), 39 | author='bg', 40 | ) 41 | 42 | def HTTPHandler(self, conn, request, response, requesttime, responsetime): 43 | if response == None: # Denial of Service (no server response) 44 | try: 45 | rangestr = util.getHeader(request,'range') 46 | # check range value to reduce false positive rate 47 | if not rangestr.endswith('18446744073709551615'): return 48 | except: return 49 | self.alert('MS15-034 DoS [Request Method: "%s" URI: "%s" Range: "%s"]' % \ 50 | (request.method, request.uri, rangestr), conn.info()) 51 | 52 | else: # probing for vulnerable server 53 | try: 54 | rangestr = util.getHeader(request,'range') 55 | # check range value to reduce false positive rate 56 | if not rangestr.endswith('18446744073709551615'): return 57 | except: return 58 | 59 | # indication of vulnerable server 60 | if rangestr and (response.status == '416' or \ 61 | response.reason == 'Requested Range Not 
Satisfiable'): 62 | 63 | self.alert('MS15-034 Vulnerable Server [Request Method: "%s" Range: "%s"]' % 64 | (request.method,rangestr), conn.info()) 65 | 66 | if request.method != 'GET': # this could be interesting 67 | pass # waiting on more details 68 | 69 | 70 | if __name__ == '__main__': 71 | dObj = DshellDecoder() 72 | print dObj 73 | else: 74 | dObj = DshellDecoder() 75 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/http/rip-http.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import re 3 | import datetime 4 | import sys 5 | import string 6 | 7 | # import any other modules here 8 | import re 9 | import os 10 | import hashlib 11 | import util 12 | 13 | # we extend this 14 | from httpdecoder import HTTPDecoder 15 | 16 | 17 | class DshellDecoder(HTTPDecoder): 18 | 19 | def __init__(self): 20 | HTTPDecoder.__init__(self, 21 | name='rip-http', 22 | description='rip files from HTTP traffic', 23 | filter='tcp and port 80', 24 | author='bg/twp', 25 | optiondict={'append_conn': {'action': 'store_true', 'help': 'append sourceip-destip to filename'}, 26 | 'append_ts': {'action': 'store_true', 'help': 'append timestamp to filename'}, 27 | 'direction': {'help': 'cs=only capture client POST, sc=only capture server GET response'}, 28 | 'outdir': {'help': 'directory to write output files (Default: current directory)', 'metavar': 'DIRECTORY', 'default': '.'}, 29 | 'content_filter': {'help': 'regex MIME type filter for files to save'}, 30 | 'name_filter': {'help': 'regex filename filter for files to save'}} 31 | ) 32 | 33 | def preModule(self): 34 | if self.content_filter: 35 | self.content_filter = re.compile(self.content_filter) 36 | if self.name_filter: 37 | self.name_filter = re.compile(self.name_filter) 38 | HTTPDecoder.preModule(self) 39 | 40 | self.openfiles = {} # dict of httpfile objects, indexed by url 41 | 42 | # Create output directory, if necessary 
43 | if not os.path.exists(self.outdir): 44 | try: 45 | os.makedirs(self.outdir) 46 | except (IOError, OSError) as e: 47 | self.error("Could not create directory '%s': %s" % 48 | (self.outdir, e)) 49 | sys.exit(1) 50 | 51 | def splitstrip(self, data, sep, strip=' '): 52 | return [lpart.strip(strip) for lpart in data.split(sep)] 53 | 54 | def HTTPHandler(self, conn, request, response, requesttime, responsetime): 55 | payload = None 56 | self.debug('%s %s' % (repr(request), repr(response))) 57 | if (not self.direction or self.direction == 'cs') and request and request.method == 'POST' and request.body: 58 | payload = request 59 | elif (not self.direction or self.direction == 'sc') and response and response.status[0] == '2': 60 | payload = response 61 | if payload: 62 | if not (not self.content_filter or self.content_filter.search(payload.headers['content-type'])): 63 | payload = None 64 | if payload: 65 | # Calculate URL 66 | host = util.getHeader(request, 'host') 67 | if host == '': 68 | host = conn.serverip 69 | url = host + request.uri 70 | # File already open 71 | if url in self.openfiles: 72 | self.debug("Adding response section to %s" % url) 73 | (s, e) = self.openfiles[url].handleresponse(response) 74 | self.write(" --> Range: %d - %d\n" % (s, e)) 75 | # New file 76 | else: 77 | filename = request.uri.split('?')[0].split('/')[-1] 78 | if 'content-disposition' in payload.headers: 79 | cdparts = self.splitstrip(payload.headers['content-disposition'], ';') 80 | for cdpart in cdparts: 81 | try: 82 | k, v = self.splitstrip(cdpart, '=') 83 | if k == 'filename': 84 | filename = v 85 | except: 86 | pass 87 | self.debug("New file with URL: %s" % url) 88 | if not self.name_filter or self.name_filter.search(filename): 89 | if self.append_conn: 90 | filename += '_%s-%s' % (conn.serverip, 91 | conn.clientip) 92 | if self.append_ts: 93 | filename += '_%d' % (conn.ts) 94 | if not len(filename): 95 | filename = '%s-%s_index.html' % ( 96 | conn.serverip, conn.clientip) 97 | 
class httpfile:

    """Tracks one file being ripped from HTTP traffic on disk.

    Handles ranged (Content-Range) responses by seeking to the right offset,
    records which byte ranges have been written, and reports completeness.
    """

    def __init__(self, filename, decoder_instance):
        self.complete = False
        # Expected size in bytes of full file transfer
        self.size = 0
        # List of tuples indicating byte chunks already received and written to
        # disk
        self.ranges = []
        self.decoder = decoder_instance
        self.filename = filename
        try:
            # 'wb': response bodies are raw bytes and we seek() to range
            # offsets, so the file must be opened in binary mode ('w' would
            # corrupt binary content on platforms with newline translation)
            self.fh = open(filename, 'wb')
        except IOError as e:
            self.decoder.error(
                "Could not create file '%s': %s" % (filename, e))
            self.fh = None

    def __del__(self):
        # On teardown: close the handle, and if the transfer never completed,
        # rename the file *_INCOMPLETE and report the missing ranges.
        if self.fh is None:
            return
        self.fh.close()
        if not self.done():
            print("Incomplete file: %s" % self.filename)
            try:
                os.rename(self.filename, self.filename + "_INCOMPLETE")
            except:
                pass
            le = 0
            for s, e in self.ranges:
                # NOTE(review): gap is reported relative to the previous
                # range's end (initially 0); preserved from the original
                if s > le + 1:
                    print("Missing bytes between %d and %d" % (le, s))
                le = e

    def handleresponse(self, response):
        """Write one response body at its proper offset.

        Determines the byte range from Content-Range (falling back to
        Content-Length for the total size), records it, writes the body,
        and returns the (start, end) tuple written.
        """
        # Check for Content Range
        range_start = 0
        range_end = len(response.body) - 1
        if 'content-range' in response.headers:
            m = re.search(
                r'bytes (\d+)-(\d+)/(\d+|\*)', response.headers['content-range'])
            if m:
                range_start = int(m.group(1))
                range_end = int(m.group(2))
                # clamp the end if the body is shorter than the declared range
                if len(response.body) < (range_end - range_start + 1):
                    range_end = range_start + len(response.body) - 1
                try:
                    # total size may be '*' (unknown); keep the largest seen
                    if int(m.group(3)) > self.size:
                        self.size = int(m.group(3))
                except:
                    pass
        elif 'content-length' in response.headers:
            try:
                if int(response.headers['content-length']) > self.size:
                    self.size = int(response.headers['content-length'])
            except:
                pass
        # Update range tracking
        self.ranges.append((range_start, range_end))
        # Write part of file
        if self.fh is not None:
            self.fh.seek(range_start)
            self.fh.write(response.body)
        return (range_start, range_end)

    def done(self):
        """Return True once all bytes up to the expected size are on disk."""
        self.checkranges()
        return self.complete

    def checkranges(self):
        """Coalesce recorded ranges; set self.complete if they cover the file.

        Returns True if a gap was found between received ranges.
        """
        self.ranges.sort()
        current_start = 0
        current_end = 0
        foundgap = False
        # print self.ranges
        for s, e in self.ranges:
            if s <= current_end + 1:
                # contiguous (or overlapping) with the running span: extend it
                current_end = e
            else:
                foundgap = True
                current_start = s
                current_end = e
        if not foundgap:
            if (current_end + 1) >= self.size:
                self.complete = True
        return foundgap
Set to 0 for no truncating. (default: 30)'}, 21 | 'md5': {'action': 'store_true', 'help': 'calculate MD5 for each response. Available in CSV output.'} 22 | }, 23 | ) 24 | self.gunzip = False # Not interested in response body 25 | 26 | def HTTPHandler(self, conn, request, response, requesttime, responsetime): 27 | host = '' 28 | loc = '' 29 | lastmodified = '' 30 | 31 | #request_time, request, response = self.httpDict[conn.addr] 32 | 33 | # extract method,uri,host from response 34 | host = util.getHeader(request, 'host') 35 | if host == '': 36 | host = conn.serverip 37 | 38 | try: 39 | status = response.status 40 | except: 41 | status = '' 42 | try: 43 | reason = response.reason 44 | except: 45 | reason = '' 46 | 47 | loc = '' 48 | if status[:2] == '30': 49 | loc = util.getHeader(response, 'location') 50 | if len(loc): 51 | loc = '-> ' + loc 52 | 53 | lastmodified = util.HTTPlastmodified(response) 54 | referer = util.getHeader(request, 'referer') 55 | useragent = util.getHeader(request, 'user-agent') 56 | via = util.getHeader(request, 'via') 57 | 58 | try: 59 | responsesize = len(response.body.rstrip('\0')) 60 | except: 61 | responsesize = 0 62 | 63 | if self.md5: 64 | md5 = self._bodyMD5(response) 65 | else: 66 | md5 = '' 67 | 68 | # File objects 69 | try: 70 | if len(response.body) > 0: 71 | responsefile = dfile.dfile( 72 | name=request.uri, data=response.body) 73 | else: 74 | responsefile = '' 75 | except: 76 | responsefile = '' 77 | if request.method == 'POST' and len(request.body): 78 | ulcontenttype, ulfilename, uldata = self.POSTHandler(request.body) 79 | uploadfile = dfile.dfile(name=ulfilename, data=uldata) 80 | else: 81 | uploadfile = None 82 | 83 | requestInfo = '%s %s%s HTTP/%s' % (request.method, 84 | host, 85 | request.uri[:self.maxurilen] + '[truncated]' if self.maxurilen > 0 and len( 86 | request.uri) > self.maxurilen else request.uri, 87 | request.version) 88 | if response: 89 | responseInfo = '%s %s %s %s' % (status, reason, loc, lastmodified) 90 | 
else: 91 | responseInfo = '' 92 | 93 | self.alert("%-80s // %s" % (requestInfo, responseInfo), referer=referer, useragent=useragent, request=requestInfo, response=responseInfo, request_time=requesttime, response_time=responsetime, request_method=request.method, host=host, 94 | uri=request.uri, status=status, reason=reason, lastmodified=lastmodified, md5=md5, responsesize=responsesize, contenttype=util.getHeader(response, 'content-type'), responsefile=responsefile, uploadfile=uploadfile, via=via, **conn.info()) 95 | if self.out.sessionwriter: 96 | self.write(request.data, direction='cs') 97 | if response: 98 | self.write(response.body, direction='sc') 99 | 100 | # MD5sum(hex) of the body portion of the response 101 | def _bodyMD5(self, response): 102 | try: 103 | if len(response.body) > 0: 104 | return hashlib.md5(response.body.rstrip('\0')).hexdigest() 105 | else: 106 | return '' 107 | except: 108 | return '' 109 | 110 | def POSTHandler(self, postdata): 111 | next_line_is_data = False 112 | contenttype = '' 113 | filename = '' 114 | for l in postdata.split("\r\n"): 115 | if next_line_is_data: 116 | break 117 | if l == '': 118 | next_line_is_data = True # \r\n\r\n before data 119 | continue 120 | try: 121 | k, v = self.splitstrip(l, ':') 122 | if k == 'Content-Type': 123 | contenttype = v 124 | if k == 'Content-Disposition': 125 | cdparts = self.splitstrip(v, ';') 126 | for cdpart in cdparts: 127 | try: 128 | k, v = self.splitstrip(cdpart, '=', '"') 129 | if k == 'filename': 130 | filename = v 131 | except: 132 | pass 133 | except: 134 | pass 135 | return contenttype, filename, l 136 | 137 | def splitstrip(self, data, sep, strip=' '): 138 | return [lpart.strip(strip) for lpart in data.split(sep)] 139 | 140 | 141 | if __name__ == '__main__': 142 | dObj = DshellDecoder() 143 | print dObj 144 | else: 145 | dObj = DshellDecoder() 146 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/misc/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/followstream.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import util 3 | import colorout 4 | #from impacket.ImpactDecoder import EthDecoder 5 | import datetime 6 | import sys 7 | import traceback 8 | import logging 9 | 10 | # import any other modules here 11 | import cgi 12 | 13 | 14 | class DshellDecoder(dshell.TCPDecoder): 15 | 16 | def __init__(self): 17 | dshell.TCPDecoder.__init__(self, 18 | name='followstream', 19 | description='Generates color-coded Screen/HTML output similar to Wireshark Follow Stream', 20 | longdescription=""" 21 | Generates color-coded Screen/HTML output similar to Wireshark Follow Stream. 22 | 23 | Output by default uses the "colorout" output class. This will send TTY 24 | color-formatted text to stdout (the screen) if available. If output 25 | is directed to a file (-o or --outfile), the output will be in HTML format. 26 | 27 | Note that the default bpf filter is to view all tcp traffic. The decoder 28 | can also process UDP traffic, or it can be limited to specific streams 29 | with --bpf/--ebpf. 
30 | 31 | Useful options: 32 | 33 | --followstream_hex -- generates output in hex mode 34 | --followstream_time -- includes timestamp for each blob/transmission 35 | 36 | Example: 37 | 38 | decode -d followstream --ebpf 'port 80' mypcap.pcap --followstream_time 39 | decode -d followstream --ebpf 'port 80' mypcap.pcap -o file.html --followstream_time 40 | 41 | """, 42 | filter="tcp", 43 | author='amm', 44 | optiondict={ 45 | 'hex': {'action': 'store_true', 'help': 'two-column hex/ascii output'}, 46 | 'time': {'action': 'store_true', 'help': 'include timestamp for each blob'}, 47 | 'encoding': {'type': 'string', 'help': 'attempt to interpret text as encoded with specified schema'}, 48 | } 49 | ) 50 | self.out = colorout.ColorOutput() 51 | 52 | def __errorHandler(self, blob, expected, offset, caller): 53 | # Custom error handler that is called when data in a blob is missing or 54 | # overlapping 55 | if offset > expected: # data is missing 56 | self.data_missing_message += "[%d missing bytes]" % ( 57 | offset - expected) 58 | elif offset < expected: # data is overlapping 59 | self.data_missing_message += "[%d overlapping bytes]" % ( 60 | offset - expected) 61 | return True 62 | 63 | def preModule(self): 64 | self.connectionCount = 0 65 | # Reset the color mode, in case a file is specified 66 | self.out.setColorMode() 67 | # Used to indicate when data is missing or overlapping 68 | self.data_missing_message = '' 69 | # overwrite the output module's default error handler 70 | self.out.errorH = self.__errorHandler 71 | 72 | def connectionHandler(self, connection): 73 | 74 | try: 75 | 76 | # Skip Connections with no data transferred 77 | if connection.clientbytes + connection.serverbytes < 1: 78 | return 79 | 80 | # Update Connection Counter 81 | self.connectionCount += 1 82 | 83 | # Connection Header Information 84 | self.out.write("Connection %d (%s)\n" % ( 85 | self.connectionCount, str(connection.proto)), formatTag='H1') 86 | self.out.write("Start: %s UTC\n End: %s 
UTC\n" % (datetime.datetime.utcfromtimestamp( 87 | connection.starttime), datetime.datetime.utcfromtimestamp(connection.endtime)), formatTag='H2') 88 | self.out.write("%s:%s -> %s:%s (%d bytes)\n" % (connection.clientip, connection.clientport, 89 | connection.serverip, connection.serverport, connection.clientbytes), formatTag="H2", direction="cs") 90 | self.out.write("%s:%s -> %s:%s (%d bytes)\n\n" % (connection.serverip, connection.serverport, 91 | connection.clientip, connection.clientport, connection.serverbytes), formatTag="H2", direction="sc") 92 | 93 | self.out.write( 94 | connection, hex=self.hex, time=self.time, encoding=self.encoding) 95 | if self.data_missing_message: 96 | self.out.write( 97 | self.data_missing_message + "\n", level=logging.WARNING, time=self.time) 98 | self.data_missing_message = '' 99 | 100 | # Line break before next session 101 | self.out.write("\n\n") 102 | 103 | except KeyboardInterrupt: 104 | raise 105 | except: 106 | print 'Error in connectionHandler: ', sys.exc_info()[1] 107 | traceback.print_exc(file=sys.stdout) 108 | 109 | 110 | if __name__ == '__main__': 111 | dObj = DshellDecoder() 112 | print dObj 113 | else: 114 | dObj = DshellDecoder() 115 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/grep.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import datetime 3 | import sys 4 | 5 | # import any other modules here 6 | import re 7 | 8 | 9 | class grepDecoder(dshell.TCPDecoder): 10 | 11 | def __init__(self): 12 | dshell.TCPDecoder.__init__(self, 13 | name='grep', 14 | description='Search for patterns in streams.', 15 | longdescription=""" 16 | Grep is a utility decoder, useful on it's own or in combination with 17 | downstream (chained) decoders. 
Your search expression is specified with the 18 | --grep_expression option, and the default behavior is that the entire "line" 19 | of text surround each match will be printed, along with the standard 20 | connection information. However, granular match information is passed to the 21 | output decoder giving the user more control about the type of output they 22 | would like to see. Following is the named-variable convention passed to 23 | output: 24 | 25 | match: Full expression match 26 | m1: First sub-match 27 | m2: Second sub-match 28 | .. 29 | mn: N'th sub-match 30 | 31 | Examples: 32 | 33 | Snag User-Agent, display as CSV: 34 | 35 | decode -d grep --grep_ignorecase --grep_expression 'User-Agent: (.*?)$' --output csvout,m1 36 | 37 | The text following User-Agent will be the first sub-match and then 38 | printed as a named field in CSV output. 39 | 40 | Better yet: 41 | 42 | decode -d grep --grep_ignorecase --grep_expression 'User-Agent: (.*?)$' --oformat "%(m1)s" 43 | 44 | This uses the same expression but instead of the default output, 45 | specifies "m1" in a format string which makes it the ONLY value 46 | displayed. This is nice for piping into sort/uniq or other 47 | command-line filters. 48 | 49 | Iterative matching 50 | 51 | Rather than alerting on an entire line or just the first hit within that line, 52 | Python's regular expression module offers a function called "finditer" which 53 | scans across input text and provides an iterable object of ALL the matches. 54 | So with "--grep_iterate" we can use that. 55 | 56 | Examples: 57 | 58 | Simplistically grab all hyperlinks and dump to stdout: 59 | 60 | decode -d grep --grep_expression '' --grep_iterate --grep_ignorecase --oformat "%(m1)s" 61 | 62 | Chainable 63 | 64 | Grep is chainable. What does this mean? If data within a connection 65 | matches a grep expression, the entire connection is considered a "hit" and is 66 | then allowed to be processed by subDecoders. Non-hits are dropped. 
67 | 68 | So this means you can search for an expression and view all matching 69 | connections in followstream, or process all as web traffic, etc. 70 | 71 | Examples: 72 | 73 | View all web traffic that originated from Windows 7 machines: 74 | 75 | decode -d grep+web --grep_ignorecase --grep_expression 'User-Agent: [^\\r\\n]*Windows 6.1' 76 | """, 77 | author='amm', 78 | filter='tcp', 79 | optiondict={ 80 | 'expression': {'type': 'string', 'help': 'Search expression'}, 81 | 'ignorecase': {'action': 'store_true', 'help': 'Case insensitive search.'}, 82 | 'singleline': {'action': 'store_true', 'help': 'Treat entire connection as single line of text.'}, 83 | 'iterate': {'action': 'store_true', 'help': 'Iterate hits on match string.'}, 84 | 'invert': {'action': 'store_true', 'help': 'For chained only: Invert hit results.'} 85 | } 86 | ) 87 | self.chainable = True 88 | 89 | def preModule(self): 90 | 91 | # 92 | # Does subdecoder have a blobHandler 93 | # 94 | if self.subDecoder and 'blobHandler' in dir(self.subDecoder): 95 | self.debug("subDecoder has blobHandler") 96 | self.subblobHandler = True 97 | # Indexed by connection, storage for all blobs being deferred 98 | self.deferredBlobs = {} 99 | else: 100 | self.subblobHandler = False 101 | 102 | # Pass/Drop dictionary of connections to use in chain mode 103 | self.connstate = {} 104 | 105 | # Must use singleLine mode when subDecoder is present 106 | if self.subDecoder: 107 | self.singleline = True 108 | 109 | # Re parameters 110 | self.reFlags = 0 111 | if self.ignorecase: 112 | self.reFlags = self.reFlags | re.IGNORECASE 113 | if self.singleline or self.iterate: 114 | self.reFlags = self.reFlags | re.S 115 | 116 | # Re Expression -> Object 117 | if self.expression == None or not len(self.expression): 118 | self.error( 119 | "Must specify expression using --%s_expression" % self.name) 120 | sys.exit(1) 121 | else: 122 | sys.stderr.write("Using expression: '%s'\n" % self.expression) 123 | self.reObj = 
re.compile(self.expression, self.reFlags) 124 | 125 | dshell.TCPDecoder.preModule(self) 126 | 127 | def errorH(self, **x): 128 | # custom errorHandler here 129 | pass 130 | 131 | def blobHandler(self, connection, blob): 132 | # Defer all Blob processing until the connection is handled, so we can 133 | # grep the entire connection stream 134 | if self.subblobHandler: 135 | if connection not in self.deferredBlobs: 136 | self.deferredBlobs[connection] = [] 137 | self.deferredBlobs[connection].append(blob) 138 | 139 | def connectionHandler(self, connection): 140 | 141 | # Normal processing, no subDecoder 142 | if not self.subDecoder: 143 | self.__searchStream(connection.data(direction='cs', errorHandler=self.errorH) + 144 | "\n" + connection.data(direction='sc', errorHandler=self.errorH), connection) 145 | return 146 | 147 | # Call sub blobHandler for all blobs 148 | if self.subblobHandler and self.__connectionTest(connection): 149 | self.debug("Preparing to process %d blobs in subdecoder" % 150 | len(self.deferredBlobs)) 151 | for b in self.deferredBlobs[connection]: 152 | self.subDecoder.blobHandler(connection, b) 153 | self.deferredBlobs[connection] = None 154 | 155 | # Call sub connectionHandler if necessary 156 | if 'connectionHandler' in dir(self.subDecoder) and self.__connectionTest(connection): 157 | self.subDecoder.connectionHandler(connection) 158 | 159 | def __alert(self, conn, hitstring, matchObj): 160 | kwargs = {'match': matchObj.group(0)} 161 | matchNumber = 0 162 | for mgroup in matchObj.groups(): 163 | matchNumber += 1 164 | kwargs['m' + str(matchNumber)] = mgroup 165 | self.alert(hitstring, kwargs, **conn.info()) 166 | 167 | def __connectionTest(self, connection): 168 | if connection not in self.connstate: 169 | if self.reObj.search(connection.data(direction='cs', errorHandler=self.errorH) + "\n" + connection.data(direction='sc', errorHandler=self.errorH)): 170 | self.connstate[connection] = True 171 | else: 172 | self.connstate[connection] = False 
173 | if self.invert: 174 | self.connstate[connection] = not self.connstate[connection] 175 | if self.connstate[connection]: 176 | return True 177 | else: 178 | return False 179 | 180 | def __searchStream(self, d, conn): 181 | 182 | if self.singleline or self.iterate: 183 | self.__runSearch(d, conn) 184 | else: 185 | lines = d.split('\n') 186 | for l in lines: 187 | l = l.rstrip() 188 | self.__runSearch(l, conn) 189 | 190 | def __runSearch(self, d, conn): 191 | if self.iterate: 192 | for m in self.reObj.finditer(d): 193 | self.__alert(conn, m.group(0), m) 194 | else: 195 | m = self.reObj.search(d) 196 | if m: 197 | self.__alert(conn, d, m) 198 | 199 | 200 | # always instantiate an dObj of the class 201 | if __name__ == '__main__': 202 | dObj = grepDecoder() 203 | print dObj 204 | else: 205 | dObj = grepDecoder() 206 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/merge.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import dpkt 3 | 4 | 5 | class DshellDecoder(dshell.Decoder): 6 | 7 | """ 8 | merge.py - merge all pcap in to a single file 9 | 10 | Example: decode -d merge *.pcap -W merged.pcap 11 | """ 12 | 13 | def __init__(self): 14 | dshell.Decoder.__init__(self, 15 | name='merge', 16 | description='dump all packets to single file', 17 | longdescription="""Example: decode -d merge *.pcap -W merged.pcap""", 18 | author='bg/twp' 19 | ) 20 | self.chainable = True 21 | 22 | def rawHandler(self, pktlen, pkt, ts, **kw): 23 | if self.subDecoder: 24 | return self.subDecoder.rawHandler(pktlen, str(pkt), ts, **kw) 25 | else: 26 | return self.dump(pktlen, pkt, ts) 27 | 28 | 29 | if __name__ == '__main__': 30 | dObj = DshellDecoder() 31 | print dObj 32 | else: 33 | dObj = DshellDecoder() 34 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/stream2dump.py: 
-------------------------------------------------------------------------------- 1 | import dshell 2 | import util 3 | import colorout 4 | #from impacket.ImpactDecoder import EthDecoder 5 | import datetime 6 | import sys 7 | import traceback 8 | import logging 9 | import struct 10 | import socket 11 | import os 12 | 13 | # import any other modules here 14 | import cgi 15 | 16 | 17 | class DshellDecoder(dshell.TCPDecoder): 18 | 19 | def __init__(self): 20 | dshell.TCPDecoder.__init__(self, 21 | name='stream2dump', 22 | description='Generates color-coded Screen/HTML output similar to Wireshark Follow Stream', 23 | longdescription=""" 24 | Generates color-coded Screen/HTML output similar to Wireshark Follow Stream. 25 | 26 | Output by default uses the "colorout" output class. This will send TTY 27 | color-formatted text to stdout (the screen) if available. If output 28 | is directed to a file (-o or --outfile), the output will be in HTML format. 29 | 30 | Note that the default bpf filter is to view all tcp traffic. The decoder 31 | can also process UDP traffic, or it can be limited to specific streams 32 | with --bpf/--ebpf. 
33 | 34 | Useful options: 35 | 36 | --followstream_hex -- generates output in hex mode 37 | --followstream_time -- includes timestamp for each blob/transmission 38 | 39 | Example: 40 | 41 | decode -d followstream --ebpf 'port 80' mypcap.pcap --followstream_time 42 | decode -d followstream --ebpf 'port 80' mypcap.pcap -o file.html --followstream_time 43 | 44 | """, 45 | filter="", 46 | author='amm', 47 | optiondict={ 48 | 'hex': {'action': 'store_true', 'help': 'two-column hex/ascii output'}, 49 | 'time': {'action': 'store_true', 'help': 'include timestamp for each blob'}, 50 | 'encoding': {'type': 'string', 'help': 'attempt to interpret text as encoded with specified schema'}, 51 | 'outfiles': {'type': 'string', 'help': 'output files'}, 52 | } 53 | ) 54 | self.out = colorout.ColorOutput() 55 | 56 | def __errorHandler(self, blob, expected, offset, caller): 57 | # Custom error handler that is called when data in a blob is missing or 58 | # overlapping 59 | if offset > expected: # data is missing 60 | self.data_missing_message += "[%d missing bytes]" % ( 61 | offset - expected) 62 | elif offset < expected: # data is overlapping 63 | self.data_missing_message += "[%d overlapping bytes]" % ( 64 | offset - expected) 65 | return True 66 | 67 | def preModule(self): 68 | self.connectionCount = 0 69 | # Reset the color mode, in case a file is specified 70 | self.out.setColorMode() 71 | # Used to indicate when data is missing or overlapping 72 | self.data_missing_message = '' 73 | # overwrite the output module's default error handler 74 | self.out.errorH = self.__errorHandler 75 | self.dumpfiles = open(self.outfiles, 'wb') 76 | self.connlen = [] 77 | 78 | def postModule(self): 79 | self.out.write('Writing Index\n') 80 | for i in self.connlen: 81 | self.dumpfiles.write(struct.pack('I', i)) 82 | self.dumpfiles.write(struct.pack('I', len(self.connlen))) 83 | self.dumpfiles.close() 84 | 85 | def connectionHandler(self, connection): 86 | 87 | try: 88 | 89 | # Skip Connections with 
no data transferred 90 | if connection.clientbytes + connection.serverbytes < 1: 91 | return 92 | 93 | # Update Connection Counter 94 | self.connectionCount += 1 95 | 96 | # Connection Header Information 97 | self.out.write("Connection %d (%s)\n" % ( 98 | self.connectionCount, str(connection.proto)), formatTag='H1') 99 | self.out.write("Start: %s UTC\n End: %s UTC\n" % (datetime.datetime.utcfromtimestamp( 100 | connection.starttime), datetime.datetime.utcfromtimestamp(connection.endtime)), formatTag='H2') 101 | self.out.write("%s:%s -> %s:%s (%d bytes)\n" % (connection.clientip, connection.clientport, 102 | connection.serverip, connection.serverport, connection.clientbytes), formatTag="H2", direction="cs") 103 | self.out.write("%s:%s -> %s:%s (%d bytes)\n\n" % (connection.serverip, connection.serverport, 104 | connection.clientip, connection.clientport, connection.serverbytes), formatTag="H2", direction="sc") 105 | 106 | 107 | clientip = struct.unpack('I', socket.inet_aton(connection.clientip))[0] 108 | serverip = struct.unpack('I', socket.inet_aton(connection.serverip))[0] 109 | outdata = struct.pack('IIIHHI', len(connection.blobs), clientip, serverip, connection.clientport, connection.serverport, int(connection.starttime)) 110 | outdata += struct.pack('I', len(connection.pkts)) 111 | for i in connection.pkts: 112 | outdata += struct.pack('I', i) 113 | 114 | for i in connection.blobs: 115 | packetdata = i.data() 116 | packetout = i.direction[0] + struct.pack('I', len(packetdata)) + packetdata 117 | outdata += packetout 118 | self.dumpfiles.write(outdata) 119 | self.connlen.append(len(outdata)) 120 | 121 | #self.out.write( 122 | # connection, hex=self.hex, time=self.time, encoding=self.encoding) 123 | if self.data_missing_message: 124 | self.out.write( 125 | self.data_missing_message + "\n", level=logging.WARNING, time=self.time) 126 | self.data_missing_message = '' 127 | 128 | # Line break before next session 129 | self.out.write("\n\n") 130 | 131 | except 
KeyboardInterrupt: 132 | raise 133 | except: 134 | print 'Error in connectionHandler: ', sys.exc_info()[1] 135 | traceback.print_exc(file=sys.stdout) 136 | 137 | 138 | if __name__ == '__main__': 139 | dObj = DshellDecoder() 140 | print dObj 141 | else: 142 | dObj = DshellDecoder() 143 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/synrst.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import dpkt 3 | 4 | 5 | class DshellDecoder(dshell.IPDecoder): 6 | 7 | """ 8 | Simple TCP syn/rst filter (ipv4) only 9 | """ 10 | 11 | def __init__(self): 12 | dshell.IPDecoder.__init__(self, 13 | name='synrst', 14 | description='detect failed attempts to connect (SYN followed by a RST/ACK)', 15 | filter="tcp[13]=2 or tcp[13]=20", 16 | author='bg' 17 | ) 18 | self.tracker = {} # key = (srcip,srcport,seqnum,dstip,dstport) 19 | 20 | def packetHandler(self, ip=None): 21 | tcp = dpkt.ip.IP(ip.pkt).data 22 | 23 | if tcp.flags & 2: # check for SYN flag 24 | seqnum = tcp.seq 25 | key = '%s:%s:%d:%s:%s' % ( 26 | ip.sip, ip.sport, seqnum, ip.dip, ip.dport) 27 | self.tracker[key] = '' 28 | elif tcp.flags & 20: # check for RST/ACK flags 29 | acknum = tcp.ack - 1 30 | tmpkey = '%s:%s:%d:%s:%s' % ( 31 | ip.dip, ip.dport, acknum, ip.sip, ip.sport) 32 | if self.tracker.__contains__(tmpkey): 33 | self.alert('Failed connection', **ip.info()) 34 | del self.tracker[tmpkey] 35 | 36 | 37 | if __name__ == '__main__': 38 | dObj = DshellDecoder() 39 | print dObj 40 | else: 41 | dObj = DshellDecoder() 42 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/writer.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on Jan 13, 2012 3 | 4 | @author: tparker 5 | ''' 6 | 7 | import dshell 8 | import dpkt 9 | from output import PCAPWriter 10 | 11 | 12 | class 
DshellDecoder(dshell.Decoder): 13 | 14 | ''' 15 | session writer - chain to a decoder to end the chain if the decoder does not output session or packets on its own 16 | if chained to a packet-based decoder, writes all packets to pcap file, can be used to convert or concatenate files 17 | if chained to a connection-based decoder, writes selected streams to session file 18 | ''' 19 | 20 | def __init__(self, **kwargs): 21 | ''' 22 | Constructor 23 | ''' 24 | self.file = None 25 | dshell.Decoder.__init__(self, 26 | name='writer', 27 | description='pcap/session writer', 28 | author='twp', 29 | raw=True, 30 | optiondict=dict(filename=dict(default='%(clientip)s:%(clientport)s-%(serverip)s:%(serverport)s-%(direction)s.txt'), 31 | ) 32 | ) 33 | 34 | def rawHandler(self, pktlen, pkt, ts): 35 | self.decodedbytes += pktlen 36 | self.count += 1 37 | self.dump(pktlen, pkt, ts) # pktlen may be wrong if we stripped vlan 38 | 39 | def IPHandler(self, addr, ip, ts, pkttype=None, **kw): 40 | self.decodedbytes += len(ip.data) 41 | self.count += 1 42 | # if we are passed in IP data vs layer-2 frames, we need to encapsulate 43 | # them 44 | self.dump(dpkt.ethernet.Ethernet(data=str(ip), pkttype=type), ts=ts) 45 | 46 | def connectionHandler(self, conn): 47 | self.write(conn) 48 | 49 | dObj = DshellDecoder() 50 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/misc/xor.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import util 3 | import struct 4 | 5 | 6 | class DshellDecoder(dshell.TCPDecoder): 7 | 8 | def __init__(self): 9 | self.xorconn = {} # required to track each individual connection 10 | dshell.TCPDecoder.__init__(self, 11 | name='xor', 12 | description='XOR an entire stream with a given single byte key', 13 | filter="tcp", 14 | author='twp', 15 | optiondict={ 16 | 'key': {'type': 'str', 'default': '0xff', 'help': 'xor key [default 255]'}, 17 | 'cskey': {'type': 
'str', 'default': None, 'help': 'c->s xor key [default None]'}, 18 | 'sckey': {'type': 'str', 'default': None, 'help': 's->c xor key [default None]'}, 19 | 'resync': {'action': 'store_true', 'help': 'resync if the key is seen in the stream'}, 20 | } 21 | ) 22 | # sets chainable to true and requires connectionInitHandler() and 23 | # connectionCloseHandler() 24 | self.chainable = True 25 | 26 | def preModule(self, *args, **kwargs): 27 | dshell.TCPDecoder.preModule(self, *args, **kwargs) 28 | # twp handle hex keys 29 | self.key = self.makeKey(self.key) 30 | if self.cskey: 31 | self.cskey = self.makeKey(self.cskey) 32 | if self.sckey: 33 | self.sckey = self.makeKey(self.sckey) 34 | 35 | def makeKey(self, key): 36 | if key.startswith('"'): 37 | return key[1:-1] 38 | if key.startswith('0x'): 39 | k, key = '', key[2:] 40 | for i in xrange(0, len(key), 2): 41 | k += chr(int(key[i:i + 2], 16)) 42 | return k 43 | else: 44 | return struct.pack('I', int(key)) 45 | 46 | # 47 | # connectionInitHandler is required as this module (and all other chainable modules) will have to track all 48 | # each connection independently of dshell.TCPDecoder 49 | # 50 | def connectionInitHandler(self, conn): 51 | # need to set up a custom connection tracker to handle 52 | self.xorconn[conn.addr] = dshell.Connection(self, conn.addr, conn.ts) 53 | # self.xorconn[conn.addr]=conn 54 | 55 | # 56 | # Each blob will be xor'ed and the "newblob" data will be added to the connection 57 | # we are individually tracking 58 | # 59 | def blobHandler(self, conn, blob): 60 | k = 0 # key index 61 | # create new data (ie. 
pkt data) 62 | # with appropriate key 63 | data, newdata = blob.data(), '' 64 | self.debug('IN ' + util.hexPlusAscii(blob.data())) 65 | if self.cskey != None and blob.direction == 'cs': 66 | key = self.cskey 67 | elif self.sckey != None and blob.direction == 'sc': 68 | key = self.sckey 69 | else: 70 | key = self.key 71 | for i in xrange(len(data)): 72 | if self.resync and data[i:i + len(key)] == key: 73 | k = 0 # resync if the key is seen 74 | # xor this byte with the aligned byte from the key 75 | newdata += chr(ord(data[i]) ^ ord(key[k])) 76 | k = (k + 1) % len(key) # move key position 77 | # update our connection object with the new data 78 | newblob = self.xorconn[conn.addr].update( 79 | conn.endtime, blob.direction, newdata) 80 | self.debug('OUT ' + repr(self.key) + ' ' + util.hexPlusAscii(newdata)) 81 | # if there is another decoder we want to pass this data too 82 | if newblob and 'blobHandler' in dir(self.subDecoder): 83 | # pass to the subDecoder's blobHandler() 84 | self.subDecoder.blobHandler(self.xorconn[conn.addr], newblob) 85 | 86 | # 87 | # The connection has finished without errors, then we pass the entire connection to the subDecoder's 88 | # connectionHandler() 89 | # 90 | def connectionHandler(self, conn): 91 | if conn.addr in self.xorconn: 92 | self.xorconn[conn.addr].proto = conn.proto 93 | if 'connectionHandler' in dir(self.subDecoder): 94 | self.subDecoder.connectionHandler(self.xorconn[conn.addr]) 95 | else: 96 | self.write(self.xorconn[conn.addr]) 97 | 98 | # 99 | # connectionCloseHandler is called when: 100 | # - a connection finishes w/o errors (no data loss) 101 | # - a connection finishes w errors 102 | # 103 | # If the connection exists in our custom connection tracker (self.xorconn), 104 | # we will have to pass it to the subDecoder's connectionCloseHandler 105 | # 106 | # 107 | def connectionCloseHandler(self, conn): 108 | if conn.addr in self.xorconn: 109 | if 'connectionCloseHandler' in dir(self.subDecoder): 110 | 
self.subDecoder.connectionCloseHandler(self.xorconn[conn.addr]) 111 | del self.xorconn[conn.addr] 112 | 113 | 114 | if __name__ == '__main__': 115 | dObj = DshellDecoder() 116 | print dObj 117 | else: 118 | dObj = DshellDecoder() 119 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/protocol/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/protocol/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/protocol/ether.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import util 3 | import dpkt 4 | import datetime 5 | import binascii 6 | 7 | 8 | class DshellDecoder(dshell.Decoder): 9 | 10 | def __init__(self): 11 | dshell.Decoder.__init__(self, 12 | name='ether', 13 | description='raw ethernet capture decoder', 14 | filter='', 15 | author='twp', asdatetime=True 16 | ) 17 | 18 | def rawHandler(self, dlen, data, ts, **kw): 19 | if self.verbose: 20 | self.log("%.06f %d\n%s" % (ts, dlen, util.hexPlusAscii(str(data)))) 21 | eth = dpkt.ethernet.Ethernet(str(data)) 22 | src = binascii.hexlify(eth.src) 23 | dst = binascii.hexlify(eth.dst) 24 | self.alert('%6x->%6x %4x len %d' % (long(src, 16), long(dst, 16), eth.type, 25 | len(eth.data)), type=eth.type, bytes=len(eth.data), src=src, dst=dst, ts=ts) 26 | 27 | if __name__ == '__main__': 28 | dObj = DshellDecoder() 29 | print dObj 30 | else: 31 | dObj = DshellDecoder() 32 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/protocol/ip.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import util 3 | import dpkt 4 | import traceback 5 | 6 | 7 | class DshellDecoder(dshell.IP6Decoder): 8 
| 9 | _PROTO_MAP = {dpkt.ip.IP_PROTO_TCP: 'TCP', 17: 'UDP'} 10 | 11 | def __init__(self): 12 | dshell.IP6Decoder.__init__(self, 13 | name='ip', 14 | description='IPv4/IPv6 decoder', 15 | filter='ip or ip6', 16 | author='twp', 17 | ) 18 | 19 | def packetHandler(self, ip=None, proto=None): 20 | if self.verbose: 21 | self.out.log(util.hexPlusAscii(ip.pkt)) 22 | self.alert(**ip.info()) 23 | if self.out.sessionwriter: 24 | self.write(ip) 25 | 26 | if __name__ == '__main__': 27 | dObj = DshellDecoder() 28 | print dObj 29 | else: 30 | dObj = DshellDecoder() 31 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/protocol/protocol.py: -------------------------------------------------------------------------------- 1 | import dshell 2 | import dpkt 3 | 4 | # Build a list of known IP protocols from dpkt 5 | try: 6 | PROTOCOL_MAP = dict((v, k[9:]) for k, v in dpkt.ip.__dict__.iteritems() if type( 7 | v) == int and k.startswith('IP_PROTO_') and k != 'IP_PROTO_HOPOPTS') 8 | except: 9 | PROTOCOL_MAP = {} 10 | 11 | 12 | class DshellDecoder(dshell.IPDecoder): 13 | 14 | """ 15 | protocol.py 16 | 17 | Identifies non-standard protocols (not tcp, udp or icmp) 18 | 19 | References: 20 | http://www.networksorcery.com/enp/protocol/ip.htm 21 | """ 22 | 23 | def __init__(self): 24 | dshell.IPDecoder.__init__(self, 25 | name='protocol', 26 | description='Identifies non-standard protocols (not tcp, udp or icmp)', 27 | filter='(ip and not tcp and not udp and not icmp)', 28 | author='bg', 29 | ) 30 | 31 | def packetHandler(self, ip): 32 | p = PROTOCOL_MAP.get(ip.proto, ip.proto) 33 | self.alert('PROTOCOL: %s (%d)' % 34 | (p, ip.proto), sip=ip.sip, dip=ip.dip, ts=ip.ts) 35 | 36 | if __name__ == '__main__': 37 | dObj = DshellDecoder() 38 | print dObj 39 | else: 40 | dObj = DshellDecoder() 41 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/smb/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/smb/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/smb/rip-smb-uploads.py: -------------------------------------------------------------------------------- 1 | """ 2 | 2015 Feb 13 3 | 4 | Goes through SMB traffic and snips out any file uploads it sees. 5 | 6 | Specifically, it looks for create, write, and close commands and creates a 7 | local file, writes the raw data to the local file, and closes the file, 8 | respectively. 9 | """ 10 | 11 | import dshell 12 | from smbdecoder import SMBDecoder 13 | import sys 14 | import util 15 | import os 16 | 17 | SMB_STATUS_SUCCESS = 0x0 18 | SMB_COM_OPEN = 0x02 # Open a file. 19 | SMB_COM_CLOSE = 0x04 # Close a file. 20 | SMB_COM_NT_CREATE_ANDX = 0xa2 # Create or open a file or a directory. 21 | SMB_COM_WRITE_ANDX = 0x2f # Extended file write with AndX chaining. 22 | 23 | 24 | class DshellDecoder(SMBDecoder): 25 | 26 | def __init__(self): 27 | self.fidhandles = {} # dictionary to map fid handles to filenames 28 | # dictionary to map fid handles to local filedescriptors 29 | # (ie. 
fd = open(fname,'wb')) 30 | self.fds = {} 31 | self.outdir = None 32 | SMBDecoder.__init__(self, 33 | name='rip-smb-uploads', 34 | description='Extract files uploaded via SMB', 35 | filter='tcp and port 445', 36 | filterfn=lambda t: t[0][1] == 445 or t[1][1] == 445, 37 | author='bg', 38 | optiondict={ 39 | "outdir": {"help": "Directory to place files (default: ./smb_out)", "default": "./smb_out", "metavar": "DIRECTORY"}, 40 | } 41 | ) 42 | self.legacy = True 43 | 44 | def preModule(self): 45 | if not os.path.exists(self.outdir): 46 | try: 47 | os.makedirs(self.outdir) 48 | except OSError as e: 49 | self.error("Could not create directory '%s'\n%s" % (self.outdir, e)) 50 | sys.exit(1) 51 | 52 | def SMBHandler(self, conn, request=None, response=None, requesttime=None, responsetime=None, cmd=None, status=None): 53 | # we only care about valid responses and matching request/response user 54 | # IDs 55 | if status == SMB_STATUS_SUCCESS and request.uid == response.uid: 56 | 57 | if cmd == SMB_COM_NT_CREATE_ANDX: # file is being requested/opened 58 | self.debug('%s UID: %s MID: %s NT Create AndX Status: %s' % ( 59 | conn.addr, request.uid, response.mid, hex(status))) 60 | filename = request.PARSE_NT_CREATE_ANDX_REQUEST( 61 | request.smbdata) 62 | if type(filename) == type(None): 63 | self.debug('Error: smb.SMB.PARSE_NT_CREATE_ANDX_REQUEST\n%s' % util.hexPlusAscii(request.smbdata)) 64 | return 65 | 66 | fid = response.PARSE_NT_CREATE_ANDX_RESPONSE(response.smbdata) 67 | self.debug('%s FID: %s' % (conn.addr, fid)) 68 | 69 | if fid == -1: 70 | self.debug('Error: smb.SMB.PARSE_NT_CREATE_ANDX_RESPONSE\n%s' % util.hexPlusAscii(response.smbdata)) 71 | self.debug(util.hexPlusAscii(response.smbdata)) 72 | return 73 | self.fidhandles[fid] = self.__localfilename(self.outdir, os.path.normpath(filename)) 74 | 75 | elif cmd == SMB_COM_WRITE_ANDX: # write data to the file 76 | fid, rawbytes = request.PARSE_WRITE_ANDX(request.smbdata) 77 | 78 | # do we have a local fd already open to 
handle this write? 79 | if fid in self.fds.keys(): 80 | self.fds[fid].write(rawbytes) 81 | else: 82 | try: 83 | fidhandle = self.fidhandles[fid] 84 | self.fds[fid] = open(fidhandle, 'wb') 85 | self.fds[fid].write(rawbytes) 86 | except KeyError: 87 | self.debug("Error: Could not find fidhandle for FID %s" % (fid)) 88 | return 89 | 90 | elif cmd == SMB_COM_CLOSE: # file is being closed 91 | fid = request.PARSE_COM_CLOSE(request.smbdata) 92 | if fid in self.fds.keys(): 93 | self.log(repr(conn) + '\t%s' % (self.fidhandles[fid])) 94 | self.fds[fid].close() 95 | del self.fds[fid] 96 | if fid in self.fidhandles.keys(): 97 | self.debug('Closing FID: %s Filename: %s' % 98 | (hex(fid), self.fidhandles[fid])) 99 | del self.fidhandles[fid] 100 | 101 | 102 | def __localfilename(self, path, origname): 103 | # Generates a local file name based on the original 104 | tmp = origname.replace("\\", "_") 105 | tmp = tmp.replace("/", "_") 106 | tmp = tmp.replace(":", "_") 107 | localname = '' 108 | for c in tmp: 109 | if ord(c) > 32 and ord(c) < 127: 110 | localname += c 111 | else: 112 | localname += "%%%02X" % ord(c) 113 | localname = os.path.join(path, localname) 114 | postfix = '' 115 | i = 0 116 | while os.path.exists(localname + postfix): 117 | i += 1 118 | postfix = "_%02d" % i 119 | return localname + postfix 120 | 121 | 122 | if __name__ == '__main__': 123 | dObj = DshellDecoder() 124 | print dObj 125 | else: 126 | dObj = DshellDecoder() 127 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/smb/smbfiles.py: -------------------------------------------------------------------------------- 1 | """ 2 | 2015 Feb 13 3 | 4 | Processes SMB traffic and tries to find file reads and writes. 5 | 6 | When a read or write action is seen, the size of the transfer is recorded in 7 | a new "smbfile" object and a count is incremented for the type of action taken 8 | (i.e. reads+1 or writes+1). 
9 | 10 | After the connection closes, an alert is generated showing some information 11 | about the connection, the action taken (read, write, or both), the full name 12 | of the file and how much data was transferred. 13 | """ 14 | 15 | from smbdecoder import SMBDecoder 16 | import util 17 | 18 | SMB_STATUS_SUCCESS = 0x0 19 | SMB_COM_OPEN = 0x02 # Open a file. 20 | SMB_COM_CLOSE = 0x04 # Close a file. 21 | SMB_COM_NT_CREATE_ANDX = 0xa2 # Create or open a file or a directory. 22 | SMB_COM_WRITE_ANDX = 0x2f # Extended file write with AndX chaining. 23 | SMB_COM_READ_ANDX = 0x2E 24 | SMB_COM_SESSION_SETUP_ANDX = 0x73 25 | SMB_COM_TREE_CONNECT_ANDX = 0x75 26 | 27 | 28 | class DshellDecoder(SMBDecoder): 29 | 30 | def __init__(self): 31 | # dictionary indexed by uid, points to login tuple (hostname, 32 | # domain\name) (string) 33 | self.uidname = {} 34 | self.tidmap = {} # dictionary indexed by tid, points to tree path 35 | # dictionary of smb file objects, indexed by conn+fid (use 36 | # sessIndexFromFID function) 37 | self.smbfileobjs = {} 38 | SMBDecoder.__init__(self, 39 | name='smbfiles', 40 | description='List files accessed via smb', 41 | filter='tcp and (port 445 or port 139)', 42 | filterfn=lambda t: t[0][1] == 445 or t[1][1] == 445 or t[0][1] == 139 or t[1][1] == 139, 43 | author='amm', 44 | optiondict={ 45 | 'nopsexec': {'action': 'store_true', 'help': 'supress psexecsvc streams from output'}, 46 | 'activeonly': {'action': 'store_true', 'help': 'only output files with reads or writes'} 47 | } 48 | ) 49 | 50 | def fileIndexFromFID(self, conn, fid): 51 | return ':'.join((str(conn.starttime), conn.sip, str(conn.sport), conn.dip, str(conn.dport), str(fid))) 52 | 53 | def connectionHandler(self, conn): 54 | SMBDecoder.connectionHandler(self, conn) 55 | for k in self.smbfileobjs.keys(): 56 | del self.smbfileobjs[k] 57 | 58 | # 59 | # Internal class to contain info about files 60 | # 61 | class smbfile: 62 | 63 | def __init__(self, parent, conn, fid, opentime, 
filename, username, hostname, treepath): 64 | self.parent = parent 65 | self.conn = conn 66 | self.opentime = opentime 67 | self.closetime = conn.endtime 68 | self.filename = filename 69 | self.username = username 70 | self.hostname = hostname 71 | self.treepath = treepath 72 | self.writes = 0 73 | self.reads = 0 74 | self.byteswritten = 0 75 | self.bytesread = 0 76 | 77 | def writeblock(self, data): 78 | self.writes += 1 79 | self.byteswritten += len(data) 80 | 81 | def readblock(self, data): 82 | self.reads += 1 83 | self.bytesread += len(data) 84 | 85 | def alert(self): 86 | if self.parent.nopsexec and self.filename.lower().startswith('\psexecsvc'): 87 | return 88 | if self.reads > 0 and self.writes > 0: 89 | mode = 'B' 90 | elif self.reads > 0: 91 | mode = 'R' 92 | elif self.writes > 0: 93 | mode = 'W' 94 | else: 95 | mode = '-' 96 | if self.parent.activeonly and mode == '-': 97 | return 98 | kwargs = { 99 | 'filename': self.filename, 'username': self.username, 'hostname': self.hostname, 'treepath': self.treepath, 100 | 'opentime': self.opentime, 'closetime': self.closetime, 'mode': mode, 101 | 'writes': self.writes, 'reads': self.reads, 'byteswritten': self.byteswritten, 'bytesread': self.bytesread 102 | } 103 | kwargs.update(self.conn.info()) 104 | kwargs['ts'] = self.opentime 105 | self.parent.alert( 106 | "%s %s%s (%s)" % ( 107 | self.username, self.treepath, self.filename, mode), 108 | kwargs 109 | ) 110 | 111 | def __del__(self): 112 | self.alert() 113 | 114 | def SMBHandler(self, conn, request=None, response=None, requesttime=None, responsetime=None, cmd=None, status=None): 115 | # we only care about valid responses and matching request/response user 116 | # IDs 117 | if status == SMB_STATUS_SUCCESS and request.uid == response.uid: 118 | 119 | # 120 | # SMB_COM_SESSION_SETUP - Start tracking user authentication by UID 121 | # 122 | if cmd == SMB_COM_SESSION_SETUP_ANDX and type(status) != type(None): 123 | auth_record = 
request.PARSE_SESSION_SETUP_ANDX_REQUEST( 124 | request.smbdata) 125 | if not(auth_record): 126 | return 127 | domain_name = auth_record.domain_name 128 | user_name = auth_record.user_name 129 | host_name = auth_record.host_name 130 | self.uidname[response.uid] = ( 131 | host_name, "%s\%s" % (domain_name, user_name)) 132 | 133 | # 134 | # SMB_COM_TREE_CONNECT - Start tracking tree by TID 135 | # 136 | if cmd == SMB_COM_TREE_CONNECT_ANDX: 137 | request_path = unicode(request.SMB_COM_TREE_CONNECT_ANDX_Request( 138 | request.smbdata), 'utf-16').encode('utf-8').rstrip('\0') 139 | self.tidmap[response.tid] = request_path 140 | 141 | # 142 | # SMB_COM_NT_CREATE - Start tracking file handle by FID 143 | # 144 | # file is being requested/opened 145 | elif cmd == SMB_COM_NT_CREATE_ANDX: 146 | self.debug('%s UID: %s MID: %s NT Create AndX Status: %s' % ( 147 | conn.addr, request.uid, response.mid, hex(status))) 148 | filename = request.PARSE_NT_CREATE_ANDX_REQUEST( 149 | request.smbdata) 150 | if type(filename) == type(None): 151 | self.debug('Error: smb.SMB.PARSE_NT_CREATE_ANDX_REQUEST\n%s' % util.hexPlusAscii( 152 | request.smbdata)) 153 | return 154 | fid = response.PARSE_NT_CREATE_ANDX_RESPONSE(response.smbdata) 155 | if fid == -1: 156 | self.debug('Error: smb.SMB.PARSE_NT_CREATE_ANDX_RESPONSE\n%s' % util.hexPlusAscii( 157 | response.smbdata)) 158 | self.debug(util.hexPlusAscii(response.smbdata)) 159 | return 160 | # Setup smbfile object 161 | if response.uid in self.uidname: 162 | hostname, username = self.uidname[response.uid] 163 | else: 164 | hostname = 'Unknown' 165 | username = 'Unknown\\Unknown' 166 | if response.tid in self.tidmap: 167 | treepath = self.tidmap[response.tid] 168 | else: 169 | treepath = '' 170 | fileobj = self.smbfile( 171 | self, conn, fid, requesttime, filename, username, hostname, treepath) 172 | fileIndex = self.fileIndexFromFID(conn, fid) 173 | self.smbfileobjs[fileIndex] = fileobj 174 | 175 | # 176 | # SMB_COM_WRITE - File writes 177 | # 178 
| elif cmd == SMB_COM_WRITE_ANDX: # write data to the file 179 | fid, rawbytes = request.PARSE_WRITE_ANDX(request.smbdata) 180 | #self.debug('COM_WRITE_ANDX\n%s' % (util.hexPlusAscii(request.smbdata))) 181 | fileIndex = self.fileIndexFromFID(conn, fid) 182 | if fileIndex in self.smbfileobjs: 183 | self.smbfileobjs[fileIndex].writeblock(rawbytes) 184 | 185 | # 186 | # SMB_COM_READ - File reads 187 | # 188 | elif cmd == SMB_COM_READ_ANDX: # read data from the file 189 | fid = request.PARSE_READ_ANDX_Request(request.smbdata) 190 | rawbytes = response.PARSE_READ_ANDX_Response(response.smbdata) 191 | #self.debug('COM_READ_ANDX (FID %s)\n%s' % (fid, util.hexPlusAscii(response.smbdata))) 192 | fileIndex = self.fileIndexFromFID(conn, fid) 193 | if fileIndex in self.smbfileobjs: 194 | self.smbfileobjs[fileIndex].readblock(rawbytes) 195 | 196 | # 197 | # SMB_COM_CLOSE - Closing file 198 | # 199 | elif cmd == SMB_COM_CLOSE: # file is being closed 200 | fid = request.PARSE_COM_CLOSE(request.smbdata) 201 | fileIndex = self.fileIndexFromFID(conn, fid) 202 | if fileIndex in self.smbfileobjs: 203 | self.smbfileobjs[fileIndex].closetime = responsetime 204 | del self.smbfileobjs[fileIndex] 205 | 206 | if __name__ == '__main__': 207 | dObj = DshellDecoder() 208 | print dObj 209 | else: 210 | dObj = DshellDecoder() 211 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/templates/PacketDecoder.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import dshell 4 | import output 5 | import util 6 | 7 | 8 | class DshellDecoder(dshell.IPDecoder): 9 | 10 | '''generic packet-level decoder template''' 11 | 12 | def __init__(self, **kwargs): 13 | '''decoder-specific config''' 14 | 15 | '''pairs of 'option':{option-config}''' 16 | self.optiondict = {} 17 | 18 | '''bpf filter, for ipV4''' 19 | self.filter = '' 20 | '''filter function''' 21 | # self.filterfn= 22 | 23 | '''init 
superclasses''' 24 | self.__super__().__init__(**kwargs) 25 | 26 | def packetHandler(self, ip): 27 | '''handle as Packet() ojects''' 28 | pass 29 | 30 | # create an instance at load-time 31 | dObj = DshellDecoder() 32 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/templates/SessionDecoder.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import dshell 4 | import output 5 | import util 6 | 7 | 8 | class DshellDecoder(dshell.TCPDecoder): 9 | 10 | '''generic session-level decoder template''' 11 | 12 | def __init__(self, **kwargs): 13 | '''decoder-specific config''' 14 | 15 | '''pairs of 'option':{option-config}''' 16 | self.optiondict = {} 17 | 18 | '''bpf filter, for ipV4''' 19 | self.filter = '' 20 | '''filter function''' 21 | # self.filterfn= 22 | 23 | '''init superclasses''' 24 | self.__super__().__init__(**kwargs) 25 | 26 | def packetHandler(self, udp, data): 27 | '''handle UDP as Packet(),payload data 28 | remove this if you want to make UDP into pseudo-sessions''' 29 | pass 30 | 31 | def connectionInitHandler(self, conn): 32 | '''called when connection starts, before any data''' 33 | pass 34 | 35 | def blobHandler(self, conn, blob): 36 | '''handle session data as soon as reassembly is possible''' 37 | pass 38 | 39 | def connectionHandler(self, conn): 40 | '''handle session once all data is reassembled''' 41 | pass 42 | 43 | def connectionCloseHandler(self, conn): 44 | '''called when connection ends, after data is handled''' 45 | 46 | # create an instance at load-time 47 | dObj = DshellDecoder() 48 | -------------------------------------------------------------------------------- /dshell-defcon/decoders/templates/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/templates/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/decoders/tftp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/dshell-defcon/decoders/tftp/__init__.py -------------------------------------------------------------------------------- /dshell-defcon/doc/generate-doc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | d=`pwd` 3 | if [ "$1" ]; then d=$1; fi 4 | source $d/.dshellrc || exit 5 | 6 | for f in $d/lib/*.py $d/lib/output/*.py $d/bin/*.py; do 7 | pydoc -w `basename $f|cut -d. -f1` 8 | done 9 | 10 | for f in `find $d/decoders -name \*.py -not -name __init__.py`; do 11 | pydoc -w $f 12 | done 13 | -------------------------------------------------------------------------------- /dshell-defcon/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | # install depdencies 4 | RUN apt-get update && apt-get install -y \ 5 | python-crypto \ 6 | python-dpkt \ 7 | python-ipy \ 8 | python-pypcap \ 9 | python-pip \ 10 | wget \ 11 | git 12 | 13 | RUN pip install pygeoip 14 | 15 | # Download the latest version of the code from GitHub 16 | WORKDIR /opt/ 17 | RUN git clone https://github.com/USArmyResearchLab/Dshell.git 18 | 19 | # download and gunzip GeoIP files 20 | WORKDIR /opt/Dshell/share/GeoIP/ 21 | RUN wget https://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz 22 | RUN wget https://geolite.maxmind.com/download/geoip/database/GeoIPv6.dat.gz 23 | RUN wget https://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz 24 | RUN wget 
https://download.maxmind.com/download/geoip/database/asnum/GeoIPASNumv6.dat.gz 25 | RUN gunzip *.gz 26 | 27 | # make Dshell 28 | WORKDIR /opt/Dshell/ 29 | RUN make 30 | 31 | # Used to mount pcap from a host OS directory 32 | VOLUME ["/mnt/pcap"] 33 | 34 | CMD ["/opt/Dshell/dshell"] 35 | -------------------------------------------------------------------------------- /dshell-defcon/docker/README.md: -------------------------------------------------------------------------------- 1 | ## Building a Dshell Docker image 2 | 3 | Step 1: Build a Docker image that has Dshell installed and configured 4 | ```bash 5 | sudo docker build -t dshell . 6 | ``` 7 | 8 | Step 2: Run the container with a native host directory (/home/user/pcap/) mounted in /mnt/pcap 9 | ```bash 10 | sudo docker run -v /home/user/pcap:/mnt/pcap -it dshell 11 | ``` 12 | 13 | Step 3: Use Dshell to analyze network traffic 14 | ```bash 15 | decode -d netflow /mnt/pcap/*.pcap 16 | ``` 17 | -------------------------------------------------------------------------------- /dshell-defcon/dshell: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /bin/bash --rcfile $(dirname "$0")/.dshellrc 3 | -------------------------------------------------------------------------------- /dshell-defcon/dshell-decode: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source $(dirname "$0")/.dshellrc 3 | decode "$@" 4 | -------------------------------------------------------------------------------- /dshell-defcon/install-ubuntu.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from pkgutil import iter_modules 4 | from subprocess import call 5 | 6 | dependencies = { 7 | "Crypto": "crypto", 8 | "dpkt": "dpkt", 9 | "IPy": "ipy", 10 | "pcap": "pypcap" 11 | } 12 | 13 | installed, missing_pkgs = [pkg[1] for pkg in iter_modules()], [] 14 | 15 | for module, pkg in 
dependencies.items(): 16 | if module not in installed: 17 | print("dshell requires {}".format(module)) 18 | missing_pkgs.append("python-{}".format(pkg)) 19 | else: 20 | print("{} is installed".format(module)) 21 | 22 | if missing_pkgs: 23 | cmd = ["sudo", "apt-get", "install"] + missing_pkgs 24 | 25 | print(" ".join(cmd)) 26 | call(cmd) 27 | 28 | call(["make", "all"]) 29 | -------------------------------------------------------------------------------- /dshell-defcon/lib/dfile.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Dshell external file class/utils 3 | for use in rippers, dumpers, etc. 4 | 5 | @author: amm 6 | ''' 7 | import os 8 | from dshell import Blob 9 | from shutil import move 10 | from hashlib import md5 11 | 12 | ''' 13 | Mode Constants 14 | ''' 15 | FILEONDISK = 1 # Object refers to file already written to disk 16 | FILEINMEMORY = 2 # Object contains file contents in data member 17 | 18 | ''' 19 | dfile -- Dshell file class. 20 | 21 | Extends blob for offset based file chunk (segment) reassembly. 22 | Removes time and directionality from segments. 23 | 24 | Decoders can instantiate this class and pass it to 25 | output modules or other decoders. 26 | 27 | Decoders can choose to pass a file in memory or already 28 | written to disk. 
29 | 30 | A dfile object can have one of the following modes: 31 | FILEONDISK 32 | FILEINMEMORY 33 | 34 | ''' 35 | 36 | 37 | class dfile(Blob): 38 | 39 | def __init__(self, mode=FILEINMEMORY, name=None, data=None, **kwargs): 40 | 41 | # Initialize Segments 42 | # Only really used in memory mode 43 | self.segments = {} 44 | self.startoffset = 0 45 | self.endoffset = 0 46 | 47 | # Initialize consistent info members 48 | self.mode = mode 49 | self.name = name 50 | self.diskpath = None 51 | self.info_keys = [ 52 | 'mode', 'name', 'diskpath', 'startoffset', 'endoffset'] 53 | 54 | # update with additional info 55 | self.info(**kwargs) 56 | # update data 57 | if data != None: 58 | self.update(data) 59 | 60 | def __iter__(self): 61 | ''' 62 | Undefined 63 | ''' 64 | pass 65 | 66 | def __str__(self): 67 | ''' 68 | Returns filename (string) 69 | ''' 70 | return self.name 71 | 72 | def __repr__(self): 73 | ''' 74 | Returns filename (string) 75 | ''' 76 | return self.name 77 | 78 | def md5(self): 79 | ''' 80 | Returns md5 of file 81 | Calculate based on reassembly from FILEINMEMORY 82 | or loads from FILEONDISK 83 | ''' 84 | if self.mode == FILEINMEMORY: 85 | return md5(self.data()).hexdigest() 86 | elif self.mode == FILEONDISK: 87 | m = md5() 88 | fh = open(self.diskpath, 'r') 89 | m.update(fh.read()) 90 | fh.close() 91 | return m.hexdigest() 92 | else: 93 | return None 94 | 95 | def load(self): 96 | ''' 97 | Load file from disk. Converts object to mode FILEINMEMORY 98 | ''' 99 | if not self.mode == FILEONDISK: 100 | return False 101 | try: 102 | fh = open(self.diskpath, 'r') 103 | self.update(fh.read()) 104 | fh.close() 105 | self.mode = FILEINMEMORY 106 | except: 107 | return False 108 | 109 | def write(self, path='.', name=None, clobber=False, errorHandler=None, padding=None, overlap=True): 110 | ''' 111 | Write file contents at location relative to path. 112 | Name on disk will be based on internal name unless one is provided. 
113 | 114 | For mode FILEINMEMORY, file will data() will be called for reconstruction. 115 | After writing to disk, mode will be changed to FILEONDISK. 116 | If mode is already FILEONDISK, file will be moved to new location. 117 | 118 | ''' 119 | olddiskpath = self.diskpath 120 | if name == None: 121 | name = self.name 122 | self.diskpath = self.__localfilename(name, path, clobber) 123 | if self.mode == FILEINMEMORY: 124 | fh = open(self.diskpath, 'w') 125 | fh.write(self.data()) 126 | fh.close() 127 | self.segments = {} 128 | self.startoffset = 0 129 | self.endoffset = 0 130 | return self.diskpath 131 | elif self.mode == FILEONDISK: 132 | move(olddiskpath, self.diskpath) 133 | return self.diskpath 134 | 135 | def update(self, data, offset=None): 136 | if self.mode != FILEINMEMORY: 137 | return 138 | # if offsets are not being provided, just keep packets in wire order 139 | if offset == None: 140 | offset = self.endoffset 141 | # don't buffer duplicate packets 142 | if offset not in self.segments: 143 | self.segments[offset] = data 144 | # update the end offset if this packet goes at the end 145 | if offset >= self.endoffset: 146 | self.endoffset = offset + len(data) 147 | 148 | # 149 | # Generate a local (extracted) filename based on the original 150 | # 151 | def __localfilename(self, origname, path='.', clobber=False): 152 | tmp = origname.replace("\\", "_") 153 | tmp = tmp.replace("/", "_") 154 | tmp = tmp.replace(":", "_") 155 | tmp = tmp.replace("?", "_") 156 | tmp = tmp.lstrip('_') 157 | localname = '' 158 | for c in tmp: 159 | if ord(c) > 32 and ord(c) < 127: 160 | localname += c 161 | else: 162 | localname += "%%%02X" % ord(c) 163 | # Truncate (from left) to max filename length on filesystem (-3 in case 164 | # we need to add a suffix) 165 | localname = localname[os.statvfs(path).f_namemax * -1:] 166 | # Empty filename not allowed 167 | if localname == '': 168 | localname = 'blank' 169 | localname = os.path.realpath(os.path.join(path, localname)) 170 | if 
clobber: 171 | return localname 172 | # No Clobber mode, check to see if file exists 173 | suffix = '' 174 | i = 0 175 | while os.path.exists(localname + suffix): 176 | i += 1 177 | suffix = "_%02d" % i 178 | return localname + suffix 179 | -------------------------------------------------------------------------------- /dshell-defcon/lib/dnsdecoder.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import dshell 4 | import util 5 | import dpkt 6 | 7 | 8 | class DNSDecoder(dshell.TCPDecoder): 9 | 10 | '''extend DNSDecoder to handle DNS request/responses 11 | pairs request and response(s) by connection and query ID 12 | to allow for detection of DNS spoofing, etc.. (multiple responses to request with same ID) 13 | will call DNSHandler( 14 | conn=Connection(), 15 | request=dpkt.dns.DNS, 16 | response=dpkt.dns.DNS, 17 | requesttime=timestamp, responsetime=timestamp, 18 | responsecount=responsecount 19 | ) 20 | after each response. 21 | 22 | config: noanswer: if True and discarding w/o response, will call with response,responsetime=None,None (True) 23 | 24 | ''' 25 | 26 | def __init__(self, **kwargs): 27 | self.noanswer = True 28 | dshell.TCPDecoder.__init__(self, **kwargs) # DNS is over UDP and TCP! 
29 | self.requests = {} 30 | self.maxblobs = None 31 | 32 | def packetHandler(self,udp,data): 33 | '''for each UDP packet , examine each segment (UDP packet) seperately as each will be a DNS Q/A 34 | pair Q/A by ID and return as pairs''' 35 | addr=udp.addr 36 | if addr[0][1] < addr[1][1]: addr=addr[1],addr[0] #swap ports if source port is lower, to keep tuple (client,server) 37 | connrqs = self.requests.setdefault(addr, {}) 38 | try: 39 | dns = dpkt.dns.DNS(data) 40 | except Exception, e: 41 | self._exc(e) 42 | if dns.qr == dpkt.dns.DNS_Q: 43 | connrqs[dns.id] = [udp.ts, dns, 0] 44 | elif dns.qr == dpkt.dns.DNS_A: 45 | rq = connrqs.get(dns.id, [None, None, 0]) 46 | rq[2] += 1 47 | if "DNSHandler" in dir(self): 48 | self.DNSHandler(conn=udp, request=rq[1], response=dns, requesttime=rq[0], 49 | responsetime=udp.ts, responsecount=rq[2]) 50 | 51 | def blobHandler(self, conn, blob): 52 | '''for each blob, examine each segment (UDP packet) seperately as each will be a DNS Q/A 53 | pair Q/A by ID and return as pairs''' 54 | connrqs = self.requests.setdefault(conn, {}) 55 | # iterate blob as each packet will be a seperate request (catches spoofing) 56 | for data in blob: 57 | try: 58 | dns = dpkt.dns.DNS(data) 59 | except Exception, e: 60 | self._exc(e) 61 | continue 62 | if dns.qr == dpkt.dns.DNS_Q: 63 | connrqs[dns.id] = [blob.starttime, dns, 0] 64 | elif dns.qr == dpkt.dns.DNS_A: 65 | rq = connrqs.get(dns.id, [None, None, 0]) 66 | rq[2] += 1 67 | if "DNSHandler" in dir(self): 68 | self.DNSHandler(conn=conn, request=rq[1], response=dns, requesttime=rq[0], 69 | responsetime=blob.starttime, responsecount=rq[2]) 70 | 71 | def connectionHandler(self,conn): 72 | '''clean up unanswered requests when we discard the connection''' 73 | if self.noanswer and "DNSHandler" in dir(self) and self.requests.get(conn): 74 | for requesttime, request, responsecount in self.requests[conn].values(): 75 | if not responsecount: 76 | if type(conn) is tuple: conn=dshell.Packet(self,conn) #wrap 
UDP addresses 77 | self.DNSHandler(conn=conn, request=request, response=None, 78 | requesttime=requesttime, responsetime=None, responsecount=responsecount) 79 | if conn in self.requests: 80 | del self.requests[conn] 81 | 82 | def postModule(self): 83 | '''flush out all remaining request state when module exits''' 84 | for conn in self.requests.keys(): 85 | self.connectionHandler(conn) 86 | 87 | 88 | class displaystub(dshell.Decoder): 89 | 90 | def __init__(self): 91 | dshell.Decoder.__init__(self, 92 | name='dnsdecoder', 93 | description='Intermediate class to support DNS based decoders.', 94 | longdescription="See source code or pydoc for details on use." 95 | ) 96 | 97 | if __name__ == '__main__': 98 | dObj = displaystub() 99 | print dObj 100 | else: # do we always want to print something here? Maybe only in debug mode?: 101 | dObj = displaystub() 102 | -------------------------------------------------------------------------------- /dshell-defcon/lib/httpdecoder.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import dshell 3 | import util 4 | import dpkt 5 | 6 | # for HTTPDecoder gzip decompression 7 | import gzip 8 | import cStringIO 9 | 10 | 11 | class HTTPDecoder(dshell.TCPDecoder): 12 | 13 | '''extend HTTPDecoder to handle HTTP request/responses 14 | will call HTTPHandler( 15 | conn=Connection(), 16 | request=dpkt.http.Request, 17 | response=dpkt.http.Response, 18 | requesttime=timestamp, responsetime=timestamp 19 | ) 20 | after each response. 
21 | 22 | config: noresponse: if True and connection closes w/o response, will call with response,responsetime=None,None (True) 23 | gunzip: if True will decompress gzip encoded response bodies (default True) 24 | 25 | ''' 26 | 27 | def __init__(self, **kwargs): 28 | self.noresponse = True 29 | self.gunzip = True 30 | dshell.TCPDecoder.__init__(self, **kwargs) 31 | self.requests = {} 32 | 33 | # Custom error handler for data reassembly --- ignores errors, keep data 34 | def errorH(self, **x): 35 | return True 36 | 37 | def blobHandler(self, conn, blob): 38 | '''buffer the request blob and call the handler once we have the response blob''' 39 | if blob.direction == 'cs': 40 | try: 41 | self.requests[conn] = ( 42 | blob.starttime, dpkt.http.Request(blob.data(self.errorH))) 43 | except Exception, e: 44 | self.UnpackError(e) 45 | elif blob.direction == 'sc' and conn in self.requests: 46 | try: 47 | if 'HTTPHandler' in dir(self): 48 | response = dpkt.http.Response(blob.data(self.errorH)) 49 | if self.gunzip and 'gzip' in util.getHeader(response, 'content-encoding'): 50 | bodyUnzip = self.decompressGzipContent(response.body) 51 | if bodyUnzip != None: 52 | response.body = bodyUnzip 53 | self.HTTPHandler(conn=conn, 54 | request=self.requests[conn][1], 55 | response=response, 56 | requesttime=self.requests[conn][0], 57 | responsetime=blob.starttime) 58 | del self.requests[conn] 59 | except Exception, e: 60 | self.UnpackError(e) 61 | self.HTTPHandler(conn=conn, request=self.requests[conn][ 62 | 1], response=None, requesttime=self.requests[conn][0], responsetime=blob.starttime) 63 | del self.requests[conn] 64 | 65 | def connectionHandler(self, conn): 66 | '''when the connection closes, flush out any request blobs that did not have a response''' 67 | if conn in self.requests: 68 | if self.noresponse and 'HTTPHandler' in dir(self): 69 | self.HTTPHandler(conn=conn, 70 | request=self.requests[conn][1], 71 | response=None, 72 | requesttime=self.requests[conn][0], 73 | 
responsetime=self.requests[conn][0]) 74 | del self.requests[conn] 75 | 76 | def decompressGzipContent(self, httpcontent): 77 | '''utility function to decompress gzip compressed content''' 78 | cstr = cStringIO.StringIO(httpcontent) 79 | try: 80 | return gzip.GzipFile(fileobj=cstr).read() 81 | except: 82 | return None 83 | 84 | def UnpackError(self, error): 85 | self._exc(error) 86 | 87 | 88 | class displaystub(dshell.Decoder): 89 | 90 | def __init__(self): 91 | dshell.Decoder.__init__(self, 92 | name='httpdecoder', 93 | description='Intermediate class to support HTTP based decoders.', 94 | longdescription="See source code or pydoc for details on use." 95 | ) 96 | 97 | if __name__ == '__main__': 98 | dObj = displaystub() 99 | print dObj 100 | else: 101 | dObj = displaystub() 102 | -------------------------------------------------------------------------------- /dshell-defcon/lib/output/csvout.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @author: tparker 3 | ''' 4 | 5 | import output 6 | import util 7 | 8 | 9 | class CSVOutput(output.TextOutput): 10 | 11 | ''' 12 | CSV Output module 13 | use with --output=csvout,[,data,customfield[:type],...] (a list of field:types to append to end of default format) 14 | add [,file=...[,mode=...]] to write to outfile (or use -w arg on cmdline) 15 | add format=... 
to replace the default fields or use a format string 16 | add delim= to change delimeter from comma 17 | ''' 18 | 19 | _NULL = '' 20 | 21 | _DEFAULT_DELIM = ',' 22 | 23 | _DEFAULT_FIELDS = [('decoder', 's'), ('datetime', 's'), 24 | ('sip', 's'), ('sport', 's'), ('dip', 's'), ('dport', 's')] 25 | 26 | def __init__(self, *args, **kwargs): 27 | ''' 28 | sets up an output module, be sure to call Output.__init__ first or last 29 | args will have the name of the module as args[0], anything else after 30 | ''' 31 | # start with a set of default fields 32 | fields = self._DEFAULT_FIELDS 33 | 34 | if 'format' in kwargs: 35 | fields = [] 36 | fmtstr = kwargs['format'] 37 | del kwargs['format'] # don't let base class process this 38 | else: 39 | fmtstr = '' 40 | 41 | # set delimiter 42 | if 'delim' in kwargs: 43 | delim = kwargs['delim'] 44 | if delim.lower() == 'tab': 45 | delim = "\t" 46 | else: 47 | delim = self._DEFAULT_DELIM 48 | 49 | # parse args as fields 50 | if len(args): 51 | for a in args: 52 | try: 53 | f, t = a.split(':') # split on field:type 54 | except: 55 | f, t = a, 's' # default to string type 56 | fields.append((f, t)) 57 | 58 | # build format string to pass to textoutput 59 | if fmtstr: 60 | fmtstr += delim 61 | fmtstr += delim.join(['%%(%s)%s' % (f, t) for f, t in fields]) 62 | 63 | # everything else is exactly like the text output module 64 | output.TextOutput.__init__(self, format=fmtstr, **kwargs) 65 | 66 | # print header if not suppressed 67 | if self.fh and 'noheader' not in kwargs: 68 | self.fh.write('#' + delim.join([f[0] for f in fields]) + "\n") 69 | 70 | '''NOTE: output modules return obj=reference to the CLASS 71 | instead of a dObj=instance so we can init with args''' 72 | obj = CSVOutput 73 | -------------------------------------------------------------------------------- /dshell-defcon/lib/output/jsonout.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @author: amm 3 | ''' 4 | 5 | import 
output 6 | import datetime 7 | import json 8 | 9 | 10 | class JSONOutput(output.TextOutput): 11 | 12 | ''' 13 | JSON Output module 14 | use with --output=jsonout 15 | 16 | usage: as with csvout, you can pass a list of field names that will be included in the JSON output 17 | 18 | options 19 | ------- 20 | geoip: If set to Y, output module won't discard geoip tags 21 | notrim: If set to Y, do not trim any fields from the output 22 | ensure_ascii: Enable this option in json library 23 | 24 | ''' 25 | 26 | _TIMESTAMP_FIELDS = ( 27 | 'ts', 'starttime', 'endtime', 'request_time', 'response_time') 28 | 29 | def __init__(self, *args, **kwargs): 30 | 31 | # Options 32 | self.options = {} 33 | for o in ('geoip', 'notrim', 'ensure_ascii'): 34 | self.options[o] = False 35 | if o in kwargs: 36 | if kwargs[o] == True or kwargs[o].upper() in ('Y', 'T', '1', 'YES', 'ON', 'TRUE'): 37 | self.options[o] = True 38 | del kwargs[o] 39 | 40 | # Args as fields 41 | self.jsonfields = None 42 | if len(args): 43 | self.jsonfields = [] 44 | for a in args: 45 | self.jsonfields.append(a) 46 | 47 | # Call parent init 48 | output.TextOutput.__init__(self, **kwargs) 49 | 50 | def alert(self, *args, **kw): 51 | 52 | # User specified field list?? 
53 | if self.jsonfields != None: 54 | for f in kw.keys(): 55 | if f not in self.jsonfields: 56 | del kw[f] 57 | elif not self.options['notrim']: 58 | # Remove Common Redundant Fields 59 | for name in ('addr', 'direction', 'clientport', 'serverport', 'clientip', 'serverip', 'sipint', 'dipint'): 60 | if name in kw: 61 | del kw[name] 62 | # Time Fields 63 | # Rename 'ts' to 'starttime' if 'starttime' not present 64 | if 'ts' in kw: 65 | if 'starttime' not in kw: 66 | kw['starttime'] = kw['ts'] 67 | del kw['ts'] 68 | # Convert known timestamp fields to string format 69 | for name in self._TIMESTAMP_FIELDS: 70 | try: 71 | kw[name] = datetime.datetime.fromtimestamp( 72 | float(kw[name])).strftime(self.timeformat) 73 | except: 74 | pass 75 | # Remove GEOIP Fields 76 | if not self.options['geoip']: 77 | for name in ('servercountrycode', 'clientcountrycode', 'sipcc', 'dipcc', 'clientasn', 'serverasn', 'dipasn', 'sipasn'): 78 | if name in kw: 79 | del kw[name] 80 | self.fh.write( 81 | json.dumps(kw, ensure_ascii=self.options['ensure_ascii']) + "\n") 82 | if self.nobuffer: 83 | self.fh.flush() 84 | 85 | obj = JSONOutput 86 | -------------------------------------------------------------------------------- /dshell-defcon/lib/output/netflowout.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @author: amm 3 | ''' 4 | 5 | import output 6 | import util 7 | import sys 8 | import datetime 9 | 10 | 11 | class NetflowOutput(output.TextOutput): 12 | 13 | ''' 14 | Netflow Output module 15 | use with --output=netflowoutput 16 | ` use group=clientip,serverip for grouping by clientip,serverip 17 | ''' 18 | #_DEFAULT_FIELDS=[('decoder','s'),('datetime','s'),('sip','s'),('sport','s'),('dip','s'),('dport','s')] 19 | #_DEFAULT_FORMAT="%(starttime)s %(sip)16s:%(sport)-5s -> %(dip)16s:%(dport)-5s" 20 | 21 | def __init__(self, *args, **kwargs): 22 | self.group = kwargs.get('group') 23 | self.groups = {} 24 | if self.group: 25 | self.group = 
self.group.split('/') 26 | # Call parent init 27 | output.TextOutput.__init__(self, **kwargs) 28 | 29 | def alert(self, *args, **kw): 30 | if self.group: 31 | k = tuple(kw[g] for g in self.group) # group by selected fields 32 | if k not in self.groups: 33 | r = k[::-1] 34 | if r in self.groups: 35 | k = r # is other dir in groups 36 | else: 37 | self.groups[k] = [] 38 | self.groups[k].append(kw) 39 | else: 40 | self.__alert(**kw) # not grouping, just print it 41 | 42 | def close(self): 43 | # dump groups if we are closing output 44 | if self.group: 45 | for k in sorted(self.groups.iterkeys()): 46 | # write header 47 | self.fh.write(' '.join( 48 | '%s=%s' % (self.group[i], k[i]) for i in xrange(len(self.group))) + '\n') 49 | for kw in self.groups[k]: 50 | self.fh.write('\t') 51 | self.__alert(self, **kw) 52 | self.fh.write('\n') 53 | output.TextOutput.close(self) 54 | 55 | def __alert(self, *args, **kw): 56 | self.fh.write('%s %16s -> %16s (%s -> %s) %4s %6s %6s %5d %5d %7d %7d %-.4fs\n' % (datetime.datetime.utcfromtimestamp(kw['starttime']), 57 | kw[ 58 | 'clientip'], 59 | kw[ 60 | 'serverip'], 61 | kw[ 62 | 'clientcountrycode'], 63 | kw[ 64 | 'servercountrycode'], 65 | kw[ 66 | 'proto'], 67 | kw[ 68 | 'clientport'], 69 | kw[ 70 | 'serverport'], 71 | kw[ 72 | 'clientpackets'], 73 | kw[ 74 | 'serverpackets'], 75 | kw[ 76 | 'clientbytes'], 77 | kw[ 78 | 'serverbytes'], 79 | ( 80 | kw['endtime'] - kw['starttime']) 81 | ) 82 | ) 83 | if self.nobuffer: 84 | self.fh.flush() 85 | 86 | obj = NetflowOutput 87 | -------------------------------------------------------------------------------- /dshell-defcon/lib/output/xmlout.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @author: tparker 3 | ''' 4 | 5 | import output 6 | import util 7 | import dshell 8 | from xml.etree import ElementTree as ET 9 | 10 | 11 | class XMLOutput(output.FileOutput): 12 | 13 | '''XMLOutput Module''' 14 | 15 | def __init__(self, *args, **kwargs): 16 | 
'''init the underlying file output to get the file handle''' 17 | output.FileOutput.__init__( 18 | self, *args, **kwargs) # pass all to fileoutput 19 | self.root = ET.Element('dshell') 20 | self.element = self.root 21 | 22 | def alert(self, *args, **kwargs): 23 | '''we will assume we get alerts before we get the matching session data''' 24 | self.element = ET.SubElement( 25 | self.root, 'alert', self._filter_attr(kwargs)) 26 | self.element.text = self._filter_text(' '.join(args)) 27 | 28 | def write(self, obj, parent=None, **kwargs): 29 | '''write the object data under the last alert element (or the root if no alert) 30 | if a conn object recurse in by iterating 31 | else write the string output of the object''' 32 | if not parent: 33 | parent = self.element 34 | kw = dict(**kwargs) 35 | # turns "" into "yyyy" 36 | tag = str(type(obj)).split("'", 2)[1] 37 | if tag.startswith('dshell.'): # is a dshell object 38 | kw.update(**obj.info()) # get attribs 39 | # turns "dshell.Connection" into "Connection" 40 | tag = tag.split('dshell.')[1] 41 | e = ET.SubElement(parent, tag, self._filter_attr(kw)) 42 | if tag == 'Connection': # recurse on blobs in conn 43 | for blob in obj: 44 | self.write(blob, parent=e) 45 | return # subobjects will have the data 46 | # leave this up to the object to handle 47 | e.text = self._filter_text(str(obj)) 48 | 49 | def _filter_attr(self, d): return dict((k, str(v)) 50 | for (k, v) in d.iteritems()) 51 | 52 | def _filter_text(self, t): return ''.join(c for c in t if ord(c) < 128) 53 | 54 | def close(self): 55 | '''write the ElementTree to the file''' 56 | ET.ElementTree(self.root).write(self.fh) 57 | 58 | '''NOTE: output modules return obj=reference to the CLASS 59 | instead of a dObj=instance so we can init with args''' 60 | obj = XMLOutput 61 | -------------------------------------------------------------------------------- /dshell-defcon/share/GeoIP/readme.txt: -------------------------------------------------------------------------------- 
1 | GeoIP Legacy data sets go here. 2 | -------------------------------------------------------------------------------- /dshell-defcon/tester.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | import os 3 | import tempfile 4 | import sys 5 | import subprocess 6 | import multiprocessing 7 | import struct 8 | import psutil 9 | import hashlib 10 | 11 | APFILE=sys.argv[1] 12 | SERVICE=sys.argv[2] 13 | PORT=int(sys.argv[3]) 14 | 15 | def loconn(loc): 16 | ret = os.popen("./offset2stream.py %s %d locconn /dev/null /dev/stdout" % (APFILE, loc)).read().strip() 17 | if not ret: 18 | return None 19 | else: 20 | return int(ret.split()[1]) 21 | 22 | def gen_replay(loc): 23 | fd, fname = tempfile.mkstemp(".py") 24 | os.close(fd) 25 | os.system("./offset2stream.py %s %d pythondiff /dev/null %s" % (APFILE, loc, fname)) 26 | return fname 27 | 28 | _fd, TMPSCRIPTNAME = tempfile.mkstemp(".sh") 29 | os.close(_fd) 30 | os.chmod(TMPSCRIPTNAME, 0755) 31 | fff = open(TMPSCRIPTNAME, "w") 32 | if os.getenv("ARCH") == "mips": 33 | fff.write("#!/bin/bash\ncd %s;LD_PRELOAD=/tmp/qemu.so qemu-mipsel -U LD_PRELOAD -L /mnt/rootfs-mips -strace %s 2>&1" % (os.path.dirname(SERVICE), SERVICE)) 34 | else: 35 | fff.write("#!/bin/bash\ncd %s;LD_PRELOAD=/tmp/qemu.so qemu-x86_64 -U LD_PRELOAD -strace %s 2>&1" % (os.path.dirname(SERVICE), SERVICE)) 36 | fff.close() 37 | 38 | 39 | existed = set() 40 | 41 | mng = multiprocessing.Manager() 42 | existed = mng.dict() 43 | lock = mng.Lock() 44 | 45 | def process(loc): 46 | pocfname = gen_replay(loc) 47 | fffpoc = open(pocfname, "rb") 48 | poccode = fffpoc.read() 49 | fffpoc.close() 50 | codehash = hashlib.sha1(poccode).hexdigest() 51 | lock.acquire() 52 | if codehash in existed: 53 | lock.release() 54 | os.unlink(pocfname) 55 | return loc, -2 56 | existed[codehash] = 1 57 | lock.release() 58 | checker_popen = os.popen("python2 %s 127.0.0.1 %d 2>&1 r" % (pocfname, PORT)) 59 | checker_data = 
checker_popen.read() 60 | checker_popen.close() 61 | os.unlink(pocfname) 62 | if "FARKFARKFARK" in checker_data: 63 | return loc, loc 64 | else: 65 | return loc, -1 66 | 67 | socatproc = subprocess.Popen("socat tcp-listen:%d,fork,reuseaddr exec:%s,su=nobody >/dev/null 2>/dev/null" % (PORT, TMPSCRIPTNAME), shell=True) 68 | 69 | 70 | curloc = 0 71 | fsize = os.stat(APFILE).st_size 72 | fff = open(APFILE, "rb") 73 | fff.seek(-4, 2) 74 | ttt = struct.unpack('I', fff.read(4))[0] 75 | fff.close() 76 | fsize = fsize - (ttt + 1) * 4 77 | 78 | def tasks(): 79 | global curloc 80 | while curloc < fsize: 81 | nextloc = loconn(curloc) 82 | if nextloc != None: 83 | yield curloc 84 | curloc = nextloc 85 | else: 86 | curloc = curloc + 1 87 | 88 | pool = multiprocessing.Pool(100) 89 | 90 | ret = pool.imap(process, tasks()) 91 | 92 | for i in ret: 93 | if i[1] > 0: 94 | print i[1] 95 | 96 | #socatproc.kill() 97 | #socatproc.send_signal(9) 98 | def kill(proc_pid): 99 | process = psutil.Process(proc_pid) 100 | for proc in process.children(recursive=True): 101 | proc.kill() 102 | process.kill() 103 | 104 | kill(socatproc.pid) 105 | os.unlink(TMPSCRIPTNAME) 106 | -------------------------------------------------------------------------------- /pcap2ap: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | set -e -u 3 | setopt nullglob 4 | 5 | program=$0 6 | dshell_defcon=${0:a:h}/dshell-defcon 7 | pcap_suffix=.cap 8 | ap_suffix=.ap 9 | opt_recursive= 10 | 11 | usage() { 12 | cat </dev/null 54 | stop=$(date +%s.%N) 55 | log_action created $ap_suffix for $filepath, size: $(stat -c %s $filepath), used $(bc -l<<<"scale=3;($stop-$start)/1") s 56 | print -p 57 | ) & 58 | } 59 | 60 | rm_data() { 61 | rm -fv $filepath$ap_suffix 62 | } 63 | 64 | parallel=$(nproc) 65 | while getopts hp:r opt; do 66 | case $opt; in 67 | h) usage 0;; 68 | p) parallel=$OPTARG;; 69 | r) opt_recursive=1;; 70 | \?) 
exit;; 71 | esac 72 | done 73 | shift $[OPTIND-1] 74 | 75 | echo +$@ 76 | 77 | if [[ -z ${1:-} ]]; then 78 | usage 1 79 | fi 80 | for i in $@; do 81 | [[ -d $i ]] || fatal 1 is not a directory 82 | done 83 | 84 | coproc semaphore 85 | 86 | main() { 87 | log_status processing $@ 88 | 89 | if [[ -n $opt_recursive ]]; then 90 | for i in $@/**/*$pcap_suffix; do 91 | [[ -e $i$ap_suffix && ! -z $i$ap_suffix ]] || add_data $i 92 | done 93 | else 94 | for i in $@/*$pcap_suffix; do 95 | [[ -e $i$ap_suffix && ! -z $i$ap_suffix ]] || add_data $i 96 | done 97 | fi 98 | 99 | log_status start inotify 100 | 101 | inotifywait ${opt_recursive:+-r} -mqe CREATE,CLOSE_WRITE,DELETE,MODIFY,MOVE --format $'%e\t%w\t%f' $@ | while IFS=$'\t' read -r event dir filename; do 102 | local filepath=$dir/$filename 103 | if [[ $event =~ 'CREATE|MOVED_TO' ]]; then 104 | if [[ $event =~ CREATE ]]; then 105 | log_event CREATE $filepath 106 | else 107 | log_event MOVED_TO $filepath 108 | fi 109 | if [[ ! $event =~ ISDIR && $filename =~ "\\$pcap_suffix\$" ]]; then 110 | if filetype=$(stat -c %F $filepath); then 111 | if [[ $filetype =~ symbolic ]]; then 112 | add_data $filepath 113 | elif [[ $filetype =~ regular ]]; then 114 | add $filepath 115 | fi 116 | fi 117 | fi 118 | elif [[ $event =~ 'DELETE|MOVED_FROM' ]]; then 119 | if [[ $event =~ DELETE ]]; then 120 | log_event DELETE $filepath 121 | else 122 | log_event MOVED_FROM $filepath 123 | fi 124 | if [[ ! 
$event =~ ISDIR && $filename =~ "\\$pcap_suffix\$" ]]; then 125 | del $filepath 126 | rm_data $filepath 127 | fi 128 | elif [[ $event =~ MODIFY ]]; then 129 | #log_event MODIFY $filepath 130 | if [[ $filename =~ "\\$pcap_suffix\$" ]]; then 131 | add $filepath 132 | fi 133 | elif [[ $event =~ CLOSE_WRITE ]]; then 134 | if [[ -n ${modified[$filepath]:+1} ]]; then 135 | log_event CLOSE_WRITE after MODIFY $filepath 136 | del $filepath 137 | add_data $filepath 138 | else 139 | log_event CLOSE_WRITE $filepath 140 | fi 141 | fi 142 | done 143 | } 144 | 145 | main $@ 146 | -------------------------------------------------------------------------------- /web/Makefile: -------------------------------------------------------------------------------- 1 | install-bower: 2 | npm i -g bower 3 | bower i 4 | 5 | install: install-bower 6 | 7 | .PHONY: install install-bower 8 | -------------------------------------------------------------------------------- /web/bower.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "pcap-search", 3 | "version": "0.0.1", 4 | "private": "true", 5 | "dependencies": { 6 | "jquery": "~2.1.4", 7 | "devbridge-autocomplete": "~1.2.21", 8 | "semantic-ui": "~2.0.7" 9 | }, 10 | "exportsOverride": { 11 | "jquery": { 12 | "js": "jquery.min.js" 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /web/css/style.sass: -------------------------------------------------------------------------------- 1 | =experimental($prop, $val) 2 | -webkit-#{$prop}: $val 3 | -moz-#{$prop}: $val 4 | -o-#{$prop}: $val 5 | -ms-#{$prop}: $val 6 | #{$prop}: $val 7 | 8 | =box-shadow($v) 9 | +experimental(box-shadow, $v) 10 | 11 | =border-radius($v) 12 | +experimental(border-radius, $v) 13 | 14 | =box-sizing($v) 15 | +experimental(box-sizing, $v) 16 | 17 | html, body 18 | height: 100% 19 | margin: 0px 20 | padding: 0px 21 | 22 | $footer-h: 171px 23 | 24 | .wrap 25 | min-height: 
100% 26 | margin-bottom: (-$footer-h) 27 | 28 | .footer, .footer-push 29 | height: $footer-h 30 | 31 | * 32 | box-sizing: border-box 33 | 34 | #map 35 | position: relative 36 | width: 800px 37 | height: 600px 38 | margin: 0 auto 39 | 40 | body 41 | font-size: 16px 42 | font-family: "Open Sans", "Helvetica Neue", "Helvetica", "Arial", sans-serif 43 | line-height: 1.3 44 | background: #FFFFFF 45 | color: #444 46 | 47 | .ui.header 48 | font-family: 'Source Sans Pro', "Helvetica Neue", "Helvetica", "Arial", sans-serif 49 | 50 | a 51 | color: #009FDA 52 | text-decoration: none 53 | transition: color 0.3s ease 54 | 55 | p a 56 | font-weight: bold 57 | 58 | .ui.page.grid.segment 59 | margin-top: 0 60 | padding-bottom: 2.5rem 61 | 62 | .masthead.ui.inverted.segment 63 | background-image: url(/img/bg.jpg) 64 | background-size: cover 65 | padding-top: 0 66 | padding-bottom: 0 67 | overflow: hidden 68 | 69 | .column 70 | position: relative 71 | 72 | .image 73 | position: absolute 74 | left: 0 75 | 76 | .information 77 | margin: 6em 1em 8em 380px 78 | 79 | footer 80 | text-align: center 81 | 82 | .autocomplete-suggestions 83 | border: 1px solid #999 84 | background: #FFF 85 | overflow: auto 86 | 87 | .autocomplete-suggestion 88 | padding: 2px 5px 89 | white-space: nowrap 90 | overflow: hidden 91 | 92 | .autocomplete-selected 93 | background: #F0F0F0 94 | 95 | .autocomplete-suggestions strong 96 | font-weight: normal 97 | color: #3399FF 98 | 99 | .main-form 100 | margin: 0 auto 101 | text-align: center 102 | 103 | .ui.action 104 | margin: 0 auto 105 | 106 | .ui.action.input 107 | display: inline-table 108 | vertical-align: middle 109 | 110 | input 111 | width: 32em 112 | 113 | footer 114 | line-height: 1.5 115 | 116 | // inline form hack 117 | 118 | .ui.input input 119 | padding: .6em 1em 120 | 121 | .ui.button 122 | padding: .75em 1.5em 123 | 124 | .hidden 125 | visibility: hidden 126 | 127 | 128 | // typography 129 | 130 | .ui.checkbox 131 | label 132 | color: #fff 133 | 134 
| pre 135 | padding: .5em 136 | border-radius: .4em 137 | background: #1d1f21 138 | color: #c5c8c6 139 | .kw 140 | color: #b294bb 141 | .dv, .bn, .fl 142 | color: #de935f 143 | 144 | h1, h2, h3, h4 145 | font-family: "Source Sans Pro", "Helvetica Neue", "Helvetica", "Arial", sans-serif 146 | border-bottom: 1px solid rgba(0,0,0,0.1) 147 | margin: 1em 0 1em 148 | 149 | h1::selection, h2::selection, h3::selection, h4::selection, h5::selection 150 | color: #111 151 | background: #F1C1C2 152 | 153 | h1 154 | font-size: 2rem 155 | line-height: 1.33 156 | 157 | h2 158 | font-size: 1.75rem 159 | line-height: 1.33 160 | 161 | h3 162 | font-size: 1.33rem 163 | line-height: 1.33 164 | 165 | h4 166 | font-size: 1.2rem 167 | line-height: 1.33 168 | color: #444 169 | 170 | h5 171 | font-size: 1rem 172 | line-height: 1.33 173 | color: #444 174 | 175 | .math, code 176 | display: inline-block 177 | background-color: rgba(0, 0, 0, 0.02) 178 | margin: .25em 179 | padding: .125em .5em 180 | border-radius: 3px 181 | 182 | // widgets 183 | 184 | #peek 185 | position: fixed 186 | left: 100px 187 | top: 100px 188 | 189 | .menu 190 | .active:nth-child(3n+1) 191 | border-right-color: #56bb73 192 | color: #56bb73 193 | .active:nth-child(3n+2) 194 | border-right-color: #ef3f49 195 | color: #ef3f49 196 | .active:nth-child(3n+3) 197 | border-right-color: #a24096 198 | color: #a24096 199 | 200 | code 201 | background-color: transparent 202 | 203 | .hex 204 | background-color: rgba(0, 0, 0, 0.05) 205 | color: #666666 206 | padding-left: 2px 207 | padding-right: 2px 208 | 209 | .red 210 | background: #f0f000 211 | 212 | // layout 213 | 214 | main 215 | margin-left: 230px 216 | 217 | p > img 218 | display: block 219 | 220 | img 221 | max-width: 80% 222 | 223 | .pagination-wrap 224 | text-align: center !important 225 | 226 | footer.page.grid 227 | text-align: center !important 228 | -------------------------------------------------------------------------------- /web/html/search.slim: 
-------------------------------------------------------------------------------- 1 | doctype 5 2 | html 3 | head 4 | meta lang='utf-8' 5 | link rel='stylesheet' href='/bower_components/semantic-ui/dist/semantic.min.css' 6 | link rel='stylesheet' href='/css/style.css' 7 | body 8 | .ui.inverted.page.grid.masthead.segment 9 | .column 10 | form.form.segment.main-form 11 | .inline.field.ui.left.icon.action.input style='margin-left:1em;' 12 | i.icon.search 13 | input#q type='text' name='q' value=q 14 | button.ui.blue.submit.button Go 15 | 16 | .ui.page.grid 17 | .column.pagination-wrap 18 | - if pages > 1 19 | - l = [page-15, 0].max 20 | - h = [page+15, pages].min 21 | .ui.pagination.menu 22 | - if l > 0 23 | a.icon.item href="/search?q=#{CGI.escape q}&page=#{0}" 24 | i.left.icon.arrow 25 | - (l...h).each do |p| 26 | a.item class=(p == page ? 'active' : '') href="/search?q=#{CGI.escape q}&page=#{p}" = p 27 | - if h < pages 28 | a.icon.item href="/search?q=#{CGI.escape q}&page=#{h-1}" 29 | i.right.icon.arrow 30 | .ui.page.grid 31 | .sixteen.wide.column 32 | .ui.feed 33 | - result.each do |r| 34 | .event 35 | .content 36 | .summary 37 | a.title href="http://#{r[:uri]}" target='_blank' 38 | = r[:filename] 39 | ' 40 | = r[:offset] 41 | code== r[:context] 42 | 43 | script src='/bower_components/jquery/jquery.min.js' 44 | script src='/bower_components/devbridge-autocomplete/dist/jquery.autocomplete.min.js' 45 | script src='/bower_components/semantic-ui/dist/semantic.min.js' 46 | script src='/js/search.js' 47 | -------------------------------------------------------------------------------- /web/js/search.coffee: -------------------------------------------------------------------------------- 1 | console.log 'meow' 2 | 3 | $('#q').autocomplete 4 | serviceUrl: '/api/autocomplete' 5 | -------------------------------------------------------------------------------- /web/static/themes/default/assets/fonts/icons.woff2: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaskRay/pcap-search/6985a3b18d9505229328f63ddb3256ff61c4876c/web/static/themes/default/assets/fonts/icons.woff2 -------------------------------------------------------------------------------- /web/web.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'json' 4 | require 'socket' 5 | require 'tempfile' 6 | require 'timeout' 7 | begin 8 | require 'tilt' 9 | require 'sass' 10 | require 'slim' 11 | require 'coffee-script' 12 | require 'sinatra' 13 | require 'sinatra/reloader' 14 | rescue LoadError => e 15 | STDERR.puts e.message 16 | STDERR.puts 'gem install sinatra sinatra-contrib tilt sass slim coffee-script' 17 | exit 1 18 | end 19 | 20 | SEARCH_SOCK = '/tmp/search.sock' 21 | SEARCH_TIMEOUT = 30 22 | MAX_PAGES = 30 23 | PER_PAGE = 20 24 | DSHELL_DEFCON = File.join __dir__, '..', 'dshell-defcon' 25 | PCAP_DIR = File.expand_path '/home/ray/defcon24' 26 | 27 | # Main 28 | 29 | configure :development do 30 | register Sinatra::Reloader 31 | end 32 | 33 | set :static, true 34 | set :public_folder, File.join(__dir__, "static") 35 | set :views, __dir__ 36 | set :bind, '0' 37 | set :port, 4568 38 | 39 | set :views, sass: 'css', coffee: 'js', :default => 'html' 40 | 41 | def offset2stream filepath, offset, type, out, &block 42 | IO.popen([File.join(DSHELL_DEFCON, 'offset2stream.py'), "#{filepath}.ap", offset.to_s, type, filepath, out], &block) 43 | end 44 | 45 | helpers do 46 | def find_template(views, name, engine, &block) 47 | _, folder = views.detect { |k,v| engine == Tilt[k] } 48 | folder ||= views[:default] 49 | super(folder, name, engine, &block) 50 | end 51 | end 52 | 53 | before do 54 | response.headers['Access-Control-Allow-Origin'] = '*' 55 | end 56 | 57 | get '/' do 58 | send_file File.join(__dir__,'static','index.html') 59 | end 60 | 61 | get '/download' do 62 | query = 
Rack::Utils.parse_query request.query_string 63 | filename = query['filename'] 64 | offset = query['offset'] 65 | type = query['type'] 66 | service = query['service'] || 'all' 67 | unless filename && type 68 | return 412 69 | end 70 | case type 71 | when 'all' 72 | content_type 'application/vnd.tcpdump.pcap' 73 | attachment filename 74 | send_file File.join(PCAP_DIR, service, filename) 75 | when 'pcap', 'str', 'hex', 'repr', 'c', 'pythonsimple', 'pythondiff' 76 | return 412 unless offset 77 | if type == 'pcap' 78 | content_type 'application/vnd.tcpdump.pcap' 79 | attachment "#{filename.sub(/\.cap$/, '')}@#{offset}.cap" 80 | end 81 | temp_file = Tempfile.new filename 82 | offset2stream File.join(PCAP_DIR, service, filename), offset, type, temp_file.path do |h| 83 | h.read 84 | end 85 | Thread.new do 86 | sleep 1 87 | path = temp_file.path 88 | temp_file.close 89 | File.delete path 90 | end 91 | send_file temp_file 92 | else 93 | 412 94 | end 95 | end 96 | 97 | get '/api/list' do 98 | content_type :json 99 | Dir.entries(PCAP_DIR).select {|x| x !~ /^\./ && File.directory?(File.join PCAP_DIR, x) }.to_json 100 | end 101 | 102 | get '/api/autocomplete' do 103 | content_type :json 104 | query = Rack::Utils.parse_query request.query_string 105 | q = query['q'] || '' 106 | service = query['service'] || 'all' 107 | res = '' 108 | begin 109 | Timeout.timeout SEARCH_TIMEOUT do 110 | sock = Socket.new Socket::AF_UNIX, Socket::SOCK_STREAM, 0 111 | sock.connect Socket.pack_sockaddr_un(SEARCH_SOCK) 112 | sock.write "\0#{File.join PCAP_DIR, service, "\x01"}\0#{File.join PCAP_DIR, service, "\x7f"}\0#{q}" 113 | sock.close_write 114 | sug = [] 115 | sock.read.lines.each {|line| 116 | filepath, offset, context = line.chomp.split "\t" 117 | filepath = filepath.sub(/\.ap$/, '') 118 | offset = offset.to_i 119 | offset2stream filepath, offset, 'loc', '/dev/stdout' do |h| 120 | body = h.read 121 | if ! body.empty? 
122 | _, y = body.split.map(&:to_i) 123 | sug << context.scan(/(?:\\x(?:..)|[^\\]){,#{[y-offset,context.size].min}}/)[0] if offset < y 124 | end 125 | end 126 | } 127 | res = {query: q, suggestions: sug.uniq }.to_json 128 | sock.close 129 | end 130 | rescue => e 131 | STDERR.puts e.message 132 | STDERR.puts e.backtrace 133 | end 134 | res 135 | end 136 | 137 | get '/api/search' do 138 | query = Rack::Utils.parse_query request.query_string 139 | q = query['q'] || '' 140 | service = query['service'] || 'all' 141 | page = (query['page'] || 0).to_i 142 | offset = page*PER_PAGE 143 | res = '' 144 | total = 0 145 | 146 | qq = q.gsub(/\\[0-7]{1,3}/) {|match| 147 | "\\x#{'%02x' % match[1..-1].to_i(8)}" 148 | } 149 | .gsub('\\\\', '\\x5c') 150 | .gsub('\\a', '\\x07') 151 | .gsub('\\b', '\\x08') 152 | .gsub('\\t', '\\x09') 153 | .gsub('\\n', '\\x0a') 154 | .gsub('\\v', '\\x0b') 155 | .gsub('\\f', '\\x0c') 156 | .gsub('\\r', '\\x0d') 157 | 158 | begin 159 | Timeout.timeout SEARCH_TIMEOUT do 160 | sock = Socket.new Socket::AF_UNIX, Socket::SOCK_STREAM, 0 161 | sock.connect Socket.pack_sockaddr_un(SEARCH_SOCK) 162 | sock.write "#{offset}\0#{File.join PCAP_DIR, service, "\x01"}\0#{File.join PCAP_DIR, service, "\x7f"}\0#{qq}" 163 | sock.close_write 164 | lines = sock.read.lines 165 | sock.close 166 | total = [lines[-1].to_i, PER_PAGE*MAX_PAGES].min 167 | 168 | res = [] 169 | IO.popen [File.join(DSHELL_DEFCON, 'context.py')], 'r+' do |h| 170 | lines[0...-1].each {|line| 171 | filepath, offset, len = line.chomp.split "\t" 172 | h.puts "#{filepath}\t#{offset}\t#{len}" 173 | h.flush 174 | line = h.readline 175 | _, offset, epoch, port0, port1, context = line.chomp.split "\t" 176 | epoch = epoch.to_i 177 | if epoch >= 0 && context && ! context.empty? 
178 | res << {filename: filepath.sub(/.*\/(.*)\.ap$/, '\1'), offset: offset.to_i, epoch: epoch, port0: port0.to_i, port1: port1.to_i, context: context} 179 | end 180 | } 181 | end 182 | 183 | res_grouped = Hash.new {|h,k| h[k] = [] } 184 | res.each {|x| 185 | filename = x.delete :filename 186 | res_grouped[filename] << x 187 | } 188 | 189 | res = { 190 | query: qq, 191 | results: res_grouped 192 | }.to_json 193 | end 194 | rescue Timeout::Error => e 195 | STDERR.puts e.message 196 | rescue => e 197 | STDERR.puts e.message 198 | STDERR.puts e.backtrace 199 | else 200 | res 201 | end 202 | end 203 | --------------------------------------------------------------------------------