├── .gitignore
├── Dockerfile
├── Dshell-Training-Pack-0.1.tar.gz
├── Dshell_Developer_Guide.pdf
├── Dshell_User_Guide.pdf
├── LICENSE
├── README
├── README.md
├── dist
    └── Dshell-3.1.3.tar.gz
├── dshell
    ├── __init__.py
    ├── api.py
    ├── core.py
    ├── data
    │   ├── GeoIP
    │   │   └── readme.txt
    │   ├── dshellrc
    │   └── empty.pcap
    ├── decode.py
    ├── dshellargparse.py
    ├── dshellgeoip.py
    ├── dshelllist.py
    ├── output
    │   ├── __init__.py
    │   ├── alertout.py
    │   ├── colorout.py
    │   ├── csvout.py
    │   ├── elasticout.py
    │   ├── htmlout.py
    │   ├── jsonout.py
    │   ├── netflowout.py
    │   ├── output.py
    │   └── pcapout.py
    ├── plugins
    │   ├── __init__.py
    │   ├── dhcp
    │   │   ├── __init__.py
    │   │   └── dhcp.py
    │   ├── dns
    │   │   ├── __init__.py
    │   │   ├── dns.py
    │   │   ├── dnscc.py
    │   │   ├── innuendo-dns.py
    │   │   └── specialips.py
    │   ├── dnsplugin.py
    │   ├── filter
    │   │   ├── __init__.py
    │   │   ├── country.py
    │   │   └── track.py
    │   ├── flows
    │   │   ├── __init__.py
    │   │   ├── dataflows.py
    │   │   ├── largeflows.py
    │   │   ├── longflows.py
    │   │   ├── netflow.py
    │   │   ├── reverseflows.py
    │   │   └── toptalkers.py
    │   ├── ftp
    │   │   ├── __init__.py
    │   │   └── ftp.py
    │   ├── http
    │   │   ├── __init__.py
    │   │   ├── httpdump.py
    │   │   ├── joomla.py
    │   │   ├── ms15-034.py
    │   │   ├── riphttp.py
    │   │   └── web.py
    │   ├── httpplugin.py
    │   ├── malware
    │   │   ├── __init__.py
    │   │   └── sweetorange.py
    │   ├── misc
    │   │   ├── __init__.py
    │   │   ├── followstream.py
    │   │   ├── pcapwriter.py
    │   │   ├── search.py
    │   │   ├── sslalerts.py
    │   │   ├── synrst.py
    │   │   └── xor.py
    │   ├── nbns
    │   │   ├── __init__.py
    │   │   └── nbns.py
    │   ├── portscan
    │   │   ├── __init__.py
    │   │   ├── indegree.py
    │   │   └── trw.py
    │   ├── protocol
    │   │   ├── __init__.py
    │   │   ├── bitcoin.py
    │   │   ├── ether.py
    │   │   ├── ip.py
    │   │   └── protocol.py
    │   ├── ssh
    │   │   ├── __init__.py
    │   │   └── ssh-pubkey.py
    │   ├── ssl
    │   │   ├── __init__.py
    │   │   ├── sslblacklist.py
    │   │   └── tls.py
    │   ├── tftp
    │   │   ├── __init__.py
    │   │   └── tftp.py
    │   ├── visual
    │   │   ├── __init__.py
    │   │   └── piecharts.py
    │   ├── voip
    │   │   ├── __init__.py
    │   │   ├── rtp.py
    │   │   └── sip.py
    │   └── wifi
    │       ├── __init__.py
    │       ├── wifi80211.py
    │       └── wifibeacon.py
    └── util.py
├── scripts
    └── dshell
└── setup.py


/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | __pycache__
3 | Dshell.egg-info
4 | build/
5 | .idea/
6 | 


--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM python:3-alpine as builder
 2 | 
 3 | COPY . /src
 4 | 
 5 | WORKDIR /src
 6 | 
 7 | ARG OUI_SRC="http://standards-oui.ieee.org/oui/oui.txt"
 8 | 
 9 | ENV VIRTUAL_ENV="/opt/venv"
10 | 
11 | RUN apk add cargo curl g++ gcc rust libpcap-dev libffi-dev \
12 |     && python3 -m venv "${VIRTUAL_ENV}" \
13 |     && curl --location --silent --output "/src/dshell/data/oui.txt" "${OUI_SRC}"
14 | 
15 | ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
16 | 
17 | RUN pip install --upgrade pip wheel && pip install .
18 | 
19 | FROM python:3-alpine
20 | 
21 | ENV VIRTUAL_ENV="/opt/venv"
22 | 
23 | COPY --from=builder "${VIRTUAL_ENV}/" "${VIRTUAL_ENV}/"
24 | 
25 | RUN apk add --no-cache bash libstdc++ libpcap
26 | 
27 | VOLUME ["/data"]
28 | 
29 | WORKDIR "/data"
30 | 
31 | ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
32 | 
33 | ENTRYPOINT ["dshell"]
34 | 


--------------------------------------------------------------------------------
/Dshell-Training-Pack-0.1.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/Dshell-Training-Pack-0.1.tar.gz


--------------------------------------------------------------------------------
/Dshell_Developer_Guide.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/Dshell_Developer_Guide.pdf


--------------------------------------------------------------------------------
/Dshell_User_Guide.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/Dshell_User_Guide.pdf


--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | © (2020) United States Government, as represented by the Secretary of the Army. All rights reserved.
2 | 
3 | ICF Incorporated, L.L.C. contributed to the development of Dshell (Python 3).
4 | 
5 | Because the project utilizes code licensed from contributors and other third parties, it therefore is licensed under the MIT License. http://opensource.org/licenses/mit-license.php. Under that license, permission is granted free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the conditions that any appropriate copyright notices and this permission notice are included in all copies or substantial portions of the Software.
6 | 
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 | 
9 | 


--------------------------------------------------------------------------------
/dist/Dshell-3.1.3.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dist/Dshell-3.1.3.tar.gz


--------------------------------------------------------------------------------
/dshell/__init__.py:
--------------------------------------------------------------------------------
 1 | 
 2 | # Expose classes and functions that external users will need to access as the API
 3 | from .core import ConnectionPlugin, PacketPlugin, Packet
 4 | # TODO: Make decode.process_files()/main() function more API friendly through documentation and unwrapping the kwargs
 5 | from .api import get_plugins, get_plugin_information
 6 | 
 7 | from .output.alertout import AlertOutput
 8 | from .output.colorout import ColorOutput
 9 | from .output.csvout import CSVOutput
10 | from .output.elasticout import ElasticOutput
11 | from .output.htmlout import HTMLOutput
12 | from .output.jsonout import JSONOutput
13 | from .output.netflowout import NetflowOutput
14 | from .output.pcapout import PCAPOutput
15 | 


--------------------------------------------------------------------------------
/dshell/api.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Dshell3 Python API
 3 | """
 4 | 
 5 | import logging
 6 | import operator
 7 | from importlib import import_module
 8 | 
 9 | # TODO: Move get_plugins() here?
10 | from .dshelllist import get_plugins
11 | 
12 | 
13 | logger = logging.getLogger(__name__)
14 | 
15 | 
16 | # TODO: Should this be renamed to "load_plugins()" since it actually imports the modules?
17 | def get_plugin_information() -> dict:
18 |     """
19 |     Generates and returns a dictionary of plugins.
20 |     :return: dictionary mapping plugin name -> instantiated plugin object
21 |     :raises ImportError: If a plugin could not be imported.
22 |     """
23 |     plugin_map = get_plugins()
24 |     # Import ALL of the plugins and instantiate each module's DshellPlugin
25 |     plugins = {}
26 |     for name, module in sorted(plugin_map.items(), key=operator.itemgetter(1)):
27 |         try:
28 |             module = import_module(module)
29 |             if not module.DshellPlugin:
30 |                 continue
31 |             module = module.DshellPlugin()
32 |             plugins[name] = module
33 |         except Exception as e:
34 |             raise ImportError(f"Could not load {repr(module)} with error: {e}")
35 | 
36 |     return plugins
37 | 
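A minimal usage sketch for the API above, assuming Dshell and every plugin's third-party dependencies are installed (the function raises ImportError otherwise); attribute access is defensive since the attribute names are read from the plugin instances:

    from dshell.api import get_plugin_information

    plugins = get_plugin_information()   # plugin name -> instantiated DshellPlugin
    for name, plugin in sorted(plugins.items()):
        print(f"{name:20s} bpf={getattr(plugin, 'bpf', None)!r} "
              f"{getattr(plugin, 'description', '')}")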


--------------------------------------------------------------------------------
/dshell/data/GeoIP/readme.txt:
--------------------------------------------------------------------------------
1 | GeoIP data sets go here.
2 | 


--------------------------------------------------------------------------------
/dshell/data/dshellrc:
--------------------------------------------------------------------------------
1 | export PS1="`whoami`@`hostname`:\w Dshell> "
2 | alias decode="python3 -m dshell.decode "
3 | 


--------------------------------------------------------------------------------
/dshell/data/empty.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/data/empty.pcap


--------------------------------------------------------------------------------
/dshell/dshellargparse.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This argument parser is almost identical to the Python standard argparse.
 3 | This one adds a function to automatically add plugin-specific arguments.
 4 | """
 5 | 
 6 | import argparse
 7 | 
 8 | 
 9 | def custom_bytes(value):
10 |     """
11 |     Converts command-line value strings that are supposed to be bytes.
12 |     If the value starts with "0x", it is assumed to be a hex string.
13 |     Otherwise, the data is encoded as UTF-8.
14 |     """
15 |     if isinstance(value, bytes):
16 |         return value
17 |     if value.startswith("0x"):
18 |         try:
19 |             return bytes.fromhex(value[2:])
20 |         except ValueError:
21 |             pass  # Wasn't hex after all, just treat as a utf8 string.
22 |     return value.encode("utf8")
23 | 
24 | 
25 | class DshellArgumentParser(argparse.ArgumentParser):
26 | 
27 |     def add_plugin_arguments(self, plugin_name, plugin_obj):
28 |         """
29 |         add_plugin_arguments(self, plugin_name, plugin_obj)
30 | 
31 |         Give it the name of the plugin and an instance of the plugin, and
32 |         it will automatically create argument entries.
33 |         """
34 |         if plugin_obj.optiondict:
35 |             group = '{} plugin options'.format(plugin_obj.name)
36 |             group = self.add_argument_group(group)
37 |             for argname, optargs in plugin_obj.optiondict.items():
38 |                 optname = "{}_{}".format(plugin_name, argname)
39 |                 data_type = optargs.get("type", None)
40 |                 if data_type and data_type == bytes:
41 |                     optargs["type"] = custom_bytes
42 |                     default = optargs.get("default", None)
43 |                     if default is not None:
44 |                         optargs["default"] = custom_bytes(default)
45 |                 group.add_argument("--" + optname, dest=optname, **optargs)
46 | 
47 |     def get_plugin_arguments(self, plugin_name, plugin_obj):
48 |         """
49 |         get_plugin_arguments(self, plugin_name, plugin_obj)
50 | 
51 |         Returns a list of argument names and the attributes they're associated
52 |         with.
53 | 
54 |         e.g. --country_code for the "country" plugin ties to the "code" attr
55 |              in the plugin object. Thus, the return would be
56 |              [("country_code", "code"), ...]
57 |         """
58 |         args_and_attrs = []
59 |         if plugin_obj.optiondict:
60 |             for argname in plugin_obj.optiondict.keys():
61 |                 optname = "{}_{}".format(plugin_name, argname)
62 |                 args_and_attrs.append((optname, argname))
63 |         return args_and_attrs
64 | 
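A sketch of how the parser above is used, with a SimpleNamespace standing in for a real plugin (only the name and optiondict attributes matter here); the option name, default, and values are illustrative:

    from types import SimpleNamespace
    from dshell.dshellargparse import DshellArgumentParser, custom_bytes

    fake_plugin = SimpleNamespace(
        name="example",
        optiondict={"pattern": {"type": bytes, "default": "0x4141",
                                "help": "bytes to match"}})

    parser = DshellArgumentParser()
    parser.add_plugin_arguments("example", fake_plugin)
    args = parser.parse_args(["--example_pattern", "0xdeadbeef"])
    print(args.example_pattern)                  # b'\xde\xad\xbe\xef'
    print(custom_bytes("plain text"))            # b'plain text'
    print(parser.get_plugin_arguments("example", fake_plugin))  # [('example_pattern', 'pattern')]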


--------------------------------------------------------------------------------
/dshell/dshellgeoip.py:
--------------------------------------------------------------------------------
  1 | # -*- coding: utf-8 -*-
  2 | """
  3 | A wrapper around GeoIP2 that provides convenience functions for querying and
  4 | collecting GeoIP data
  5 | """
  6 | 
  7 | import datetime
  8 | import logging
  9 | import os
 10 | from collections import OrderedDict
 11 | 
 12 | import geoip2.database
 13 | import geoip2.errors
 14 | 
 15 | from dshell.util import get_data_path
 16 | 
 17 | 
 18 | logger = logging.getLogger(__name__)
 19 | 
 20 | 
 21 | class DshellGeoIP(object):
 22 |     MAX_CACHE_SIZE = 5000
 23 | 
 24 |     def __init__(self, acc=False):
 25 |         self.geodir = os.path.join(get_data_path(), 'GeoIP')
 26 |         self.geoccfile = os.path.join(self.geodir, 'GeoLite2-City.mmdb')
 27 |         self.geoasnfile = os.path.join(self.geodir, 'GeoLite2-ASN.mmdb')
 28 |         self.geoccdb = geoip2.database.Reader(self.geoccfile)
 29 |         self.geoasndb = geoip2.database.Reader(self.geoasnfile)
 30 |         self.geo_asn_cache = DshellGeoIPCache(max_cache_size=self.MAX_CACHE_SIZE)
 31 |         self.geo_loc_cache = DshellGeoIPCache(max_cache_size=self.MAX_CACHE_SIZE)
 32 |         self.acc = acc
 33 | 
 34 |     def check_file_dates(self):
 35 |         """
 36 |         Check the data file age, and log a warning if it's over a year old.
 37 |         """
 38 |         cc_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(self.geoccfile))
 39 |         asn_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(self.geoasnfile))
 40 |         n = datetime.datetime.now()
 41 |         year = datetime.timedelta(days=365)
 42 |         if (n - cc_mtime) > year or (n - asn_mtime) > year:
 43 |             logger.debug("GeoIP data file(s) over a year old, and possibly outdated.")
 44 | 
 45 |     def geoip_country_lookup(self, ip):
 46 |         """
 47 |         Looks up the IP and returns the two-character country code.
 48 |         """
 49 |         location = self.geoip_location_lookup(ip)
 50 |         return location[0]
 51 | 
 52 |     def geoip_asn_lookup(self, ip):
 53 |         """
 54 |         Looks up the IP and returns an ASN string.
 55 |         Example:
 56 |             print(geoip_asn_lookup("74.125.26.103"))
 57 |             "AS15169 Google LLC"
 58 |         """
 59 |         try:
 60 |             return self.geo_asn_cache[ip]
 61 |         except KeyError:
 62 |             try:
 63 |                 template = "AS{0.autonomous_system_number} {0.autonomous_system_organization}"
 64 |                 asn = template.format(self.geoasndb.asn(ip))
 65 |                 self.geo_asn_cache[ip] = asn
 66 |                 return asn
 67 |             except geoip2.errors.AddressNotFoundError:
 68 |                 return None
 69 | 
 70 |     def geoip_location_lookup(self, ip):
 71 |         """
 72 |         Looks up the IP and returns a tuple containing country code, latitude,
 73 |         and longitude.
 74 |         """
 75 |         try:
 76 |             return self.geo_loc_cache[ip]
 77 |         except KeyError:
 78 |             try:
 79 |                 location = self.geoccdb.city(ip)
 80 |                 # Get country code based on order of importance
 81 |                 # 1st: Country that owns an IP address registered in another
 82 |                 #      location (e.g. military bases in foreign countries)
 83 |                 # 2nd: Country in which the IP address is registered
 84 |                 # 3rd: Physical country where IP address is located
 85 |                 # https://dev.maxmind.com/geoip/geoip2/whats-new-in-geoip2/#Country_Registered_Country_and_Represented_Country
 86 |                 # Handle flag from plugin optional args to enable all 3 country codes
 87 |                 if self.acc:
 88 |                     try:
 89 |                         cc = "{}/{}/{}".format(location.represented_country.iso_code,
 90 |                                                location.registered_country.iso_code,
 91 |                                                location.country.iso_code)
 92 |                         cc = cc.replace("None", "--")
 93 | 
 94 |                     except KeyError:
 95 |                         pass
 96 |                 else:
 97 |                     cc = (location.represented_country.iso_code or
 98 |                           location.registered_country.iso_code or
 99 |                           location.country.iso_code or
100 |                           '--')
101 | 
102 |                 location = (
103 |                     cc,
104 |                     location.location.latitude,
105 |                     location.location.longitude
106 |                 )
107 |                 self.geo_loc_cache[ip] = location
108 |                 return location
109 |             except geoip2.errors.AddressNotFoundError:
110 |                 # Handle flag from plugin optional args to enable all 3 country codes
111 |                 if self.acc:
112 |                     location = ("--/--/--", None, None)
113 |                 else:
114 |                     location = ("--", None, None)
115 |                 self.geo_loc_cache[ip] = location
116 |                 return location
117 | 
118 | 
119 | class DshellFailedGeoIP(object):
120 |     """
121 |     Class used in place of DshellGeoIP if GeoIP database files are not found.
122 |     """
123 | 
124 |     def __init__(self):
125 |         self.geodir = os.path.join(get_data_path(), 'GeoIP')
126 |         self.geoccdb = None
127 |         self.geoasndb = None
128 | 
129 |     def check_file_dates(self):
130 |         pass
131 | 
132 |     def geoip_country_lookup(self, ip):
133 |         return "??"
134 | 
135 |     def geoip_asn_lookup(self, ip):
136 |         return None
137 | 
138 |     def geoip_location_lookup(self, ip):
139 |         return ("??", None, None)
140 | 
141 | 
142 | class DshellGeoIPCache(OrderedDict):
143 |     """
144 |     A cache for storing recent IP lookups to improve performance.
145 |     """
146 | 
147 |     def __init__(self, *args, **kwargs):
148 |         self.max_cache_size = kwargs.pop("max_cache_size", 500)
149 |         OrderedDict.__init__(self, *args, **kwargs)
150 | 
151 |     def __setitem__(self, key, value):
152 |         OrderedDict.__setitem__(self, key, value)
153 |         self.check_max_size()
154 | 
155 |     def check_max_size(self):
156 |         while len(self) > self.max_cache_size:
157 |             self.popitem(last=False)
158 | 
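A small sketch of the cache class above in isolation (no GeoIP database files required); once max_cache_size is exceeded, the oldest entries are evicted first. The IPs and lookup results are made up:

    from dshell.dshellgeoip import DshellGeoIPCache

    cache = DshellGeoIPCache(max_cache_size=2)
    cache["10.0.0.1"] = ("US", 38.0, -97.0)
    cache["10.0.0.2"] = ("DE", 51.0, 9.0)
    cache["10.0.0.3"] = ("--", None, None)   # pushes out the oldest entry
    print(list(cache))                       # ['10.0.0.2', '10.0.0.3']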


--------------------------------------------------------------------------------
/dshell/dshelllist.py:
--------------------------------------------------------------------------------
 1 | """
 2 | A library containing functions for generating lists of important modules.
 3 | These are mostly used in decode.py and in unit tests
 4 | """
 5 | 
 6 | import logging
 7 | import os
 8 | import pkg_resources
 9 | from glob import iglob
10 | 
11 | from dshell.util import get_plugin_path
12 | 
13 | 
14 | logger = logging.getLogger(__name__)
15 | 
16 | 
17 | def get_plugins():
18 |     """
19 |     Generate a dictionary mapping plugin names to module paths, found either
20 |     in the dshell.plugins directory or in external packages
21 |     """
22 |     plugins = {}
23 |     # List of directories above the plugins directory that we don't care about
24 |     import_base = get_plugin_path().split(os.path.sep)[:-1]
25 | 
26 |     # Walk through the plugin path and find any Python modules that aren't
27 |     # __init__.py. These are assumed to be plugin modules and will be
28 |     # treated as such.
29 |     for root, dirs, files in os.walk(get_plugin_path()):
30 |         if '__init__.py' in files:
31 |             import_path = root.split(os.path.sep)[len(import_base):]
32 |             for f in iglob("{}/*.py".format(root)):
33 |                 name = os.path.splitext(os.path.basename(f))[0]
34 |                 if name != '__init__':
35 |                     if name in plugins and logger:
36 |                         logger.warning("Duplicate plugin name found: {}".format(name))
37 |                     module = '.'.join(["dshell"] + import_path + [name])
38 |                     plugins[name] = module
39 | 
40 |     # Next, try to discover additional plugins installed externally.
41 |     # Uses entry points in setup.py files.
42 |     for ep_plugin in pkg_resources.iter_entry_points("dshell_plugins"):
43 |         if ep_plugin.name in plugins:
44 |             logger.warning("Duplicate plugin name found: {}".format(ep_plugin.name))
45 |         plugins[ep_plugin.name] = ep_plugin.module_name
46 | 
47 |     return plugins
48 | 
49 | 
50 | def get_output_modules(output_module_path):
51 |     """
52 |     Generate a list of all available output modules under an output_module_path
53 |     """
54 |     modules = []
55 |     for f in iglob("{}/*.py".format(output_module_path)):
56 |         name = os.path.splitext(os.path.basename(f))[0]
57 |         if name != '__init__' and name != 'output':
58 |             # Ignore __init__ and the base output.py module
59 |             modules.append(name)
60 |     return modules
61 | 
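A usage sketch for the two functions above, assuming Dshell is installed; the output-module directory is derived from the dshell.output package rather than hard-coded:

    import os
    import dshell.output
    from dshell.dshelllist import get_plugins, get_output_modules

    plugins = get_plugins()   # e.g. {'dns': 'dshell.plugins.dns.dns', ...}
    for name, module_path in sorted(plugins.items()):
        print(f"{name:20s} {module_path}")

    output_dir = os.path.dirname(dshell.output.__file__)
    print(get_output_modules(output_dir))   # e.g. ['alertout', 'colorout', ...]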


--------------------------------------------------------------------------------
/dshell/output/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/output/__init__.py


--------------------------------------------------------------------------------
/dshell/output/alertout.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This output module is used to display single-line alerts
 3 | 
 4 | It inherits nearly everything from the base Output class, and only resets the
 5 | _DEFAULT_FORMAT to a more expressive format.
 6 | """
 7 | 
 8 | from dshell.output.output import Output
 9 | 
10 | class AlertOutput(Output):
11 |     "A class that provides a default format for printing a single-line alert"
12 |     _DESCRIPTION = "Default format for printing a single-line alert"
13 |     _DEFAULT_FORMAT = "[%(plugin)s] %(ts)s %(sip)16s:%(sport)-5s %(dir_arrow)s %(dip)16s:%(dport)-5s ** %(data)s **\n"
14 | 
15 | obj = AlertOutput
16 | 


--------------------------------------------------------------------------------
/dshell/output/colorout.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Generates packet or reconstructed stream output with ANSI color codes.
 3 | 
 4 | Based on output module originally written by amm
 5 | """
 6 | 
 7 | from dshell.output.output import Output
 8 | import dshell.core
 9 | import dshell.util
10 | 
11 | class ColorOutput(Output):
12 |     _DESCRIPTION = "Reconstructed output with ANSI color codes"
13 |     _PACKET_FORMAT = """Packet %(counter)s (%(proto)s)
14 | Start: %(ts)s
15 | %(sip)16s:%(sport)6s -> %(dip)16s:%(dport)6s (%(bytes)s bytes)
16 | 
17 | %(data)s
18 | 
19 | """
20 |     _CONNECTION_FORMAT = """Connection %(counter)s (%(protocol)s)
21 | Start: %(starttime)s
22 | End:   %(endtime)s
23 | %(clientip)16s:%(clientport)6s -> %(serverip)16s:%(serverport)6s (%(clientbytes)s bytes)
24 | %(serverip)16s:%(serverport)6s -> %(clientip)16s:%(clientport)6s (%(serverbytes)s bytes)
25 | 
26 | %(data)s
27 | 
28 | """
29 |     _DEFAULT_FORMAT = _PACKET_FORMAT
30 |     _DEFAULT_DELIM = "\n\n"
31 | 
32 | 
33 |     def __init__(self, *args, **kwargs):
34 |         super().__init__(*args, **kwargs)
35 |         self.counter = 1
36 |         self.colors = {
37 |             'cs': '31',   # client-to-server is red
38 |             'sc': '32',   # server-to-client is green
39 |             '--': '34',   # everything else is blue
40 |         }
41 |         self.hexmode = kwargs.get('hex', False)
42 |         self.format_is_set = False
43 | 
44 |     def setup(self):
45 |         # activate color blind friendly mode
46 |         if self.cbf:
47 |             self.colors['cs'] = '33'   #client-to-server is yellow
48 |     
49 |     def write(self, *args, **kwargs):
50 |         if not self.format_is_set:
51 |             if 'clientip' in kwargs:
52 |                 self.set_format(self._CONNECTION_FORMAT)
53 |             else:
54 |                 self.set_format(self._PACKET_FORMAT)
55 |             self.format_is_set = True
56 | 
57 |         # a template string for data output
58 |         colorformat = "\x1b[%sm%s\x1b[0m"
59 | 
60 |         # Iterate over the args and try to parse out any raw data strings
61 |         rawdata = []
62 |         for arg in args:
63 |             if type(arg) == dshell.core.Blob:
64 |                 if arg.data:
65 |                     rawdata.append((arg.data, arg.direction))
66 |             elif type(arg) == dshell.core.Connection:
67 |                 for blob in arg.blobs:
68 |                     if blob.data:
69 |                         rawdata.append((blob.data, blob.direction))
70 |             elif type(arg) == dshell.core.Packet:
71 |                 rawdata.append((arg.pkt.body_bytes, kwargs.get('direction', '--')))
72 |             elif type(arg) == tuple:
73 |                 rawdata.append(arg)
74 |             else:
75 |                 rawdata.append((arg, kwargs.get('direction', '--')))
76 | 
77 |         # Clean up the rawdata into something more presentable
78 |         if self.hexmode:
79 |             cleanup_func = dshell.util.hex_plus_ascii
80 |         else:
81 |             cleanup_func = dshell.util.printable_text
82 |         for k, v in enumerate(rawdata):
83 |             newdata = cleanup_func(v[0])
84 |             rawdata[k] = (newdata, v[1])
85 | 
86 |         # Convert the raw data strings into color-coded output
87 |         data = []
88 |         for arg in rawdata:
89 |             datastring = colorformat % (self.colors.get(arg[1], '0'), arg[0])
90 |             data.append(datastring)
91 | 
92 |         super().write(counter=self.counter, *data, **kwargs)
93 |         self.counter += 1
94 | 
95 | obj = ColorOutput
96 | 
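A standalone sketch of the ANSI wrapping write() performs above; the byte strings and their directions are invented for illustration:

    colors = {'cs': '31', 'sc': '32', '--': '34'}   # red / green / blue
    colorformat = "\x1b[%sm%s\x1b[0m"
    for data, direction in [(b"GET / HTTP/1.1", 'cs'), (b"HTTP/1.1 200 OK", 'sc')]:
        print(colorformat % (colors.get(direction, '0'), data.decode()))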


--------------------------------------------------------------------------------
/dshell/output/csvout.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This output module converts plugin output into a CSV format
 3 | """
 4 | 
 5 | import csv
 6 | from dshell.output.output import Output
 7 | 
 8 | class CSVOutput(Output):
 9 |     """
10 |     Takes the fields provided to the write function and prints them in
11 |     CSV format.
12 | 
13 |     Delimiter can be set with --oarg delimiter=<char>
14 | 
15 |     A header row can be printed with --oarg header
16 | 
17 |     Additional fields can be included with --oarg fields=field1,field2,field3
18 |     For example, MAC address can be included with --oarg fields=smac,dmac
19 |     Note: Field names must match the variable names in the plugin
20 | 
21 |     Additional flow fields for connection can be included with --oarg flows
22 |     """
23 | 
24 |     # TODO refine plugin to do things like wrap quotes around long strings
25 | 
26 |     _DEFAULT_FIELDS = ['plugin', 'ts', 'sip', 'sport', 'dip', 'dport', 'data']
27 |     _DEFAULT_FLOW_FIELDS = ['plugin', 'starttime', 'clientip', 'serverip', 'clientcc', 'servercc', 'protocol', 'clientport', 'serverport', 'clientpackets', 'serverpackets', 'clientbytes', 'serverbytes', 'duration', 'data']
28 |     _DEFAULT_DELIM = ','
29 |     _DESCRIPTION = "CSV format output"
30 | 
31 |     def __init__(self, *args, **kwargs):
32 |         self.use_header = False
33 |         self.fields = list(self._DEFAULT_FIELDS)
34 |         super().__init__(**kwargs)
35 | 
36 |     def set_format(self, _=None):
37 |         "Set the format to a CSV list of fields"
38 |         columns = []
39 |         for f in self.fields:
40 |             if f:
41 |                 columns.append(f)
42 |         if self.extra:
43 |             columns.append("extra")
44 |         fmt = self.delimiter.join('%%(%s)r' % f for f in columns)
45 |         fmt += "\n"
46 |         super().set_format(fmt)
47 | 
48 |     def set_oargs(self, **kwargs):
49 |         self.use_header = kwargs.pop("header", False)
50 |         if kwargs.pop("flows", False):
51 |             self.fields = list(self._DEFAULT_FLOW_FIELDS)
52 |         if exfields := kwargs.pop("fields", None):
53 |             for field in exfields.split(','):
54 |                 self.fields.append(field)
55 |         super().set_oargs(**kwargs)
56 |         self.set_format()
57 | 
58 |     def setup(self):
59 |         if self.use_header:
60 |             self.fh.write(self.delimiter.join([f for f in self.fields]) + "\n")
61 | 
62 | 
63 | obj = CSVOutput
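A standalone sketch of the row format set_format() builds above: each field becomes a %(name)r placeholder joined by the delimiter, and the string is filled from the keyword arguments passed to write(). The sample row values are invented:

    fields = ['plugin', 'ts', 'sip', 'sport', 'dip', 'dport', 'data']
    fmt = ','.join('%%(%s)r' % f for f in fields) + "\n"
    row = {'plugin': 'dns', 'ts': '2015-03-03 14:05:10', 'sip': '172.16.101.196',
           'sport': 53, 'dip': '172.16.101.1', 'dport': 53, 'data': 'A? example.com'}
    print(fmt % row, end='')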


--------------------------------------------------------------------------------
/dshell/output/elasticout.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This output module converts plugin output into JSON and indexes it into
 3 | an Elasticsearch datastore
 4 | 
 5 | NOTE: This module requires the third-party 'elasticsearch' Python module
 6 | """
 7 | 
 8 | import ipaddress
 9 | import json
10 | 
11 | from elasticsearch import Elasticsearch
12 | 
13 | import dshell.output.jsonout
14 | 
15 | class ElasticOutput(dshell.output.jsonout.JSONOutput):
16 |     """
17 |     Elasticsearch output module
18 |     Use with --output=elasticout
19 | 
20 |     It is recommended that it be run with some options set:
21 |         host:       server hosting the database (localhost)
22 |         port:       HTTP port listening (9200)
23 |         index:      name of index storing results ("dshell")
24 |         type:       the type for each alert ("alerts")
25 | 
26 |     Example use:
27 |         decode --output=elasticout --oargs="index=dshellalerts" --oargs="type=netflowout" -d netflow ~/pcap/example.pcap
28 |     """
29 | 
30 |     _DESCRIPTION = "Automatically insert data into an elasticsearch instance"
31 | 
32 |     def __init__(self, *args, **kwargs):
33 |         super().__init__(*args, **kwargs.copy())
34 | 
35 |         self.options = {}
36 |         self.options['host'] = kwargs.get('host', 'localhost')
37 |         self.options['port'] = int(kwargs.get('port', 9200))
38 |         self.options['index'] = kwargs.get('index', 'dshell')
39 |         self.options['type'] = kwargs.get('type', 'alerts')
40 | 
41 |         self.es = Elasticsearch([self.options['host']], port=self.options['port'])
42 | 
43 |     def write(self, *args, **kwargs):
44 |         "Converts alert's keyword args to JSON and indexes it into Elasticsearch datastore."
45 |         if args and 'data' not in kwargs:
46 |             kwargs['data'] = self.delimiter.join(map(str, args))
47 | 
48 |         # Elasticsearch can't handle IPv6 (at time of writing)
49 |         # Just delete the ints and expand the string notation.
50 |         # Hopefully, it will be possible to perform range searches on this
51 |         # consistent IP string format.
52 |         try:
53 |             del kwargs['dipint']
54 |         except KeyError:
55 |             pass
56 |         try:
57 |             del kwargs['sipint']
58 |         except KeyError:
59 |             pass
60 |         try:
61 |             kwargs['dip'] = ipaddress.ip_address(kwargs['dip']).exploded
62 |         except KeyError:
63 |             pass
64 |         try:
65 |             kwargs['sip'] = ipaddress.ip_address(kwargs['sip']).exploded
66 |         except KeyError:
67 |             pass
68 | 
69 |         jsondata = json.dumps(kwargs, ensure_ascii=self.ensure_ascii, default=self.json_default)
70 | #        from pprint import pprint
71 | #        pprint(jsondata)
72 |         self.es.index(index=self.options['index'], doc_type=self.options['type'], body=jsondata)
73 | 
74 | obj = ElasticOutput
75 | 


--------------------------------------------------------------------------------
/dshell/output/htmlout.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Generates packet or reconstructed stream output as an HTML page.
  3 | 
  4 | Based on colorout module originally written by amm
  5 | """
  6 | 
  7 | from dshell.output.output import Output
  8 | import dshell.util
  9 | import dshell.core
 10 | from xml.sax.saxutils import escape
 11 | 
 12 | class HTMLOutput(Output):
 13 |     _DESCRIPTION = "HTML format output"
 14 |     _PACKET_FORMAT = """<h1>Packet %(counter)s (%(protocol)s)</h1><h2>Start: %(ts)s
 15 | %(sip)s:%(sport)s -> %(dip)s:%(dport)s (%(bytes)s bytes)
 16 | </h2>
 17 | %(data)s
 18 | """
 19 |     _CONNECTION_FORMAT = """<h1>Connection %(counter)s (%(protocol)s)</h1><h2>Start: %(starttime)s
 20 | End: %(endtime)s
 21 | %(clientip)s:%(clientport)s -> %(serverip)s:%(serverport)s (%(clientbytes)s bytes)
 22 | %(serverip)s:%(serverport)s -> %(clientip)s:%(clientport)s (%(serverbytes)s bytes)
 23 | </h2>
 24 | %(data)s
 25 | """
 26 |     _DEFAULT_FORMAT = _PACKET_FORMAT
 27 |     _DEFAULT_DELIM = "<br />"
 28 | 
 29 |     _HTML_HEADER = """
 30 | <html>
 31 | <head>
 32 |     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
 33 |     <title>Dshell Output</title>
 34 |     <style>
 35 |         body {
 36 |             font-family: monospace;
 37 |             font-size: 10pt;
 38 |             white-space: pre;
 39 |         }
 40 |         h1 {
 41 |             font-family: helvetica;
 42 |             font-size: 13pt;
 43 |             font-weight: bolder;
 44 |             white-space: pre;
 45 |         }
 46 |         h2 {
 47 |             font-family: helvetica;
 48 |             font-size: 12pt;
 49 |             font-weight: bolder;
 50 |             margin: 0 0;
 51 |             white-space: pre;
 52 |         }
 53 |     </style>
 54 | </head>
 55 | <body>
 56 | """
 57 | 
 58 |     _HTML_FOOTER = """
 59 | </body>
 60 | </html>
 61 | """
 62 | 
 63 |     def __init__(self, *args, **kwargs):
 64 |         "Can be called with an optional 'hex' argument to display output in hex"
 65 |         super().__init__(*args, **kwargs)
 66 |         self.counter = 1
 67 |         self.colors = {
 68 |             'cs': 'red',   # client-to-server is red
 69 |             'sc': 'green',   # server-to-client is green
 70 |             '--': 'blue',   # everything else is blue
 71 |         }
 72 |         self.hexmode = kwargs.get('hex', False)
 73 |         self.format_is_set = False
 74 | 
 75 |     def setup(self):
 76 |         # activate color blind friendly mode
 77 |         if self.cbf:
 78 |             self.colors['cs'] = 'gold'   # client-to-server is gold (darker yellow)
 79 |             self.colors['sc'] = 'seagreen'   # server-to-client is sea green (lighter green)
 80 |         self.fh.write(self._HTML_HEADER)
 81 | 
 82 |     def write(self, *args, **kwargs):
 83 |         if not self.format_is_set:
 84 |             if 'clientip' in kwargs:
 85 |                 self.set_format(self._CONNECTION_FORMAT)
 86 |             else:
 87 |                 self.set_format(self._PACKET_FORMAT)
 88 |             self.format_is_set = True
 89 | 
 90 |         # a template string for data output
 91 |         colorformat = '<span style="color:%s;">%s</span>'
 92 | 
 93 |         # Iterate over the args and try to parse out any raw data strings
 94 |         rawdata = []
 95 |         for arg in args:
 96 |             if type(arg) == dshell.core.Blob:
 97 |                 if arg.data:
 98 |                     rawdata.append((arg.data, arg.direction))
 99 |             elif type(arg) == dshell.core.Connection:
100 |                 for blob in arg.blobs:
101 |                     if blob.data:
102 |                         rawdata.append((blob.data, blob.direction))
103 |             elif type(arg) == dshell.core.Packet:
104 |                 rawdata.append((arg.pkt.body_bytes, kwargs.get('direction', '--')))
105 |             elif type(arg) == tuple:
106 |                 rawdata.append(arg)
107 |             else:
108 |                 rawdata.append((arg, kwargs.get('direction', '--')))
109 | 
110 |         # Clean up the rawdata into something more presentable
111 |         if self.hexmode:
112 |             cleanup_func = dshell.util.hex_plus_ascii
113 |         else:
114 |             cleanup_func = dshell.util.printable_text
115 |         for k, v in enumerate(rawdata):
116 |             newdata = cleanup_func(v[0])
117 |             newdata = escape(newdata)
118 |             rawdata[k] = (newdata, v[1])
119 | 
120 |         # Convert the raw data strings into color-coded output
121 |         data = []
122 |         for arg in rawdata:
123 |             datastring = colorformat % (self.colors.get(arg[1], ''), arg[0])
124 |             data.append(datastring)
125 | 
126 |         super().write(counter=self.counter, *data, **kwargs)
127 |         self.counter += 1
128 | 
129 |     def close(self):
130 |         self.fh.write(self._HTML_FOOTER)
131 |         Output.close(self)
132 | 
133 | obj = HTMLOutput
134 | 


--------------------------------------------------------------------------------
/dshell/output/jsonout.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This output module converts plugin output into JSON
 3 | """
 4 | 
 5 | from datetime import datetime
 6 | import json
 7 | from dshell.output.output import Output
 8 | from dshell.core import Packet, Blob, Connection
 9 | 
10 | class JSONOutput(Output):
11 |     """
12 |     Converts arguments for every write into JSON
13 |     Can be called with ensure_ascii=True to pass flag on to the json module.
14 |     """
15 |     _DEFAULT_FORMAT = "%(jsondata)s\n"
16 |     _DESCRIPTION = "JSON format output"
17 | 
18 |     def __init__(self, *args, **kwargs):
19 |         self.ensure_ascii = kwargs.get('ensure_ascii', False)
20 |         super().__init__(*args, **kwargs)
21 | 
22 |     def write(self, *args, **kwargs):
23 |         if self.extra:
24 |             # JSONOutput does not make use of the --extra flag, so disable it
25 |             # before printing output
26 |             self.extra = False
27 |         if args and 'data' not in kwargs:
28 |             kwargs['data'] = self.delimiter.join(map(str, args))
29 |         jsondata = json.dumps(kwargs, ensure_ascii=self.ensure_ascii, default=self.json_default)
30 |         super().write(jsondata=jsondata)
31 | 
32 |     def json_default(self, obj):
33 |         """
34 |         JSON serializer for objects not serializable by default json code
35 |         https://stackoverflow.com/a/22238613
36 |         """
37 |         if isinstance(obj, datetime):
38 |             serial = obj.strftime(self.timeformat)
39 |             return serial
40 |         if isinstance(obj, bytes):
41 |             serial = repr(obj)
42 |             return serial
43 |         if isinstance(obj, (Connection, Blob, Packet)):
44 |             serial = obj.info()
45 |             return serial
46 |         raise TypeError ("Type not serializable ({})".format(str(type(obj))))
47 | 
48 | obj = JSONOutput
49 | 
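A standalone sketch of the "default" serializer pattern json_default() implements above: json.dumps() calls the supplied function for any object it cannot encode natively. The timestamp format string is an assumption, not necessarily the module's timeformat:

    import json
    from datetime import datetime

    def json_default(obj):
        if isinstance(obj, datetime):
            return obj.strftime("%Y-%m-%d %H:%M:%S")
        if isinstance(obj, bytes):
            return repr(obj)
        raise TypeError("Type not serializable ({})".format(type(obj)))

    print(json.dumps({"ts": datetime(2015, 3, 3, 14, 5, 10), "data": b"\x01\x02"},
                     default=json_default))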


--------------------------------------------------------------------------------
/dshell/output/netflowout.py:
--------------------------------------------------------------------------------
  1 | """
  2 | This output module is used for generating flow-format output
  3 | """
  4 | 
  5 | from dshell.output.output import Output
  6 | from datetime import datetime
  7 | 
  8 | class NetflowOutput(Output):
  9 |     """
 10 |     A class for printing connection information from pcap files
 11 | 
 12 |     Output can be grouped by setting the group flag to a field or fields
 13 |     separated by a forward-slash
 14 |     For example:
 15 |       --output=netflowout --oarg="group=clientip/serverip"
 16 |     Note: Output when grouping is only generated at the end of analysis
 17 | 
 18 |     A header row can be printed before output using --oarg header
 19 |     """
 20 | 
 21 |     _DESCRIPTION = "Flow (connection overview) format output"
 22 |     # Define two types of formats:
 23 |     # Those for plugins handling individual packets (not really helpful)
 24 |     _PACKET_FORMAT = "%(ts)s  %(sip)16s -> %(dip)16s  (%(sipcc)s -> %(dipcc)s) %(protocol)5s  %(sport)6s  %(dport)6s %(bytes)7s %(data)s\n"
 25 |     _PACKET6_FORMAT = "%(ts)s  %(sip)40s -> %(dip)40s  (%(sipcc)s -> %(dipcc)s) %(protocol)5s  %(sport)6s  %(dport)6s %(bytes)7s %(data)s\n"
 26 |     _PACKET_PRETTY_HEADER = "[start timestamp] [source IP] -> [destination IP] ([source country] -> [destination country]) [protocol] [source port] [destination port] [bytes] [message data]\n"
 27 |     # And those plugins handling full connections (more useful and common)
 28 |     _CONNECTION_FORMAT = "%(starttime)s  %(clientip)16s -> %(serverip)16s  (%(clientcc)s -> %(servercc)s) %(protocol)5s  %(clientport)6s  %(serverport)6s %(clientpackets)5s  %(serverpackets)5s  %(clientbytes)7s  %(serverbytes)7s  %(duration)-.4fs %(data)s\n"
 29 |     _CONNECTION6_FORMAT = "%(starttime)s  %(clientip)40s -> %(serverip)40s  (%(clientcc)s -> %(servercc)s) %(protocol)5s  %(clientport)6s  %(serverport)6s %(clientpackets)5s  %(serverpackets)5s  %(clientbytes)7s  %(serverbytes)7s  %(duration)-.4fs %(data)s\n"
 30 |     _CONNECTION_PRETTY_HEADER = "[start timestamp] [client IP] -> [server IP] ([client country] -> [server country]) [protocol] [client port] [server port] [client packets] [server packets] [client bytes] [server bytes] [duration] [message data]\n"
 31 |     # TODO decide if IPv6 formats are necessary, and how to switch between them
 32 |     #      and IPv4 formats
 33 |     # Default to packets since those fields are in both types of object
 34 |     _DEFAULT_FORMAT = _PACKET_FORMAT
 35 | 
 36 |     def __init__(self, *args, **kwargs):
 37 |         self.group = False
 38 |         self.group_cache = {}  # results will be stored here, if grouping
 39 |         self.format_is_set = False
 40 |         self.use_header = False
 41 |         Output.__init__(self, *args, **kwargs)
 42 | 
 43 |     def set_format(self, fmt, pretty_header=_PACKET_PRETTY_HEADER):
 44 |         if self.use_header:
 45 |             self.fh.write(str(pretty_header))
 46 |         return super().set_format(fmt)
 47 | 
 48 |     def set_oargs(self, **kwargs):
 49 |         # Are we printing the format string as a file header?
 50 |         self.use_header = kwargs.pop("header", False)
 51 |         # Are we grouping the results, and by what fields?
 52 |         if 'group' in kwargs:
 53 |             self.group = True
 54 |             groupfields = kwargs.pop('group', '')
 55 |             self.group_fields = groupfields.split('/')
 56 |         else:
 57 |             self.group = False
 58 |         super().set_oargs(**kwargs)
 59 | 
 60 |     def write(self, *args, **kwargs):
 61 |         # Change output format depending on if we're handling a connection or
 62 |         # a single packet
 63 |         if not self.format_is_set:
 64 |             if "clientip" in kwargs:
 65 |                 self.set_format(self._CONNECTION_FORMAT, self._CONNECTION_PRETTY_HEADER)
 66 |             else:
 67 |                 self.set_format(self._PACKET_FORMAT, self._PACKET_PRETTY_HEADER)
 68 |             self.format_is_set = True
 69 | 
 70 |         if self.group:
 71 |             # If grouping, check if the IP tuple is in the cache already.
 72 |             # If not, check the reverse of the tuple (i.e. opposite direction)
 73 |             try:
 74 |                 key = tuple([kwargs[g] for g in self.group_fields])
 75 |             except KeyError as e:
 76 |                 Output.write(self, *args, **kwargs)
 77 |                 return
 78 |             if key not in self.group_cache:
 79 |                 rkey = key[::-1]
 80 |                 if rkey in self.group_cache:
 81 |                     key = rkey
 82 |                 else:
 83 |                     self.group_cache[key] = []
 84 |             self.group_cache[key].append(kwargs)
 85 |         else:
 86 |             # If not grouping, just write out the connection immediately
 87 |             Output.write(self, *args, **kwargs)
 88 | 
 89 |     def close(self):
 90 |         if self.group:
 91 |             self.group = False # we're done grouping, so turn it off
 92 |             for key in self.group_cache.keys():
 93 |                 # write header by mapping key index with user's group list
 94 |                 self.fh.write(' '.join([
 95 |                     '%s=%s' % (self.group_fields[i], key[i]) for i in range(len(self.group_fields))])
 96 |                     + "\n")
 97 |                 for kw in self.group_cache[key]:
 98 |                     self.fh.write("\t")
 99 |                     Output.write(self, **kw)
100 |                 self.fh.write("\n")
101 |         Output.close(self)
102 | 
103 | obj = NetflowOutput
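A standalone sketch of the key-reversal grouping trick write() uses above, so that records for A -> B and B -> A land in the same bucket; the two records are invented:

    group_fields = ["clientip", "serverip"]
    group_cache = {}

    def add(record):
        key = tuple(record[f] for f in group_fields)
        if key not in group_cache:
            rkey = key[::-1]
            if rkey in group_cache:
                key = rkey              # reuse the bucket for the reverse direction
            else:
                group_cache[key] = []
        group_cache[key].append(record)

    add({"clientip": "10.0.0.1", "serverip": "8.8.8.8"})
    add({"clientip": "8.8.8.8", "serverip": "10.0.0.1"})
    print(group_cache)   # one key, two records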


--------------------------------------------------------------------------------
/dshell/output/pcapout.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This output module generates pcap output when given very specific arguments.
 3 | """
 4 | 
 5 | from dshell.output.output import Output
 6 | import struct
 7 | import sys
 8 | 
 9 | # TODO get this module to work with ConnectionPlugins
10 | 
11 | class PCAPOutput(Output):
12 |     "Writes data to a pcap file."
13 |     _DESCRIPTION = "Writes data to a pcap file (does not work with connection-based plugins)"
14 | 
15 |     def __init__(self, *args, **kwargs):
16 |         super().__init__(*args, mode='wb', **kwargs)
17 |         if self.fh == sys.stdout:
18 |             # Switch to a stdout that can handle byte output
19 |             self.fh = sys.stdout.buffer
20 |         # Since we have to wait until the link-layer type is set, we wait
21 |         # until the first write() operation before writing the pcap header
22 |         self.header_written = False
23 | 
24 |     def write(self, *args, **kwargs):
25 |         """
26 |         Write a packet to the pcap file.
27 | 
28 |         Arguments:
29 |             pktlen  : raw packet length
30 |             rawpkt  : raw packet data string
31 |             ts      : timestamp
32 |             link_layer_type :   link-layer type (optional) (default: 1)
33 |                                 (e.g. 1 for Ethernet, 105 for 802.11, etc.)
34 |         """
35 |         # The first time write() is called, the pcap header is written.
36 |         # This is to allow the plugin enough time to figure out what the
37 |         # link-layer type is for the data.
38 |         if not self.header_written:
39 |             link_layer_type = kwargs.get('link_layer_type', 1)
40 |             # write the header:
41 |             # magic_number, version_major, version_minor, thiszone, sigfigs,
42 |             # snaplen, link-layer type
43 |             self.fh.write(
44 |                 struct.pack('IHHIIII', 0xa1b2c3d4, 2, 4, 0, 0, 65535, link_layer_type))
45 |             self.header_written = True
46 | 
47 |         # Attempt to fetch the required fields
48 |         pktlen = kwargs.get('pktlen', None)
49 |         rawpkt = kwargs.get('rawpkt', None)
50 |         ts = kwargs.get('ts', None)
51 |         if pktlen is None or rawpkt is None or ts is None:
52 |             raise TypeError("PCAPOutput.write() requires at least these arguments to write packet data: pktlen, rawpkt, and ts.\n\tIt is possible this plugin is not configured to handle pcap output.")
53 | 
54 |         self.fh.write(
55 |             struct.pack('II', int(ts), int((ts - int(ts)) * 1000000)))
56 |         self.fh.write(struct.pack('II', len(rawpkt), pktlen))
57 |         self.fh.write(rawpkt)
58 | 
59 |     def close(self):
60 |         if self.fh == sys.stdout.buffer:
61 |             self.fh = sys.stdout
62 |         super().close()
63 | 
64 | obj = PCAPOutput
65 | 
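A standalone sketch of the classic libpcap layout produced by the struct.pack() calls above: one global header, then a record header plus raw bytes per packet. The output filename and the 60-byte dummy packet are placeholders:

    import struct
    import time

    dummy_pkt = b"\x00" * 60
    with open("example_out.pcap", "wb") as fh:
        # global header: magic, version 2.4, thiszone, sigfigs, snaplen, linktype (1 = Ethernet)
        fh.write(struct.pack('IHHIIII', 0xa1b2c3d4, 2, 4, 0, 0, 65535, 1))
        ts = time.time()
        # per-packet record header: ts_sec, ts_usec, captured length, original length
        fh.write(struct.pack('II', int(ts), int((ts - int(ts)) * 1000000)))
        fh.write(struct.pack('II', len(dummy_pkt), len(dummy_pkt)))
        fh.write(dummy_pkt)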


--------------------------------------------------------------------------------
/dshell/plugins/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/dhcp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/dhcp/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/dhcp/dhcp.py:
--------------------------------------------------------------------------------
  1 | """
  2 | DHCP Plugin
  3 | """
  4 | 
  5 | import dshell.core
  6 | import dshell.util
  7 | from dshell.output.alertout import AlertOutput
  8 | 
  9 | from pypacker.layer4 import udp
 10 | from pypacker.layer567 import dhcp
 11 | 
 12 | from struct import unpack
 13 | 
 14 | class DshellPlugin(dshell.core.PacketPlugin):
 15 |     def __init__(self, **kwargs):
 16 |         super().__init__(name='dhcp',
 17 |                          description='extract client information from DHCP messages',
 18 |                          longdescription="""
 19 | The dhcp plugin will extract the Transaction ID, Hostname, and
 20 | Client ID (MAC address) from every UDP DHCP packet found in the given pcap
 22 | using port 67.  DHCP is built on BOOTP, which runs over UDP.
 23 | BOOTP assigns port 67 for the 'BOOTP server' and port 68 for the 'BOOTP client'.
 24 | This filter captures DHCP traffic, including DHCP Inform packets.
 24 | 
 25 | Examples:
 26 | 
 27 |     General usage:
 28 | 
 29 |         decode -d dhcp <pcap>
 30 | 
 31 |             This will display the connection info including the timestamp,
 32 |             the source IP : source port, destination IP : destination port,
 33 |             Transaction ID, Client Hostname, and the Client MAC address
 34 |             in a tabular format.
 35 | 
 36 | 
 37 |     Malware Traffic Analysis Exercise Traffic from 2015-03-03 where a user was hit with an Angler exploit kit:
 38 |         <http://www.malware-traffic-analysis.net/2015/03/03/2015-03-03-traffic-analysis-exercise.pcap>
 39 |     We want to find out more about the infected machine, and some of this information can be pulled from DHCP traffic
 40 | 
 41 |         decode -d dhcp 2015-03-03-traffic-analysis-exercise.pcap
 42 | 
 43 |             OUTPUT:
 44 | [dhcp] 2015-03-03 14:05:10   172.16.101.196:68    ->     172.16.101.1:67    ** Transaction ID: 0xba5a2cfe   Client ID (MAC): 38:2C:4A:3D:EF:01    Hostname: Gregory-PC **
 45 | [dhcp] 2015-03-03 14:08:40   172.16.101.196:68    ->  255.255.255.255:67    ** Transaction ID: 0x6a482406   Client ID (MAC): 38:2C:4A:3D:EF:01    Hostname: Gregory-PC **
 46 | [dhcp] 2015-03-03 14:10:11   172.16.101.196:68    ->     172.16.101.1:67    ** Transaction ID: 0xe74b17fe   Client ID (MAC): 38:2C:4A:3D:EF:01    Hostname: Gregory-PC **
 47 | [dhcp] 2015-03-03 14:12:50   172.16.101.196:68    ->  255.255.255.255:67    ** Transaction ID: 0xd62614a0   Client ID (MAC): 38:2C:4A:3D:EF:01    Hostname: Gregory-PC **
 48 | """,
 49 |                             bpf='(udp and port 67)',
 50 |                             output=AlertOutput(label=__name__),
 51 |                             author='dek',
 52 |                         )
 53 |         self.mac_address = None
 54 |         self.client_hostname = None
 55 |         self.xid = None
 56 | 
 57 |     # A packetHandler is used to ensure that every DHCP packet in the traffic is parsed
 58 |     def packet_handler(self, pkt):
 59 | 
 60 |         # iterate through the layers and find the DHCP layer
 61 |         dhcp_packet = pkt.pkt.upper_layer
 62 |         while not isinstance(dhcp_packet, dhcp.DHCP):
 63 |             try:
 64 |                 dhcp_packet = dhcp_packet.upper_layer
 65 |             except AttributeError:
 66 |                 # There doesn't appear to be a DHCP layer
 67 |                 return
 68 | 
 69 |         # Pull the transaction ID from the packet
 70 |         self.xid = hex(dhcp_packet.xid)
 71 | 
 72 |         # if the op field indicates a client request (includes DHCP Inform)
 73 |         if dhcp_packet.op == dhcp.DHCP_OP_REQUEST:
 74 |             for opt in list(dhcp_packet.opts):
 75 |                 try:
 76 |                     option_code = opt.type
 77 |                     msg_value = opt.body_bytes
 78 |                 except AttributeError:
 79 |                     continue
 80 | 
 81 |                 # if opt is CLIENT_ID (61)
 82 |                 # unpack the msg_value and reformat the MAC address
 83 |                 if option_code == dhcp.DHCP_OPT_CLIENT_ID:
 84 |                     hardware_type, mac = unpack('B6s', msg_value)
 85 |                     mac = mac.hex().upper()
 86 |                     self.mac_address = ':'.join([mac[i:i+2] for i in range(0, len(mac), 2)])
 87 | 
 88 |                 # if opt is HOSTNAME (12)
 89 |                 elif option_code == dhcp.DHCP_OPT_HOSTNAME:
 90 |                     self.client_hostname = msg_value.decode('utf-8')
 91 | 
 92 |         # Allow for unknown hostnames
 93 |         if not self.client_hostname:
 94 |             self.client_hostname = ""
 95 | 
 96 |         if self.xid and self.mac_address:
 97 |             self.write('Transaction ID: {0:<12} Client ID (MAC): {1:<20} Hostname: {2:<}'.format(
 98 |                        self.xid, self.mac_address, self.client_hostname), **pkt.info(), dir_arrow='->')
 99 |             return pkt
100 | 
101 | if __name__ == "__main__":
102 |     print(DshellPlugin())
103 | 
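A standalone sketch of the client-ID parsing in packet_handler() above: option 61's value is one hardware-type byte followed by the 6-byte MAC, reformatted with the same unpack/hex logic. The option bytes are built from the MAC shown in the example output:

    from struct import unpack

    msg_value = bytes.fromhex("01382c4a3def01")   # hardware type 0x01 + MAC
    hardware_type, mac = unpack('B6s', msg_value)
    mac = mac.hex().upper()
    print(':'.join(mac[i:i+2] for i in range(0, len(mac), 2)))   # 38:2C:4A:3D:EF:01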


--------------------------------------------------------------------------------
/dshell/plugins/dns/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/dns/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/dns/dns.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Extracts and summarizes DNS queries and responses.
  3 | """
  4 | 
  5 | import dshell.core
  6 | from dshell.plugins import dnsplugin
  7 | from dshell.output.alertout import AlertOutput
  8 | 
  9 | from pypacker.pypacker import dns_name_decode
 10 | from pypacker.layer567 import dns
 11 | 
 12 | import ipaddress
 13 | 
 14 | RESPONSE_ERRORS = {
 15 |     dns.DNS_RCODE_FORMERR: "FormErr",
 16 |     dns.DNS_RCODE_SERVFAIL: "ServFail",
 17 |     dns.DNS_RCODE_NXDOMAIN: "NXDOMAIN",
 18 |     dns.DNS_RCODE_NOTIMP: "NotImp",
 19 |     dns.DNS_RCODE_REFUSED: "Refused",
 20 |     dns.DNS_RCODE_YXDOMAIN: "YXDomain",
 21 |     dns.DNS_RCODE_YXRRSET: "YXRRSet",
 22 |     dns.DNS_RCODE_NXRRSET: "NXRRSet",
 23 |     dns.DNS_RCODE_NOTAUTH: "NotAuth",
 24 |     dns.DNS_RCODE_NOTZONE: "NotZone",
 25 | }
 26 | 
 27 | class DshellPlugin(dnsplugin.DNSPlugin):
 28 | 
 29 |     def __init__(self, *args, **kwargs):
 30 |         super().__init__(
 31 |             name="DNS",
 32 |             description="Extract and summarize DNS queries/responses",
 33 |             longdescription="""
 34 | The DNS plugin extracts and summarizes DNS queries and their responses. If
 35 | possible, each query is paired with its response(s).
 36 | 
 37 | Possible anomalies can be found using the --dns_show_noanswer,
 38 | --dns_only_noanswer, --dns_show_norequest, or --dns_only_norequest flags
 39 | (see --help).
 40 | 
 41 | For example, looking for responses that did not come from a request:
 42 |     decode -d dns --dns_only_norequest
 43 | 
 44 | Additional information for responses can be seen with --dns_country and
 45 | --dns_asn to show country codes and ASNs, respectively. These results can be
 46 | piped to grep for filtering results.
 47 | 
 48 | For example, to look for all traffic from Germany:
 49 |     decode -d dns --dns_country |grep "country: DE"
 50 | 
 51 | To look for non-US traffic, try:
 52 |     decode -d dns --dns_country |grep "country:" |grep -v "country: US"
 53 | """,
 54 |             author="bg/twp",
 55 |             bpf="udp and port 53",
 56 |             output=AlertOutput(label=__name__),
 57 |             optiondict={'show_noanswer': {'action': 'store_true', 'help': 'report unanswered queries alongside other queries'},
 58 |                         'show_norequest': {'action': 'store_true', 'help': 'report unsolicited responses alongside other responses'},
 59 |                         'only_noanswer': {'action': 'store_true', 'help': 'report only unanswered queries'},
 60 |                         'only_norequest': {'action': 'store_true', 'help': 'report only unsolicited responses'},
 61 |                         'country': {'action': 'store_true', 'help': 'show country code for returned IP addresses'},
 62 |                         'asn': {'action': 'store_true', 'help': 'show ASN for returned IP addresses'},
 63 |                     }
 64 |         )
 65 | 
 66 |     def premodule(self):
 67 |         if self.only_norequest:
 68 |             self.show_norequest = True
 69 |         if self.only_noanswer:
 70 |             self.show_noanswer = True
 71 | 
 72 | 
 73 |     def dns_handler(self, conn, requests, responses):
 74 |         if self.only_norequest and requests is not None:
 75 |             return
 76 |         if self.only_noanswer and responses is not None:
 77 |             return
 78 |         if not self.show_norequest and requests is None:
 79 |             return
 80 |         if not self.show_noanswer and responses is None:
 81 |             return
 82 | 
 83 |         msg = []
 84 | 
 85 |         # For simplicity, we focus only on the last request if there's more
 86 |         # than one.
 87 |         if requests:
 88 |             request_pkt = requests[-1]
 89 |             request = request_pkt.pkt.highest_layer
 90 |             id = request.id
 91 |             for query in request.queries:
 92 |                 if query.type == dns.DNS_A:
 93 |                     msg.append("A? {}".format(query.name_s))
 94 |                 elif query.type == dns.DNS_AAAA:
 95 |                     msg.append("AAAA? {}".format(query.name_s))
 96 |                 elif query.type == dns.DNS_CNAME:
 97 |                     msg.append("CNAME? {}".format(query.name_s))
 98 |                 elif query.type == dns.DNS_LOC:
 99 |                     msg.append("LOC? {}".format(query.name_s))
100 |                 elif query.type == dns.DNS_MX:
101 |                     msg.append("MX? {}".format(query.name_s))
102 |                 elif query.type == dns.DNS_PTR:
103 |                     msg.append("PTR? {}".format(query.name_s))
104 |                 elif query.type == dns.DNS_SRV:
105 |                     msg.append("SRV? {}".format(query.name_s))
106 |                 elif query.type == dns.DNS_TXT:
107 |                     msg.append("TXT? {}".format(query.name_s))
108 |         else:
109 |             request = None
110 | 
111 |         if responses:
112 |             response_pkt = responses[-1]
113 |             for response in responses:
114 |                 rcode = response.rcode
115 |                 response = response.pkt.highest_layer
116 |                 id = response.id
117 |                 # Check for errors in the response code
118 |                 err = RESPONSE_ERRORS.get(rcode, None)
119 |                 if err:
120 |                     msg.append(err)
121 |                     continue
122 |                 # Get the response counts
123 |                 msg.append("{}/{}/{}".format(response.answers_amount, response.authrr_amount, response.addrr_amount))
124 |                 # Parse the answers from the response
125 |                 for answer in response.answers:
126 |                     if answer.type == dns.DNS_A or answer.type == dns.DNS_AAAA:
127 |                         msg_fields = {}
128 |                         msg_format = "A: {ip} (ttl {ttl}s)"
129 |                         answer_ip = ipaddress.ip_address(answer.address)
130 |                         msg_fields['ip'] = str(answer_ip)
131 |                         msg_fields['ttl'] = str(answer.ttl)
132 |                         if self.country:
133 |                             msg_fields['country'] = dshell.core.geoip.geoip_country_lookup(msg_fields['ip']) or '--'
134 |                             msg_format += " (country: {country})"
135 |                         if self.asn:
136 |                             msg_fields['asn'] = dshell.core.geoip.geoip_asn_lookup(msg_fields['ip'])
137 |                             msg_format += " (ASN: {asn})"
138 |                         msg.append(msg_format.format(**msg_fields))
139 |                     # TODO pypacker doesn't really parse CNAMEs out. We try
140 |                     #      to get what we can manually, but keep checking
141 |                     #      whether it gets officially included in pypacker
142 |                     elif answer.type == dns.DNS_CNAME:
143 |                         if request:
144 |                             cname = dnsplugin.basic_cname_decode(request.queries[0].name, answer.address)
145 |                         else:
146 |                             cname = dns_name_decode(answer.address)
147 |                         msg.append('CNAME: {!r}'.format(cname))
148 |                     elif answer.type == dns.DNS_LOC:
149 |                         msg.append("LOC: {!s}".format(answer.address))
150 |                     elif answer.type == dns.DNS_MX:
151 |                         msg.append('MX: {!s}'.format(answer.address))
152 |                     elif answer.type == dns.DNS_NS:
153 |                         msg.append('NS: {!s}'.format(answer.address))
154 |                     elif answer.type == dns.DNS_PTR:
155 |                         ptr = dns_name_decode(answer.address)
156 |                         msg.append('PTR: {!s}'.format(ptr))
157 |                     elif answer.type == dns.DNS_SRV:
158 |                         msg.append('SRV: {!s}'.format(answer.address))
159 |                     elif answer.type == dns.DNS_TXT:
160 |                         msg.append('TXT: {!s}'.format(answer.address))
161 | 
162 |         else:
163 |             msg.append("No response")
164 | 
165 |         msg.insert(0, "ID: {}".format(id))
166 |         msg = ", ".join(msg)
167 |         if request:
168 |             self.write(msg, **request_pkt.info())
169 |         elif response:
170 |             self.write(msg, **response_pkt.info())
171 |         else:
172 |             self.write(msg, **conn.info())
173 | 
174 |         return conn, requests, responses
175 | 
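
The optional country/ASN fields are appended to the format string only when the corresponding flags are set. A small self-contained sketch of that pattern, with the geoip lookups replaced by literal values (the real plugin uses dshell.core.geoip):

    def format_answer(ip, ttl, country=None, asn=None):
        fields = {'ip': ip, 'ttl': ttl}
        fmt = "A: {ip} (ttl {ttl}s)"
        if country is not None:
            fields['country'] = country
            fmt += " (country: {country})"
        if asn is not None:
            fields['asn'] = asn
            fmt += " (ASN: {asn})"
        return fmt.format(**fields)

    print(format_answer("192.0.2.1", 300))                # A: 192.0.2.1 (ttl 300s)
    print(format_answer("192.0.2.1", 300, country="DE"))  # A: 192.0.2.1 (ttl 300s) (country: DE)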


--------------------------------------------------------------------------------
/dshell/plugins/dns/dnscc.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Identifies DNS queries and finds the country code of the record response.
 3 | """
 4 | 
 5 | import dshell.core
 6 | from dshell.plugins import dnsplugin
 7 | from dshell.output.alertout import AlertOutput
 8 | 
 9 | from pypacker.pypacker import dns_name_decode
10 | from pypacker.layer567 import dns
11 | 
12 | import ipaddress
13 | 
14 | class DshellPlugin(dnsplugin.DNSPlugin):
15 | 
16 |     def __init__(self, *args, **kwargs):
17 |         super().__init__(
18 |             name="DNS Country Code",
19 |             description="identify country code of DNS A/AAAA record responses",
20 |             bpf="port 53",
21 |             author="bg",
22 |             output=AlertOutput(label=__name__),
23 |             optiondict={
24 |                 'foreign': {
25 |                     'action': 'store_true',
26 |                     'help': 'report responses in non-US countries'
27 |                 },
28 |                 'code': {
29 |                     'type': str,
30 |                     'help': 'filter on a specific country code (ex. US, DE, JP, etc.)'
31 |                 }
32 |             }
33 |         )
34 | 
35 |     def dns_handler(self, conn, requests, responses):
36 |         "pull out the A/AAAA queries from the last DNS request in a connection"
37 |         queries = []
38 |         if requests:
39 |             request = requests[-1].pkt.highest_layer
40 |             id = request.id
41 |             for query in request.queries:
42 |                 if query.type == dns.DNS_A:
43 |                     queries.append("A? {}".format(query.name_s))
44 |                 elif query.type == dns.DNS_AAAA:
45 |                     queries.append("AAAA? {}".format(query.name_s))
46 |         queries = ', '.join(queries)
47 | 
48 |         answers = []
49 |         if responses:
50 |             for response in responses:
51 |                 response = response.pkt.highest_layer
52 |                 id = response.id
53 |                 for answer in response.answers:
54 |                     if answer.type == dns.DNS_A:
55 |                         ip = ipaddress.ip_address(answer.address).compressed
56 |                         cc = dshell.core.geoip.geoip_country_lookup(ip) or '--'
57 |                         if self.foreign and (cc == 'US' or cc == '--'):
58 |                             continue
59 |                         elif self.code and cc != self.code:
60 |                             continue
61 |                         answers.append("A: {} ({}) (ttl: {}s)".format(
62 |                             ip, cc, answer.ttl))
63 |                     elif answer.type == dns.DNS_AAAA:
64 |                         ip = ipaddress.ip_address(answer.address).compressed
65 |                         if ip == '::':
66 |                             cc = '--'
67 |                         else:
68 |                             cc = dshell.core.geoip.geoip_country_lookup(ip) or '--'
69 |                         if self.foreign and (cc == 'US' or cc == '--'):
70 |                             continue
71 |                         elif self.code and cc != self.code:
72 |                             continue
73 |                         answers.append("AAAA: {} ({}) (ttl: {}s)".format(
74 |                             ip, cc, answer.ttl))
75 |         answers = ', '.join(answers)
76 | 
77 |         if answers:
78 |             msg = "ID: {}, {} / {}".format(id, queries, answers)
79 |             self.write(msg, queries=queries, answers=answers, **conn.info())
80 |             return conn, requests, responses
81 |         else:
82 |             return
83 | 
84 | 
85 | 
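
The --foreign and --code options drop answers before they are reported. A sketch of that per-answer decision as a hypothetical standalone helper (not part of the plugin):

    def keep_answer(cc, foreign=False, code=None):
        # --foreign: drop US and unknown ('--') country codes
        if foreign and cc in ('US', '--'):
            return False
        # --code: keep only the requested country code
        if code and cc != code:
            return False
        return True

    print(keep_answer('DE', foreign=True))  # True
    print(keep_answer('US', foreign=True))  # False
    print(keep_answer('JP', code='DE'))     # False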


--------------------------------------------------------------------------------
/dshell/plugins/dns/innuendo-dns.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Proof-of-concept Dshell plugin to detect INNUENDO DNS Channel
 3 | 
 4 | Based on the short marketing video (http://vimeo.com/115206626) the
 5 | INNUENDO DNS Channel relies on DNS to communicate with an authoritative
 6 | name server. The name server will respond with a base64 encoded TXT
 7 | answer. This plugin will analyze DNS TXT queries and responses to
 8 | determine if it matches the network traffic described in the video.
 9 | There are multiple (*very poor*) assumptions in this detection plugin,
10 | but it serves as a proof-of-concept detector. This detector has not been
11 | tested against authentic INNUENDO DNS Channel traffic.
12 | """
13 | 
14 | 
15 | from dshell.plugins.dnsplugin import DNSPlugin
16 | from dshell.output.alertout import AlertOutput
17 | 
18 | from pypacker.layer567 import dns
19 | 
20 | import base64
21 | 
22 | class DshellPlugin(DNSPlugin):
23 |     """
24 |     Proof-of-concept Dshell plugin to detect INNUENDO DNS Channel
25 | 
26 |     Usage: decode -d innuendo *.pcap
27 |     """
28 | 
29 |     def __init__(self):
30 |         super().__init__(
31 |             name="innuendo-dns",
32 |             description="proof-of-concept detector for INNUENDO DNS channel",
33 |             bpf="port 53",
34 |             author="primalsec",
35 |             output=AlertOutput(label=__name__),
36 |         )
37 | 
38 |     def dns_handler(self, conn, requests, responses):
 39 |         # responses may be None here; the checks below handle that case
40 | 
41 |         query = None
42 |         answers = []
43 | 
44 |         if requests:
45 |             request = requests[-1].pkt.highest_layer
46 |             query = request.queries[-1]
47 |             # DNS Question, extract query name if it is a TXT record request
48 |             if query.type == dns.DNS_TXT:
49 |                 query = query.name_s
50 | 
51 |         if responses:
52 |             for response in responses:
53 |                 rcode = response.rcode
54 |                 response = response.pkt.highest_layer
55 |                 # DNS Answer with data and no errors
56 |                 if rcode == dns.DNS_RCODE_NOERR and response.answers:
57 |                     for answer in response.answers:
58 |                         if answer.type == dns.DNS_TXT:
59 |                             answers.append(answer.address)
60 | 
61 |         if query and answers:
62 |             # assumption: INNUENDO will use the lowest level domain for C2
63 |             # example: AAAABBBBCCCC.foo.bar.com -> AAAABBBBCCCC is the INNUENDO
64 |             # data
65 |             subdomain = query.split('.', 1)[0]
66 | 
67 |             # weak test based on video observation *very poor assumption*
68 |             if subdomain.isupper():
69 |                 # check each answer in the TXT response
70 |                 for answer in answers:
71 |                     try:
72 |                         # INNUENDO DNS channel base64 encodes the response, check to see if
73 |                         # it contains a valid base64 string  *poor assumption*
74 |                         dummy = base64.b64decode(answer)
75 | 
76 |                         self.write('INNUENDO DNS Channel', query, '/', answer, **conn.info())
77 | 
78 |                         # here would be a good place to decrypt the payload (if you have the keys)
79 |                         # decrypt_payload( answer )
80 |                     except:
81 |                         return None
82 |                 return conn, requests, responses
83 | 
84 |         return None
85 | 
86 | 
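
A compact sketch of the heuristic described in the docstring: take the lowest-level label of the queried name, require it to be upper-case, and require the TXT answer to decode as base64. All values here are hypothetical:

    import base64
    import binascii

    query = "AAAABBBBCCCC.foo.bar.com"   # hypothetical TXT query name
    answer = "aGVsbG8gd29ybGQ="          # hypothetical TXT answer (valid base64)

    subdomain = query.split('.', 1)[0]
    if subdomain.isupper():
        try:
            base64.b64decode(answer, validate=True)
            print("possible INNUENDO DNS channel:", subdomain)
        except binascii.Error:
            pass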


--------------------------------------------------------------------------------
/dshell/plugins/dns/specialips.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Identifies DNS resolutions that fall into special IP spaces (i.e. private,
  3 | reserved, loopback, multicast, link-local, or unspecified).
  4 | 
  5 | When found, it will print an alert for the request/response pair. The alert
  6 | will include the type of special IP in parentheses:
  7 |     (loopback)
  8 |     (private)
  9 |     (reserved)
 10 |     (multicast)
 11 |     (link-local)
 12 |     (unspecified)
 13 | """
 14 | 
 15 | from dshell.plugins import dnsplugin
 16 | from dshell.output.alertout import AlertOutput
 17 | 
 18 | from pypacker.layer567 import dns
 19 | 
 20 | import ipaddress
 21 | 
 22 | 
 23 | class DshellPlugin(dnsplugin.DNSPlugin):
 24 | 
 25 |     def __init__(self, *args, **kwargs):
 26 |         super().__init__(
 27 |             name="special-ips",
 28 |             description="identify DNS resolutions that fall into special IP (IPv4 and IPv6) spaces (i.e. private, reserved, loopback, multicast, link-local, or unspecified)",
 29 |             bpf="port 53",
 30 |             author="dev195",
 31 |             output=AlertOutput(label=__name__),
 32 |             longdescription="""
 33 | Identifies DNS resolutions that fall into special IP spaces (i.e. private,
 34 | reserved, loopback, multicast, link-local, or unspecified).
 35 | 
 36 | When found, it will print an alert for the request/response pair. The alert
 37 | will include the type of special IP in parentheses:
 38 |     (loopback)
 39 |     (private)
 40 |     (reserved)
 41 |     (multicast)
 42 |     (link-local)
 43 |     (unspecified)
 44 | 
 45 | For example, to look for responses with private IPs:
 46 |     Dshell> decode -d specialips ~/pcap/SkypeIRC.cap  |grep "(private)"
 47 |     [special-ips] 2006-08-25 15:31:06      192.168.1.2:2128  --      192.168.1.1:53    ** ID: 12579, A? voyager.home., A: 192.168.1.1 (private) (ttl 10000s) **
 48 | 
 49 | Findings can also be written to a separate pcap file by chaining:
 50 |     Dshell> decode -d specialips+pcapwriter --pcapwriter_outfile="special-dns.pcap" ~/pcap/example.pcap
 51 | """,
 52 |         )
 53 | 
 54 | 
 55 |     def dns_handler(self, conn, requests, responses):
 56 |         """
 57 |         Stores the DNS request, then iterates over responses looking for
 58 |         special IP addresses. If it finds one, it will print an alert for the
 59 |         request/response pair.
 60 |         """
 61 |         id = None  # in case a response has no matching request
 62 |         msg = []
 63 |         if requests:
 64 |             request_pkt = requests[-1]
 65 |             request = request_pkt.pkt.highest_layer
 66 |             id = request.id
 67 |             for query in request.queries:
 68 |                 if query.type == dns.DNS_A:
 69 |                     msg.append("A? {}".format(query.name_s))
 70 |                 elif query.type == dns.DNS_AAAA:
 71 |                     msg.append("AAAA? {}".format(query.name_s))
 72 | 
 73 | 
 74 |         if responses:
 75 |             keep_responses = False
 76 |             for response in responses:
 77 |                 response = response.pkt.highest_layer
 78 |                 for answer in response.answers:
 79 |                     if answer.type == dns.DNS_A or answer.type == dns.DNS_AAAA:
 80 |                         answer_ip = ipaddress.ip_address(answer.address)
 81 |                         msg_fields = {}
 82 |                         msg_format = "A: {ip} ({type}) (ttl {ttl}s)"
 83 |                         msg_fields['ip'] = str(answer_ip)
 84 |                         msg_fields['ttl'] = str(answer.ttl)
 85 |                         msg_fields['type'] = ''
 86 |                         if answer_ip.is_loopback:
 87 |                             msg_fields['type'] = 'loopback'
 88 |                             keep_responses = True
 89 |                         elif answer_ip.is_private:
 90 |                             msg_fields['type'] = 'private'
 91 |                             keep_responses = True
 92 |                         elif answer_ip.is_reserved:
 93 |                             msg_fields['type'] = 'reserved'
 94 |                             keep_responses = True
 95 |                         elif answer_ip.is_multicast:
 96 |                             msg_fields['type'] = 'multicast'
 97 |                             keep_responses = True
 98 |                         elif answer_ip.is_link_local:
 99 |                             msg_fields['type'] = 'link-local'
100 |                             keep_responses = True
101 |                         elif answer_ip.is_unspecified:
102 |                             msg_fields['type'] = 'unspecified'
103 |                             keep_responses = True
104 |                         msg.append(msg_format.format(**msg_fields))
105 |             if keep_responses:
106 |                 msg.insert(0, "ID: {}".format(id))
107 |                 msg = ", ".join(msg)
108 |                 self.write(msg, **conn.info())
109 |                 return conn, requests, responses
110 | 
111 | 
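
The classification relies on the standard-library ipaddress attributes, checked in the same order as above. A minimal sketch (the ordering matters, since e.g. loopback addresses also register as private to ipaddress):

    import ipaddress

    def classify(ip):
        addr = ipaddress.ip_address(ip)
        for label in ("loopback", "private", "reserved", "multicast",
                      "link_local", "unspecified"):
            if getattr(addr, "is_" + label):
                return label.replace("_", "-")
        return None

    for ip in ("127.0.0.1", "192.168.1.1", "224.0.0.251", "8.8.8.8"):
        print(ip, classify(ip))  # loopback, private, multicast, None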


--------------------------------------------------------------------------------
/dshell/plugins/dnsplugin.py:
--------------------------------------------------------------------------------
  1 | """
  2 | This is a base-level plugin intended to handle DNS lookups and responses
  3 | 
  4 | It inherits from the base ConnectionPlugin and provides a new handler
  5 | function: dns_handler(conn, requests, responses)
  6 | 
  7 | It automatically pairs request/response packets by ID and passes them to the
  8 | handler for a custom plugin, such as dns.py, to use.
  9 | """
 10 | 
 11 | import logging
 12 | 
 13 | import dshell.core as dshell
 14 | 
 15 | from pypacker.pypacker import dns_name_decode
 16 | from pypacker.layer567 import dns
 17 | 
 18 | logger = logging.getLogger(__name__)
 19 | 
 20 | 
 21 | def basic_cname_decode(request, answer):
 22 |     """
 23 |     DIRTY HACK ALERT
 24 | 
 25 |     This function exists to convert DNS CNAME responses into human-readable
 26 |     strings. pypacker cannot currently convert these, so this one attempts
 27 |     to do it. However, it is not complete and will only work for the most
 28 |     common situations (i.e. no pointers, or pointers that only point to the
 29 |     first request).
 30 | 
 31 |     Feed it the bytes (query.name) of the first request and the bytes for the
 32 |     answer (answer.address) with a CNAME, and it will return the parsed string.
 33 |     """
 34 | 
 35 |     if b"\xc0" not in answer:
 36 |         # short-circuit if there is no pointer
 37 |         return dns_name_decode(answer)
 38 |     # Get the offset into the question by grabbing the number after \xc0
 39 |     # Then, offset the offset by subtracting the query header length (12)
 40 |     snip_index = answer[answer.index(b"\xc0") + 1] - 12
 41 |     # Grab the necessary piece from the request
 42 |     snip = request[snip_index:]
 43 |     # Reassemble and return
 44 |     rebuilt = answer[:answer.index(b"\xc0")] + snip
 45 |     return dns_name_decode(rebuilt)
 46 | 
 47 | 
 48 | class DNSPlugin(dshell.ConnectionPlugin):
 49 |     """
 50 |     A base-level plugin that overwrites the connection_handler in
 51 |     ConnectionPlugin. It provides a new handler function: dns_handler.
 52 |     """
 53 | 
 54 |     def __init__(self, **kwargs):
 55 |         dshell.ConnectionPlugin.__init__(self, **kwargs)
 56 | 
 57 |     def connection_handler(self, conn):
 58 |         requests = {}
 59 |         responses = {}
 60 |         id_to_blob_map = {}
 61 |         id_to_packets_map = {}
 62 | 
 63 |         for blob in conn.blobs:
 64 |             for pkt in blob.packets:
 65 |                 packet = pkt.pkt
 66 |                 if not isinstance(packet.highest_layer, dns.DNS):
 67 |                     # Not a DNS packet, so we don't care about this blob
 68 |                     blob.hidden = True
 69 |                     break
 70 | 
 71 |                 dnsp = packet.highest_layer
 72 |                 id_to_blob_map.setdefault(dnsp.id, []).append(blob)
 73 |                 id_to_packets_map.setdefault(dnsp.id, []).append(pkt)
 74 |                 qr_flag = dnsp.flags >> 15
 75 |                 rcode = dnsp.flags & 15
 76 |                 setattr(pkt, 'qr', qr_flag)
 77 |                 setattr(pkt, 'rcode', rcode)
 78 | #                print("{0:016b}".format(dnsp.flags))
 79 |                 if qr_flag == dns.DNS_Q:
 80 |                     requests.setdefault(dnsp.id, []).append(pkt)
 81 |                 elif qr_flag == dns.DNS_A:
 82 |                     responses.setdefault(dnsp.id, []).append(pkt)
 83 | 
 84 |         all_ids = set(list(requests.keys()) + list(responses.keys()))
 85 |         keep_connection = False
 86 |         for id in all_ids:
 87 |             request_list = requests.get(id, None)
 88 |             response_list = responses.get(id, None)
 89 |             dns_handler_out = self.dns_handler(conn, requests=request_list, responses=response_list)
 90 |             if not dns_handler_out:
 91 |                 # remove packets from connections that dns_handler did not like
 92 |                 for blob in id_to_blob_map[id]:
 93 |                     for pkt in id_to_packets_map[id]:
 94 |                         try:
 95 |                             blob.packets.remove(pkt)
 96 |                         except ValueError:
 97 |                             continue
 98 |             else:
 99 |                 for blob in id_to_blob_map[id]:
100 |                     blob.hidden = False
101 |             try:
102 |                 if dns_handler_out and not isinstance(dns_handler_out[0], dshell.Connection):
103 |                     logger.warning("The output from {} dns_handler must be a list with a dshell.Connection as the first element! Chaining plugins from here may not be possible.".format(self.name))
104 |                     continue
105 |             except TypeError:
106 |                 logger.warning("The output from {} dns_handler must be a list with a dshell.Connection as the first element! Chaining plugins from here may not be possible.".format(self.name))
107 |                 continue
108 |             keep_connection = True
109 |         if keep_connection:
110 |             return conn
111 | 
112 |     def dns_handler(self, conn, requests, responses):
113 |         """
114 |         A placeholder.
115 | 
116 |         Plugins will be able to overwrite this to perform custom activities
117 |         on DNS data.
118 | 
119 |         It takes in a Connection, a list of requests (or None), and a list of
120 |         responses (or None). The requests and responses are not intermixed;
121 |         the responses in the list correspond to the requests according to ID.
122 | 
123 |         It should return a list containing the same types of values that came
124 |         in as arguments (i.e. return (conn, requests, responses)). This is
125 |         mostly a consistency thing, as only the Connection is passed along to
126 |         other plugins.
127 |         """
128 |         return (conn, requests, responses)
129 | 
130 | 
131 | DshellPlugin = None
132 | 
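
basic_cname_decode() only handles the common case of a single compression pointer back into the question name. A worked example using hand-built wire-format bytes (the names are hypothetical, and this assumes the dshell package is importable):

    from dshell.plugins.dnsplugin import basic_cname_decode

    # Question name for www.example.com in DNS wire format (starts at offset 12)
    request_name = b'\x03www\x07example\x03com\x00'
    # CNAME rdata "cdn" plus a compression pointer (0xc0 0x10) back to offset 16,
    # where "example.com" begins in the original message
    answer_rdata = b'\x03cdn\xc0\x10'

    print(basic_cname_decode(request_name, answer_rdata))
    # -> cdn.example.com (possibly with a trailing dot, depending on pypacker)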


--------------------------------------------------------------------------------
/dshell/plugins/filter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/filter/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/filter/country.py:
--------------------------------------------------------------------------------
  1 | """
  2 | A filter for connections by IP address country code. Will generally be chained
  3 | with other plugins.
  4 | """
  5 | 
  6 | import dshell.core
  7 | from dshell.output.netflowout import NetflowOutput
  8 | 
  9 | class DshellPlugin(dshell.core.ConnectionPlugin):
 10 | 
 11 |     def __init__(self, *args, **kwargs):
 12 |         super().__init__(
 13 |             name="Country Filter",
 14 |             bpf='ip or ip6',
 15 |             description="filter connections by IP address country code",
 16 |             longdescription="""
 17 | country: filter connections on geolocation (country code)
 18 | 
 19 | Mandatory option:
 20 | 
 21 |   --country_code: specify (2 character) country code to filter on
 22 | 
 23 | Default behavior:
 24 | 
 25 |   If either the client or server IP address matches the specified country,
 26 |   the stream will be included.
 27 | 
 28 | Modifier options:
 29 | 
 30 |   --country_neither: Include only streams where neither the client nor the
 31 |                      server IP address matches the specified country.
 32 | 
 33 |   --country_both:    Include only streams where both the client AND the server
 34 |                      IP addresses match the specified country.
 35 | 
 36 |   --country_notboth: Include streams where the specified country is NOT BOTH
 37 |                      the client and server IP.  Streams where it is one or
 38 |                      the other may be included.
 39 | 
 40 |   --country_alerts:  Show alerts for this plugin (default: false)
 41 | 
 42 | 
 43 | Example:
 44 | 
 45 |   decode -d country+pcapwriter traffic.pcap --pcapwriter_outfile=USonly.pcap --country_code US
 46 |   decode -d country+followstream traffic.pcap --country_code US --country_notboth
 47 | """,
 48 |             author="tp",
 49 |             output=NetflowOutput(label=__name__),
 50 |             optiondict={
 51 |                 'code': {'type': str, 'help': 'two-char country code', 'metavar':'CC'},
 52 |                 'neither': {'action': 'store_true', 'help': 'neither (client/server) is in specified country'},
 53 |                 'both': {'action': 'store_true', 'help': 'both (client/server) ARE in specified country'},
 54 |                 'notboth': {'action': 'store_true', 'help': 'specified country is not both client and server'},
 55 |                 'alerts': {'action': 'store_true', 'default':False, 'help':'have this filter show alerts for matches'}
 56 |             },
 57 |         )
 58 | 
 59 |     def premodule(self):
 60 |         # Several of the args are mutually exclusive
 61 |         # Check if more than one is set, and print a warning if so
 62 |         if (self.neither + self.both + self.notboth) > 1:
 63 |             self.logger.warning("Can only use one of these args at a time: 'neither', 'both', or 'notboth'")
 64 | 
 65 |     def connection_handler(self, conn):
 66 |         # If no country code specified, pass all traffic through
 67 |         if not self.code:
 68 |             return conn
 69 | 
 70 |         if self.neither:
 71 |             if conn.clientcc != self.code and conn.servercc != self.code:
 72 |                 if self.alerts: self.write('neither', **conn.info())
 73 |                 return conn
 74 |             else:
 75 |                 return
 76 | 
 77 |         elif self.both:
 78 |             if conn.clientcc == self.code and conn.servercc == self.code:
 79 |                 if self.alerts: self.write('both', **conn.info())
 80 |                 return conn
 81 |             else:
 82 |                 return
 83 | 
 84 |         elif self.notboth:
 85 |             if ((conn.clientcc != self.code and conn.servercc == self.code)
 86 |                 or
 87 |                 (conn.clientcc == self.code and conn.servercc != self.code)):
 88 |                     if self.alerts: self.write('notboth', **conn.info())
 89 |                     return conn
 90 |             else:
 91 |                 return
 92 | 
 93 |         else:
 94 |             if conn.clientcc == self.code or conn.servercc == self.code:
 95 |                 if self.alerts: self.write('match', **conn.info())
 96 |                 return conn
 97 | 
 98 |         # no match
 99 |         return None
100 | 
101 | 
102 | if __name__ == "__main__":
103 |     print(DshellPlugin())
104 | 
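
The four modifier behaviors documented above reduce to a small decision on the two country codes. A hypothetical standalone helper showing that logic:

    def country_match(clientcc, servercc, code, neither=False, both=False, notboth=False):
        if neither:
            return clientcc != code and servercc != code
        if both:
            return clientcc == code and servercc == code
        if notboth:
            return (clientcc == code) != (servercc == code)   # exactly one side
        return clientcc == code or servercc == code           # default: either side

    print(country_match('US', 'DE', 'US'))                # True
    print(country_match('US', 'DE', 'US', notboth=True))  # True
    print(country_match('US', 'US', 'US', notboth=True))  # False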


--------------------------------------------------------------------------------
/dshell/plugins/filter/track.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Only follows connections that match user-provided IP addresses and ports. Is
  3 | generally chained with other plugins.
  4 | """
  5 | 
  6 | import ipaddress
  7 | import sys
  8 | 
  9 | import dshell.core
 10 | from dshell.output.alertout import AlertOutput
 11 | 
 12 | class DshellPlugin(dshell.core.ConnectionPlugin):
 13 |     def __init__(self, **kwargs):
 14 |         super().__init__(
 15 |             name="track",
 16 |             author="twp,dev195",
 17 |             description="Only follow connections that match user-provided IP addresses and ports",
 18 |             longdescription="""Only follow connections that match user-provided IP addresses
 19 | 
 20 | IP addresses can be specified with --track_source and --track_target.
 21 | Multiple IPs can be used with commas (e.g. --track_source=192.168.1.1,127.0.0.1).
 22 | Ports can be included with IP addresses by joining them with a 'p' (e.g. --track_target=192.168.1.1p80,127.0.0.1).
 23 | Ports can be used alone with just a 'p' (e.g. --track_target=p53).
 24 | CIDR notation is okay (e.g. --track_source=192.168.0.0/16).
 25 | 
 26 | --track_source : used to limit connections by the IP that initiated the connection (usually the client)
 27 | --track_target : used to limit connections by the IP that received the connection (usually the server)
 28 | --track_alerts : used to display optional alerts indicating when a connection starts/ends""",
 29 |             bpf="ip or ip6",
 30 |             output=AlertOutput(label=__name__),
 31 |             optiondict={
 32 |                 "target": {
 33 |                     "default": [],
 34 |                     "action": "append",
 35 |                     "metavar": "IPpPORT"},
 36 |                 "source": {
 37 |                     "default": [],
 38 |                     "action": "append",
 39 |                     "metavar": "IPpPORT"},
 40 |                 "alerts": {
 41 |                     "action": "store_true"}
 42 |                 }
 43 |             )
 44 |         self.sources = []
 45 |         self.targets = []
 46 | 
 47 |     def __split_ips(self, input):
 48 |         """
 49 |         Used to split --track_target and --track_source arguments into
 50 |         list-of-lists used in the connection handler
 51 |         """
 52 |         return_val = []
 53 |         for piece in input.split(','):
 54 |             if 'p' in piece:
 55 |                 ip, port = piece.split('p', 1)
 56 |                 try:
 57 |                     port = int(port)
 58 |                 except ValueError as e:
 59 |                     self.error("Could not parse port number in {!r} - {!s}".format(piece, e))
 60 |                     sys.exit(1)
 61 |                 if not 0 <= port <= 65535:
 62 |                     self.error("Could not parse port number in {!r} - must be in valid port range".format(piece))
 63 |                     sys.exit(1)
 64 |             else:
 65 |                 ip, port = piece, None
 66 |             if '/' in ip:
 67 |                 try:
 68 |                     ip = ipaddress.ip_network(ip)
 69 |                 except ValueError as e:
 70 |                     self.error("Could not parse CIDR netrange - {!s}".format(e))
 71 |                     sys.exit(1)
 72 |             elif ip:
 73 |                 try:
 74 |                     ip = ipaddress.ip_address(ip)
 75 |                 except ValueError as e:
 76 |                     self.error("Could not parse IP address - {!s}".format(e))
 77 |                     sys.exit(1)
 78 |             else:
 79 |                 ip = None
 80 |             return_val.append((ip, port))
 81 |         return return_val
 82 | 
 83 |     def __check_ips(self, masterip, masterport, checkip, checkport):
 84 |         "Checks IPs and ports for matches against the user-selected values"
 85 |         # masterip, masterport are the values selected by the user
 86 |         # checkip, checkport are the values to be checked against masters
 87 |         ip_okay = False
 88 |         port_okay = False
 89 | 
 90 |         if masterip is None:
 91 |             ip_okay = True
 92 |         elif (isinstance(masterip, (ipaddress.IPv4Network, ipaddress.IPv6Network))
 93 |             and checkip in masterip):
 94 |                 ip_okay = True
 95 |         elif (isinstance(masterip, (ipaddress.IPv4Address, ipaddress.IPv6Address))
 96 |             and masterip == checkip):
 97 |                 ip_okay = True
 98 | 
 99 |         if masterport is None:
100 |             port_okay = True
101 |         elif masterport == checkport:
102 |             port_okay = True
103 | 
104 |         if port_okay and ip_okay:
105 |             return True
106 |         else:
107 |             return False
108 | 
109 | 
110 |     def premodule(self):
111 |         if self.target:
112 |             for tstr in self.target:
113 |                 self.targets.extend(self.__split_ips(tstr))
114 |         if self.source:
115 |             for sstr in self.source:
116 |                 self.sources.extend(self.__split_ips(sstr))
117 |         self.logger.debug("targets: {!s}".format(self.targets))
118 |         self.logger.debug("sources: {!s}".format(self.sources))
119 | 
120 |     def connection_handler(self, conn):
121 |         if self.targets:
122 |             conn_okay = False
123 |             for target in self.targets:
124 |                 targetip = target[0]
125 |                 targetport = target[1]
126 |                 serverip = ipaddress.ip_address(conn.serverip)
127 |                 serverport = conn.serverport
128 |                 if self.__check_ips(targetip, targetport, serverip, serverport):
129 |                     conn_okay = True
130 |                     break
131 |             if not conn_okay:
132 |                 return
133 | 
134 |         if self.sources:
135 |             conn_okay = False
136 |             for source in self.sources:
137 |                 sourceip = source[0]
138 |                 sourceport = source[1]
139 |                 clientip = ipaddress.ip_address(conn.clientip)
140 |                 clientport = conn.clientport
141 |                 if self.__check_ips(sourceip, sourceport, clientip, clientport):
142 |                     conn_okay = True
143 |                     break
144 |             if not conn_okay:
145 |                 return
146 | 
147 |         if self.alerts:
148 |             self.write("matching connection", **conn.info())
149 | 
150 |         return conn
151 | 
152 | if __name__ == "__main__":
153 |     print(DshellPlugin())
154 | 
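
The IPpPORT syntax documented above parses each comma-separated piece into an (address-or-network, port) pair. A simplified, hypothetical version of that parsing (the plugin's own __split_ips additionally reports errors and exits):

    import ipaddress

    def parse_track_arg(arg):
        parsed = []
        for piece in arg.split(','):
            ip, _, port = piece.partition('p')
            port = int(port) if port else None
            if '/' in ip:
                ip = ipaddress.ip_network(ip)    # CIDR range
            elif ip:
                ip = ipaddress.ip_address(ip)    # single address
            else:
                ip = None                        # port-only filter
            parsed.append((ip, port))
        return parsed

    print(parse_track_arg("192.168.1.1p80,10.0.0.0/8,p53"))
    # [(IPv4Address('192.168.1.1'), 80), (IPv4Network('10.0.0.0/8'), None), (None, 53)]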


--------------------------------------------------------------------------------
/dshell/plugins/flows/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/flows/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/flows/dataflows.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Displays netflows that have at least 1 byte transferred, by default.
 3 | Bytes threshold can be updated by the user.
 4 | """
 5 | 
 6 | import dshell.core
 7 | from dshell.output.netflowout import NetflowOutput
 8 | 
 9 | class DshellPlugin(dshell.core.ConnectionPlugin):
10 | 
11 |     def __init__(self):
12 |         super().__init__(
13 |             name="dataflows",
14 |             description="Display netflows that have at least 1 byte transferred",
15 |             author="amm",
16 |             output=NetflowOutput(label=__name__),
17 |             optiondict={
18 |                 'size': {
19 |                     'type': int,
20 |                     'default': 1,
21 |                     'metavar': 'SIZE',
22 |                     'help': 'number of bytes transferred (default: 1)'}
23 |             }
24 |         )
25 | 
26 |     def premodule(self):
27 |         if self.size <= 0:
28 |             self.logger.warning("Cannot have a size that's less than or equal to zero (size: {}). Setting to 1.".format(self.size))
29 |             self.size = 1
30 | 
31 |     def connection_handler(self, conn):
32 |         if conn.clientbytes + conn.serverbytes >= self.size:
33 |             self.write(**conn.info())
34 |             return conn
35 | 
36 | 
37 | 


--------------------------------------------------------------------------------
/dshell/plugins/flows/largeflows.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Displays netflows that have at least 1MB transferred, by default.
 3 | Megabyte threshold can be updated by the user.
 4 | """
 5 | 
 6 | import dshell.core
 7 | from dshell.output.netflowout import NetflowOutput
 8 | 
 9 | class DshellPlugin(dshell.core.ConnectionPlugin):
10 | 
11 |     def __init__(self):
12 |         super().__init__(
13 |             name="large-flows",
14 |             description="Display netflows that have at least 1MB transferred",
15 |             author="bg",
16 |             output=NetflowOutput(label=__name__),
17 |             optiondict={
18 |                 'size': {
19 |                     'type': float,
20 |                     'default': 1,
21 |                     'metavar': 'SIZE',
22 |                     'help': 'number of megabytes transferred (default: 1)'}
23 |             }
24 |         )
25 | 
26 |     def premodule(self):
27 |         if self.size <= 0:
28 |             self.logger.warning("Cannot have a size that's less than or equal to zero (size: {}). Setting to 1.".format(self.size))
29 |             self.size = 1
30 |         self.min = 1048576 * self.size
31 |         self.logger.debug("Input: {}, Final size: {} bytes".format(self.size, self.min))
32 | 
33 |     def connection_handler(self, conn):
34 |         if conn.clientbytes + conn.serverbytes >= self.min:
35 |             self.write(**conn.info())
36 |             return conn
37 | 
38 | 
39 | 


--------------------------------------------------------------------------------
/dshell/plugins/flows/longflows.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Displays netflows that have a duration of at least 5 minutes.
 3 | Minute threshold can be updated by the user.
 4 | """
 5 | 
 6 | import dshell.core
 7 | from dshell.output.netflowout import NetflowOutput
 8 | 
 9 | class DshellPlugin(dshell.core.ConnectionPlugin):
10 | 
11 |     def __init__(self):
12 |         super().__init__(
13 |             name="long-flows",
14 |             description="Display netflows that have a duration of at least 5 minutes",
15 |             author="bg",
16 |             output=NetflowOutput(label=__name__),
17 |             optiondict={
18 |                 "len": {
19 |                     "type": float,
20 |                     "default": 5,
21 |                     "help": "set minimum connection time to MIN minutes (default: 5)",
22 |                     "metavar": "MIN",
23 |                 }
24 |             }
25 |         )
26 | 
27 |     def premodule(self):
28 |         if self.len <= 0:
29 |             self.logger.warning("Cannot have a time that's less than or equal to zero (minutes: {}). Setting to 5.".format(self.len))
30 |             self.len = 5
31 |         self.secs = 60 * self.len
32 | 
33 |     def connection_handler(self, conn):
34 |         tdelta = (conn.endtime - conn.starttime).total_seconds()
35 |         if tdelta >= self.secs:
36 |             self.write(**conn.info())
37 |             return conn
38 | 
39 | 


--------------------------------------------------------------------------------
/dshell/plugins/flows/netflow.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Collects and displays statistics about connections (a.k.a. flow data)
 3 | """
 4 | 
 5 | import dshell.core
 6 | from dshell.output.netflowout import NetflowOutput
 7 | 
 8 | class DshellPlugin(dshell.core.ConnectionPlugin):
 9 |     def __init__(self, *args, **kwargs):
10 |         super().__init__(
11 |             name="Netflow",
12 |             description="Collects and displays flow statistics about connections",
13 |             author="dev195",
14 |             bpf="ip or ip6",
15 |             output=NetflowOutput(label=__name__),
16 |             longdescription="""
17 | Collect and display flow statistics about connections.
18 | 
19 | It will reassemble connections and print one row for each flow keyed by
20 | address four-tuple. Each row, by default, will have the following fields:
21 | 
22 | - Start Time : the timestamp of the first packet for a connection
23 | - Client IP  : the IP address of the host that initiated the connection
24 | - Server IP  : the IP address of the host that receives the connection
25 |   (note: client/server designation is based on first packet seen for a connection)
26 | - Client Country : the country code for the client IP address
27 | - Server Country : the country code for the server IP address
28 | - Protocol   : the layer-3 protocol of the connection
29 | - Client Port: port number used by client
30 | - Server Port: port number used by server
31 | - Client Packets : number of data-carrying packets from the client
32 | - Server Packets : number of data-carrying packets from the server
33 |   (note: packet counts ignore packets without data, e.g. handshakes, ACKs, etc.)
34 | - Client Bytes   : total bytes sent by the client
35 | - Server Bytes   : total bytes sent by the server
36 | - Duration   : time between the first packet and final packet of a connection
37 | - Message Data: extra field not used by this plugin
38 | """
39 |         )
40 | 
41 |     def connection_handler(self, conn):
42 |         self.write(**conn.info())
43 |         return conn
44 | 


--------------------------------------------------------------------------------
/dshell/plugins/flows/reverseflows.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Generate an alert when a client transmits more data than the server.
 3 | 
 4 | Additionally, the user can specify a threshold: an alert will be generated
 5 | if the client transmits more than the configured threshold times as much
 6 | data as the server.
 7 | 
 8 | The default threshold value is 3.0, meaning that any client that transmits
 9 | more than three times as much data as the server will generate an alert.
10 | 
11 | Examples:
12 | 1) decode -d reverse-flow <pcap>
13 |     Generates an alert for client transmissions that are three times
14 |     greater than the server transmission.
15 | 
16 | 2) decode -d reverse-flow <pcap> --reverse-flow_threshold 61
17 |     Generates an alert for all client transmissions that are 61 times
18 |     greater than the server transmission
19 | 
20 | 3) decode -d reverse-flow <pcap> --reverse-flow_threshold 61  --reverse-flow_zero
 21 |     Generates an alert for all client transmissions that are 61 times greater
 22 |     than the server transmission, and also when the server transmits zero bytes.
23 | """
24 | 
25 | import dshell.core
26 | from dshell.output.alertout import AlertOutput
27 | 
28 | class DshellPlugin(dshell.core.ConnectionPlugin):
29 | 
30 |     def __init__(self):
31 |         super().__init__(
32 |             name="reverse-flows",
33 |             description="Generate an alert if the client transmits more data than the server",
34 |             author="me",
35 |             bpf="tcp or udp",
36 |             output=AlertOutput(label=__name__),
37 |             optiondict={
38 |                'threshold': {'type':float, 'default':3.0,
39 |                              'help':'Alerts if client transmits more than threshold times the data of the server'},
40 |                'minimum': {'type':int, 'default':0,
41 |                            'help':'alert on client transmissions larger than min bytes [default: 0]'},
42 |                'zero': {'action':'store_true', 'default':False,
43 |                         'help':'alert if the server transmits zero bytes [default: false]'},
44 |             },
45 |             longdescription="""
46 | Generate an alert when a client transmits more data than the server.
47 | 
 48 | Additionally, the user can specify a threshold: an alert will be generated
 49 | if the client transmits more than the configured threshold times as much
 50 | data as the server.
 51 | 
 52 | The default threshold value is 3.0, meaning that any client that transmits
 53 | more than three times as much data as the server will generate an alert.
54 | 
55 | Examples:
56 | 1) decode -d reverse-flow <pcap>
57 |     Generates an alert for client transmissions that are three times
58 |     greater than the server transmission.
59 | 
60 | 2) decode -d reverse-flow <pcap> --reverse-flow_threshold 61
61 |     Generates an alert for all client transmissions that are 61 times
62 |     greater than the server transmission
63 | 
64 | 3) decode -d reverse-flow <pcap> --reverse-flow_threshold 61  --reverse-flow_zero
 65 |     Generates an alert for all client transmissions that are 61 times greater
 66 |     than the server transmission, and also when the server transmits zero bytes.
67 |             """,
68 |         )
69 | 
70 |     def premodule(self):
71 |         if self.threshold < 0:
72 |             self.logger.warning("Cannot have a negative threshold. Defaulting to 3.0. (threshold: {0})".format(self.threshold))
73 |             self.threshold = 3.0
74 |         elif not self.threshold:
75 |             self.logger.warning("Threshold not set. Displaying all client-server transmissions (threshold: {0})".format(self.threshold))
76 | 
77 |     def connection_handler(self, conn):
78 |         if conn.clientbytes < self.minimum:
79 |             return
80 | 
 81 |         ratio = conn.clientbytes / float(conn.serverbytes) if conn.serverbytes else float('inf')
 82 |         if self.zero or (conn.serverbytes and ratio > self.threshold):
 83 |             self.write('client sent {:>6.2f} more than the server'.format(ratio), **conn.info(), dir_arrow="->")
 84 |             return conn
85 | 
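
The alert decision boils down to a minimum-size gate plus a client/server byte ratio. A hypothetical helper mirroring that logic:

    def reverse_flow_alert(clientbytes, serverbytes, threshold=3.0, minimum=0, zero=False):
        if clientbytes < minimum:
            return False
        # --zero short-circuits the ratio test entirely
        if zero:
            return True
        return serverbytes > 0 and clientbytes / serverbytes > threshold

    print(reverse_flow_alert(4000, 1000))         # True  (ratio 4.0 > 3.0)
    print(reverse_flow_alert(2000, 1000))         # False (ratio 2.0)
    print(reverse_flow_alert(500, 0, zero=True))  # True  (server sent nothing)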


--------------------------------------------------------------------------------
/dshell/plugins/flows/toptalkers.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Finds the top-talkers in a file or on an interface based on byte count.
 3 | """
 4 | 
 5 | import dshell.core
 6 | from dshell.output.alertout import AlertOutput
 7 | from dshell.util import human_readable_filesize
 8 | 
 9 | class DshellPlugin(dshell.core.ConnectionPlugin):
10 | 
11 |     def __init__(self, *args, **kwargs):
12 |         super().__init__(
13 |             name="Top Talkers",
14 |             description="Find top-talkers based on byte count",
15 |             author="dev195",
16 |             bpf="tcp or udp",
17 |             output=AlertOutput(label=__name__),
18 |             optiondict={
19 |                 "top_x": {
20 |                     "type": int,
21 |                     "default": 20,
22 |                     "help": "Only display the top X results (default: 20)",
23 |                     "metavar": "X"
24 |                 },
25 |                 "total": {
26 |                     "action": "store_true",
27 |                     "help": "Sum byte counts from both directions instead of separate entries for individual directions"
28 |                 },
29 |                 "h": {
30 |                     "action": "store_true",
31 |                     "help": "Print byte counts in human-readable format"
32 |                 }
33 |             },
34 |             longdescription="""
35 | Finds top 20 connections with largest transferred byte count.
36 | 
37 | Can be configured to display an arbitrary Top X list with arguments.
38 | 
39 | Does not pass connections down plugin chain.
40 | """
41 |         )
42 | 
43 |     def premodule(self):
44 |         """
45 |         Initialize a list to hold the top X talkers
46 |         Format of each entry:
47 |             (bytes, direction, Connection object)
48 |         """
49 |         self.top_talkers = [(0, '---', None)]
50 | 
51 |     def connection_handler(self, conn):
52 |         if self.total:
53 |             # total up the client and server bytes
54 |             self.__process_bytes(conn.clientbytes + conn.serverbytes, '<->', conn)
55 |         else:
56 |             # otherwise, treat client and server bytes separately
57 |             self.__process_bytes(conn.clientbytes, '-->', conn)
58 |             self.__process_bytes(conn.serverbytes, '<--', conn)
59 | 
60 |     def postmodule(self):
61 |         "Iterate over the entries in top_talkers list and print them"
62 |         for bytecount, direction, conn in self.top_talkers:
63 |             if conn is None:
64 |                 break
65 |             if self.h:
66 |                 byte_display = human_readable_filesize(bytecount)
67 |             else:
68 |                 byte_display = "{} B".format(bytecount)
69 |             msg = "client {} server {}".format(direction, byte_display)
70 |             self.write(msg, **conn.info(), dir_arrow="->")
71 | 
72 |     def __process_bytes(self, bytecount, direction, conn):
73 |         """
74 |         Check if the bytecount for a connection belongs in top_talkers
75 |         If so, insert it into the list and pop off the lowest entry
76 |         """
77 |         for i, oldbytecount in enumerate(self.top_talkers):
78 |             if bytecount >= oldbytecount[0]:
79 |                 self.top_talkers.insert(i, (bytecount, direction, conn))
80 |                 break
81 | 
82 |         while len(self.top_talkers) > self.top_x:
83 |             self.top_talkers.pop(-1)
84 | 
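
The __process_bytes logic keeps a sorted list capped at top_x entries, seeded with a sentinel so the first real entry always inserts. A small standalone sketch:

    top_x = 3
    top = [(0, None)]  # sentinel entry, as in premodule()

    def process(count, label):
        for i, entry in enumerate(top):
            if count >= entry[0]:
                top.insert(i, (count, label))
                break
        while len(top) > top_x:
            top.pop(-1)

    for count, label in [(10, 'a'), (50, 'b'), (20, 'c'), (5, 'd')]:
        process(count, label)
    print(top)  # [(50, 'b'), (20, 'c'), (10, 'a')]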


--------------------------------------------------------------------------------
/dshell/plugins/ftp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/ftp/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/http/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/http/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/http/httpdump.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Presents useful information points for HTTP sessions
  3 | """
  4 | 
  5 | import dshell.core
  6 | import dshell.util
  7 | from dshell.plugins.httpplugin import HTTPPlugin
  8 | from dshell.output.colorout import ColorOutput
  9 | 
 10 | from urllib.parse import parse_qs
 11 | from http import cookies
 12 | 
 13 | class DshellPlugin(HTTPPlugin):
 14 |     def __init__(self):
 15 |         super().__init__(
 16 |             name="httpdump",
 17 |             description="Dump useful information about HTTP sessions",
 18 |             bpf="tcp and (port 80 or port 8080 or port 8000)",
 19 |             author="amm",
 20 |             output=ColorOutput(label=__name__),
 21 |             optiondict={
 22 |                 "maxurilen": {
 23 |                     "type": int,
 24 |                     "default": 30,
 25 |                     "metavar": "LENGTH",
 26 |                     "help": "Truncate URLs longer than LENGTH (default: 30). Set to 0 for no truncating."},
 27 |                 "maxpost": {
 28 |                     "type": int,
 29 |                     "default": 1000,
 30 |                     "metavar": "LENGTH",
 31 |                     "help": "Truncate POST bodies longer than LENGTH characters (default: 1000). Set to 0 for no truncating."},
 32 |                 "maxcontent": {
 33 |                     "type": int,
 34 |                     "default": 0,
 35 |                     "metavar": "LENGTH",
 36 |                     "help": "Truncate response bodies longer than LENGTH characters (default: no truncating). Set to 0 for no truncating."},
 37 |                 "showcontent": {
 38 |                     "action": "store_true",
 39 |                     "help": "Display response body"},
 40 |                 "showhtml": {
 41 |                     "action": "store_true",
 42 |                     "help": "Display only HTML results"},
 43 |                 "urlfilter": {
 44 |                     "type": str,
 45 |                     "default": None,
 46 |                     "metavar": "REGEX",
 47 |                     "help": "Filter to URLs matching this regular expression"}
 48 |                 }
 49 |             )
 50 | 
 51 |     def premodule(self):
 52 |         if self.urlfilter:
 53 |             import re
 54 |             self.urlfilter = re.compile(self.urlfilter)
 55 | 
 56 |     def http_handler(self, conn, request, response):
 57 |         host = request.headers.get('host', conn.serverip)
 58 |         url = host + request.uri
 59 |         pretty_url = url
 60 | 
 61 |         # separate URL-encoded data from the location
 62 |         if '?' in request.uri:
 63 |             uri_location, uri_data = request.uri.split('?', 1)
 64 |             pretty_url = host + uri_location
 65 |         else:
 66 |             uri_location, uri_data = request.uri, ""
 67 | 
 68 |         # Check if the URL matches a user-defined filter
 69 |         if self.urlfilter and not self.urlfilter.search(pretty_url):
 70 |             return
 71 | 
 72 |         if self.maxurilen > 0 and len(uri_location) > self.maxurilen:
 73 |             uri_location = "{}[truncated]".format(uri_location[:self.maxurilen])
 74 |             pretty_url = host + uri_location
 75 | 
 76 |         # Set the first line of the alert to show some basic metadata
 77 |         if response is None:
 78 |             msg = ["{} (NO RESPONSE) {}".format(request.method, pretty_url)]
 79 |         else:
 80 |             msg = ["{} ({}) {} ({})".format(request.method, response.status, pretty_url, response.headers.get("content-type", "[no content-type]"))]
 81 | 
 82 |         # Determine if there is any POST data from the client and parse
 83 |         if request and request.method == "POST":
 84 |             try:
 85 |                 post_params = parse_qs(request.body.decode("utf-8"), keep_blank_values=True)
 86 |                 # If parse_qs only returns a single element with a null
 87 |                 # value, it's probably an erroneous evaluation. Most likely a
 88 |                 # base64 encoded payload ending in an '=' character.
 89 |                 if len(post_params) == 1 and list(post_params.values()) == [["\x00"]]:
 90 |                     post_params = request.body
 91 |             except UnicodeDecodeError:
 92 |                 post_params = request.body
 93 |         else:
 94 |             post_params = {}
 95 | 
 96 |         # Get some additional useful data
 97 |         url_params = parse_qs(uri_data, keep_blank_values=True)
 98 |         referer = request.headers.get("referer", None)
 99 |         client_cookie = cookies.SimpleCookie(request.headers.get("cookie", ""))
100 |         server_cookie = cookies.SimpleCookie(response.headers.get("cookie", "")) if response else cookies.SimpleCookie()
101 | 
102 |         # Piece together the alert message
103 |         if referer:
104 |             msg.append("Referer: {}".format(referer))
105 | 
106 |         if client_cookie:
107 |             msg.append("Client Transmitted Cookies:")
108 |             for k, v in client_cookie.items():
109 |                 msg.append("\t{} -> {}".format(k, v.value))
110 | 
111 |         if server_cookie:
112 |             msg.append("Server Set Cookies:")
113 |             for k, v in server_cookie.items():
114 |                 msg.append("\t{} -> {}".format(k, v.value))
115 | 
116 |         if url_params:
117 |             msg.append("URL Parameters:")
118 |             for k, v in url_params.items():
119 |                 msg.append("\t{} -> {}".format(k, v))
120 | 
121 |         if post_params:
122 |             if isinstance(post_params, dict):
123 |                 msg.append("POST Parameters:")
124 |                 for k, v in post_params.items():
125 |                     msg.append("\t{} -> {}".format(k, v))
126 |             else:
127 |                 msg.append("POST Data:")
128 |                 msg.append(dshell.util.printable_text(str(post_params)))
129 |         elif request.body:
130 |             msg.append("POST Body:")
131 |             request_body = dshell.util.printable_text(request.body)
132 |             if self.maxpost > 0 and len(request.body) > self.maxpost:
133 |                 msg.append("{}[truncated]".format(request_body[:self.maxpost]))
134 |             else:
135 |                 msg.append(request_body)
136 | 
137 |         if (self.showcontent or self.showhtml) and response:
138 |             if self.showhtml and 'html' not in response.headers.get('content-type', ''):
139 |                 return
140 |             if 'gzip' in response.headers.get('content-encoding', ''):
141 |                 # TODO gunzipping
142 |                 content = '(gzip encoded)\n{}'.format(response.body)
143 |             else:
144 |                 content = response.body
145 |             content = dshell.util.printable_text(content)
146 |             if self.maxcontent and len(content) > self.maxcontent:
147 |                 content = "{}[truncated]".format(content[:self.maxcontent])
148 |             msg.append("Body Content:")
149 |             msg.append(content)
150 | 
151 |         # Display the start and end times based on Blob instead of Connection
152 |         kwargs = conn.info()
153 |         if request:
154 |             kwargs['starttime'] = request.blob.starttime
155 |             kwargs['clientbytes'] = len(request.blob.data)
156 |         else:
157 |             kwargs['starttime'] = None
158 |             kwargs['clientbytes'] = 0
159 |         if response:
160 |             kwargs['endtime'] = response.blob.endtime
161 |             kwargs['serverbytes'] = len(response.blob.data)
162 |         else:
163 |             kwargs['endtime'] = None
164 |             kwargs['serverbytes'] = 0
165 | 
166 |         if post_params:
167 |             kwargs['post_params'] = post_params
168 |         if url_params:
169 |             kwargs['url_params'] = url_params
170 |         if client_cookie:
171 |             kwargs['client_cookie'] = client_cookie
172 |         if server_cookie:
173 |             kwargs['server_cookie'] = server_cookie
174 | 
175 |         self.write('\n'.join(msg), **kwargs)
176 | 
177 |         return conn, request, response
178 | 
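The query-string handling above (split on '?', then parse_qs with keep_blank_values) can be seen in isolation; a minimal sketch with a hypothetical URI:

    from urllib.parse import parse_qs

    uri = "/login?user=alice&token=&mode=fast"            # hypothetical request URI
    uri_location, uri_data = uri.split('?', 1)
    print(parse_qs(uri_data, keep_blank_values=True))
    # {'user': ['alice'], 'token': [''], 'mode': ['fast']}

Blank values such as token= are kept, which is why the plugin can still report empty URL parameters.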


--------------------------------------------------------------------------------
/dshell/plugins/http/joomla.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Detect and dissect malformed HTTP headers targeting Joomla
 3 | 
 4 | https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-8562
 5 | """
 6 | 
 7 | from dshell.plugins.httpplugin import HTTPPlugin
 8 | from dshell.output.alertout import AlertOutput
 9 | 
10 | import re
11 | 
12 | class DshellPlugin(HTTPPlugin):
13 |     def __init__(self):
14 |         super().__init__(
15 |             name="Joomla CVE-2015-8562",
16 |             author="bg",
17 |             description='Detect and dissect malformed HTTP headers targeting Joomla (CVE-2015-8562)',
18 |             bpf='tcp and (port 80 or port 8080 or port 8000)',
19 |             output=AlertOutput(label=__name__),
20 |             optiondict={
21 |                 "raw_payload": {
22 |                     "action": "store_true",
23 |                     "help": "return the raw payload (do not attempt to decode chr encoding)",
24 |                 }
25 |             },
26 |             longdescription='''
27 | Detect and dissect malformed HTTP headers targeting Joomla
28 | 
29 | https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-8562
30 | 
31 | Usage Examples:
32 | ---------------
33 | 
34 | Dshell> decode -d joomla *.pcap
35 | [Joomla CVE-2015-8562] 2015-12-15 20:17:18    192.168.1.119:43865 <-    192.168.1.139:80    ** x-forwarded-for -> system('touch /tmp/2'); **
36 | 
37 | The module assumes the cmd payload is encoded using chr.  To turn this off run:
38 | 
39 | Dshell> decode -d joomla --joomla_raw_payload *.pcap
40 | [Joomla CVE-2015-8562] 2015-12-15 20:17:18    192.168.1.119:43865 <-    192.168.1.139:80    ** x-forwarded-for -> "eval(chr(115).chr(121).chr(115).chr(116).chr(101).chr(109).chr(40).chr(39).chr(116).chr(111).chr(117).chr(99).chr(104).chr(32).chr(47).chr(116).chr(109).chr(112).chr(47).chr(50).chr(39).chr(41).chr(59)); **
41 | ''',
42 |         )
43 | 
44 |         # Indicator of (potential) compromise
45 |         self.ioc = "JFactory::getConfig();exit"
46 |         self.ioc_bytes = bytes(self.ioc, "ascii")
47 | 
48 |     def attempt_decode(self, cmd):
49 |         ptext = ''
50 |         for c in re.findall(r'\d+', cmd):
51 |             ptext += chr(int(c))
52 |         return ptext
53 | 
54 |     def parse_cmd(self, data):
55 |         start = data.find('"feed_url";')+11
56 |         end = data.find(self.ioc)
57 |         chunk = data[start:end]
58 | 
59 |         try:
60 |             cmd = chunk.split(':')[-1]
61 |             if self.raw_payload:
62 |                 return cmd
63 | 
64 |             plaintext_cmd = self.attempt_decode(cmd)
65 |             return plaintext_cmd
66 |         except:
67 |             return None
68 | 
69 |     def http_handler(self, conn, request, response):
70 |         if not request:
71 |             return
72 | 
73 |         if self.ioc_bytes not in request.blob.data:
74 |             # indicator of (potential) compromise is not here
75 |             return
76 | 
77 |         # there is an attempt to exploit Joomla!
78 | 
79 |         # The Joomla exploit could be sent in any HTTP header field
80 |         for hdr, val in request.headers.items():
81 |             if self.ioc in val:
82 |                 cmd = self.parse_cmd(val)
83 |                 if cmd:
84 |                     self.alert('{} -> {}'.format(hdr, cmd), **conn.info())
85 |                     return conn, request, response
86 | 
87 | 
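The attempt_decode step can be exercised on its own; a standalone sketch with a hypothetical chr()-obfuscated payload:

    import re

    cmd = "eval(chr(108).chr(115).chr(32).chr(47).chr(116).chr(109).chr(112));"   # hypothetical payload
    print(''.join(chr(int(n)) for n in re.findall(r'\d+', cmd)))
    # ls /tmp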


--------------------------------------------------------------------------------
/dshell/plugins/http/ms15-034.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Proof-of-concept code to detect attempts to enumerate MS15-034 vulnerable
 3 | IIS servers and/or cause a denial of service.  Each event will generate an
 4 | alert that prints out the HTTP Request method and the range value contained
 5 | within the HTTP stream.
 6 | """
 7 | 
 8 | from dshell.plugins.httpplugin import HTTPPlugin
 9 | from dshell.output.alertout import AlertOutput
10 | 
11 | class DshellPlugin(HTTPPlugin):
12 |     def __init__(self):
13 |         super().__init__(
14 |             name="ms15-034",
15 |             author="bg",
16 |             description='detect attempts to enumerate MS15-034 vulnerable IIS servers',
17 |             bpf='tcp and (port 80 or port 8080 or port 8000)',
18 |             output=AlertOutput(label=__name__),
19 |             longdescription='''
20 | Proof-of-concept code to detect attempts to enumerate MS15-034 vulnerable
21 | IIS servers and/or cause a denial of service.  Each event will generate an
22 | alert that prints out the HTTP Request method and the range value contained
23 | within the HTTP stream.
24 | 
25 | Usage:
26 | decode -d ms15-034 -q *.pcap
27 | decode -d ms15-034 -i <interface> -q
28 | 
29 | References:
30 | https://technet.microsoft.com/library/security/ms15-034
31 | https://ma.ttias.be/remote-code-execution-via-http-request-in-iis-on-windows/
32 | ''',
33 |         )
34 | 
35 | 
36 |     def http_handler(self, conn, request, response):
37 |         if response is None:
38 |             # Denial of Service (no server response)
39 |             try:
40 |                 rangestr = request.headers.get("range", '')
41 |                 # check range value to reduce false positive rate
42 |                 if not rangestr.endswith('18446744073709551615'):
43 |                     return
44 |             except:
45 |                 return
46 |             self.write('MS15-034 DoS [Request Method: "{0}" URI: "{1}" Range: "{2}"]'.format(request.method, request.uri, rangestr), **conn.info())
47 |             return conn, request, response
48 | 
49 |         else:
50 |             # probing for vulnerable server
51 |             try:
52 |                 rangestr = request.headers.get("range", '')
53 |                 if not rangestr.endswith('18446744073709551615'):
54 |                     return
55 |             except:
56 |                 return
57 | 
58 |             # indication of vulnerable server
59 |             if rangestr and (response.status == '416' or \
60 |                              response.reason == 'Requested Range Not Satisfiable'):
61 |                 self.write('MS15-034 Vulnerable Server  [Request Method: "{0}" Range: "{1}"]'.format(request.method, rangestr), **conn.info())
62 |                 return conn, request, response
63 | 
64 | 
65 | 
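A quick sketch of the detection condition above against a hypothetical probe value; the oversized upper bound is what triggers the alert:

    rangestr = "bytes=18-18446744073709551615"   # hypothetical Range header value
    print(rangestr.endswith('18446744073709551615'))
    # True, so the request would be flagged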


--------------------------------------------------------------------------------
/dshell/plugins/http/riphttp.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Identifies HTTP traffic and reassembles file transfers before writing them to
  3 | files.
  4 | """
  5 | 
  6 | import os
  7 | import re
  8 | import sys
  9 | 
 10 | from dshell.plugins.httpplugin import HTTPPlugin
 11 | from dshell.output.alertout import AlertOutput
 12 | 
 13 | class DshellPlugin(HTTPPlugin):
 14 |     def __init__(self):
 15 |         super().__init__(
 16 |             name="rip-http",
 17 |             author="bg,twp",
 18 |             bpf="tcp and (port 80 or port 8080 or port 8000)",
 19 |             description="Rips files from HTTP traffic",
 20 |             output=AlertOutput(label=__name__),
 21 |             optiondict={'append_conn':
 22 |                             {'action': 'store_true',
 23 |                              'help': 'append sourceip-destip to filename'},
 24 |                         'append_ts':
 25 |                             {'action': 'store_true',
 26 |                              'help': 'append timestamp to filename'},
 27 |                         'direction':
 28 |                             {'help': 'cs=only capture client POST, sc=only capture server GET response',
 29 |                              'metavar': '"cs" OR "sc"',
 30 |                              'default': None},
 31 |                         'outdir':
 32 |                             {'help': 'directory to write output files (Default: current directory)',
 33 |                              'metavar': 'DIRECTORY',
 34 |                              'default': '.'},
 35 |                         'content_filter':
 36 |                             {'help': 'regex MIME type filter for files to save',
 37 |                              'metavar': 'REGEX'},
 38 |                         'name_filter':
 39 |                             {'help': 'regex filename filter for files to save',
 40 |                              'metavar': 'REGEX'}
 41 |             }
 42 |         )
 43 | 
 44 |     def premodule(self):
 45 |         if self.direction not in ('cs', 'sc', None):
 46 |             self.logger.warning("Invalid value for direction: {!r}. Argument must be either 'sc' for server-to-client or 'cs' for client-to-server.".format(self.direction))
 47 |             sys.exit(1)
 48 | 
 49 |         if self.content_filter:
 50 |             self.content_filter = re.compile(self.content_filter)
 51 |         if self.name_filter:
 52 |             self.name_filter = re.compile(self.name_filter)
 53 | 
 54 |         self.openfiles = {}
 55 | 
 56 |         if not os.path.exists(self.outdir):
 57 |             try:
 58 |                 os.makedirs(self.outdir)
 59 |             except (IOError, OSError) as e:
 60 |                 self.error("Could not create output directory: {!r}: {!s}"
 61 |                            .format(self.outdir, e))
 62 |                 sys.exit(1)
 63 | 
 64 |     def http_handler(self, conn, request, response):
 65 |         # Initialize payload to avoid an UnboundLocalError when a content filter does not match
 66 |         payload = None
 67 |         if (not self.direction or self.direction == 'cs') and request and request.method == "POST" and request.body:
 68 |             if not self.content_filter or self.content_filter.search(request.headers.get('content-type', '')):
 69 |                 payload = request
 70 |         elif (not self.direction or self.direction == 'sc') and response and response.status.startswith('2'):
 71 |             if not self.content_filter or self.content_filter.search(response.headers.get('content-type', '')):
 72 |                 payload = response
 73 | 
 74 |         if not payload:
 75 |             # Connection did not match any filters, so get rid of it
 76 |             return
 77 | 
 78 |         host = request.headers.get('host', conn.serverip)
 79 |         url = host + request.uri
 80 | 
 81 |         if url in self.openfiles:
 82 |             # File is already open, so just insert the new data
 83 |             s, e = self.openfiles[url].handleresponse(response)
 84 |             self.logger.debug("{0!r} --> Range: {1} - {2}".format(url, s, e))
 85 |         else:
 86 |             # A new file!
 87 |             filename = request.uri.split('?', 1)[0].split('/')[-1]
 88 |             if self.name_filter and not self.name_filter.search(filename):
 89 |                 # Filename did not match filter, so get rid of it
 90 |                 return
 91 |             if not filename:
 92 |                 # Assume index.html if there is no filename
 93 |                 filename = "index.html"
 94 |             if self.append_conn:
 95 |                 filename += "_{0}-{1}".format(conn.serverip, conn.clientip)
 96 |             if self.append_ts:
 97 |                 filename += "_{}".format(conn.ts)
 98 |             while os.path.exists(os.path.join(self.outdir, filename)):
 99 |                 filename += "_"
100 |             self.write("New file {} ({})".format(filename, url), **conn.info(), dir_arrow="<-")
101 |             self.openfiles[url] = HTTPFile(os.path.join(self.outdir, filename), self)
102 |             s, e = self.openfiles[url].handleresponse(payload)
103 |             self.logger.debug("{0!r} --> Range: {1} - {2}".format(url, s, e))
104 |         if self.openfiles[url].done():
105 |             self.write("File done {} ({})".format(self.openfiles[url].filename, url), **conn.info(), dir_arrow="<-")
106 |             del self.openfiles[url]
107 | 
108 |         return conn, request, response
109 | 
110 | 
111 | class HTTPFile(object):
112 |     """
113 |     An internal class used to hold metadata for open HTTP files.
114 |     Used mostly to reassemble fragmented transfers.
115 |     """
116 | 
117 |     def __init__(self, filename, plugin_instance):
118 |         self.complete = False
119 |         # Expected size in bytes of full file transfer
120 |         self.size = 0
121 |         # List of tuples indicating byte chunks already received and written to
122 |         # disk
123 |         self.ranges = []
124 |         self.plugin = plugin_instance
125 |         self.filename = filename
126 |         try:
127 |             self.fh = open(filename, 'wb')
128 |         except IOError as e:
129 |             self.plugin.error(
130 |                 "Could not create file {!r}: {!s}".format(filename, e))
131 |             self.fh = None
132 | 
133 |     def __del__(self):
134 |         if self.fh is None:
135 |             return
136 |         self.fh.close()
137 |         if not self.done():
138 |             self.plugin.warning("Incomplete file: {!r}".format(self.filename))
139 |             try:
140 |                 os.rename(self.filename, self.filename + "_INCOMPLETE")
141 |             except:
142 |                 pass
143 |             ls = 0
144 |             le = 0
145 |             for s, e in self.ranges:
146 |                 if s > le + 1:
147 |                     self.plugin.warning(
148 |                         "Missing bytes between {0} and {1}".format(le, s))
149 |                 ls, le = s, e
150 | 
151 |     def handleresponse(self, response):
152 |         # Check for Content Range
153 |         range_start = 0
154 |         range_end = len(response.body) - 1
155 |         if 'content-range' in response.headers:
156 |             m = re.search(
157 |                 r'bytes (\d+)-(\d+)/(\d+|\*)', response.headers['content-range'])
158 |             if m:
159 |                 range_start = int(m.group(1))
160 |                 range_end = int(m.group(2))
161 |                 if len(response.body) < (range_end - range_start + 1):
162 |                     range_end = range_start + len(response.body) - 1
163 |                 try:
164 |                     if int(m.group(3)) > self.size:
165 |                         self.size = int(m.group(3))
166 |                 except:
167 |                     pass
168 |         elif 'content-length' in response.headers:
169 |             try:
170 |                 if int(response.headers['content-length']) > self.size:
171 |                     self.size = int(response.headers['content-length'])
172 |             except:
173 |                 pass
174 |         # Update range tracking
175 |         self.ranges.append((range_start, range_end))
176 |         # Write part of file
177 |         if self.fh is not None:
178 |             self.fh.seek(range_start)
179 |             self.fh.write(response.body)
180 |         return (range_start, range_end)
181 | 
182 |     def done(self):
183 |         self.checkranges()
184 |         return self.complete
185 | 
186 |     def checkranges(self):
187 |         self.ranges.sort()
188 |         current_start = 0
189 |         current_end = 0
190 |         foundgap = False
191 |         # print self.ranges
192 |         for s, e in self.ranges:
193 |             if s <= current_end + 1:
194 |                 current_end = e
195 |             else:
196 |                 foundgap = True
197 |                 current_start = s
198 |                 current_end = e
199 |         if not foundgap:
200 |             if (current_end + 1) >= self.size:
201 |                 self.complete = True
202 |         return foundgap
203 | 
204 | 
205 | if __name__ == "__main__":
206 |     print(DshellPlugin())
207 | 
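The bookkeeping in HTTPFile.checkranges can be illustrated standalone; a rough sketch of the same sort-and-merge idea (not the plugin's exact method, and it treats a short tail as incomplete):

    def is_incomplete(ranges, total_size):
        # ranges is a list of (start, end) byte offsets already written to disk
        current_end = 0
        for start, end in sorted(ranges):
            if start > current_end + 1:
                return True          # gap: bytes missing between current_end and start
            current_end = max(current_end, end)
        return (current_end + 1) < total_size

    print(is_incomplete([(500, 999), (0, 499)], 1000))   # False: chunks cover the whole file
    print(is_incomplete([(0, 499), (600, 999)], 1000))   # True: bytes 500-599 never arrived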


--------------------------------------------------------------------------------
/dshell/plugins/http/web.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Displays basic information for web requests/responses in a connection.
 3 | """
 4 | 
 5 | from dshell.plugins.httpplugin import HTTPPlugin
 6 | from dshell.output.alertout import AlertOutput
 7 | 
 8 | from hashlib import md5
 9 | 
10 | class DshellPlugin(HTTPPlugin):
11 |     def __init__(self):
12 |         super().__init__(
13 |             name="web",
14 |             author="bg,twp",
15 |             description="Displays basic information for web requests/responses in a connection",
16 |             bpf="tcp and (port 80 or port 8080 or port 8000)",
17 |             output=AlertOutput(label=__name__),
18 |             optiondict={
19 |                 "md5": {"action": "store_true",
20 |                         "help": "Calculate MD5 for each response."}
21 |             },
22 |         )
23 | 
24 |     def http_handler(self, conn, request, response):
25 | 
26 |         if request:
27 |             if request.method == "":
28 |                 # It's impossible to have a properly formed HTTP request without a method,
29 |                 # indicating that httpplugin called http_handler without a fully parsed object
30 |                 return None
31 |             # Collect basics about the request, if available
32 |             method = request.method
33 |             host = request.headers.get("host", "")
34 |             uri = request.uri
35 | #            useragent = request.headers.get("user-agent", None)
36 | #            referer = request.headers.get("referer", None)
37 |             version = request.version
38 |         else:
39 |             method = "(no request)"
40 |             host = ""
41 |             uri = ""
42 |             version = ""
43 | 
44 |         if response:
45 |             if response.status == "" and response.reason == "":
46 |                 # Another indication of an improperly parsed HTTP object in httpplugin
47 |                 return None
48 |             # Collect basics about the response, if available
49 |             status = response.status
50 |             reason = response.reason
51 |             if self.md5:
52 |                 hash = "(md5: {})".format(md5(response.body).hexdigest())
53 |             else:
54 |                 hash = ""
55 |         else:
56 |             status = "(no response)"
57 |             reason = ""
58 |             hash = ""
59 | 
60 |         data = "{} {}{} HTTP/{} {} {} {}".format(method,
61 |                                                  host,
62 |                                                  uri,
63 |                                                  version,
64 |                                                  status,
65 |                                                  reason,
66 |                                                  hash)
67 |         if not request:
68 |             self.write(data, method=method, host=host, uri=uri, version=version, status=status, reason=reason, hash=hash, **response.blob.info())
69 |         elif not response:
70 |             self.write(data, method=method, uri=uri, version=version, status=status, reason=reason, hash=hash, **request.headers, **request.blob.info())
71 |         else:
72 |             self.write(data, method=method, uri=uri, version=version, status=status, reason=reason, hash=hash, request_headers=request.headers, response_headers=response.headers, **request.blob.info())
73 |         return conn, request, response
74 | 
75 | if __name__ == "__main__":
76 |     print(DshellPlugin())
77 | 


--------------------------------------------------------------------------------
/dshell/plugins/httpplugin.py:
--------------------------------------------------------------------------------
  1 | """
  2 | This is a base-level plugin intended to handle HTTP connections.
  3 | 
  4 | It inherits from the base ConnectionPlugin and provides a new handler
  5 | function: http_handler(conn, request, response).
  6 | 
  7 | It automatically pairs requests/responses, parses headers, reassembles bodies,
  8 | and collects them into HTTPRequest and HTTPResponse objects that are passed
  9 | to the http_handler.
 10 | """
 11 | 
 12 | import logging
 13 | 
 14 | import dshell.core
 15 | 
 16 | from pypacker.layer567 import http
 17 | 
 18 | import gzip
 19 | import io
 20 | 
 21 | 
 22 | logger = logging.getLogger(__name__)
 23 | 
 24 | 
 25 | def parse_headers(obj, f):
 26 |     """Return dict of HTTP headers parsed from a file object."""
 27 |     # Logic lifted mostly from dpkt's http module
 28 |     d = {}
 29 |     while 1:
 30 |         line = f.readline()
 31 |         line = line.decode('utf-8')
 32 |         line = line.strip()
 33 |         if not line:
 34 |             break
 35 |         l = line.split(None, 1)
 36 |         if not l[0].endswith(':'):
 37 |             raise dshell.core.DataError("Invalid header {!r}".format(line))
 38 |         k = l[0][:-1].lower()
 39 |         v = len(l) != 1 and l[1] or ''
 40 |         if k in d:
 41 |             if not type(d[k]) is list:
 42 |                 d[k] = [d[k]]
 43 |             d[k].append(v)
 44 |         else:
 45 |             d[k] = v
 46 |     return d
 47 | 
 48 | 
 49 | def parse_body(obj, f, headers):
 50 |     """Return HTTP body parsed from a file object, given HTTP header dict."""
 51 |     # Logic lifted mostly from dpkt's http module
 52 |     if headers.get('transfer-encoding', '').lower() == 'chunked':
 53 |         l = []
 54 |         found_end = False
 55 |         while 1:
 56 |             try:
 57 |                 sz = f.readline().split(None, 1)[0]
 58 |             except IndexError:
 59 |                 obj.errors.append(dshell.core.DataError('missing chunk size'))
 60 |                 # FIXME: If this error occurs sz is not available to continue parsing!
 61 |                 #   The appropriate exception should be thrown.
 62 |                 raise
 63 |             n = int(sz, 16)
 64 |             if n == 0:
 65 |                 found_end = True
 66 |             buf = f.read(n)
 67 |             if f.readline().strip():
 68 |                 break
 69 |             if n and len(buf) == n:
 70 |                 l.append(buf)
 71 |             else:
 72 |                 break
 73 |         if not found_end:
 74 |             raise dshell.core.DataError('premature end of chunked body')
 75 |         body = b''.join(l)
 76 |     elif 'content-length' in headers:
 77 |         n = int(headers['content-length'])
 78 |         body = f.read(n)
 79 |         if len(body) != n:
 80 |             obj.errors.append(dshell.core.DataError('short body (missing {} bytes)'.format(n - len(body))))
 81 |     elif 'content-type' in headers:
 82 |         body = f.read()
 83 |     else:
 84 |         # XXX - need to handle HTTP/0.9
 85 |         body = b''
 86 |     return body
 87 | 
 88 | 
 89 | class HTTPRequest(object):
 90 |     """
 91 |     A class for HTTP requests
 92 | 
 93 |     Attributes:
 94 |         blob    : the Blob instance of the request
 95 |         errors  : a list of caught exceptions from parsing
 96 |         method  : the method of the request (e.g. GET, PUT, POST, etc.)
 97 |         uri     : the URI being requested (host not included)
 98 |         version : the HTTP version (e.g. "1.1" for "HTTP/1.1")
 99 |         headers : a dictionary containing the headers and values
100 |         body    : bytestring of the reassembled body, after the headers
101 |     """
102 |     _methods = (
103 |         'GET', 'PUT', 'ICY',
104 |         'COPY', 'HEAD', 'LOCK', 'MOVE', 'POLL', 'POST',
105 |         'BCOPY', 'BMOVE', 'MKCOL', 'TRACE', 'LABEL', 'MERGE',
106 |         'DELETE', 'SEARCH', 'UNLOCK', 'REPORT', 'UPDATE', 'NOTIFY',
107 |         'BDELETE', 'CONNECT', 'OPTIONS', 'CHECKIN',
108 |         'PROPFIND', 'CHECKOUT', 'CCM_POST',
109 |         'SUBSCRIBE', 'PROPPATCH', 'BPROPFIND',
110 |         'BPROPPATCH', 'UNCHECKOUT', 'MKACTIVITY',
111 |         'MKWORKSPACE', 'UNSUBSCRIBE', 'RPC_CONNECT',
112 |         'VERSION-CONTROL',
113 |         'BASELINE-CONTROL'
114 |         )
115 | 
116 |     def __init__(self, blob):
117 |         self.errors = []
118 |         self.headers = {}
119 |         self.body = b''
120 |         self.blob = blob
121 |         data = io.BytesIO(blob.data)
122 |         rawline = data.readline()
123 |         try:
124 |             line = rawline.decode('utf-8')
125 |         except UnicodeDecodeError:
126 |             line = ''
127 |         l = line.strip().split()
128 |         if len(l) != 3 or l[0] not in self._methods or not l[2].startswith('HTTP'):
129 |             self.errors.append(dshell.core.DataError('invalid HTTP request: {!r}'.format(rawline)))
130 |             self.method = ''
131 |             self.uri = ''
132 |             self.version = ''
133 |             return
134 |         else:
135 |             self.method = l[0]
136 |             self.uri = l[1]
137 |             self.version = l[2][5:]
138 |         self.headers = parse_headers(self, data)
139 |         self.body = parse_body(self, data, self.headers)
140 | 
141 | 
142 | class HTTPResponse(object):
143 |     """
144 |     A class for HTTP responses
145 | 
146 |     Attributes:
147 |         blob    : the Blob instance of the response
148 |         errors  : a list of caught exceptions from parsing
149 |         version : the HTTP version (e.g. "1.1" for "HTTP/1.1")
150 |         status  : the status code of the response (e.g. "200" or "304")
151 |         reason  : the status text of the response (e.g. "OK" or "Not Modified")
152 |         headers : a dictionary containing the headers and values
153 |         body    : bytestring of the reassembled body, after the headers
154 |     """
155 |     def __init__(self, blob):
156 |         self.errors = []
157 |         self.headers = {}
158 |         self.body = b''
159 |         self.blob = blob
160 |         data = io.BytesIO(blob.data)
161 |         rawline = data.readline()
162 |         try:
163 |             line = rawline.decode('utf-8')
164 |         except UnicodeDecodeError:
165 |             line = ''
166 |         l = line.strip().split(None, 2)
167 |         if len(l) < 2 or not l[0].startswith("HTTP") or not l[1].isdigit():
168 |             self.errors.append(dshell.core.DataError('invalid HTTP response: {!r}'.format(rawline)))
169 |             self.version = ''
170 |             self.status = ''
171 |             self.reason = ''
172 |             return
173 |         else:
174 |             self.version = l[0][5:]
175 |             self.status = l[1]
176 |             self.reason = l[2]
177 |         self.headers = parse_headers(self, data)
178 |         self.body = parse_body(self, data, self.headers)
179 | 
180 |     def decompress_gzip_content(self):
181 |         """
182 |         If this response has Content-Encoding set to something with "gzip",
183 |         this function will decompress it and store it in the body.
184 |         """
185 |         if "gzip" in self.headers.get("content-encoding", ""):
186 |             try:
187 |                 iobody = io.BytesIO(self.body)
188 |             except TypeError as e:
189 |                 # TODO: Why would body ever not be bytes? If it's not bytes, then that means
190 |                 #   we have a bug somewhere in the code and therefore should just allow the
191 |                 #   original exception to be raised.
192 |                 self.errors.append(dshell.core.DataError("Body was not a byte string ({!s}). Could not decompress.".format(type(self.body))))
193 |                 return
194 |             try:
195 |                 self.body = gzip.GzipFile(fileobj=iobody).read()
196 |             except OSError as e:
197 |                 self.errors.append(OSError("Could not gunzip body. {!s}".format(e)))
198 |                 return
199 | 
200 | 
201 | class HTTPPlugin(dshell.core.ConnectionPlugin):
202 | 
203 |     def __init__(self, **kwargs):
204 |         super().__init__(**kwargs)
205 |         # Use "gunzip" argument to automatically decompress gzipped responses
206 |         self.gunzip = kwargs.get("gunzip", False)
207 | 
208 |     def connection_handler(self, conn):
209 |         """
210 |         Goes through each Blob in a Connection, assuming they appear in pairs
211 |         of requests and responses, and builds HTTPRequest and HTTPResponse
212 |         objects.
213 | 
214 |         After a response (or only a request at the end of a connection),
215 |         http_handler is called. If it returns nothing, the respective blobs
216 |         are marked as hidden so they won't be passed to additional plugins.
217 |         """
218 |         request = None
219 |         response = None
220 |         for blob in conn.blobs:
221 |             # blob.reassemble(allow_overlap=True, allow_padding=True)
222 |             if not blob.data:
223 |                 continue
224 |             if blob.direction == 'cs':
225 |                 # client-to-server request
226 |                 request = HTTPRequest(blob)
227 |                 for req_error in request.errors:
228 |                     self.debug("Request Error: {!r}".format(req_error))
229 |             elif blob.direction == 'sc':
230 |                 # server-to-client response
231 |                 response = HTTPResponse(blob)
232 |                 for rep_error in response.errors:
233 |                     self.debug("Response Error: {!r}".format(rep_error))
234 |                 if self.gunzip:
235 |                     response.decompress_gzip_content()
236 |                 http_handler_out = self.http_handler(conn=conn, request=request, response=response)
237 |                 if not http_handler_out:
238 |                     if request:
239 |                         request.blob.hidden = True
240 |                     if response:
241 |                         response.blob.hidden = True
242 |                 request = None
243 |                 response = None
244 |         if request and not response:
245 |             http_handler_out = self.http_handler(conn=conn, request=request, response=None)
246 |             if not http_handler_out:
247 |                 blob.hidden = True
248 |         return conn
249 | 
250 |     def http_handler(self, conn, request, response):
251 |         """
252 |         A placeholder.
253 | 
254 |         Plugins can override this to perform custom activities
255 |         on HTTP data.
256 | 
257 |         It SHOULD return a tuple containing the same types of values that came
258 |         in as arguments (i.e. return (conn, request, response)) or None. This
259 |         is mostly a consistency thing. Realistically, it only needs to return
260 |         some value that evaluates to True to pass the Blobs along to additional
261 |         plugins.
262 | 
263 |         Arguments:
264 |             conn:       a Connection object
265 |             request:    a HTTPRequest object
266 |             response:   a HTTPResponse object
267 |         """
268 |         return conn, request, response
269 | 
270 | DshellPlugin = None
271 | 
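A minimal sketch of a plugin built on this base class; the plugin name, author, and alert text are illustrative only, but the constructor keywords and the http_handler contract follow the plugins above:

    from dshell.plugins.httpplugin import HTTPPlugin
    from dshell.output.alertout import AlertOutput

    class DshellPlugin(HTTPPlugin):
        def __init__(self):
            super().__init__(
                name="hostlogger",                      # hypothetical plugin name
                author="n/a",
                description="Log the Host header of each HTTP request",
                bpf="tcp and (port 80 or port 8080 or port 8000)",
                output=AlertOutput(label=__name__),
                gunzip=True)                            # let HTTPPlugin gunzip response bodies

        def http_handler(self, conn, request, response):
            if request:
                self.write(request.headers.get("host", conn.serverip), **conn.info())
            # returning the arguments keeps the blobs visible to chained plugins
            return conn, request, response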


--------------------------------------------------------------------------------
/dshell/plugins/malware/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/malware/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/malware/sweetorange.py:
--------------------------------------------------------------------------------
 1 | """
 2 | 2015 Feb 13
 3 | 
 4 | Sometimes, attackers will try to obfuscate links to the Sweet Orange exploit
 5 | kit. This plugin is an attempt to decode that sort of traffic.
 6 | 
 7 | It will use a regular expression to try and detect certain variable names that
 8 | can be contained in JavaScript code. It will then take the value assigned to
 9 | it and decode the domain address hidden inside the value.
10 | 
11 | Samples:
12 | http://malware-traffic-analysis.net/2014/10/27/index2.html
13 | http://malware-traffic-analysis.net/2014/10/03/index.html
14 | http://malware-traffic-analysis.net/2014/09/25/index.html
15 | """
16 | 
17 | import re
18 | 
19 | from dshell.output.alertout import AlertOutput
20 | from dshell.plugins.httpplugin import HTTPPlugin
21 | 
22 | class DshellPlugin(HTTPPlugin):
23 | 
24 |     def __init__(self):
25 |         super().__init__(
26 |             name="sweetorange",
27 |             longdescription="Used to decode certain variants of the Sweet Orange exploit kit redirect traffic. Looks for telltale Javascript variable names (e.g. 'ajax_data_source' and 'main_request_data_content') and automatically decodes the exploit landing page contained.",
28 |             description="Used to decode certain variants of the Sweet Orange exploit kit redirect traffic",
29 |             bpf="tcp and (port 80 or port 8080 or port 8000)",
30 |             output=AlertOutput(label=__name__),
31 |             author="dev195",
32 |             gunzip=True,
33 |             optiondict={
34 |                 "variable": {
35 |                     "type": str,
36 |                     "action": "append",
37 |                     "help": 'Variable names to search for. Default ("ajax_data_source", "main_request_data_content")',
38 |                     "default": ["ajax_data_source", "main_request_data_content"]
39 |                 },
40 |                 "color": {
41 |                     "action": "store_true",
42 |                     "help": "Display encoded/decoded lines in different TTY colors.",
43 |                     "default": False
44 |                 },
45 |             }
46 |         )
47 | 
48 | 
49 |     def premodule(self):
50 |         self.sig_regex = re.compile(
51 |             r"var (" + '|'.join(map(re.escape, self.variable)) + ")='(.*?)';")
52 |         self.hexregex = re.compile(r'[^a-fA-F0-9]')
53 |         self.logger.debug('Variable regex: "%s"' % self.sig_regex.pattern)
54 | 
55 |     def http_handler(self, conn, request, response):
56 |         try:
57 |             response_body = response.body.decode("ascii")
58 |         except UnicodeError:
59 |             return
60 |         except AttributeError:
61 |             return
62 | 
63 |         if response and any([v in response_body for v in self.variable]):
64 |             # Take the variable's value, extract the hex characters, and
65 |             # convert to ASCII
66 |             matches = self.sig_regex.search(response_body)
67 |             try:
68 |                 hidden = matches.groups()[1]
69 |                 match = bytes.fromhex(self.hexregex.sub('', hidden))
70 |                 match = match.decode('utf-8')
71 |             except:
72 |                 return
73 |             if self.color:
74 |                 # If desired, add TTY colors to the alerts for differentiation
75 |                 # between encoded/decoded strings
76 |                 hidden = "\x1b[37;2m%s\x1b[0m" % hidden
77 |                 match = "\x1b[32m%s\x1b[0m" % match
78 | 
79 |             self.logger.info(hidden)
80 |             self.write(match, **conn.info())
81 |             return (conn, request, response)
82 | 
83 | 
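The decode step above (strip non-hex characters, then hex-decode) in isolation; the obfuscated sample is hypothetical:

    import re

    hidden = "68@74@74@70@3a@2f@2f"                       # hypothetical obfuscated value
    print(bytes.fromhex(re.sub(r'[^a-fA-F0-9]', '', hidden)).decode('utf-8'))
    # http://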


--------------------------------------------------------------------------------
/dshell/plugins/misc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/misc/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/misc/followstream.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Generates color-coded Screen/HTML output similar to Wireshark Follow Stream
 3 | """
 4 | 
 5 | import dshell.core
 6 | from dshell.output.colorout import ColorOutput
 7 | 
 8 | class DshellPlugin(dshell.core.ConnectionPlugin):
 9 | 
10 |     def __init__(self):
11 |         super().__init__(
12 |             name="Followstream",
13 |             author="amm/dev195",
14 |             description="Generates color-coded Screen/HTML output similar to Wireshark Follow Stream. Empty connections will be skipped.",
15 |             bpf="tcp",
16 |             output=ColorOutput(label=__name__),
17 |         )
18 | 
19 |     def connection_handler(self, conn):
20 |         if conn.totalbytes > 0:
21 |             self.write(conn, **conn.info())
22 |             return conn
23 | 
24 | if __name__ == "__main__":
25 |     print(DshellPlugin())
26 | 


--------------------------------------------------------------------------------
/dshell/plugins/misc/pcapwriter.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Generates pcap output
 3 | 
 4 | Can be used alone or chained at the end of plugins for a kind of filter.
 5 | 
 6 | Use --pcapwriter_outfile to separate its output from that of other plugins.
 7 | 
 8 | Example uses include:
 9 |  - merging multiple pcap files into one
10 |    (decode -d pcapwriter ~/pcap/* >merged.pcap)
11 |  - saving relevant traffic by chaining with another plugin
12 |    (decode -d track+pcapwriter --track_source=192.168.1.1 --pcapwriter_outfile=merged.pcap ~/pcap/*)
13 |  - getting pcap output from plugins that can't use pcapout
14 |    (decode -d web+pcapwriter ~/pcap/*)
15 | """
16 | 
17 | import struct
18 | 
19 | import dshell.core
20 | 
21 | class DshellPlugin(dshell.core.PacketPlugin):
22 | 
23 |     def __init__(self, *args, **kwargs):
24 |         super().__init__(
25 |             name="pcap writer",
26 |             description="Used to generate pcap output for plugins that can't use -o pcapout",
27 |             longdescription="""Generates pcap output
28 | 
29 | Can be used alone or chained at the end of plugins for a kind of filter.
30 | 
31 | Use --pcapwriter_outfile to separate its output from that of other plugins.
32 | 
33 | Example uses include:
34 |  - merging multiple pcap files into one (decode -d pcapwriter ~/pcap/* --pcapwriter_outfile=merged.pcap)
35 |  - saving relevant traffic by chaining with another plugin (decode -d track+pcapwriter --track_source=192.168.1.1 --pcapwriter_outfile=merged.pcap ~/pcap/*)
36 |  - getting pcap output from plugins that can't use pcapout (decode -d web+pcapwriter ~/pcap/*)
37 | """,
38 |             author="dev195",
39 |             optiondict={
40 |                 "outfile": {
41 |                     "type": str,
42 |                     "help": "Write to FILE instead of stdout",
43 |                     "metavar": "FILE",
44 |                 }
45 |             }
46 |         )
47 |         self.outfile = None  # Filled in with constructor
48 |         self.pcap_fh = None
49 | 
50 |     def prefile(self, infile=None):
51 |         # Default to setting pcap output filename based on first input file.
52 |         if not self.outfile:
53 |             self.outfile = (infile or self.current_pcap_file) + ".pcap"
54 | 
55 |     def packet_handler(self, packet: dshell.Packet):
56 |         # If we don't have a pcap file handle, this is our first packet.
57 |         # Create the output pcap file handle.
58 |         # NOTE: We want to create the file on the first packet instead of premodule so we
59 |         #   have a chance to use the input file as part of our output filename.
60 |         if not self.pcap_fh:
61 |             self.pcap_fh = open(self.outfile, mode="wb")
62 |             link_layer_type = self.link_layer_type or 1
63 |             # write the header:
64 |             # magic_number, version_major, version_minor, thiszone, sigfigs,
65 |             # snaplen, link-layer type
66 |             self.pcap_fh.write(
67 |                 struct.pack('IHHIIII', 0xa1b2c3d4, 2, 4, 0, 0, 65535, link_layer_type))
68 | 
69 |         ts = packet.ts
70 |         rawpkt = packet.rawpkt
71 |         pktlen = packet.pktlen
72 |         self.pcap_fh.write(struct.pack('II', int(ts), int((ts - int(ts)) * 1000000)))
73 |         self.pcap_fh.write(struct.pack('II', len(rawpkt), pktlen))
74 |         self.pcap_fh.write(rawpkt)
75 | 
76 |         return packet
77 | 
78 |     def postmodule(self):
79 |         if self.pcap_fh:
80 |             self.pcap_fh.close()
81 | 
82 | 
83 | if __name__ == "__main__":
84 |     print(DshellPlugin())
85 | 
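A sketch of reading back the 24-byte global header written above; the filename is hypothetical, and since 'I'/'H' use native byte order this matches files written on the same host:

    import struct

    with open("merged.pcap", "rb") as fh:                 # hypothetical output file
        magic, vmaj, vmin, thiszone, sigfigs, snaplen, linktype = struct.unpack(
            "IHHIIII", fh.read(24))
    print(hex(magic), vmaj, vmin, snaplen, linktype)
    # 0xa1b2c3d4 2 4 65535 1   (assuming the default link-layer type of 1)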


--------------------------------------------------------------------------------
/dshell/plugins/misc/search.py:
--------------------------------------------------------------------------------
 1 | import dshell.core
 2 | from dshell.util import printable_text
 3 | from dshell.output.alertout import AlertOutput
 4 | 
 5 | import re
 6 | import sys
 7 | 
 8 | class DshellPlugin(dshell.core.ConnectionPlugin):
 9 | 
10 |     def __init__(self):
11 |         super().__init__(
12 |             name="search",
13 |             author="dev195",
14 |             bpf="tcp or udp",
15 |             description="Search for patterns in connections",
16 |             longdescription="""
17 | Reconstructs streams and searches the content for a user-provided regular
18 | expression. Requires definition of the --search_expression argument. Additional
19 | options can be provided to alter behavior.
20 |             """,
21 |             output=AlertOutput(label=__name__),
22 |             optiondict={
23 |                 "expression": {
24 |                     "help": "Search expression",
25 |                     "type": str,
26 |                     "metavar": "REGEX"},
27 |                 "ignorecase": {
28 |                     "help": "Ignore case when searching",
29 |                     "action": "store_true"},
30 |                 "invert": {
31 |                     "help": "Return connections that DO NOT match expression",
32 |                     "action": "store_true"},
33 |                 "quiet": {
34 |                     "help": "Do not display matches from this plugin. Useful when chaining plugins.",
35 |                     "action": "store_true"}
36 |             })
37 | 
38 | 
39 | 
40 |     def premodule(self):
41 |         # make sure the user actually provided an expression to search for
42 |         if not self.expression:
43 |             self.error("Must define an expression to search for using --search_expression")
44 |             sys.exit(1)
45 | 
46 |         # define the regex flags, based on arguments
47 |         re_flags = 0
48 |         if self.ignorecase:
49 |             re_flags = re_flags | re.IGNORECASE
50 | 
51 |         # Create the regular expression
52 |         try:
53 |             # convert expression to bytes so it can accurately compare to
54 |             # the connection data (which is also of type bytes)
55 |             byte_expression = bytes(self.expression, 'utf-8')
56 |             self.regex = re.compile(byte_expression, re_flags)
57 |         except Exception as e:
58 |             self.error("Could not compile regex ({0})".format(e))
59 |             sys.exit(1)
60 | 
61 | 
62 | 
63 |     def connection_handler(self, conn):
64 |         """
65 |         Go through the data of each connection.
66 |         If anything is a hit, return the entire connection.
67 |         """
68 | 
69 |         match_found = False
70 |         for blob in conn.blobs:
71 |             for line in blob.data.splitlines():
72 |                 match = self.regex.search(line)
73 |                 if match and self.invert:
74 |                     return None
75 |                 elif match and not self.invert:
76 |                     match_found = True
77 |                     if not self.quiet:
78 |                         if blob.sip == conn.sip:
79 |                             self.write(printable_text(line, False), **conn.info(), dir_arrow="->")
80 |                         else:
81 |                             self.write(printable_text(line, False), **conn.info(), dir_arrow="<-")
82 |                 elif self.invert and not match:
83 |                     if not self.quiet:
84 |                         self.write(**conn.info())
85 |                     return conn
86 |         if match_found:
87 |             return conn
88 | 
89 | 
90 | 
91 | if __name__ == "__main__":
92 |     print(DshellPlugin())
93 | 
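A sketch of the byte-pattern compilation above with a hypothetical expression; the pattern is converted to bytes so it can match raw connection data:

    import re

    expression = "(?:login|passwd)="                      # hypothetical --search_expression value
    regex = re.compile(bytes(expression, 'utf-8'), re.IGNORECASE)
    print(bool(regex.search(b"POST /cgi?LOGIN=admin&passwd=hunter2")))
    # True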


--------------------------------------------------------------------------------
/dshell/plugins/misc/sslalerts.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Looks for SSL alert messages
  3 | """
  4 | 
  5 | # handy reference:
  6 | # http://blog.fourthbit.com/2014/12/23/traffic-analysis-of-an-ssl-slash-tls-session
  7 | 
  8 | import dshell.core
  9 | from dshell.output.alertout import AlertOutput
 10 | 
 11 | import hashlib
 12 | import io
 13 | import struct
 14 | from pprint import pprint
 15 | 
 16 | # SSLv3/TLS version
 17 | SSL3_VERSION = 0x0300
 18 | TLS1_VERSION = 0x0301
 19 | TLS1_1_VERSION = 0x0302
 20 | TLS1_2_VERSION = 0x0303
 21 | 
 22 | # Record type
 23 | SSL3_RT_CHANGE_CIPHER_SPEC = 20
 24 | SSL3_RT_ALERT             = 21
 25 | SSL3_RT_HANDSHAKE         = 22
 26 | SSL3_RT_APPLICATION_DATA  = 23
 27 | 
 28 | # Handshake message type
 29 | SSL3_MT_HELLO_REQUEST           = 0
 30 | SSL3_MT_CLIENT_HELLO            = 1
 31 | SSL3_MT_SERVER_HELLO            = 2
 32 | SSL3_MT_CERTIFICATE             = 11
 33 | SSL3_MT_SERVER_KEY_EXCHANGE     = 12
 34 | SSL3_MT_CERTIFICATE_REQUEST     = 13
 35 | SSL3_MT_SERVER_DONE             = 14
 36 | SSL3_MT_CERTIFICATE_VERIFY      = 15
 37 | SSL3_MT_CLIENT_KEY_EXCHANGE     = 16
 38 | SSL3_MT_FINISHED                = 20
 39 | 
 40 | alert_types = {
 41 |     0x00: "CLOSE_NOTIFY",
 42 |     0x0a: "UNEXPECTED_MESSAGE",
 43 |     0x14: "BAD_RECORD_MAC",
 44 |     0x15: "DECRYPTION_FAILED",
 45 |     0x16: "RECORD_OVERFLOW",
 46 |     0x1e: "DECOMPRESSION_FAILURE",
 47 |     0x28: "HANDSHAKE_FAILURE",
 48 |     0x29: "NO_CERTIFICATE",
 49 |     0x2a: "BAD_CERTIFICATE",
 50 |     0x2b: "UNSUPPORTED_CERTIFICATE",
 51 |     0x2c: "CERTIFICATE_REVOKED",
 52 |     0x2d: "CERTIFICATE_EXPIRED",
 53 |     0x2e: "CERTIFICATE_UNKNOWN",
 54 |     0x2f: "ILLEGAL_PARAMETER",
 55 |     0x30: "UNKNOWN_CA",
 56 |     0x31: "ACCESS_DENIED",
 57 |     0x32: "DECODE_ERROR",
 58 |     0x33: "DECRYPT_ERROR",
 59 |     0x3c: "EXPORT_RESTRICTION",
 60 |     0x46: "PROTOCOL_VERSION",
 61 |     0x47: "INSUFFICIENT_SECURITY",
 62 |     0x50: "INTERNAL_ERROR",
 63 |     0x5a: "USER_CANCELLED",
 64 |     0x64: "NO_RENEGOTIATION",
 65 | }
 66 | 
 67 | alert_severities = {
 68 |     0x01: "warning",
 69 |     0x02: "fatal",
 70 | }
 71 | 
 72 | class DshellPlugin(dshell.core.ConnectionPlugin):
 73 | 
 74 |     def __init__(self):
 75 |         super().__init__(
 76 |             name="sslalerts",
 77 |             author="dev195",
 78 |             bpf="tcp and (port 443 or port 993 or port 1443 or port 8531)",
 79 |             description="Looks for SSL alert messages",
 80 |             output=AlertOutput(label=__name__),
 81 |         )
 82 | 
 83 |     def blob_handler(self, conn, blob):
 84 |         data = io.BytesIO(blob.data)
 85 |         alert_seen = False
 86 |         # Iterate over each TLS record in the blob, looking for alert messages
 87 |         while True:
 88 |             try:
 89 |                 content_type, proto_version, record_len = struct.unpack("!BHH", data.read(5))
 90 |             except struct.error:
 91 |                 break
 92 |             if proto_version not in (SSL3_VERSION, TLS1_VERSION, TLS1_1_VERSION, TLS1_2_VERSION):
 93 |                 return None
 94 |             # read the full record body; this also skips past non-alert record types
 95 |             record_body = data.read(record_len)
 96 |             if content_type == SSL3_RT_ALERT and len(record_body) >= 2:
 97 |                 # an unencrypted alert body is two bytes: severity and description
 98 |                 severity, alert_type = struct.unpack("!BB", record_body[:2])
 99 |                 if severity not in alert_severities:
100 |                     continue
101 |                 severity_msg = alert_severities.get(severity, severity)
102 |                 alert_msg = alert_types.get(alert_type, str(alert_type))
103 |                 self.write("SSL alert: ({}) {}".format(severity_msg, alert_msg), **conn.info())
104 |                 alert_seen = True
105 | 
106 |         if alert_seen:
107 |             return conn, blob
108 | 
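A sketch of the record parsing above on a hand-built TLSv1 fatal handshake_failure alert, reusing the module's constants and lookup tables; real post-handshake alerts are usually encrypted, so plaintext alerts like this mostly appear during the handshake:

    import struct

    record = struct.pack("!BHH", SSL3_RT_ALERT, TLS1_VERSION, 2) + struct.pack("!BB", 0x02, 0x28)
    content_type, proto_version, record_len = struct.unpack("!BHH", record[:5])
    severity, alert_type = struct.unpack("!BB", record[5:7])
    print(alert_severities[severity], alert_types[alert_type])
    # fatal HANDSHAKE_FAILURE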


--------------------------------------------------------------------------------
/dshell/plugins/misc/synrst.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Detects failed attempts to connect (SYN followed by RST/ACK)
 3 | """
 4 | 
 5 | import dshell.core
 6 | from dshell.output.alertout import AlertOutput
 7 | 
 8 | from pypacker.layer4 import tcp
 9 | 
10 | class DshellPlugin(dshell.core.PacketPlugin):
11 | 
12 |     def __init__(self):
13 |         super().__init__(
14 |             name="SYN/RST",
15 |             description="Detects failed attempts to connect (SYN followed by RST/ACK)",
16 |             author="bg",
17 |             bpf="(ip and (tcp[13]=2 or tcp[13]=20)) or (ip6 and tcp)",
18 |             output=AlertOutput(label=__name__)
19 |         )
20 | 
21 |     def premodule(self):
22 |         # Cache to hold SYNs waiting to pair with RST/ACKs
23 |         self.tracker = {}
24 | 
25 |     def packet_handler(self, pkt):
26 |         # Check if SYN or RST/ACK. Discard non-matches.
27 |         if pkt.tcp_flags not in (tcp.TH_SYN, tcp.TH_RST|tcp.TH_ACK):
28 |             return
29 | 
30 |         # Try to find the TCP layer
31 |         tcpp = pkt.pkt.upper_layer
32 |         while not isinstance(tcpp, tcp.TCP):
33 |             try:
34 |                 tcpp = tcpp.upper_layer
35 |             except AttributeError:
36 |                 # There doesn't appear to be a TCP layer, for some reason
37 |                 return
38 | 
39 |         if tcpp.flags == tcp.TH_SYN:
40 |             seqnum = tcpp.seq
41 |             key = "{}|{}|{}|{}|{}".format(
42 |                 pkt.sip, pkt.sport, seqnum, pkt.dip, pkt.dport)
43 |             self.tracker[key] = pkt
44 |         elif tcpp.flags == tcp.TH_RST|tcp.TH_ACK:
45 |             acknum = tcpp.ack - 1
46 |             tmpkey = "{}|{}|{}|{}|{}".format(
47 |                 pkt.dip, pkt.dport, acknum, pkt.sip, pkt.sport)
48 |             if tmpkey in self.tracker:
49 |                 msg = "Failed connection [initiated by {}]".format(pkt.dip)
50 |                 self.write(msg, **pkt.info())
51 |                 oldpkt = self.tracker[tmpkey]
52 |                 del self.tracker[tmpkey]
53 |                 return [oldpkt, pkt]
54 | 
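A sketch of the pairing keys above with hypothetical addresses: the RST/ACK acknowledges the SYN's sequence number plus one, so subtracting one lines the two keys up:

    syn_key = "{}|{}|{}|{}|{}".format("10.0.0.2", 51234, 1000, "10.0.0.1", 80)
    rst_ack = 1001                                        # ack number from the RST/ACK reply
    rst_key = "{}|{}|{}|{}|{}".format("10.0.0.2", 51234, rst_ack - 1, "10.0.0.1", 80)
    print(syn_key == rst_key)
    # True, so the failed connection would be reported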


--------------------------------------------------------------------------------
/dshell/plugins/misc/xor.py:
--------------------------------------------------------------------------------
  1 | """
  2 | XOR the data in every packet with a user-provided key. Multiple keys can be used
  3 | for different data directions.
  4 | """
  5 | 
  6 | import struct
  7 | 
  8 | import dshell.core
  9 | import dshell.util
 10 | from dshell.output.output import Output
 11 | 
 12 | class DshellPlugin(dshell.core.ConnectionPlugin):
 13 |     def __init__(self):
 14 |         super().__init__(
 15 |             name="xor",
 16 |             description="XOR every packet with a given key",
 17 |             output=Output(label=__name__),
 18 |             bpf="tcp",
 19 |             author="twp,dev195",
 20 |             optiondict={
 21 |                 "key": {
 22 |                     "type": str,
 23 |                     "default": "0xff",
 24 |                     "help": "xor key in hex format (default: 0xff)",
 25 |                     "metavar": "0xHH"
 26 |                 },
 27 |                 "cskey": {
 28 |                     "type": str,
 29 |                     "default": None,
 30 |                     "help": "xor key to use for client-to-server data (default: None)",
 31 |                     "metavar": "0xHH"
 32 |                 },
 33 |                 "sckey": {
 34 |                     "type": str,
 35 |                     "default": None,
 36 |                     "help": "xor key to use for server-to-client data (default: None)",
 37 |                     "metavar": "0xHH"
 38 |                 },
 39 |                 "resync": {
 40 |                     "action": "store_true",
 41 |                     "help": "resync the key index if the key is seen in the data"
 42 |                 }
 43 |             }
 44 |         )
 45 | 
 46 |     def __make_key(self, key):
 47 |         "Convert a user-provided key into a standard format the plugin can use."
 48 |         if key.startswith("0x") or key.startswith("\\x"):
 49 |             # Convert a hex key
 50 |             oldkey = key[2:]
 51 |             newkey = b''
 52 |             for i in range(0, len(oldkey), 2):
 53 |                 try:
 54 |                     newkey += struct.pack('B', int(oldkey[i:i + 2], 16))
 55 |                 except ValueError as e:
 56 |                     self.logger.warning("Error converting hex. Will treat as raw string. - {!s}".format(e))
 57 |                     newkey = key.encode('ascii')
 58 |                     break
 59 |         else:
 60 |             try:
 61 |                 # See if it's a numeric key
 62 |                 newkey = int(key)
 63 |                 newkey = struct.pack('I', newkey)
 64 |             except ValueError:
 65 |                 # otherwise, convert string key to bytes as it is
 66 |                 newkey = key.encode('ascii')
 67 |         self.logger.debug("__make_key: {!r} -> {!r}".format(key, newkey))
 68 |         return newkey
 69 | 
 70 |     def premodule(self):
 71 |         self.key = self.__make_key(self.key)
 72 |         if self.cskey:
 73 |             self.cskey = self.__make_key(self.cskey)
 74 |         if self.sckey:
 75 |             self.sckey = self.__make_key(self.sckey)
 76 | 
 77 |     def connection_handler(self, conn):
 78 |         for blob in conn.blobs:
 79 |             key_index = 0
 80 |             if self.sckey and blob.direction == 'sc':
 81 |                 key = self.sckey
 82 |             elif self.cskey and blob.direction == 'cs':
 83 |                 key = self.cskey
 84 |             else:
 85 |                 key = self.key
 86 |             for pkt in blob.packets:
 87 |                 # grab the data from the TCP layer and down
 88 |                 data = pkt.data
 89 |                 # data = pkt.pkt.upper_layer.upper_layer.body_bytes
 90 |                 self.logger.debug("Original:\n{}".format(dshell.util.hex_plus_ascii(data)))
 91 |                 # XOR the data and store it in new_data
 92 |                 new_data = b''
 93 |                 for i in range(len(data)):
 94 |                     if self.resync and data[i:i + len(key)] == key:
 95 |                         key_index = 0
 96 |                     x = data[i] ^ key[key_index]
 97 |                     new_data += struct.pack('B', x)
 98 |                     key_index = (key_index + 1) % len(key)
 99 |                 pkt.data = new_data
100 |                 # # rebuild the packet by adding together each of the layers
101 |                 # pkt.rawpkt = pkt.pkt.header_bytes + pkt.pkt.upper_layer.header_bytes + pkt.pkt.upper_layer.upper_layer.header_bytes + new_data
102 |                 self.logger.debug("New:\n{}".format(dshell.util.hex_plus_ascii(new_data)))
103 |         return conn
104 | 
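The core of connection_handler is a repeating-key XOR with an optional key-index reset. A stripped-down, standalone sketch of that loop, using a made-up key and payload:

import struct

def xor_bytes(data, key, resync=False):
    """Repeating-key XOR; optionally restart the key whenever it appears in the data."""
    out = b""
    key_index = 0
    for i in range(len(data)):
        if resync and data[i:i + len(key)] == key:
            key_index = 0
        out += struct.pack("B", data[i] ^ key[key_index])
        key_index = (key_index + 1) % len(key)
    return out

cipher = xor_bytes(b"hello", b"\xff")
print(xor_bytes(cipher, b"\xff"))  # -> b'hello' (XOR with the same key is its own inverse)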


--------------------------------------------------------------------------------
/dshell/plugins/nbns/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/nbns/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/nbns/nbns.py:
--------------------------------------------------------------------------------
  1 | """
  2 | NBNS plugin
  3 | """
  4 | 
  5 | from struct import unpack
  6 | 
  7 | import dshell.core
  8 | from dshell.output.alertout import AlertOutput
  9 | 
 10 | # A few common NBNS Protocol Info Opcodes
 11 | # Due to a typo in RFC 1002, 0x9 is also acceptable, but rarely used 
 12 | #   for 'NetBios Refresh'
 13 | # 'NetBios Multi-Homed Name Registration' (0xf) was added after the RFC
 14 | nbns_op = { 0: 'NB_NAME_QUERY', 
 15 |             5: 'NB_REGISTRATION',
 16 |             6: 'NB_RELEASE', 
 17 |             7: 'NB_WACK',
 18 |             8: 'NB_REFRESH',
 19 |             9: 'NB_REFRESH', 
 20 |             15: 'NB_MULTI_HOME_REG' }
 21 | 
 22 | 
 23 | class DshellPlugin(dshell.core.PacketPlugin):
 24 |     def __init__(self):
 25 |         super().__init__(   name='nbns',
 26 |                             description='Extract client information from NBNS traffic',
 27 |                             longdescription="""
 28 | The nbns (NetBIOS Name Service) plugin will extract the Transaction ID, Protocol Info, 
 29 | Client Hostname, and Client MAC address from every UDP NBNS packet found in the given 
 30 | pcap using port 137.  UDP is the standard transport protocol for NBNS traffic.
 31 | This filter pulls pertinent information from NBNS packets.
 32 | 
 33 | Examples:
 34 | 
 35 |     General usage:
 36 | 
 37 |         decode -d nbns <pcap>
 38 | 
 39 |             This will display the connection info including the timestamp,
 40 |             the source IP, destination IP, Transaction ID, Protocol Info,
 41 |             Client Hostname, and the Client MAC address in a tabular format.
 42 | 
 43 | 
 44 |     Malware Traffic Analysis Exercise Traffic from 2014-12-08 where a user was hit with a Fiesta exploit kit:
 45 |         <http://www.malware-traffic-analysis.net/2014/12/08/2014-12-08-traffic-analysis-exercise.pcap>
 46 |     We want to find out more about the infected machine, and some of this information can be pulled from NBNS traffic
 47 | 
 48 |         decode -d nbns 2014-12-08-traffic-analysis-exercise.pcap
 49 | 
 50 |           OUTPUT (first few packets):
 51 |             [nbns] 2014-12-08 18:19:13  192.168.204.137:137   ->    192.168.204.2:137   ** 
 52 |                     Transaction ID:         0xb480   
 53 |                     Info:                   NB_NAME_QUERY    
 54 |                     Client Hostname:        WPAD             
 55 |                     Client MAC:             00:0C:29:9D:B8:6D 
 56 |              **
 57 |             [nbns] 2014-12-08 18:19:14  192.168.204.137:137   ->    192.168.204.2:137   ** 
 58 |                     Transaction ID:         0xb480   
 59 |                     Info:                   NB_NAME_QUERY    
 60 |                     Client Hostname:        WPAD             
 61 |                     Client MAC:             00:0C:29:9D:B8:6D 
 62 |              **
 63 |             [nbns] 2014-12-08 18:19:16  192.168.204.137:137   ->    192.168.204.2:137   ** 
 64 |                     Transaction ID:         0xb480   
 65 |                     Info:                   NB_NAME_QUERY    
 66 |                     Client Hostname:        WPAD             
 67 |                     Client MAC:             00:0C:29:9D:B8:6D 
 68 |              **
 69 |             [nbns] 2014-12-08 18:19:17  192.168.204.137:137   ->  192.168.204.255:137   ** 
 70 |                     Transaction ID:         0xb480   
 71 |                     Info:                   NB_NAME_QUERY    
 72 |                     Client Hostname:        WPAD             
 73 |                     Client MAC:             00:0C:29:9D:B8:6D 
 74 |              **
 75 |   """,
 76 |                             bpf='(udp and port 137)',
 77 |                             output=AlertOutput(label=__name__),
 78 |                             author='dek',
 79 |                             )
 80 |         self.mac_address = None
 81 |         self.client_hostname = None
 82 |         self.xid = None
 83 |         self.prot_info = None
 84 |         
 85 | 
 86 |     def packet_handler(self, pkt):
 87 |         
 88 |         # iterate through the layers and find the NBNS layer
 89 |         nbns_packet = pkt.pkt.upper_layer
 90 |         try:
 91 |             nbns_packet = nbns_packet.upper_layer
 92 |         except IndexError as e:
 93 |             self.logger.error('{}: could not parse session data \
 94 |                       (NBNS packet not found)'.format(str(e)))
 95 |             # pypacker may throw an Exception here; could use 
 96 |             #   further testing
 97 |             return
 98 | 
 99 | 
100 |         # Extract the Client hostname from the connection data
101 |         # It is represented as 32-bytes half-ASCII
102 |         try:
103 |             nbns_name = unpack('32s', pkt.data[13:45])[0]
104 |         except Exception as e:
105 |             self.logger.error('{}: (NBNS packet not found)'.format(str(e)))
106 |             return
107 | 
108 | 
109 |         # Decode the 32-byte half-ASCII name to its 16 byte NetBIOS name
110 |         try:
111 |             if len(nbns_name) == 32:
112 |                 decoded = []
113 |                 for i in range(0, 32, 2):
114 |                     nib_hi = nbns_name[i] - ord('A')    # high nibble, offset from 'A'
115 |                     nib_lo = nbns_name[i+1] - ord('A')  # low nibble, offset from 'A'
116 |                     decoded.append(chr((nib_hi << 4) | nib_lo))
117 | 
118 |                 # For uniformity, strip excess byte and space chars
119 |                 self.client_hostname = ''.join(decoded)[0:-1].strip()
120 |             else:
121 |                 self.client_hostname = str(nbns_name)
122 | 
123 |         except ValueError as e:
124 |             self.logger.error('{}: Hostname in improper format \
125 |                       (NBNS packet not found)'.format(str(e)))
126 |             return
127 | 
128 | 
129 |         # Extract the Transaction ID from the NBNS packet
130 |         xid = unpack('2s', pkt.data[0:2])[0]
131 |         self.xid = "0x{}".format(xid.hex())
132 | 
133 |         # Extract the opcode info from the NBNS Packet
134 |         op = unpack('2s', pkt.data[2:4])[0]
135 |         op_hex = op.hex()
136 |         op = int(op_hex, 16)
137 |         # Remove excess bits
138 |         op = (op >> 11) & 15
139 | 
140 |         # Decode protocol info if it was present in the payload
141 |         try: 
142 |             self.prot_info = nbns_op[op]
143 |         except KeyError:
144 |             self.prot_info = "0x{}".format(op_hex)
145 | 
146 |         # Extract the MAC address from the ethernet layer of the packet
147 |         self.mac_address = pkt.smac 
148 | 
149 |         # Allow for unknown hostnames
150 |         if not self.client_hostname:
151 |             self.client_hostname = "" 
152 | 
153 |         if self.xid and self.prot_info and self.client_hostname and self.mac_address:
154 |             self.write('\n\tTransaction ID:\t\t{:<8} \n\tInfo:\t\t\t{:<16} \n\tClient Hostname:\t{:<16} \n\tClient MAC:\t\t{:<18}\n'.format(
155 |                         self.xid, self.prot_info, self.client_hostname, self.mac_address), **pkt.info(), dir_arrow='->')
156 |             return pkt
157 | 
158 | 
159 | if __name__ == "__main__":
160 |     print(DshellPlugin())
161 | 
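The hostname decoding implements NetBIOS first-level encoding (RFC 1001): each byte of the 16-byte name becomes two characters, one per nibble, each offset by ord('A'); the plugin additionally drops the 16th byte (the NetBIOS suffix) and strips padding. A hedged round-trip sketch with a made-up name:

def nb_encode(name):
    """NetBIOS first-level encoding: two 'A'-offset characters per byte."""
    out = []
    for b in name.ljust(16).encode("ascii"):
        out.append(chr((b >> 4) + ord("A")))    # high nibble
        out.append(chr((b & 0x0F) + ord("A")))  # low nibble
    return "".join(out)

def nb_decode(encoded):
    """Reverse the encoding by pairing characters back into bytes."""
    return "".join(chr(((ord(encoded[i]) - ord("A")) << 4) | (ord(encoded[i + 1]) - ord("A")))
                   for i in range(0, 32, 2)).strip()

print(nb_encode("WPAD"))             # -> 'FHFAEBEECACACACACACACACACACACACA'
print(nb_decode(nb_encode("WPAD")))  # -> 'WPAD'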


--------------------------------------------------------------------------------
/dshell/plugins/portscan/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/portscan/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/portscan/indegree.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Parse traffic to detect scanners based on connection to IPs that are rarely touched by others
 3 | """
 4 | 
 5 | import dshell.core
 6 | 
 7 | class DshellPlugin(dshell.core.ConnectionPlugin):
 8 | 
 9 |     def __init__(self):
10 |         super().__init__(
11 |             name='parse indegree',
12 |             description='Parse traffic to detect scanners based on connection to IPs that are rarely touched by others',
13 |             bpf='(tcp or udp)',
14 |             author='dev195',
15 |         )
16 |         self.client_conns = {}
17 |         self.server_conns = {}
18 |         self.minhits = 3
19 | 
20 |     def connection_handler(self, conn):
21 |         self.client_conns.setdefault(conn.clientip, set())
22 |         self.server_conns.setdefault(conn.serverip, set())
23 | 
24 |         self.client_conns[conn.clientip].add(conn.serverip)
25 |         self.server_conns[conn.serverip].add(conn.clientip)
26 | 
27 |     def postfile(self):
28 |         for clientip, serverips in self.client_conns.items():
29 |             target_count = len(serverips)
30 |             S = min((len(self.server_conns[serverip]) for serverip in serverips))
31 |             if S > 2 or target_count < 5:
32 |                 continue
33 |             # TODO implement whitelist
34 |             self.write("Scanning IP: {} / S score: {:.1f} / Number of records: {}".format(clientip, S, target_count))
35 | 
36 | 
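In postfile, a client is reported when it contacted at least five servers and the least-contacted of those servers was touched by at most two distinct clients. A small standalone sketch of that heuristic on made-up traffic:

# Hypothetical (client, server) pairs: one host sweeping rarely-visited addresses
connections = [("10.0.0.9", "192.0.2.{}".format(i)) for i in range(1, 8)]
connections += [("10.0.0.1", "192.0.2.100"), ("10.0.0.2", "192.0.2.100")]

client_conns, server_conns = {}, {}
for client, server in connections:
    client_conns.setdefault(client, set()).add(server)
    server_conns.setdefault(server, set()).add(client)

for client, servers in client_conns.items():
    s_score = min(len(server_conns[s]) for s in servers)  # clients seen by the quietest target
    if s_score <= 2 and len(servers) >= 5:
        print("possible scanner:", client, "targets:", len(servers), "S:", s_score)
# -> possible scanner: 10.0.0.9 targets: 7 S: 1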


--------------------------------------------------------------------------------
/dshell/plugins/portscan/trw.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Uses the Threshold Random Walk algorithm described in this paper:
 3 | 
 4 | Limitations to threshold random walk scan detection and mitigating enhancements
 5 | Written by: Mell, P.; Harang, R.
 6 | http://ieeexplore.ieee.org/xpls/icp.jsp?arnumber=6682723
 7 | """
 8 | 
 9 | import dshell.core
10 | from dshell.output.output import Output
11 | 
12 | from pypacker.layer4 import tcp
13 | 
14 | from collections import defaultdict
15 | 
16 | o0 = 0.8  # probability of connection success, given a benign source
17 | o1 = 0.2  # probability of connection success, given a scanning source
18 | is_success = o0/o1
19 | is_failure = o1/o0
20 | 
21 | max_fp_prob = 0.01
22 | min_detect_prob = 0.99
23 | hi_threshold = min_detect_prob / max_fp_prob
24 | lo_threshold = max_fp_prob / min_detect_prob
25 | 
26 | OUTPUT_FORMAT = "(%(plugin)s) %(data)s\n"
27 | 
28 | class DshellPlugin(dshell.core.PacketPlugin):
29 |     def __init__(self, *args, **kwargs):
30 |         super().__init__(
31 |             name="trw",
32 |             author="dev195",
33 |             bpf="tcp",
34 |             output=Output(label=__name__, format=OUTPUT_FORMAT),
35 |             description="Uses Threshold Random Walk to detect network scanners",
36 |             optiondict={
37 |                 "mark_benigns": {
38 |                     "action": "store_true",
39 |                     "help": "Use an upper threshold to mark IPs as benign, thus removing them from consideration as scanners"
40 |                 }
41 |             }
42 |         )
43 |         self.synners = set()
44 |         self.ip_scores = defaultdict(lambda: 1)
45 |         self.classified_ips = set()
46 | 
47 |     def check_score(self, ip, score):
48 |         if self.mark_benigns and score >= hi_threshold:
49 |             self.write("IP {} is benign (score: {})".format(ip, score))
50 |             self.classified_ips.add(ip)
51 |         elif score <= lo_threshold:
52 |             self.write("IP {} IS A SCANNER! (score: {})".format(ip, score))
53 |             self.classified_ips.add(ip)
54 | 
55 |     def packet_handler(self, pkt):
56 |         if not pkt.tcp_flags:
57 |             return
58 | 
59 |         # If we have a SYN, store it in a set and wait for some kind of
60 |         # response or the end of pcap
61 |         if pkt.tcp_flags == tcp.TH_SYN and pkt.sip not in self.classified_ips:
62 |             self.synners.add(pkt.addr)
63 |             return pkt
64 | 
65 |         # If we get the SYN/ACK, score the destination IP with a success
66 |         elif pkt.tcp_flags == (tcp.TH_SYN | tcp.TH_ACK) and pkt.dip not in self.classified_ips:
67 |             alt_addr = ((pkt.dip, pkt.dport), (pkt.sip, pkt.sport))
68 |             if alt_addr in self.synners:
69 |                 self.ip_scores[pkt.dip] *= is_success
70 |                 self.check_score(pkt.dip, self.ip_scores[pkt.dip])
71 |                 self.synners.remove(alt_addr)
72 |             return pkt
73 | 
74 |         # If we get a RST, assume the connection was refused and score the
75 |         # destination IP with a failure
76 |         elif pkt.tcp_flags & tcp.TH_RST and pkt.dip not in self.classified_ips:
77 |             alt_addr = ((pkt.dip, pkt.dport), (pkt.sip, pkt.sport))
78 |             if alt_addr in self.synners:
79 |                 self.ip_scores[pkt.dip] *= is_failure
80 |                 self.check_score(pkt.dip, self.ip_scores[pkt.dip])
81 |                 self.synners.remove(alt_addr)
82 |             return pkt
83 | 
84 | 
85 |     def postfile(self):
86 |         # Go through any SYNs that didn't get a response and assume they failed
87 |         for addr in self.synners:
88 |             ip = addr[0][0]
89 |             if ip in self.classified_ips:
90 |                 continue
91 |             self.ip_scores[ip] *= is_failure
92 |             self.check_score(ip, self.ip_scores[ip])
93 | 
94 | 
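With the constants above, every refused connection multiplies a source's score by is_failure (0.25) and every completed handshake by is_success (4.0), starting from 1; dropping below lo_threshold (about 0.0101) marks a scanner, so four unanswered or reset SYNs with no successes are enough. A short sketch of that walk:

o0, o1 = 0.8, 0.2
is_success, is_failure = o0 / o1, o1 / o0   # 4.0 and 0.25
lo_threshold = 0.01 / 0.99                  # ~0.0101

score = 1.0
for outcome in ("fail", "fail", "fail", "fail"):
    score *= is_success if outcome == "ok" else is_failure
    print(round(score, 4), "<- scanner" if score <= lo_threshold else "")
# 0.25, 0.0625, 0.0156, 0.0039 -> flagged on the fourth failure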


--------------------------------------------------------------------------------
/dshell/plugins/protocol/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/protocol/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/protocol/ether.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Shows MAC address information and optionally filters by it. It is highly
 3 | recommended that oui.txt be included in the share/ directory (see README).
 4 | """
 5 | 
 6 | import os
 7 | 
 8 | import dshell.core
 9 | from dshell.output.output import Output
10 | from dshell.util import get_data_path
11 | 
12 | class DshellPlugin(dshell.core.PacketPlugin):
13 |     OUTPUT_FORMAT = "[%(plugin)s] %(dt)s   %(sip)-15s %(smac)-18s %(smac_org)-35s ->  %(dip)-15s %(dmac)-18s %(dmac_org)-35s %(byte_count)d\n"
14 | 
15 |     def __init__(self, *args, **kwargs):
16 |         super().__init__(
17 |             name="Ethernet",
18 |             description="Show MAC address information and optionally filter by it",
19 |             author="dev195",
20 |             output=Output(label=__name__, format=self.OUTPUT_FORMAT),
21 |             optiondict={
22 |                 "org": {"default":[], "action":"append", "metavar":"ORGANIZATION", "help":"Organizations owning MAC address to inclusively filter on (exact match only). Can be used multiple times to look for multiple organizations."},
23 |                 "org_exclusive": {"default":False, "action":"store_true", "help":"Set organization filter to be exclusive"},
24 |                 'quiet': {'action': 'store_true', 'default':False, 'help':'disable alerts for this plugin'}
25 |             }
26 |         )
27 |         self.oui_map = {}
28 | 
29 |     def premodule(self):
30 |         # Create a mapping of MAC address prefix to organization
31 |         # http://standards-oui.ieee.org/oui.txt
32 |         ouifilepath = os.path.join(get_data_path(), 'oui.txt')
33 |         try:
34 |             with open(ouifilepath, encoding="utf-8") as ouifile:
35 |                 for line in ouifile:
36 |                     if "(hex)" not in line:
37 |                         continue
38 |                     line = line.strip().split(None, 2)
39 |                     prefix = line[0].replace('-', ':')
40 |                     org = line[2]
41 |                     self.oui_map[prefix] = org
42 |         except FileNotFoundError:
43 |             # user probably did not download it
44 |             # print warning and continue
45 |             self.logger.warning("Could not find {} (see README). Will not be able to determine MAC organizations.".format(ouifilepath))
46 | 
47 |     def packet_handler(self, pkt):
48 |         if not pkt.smac or not pkt.dmac:
49 |             return
50 |         smac_prefix = pkt.smac[:8].upper()
51 |         smac_org = self.oui_map.get(smac_prefix, '???')
52 |         dmac_prefix = pkt.dmac[:8].upper()
53 |         dmac_org = self.oui_map.get(dmac_prefix, '???')
54 | 
55 |         # Filter out any packets that do not match organization filter
56 |         if self.org:
57 |             if self.org_exclusive and (smac_org in self.org or dmac_org in self.org):
58 |                 return
59 |             elif not self.org_exclusive and not (smac_org in self.org or dmac_org in self.org):
60 |                 return
61 | 
62 |         if not self.quiet:
63 |             self.write("", smac_org=smac_org, dmac_org=dmac_org, **pkt.info())
64 |         return pkt
65 | 
66 | 
67 | if __name__ == "__main__":
68 |     print(DshellPlugin())
69 | 
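The organization lookup only needs the first three octets of a MAC, normalized to the colon-separated, upper-case form used as keys when oui.txt is parsed. A hedged sketch with made-up registry lines (the real file is the IEEE listing referenced in premodule):

# Hypothetical oui.txt-style lines
sample_lines = [
    "00-0C-29   (hex)\t\tVMware, Inc.",
    "000C29     (base 16)\t\tVMware, Inc.",  # non-"(hex)" lines are skipped
]

oui_map = {}
for line in sample_lines:
    if "(hex)" not in line:
        continue
    prefix, _, org = line.strip().split(None, 2)
    oui_map[prefix.replace("-", ":")] = org

mac = "00:0c:29:9d:b8:6d"
print(oui_map.get(mac[:8].upper(), "???"))  # -> VMware, Inc.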


--------------------------------------------------------------------------------
/dshell/plugins/protocol/ip.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Outputs all IPv4/IPv6 traffic, and hex plus ascii with verbose flag
 3 | """
 4 | 
 5 | import dshell.core
 6 | import dshell.util
 7 | from dshell.output.alertout import AlertOutput
 8 | 
 9 | class DshellPlugin(dshell.core.PacketPlugin):
10 | 
11 |     def __init__(self):
12 |         super().__init__(
13 |             name='ip',
14 |             description='IPv4/IPv6 plugin',
15 |             bpf='ip or ip6',
16 |             author='twp',
17 |             output=AlertOutput(label=__name__),
18 |         )
19 | 
20 |     def packet_handler(self, packet):
21 |         self.write(**packet.info(), dir_arrow='->')
22 |         # If verbose flag set, outputs packet contents in hex and ascii alongside packet info
23 |         self.logger.info("\n" + dshell.util.hex_plus_ascii(packet.rawpkt))
24 |         return packet
25 | 


--------------------------------------------------------------------------------
/dshell/plugins/protocol/protocol.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Tries to find traffic that does not belong to the following protocols:
 3 | TCP, UDP, or ICMP
 4 | """
 5 | 
 6 | import dshell.core
 7 | from dshell.output.alertout import AlertOutput
 8 | 
 9 | class DshellPlugin(dshell.core.PacketPlugin):
10 | 
11 |     def __init__(self):
12 |         super().__init__(
13 |             name="Uncommon Protocols",
14 |             description="Finds uncommon (i.e. not tcp, udp, or icmp) protocols in IP traffic",
15 |             bpf="(ip or ip6) and not tcp and not udp and not icmp and not icmp6",
16 |             author="bg",
17 |             output=AlertOutput(label=__name__),
18 |         )
19 | 
20 |     def packet_handler(self, packet):
21 |         self.write("PROTOCOL: {} ({})".format(packet.protocol, packet.protocol_num), **packet.info(), dir_arrow="->")
22 |         return packet
23 | 


--------------------------------------------------------------------------------
/dshell/plugins/ssh/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/ssh/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/ssh/ssh-pubkey.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Extract server ssh public key from key exchange
  3 | """
  4 | 
  5 | import dshell.core
  6 | from dshell.output.alertout import AlertOutput
  7 | import struct
  8 | import base64
  9 | import hashlib
 10 | import sys
 11 | 
 12 | class DshellPlugin(dshell.core.ConnectionPlugin):
 13 | 
 14 |     def __init__(self):
 15 |         super().__init__(
 16 |             name="ssh-pubkey",
 17 |             author="amm",
 18 |             description="Extract server ssh public key from key exchange",
 19 |             bpf="tcp port 22",
 20 |             output=AlertOutput(label=__name__)
 21 |         )
 22 | 
 23 |     def connection_handler(self, conn):
 24 | 
 25 |         server_banner = ''
 26 |         sc_blob_count = 0
 27 |         cs_blob_count = 0
 28 | 
 29 |         info = {}
 30 | 
 31 |         for blob in conn.blobs:
 32 | 
 33 |             #
 34 |             # CS Blobs: Only interest is a client banner
 35 |             #
 36 |             if blob.direction == 'cs':
 37 |                 cs_blob_count += 1
 38 |                 if cs_blob_count > 1:
 39 |                     continue
 40 |                 else:
 41 |                     blob.reassemble(allow_overlap=True, allow_padding=True)
 42 |                     if not blob.data:
 43 |                         continue
 44 |                     info['clientbanner'] = blob.data.split(b'\x0d')[0].rstrip()
 45 |                     if not info['clientbanner'].startswith(b'SSH'):
 46 |                         return conn  # NOT AN SSH CONNECTION
 47 |                     try:
 48 |                         info['clientbanner'] = info['clientbanner'].decode(
 49 |                             'utf-8')
 50 |                     except UnicodeDecodeError:
 51 |                         return conn
 52 |                     continue
 53 | 
 54 |             #
 55 |             # SC Blobs: Banner and public key
 56 |             #
 57 |             sc_blob_count += 1
 58 |             blob.reassemble(allow_overlap=True, allow_padding=True)
 59 |             if not blob.data:
 60 |                 continue
 61 |             d = blob.data
 62 | 
 63 |             # Server Banner
 64 |             if sc_blob_count == 1:
 65 |                 info['serverbanner'] = d.split(b'\x0d')[0].rstrip()
 66 |                 if not info['serverbanner'].startswith(b'SSH'):
 67 |                     return conn  # NOT AN SSH CONNECTION
 68 |                 try:
 69 |                     info['serverbanner'] = info['serverbanner'].decode('utf-8')
 70 |                 except UnicodeDecodeError:
 71 |                     pass
 72 |                 continue
 73 | 
 74 |             # Key Exchange Packet/Messages
 75 |             mlist = messagefactory(d)
 76 |             stop_blobs = False
 77 |             for m in mlist:
 78 |                 if m.message_code == 31 or m.message_code == 33:
 79 |                     info['host_pubkey'] = m.host_pub_key
 80 |                     stop_blobs = True
 81 |                     break
 82 |             if stop_blobs:
 83 |                 break
 84 | 
 85 |         #print(repr(info))
 86 | 
 87 |         if 'host_pubkey' in info:
 88 |             # Calculate key fingerprints
 89 |             info['host_fingerprints'] = {}
 90 |             for hash_scheme in ("md5", "sha1", "sha256"):
 91 |                 hashfunction = getattr(hashlib, hash_scheme)
 92 |                 thisfp = key_fingerprint(info['host_pubkey'], hashfunction)
 93 |                 info['host_fingerprints'][hash_scheme] = ':'.join(
 94 |                     ['%02x' % b for b in thisfp])
 95 | 
 96 |             msg = "%s" % (info['host_pubkey'])
 97 |             self.write(msg, **info, **conn.info())
 98 |             return conn
 99 | 
100 | 
101 | def messagefactory(data):
102 | 
103 |     datalen = len(data)
104 |     offset = 0
105 |     msglist = []
106 |     while offset < datalen:
107 |         try:
108 |             msg = sshmessage(data[offset:])
109 |         except ValueError:
110 |             return msglist
111 |         msglist.append(msg)
112 |         offset += msg.packet_len + 4
113 | 
114 |     return msglist
115 | 
116 | 
117 | class sshmessage:
118 | 
119 |     def __init__(self, rawdata):
120 |         self.__parse_raw(rawdata)
121 | 
122 |     def __parse_raw(self, data):
123 |         datalen = len(data)
124 |         if datalen < 6:
125 |             raise ValueError
126 | 
127 |         (self.packet_len, self.padding_len,
128 |          self.message_code) = struct.unpack(">IBB", data[0:6])
129 |         if datalen < self.packet_len + 4:
130 |             raise ValueError
131 |         self.body = data[6:4+self.packet_len]
132 | 
133 |         # ECDH Kex Reply
134 |         if self.message_code == 31 or self.message_code == 33:
135 |             host_key_len = struct.unpack(">I", self.body[0:4])[0]
136 |             full_key_net = self.body[4:4+host_key_len]
137 |             key_type_name_len = struct.unpack(">I", full_key_net[0:4])[0]
138 |             key_type_name = full_key_net[4:4+key_type_name_len]
139 |             key_data = full_key_net[4+key_type_name_len:]
140 |             if key_type_name_len > 50:
141 |                 # something went wrong
142 |                 # this probably isn't a code 31
143 |                 self.message_code = 0
144 |             else:
145 |                 self.host_pub_key = "%s %s" % (key_type_name.decode(
146 |                     'utf-8'), base64.b64encode(full_key_net).decode('utf-8'))
147 | 
148 | 
149 | def key_fingerprint(ssh_pubkey, hashfunction=hashlib.sha256):
150 | 
151 |     # Treat as bytes, not string
152 |     if type(ssh_pubkey) == str:
153 |         ssh_pubkey = ssh_pubkey.encode('utf-8')
154 | 
155 |     # Strip space from end
156 |     ssh_pubkey = ssh_pubkey.rstrip(b"\r\n\0 ")
157 | 
158 |     # Only look at first line
159 |     ssh_pubkey = ssh_pubkey.split(b"\n")[0]
160 |     # If the line has the "type base64 [comment]" form, keep the base64 field
161 |     if ssh_pubkey.count(b" ") >= 1:
162 |         ssh_pubkey = ssh_pubkey.split(b" ")[1]
163 | 
164 |     # Try to decode key as base64
165 |     try:
166 |         keybin = base64.b64decode(ssh_pubkey)
167 |     except Exception:
168 |         sys.stderr.write("Invalid key value:\n")
169 |         sys.stderr.write("  \"%s\":\n" % ssh_pubkey)
170 |         return None
171 | 
172 |     # Fingerprint
173 |     return hashfunction(keybin).digest()
174 | 
175 | 
176 | if __name__ == "__main__":
177 |     print(DshellPlugin())
178 | 
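key_fingerprint hashes the base64-decoded key blob, and the plugin renders each digest as colon-separated hex; the same blob also yields the OpenSSH-style SHA256 form. A hedged sketch on a made-up public key line (the base64 field is dummy material, not a real key):

import base64
import hashlib

pubkey_line = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEJvZ3VzIGtleSBtYXRlcmlhbCBmb3IgZGVtbw== demo@host"

keybin = base64.b64decode(pubkey_line.split(" ")[1])
md5_fp = ":".join("%02x" % b for b in hashlib.md5(keybin).digest())         # plugin-style hex form
sha256_fp = base64.b64encode(hashlib.sha256(keybin).digest()).rstrip(b"=")  # OpenSSH-style form

print("MD5:", md5_fp)
print("SHA256:" + sha256_fp.decode())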


--------------------------------------------------------------------------------
/dshell/plugins/ssl/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/ssl/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/ssl/sslblacklist.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Looks for certificates in SSL/TLS traffic and tries to find any hashes that
  3 | match those in the abuse.ch blacklist.
  4 | (https://sslbl.abuse.ch/blacklist/)
  5 | """
  6 | 
  7 | # handy reference:
  8 | # http://blog.fourthbit.com/2014/12/23/traffic-analysis-of-an-ssl-slash-tls-session
  9 | 
 10 | import dshell.core
 11 | from dshell.output.alertout import AlertOutput
 12 | 
 13 | import hashlib
 14 | import io
 15 | import struct
 16 | 
 17 | # SSLv3/TLS version
 18 | SSL3_VERSION = 0x0300
 19 | TLS1_VERSION = 0x0301
 20 | TLS1_1_VERSION = 0x0302
 21 | TLS1_2_VERSION = 0x0303
 22 | 
 23 | # Record type
 24 | SSL3_RT_CHANGE_CIPHER_SPEC = 20
 25 | SSL3_RT_ALERT             = 21
 26 | SSL3_RT_HANDSHAKE         = 22
 27 | SSL3_RT_APPLICATION_DATA  = 23
 28 | 
 29 | # Handshake message type
 30 | SSL3_MT_HELLO_REQUEST           = 0
 31 | SSL3_MT_CLIENT_HELLO            = 1
 32 | SSL3_MT_SERVER_HELLO            = 2
 33 | SSL3_MT_CERTIFICATE             = 11
 34 | SSL3_MT_SERVER_KEY_EXCHANGE     = 12
 35 | SSL3_MT_CERTIFICATE_REQUEST     = 13
 36 | SSL3_MT_SERVER_DONE             = 14
 37 | SSL3_MT_CERTIFICATE_VERIFY      = 15
 38 | SSL3_MT_CLIENT_KEY_EXCHANGE     = 16
 39 | SSL3_MT_FINISHED                = 20
 40 | 
 41 | 
 42 | class DshellPlugin(dshell.core.ConnectionPlugin):
 43 | 
 44 |     def __init__(self):
 45 |         super().__init__(
 46 |             name="sslblacklist",
 47 |             author="dev195",
 48 |             bpf="tcp and (port 443 or port 993 or port 1443 or port 8531)",
 49 |             description="Looks for certificate SHA1 matches in the abuse.ch blacklist",
 50 |             longdescription="""
 51 |     Looks for certificates in SSL/TLS traffic and tries to find any hashes that
 52 |     match those in the abuse.ch blacklist.
 53 | 
 54 |     Requires downloading the blacklist CSV from abuse.ch:
 55 |     https://sslbl.abuse.ch/blacklist/
 56 | 
 57 |     If the CSV is not in the current directory, use the --sslblacklist_csv
 58 |     argument to provide a file path.
 59 | """,
 60 |             output=AlertOutput(label=__name__),
 61 |             optiondict={
 62 |                 "csv": {
 63 |                     "help": "filepath to the sslblacklist.csv file",
 64 |                     "default": "./sslblacklist.csv",
 65 |                     "metavar": "FILEPATH"
 66 |                 },
 67 |             }
 68 |         )
 69 | 
 70 |     def premodule(self):
 71 |         self.parse_blacklist_csv(self.csv)
 72 | 
 73 |     def parse_blacklist_csv(self, filepath):
 74 |         "parses the SSL blacklist CSV, given the 'filepath'"
 75 |         # Python's standard csv module doesn't seem to handle it properly
 76 |         self.hashes = {}
 77 |         with open(filepath, 'r') as csv:
 78 |             for line in csv:
 79 |                 line = line.split('#')[0]  # ignore comments
 80 |                 line = line.strip()
 81 |                 try:
 82 |                     timestamp, sha1, reason = line.split(',', 2)
 83 |                     self.hashes[sha1] = reason
 84 |                 except ValueError:
 85 |                     continue
 86 | 
 87 |     def blob_handler(self, conn, blob):
 88 |         if blob.direction == 'cs':
 89 |             return None
 90 | 
 91 |         data = io.BytesIO(blob.data)
 92 | 
 93 |         # Iterate over each layer of the connection, paying special attention to the certificate
 94 |         while True:
 95 |             try:
 96 |                 content_type, proto_version, record_len = struct.unpack("!BHH", data.read(5))
 97 |             except struct.error:
 98 |                 break
 99 |             if proto_version not in (SSL3_VERSION, TLS1_VERSION, TLS1_1_VERSION, TLS1_2_VERSION):
100 |                 return None
101 |             if content_type == SSL3_RT_HANDSHAKE:
102 |                 handshake_type = struct.unpack("!B", data.read(1))[0]
103 |                 handshake_len = struct.unpack("!I", b"\x00"+data.read(3))[0]
104 |                 if handshake_type == SSL3_MT_CERTIFICATE:
105 |                     # Process the certificate itself
106 |                     cert_chain_len = struct.unpack("!I", b"\x00"+data.read(3))[0]
107 |                     bytes_processed = 0
108 |                     while (bytes_processed < cert_chain_len):
109 |                         try:
110 |                             cert_data_len = struct.unpack("!I", b"\x00"+data.read(3))[0]
111 |                             cert_data = data.read(cert_data_len)
112 |                             bytes_processed += 3 + cert_data_len
113 |                             sha1 = hashlib.sha1(cert_data).hexdigest()
114 |                             if sha1 in self.hashes:
115 |                                 bad_guy = self.hashes[sha1]
116 |                                 self.write("Certificate hash match: {}".format(bad_guy), **conn.info())
117 |                         except struct.error as e:
118 |                             break
119 |                 else:
120 |                     # Ignore any layers that are not a certificate
121 |                     data.read(handshake_len)
122 |                     continue
123 | 
124 |         return conn, blob
125 | 
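TLS handshake messages carry 24-bit length fields, which is why the parser left-pads three bytes with a zero before unpacking them as "!I"; each certificate in the chain is then hashed as raw DER bytes and checked against the loaded blacklist. A minimal sketch of both steps on made-up data:

import hashlib
import io
import struct

def read_u24(stream):
    """Read a 3-byte big-endian length by left-padding it to 4 bytes."""
    return struct.unpack("!I", b"\x00" + stream.read(3))[0]

# Hypothetical chain entry: 3-byte length followed by fake DER bytes
fake_der = b"\x30\x82\x01\x0a" + b"\x00" * 10
entry = struct.pack("!I", len(fake_der))[1:] + fake_der  # keep only the low 3 bytes

stream = io.BytesIO(entry)
cert_len = read_u24(stream)
cert_data = stream.read(cert_len)
print(cert_len, hashlib.sha1(cert_data).hexdigest())     # length and blacklist-style SHA1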


--------------------------------------------------------------------------------
/dshell/plugins/tftp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/tftp/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/visual/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/visual/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/visual/piecharts.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Plugin that generates HTML+JavaScript pie charts for flow information
  3 | """
  4 | 
  5 | import dshell.core
  6 | from dshell.output.output import Output
  7 | 
  8 | import operator
  9 | from collections import defaultdict
 10 | 
 11 | class VisualizationOutput(Output):
 12 |     """
 13 |     Special output class intended to only be used for this specific plugin.
 14 |     """
 15 | 
 16 |     _DEFAULT_FORMAT='{"value":%(data)s, "datatype":"%(datatype)s", "label":"%(label)s"},'
 17 | 
 18 |     _HTML_HEADER = """
 19 | <html>
 20 | <head>
 21 |     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
 22 |     <title>Dshell - Pie Chart Output</title>
 23 |     <script type="text/javascript" src="d3.js"></script>
 24 |     <style>
 25 |         .legend {
 26 |             font-size: 12px;
 27 |         }
 28 |         rect {
 29 |             stroke-width: 2;
 30 |         }
 31 |         .tooltip {
 32 |             background: #ffffff;
 33 |             box-shadow: 0 0 5px #999999;
 34 |             color: #333333;
 35 |             display: none;
 36 |             font-size: 12px;
 37 |             padding: 10px;
 38 |             position: absolute;
 39 |             text-align: center;
 40 |             width: 200px;
 41 |             z-index: 10;
 42 |             left: 130px;
 43 |             top: 95px;
 44 |         }
 45 |     </style>
 46 | </head>
 47 | 
 48 | <body>
 49 | <center>
 50 | <div id="content" width="90%">
 51 | 
 52 | <table border="0">
 53 | <tr>
 54 |     <td>
 55 |     <h1> Source Countries </h1>
 56 |     <div id="source_country"></div>
 57 |     </td>
 58 |     <td>
 59 |     <h1> Destination Countries </h1>
 60 |     <div id="dest_country"></div>
 61 |     </td>
 62 | </tr><tr>
 63 |     <td>
 64 |     <h1> Source ASNs </h1>
 65 |     <div id="source_asn"></div>
 66 |     </td>
 67 |     <td>
 68 |     <h1> Destination ASNs </h1>
 69 |     <div id="dest_asn"></div>
 70 |     </td>
 71 | </tr><tr>
 72 |     <td>
 73 |     <h1> Source Ports </h1>
 74 |     <div id="source_port"></div>
 75 |     </td>
 76 |     <td>
 77 |     <h1> Destination Ports </h1>
 78 |     <div id="dest_port"></div>
 79 |     </td>
 80 | </tr><tr>
 81 |     <td>
 82 |     <h1> Protocols </h1>
 83 |     <div id="protocol"></div>
 84 |     </td><td>
 85 |     </td>
 86 | </tr>
 87 | </table>
 88 | 
 89 | <script type="text/javascript">
 90 | 
 91 | var w = (window.innerWidth / 2) * 0.9,
 92 |     h = 400,
 93 |     r = Math.min(w, h) / 2.5,
 94 |     legendRectSize = 15,
 95 |     legendSpacing = 4;
 96 | 
 97 | var data = JSON.parse('["""
 98 | 
 99 |     # ignore the trailing comma by adding an empty object at the end
100 |     _HTML_FOOTER = """{}]');
101 | 
102 | var src_country_data = [],
103 |     dst_country_data = [],
104 |     src_asn_data = [],
105 |     dst_asn_data = [],
106 |     src_ports_data = [],
107 |     dst_ports_data = [],
108 |     protocols_data = [];
109 | 
110 | for (var i = 0; i < data.length; i++) {
111 |     switch (data[i]['datatype']) {
112 |         case 'protocol':
113 |             protocols_data.push(data[i]);
114 |             break;
115 |         case 'source_country':
116 |             src_country_data.push(data[i]);
117 |             break;
118 |         case 'dest_country':
119 |             dst_country_data.push(data[i]);
120 |             break;
121 |         case 'source_asn':
122 |             src_asn_data.push(data[i]);
123 |             break;
124 |         case 'dest_asn':
125 |             dst_asn_data.push(data[i]);
126 |             break;
127 |         case 'source_port':
128 |             src_ports_data.push(data[i]);
129 |             break;
130 |         case 'dest_port':
131 |             dst_ports_data.push(data[i]);
132 |             break;
133 |     }
134 | }
135 | 
136 | draw_graph(src_country_data, 'source_country');
137 | draw_graph(dst_country_data, 'dest_country');
138 | draw_graph(src_asn_data, 'source_asn');
139 | draw_graph(dst_asn_data, 'dest_asn');
140 | draw_graph(src_ports_data, 'source_port');
141 | draw_graph(dst_ports_data, 'dest_port');
142 | draw_graph(protocols_data, 'protocol');
143 | 
144 | function draw_graph(indata, intype) {
145 |     var color = d3.scaleOrdinal(d3.schemeCategory10);
146 | 
147 |     var tooltip = d3.select("#"+intype)
148 |         .append('div')
149 |         .attr('class', 'tooltip');
150 | 
151 |     tooltip.append('div')
152 |         .attr('class', 'label');
153 |     tooltip.append('div')
154 |          .attr('class', 'count');
155 |     tooltip.append('div')
156 |         .attr('class', 'percent');
157 | 
158 |     var svg = d3.select("#"+intype)
159 |         .append("svg:svg")
160 |             .attr("width", w)
161 |             .attr("height", h)
162 |         .append("svg:g")
163 |             .attr("transform", "translate(" + r + "," + r + ")");
164 | 
165 |     var arc = d3.arc()
166 |         .innerRadius(0)
167 |         .outerRadius(r);
168 | 
169 |     var pie = d3.pie()
170 |         .value(function(d) { return d.value; })
171 |         .sort(null);
172 | 
173 |     var path = svg.selectAll("path")
174 |         .data(pie(indata))
175 |         .enter()
176 |             .append('path')
177 |                 .attr('d', arc)
178 |                 .attr('fill', function(d, i) { return color(i); });
179 | 
180 |     path.on('mouseover', function(d) {
181 |         var total = d3.sum(indata.map(function(d) {
182 |             return d.value;
183 |         }));
184 |         var percent = Math.round(1000 * d.data.value / total) / 10;
185 |         tooltip.select('.label').html(d.data.label);
186 |         tooltip.select('.count').html(d.data.value + ' / ' + total);
187 |         tooltip.select('.percent').html(percent + '%');
188 |         tooltip.style('display', 'block');
189 |     });
190 | 
191 |     path.on('mouseout', function(d) {
192 |         tooltip.style('display', 'none');
193 |     });
194 | 
195 |     path.on('mousemove', function(d) {
196 |         tooltip
197 |             .style('top', (d3.event.pageY + 10) + 'px')
198 |             .style('left', (d3.event.pageX + 10) + 'px');
199 |     });
200 | 
201 |     var legend = svg.selectAll('.legend')
202 |         .data(color.domain())
203 |         .enter()
204 |             .append('svg:g')
205 |                 .attr('class', 'legend')
206 |                 .attr('transform', function(d,i) {
207 |                     var h = legendRectSize + legendSpacing;
208 |                     var offset = h * color.domain().length / 2;
209 |                     var horz = r + 20;
210 |                     var vert = i * h - offset;
211 |                     return 'translate(' + horz + ',' + vert + ')';
212 |                 });
213 | 
214 |     legend.append('rect')
215 |         .attr('width', legendRectSize)
216 |         .attr('height', legendRectSize)
217 |         .style('fill', color)
218 |         .style('stroke', color);
219 | 
220 |     legend.append('text')
221 |         .attr('x', legendRectSize + legendSpacing)
222 |         .attr('y', legendRectSize - legendSpacing)
223 |         .text(function(d) {return indata[d].label; });
224 | 
225 | }
226 | 
227 | </script>
228 | </div>
229 | </center>
230 | </body>
231 | </html>
232 | 
233 | """
234 | 
235 |     def setup(self):
236 |         Output.setup(self)
237 |         self.fh.write(self._HTML_HEADER)
238 | 
239 |     def close(self):
240 |         self.fh.write(self._HTML_FOOTER)
241 |         Output.close(self)
242 | 
243 | class DshellPlugin(dshell.core.ConnectionPlugin):
244 | 
245 |     def __init__(self):
246 |         super().__init__(
247 |             name='Pie Charts',
248 |             author='dev195',
249 |             bpf="ip",
250 |             description='Generates visualizations based on connections',
251 |             longdescription="""
252 | Generates HTML+JavaScript pie chart visualizations based on connections.
253 | 
254 | Output should be redirected to a file and placed in a directory that has the d3.js JavaScript library. Library is available for download at https://d3js.org/
255 | """,
256 |             output=VisualizationOutput(label=__name__),
257 |         )
258 | 
259 |         self.top_x = 10
260 | 
261 |     def premodule(self):
262 |         "Set each of the counter dictionaries as defaultdict(int)"
263 |         # source
264 |         self.s_country_count = defaultdict(int)
265 |         self.s_asn_count = defaultdict(int)
266 |         self.s_port_count = defaultdict(int)
267 |         self.s_ip_count = defaultdict(int)
268 |         # dest
269 |         self.d_country_count = defaultdict(int)
270 |         self.d_asn_count = defaultdict(int)
271 |         self.d_port_count = defaultdict(int)
272 |         self.d_ip_count = defaultdict(int)
273 |         # protocol
274 |         self.proto = defaultdict(int)
275 | 
276 | 
277 |     def postmodule(self):
278 |         "Write the top X results for each type of data we're counting"
279 |         t = self.top_x + 1
280 |         for i in sorted(self.proto.items(), reverse=True, key=operator.itemgetter(1))[:t]:
281 |             if i[0]:
282 |                 self.write(int(i[1]), datatype="protocol", label=i[0])
283 |         for i in sorted(self.s_country_count.items(), reverse=True, key=operator.itemgetter(1))[:t]:
284 |             if i[0] and i[0] != '--':
285 |                 self.write(int(i[1]), datatype="source_country", label=i[0])
286 |         for i in sorted(self.d_country_count.items(), reverse=True, key=operator.itemgetter(1))[:t]:
287 |             if i[0] and i[0] != '--':
288 |                 self.write(int(i[1]), datatype="dest_country", label=i[0])
289 |         for i in sorted(self.s_asn_count.items(), reverse=True, key=operator.itemgetter(1))[:t]:
290 |             if i[0] and i[0] != '--':
291 |                 self.write(int(i[1]), datatype="source_asn", label=i[0])
292 |         for i in sorted(self.d_asn_count.items(), reverse=True, key=operator.itemgetter(1))[:t]:
293 |             if i[0] and i[0] != '--':
294 |                 self.write(int(i[1]), datatype="dest_asn", label=i[0])
295 |         for i in sorted(self.s_port_count.items(), reverse=True, key=operator.itemgetter(1))[:t]:
296 |             if i[0]:
297 |                 self.write(int(i[1]), datatype="source_port", label=i[0])
298 |         for i in sorted(self.d_port_count.items(), reverse=True, key=operator.itemgetter(1))[:t]:
299 |             if i[0]:
300 |                 self.write(int(i[1]), datatype="dest_port", label=i[0])
301 | 
302 |     def connection_handler(self, conn):
303 |         "For each conn, increment the counts for the relevant dictionary keys"
304 |         self.proto[conn.protocol] += 1
305 |         self.s_country_count[conn.sipcc] += 1
306 |         self.s_asn_count[conn.sipasn] += 1
307 |         self.s_port_count[conn.sport] += 1
308 |         self.s_ip_count[conn.sip] += 1
309 |         self.d_country_count[conn.dipcc] += 1
310 |         self.d_asn_count[conn.dipasn] += 1
311 |         self.d_port_count[conn.dport] += 1
312 |         self.d_ip_count[conn.dip] += 1
313 |         return conn
314 | 
315 | 
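Each self.write call emits one line in the _DEFAULT_FORMAT shown above, and the footer's leading "{}" entry absorbs the trailing comma so the concatenated string parses as JSON in the template's JSON.parse call. A rough sketch of that concatenation with made-up counts:

import json

rows = [
    '{"value":42, "datatype":"protocol", "label":"TCP"},',
    '{"value":17, "datatype":"source_country", "label":"US"},',
]
payload = "[" + "".join(rows) + "{}]"  # the empty object eats the final comma
print(json.loads(payload))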


--------------------------------------------------------------------------------
/dshell/plugins/voip/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/voip/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/voip/rtp.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Real-time transport protocol (RTP) capture plugin
  3 | """
  4 | 
  5 | import datetime
  6 | 
  7 | import dshell.core
  8 | from dshell.output.alertout import AlertOutput
  9 | 
 10 | from pypacker.layer4 import udp
 11 | from pypacker.layer567 import rtp
 12 | 
 13 | class DshellPlugin(dshell.core.PacketPlugin):
 14 | 
 15 |     def __init__(self):
 16 |         super().__init__(
 17 |             name="RTP",
 18 |             author="mm/dev195",
 19 |             bpf="udp",
 20 |             description="Real-time transport protocol (RTP) capture plugin",
 21 |             longdescription="""
 22 | The real-time transport protocol (RTP) plugin will extract the Hosts, Payload Type, Synchronization source, 
 23 | Sequence Number, Padding, Marker and Client MAC address from every RTP packet found in the given pcap.
 24 | 
 25 | General usage:
 26 | 
 27 |     decode -d rtp <pcap> 
 28 |     decode -d rtp --no-vlan --layer2=sll.SLL <pcap> 
 29 | 
 30 | Examples:
 31 | 
 32 |     https://wiki.wireshark.org/SampleCaptures#SIP_and_RTP
 33 |     https://wiki.wireshark.org/SampleCaptures?action=AttachFile&do=get&target=rtp_example.raw.gz
 34 | 
 35 |     decode -d rtp rtp_example.pcap
 36 | 
 37 | Output:
 38 |     
 39 |     rtp 2016-09-21 23:44:40   50.197.16.141:1195  --     192.168.9.12:44352 ** 
 40 |         From: 50.197.16.141 (00:02:31:11:a5:97) to 192.168.9.12 (45:20:01:31:45:40) 
 41 |         Payload Type (7 bits): Dynamic
 42 |         Sequence Number (16 bits): 58635
 43 |         Timestamp (32 bits): 1331328074 
 44 |         Synchronization source (32 bits): 1948709792
 45 |         Arrival Time: 1474497880.6 --> 2016-09-21 22:44:40.604135
 46 |         Contributing source (32 bits): 1, Padding (1 bit): 1, Extension (1 bit): 1, Marker (1 bit): 0
 47 |      **
 48 |     rtp 2016-09-21 23:44:40         10.5.1.8:5086  --         10.5.1.7:5070  ** 
 49 |         From: 10.5.1.8 (00:02:81:11:a0:d7) to 10.5.1.7 (45:00:20:c8:a3:26) 
 50 |         Payload Type (7 bits): PCMU - Audio - 8000 Hz - 1 Channel
 51 |         Sequence Number (16 bits): 17664
 52 |         Timestamp (32 bits): 98240 
 53 |         Synchronization source (32 bits): 1671095215
 54 |         Arrival Time: 1474497880.6 --> 2016-09-21 22:44:40.604160
 55 |         Contributing source (32 bits): 0, Padding (1 bit): 0, Extension (1 bit): 0, Marker (1 bit): 0
 56 |      **
 57 |   """,
 58 |             output=AlertOutput(label=__name__)
 59 |         )
 60 | 
 61 |     def premodule(self):
 62 |         self.payload_type = {0: "PCMU - Audio - 8000 Hz - 1 Channel", 1: "Reserved", 2: "Reserved", 3: "GSM - Audio - 8000 Hz - 1 Channel",
 63 |                              4: "G723 - Audio - 8000 Hz - 1 Channel", 5: "DVI4 - Audio - 8000 Hz - 1 Channel", 6: "DVI4 - Audio - 16000 Hz - 1 Channel",
 64 |                              7: "LPC - Audio - 8000 Hz - 1 Channel", 8: "PCMA - Audio - 8000 Hz - 1 Channel", 9: "G722 - Audio - 8000 Hz - 1 Channel",
 65 |                              10: "L16 - Audio - 44100 Hz - 2 Channel", 11: "L16 - Audio - 44100 Hz - 1 Channel", 12: "QCELP - Audio - 8000 Hz - 1 Channel",
 66 |                              13: "CN - Audio - 8000 Hz - 1 Channel", 14: "MPA - Audio - 90000 Hz", 15: "G728 - Audio - 8000 Hz - 1 Channel", 16: "DVI4 - Audio - 11025 Hz - 1 Channel",
 67 |                              17: "DVI4 - Audio - 22050 Hz - 1 Channel", 18: "G729 - Audio - 8000 Hz - 1 Channel", 19: "Reserved - Audio", 20: "Unassigned - Audio",
 68 |                              21: "Unassigned - Audio", 22: "Unassigned - Audio", 23: "Unassigned - Audio", 24: "Unassigned - Video", 25: "CelB - Video - 90000 Hz",
 69 |                              26: "JPEG - Video - 90000 Hz", 27: "Unassigned - Video", 28: "nv - Video - 90000 Hz", 29: "Unassigned - Video", 30: "Unassigned - Video",
 70 |                              31: "H261 - Video - 90000 Hz", 32: "MPV - Video - 90000 Hz", 33: "MP2T - Audio/Video - 90000 Hz", 34: "H263 - Video - 90000 Hz"}
 71 | 
 72 |         for i in range(35,72):
 73 |             self.payload_type[i] = "Unassigned"
 74 |         for i in range(72,77):
 75 |             self.payload_type[i] = "Reserved for RTCP conflict avoidance"
 76 |         for i in range(77,96):
 77 |             self.payload_type[i] = "Unassigned"
 78 |         for i in range(96,128):
 79 |             self.payload_type[i] = "Dynamic"
 80 | 
 81 |     def packet_handler(self, pkt):
 82 |         # Scrape out the UDP layer of the packet
 83 |         udpp = pkt.pkt.upper_layer
 84 |         while not isinstance(udpp, udp.UDP):
 85 |             try:
 86 |                 udpp = udpp.upper_layer
 87 |             except AttributeError:
 88 |                 # There doesn't appear to be an UDP layer
 89 |                 return
 90 | 
 91 |         # Parse the RTP protocol from above the UDP layer
 92 |         rtpp = rtp.RTP(udpp.body_bytes)
 93 | 
 94 |         if rtpp.version != 2:
 95 |             # RTP should always be version 2
 96 |             return
 97 | 
 98 |         pt = self.payload_type.get(rtpp.pt, "??")
 99 | 
100 |         self.write("\n\tFrom: {0} ({1}) to {2} ({3}) \n\tPayload Type (7 bits): {4}\n\tSequence Number (16 bits): {5}\n\tTimestamp (32 bits): {6} \n\tSynchronization source (32 bits): {7}\n\tArrival Time: {8} --> {9}\n\tContributing source (32 bits): {10}, Padding (1 bit): {11}, Extension (1 bit): {12}, Marker (1 bit): {13}\n".format(
101 |             pkt.sip, pkt.smac, pkt.dip, pkt.dmac, pt, rtpp.seq, rtpp.ts,
102 |             rtpp.ssrc, pkt.ts, datetime.datetime.utcfromtimestamp(pkt.ts),
103 |             rtpp.cc, rtpp.p, rtpp.x, rtpp.m), **pkt.info())
104 | 
105 |         return pkt
106 | 
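The plugin relies on pypacker's rtp.RTP to split out the header fields, but the fixed 12-byte RTP header (RFC 3550) can also be unpacked by hand. A hedged sketch using values borrowed from the sample output in the docstring (the header bytes themselves are fabricated):

import struct

# Hypothetical header: V=2, no padding/extension, CC=0, M=0, PT=0 (PCMU),
# seq=17664, timestamp=98240, SSRC=1671095215
hdr = struct.pack("!BBHII", 0x80, 0x00, 17664, 98240, 1671095215)

b0, b1, seq, ts, ssrc = struct.unpack("!BBHII", hdr[:12])
version = b0 >> 6        # 2 bits
padding = (b0 >> 5) & 1  # 1 bit
cc      = b0 & 0x0F      # 4 bits
marker  = b1 >> 7        # 1 bit
pt      = b1 & 0x7F      # 7 bits
print(version, pt, seq, ts, ssrc)  # -> 2 0 17664 98240 1671095215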


--------------------------------------------------------------------------------
/dshell/plugins/voip/sip.py:
--------------------------------------------------------------------------------
  1 | """
  2 |  Author: MM - https://github.com/1modm
  3 | 
  4 |  The Session Initiation Protocol (SIP) is the IETF protocol for VOIP and other
  5 |  text and multimedia sessions; it is a communications protocol for signaling
  6 |  and controlling those sessions.
  7 |  SIP is independent from the underlying transport protocol. It runs on the
  8 |  Transmission Control Protocol (TCP), the User Datagram Protocol (UDP) or the
  9 |  Stream Control Transmission Protocol (SCTP)
 10 | 
 11 |  Rate and codec calculation thanks to https://git.ucd.ie/volte-and-of/voip-pcapy
 12 | 
 13 |  RFC: https://www.ietf.org/rfc/rfc3261.txt
 14 | 
 15 |  SIP is a text-based protocol with syntax similar to that of HTTP.
 16 |  There are two different types of SIP messages: requests and responses.
 17 |  - Requests initiate a SIP transaction between two SIP entities for
 18 |    establishing, controlling, and terminating sessions.
 19 |  - Responses are sent by the user agent server indicating the result of a
 20 |    received request.
 21 | 
 22 |  - SIP session setup example:
 23 | 
 24 |        Alice's  . . . . . . . . . . . . . . . . . . . .  Bob's
 25 |       softphone                                        SIP Phone
 26 |          |                |                |                |
 27 |          |    INVITE F1   |                |                |
 28 |          |--------------->|    INVITE F2   |                |
 29 |          |  100 Trying F3 |--------------->|    INVITE F4   |
 30 |          |<---------------|  100 Trying F5 |--------------->|
 31 |          |                |<---------------| 180 Ringing F6 |
 32 |          |                | 180 Ringing F7 |<---------------|
 33 |          | 180 Ringing F8 |<---------------|     200 OK F9  |
 34 |          |<---------------|    200 OK F10  |<---------------|
 35 |          |    200 OK F11  |<---------------|                |
 36 |          |<---------------|                |                |
 37 |          |                       ACK F12                    |
 38 |          |------------------------------------------------->|
 39 |          |                   Media Session                  |
 40 |          |<================================================>|
 41 |          |                       BYE F13                    |
 42 |          |<-------------------------------------------------|
 43 |          |                     200 OK F14                   |
 44 |          |------------------------------------------------->|
 45 |          |                                                  |
 46 | 
 47 | """
 48 | 
 49 | import dshell.core
 50 | from dshell.output.colorout import ColorOutput
 51 | 
 52 | from pypacker.layer4 import udp
 53 | from pypacker.layer567 import sip
 54 | 
 55 | class DshellPlugin(dshell.core.PacketPlugin):
 56 | 
 57 |     def __init__(self):
 58 |         super().__init__(
 59 |             name="SIP",
 60 |             author="mm/dev195",
 61 |             output=ColorOutput(label=__name__),
 62 |             bpf="udp",
 63 |             description="(UNFINISHED) Session Initiation Protocol (SIP) capture plugin",
 64 |             longdescription="""
 65 | The Session Initiation Protocol (SIP) plugin will extract the Call ID, User agent, Codec, Method, 
 66 | SIP call, Host, and Client MAC address from every SIP request or response packet found in the given pcap.  
 67 | 
 68 | General usage:
 69 |     decode -d sip <pcap> 
 70 | 
 71 | Detailed usage:
 72 |     decode -d sip --sip_showpkt <pcap> 
 73 | 
 74 | Layer2 sll usage:
 75 |     decode -d sip --no-vlan --layer2=sll.SLL <pcap> 
 76 | 
 77 | SIP over TCP:
 78 |     decode -d sip --bpf 'tcp' <pcap> 
 79 | 
 80 |  SIP is a text-based protocol with syntax similar to that of HTTP, so you can also use the followstream plugin:
 81 |     decode -d followstream --ebpf 'port 5060' --bpf 'udp' <pcap>
 82 | 
 83 | Examples:
 84 | 
 85 |     https://wiki.wireshark.org/SampleCaptures#SIP_and_RTP
 86 |     http://vignette3.wikia.nocookie.net/networker/images/f/fb/Sample_SIP_call_with_RTP_in_G711.pcap/revision/latest?cb=20140723121754
 87 | 
 88 |     decode -d sip metasploit-sip-invite-spoof.pcap
 89 |     decode -d sip Sample_SIP_call_with_RTP_in_G711.pcap
 90 | 
 91 | Output:
 92 | 
 93 |     <-- SIP Request --> 
 94 |     Timestamp: 2016-09-21 22:44:28.220185 UTC - Protocol: UDP - Size: 435 bytes
 95 |     Sequence and Method: 1 ACK
 96 |     From: 10.5.1.8:5060 (00:20:80:a1:13:db) to 10.5.1.7:5060 (15:2a:01:b4:0f:47)
 97 |     Via: SIP/2.0/UDP 10.5.1.8:5060;branch=z9hG4bK940bdac4-8a13-1410-9e58-08002772a6e9;rport
 98 |     SIP call: "M" <sip:M@10.5.1.8>;tag=0ba2d5c4-8a13-1910-9d56-08002772a6e9  -->  "miguel" <sip:demo-alice@10.5.1.7>;tag=84538c9d-ba7e-e611-937f-68a3c4f0d6ce
 99 |     Call ID: 0ba2d5c4-8a13-1910-9d57-08002772a6e9@M-PC
100 | 
101 |     --> SIP Response <-- 
102 |     Timestamp: 2016-09-21 22:44:27.849761 UTC - Protocol: UDP - Size: 919 bytes
103 |     Sequence and Method: 1 INVITE
104 |     From: 10.5.1.7:5060 (02:0a:40:12:30:23) to 10.5.1.8:5060 (d5:02:03:94:31:1b)
105 |     Via: SIP/2.0/UDP 10.5.1.8:5060;branch=z9hG4bK26a8d5c4-8a13-1910-9d58-08002772a6e9;rport=5060;received=10.5.1.8
106 |     SIP call: "M" <sip:M@10.5.1.8>;tag=0ba2d5c4-8a13-1910-9d56-08002772a6e9  -->  "miguel" <sip:demo-alice@10.5.1.7>;tag=84538c9d-ba7e-e611-937f-68a3c4f0d6ce
107 |     Call ID: 0ba2d5c4-8a13-1910-9d57-08002772a6e9@M-PC
108 |     Codec selected: PCMU 
109 |     Rate selected: 8000 
110 | 
111 | Detailed Output:
112 | 
113 |     --> SIP Response <-- 
114 |     Timestamp: 2016-09-21 22:44:25.360974 UTC - Protocol: UDP - Size: 349 bytes
115 |     From: 10.5.1.7:5060 (15:2a:01:b4:0f:47) to 10.5.1.8:5060 (00:20:80:a1:13:db) 
116 |     SIP/2.0 100 Trying
117 |     content-length: 0
118 |     via: SIP/2.0/UDP 10.5.1.8:5060;branch=z9hG4bK26a8d5c4-8a13-1910-9d58-08002772a6e9;rport=5060;received=10.5.1.8
119 |     from: "M" <sip:M@10.5.1.8>;tag=0ba2d5c4-8a13-1910-9d56-08002772a6e9
120 |     to: <sip:demo-alice@10.5.1.7>
121 |     cseq: 1 INVITE
122 |     call-id: 0ba2d5c4-8a13-1910-9d57-08002772a6e9@M-PC
123 | 
124 |     --> SIP Response <-- 
125 |     Timestamp: 2016-09-21 22:44:25.387780 UTC - Protocol: UDP - Size: 585 bytes
126 |     From: 10.5.1.7:5060 (15:2a:01:b4:0f:47) to 10.5.1.8:5060 (00:20:80:a1:13:db)
127 |     SIP/2.0 180 Ringing
128 |     content-length: 0
129 |     via: SIP/2.0/UDP 10.5.1.8:5060;branch=z9hG4bK26a8d5c4-8a13-1910-9d58-08002772a6e9;rport=5060;received=10.5.1.8
130 |     from: "M" <sip:M@10.5.1.8>;tag=0ba2d5c4-8a13-1910-9d56-08002772a6e9
131 |     require: 100rel
132 |     rseq: 694867676
133 |     user-agent: Ekiga/4.0.1
134 |     to: "miguel" <sip:demo-alice@10.5.1.7>;tag=84538c9d-ba7e-e611-937f-68a3c4f0d6ce
135 |     contact: "miguel" <sip:miguel@10.5.1.7>
136 |     cseq: 1 INVITE
137 |     allow: INVITE,ACK,OPTIONS,BYE,CANCEL,SUBSCRIBE,NOTIFY,REFER,MESSAGE,INFO,PING,PRACK
138 |     call-id: 0ba2d5c4-8a13-1910-9d57-08002772a6e9@M-PC
139 | """,
140 |             optiondict={
141 |                 "showpkt": {
142 |                     "action": "store_true",
143 |                     "default": False,
144 |                     "help": "Display the full SIP response or request body"
145 |                 }
146 |             }
147 |        )
148 | 
149 |         self.rate = None
150 |         self.codec = None
151 |         self.direction = None
152 | 
153 |     def packet_handler(self, pkt):
154 |         self.rate = str()
155 |         self.codec = str()
156 |         self.direction = str()
157 | 
158 |         # Scrape out the UDP layer of the packet
159 |         udpp = pkt.pkt.upper_layer
160 |         while not isinstance(udpp, udp.UDP):
161 |             try:
162 |                 udpp = udpp.upper_layer
163 |             except AttributeError:
164 |                 # There doesn't appear to be a UDP layer
165 |                 return
166 | 
167 |         # Check whether the UDP payload parses as a SIP message
168 |         sippkt = sip.SIP(udpp.body_bytes)
169 |         if sippkt:
170 |             siptxt = "<-- SIP Request -->"
171 |             self.direction = "sc"
172 |             self.output = True
173 | 
174 |         # TODO finish SIP plugin (pypacker needs to finish SIP, too)
175 | 


--------------------------------------------------------------------------------
/dshell/plugins/wifi/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/USArmyResearchLab/Dshell/d7b9f0b5a9716b3780b4877fa5c4e7a3beec73d9/dshell/plugins/wifi/__init__.py


--------------------------------------------------------------------------------
/dshell/plugins/wifi/wifi80211.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Shows 802.11 information for individual packets.
 3 | """
 4 | 
 5 | import dshell.core
 6 | from dshell.output.output import Output
 7 | 
 8 | from pypacker.layer12 import ieee80211
 9 | 
10 | # Create a dictionary of string representations of frame types
11 | TYPE_KEYS = {
12 |     ieee80211.MGMT_TYPE: "MGMT",
13 |     ieee80211.CTL_TYPE: "CTRL",
14 |     ieee80211.DATA_TYPE: "DATA"
15 | }
16 | 
17 | # Create a dictionary of subtype keys from constants defined in ieee80211
18 | # Its keys will be tuple pairs of (TYPE, SUBTYPE)
19 | SUBTYPE_KEYS = dict()
20 | # Management frame subtypes
21 | SUBTYPE_KEYS.update(dict(((ieee80211.MGMT_TYPE, v), k[2:]) for k, v in ieee80211.__dict__.items() if type(v) == int and k.startswith("M_")))
22 | # Control frame subtypes
23 | SUBTYPE_KEYS.update(dict(((ieee80211.CTL_TYPE, v), k[2:]) for k, v in ieee80211.__dict__.items() if type(v) == int and k.startswith("C_")))
24 | # Data frame subtypes
25 | SUBTYPE_KEYS.update(dict(((ieee80211.DATA_TYPE, v), k[2:]) for k, v in ieee80211.__dict__.items() if type(v) == int and k.startswith("D_")))
26 | 
27 | class DshellPlugin(dshell.core.PacketPlugin):
28 | 
29 |     OUTPUT_FORMAT = "[%(plugin)s] %(dt)s [%(ftype)s] [%(encrypted)s] [%(fsubtype)s] %(bodybytes)r %(retry)s\n"
30 | 
31 |     def __init__(self, *args, **kwargs):
32 |         super().__init__(
33 |             name="802.11",
34 |             description="Show 802.11 packet information",
35 |             author="dev195",
36 |             bpf="wlan type mgt or wlan type ctl or wlan type data",
37 |             output=Output(label=__name__, format=self.OUTPUT_FORMAT),
38 |             optiondict={
39 |                 "ignore_mgt": {"action": "store_true", "help": "Ignore management frames"},
40 |                 "ignore_ctl": {"action": "store_true", "help": "Ignore control frames"},
41 |                 "ignore_data": {"action": "store_true", "help": "Ignore data frames"},
42 |                 "ignore_beacon": {"action": "store_true", "help": "Ignore beacons"},
43 |             },
44 |             longdescription="""
45 | Shows basic information for 802.11 packets, including:
46 |  - Frame type
47 |  - Encryption
48 |  - Frame subtype
49 |  - Data sample
50 | """
51 |         )
52 | 
53 |     def handle_plugin_options(self):
54 |         "Update the BPF based on 'ignore' flags"
55 |         # NOTE: This function is naturally called in decode.py
56 |         bpf_pieces = []
57 |         if not self.ignore_mgt:
58 |             if self.ignore_beacon:
59 |                 bpf_pieces.append("(wlan type mgt and not wlan type mgt subtype beacon)")
60 |             else:
61 |                 bpf_pieces.append("wlan type mgt")
62 |         if not self.ignore_ctl:
63 |             bpf_pieces.append("wlan type ctl")
64 |         if not self.ignore_data:
65 |             bpf_pieces.append("wlan type data")
66 |         self.bpf = " or ".join(bpf_pieces)
67 | 
68 |     def packet_handler(self, pkt):
69 |         try:
70 |             frame = pkt.pkt.ieee80211
71 |         except AttributeError:
72 |             frame = pkt.pkt
73 |         encrypted = "encrypted" if frame.protected else "         "
74 |         frame_type = TYPE_KEYS.get(frame.type, '----')
75 |         frame_subtype = SUBTYPE_KEYS.get((frame.type, frame.subtype), "")
76 |         retry = "[resent]" if frame.retry else ""
77 |         bodybytes = frame.body_bytes[:50]
78 | 
79 |         self.write(
80 |             encrypted=encrypted,
81 |             ftype=frame_type,
82 |             fsubtype=frame_subtype,
83 |             retry=retry,
84 |             bodybytes=bodybytes,
85 |             **pkt.info()
86 |         )
87 | 
88 |         return pkt
89 | 
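
The SUBTYPE_KEYS table above is built by scraping integer constants out of the ieee80211 module namespace by prefix (M_, C_, D_) and keying them by (frame type, subtype). The same comprehension pattern can be checked in isolation; the sketch below uses a hypothetical stand-in namespace with illustrative values rather than pypacker's real constants, just to show how a (type, subtype) pair resolves to a readable name.

    from types import SimpleNamespace

    # Hypothetical stand-in for the ieee80211 module with two illustrative subtypes.
    fake_mod = SimpleNamespace(MGMT_TYPE=0, M_BEACON=8, M_PROBE_REQ=4)

    # Same pattern as SUBTYPE_KEYS: (type, subtype) -> constant name minus its prefix.
    subtype_keys = dict(
        ((fake_mod.MGMT_TYPE, v), k[2:])
        for k, v in vars(fake_mod).items()
        if isinstance(v, int) and k.startswith("M_")
    )

    print(subtype_keys.get((0, 8), ""))   # -> "BEACON"
    print(subtype_keys.get((0, 99), ""))  # -> "" (unknown subtype falls back to empty)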


--------------------------------------------------------------------------------
/dshell/plugins/wifi/wifibeacon.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Shows 802.11 wireless beacons and related information
 3 | """
 4 | 
 5 | from collections import defaultdict
 6 | from datetime import datetime
 7 | 
 8 | import dshell.core
 9 | from dshell.output.output import Output
10 | 
11 | class DshellPlugin(dshell.core.PacketPlugin):
12 | 
13 |     OUTPUT_FORMAT = "[%(plugin)s]\t%(dt)s\tInterval: %(interval)s TU,\tSSID: %(ssid)s\t%(count)s\n"
14 | 
15 |     def __init__(self, *args, **kwargs):
16 |         super().__init__(
17 |             name="Wi-fi Beacons",
18 |             description="Show SSIDs of 802.11 wireless beacons",
19 |             author="dev195",
20 |             bpf="wlan type mgt subtype beacon",
21 |             output=Output(label=__name__, format=self.OUTPUT_FORMAT),
22 |             optiondict={
23 |                 "group": {"action": "store_true", "help": "Group beacons together with counts"},
24 |             }
25 |         )
26 |         self.group_counts = defaultdict(int)
27 |         self.group_times  = defaultdict(datetime.now)
28 | 
29 |     def packet_handler(self, pkt):
30 |         # Extract 802.11 frame from packet
31 |         try:
32 |             frame = pkt.pkt.ieee80211
33 |         except AttributeError:
34 |             frame = pkt.pkt
35 | 
36 |         # Confirm that packet is, in fact, a beacon
37 |         if not frame.is_beacon():
38 |             return
39 | 
40 |         # Extract SSID from frame
41 |         beacon = frame.beacon
42 |         ssid = ""
43 |         try:
44 |             for param in beacon.params:
45 |                 # Find the SSID parameter
46 |                 if param.id == 0:
 47 |                     ssid = param.body_bytes.decode("utf-8", errors="replace")
48 |                     break
49 |         except IndexError:
50 |             # Sometimes pypacker fails to parse a packet
51 |             return
52 | 
53 |         if self.group:
54 |             self.group_counts[(ssid, beacon.interval)] += 1
55 |             self.group_times[(ssid, beacon.interval)]  = pkt.ts
56 |         else:
57 |             self.write(ssid=ssid, interval=beacon.interval, **pkt.info())
58 | 
59 |         return pkt
60 | 
61 |     def postfile(self):
62 |         if self.group:
63 |             for key, val in self.group_counts.items():
64 |                 ssid, interval = key
65 |                 dt = self.group_times[key]
66 |                 self.write(ssid=ssid, interval=interval, plugin=self.name, dt=dt, count=val)
67 | 
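
With --group set, the plugin defers output until postfile(), keeping a count and a last-seen timestamp per (SSID, beacon interval) pair. The aggregation itself is plain defaultdict bookkeeping; a minimal standalone sketch with made-up beacon tuples:

    from collections import defaultdict

    # Hypothetical sample data: (ssid, beacon interval in TU, timestamp) per observed beacon.
    beacons = [
        ("CoffeeShop", 100, 1474498825.3),
        ("CoffeeShop", 100, 1474498825.4),
        ("Lab-AP", 200, 1474498826.0),
    ]

    group_counts = defaultdict(int)
    group_times = {}

    for ssid, interval, ts in beacons:
        group_counts[(ssid, interval)] += 1   # beacons seen per (SSID, interval)
        group_times[(ssid, interval)] = ts    # most recent timestamp wins

    for (ssid, interval), count in group_counts.items():
        print(f"{ssid}\tinterval={interval} TU\tcount={count}\tlast_seen={group_times[(ssid, interval)]}")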


--------------------------------------------------------------------------------
/dshell/util.py:
--------------------------------------------------------------------------------
  1 | """
  2 | A collection of useful utilities used in several plugins and libraries.
  3 | """
  4 | 
  5 | import os
  6 | import string
  7 | 
  8 | 
  9 | def xor(xinput, key):
 10 |     """
 11 |     XOR an input string with a given integer key.
 12 | 
 13 |     Arguments:
 14 |         xinput: plain text input string
 15 |         key:    integer xor key applied to each character
 16 |     """
 17 |     output = ''.join([chr(ord(c) ^ key) for c in xinput])
 18 |     return output
 19 | 
 20 | 
 21 | def get_data_path():
 22 |     dpath = os.path.dirname(__file__)
 23 |     return os.path.sep.join((dpath, 'data'))
 24 | 
 25 | 
 26 | def get_plugin_path():
 27 |     dpath = os.path.dirname(__file__)
 28 |     return os.path.sep.join((dpath, 'plugins'))
 29 | 
 30 | 
 31 | def get_output_path():
 32 |     dpath = os.path.dirname(__file__)
 33 |     return os.path.sep.join((dpath, 'output'))
 34 | 
 35 | 
 36 | def decode_base64(intext, alphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/', padchar='='):
 37 |     """
 38 |     Decodes a base64-encoded string, optionally using a custom alphabet.
 39 | 
 40 |     Arguments:
 41 |         intext:     base64-encoded input string
 42 |         alphabet:   base64 alphabet to use
 43 |         padchar:    padding character
 44 |     """
 45 |     # Build dictionary from alphabet
 46 |     alphabet_index = {}
 47 |     for i, c in enumerate(alphabet):
 48 |         if c in alphabet_index:
 49 |             raise ValueError("'{}' used more than once in alphabet".format(c))
 50 |         alphabet_index[c] = i
 51 |     alphabet_index[padchar] = 0
 52 | 
 53 |     alphabet += padchar
 54 | 
 55 |     outtext = ''
 56 |     intext = intext.rstrip('\n')
 57 | 
 58 |     i = 0
 59 |     while i < len(intext) - 3:
 60 |         if (
 61 |             intext[i] not in alphabet
 62 |             or intext[i + 1] not in alphabet
 63 |             or intext[i + 2] not in alphabet
 64 |             or intext[i + 3] not in alphabet
 65 |         ):
 66 |             raise KeyError("Non-alphabet character in encoded text.")
 67 |         val = alphabet_index[intext[i]] * 262144
 68 |         val += alphabet_index[intext[i + 1]] * 4096
 69 |         val += alphabet_index[intext[i + 2]] * 64
 70 |         val += alphabet_index[intext[i + 3]]
 71 |         i += 4
 72 |         for factor in [65536, 256, 1]:
 73 |             outtext += chr(int(val / factor))
 74 |             val = val % factor
 75 | 
 76 |     return outtext
 77 | 
 78 | 
 79 | def printable_text(intext, include_whitespace=True):
 80 |     """
 81 |     Replaces non-printable characters with dots.
 82 | 
 83 |     Arguments:
 84 |         intext:     input text (str or bytes)
 85 |         include_whitespace (bool):  set to False to mark whitespace characters
 86 |                                     as unprintable
 87 |     """
 88 |     printable = string.ascii_letters + string.digits + string.punctuation
 89 |     if include_whitespace:
 90 |         printable += string.whitespace
 91 | 
 92 |     if isinstance(intext, bytes):
 93 |         intext = intext.decode("ascii", errors="replace")
 94 | 
 95 |     outtext = [c if c in printable else '.' for c in intext]
 96 |     outtext = ''.join(outtext)
 97 | 
 98 |     return outtext
 99 | 
100 | 
101 | def hex_plus_ascii(data, width=16, offset=0):
102 |     """
103 |     Converts a data string into a two-column hex and string layout,
104 |     similar to tcpdump with -X
105 | 
106 |     Arguments:
107 |         data:   incoming data to format
108 |         width:  width of the columns
109 |         offset: offset output from the left by this value
110 |     """
111 |     output = ""
112 |     for i in range(0, len(data), width):
113 |         s = data[i:i + width]
114 |         if isinstance(s, bytes):
115 |             outhex = ' '.join(["{:02X}".format(x) for x in s])
116 |         else:
117 |             outhex = ' '.join(["{:02X}".format(ord(x)) for x in s])
118 |         outstr = printable_text(s, include_whitespace=False)
119 |         outstr = "{:08X}  {:49}  {}\n".format(i + offset, outhex, outstr)
120 |         output += outstr
121 |     return output
122 | 
123 | 
124 | def gen_local_filename(path, origname):
125 |     """
126 |     Generates a local filename based on the original. Automatically adds a
127 |     number to the end, if file already exists.
128 | 
129 |     Arguments:
130 |         path:       output path for file
131 |         origname:   original name of the file to transform
132 |     """
133 | 
134 |     tmp = origname.replace("\\", "_")
135 |     tmp = tmp.replace("/", "_")
136 |     tmp = tmp.replace(":", "_")
137 |     localname = ''
138 |     for c in tmp:
139 |         if ord(c) > 32 and ord(c) < 127:
140 |             localname += c
141 |         else:
142 |             localname += "%%%02X" % ord(c)
143 |     localname = os.path.join(path, localname)
144 |     postfix = ''
145 |     i = 0
146 |     while os.path.exists(localname + postfix):
147 |         i += 1
148 |         postfix = "_{:04d}".format(i)
149 |     return localname + postfix
150 | 
151 | 
152 | def human_readable_filesize(bytecount):
153 |     """
154 |     Converts the raw byte counts into a human-readable format
155 |     https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size/1094933#1094933
156 |     """
157 |     for unit in ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'):
158 |         if abs(bytecount) < 1024.0:
159 |             return "{:3.2f} {}".format(bytecount, unit)
160 |         bytecount /= 1024.0
161 |     return "{:3.2f} {}".format(bytecount, "YB")
162 | 
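
A quick usage sketch for the helpers above, assuming the dshell package is importable. Note how decode_base64 rebuilds a 24-bit group from four 6-bit values (hence the 262144 = 64**3, 4096 = 64**2, and 64 multipliers, and the 65536/256/1 divisors that split it back into three bytes), and how human_readable_filesize repeatedly divides by 1024. The outputs in the comments are what these implementations return.

    from dshell.util import decode_base64, printable_text, human_readable_filesize, hex_plus_ascii

    print(decode_base64("aGVsbG8h"))            # -> "hello!" (standard alphabet, no padding)
    print(printable_text(b"abc\x00def"))        # -> "abc.def" (NUL replaced with a dot)
    print(human_readable_filesize(1536))        # -> "1.50 KB"
    print(hex_plus_ascii(b"hello!"), end="")    # tcpdump -X style hex/ASCII dump of the bytes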


--------------------------------------------------------------------------------
/scripts/dshell:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | DATA_DIR=$(python3 -c "import dshell.util; print(dshell.util.get_data_path())")
3 | /bin/bash --rcfile "$DATA_DIR/dshellrc"
4 | 


--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
 1 | from setuptools import find_packages, setup
 2 | 
 3 | setup(
 4 |     name="Dshell",
 5 |     version="3.2.3",
 6 |     author="USArmyResearchLab",
 7 |     description="An extensible network forensic analysis framework",
 8 |     url="https://github.com/USArmyResearchLab/Dshell",
 9 |     python_requires='>=3.8',
10 |     packages=find_packages(),
11 |     package_data={
12 |         "dshell": ["data/dshellrc", "data/GeoIP/readme.txt"],
13 |     },
14 |     classifiers=[
15 |         "Programming Language :: Python :: 3",
16 |         "License :: OSI Approved :: MIT License",
17 |         "Operating System :: POSIX :: Linux",
18 |         "Environment :: Console",
19 |         "Topic :: Security",
20 |     ],
21 |     install_requires=[
22 |         "geoip2",
23 |         "pcapy-ng",
24 |         "pypacker",
25 |         "pyopenssl",
26 |         "elasticsearch",
27 |         "tabulate",
28 |     ],
29 |     entry_points={
30 |         "console_scripts": [
31 |             "dshell-decode = dshell.decode:main_command_line",
32 |         ],
33 |         "dshell_plugins": [],
34 |     },
35 |     scripts=[
36 |         "scripts/dshell",
37 |     ],
38 | )
39 | 
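
The console_scripts entry point means pip installs a dshell-decode executable that calls dshell.decode:main_command_line. If you need to resolve that mapping yourself, for instance to embed the decoder in another tool, it reduces to an ordinary import; a minimal sketch, assuming the package and its dependencies are installed:

    import importlib

    # Resolve the "dshell.decode:main_command_line" entry point target by hand.
    module_name, func_name = "dshell.decode:main_command_line".split(":")
    main = getattr(importlib.import_module(module_name), func_name)

    # Calling main() here would run the same CLI that the dshell-decode script wraps.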


--------------------------------------------------------------------------------