├── .gitignore ├── .gitmodules ├── .rspec ├── CONTRIBUTING.md ├── Dockerfile ├── Dockerfile.testing ├── Gemfile ├── Gemfile.lock ├── LICENSE ├── README.md ├── Rakefile ├── bin └── dap ├── cortex.yaml ├── dap.gemspec ├── data ├── .gitkeep ├── tlds-alpha-by-domain.txt └── vulndb.rb ├── lib ├── dap.rb └── dap │ ├── filter.rb │ ├── filter │ ├── base.rb │ ├── geoip.rb │ ├── geoip2.rb │ ├── gquic.rb │ ├── http.rb │ ├── ldap.rb │ ├── names.rb │ ├── openssl.rb │ ├── recog.rb │ ├── simple.rb │ ├── smbclient.rb │ ├── ssh_keyscan.rb │ ├── udp.rb │ └── vulnmatch.rb │ ├── input.rb │ ├── input │ ├── csv.rb │ └── warc.rb │ ├── output.rb │ ├── proto │ ├── addp.rb │ ├── dtls.rb │ ├── ipmi.rb │ ├── ldap.rb │ ├── mssql.rb │ ├── natpmp.rb │ └── wdbrpc.rb │ ├── utils │ ├── misc.rb │ └── oui.rb │ └── version.rb ├── samples ├── http_get_reply.ic12.bz2 ├── http_get_reply.ic12.sh ├── http_get_reply_iframes.json.bz2 ├── http_get_reply_iframes.json.sh ├── http_get_reply_links.json.sh ├── iawide.warc.bz2 ├── iawide_warc.sh ├── ipmi_chan_auth_replies.crd.bz2 ├── ipmi_chan_auth_replies.sh ├── ssl_certs.bz2 ├── ssl_certs_geo.sh ├── ssl_certs_names.sh ├── ssl_certs_names_expanded.sh ├── ssl_certs_org.sh ├── udp-netbios.csv.bz2 └── udp-netbios.sh ├── spec ├── dap │ ├── filter │ │ ├── gquic_filter_spec.rb │ │ ├── http_filter_spec.rb │ │ ├── ldap_filter_spec.rb │ │ ├── simple_filter_spec.rb │ │ └── udp_filter_spec.rb │ ├── input │ │ └── json_spec.rb │ ├── proto │ │ ├── ipmi_spec.rb │ │ └── ldap_proto_spec.rb │ └── utils │ │ └── misc_spec.rb └── spec_helper.rb ├── test ├── filters.bats ├── inputs.bats ├── test_common.bash └── test_data │ ├── geoip │ ├── GeoIPASNum.dat │ ├── GeoIPCity.dat │ └── GeoIPOrg.dat │ └── geoip2 │ ├── GeoIP2-City-Test.mmdb │ ├── GeoIP2-ISP-Test.mmdb │ └── GeoLite2-ASN-Test.mmdb └── tools ├── geo-ip-summary.rb ├── ipmi-vulns.rb ├── json-summarize.rb ├── netbios-counts.rb ├── upnp-vulns.rb └── value-counts-to-md-table.rb /.gitignore: -------------------------------------------------------------------------------- 1 | # Ruby and tooling specific 2 | .yardoc 3 | doc/ 4 | /pkg/ 5 | 6 | # Ignore rvm files 7 | .ruby-version 8 | .ruby-gemset 9 | 10 | # Ignore geoip data file 11 | data/geoip.dat 12 | data/GeoLite2-ASN.mmdb 13 | data/GeoLite2-City.mmdb 14 | data/GeoLite2-Country.mmdb 15 | 16 | ## Environment normalization: 17 | /.bundle/ 18 | /vendor/bundle 19 | /lib/bundler/man/ 20 | 21 | # Misc 22 | **/.DS_Store 23 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "test/test_helper/bats-support"] 2 | path = test/test_helper/bats-support 3 | url = https://github.com/ztombol/bats-support 4 | [submodule "test/test_helper/bats-assert"] 5 | path = test/test_helper/bats-assert 6 | url = https://github.com/ztombol/bats-assert 7 | -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --colour 2 | --format d 3 | --require spec_helper -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to dap 2 | 3 | The users and maintainers of dap would greatly appreciate any contributions 4 | you can make to the project. These contributions typically come in the form of 5 | filed bugs/issues or pull requests (PRs). 
These contributions routinely result 6 | in new versions of the [dap gem](https://rubygems.org/gems/dap) being 7 | released. The process for everything is described below. 8 | 9 | ## Contributing Issues / Bug Reports 10 | 11 | If you encounter any bugs or problems with dap, please file them 12 | [here](https://github.com/rapid7/dap/issues/new), providing as much detail as 13 | possible. If the bug is straightforward enough and you understand the fix for 14 | the bug well enough, you may take the simpler, less-paperwork route and simply 15 | file a PR with the fix and the necessary details. 16 | 17 | ## Contributing Code 18 | 19 | dap uses a model nearly identical to that of 20 | [Metasploit](https://github.com/rapid7/metasploit-framework) as outlined 21 | [here](https://github.com/rapid7/metasploit-framework/wiki/Setting-Up-a-Metasploit-Development-Environment), 22 | at least from a ```git``` perspective. If you've been through that process 23 | (or, even better, you've been through it many times with many people), you can 24 | do exactly what you did for Metasploit but with dap and ignore the rest of 25 | this document. 26 | 27 | On the other hand, if you haven't, read on! 28 | 29 | ### Fork and Clone 30 | 31 | Generally, this should only need to be done once, or if you need to start over. 32 | 33 | 1. Fork dap: Visit https://github.com/rapid7/dap and click Fork, 34 | selecting your GitHub account if prompted 35 | 2. Clone ```git@github.com:<github-username>/dap.git```, replacing 36 | ```<github-username>``` with, you guessed it, your GitHub username. 37 | 3. Add the master dap repository as your upstream: 38 | 39 | ``` 40 | git remote add upstream git://github.com/rapid7/dap.git 41 | ``` 42 | 4. Update your `.git/config` to ensure that the `remote ["upstream"]` section is configured to pull both branches and PRs from upstream. It should look something like the following, in particular the second `fetch` option: 43 | 44 | ``` 45 | [remote "upstream"] 46 | url = git@github.com:rapid7/dap.git 47 | fetch = +refs/heads/*:refs/remotes/upstream/* 48 | fetch = +refs/pull/*/head:refs/remotes/upstream/pr/* 49 | ``` 50 | 5. Fetch the latest revisions, including PRs: 51 | 52 | ``` 53 | git fetch --all 54 | ``` 55 | 56 | ### Branch and Improve 57 | 58 | If you have a contribution to make, first create a branch to contain your 59 | work. The name is yours to choose; however, it should generally 60 | describe what you are doing. In this example, and from here on out, the 61 | branch will be FOO, but you should obviously change this: 62 | 63 | ``` 64 | git fetch --all 65 | git checkout master 66 | git rebase upstream/master 67 | git checkout -b FOO 68 | ``` 69 | 70 | Now, make your changes, committing as necessary with useful commit messages. 71 | 72 | Please note that changes to [lib/dap/version.rb](https://github.com/rapid7/dap/blob/master/lib/dap/version.rb) in PRs are almost never necessary. 73 | 74 | Now push your changes to your fork: 75 | 76 | ``` 77 | git push origin FOO 78 | ``` 79 | 80 | Finally, submit the PR. Navigate to ```https://github.com/<github-username>/dap/compare/FOO```, fill in the details and submit. 81 | 82 | ### Testing 83 | 84 | You are encouraged to perform testing _before_ submitting the PR. There are two testing frameworks in place:
88 | 89 | * Ruby `rspec` (run locally with `bundle exec rspec spec`) 90 | * [bats](https://github.com/sstephenson/bats) integration tests 91 | 92 | To run these tests locally, run: 93 | ``` 94 | docker build -t dap_testing -f Dockerfile.testing . && \ 95 | docker run --rm --name dap_testing -it -e DAP_EXECUTABLE=dap dap_testing /bin/bash -l -c "rvm use 2.7.6 && gem build dap && gem install dap*.gem && bundle exec rspec spec && find /opt/bats_testing -name \*.bats | grep -v test/test_helper/ | xargs -n1 bats" 96 | ``` 97 | 98 | ## Landing PRs 99 | 100 | (Note: this portion is a work-in-progress. Please update it as things change) 101 | 102 | As with submitting PRs, dap's process for landing PRs 103 | closely follows [Metasploit's process for landing 104 | PRs](https://github.com/rapid7/metasploit-framework/wiki/Landing-Pull-Requests). 105 | In short: 106 | 107 | 1. Follow the "Fork and Clone" steps from above 108 | 2. Update your `.git/config` to ensure that the `remote ["upstream"]` section is configured to pull both branches and PRs from upstream. It should look something like the following, in particular the second `fetch` option: 109 | 110 | ``` 111 | [remote "upstream"] 112 | url = git@github.com:rapid7/dap.git 113 | fetch = +refs/heads/*:refs/remotes/upstream/* 114 | fetch = +refs/pull/*/head:refs/remotes/upstream/pr/* 115 | ``` 116 | 3. Fetch the latest revisions, including PRs: 117 | 118 | ``` 119 | git fetch --all 120 | ``` 121 | 4. Checkout and branch the PR for testing. Replace ```PR``` below with the actual PR # in question: 122 | 123 | ``` 124 | git checkout -b landing-PR upstream/pr/PR 125 | ``` 126 | 5. Test the PR (see the Testing section above) 127 | 6. Merge with master, re-test, validate and push: 128 | 129 | ``` 130 | git checkout -b upstream-master --track upstream/master 131 | git merge -S --no-ff --edit landing-PR # merge the PR into upstream-master 132 | # re-test if/as necessary 133 | git push upstream upstream-master:master --dry-run # confirm you are pushing what you expect 134 | git push upstream upstream-master:master # push upstream-master to upstream:master 135 | ``` 136 | 7. If applicable, release a new version (see next section) 137 | 138 | ## Releasing New Versions 139 | 140 | When dap's critical parts are modified, for example its decoding or underlying supporting code, a new version _must_ eventually be released. Releases for non-functional updates such as updates to documentation are not necessary. 141 | 142 | When a new version of dap is to be released, you _must_ follow the instructions below. 143 | 144 | 1. If you are not already a dap project contributor for the dap gem (you'd be listed [here under OWNERS](https://rubygems.org/gems/dap)), become one: 145 | 1. Get an account on [Rubygems](https://rubygems.org) 146 | 2. Contact one of the dap project contributors (listed [here under OWNERS](https://rubygems.org/gems/dap)) and have them add you to the dap gem. They'll need to run: 147 | ``` 148 | gem owner dap -a EMAIL 149 | ``` 150 | 2. Edit [lib/dap/version.rb](https://github.com/rapid7/dap/blob/master/lib/dap/version.rb) and increment ```VERSION```. Commit and push to rapid7/dap master. 151 | 3. Run `rake release`. Among other things, this creates the new gem, uploads it to Rubygems and tags the release with a tag like `v<VERSION>`, where `<VERSION>` is replaced with the version from `version.rb`. For example, if you release version 1.2.3 of the gem, the tag will be `v1.2.3`. 152 | 4.
If your default remote repository is not `rapid7/dap`, you must ensure that the tags created in the previous step are also pushed to the right location(s). For example, if `origin` is your fork of dap and `upstream` is `rapid7/dap`, you should run `git push --tags --dry-run upstream` to confirm what tags will be pushed and then `git push --tags upstream` to push the tags. 153 | 154 | ## Misc tips on building dap 155 | 156 | Ruby often comes prepackaged on Linux/macOS systems. Although the README already mentions using `rbenv`, it is useful to make sure your environment is actually using `rbenv` before running any Ruby commands such as `gem`, `bundle`, `ruby` or `dap` itself; use the `which` command to confirm that their paths indicate they came from `rbenv`. 157 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get update 5 | RUN apt-get install -y \ 6 | ca-certificates \ 7 | ruby \ 8 | ruby-dev \ 9 | git \ 10 | make \ 11 | g++ \ 12 | libffi-dev \ 13 | libgeoip-dev 14 | 15 | RUN apt-get install -y libxml2-dev zlib1g-dev 16 | 17 | RUN gem install bundler 18 | 19 | RUN apt-get install -y wget && mkdir -p /var/lib/geoip && cd /var/lib/geoip && wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz && gunzip GeoLiteCity.dat.gz && mv GeoLiteCity.dat geoip.dat 20 | 21 | RUN gem install dap -s https://github.com/rapid7/dap 22 | 23 | # Clean up APT when done. 24 | RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 25 | 26 | ENTRYPOINT ["dap"] 27 | -------------------------------------------------------------------------------- /Dockerfile.testing: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | ENV TEST_DIR /opt/bats_testing 4 | 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | RUN apt-get update 7 | RUN apt-get install -y build-essential ca-certificates curl git jq libffi-dev libgeoip-dev libxml2-dev wget zlib1g-dev 8 | 9 | # install rvm and necessary ruby bits 10 | RUN curl -sSL https://rvm.io/mpapis.asc | gpg --import - 11 | RUN curl -sSL https://rvm.io/pkuczynski.asc | gpg --import - 12 | RUN curl -sSL https://get.rvm.io | bash -s stable 13 | RUN /bin/bash -l -c "rvm requirements" 14 | RUN /bin/bash -l -c "rvm install 2.7.6" 15 | RUN /bin/bash -l -c "rvm use 2.7.6 && gem update --system && gem install bundler" 16 | COPY .
$TEST_DIR/ 17 | RUN /bin/bash -l -c "cd $TEST_DIR && rvm use 2.7.6 && bundle update --bundler && bundle install" 18 | 19 | # install maxmind legacy data 20 | RUN mkdir /var/lib/geoip 21 | COPY test/test_data/geoip/*.dat /var/lib/geoip/ 22 | # Note that these test files were copied from 23 | # https://github.com/maxmind/geoip-api-php/raw/master/tests/data/GeoIPCity.dat 24 | # https://github.com/maxmind/geoip-api-php/raw/master/tests/data/GeoIPASNum.dat 25 | # https://github.com/maxmind/geoip-api-php/raw/master/tests/data/GeoIPOrg.dat 26 | 27 | # install maxmind geoip2 data 28 | RUN mkdir /var/lib/geoip2 29 | COPY test/test_data/geoip2/*.mmdb /var/lib/geoip2/ 30 | # Note that these test files were copied from 31 | # https://github.com/maxmind/MaxMind-DB/raw/f6ed981c23b0eb33d7c07568e2177236252afda6/test-data/GeoLite2-ASN-Test.mmdb 32 | # https://github.com/maxmind/MaxMind-DB/raw/f6ed981c23b0eb33d7c07568e2177236252afda6/test-data/GeoIP2-City-Test.mmdb 33 | # https://github.com/maxmind/MaxMind-DB/blob/f6ed981c23b0eb33d7c07568e2177236252afda6/test-data/GeoIP2-ISP-Test.mmdb 34 | 35 | # install bats 36 | RUN git clone https://github.com/sstephenson/bats.git && cd bats && ./install.sh /usr 37 | 38 | WORKDIR /opt/bats_testing 39 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gemspec 4 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | PATH 2 | remote: . 3 | specs: 4 | dap (1.3.1) 5 | bit-struct 6 | geoip-c 7 | htmlentities 8 | maxmind-db 9 | net-dns 10 | nokogiri 11 | oj 12 | recog 13 | 14 | GEM 15 | remote: https://rubygems.org/ 16 | specs: 17 | aruba (2.1.0) 18 | bundler (>= 1.17, < 3.0) 19 | childprocess (>= 2.0, < 5.0) 20 | contracts (>= 0.16.0, < 0.18.0) 21 | cucumber (>= 4.0, < 9.0) 22 | rspec-expectations (~> 3.4) 23 | thor (~> 1.0) 24 | bit-struct (0.17) 25 | builder (3.2.4) 26 | childprocess (4.1.0) 27 | contracts (0.16.1) 28 | cucumber (8.0.0) 29 | builder (~> 3.2, >= 3.2.4) 30 | cucumber-ci-environment (~> 9.0, >= 9.0.4) 31 | cucumber-core (~> 11.0, >= 11.0.0) 32 | cucumber-cucumber-expressions (~> 15.1, >= 15.1.1) 33 | cucumber-gherkin (~> 23.0, >= 23.0.1) 34 | cucumber-html-formatter (~> 19.1, >= 19.1.0) 35 | cucumber-messages (~> 18.0, >= 18.0.0) 36 | diff-lcs (~> 1.5, >= 1.5.0) 37 | mime-types (~> 3.4, >= 3.4.1) 38 | multi_test (~> 1.1, >= 1.1.0) 39 | sys-uname (~> 1.2, >= 1.2.2) 40 | cucumber-ci-environment (9.1.0) 41 | cucumber-core (11.0.0) 42 | cucumber-gherkin (~> 23.0, >= 23.0.1) 43 | cucumber-messages (~> 18.0, >= 18.0.0) 44 | cucumber-tag-expressions (~> 4.1, >= 4.1.0) 45 | cucumber-cucumber-expressions (15.2.0) 46 | cucumber-gherkin (23.0.1) 47 | cucumber-messages (~> 18.0, >= 18.0.0) 48 | cucumber-html-formatter (19.2.0) 49 | cucumber-messages (~> 18.0, >= 18.0.0) 50 | cucumber-messages (18.0.0) 51 | cucumber-tag-expressions (4.1.0) 52 | diff-lcs (1.5.0) 53 | ffi (1.15.5) 54 | geoip-c (0.9.1) 55 | htmlentities (4.3.4) 56 | maxmind-db (1.1.1) 57 | mime-types (3.4.1) 58 | mime-types-data (~> 3.2015) 59 | mime-types-data (3.2022.0105) 60 | mini_portile2 (2.8.0) 61 | multi_test (1.1.0) 62 | net-dns (0.9.0) 63 | nokogiri (1.13.9) 64 | mini_portile2 (~> 2.8.0) 65 | racc (~> 1.4) 66 | oj (3.13.21) 67 | racc (1.6.0) 68 | rake (13.0.6) 69 | recog (3.0.3) 70 | nokogiri 71 | rspec (3.11.0) 
72 | rspec-core (~> 3.11.0) 73 | rspec-expectations (~> 3.11.0) 74 | rspec-mocks (~> 3.11.0) 75 | rspec-core (3.11.0) 76 | rspec-support (~> 3.11.0) 77 | rspec-expectations (3.11.1) 78 | diff-lcs (>= 1.2.0, < 2.0) 79 | rspec-support (~> 3.11.0) 80 | rspec-mocks (3.11.1) 81 | diff-lcs (>= 1.2.0, < 2.0) 82 | rspec-support (~> 3.11.0) 83 | rspec-support (3.11.1) 84 | sys-uname (1.2.2) 85 | ffi (~> 1.1) 86 | thor (1.2.1) 87 | webrick (1.7.0) 88 | yard (0.9.28) 89 | webrick (~> 1.7.0) 90 | 91 | PLATFORMS 92 | ruby 93 | 94 | DEPENDENCIES 95 | aruba 96 | cucumber 97 | dap! 98 | rake 99 | rspec 100 | yard 101 | 102 | BUNDLED WITH 103 | 2.3.24 104 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2013 Rapid7 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DAP: The Data Analysis Pipeline 2 | 3 | [![Gem Version](https://badge.fury.io/rb/dap.svg)](http://badge.fury.io/rb/dap) 4 | 5 | DAP was created to transform text-based data on the command-line, specializing in transforms that are annoying or difficult to do with existing tools. 6 | 7 | DAP reads data using an input plugin, transforms it through a series of filters, and prints it out again using an output plugin. Every record is treated as a document (aka: hash/dict) and filters are used to reduce, expand, and transform these documents as they pass through. Think of DAP as a mashup between sed, awk, grep, csvtool, and jq, with map/reduce capabilities. 8 | 9 | DAP was written to process terabyte-sized public scan datasets, such as those provided by https://scans.io/. Although DAP isn't particularly fast, it can be used across multiple cores (and machines) by splitting the input source and wrapping the execution with GNU Parallel. 10 | 11 | ## Installation 12 | 13 | ### Prerequisites 14 | 15 | DAP requires Ruby 2.6.x or newer and is best suited for systems with a relatively current version. 16 | Ideally, this will be managed with either 17 | [`rbenv`](https://github.com/rbenv/rbenv) or [`rvm`](https://rvm.io/), with the bundler gem also installed and up to date. 18 | Using system managed/installed Rubies is possible but fraught with peril.
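Before installing, it is worth confirming that the Ruby toolchain on your `PATH` is the one you expect (see also the "Misc tips on building dap" section of CONTRIBUTING.md). A minimal sanity check, assuming an `rbenv`-managed setup:

```bash
# Both should resolve to rbenv shims rather than system paths
which ruby
which bundle
# DAP requires Ruby 2.6 or newer
ruby -v
```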
19 | 20 | #### Maxmind IP Location Databases 21 | 22 | If you intend to use any of the `geo_ip*` or `geo_ip2*` filters, you must 23 | install the databases that provide the data for these filters. If you do not 24 | intend to use these filters, you can skip this step. 25 | 26 | `dap` versions 1.4.x and later depend on [Maxmind's geoip2/geolite2 27 | databases](https://dev.maxmind.com/geoip/geoip2/geolite2/) to append 28 | geographic and related metadata to analyzed datasets. In order to use this 29 | functionality you must put your copy of the relevant Maxmind databases in the 30 | correct location, either `/var/lib/geoip2` or the `data` directory of your `dap` 31 | installation, or set an environment variable that specifies the full 32 | path to the database in question: 33 | 34 | * ASN: `GeoLite2-ASN.mmdb` (environment override: `GEOIP2_ASN_DATABASE_PATH`) 35 | * City: `GeoLite2-City.mmdb` (environment override: `GEOIP2_CITY_DATABASE_PATH`) 36 | * ISP: `GeoIP2-ISP.mmdb` (environment override: `GEOIP2_ISP_DATABASE_PATH`) 37 | 38 | *NOTE*: Prior to `dap` version 1.4.x there was a dependency on [Maxmind's geoip 39 | database](http://dev.maxmind.com/geoip/legacy/downloadable/) 40 | to append geographic metadata to analyzed datasets. However, since 41 | that time Maxmind has dropped support for these legacy databases. If you 42 | intend to continue using this deprecated functionality, you must put your copy 43 | of the relevant Maxmind legacy databases in the correct location, either 44 | `/var/lib/geoip` or the `data` directory of your `dap` installation, or set 45 | an environment variable that specifies the full path to the database in question: 46 | 47 | * ASN: `GeoIPASNum.dat` (environment override in 1.4.x+: `GEOIP_ASN_DATABASE_PATH`) 48 | * City: `geoip_city.dat` (environment override in 1.4.x+: `GEOIP_CITY_DATABASE_PATH`) 49 | * Org: `geoip_org.dat` (environment override in 1.4.x+: `GEOIP_ORG_DATABASE_PATH`) 50 | 51 | ### Ubuntu 16.04+ 52 | 53 | ```bash 54 | sudo apt-get install zlib1g-dev ruby ruby-dev gcc make ruby-bundler 55 | gem install dap 56 | ``` 57 | 58 | ### OS X 59 | 60 | ```bash 61 | # Install the GeoIP C library required by DAP 62 | brew update 63 | brew install geoip 64 | 65 | gem install dap 66 | ``` 67 | 68 | ## Usage 69 | 70 | In its simplest form, DAP takes input, applies zero or more filters which modify the input, and then outputs the result. The input, filters and output are separated by plus signs (`+`). As seen from `dap -h`: 71 | 72 | ```shell 73 | Usage: dap [input] + [filter] + [output] 74 | --inputs 75 | --outputs 76 | --filters 77 | ``` 78 | 79 | To see which input/output formats are supported and what filters are available, run `dap --inputs`, `dap --outputs`, or `dap --filters`, respectively. 80 | 81 | This example reads a single IP address as input from `STDIN` in line form, applies geo-ip transformations as a filter on that line, and then returns the output as JSON: 82 | 83 | ```shell 84 | $ echo 8.8.8.8 | bin/dap + lines + geo_ip2_city line + json | jq .
85 | { 86 | "line": "8.8.8.8", 87 | "line.geoip2.city.city.geoname_id": "0", 88 | "line.geoip2.city.continent.code": "NA", 89 | "line.geoip2.city.continent.geoname_id": "6255149", 90 | "line.geoip2.city.country.geoname_id": "6252001", 91 | "line.geoip2.city.country.iso_code": "US", 92 | "line.geoip2.city.country.is_in_european_union": "false", 93 | "line.geoip2.city.location.accuracy_radius": "1000", 94 | "line.geoip2.city.location.latitude": "37.751", 95 | "line.geoip2.city.location.longitude": "-97.822", 96 | "line.geoip2.city.location.metro_code": "0", 97 | "line.geoip2.city.location.time_zone": "America/Chicago", 98 | "line.geoip2.city.postal.code": "", 99 | "line.geoip2.city.registered_country.geoname_id": "6252001", 100 | "line.geoip2.city.registered_country.iso_code": "US", 101 | "line.geoip2.city.registered_country.is_in_european_union": "false", 102 | "line.geoip2.city.represented_country.geoname_id": "0", 103 | "line.geoip2.city.represented_country.iso_code": "", 104 | "line.geoip2.city.represented_country.is_in_european_union": "false", 105 | "line.geoip2.city.represented_country.type": "", 106 | "line.geoip2.city.traits.is_anonymous_proxy": "false", 107 | "line.geoip2.city.traits.is_satellite_provider": "false", 108 | "line.geoip2.city.continent.name": "North America", 109 | "line.geoip2.city.country.name": "United States", 110 | "line.geoip2.city.registered_country.name": "United States" 111 | } 112 | ``` 113 | 114 | There are also several examples of how to use DAP along with sample datasets [here](samples). 115 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require "bundler/gem_tasks" 2 | 3 | require 'rspec/core/rake_task' 4 | RSpec::Core::RakeTask.new do |t| 5 | t.pattern = 'spec/**/*_spec.rb' 6 | end 7 | 8 | require 'yard' 9 | require 'yard/rake/yardoc_task' 10 | YARD::Rake::YardocTask.new do |t| 11 | t.files = ['lib/**/*.rb', '-', 'README.md'] 12 | end 13 | 14 | require 'cucumber' 15 | require 'cucumber/rake/task' 16 | 17 | Cucumber::Rake::Task.new(:features) do |t| 18 | t.cucumber_opts = %w(features --format pretty) 19 | end 20 | 21 | task :default => [ :spec, :features, :yard ] 22 | task :tests => [ :spec ] 23 | -------------------------------------------------------------------------------- /bin/dap: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | $:.unshift(File.expand_path(File.join(File.dirname(__FILE__), "..", "lib"))) 4 | 5 | require 'rubygems' 6 | require 'bundler/setup' 7 | require 'shellwords' 8 | require 'dap' 9 | 10 | def version 11 | $stderr.puts "dap #{Dap::VERSION}" 12 | exit(0) 13 | end 14 | 15 | def usage 16 | $stderr.puts "" 17 | $stderr.puts " Usage: #{$0} [input] + [filter] + [output]" 18 | $stderr.puts " --inputs" 19 | $stderr.puts " --outputs" 20 | $stderr.puts " --filters" 21 | $stderr.puts "" 22 | $stderr.puts "Example: echo world | #{$0} lines stdin + rename line=hello + json stdout" 23 | $stderr.puts "" 24 | exit(1) 25 | end 26 | 27 | def show_inputs 28 | $stderr.puts "Inputs:" 29 | Dap::Factory.inputs.each_pair do |k,v| 30 | $stderr.puts " * #{k}" 31 | end 32 | $stderr.puts 33 | exit(1) 34 | end 35 | 36 | def show_outputs 37 | $stderr.puts "Outputs:" 38 | Dap::Factory.outputs.each_pair do |k,v| 39 | $stderr.puts " * #{k}" 40 | end 41 | $stderr.puts 42 | exit(1) 43 | end 44 | 45 | def show_filters 46 | $stderr.puts "Filters:" 47 | 
Dap::Factory.filters.each_pair do |k,v| 48 | $stderr.puts " * #{k}" 49 | end 50 | $stderr.puts 51 | exit(1) 52 | end 53 | 54 | trace = false 55 | args = [] 56 | 57 | # 58 | # Tokenize on + then treat each stage as a separate name + argument list 59 | # 60 | ARGV.join(' ').split(/\s*\+\s*/).each do |bit| 61 | 62 | # Handle quoted arguments as needed 63 | # XXX: Doesn't work as expected since ARGV parsing gobbles them up 64 | aset = Shellwords.shellwords(bit) 65 | 66 | # Check the first argument for help or usage flags 67 | arg = aset.first 68 | 69 | if arg == "--trace" 70 | trace = true 71 | arg = aset.shift 72 | end 73 | 74 | if arg == "-h" or arg == "--help" 75 | usage 76 | end 77 | 78 | if arg == "--version" or arg == "-v" 79 | version 80 | end 81 | 82 | if arg == "--inputs" 83 | show_inputs 84 | end 85 | 86 | if arg == "--outputs" 87 | show_outputs 88 | end 89 | 90 | if arg == "--filters" 91 | show_filters 92 | end 93 | 94 | args << aset if aset.length > 0 95 | end 96 | 97 | inp_args = args.shift 98 | out_args = args.pop 99 | 100 | usage if (inp_args == nil or out_args == nil) 101 | 102 | filters = [] 103 | 104 | inp = Dap::Factory.create_input(inp_args) 105 | out = Dap::Factory.create_output(out_args) 106 | args.each do |a| 107 | filters << Dap::Factory.create_filter(a) 108 | end 109 | 110 | out.start 111 | 112 | while true 113 | data = inp.read_record 114 | break if data == Dap::Input::Error::EOF 115 | next if data == Dap::Input::Error::Empty 116 | next if data == Dap::Input::Error::InvalidFormat 117 | 118 | docs = [ data ] 119 | 120 | fcount = 1 121 | filters.each do |f| 122 | $stderr.puts "T: #{" " * (fcount * 2)}#{f.name} -> #{docs.inspect} " if trace 123 | docs = docs.collect {|doc| f.process(doc) }.flatten 124 | $stderr.puts "T: #{" " * (fcount * 2)}#{" " * f.name.length} == #{docs.inspect}" if trace 125 | fcount += 1 126 | break if docs.length == 0 127 | end 128 | 129 | begin 130 | docs.each do |doc| 131 | out.write_record(doc) 132 | end 133 | rescue ::Errno::EPIPE 134 | break 135 | end 136 | end 137 | 138 | out.stop 139 | -------------------------------------------------------------------------------- /cortex.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | info: 3 | title: Dap 4 | description: Data Analysis Pipeline 5 | x-cortex-git: 6 | github: 7 | alias: r7org 8 | repository: rapid7/dap 9 | x-cortex-tag: dap 10 | x-cortex-type: service 11 | x-cortex-domain-parents: 12 | - tag: octo-and-labs 13 | x-cortex-groups: 14 | - exposure:internal-ship 15 | openapi: 3.0.1 16 | servers: 17 | - url: "/" 18 | -------------------------------------------------------------------------------- /dap.gemspec: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | $LOAD_PATH.push File.expand_path('../lib', __FILE__) 3 | require 'dap/version' 4 | 5 | Gem::Specification.new do |s| 6 | s.name = 'dap' 7 | s.version = Dap::VERSION 8 | s.required_ruby_version = '>= 2.6' 9 | s.authors = [ 10 | 'Rapid7 Research' 11 | ] 12 | s.email = [ 13 | 'research@rapid7.com' 14 | ] 15 | s.homepage = "https://www.github.com/rapid7/dap" 16 | s.summary = %q{DAP: The Data Analysis Pipeline} 17 | s.description = %q{ 18 | DAP reads data using an input plugin, transforms it through a series of filters, and prints it out again 19 | using an output plugin. Every record is treated as a document (aka: hash/dict) and filters are used to 20 | reduce, expand, and transform these documents as they pass through. 
Think of DAP as a mashup between 21 | sed, awk, grep, csvtool, and jq, with map/reduce capabilities. 22 | }.gsub(/\s+/, ' ').strip 23 | 24 | s.files = `git ls-files`.split("\n") 25 | s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") 26 | s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } 27 | s.require_paths = ['lib'] 28 | 29 | # ---- Dependencies ---- 30 | 31 | s.add_development_dependency 'rake' 32 | s.add_development_dependency 'rspec' 33 | s.add_development_dependency 'yard' 34 | s.add_development_dependency 'cucumber' 35 | s.add_development_dependency 'aruba' 36 | 37 | s.add_runtime_dependency 'nokogiri' 38 | s.add_runtime_dependency 'oj' 39 | s.add_runtime_dependency 'htmlentities' 40 | s.add_runtime_dependency 'net-dns' 41 | s.add_runtime_dependency 'bit-struct' 42 | s.add_runtime_dependency 'geoip-c' 43 | s.add_runtime_dependency 'recog' 44 | s.add_runtime_dependency 'maxmind-db' 45 | end 46 | -------------------------------------------------------------------------------- /data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/data/.gitkeep -------------------------------------------------------------------------------- /data/tlds-alpha-by-domain.txt: -------------------------------------------------------------------------------- 1 | # Version 2016091900, Last Updated Mon Sep 19 07:07:01 2016 UTC 2 | AAA 3 | AARP 4 | ABARTH 5 | ABB 6 | ABBOTT 7 | ABBVIE 8 | ABC 9 | ABLE 10 | ABOGADO 11 | ABUDHABI 12 | AC 13 | ACADEMY 14 | ACCENTURE 15 | ACCOUNTANT 16 | ACCOUNTANTS 17 | ACO 18 | ACTIVE 19 | ACTOR 20 | AD 21 | ADAC 22 | ADS 23 | ADULT 24 | AE 25 | AEG 26 | AERO 27 | AETNA 28 | AF 29 | AFAMILYCOMPANY 30 | AFL 31 | AG 32 | AGAKHAN 33 | AGENCY 34 | AI 35 | AIG 36 | AIGO 37 | AIRBUS 38 | AIRFORCE 39 | AIRTEL 40 | AKDN 41 | AL 42 | ALFAROMEO 43 | ALIBABA 44 | ALIPAY 45 | ALLFINANZ 46 | ALLSTATE 47 | ALLY 48 | ALSACE 49 | ALSTOM 50 | AM 51 | AMERICANEXPRESS 52 | AMERICANFAMILY 53 | AMEX 54 | AMFAM 55 | AMICA 56 | AMSTERDAM 57 | ANALYTICS 58 | ANDROID 59 | ANQUAN 60 | ANZ 61 | AO 62 | APARTMENTS 63 | APP 64 | APPLE 65 | AQ 66 | AQUARELLE 67 | AR 68 | ARAMCO 69 | ARCHI 70 | ARMY 71 | ARPA 72 | ART 73 | ARTE 74 | AS 75 | ASDA 76 | ASIA 77 | ASSOCIATES 78 | AT 79 | ATHLETA 80 | ATTORNEY 81 | AU 82 | AUCTION 83 | AUDI 84 | AUDIBLE 85 | AUDIO 86 | AUSPOST 87 | AUTHOR 88 | AUTO 89 | AUTOS 90 | AVIANCA 91 | AW 92 | AWS 93 | AX 94 | AXA 95 | AZ 96 | AZURE 97 | BA 98 | BABY 99 | BAIDU 100 | BANAMEX 101 | BANANAREPUBLIC 102 | BAND 103 | BANK 104 | BAR 105 | BARCELONA 106 | BARCLAYCARD 107 | BARCLAYS 108 | BAREFOOT 109 | BARGAINS 110 | BAUHAUS 111 | BAYERN 112 | BB 113 | BBC 114 | BBT 115 | BBVA 116 | BCG 117 | BCN 118 | BD 119 | BE 120 | BEATS 121 | BEAUTY 122 | BEER 123 | BENTLEY 124 | BERLIN 125 | BEST 126 | BESTBUY 127 | BET 128 | BF 129 | BG 130 | BH 131 | BHARTI 132 | BI 133 | BIBLE 134 | BID 135 | BIKE 136 | BING 137 | BINGO 138 | BIO 139 | BIZ 140 | BJ 141 | BLACK 142 | BLACKFRIDAY 143 | BLANCO 144 | BLOCKBUSTER 145 | BLOG 146 | BLOOMBERG 147 | BLUE 148 | BM 149 | BMS 150 | BMW 151 | BN 152 | BNL 153 | BNPPARIBAS 154 | BO 155 | BOATS 156 | BOEHRINGER 157 | BOFA 158 | BOM 159 | BOND 160 | BOO 161 | BOOK 162 | BOOKING 163 | BOOTS 164 | BOSCH 165 | BOSTIK 166 | BOT 167 | BOUTIQUE 168 | BR 169 | BRADESCO 170 | BRIDGESTONE 171 | BROADWAY 172 | BROKER 173 | BROTHER 174 | BRUSSELS 175 | BS 176 | BT 177 | BUDAPEST 178 | BUGATTI 
179 | BUILD 180 | BUILDERS 181 | BUSINESS 182 | BUY 183 | BUZZ 184 | BV 185 | BW 186 | BY 187 | BZ 188 | BZH 189 | CA 190 | CAB 191 | CAFE 192 | CAL 193 | CALL 194 | CALVINKLEIN 195 | CAM 196 | CAMERA 197 | CAMP 198 | CANCERRESEARCH 199 | CANON 200 | CAPETOWN 201 | CAPITAL 202 | CAPITALONE 203 | CAR 204 | CARAVAN 205 | CARDS 206 | CARE 207 | CAREER 208 | CAREERS 209 | CARS 210 | CARTIER 211 | CASA 212 | CASH 213 | CASINO 214 | CAT 215 | CATERING 216 | CBA 217 | CBN 218 | CBRE 219 | CBS 220 | CC 221 | CD 222 | CEB 223 | CENTER 224 | CEO 225 | CERN 226 | CF 227 | CFA 228 | CFD 229 | CG 230 | CH 231 | CHANEL 232 | CHANNEL 233 | CHASE 234 | CHAT 235 | CHEAP 236 | CHINTAI 237 | CHLOE 238 | CHRISTMAS 239 | CHROME 240 | CHRYSLER 241 | CHURCH 242 | CI 243 | CIPRIANI 244 | CIRCLE 245 | CISCO 246 | CITADEL 247 | CITI 248 | CITIC 249 | CITY 250 | CITYEATS 251 | CK 252 | CL 253 | CLAIMS 254 | CLEANING 255 | CLICK 256 | CLINIC 257 | CLINIQUE 258 | CLOTHING 259 | CLOUD 260 | CLUB 261 | CLUBMED 262 | CM 263 | CN 264 | CO 265 | COACH 266 | CODES 267 | COFFEE 268 | COLLEGE 269 | COLOGNE 270 | COM 271 | COMCAST 272 | COMMBANK 273 | COMMUNITY 274 | COMPANY 275 | COMPARE 276 | COMPUTER 277 | COMSEC 278 | CONDOS 279 | CONSTRUCTION 280 | CONSULTING 281 | CONTACT 282 | CONTRACTORS 283 | COOKING 284 | COOKINGCHANNEL 285 | COOL 286 | COOP 287 | CORSICA 288 | COUNTRY 289 | COUPON 290 | COUPONS 291 | COURSES 292 | CR 293 | CREDIT 294 | CREDITCARD 295 | CREDITUNION 296 | CRICKET 297 | CROWN 298 | CRS 299 | CRUISES 300 | CSC 301 | CU 302 | CUISINELLA 303 | CV 304 | CW 305 | CX 306 | CY 307 | CYMRU 308 | CYOU 309 | CZ 310 | DABUR 311 | DAD 312 | DANCE 313 | DATE 314 | DATING 315 | DATSUN 316 | DAY 317 | DCLK 318 | DDS 319 | DE 320 | DEAL 321 | DEALER 322 | DEALS 323 | DEGREE 324 | DELIVERY 325 | DELL 326 | DELOITTE 327 | DELTA 328 | DEMOCRAT 329 | DENTAL 330 | DENTIST 331 | DESI 332 | DESIGN 333 | DEV 334 | DHL 335 | DIAMONDS 336 | DIET 337 | DIGITAL 338 | DIRECT 339 | DIRECTORY 340 | DISCOUNT 341 | DISCOVER 342 | DISH 343 | DIY 344 | DJ 345 | DK 346 | DM 347 | DNP 348 | DO 349 | DOCS 350 | DOCTOR 351 | DODGE 352 | DOG 353 | DOHA 354 | DOMAINS 355 | DOT 356 | DOWNLOAD 357 | DRIVE 358 | DTV 359 | DUBAI 360 | DUCK 361 | DUNLOP 362 | DUNS 363 | DUPONT 364 | DURBAN 365 | DVAG 366 | DZ 367 | EARTH 368 | EAT 369 | EC 370 | ECO 371 | EDEKA 372 | EDU 373 | EDUCATION 374 | EE 375 | EG 376 | EMAIL 377 | EMERCK 378 | ENERGY 379 | ENGINEER 380 | ENGINEERING 381 | ENTERPRISES 382 | EPOST 383 | EPSON 384 | EQUIPMENT 385 | ER 386 | ERICSSON 387 | ERNI 388 | ES 389 | ESQ 390 | ESTATE 391 | ESURANCE 392 | ET 393 | EU 394 | EUROVISION 395 | EUS 396 | EVENTS 397 | EVERBANK 398 | EXCHANGE 399 | EXPERT 400 | EXPOSED 401 | EXPRESS 402 | EXTRASPACE 403 | FAGE 404 | FAIL 405 | FAIRWINDS 406 | FAITH 407 | FAMILY 408 | FAN 409 | FANS 410 | FARM 411 | FARMERS 412 | FASHION 413 | FAST 414 | FEDEX 415 | FEEDBACK 416 | FERRARI 417 | FERRERO 418 | FI 419 | FIAT 420 | FIDELITY 421 | FILM 422 | FINAL 423 | FINANCE 424 | FINANCIAL 425 | FIRE 426 | FIRESTONE 427 | FIRMDALE 428 | FISH 429 | FISHING 430 | FIT 431 | FITNESS 432 | FJ 433 | FK 434 | FLICKR 435 | FLIGHTS 436 | FLIR 437 | FLORIST 438 | FLOWERS 439 | FLY 440 | FM 441 | FO 442 | FOO 443 | FOODNETWORK 444 | FOOTBALL 445 | FORD 446 | FOREX 447 | FORSALE 448 | FORUM 449 | FOUNDATION 450 | FOX 451 | FR 452 | FRESENIUS 453 | FRL 454 | FROGANS 455 | FRONTDOOR 456 | FRONTIER 457 | FTR 458 | FUJITSU 459 | FUJIXEROX 460 | FUND 461 | FURNITURE 462 | FUTBOL 463 | FYI 464 | GA 465 | GAL 466 | GALLERY 467 | 
GALLO 468 | GALLUP 469 | GAME 470 | GAMES 471 | GAP 472 | GARDEN 473 | GB 474 | GBIZ 475 | GD 476 | GDN 477 | GE 478 | GEA 479 | GENT 480 | GENTING 481 | GEORGE 482 | GF 483 | GG 484 | GGEE 485 | GH 486 | GI 487 | GIFT 488 | GIFTS 489 | GIVES 490 | GIVING 491 | GL 492 | GLADE 493 | GLASS 494 | GLE 495 | GLOBAL 496 | GLOBO 497 | GM 498 | GMAIL 499 | GMBH 500 | GMO 501 | GMX 502 | GN 503 | GODADDY 504 | GOLD 505 | GOLDPOINT 506 | GOLF 507 | GOO 508 | GOODHANDS 509 | GOODYEAR 510 | GOOG 511 | GOOGLE 512 | GOP 513 | GOT 514 | GOV 515 | GP 516 | GQ 517 | GR 518 | GRAINGER 519 | GRAPHICS 520 | GRATIS 521 | GREEN 522 | GRIPE 523 | GROUP 524 | GS 525 | GT 526 | GU 527 | GUARDIAN 528 | GUCCI 529 | GUGE 530 | GUIDE 531 | GUITARS 532 | GURU 533 | GW 534 | GY 535 | HAMBURG 536 | HANGOUT 537 | HAUS 538 | HBO 539 | HDFC 540 | HDFCBANK 541 | HEALTH 542 | HEALTHCARE 543 | HELP 544 | HELSINKI 545 | HERE 546 | HERMES 547 | HGTV 548 | HIPHOP 549 | HISAMITSU 550 | HITACHI 551 | HIV 552 | HK 553 | HKT 554 | HM 555 | HN 556 | HOCKEY 557 | HOLDINGS 558 | HOLIDAY 559 | HOMEDEPOT 560 | HOMEGOODS 561 | HOMES 562 | HOMESENSE 563 | HONDA 564 | HONEYWELL 565 | HORSE 566 | HOST 567 | HOSTING 568 | HOT 569 | HOTELES 570 | HOTMAIL 571 | HOUSE 572 | HOW 573 | HR 574 | HSBC 575 | HT 576 | HTC 577 | HU 578 | HUGHES 579 | HYATT 580 | HYUNDAI 581 | IBM 582 | ICBC 583 | ICE 584 | ICU 585 | ID 586 | IE 587 | IEEE 588 | IFM 589 | IINET 590 | IKANO 591 | IL 592 | IM 593 | IMAMAT 594 | IMDB 595 | IMMO 596 | IMMOBILIEN 597 | IN 598 | INDUSTRIES 599 | INFINITI 600 | INFO 601 | ING 602 | INK 603 | INSTITUTE 604 | INSURANCE 605 | INSURE 606 | INT 607 | INTEL 608 | INTERNATIONAL 609 | INTUIT 610 | INVESTMENTS 611 | IO 612 | IPIRANGA 613 | IQ 614 | IR 615 | IRISH 616 | IS 617 | ISELECT 618 | ISMAILI 619 | IST 620 | ISTANBUL 621 | IT 622 | ITAU 623 | ITV 624 | IWC 625 | JAGUAR 626 | JAVA 627 | JCB 628 | JCP 629 | JE 630 | JEEP 631 | JETZT 632 | JEWELRY 633 | JLC 634 | JLL 635 | JM 636 | JMP 637 | JNJ 638 | JO 639 | JOBS 640 | JOBURG 641 | JOT 642 | JOY 643 | JP 644 | JPMORGAN 645 | JPRS 646 | JUEGOS 647 | JUNIPER 648 | KAUFEN 649 | KDDI 650 | KE 651 | KERRYHOTELS 652 | KERRYLOGISTICS 653 | KERRYPROPERTIES 654 | KFH 655 | KG 656 | KH 657 | KI 658 | KIA 659 | KIM 660 | KINDER 661 | KINDLE 662 | KITCHEN 663 | KIWI 664 | KM 665 | KN 666 | KOELN 667 | KOMATSU 668 | KOSHER 669 | KP 670 | KPMG 671 | KPN 672 | KR 673 | KRD 674 | KRED 675 | KUOKGROUP 676 | KW 677 | KY 678 | KYOTO 679 | KZ 680 | LA 681 | LACAIXA 682 | LADBROKES 683 | LAMBORGHINI 684 | LAMER 685 | LANCASTER 686 | LANCIA 687 | LANCOME 688 | LAND 689 | LANDROVER 690 | LANXESS 691 | LASALLE 692 | LAT 693 | LATINO 694 | LATROBE 695 | LAW 696 | LAWYER 697 | LB 698 | LC 699 | LDS 700 | LEASE 701 | LECLERC 702 | LEFRAK 703 | LEGAL 704 | LEGO 705 | LEXUS 706 | LGBT 707 | LI 708 | LIAISON 709 | LIDL 710 | LIFE 711 | LIFEINSURANCE 712 | LIFESTYLE 713 | LIGHTING 714 | LIKE 715 | LILLY 716 | LIMITED 717 | LIMO 718 | LINCOLN 719 | LINDE 720 | LINK 721 | LIPSY 722 | LIVE 723 | LIVING 724 | LIXIL 725 | LK 726 | LOAN 727 | LOANS 728 | LOCKER 729 | LOCUS 730 | LOFT 731 | LOL 732 | LONDON 733 | LOTTE 734 | LOTTO 735 | LOVE 736 | LPL 737 | LPLFINANCIAL 738 | LR 739 | LS 740 | LT 741 | LTD 742 | LTDA 743 | LU 744 | LUNDBECK 745 | LUPIN 746 | LUXE 747 | LUXURY 748 | LV 749 | LY 750 | MA 751 | MACYS 752 | MADRID 753 | MAIF 754 | MAISON 755 | MAKEUP 756 | MAN 757 | MANAGEMENT 758 | MANGO 759 | MARKET 760 | MARKETING 761 | MARKETS 762 | MARRIOTT 763 | MARSHALLS 764 | MASERATI 765 | MATTEL 766 | MBA 767 | 
MC 768 | MCD 769 | MCDONALDS 770 | MCKINSEY 771 | MD 772 | ME 773 | MED 774 | MEDIA 775 | MEET 776 | MELBOURNE 777 | MEME 778 | MEMORIAL 779 | MEN 780 | MENU 781 | MEO 782 | METLIFE 783 | MG 784 | MH 785 | MIAMI 786 | MICROSOFT 787 | MIL 788 | MINI 789 | MINT 790 | MIT 791 | MITSUBISHI 792 | MK 793 | ML 794 | MLB 795 | MLS 796 | MM 797 | MMA 798 | MN 799 | MO 800 | MOBI 801 | MOBILY 802 | MODA 803 | MOE 804 | MOI 805 | MOM 806 | MONASH 807 | MONEY 808 | MONSTER 809 | MONTBLANC 810 | MOPAR 811 | MORMON 812 | MORTGAGE 813 | MOSCOW 814 | MOTORCYCLES 815 | MOV 816 | MOVIE 817 | MOVISTAR 818 | MP 819 | MQ 820 | MR 821 | MS 822 | MSD 823 | MT 824 | MTN 825 | MTPC 826 | MTR 827 | MU 828 | MUSEUM 829 | MUTUAL 830 | MUTUELLE 831 | MV 832 | MW 833 | MX 834 | MY 835 | MZ 836 | NA 837 | NAB 838 | NADEX 839 | NAGOYA 840 | NAME 841 | NATIONWIDE 842 | NATURA 843 | NAVY 844 | NBA 845 | NC 846 | NE 847 | NEC 848 | NET 849 | NETBANK 850 | NETFLIX 851 | NETWORK 852 | NEUSTAR 853 | NEW 854 | NEWS 855 | NEXT 856 | NEXTDIRECT 857 | NEXUS 858 | NF 859 | NFL 860 | NG 861 | NGO 862 | NHK 863 | NI 864 | NICO 865 | NIKE 866 | NIKON 867 | NINJA 868 | NISSAN 869 | NISSAY 870 | NL 871 | NO 872 | NOKIA 873 | NORTHWESTERNMUTUAL 874 | NORTON 875 | NOW 876 | NOWRUZ 877 | NOWTV 878 | NP 879 | NR 880 | NRA 881 | NRW 882 | NTT 883 | NU 884 | NYC 885 | NZ 886 | OBI 887 | OFF 888 | OFFICE 889 | OKINAWA 890 | OLAYAN 891 | OLAYANGROUP 892 | OLDNAVY 893 | OLLO 894 | OM 895 | OMEGA 896 | ONE 897 | ONG 898 | ONL 899 | ONLINE 900 | ONYOURSIDE 901 | OOO 902 | OPEN 903 | ORACLE 904 | ORANGE 905 | ORG 906 | ORGANIC 907 | ORIENTEXPRESS 908 | ORIGINS 909 | OSAKA 910 | OTSUKA 911 | OTT 912 | OVH 913 | PA 914 | PAGE 915 | PAMPEREDCHEF 916 | PANASONIC 917 | PANERAI 918 | PARIS 919 | PARS 920 | PARTNERS 921 | PARTS 922 | PARTY 923 | PASSAGENS 924 | PAY 925 | PCCW 926 | PE 927 | PET 928 | PF 929 | PFIZER 930 | PG 931 | PH 932 | PHARMACY 933 | PHILIPS 934 | PHOTO 935 | PHOTOGRAPHY 936 | PHOTOS 937 | PHYSIO 938 | PIAGET 939 | PICS 940 | PICTET 941 | PICTURES 942 | PID 943 | PIN 944 | PING 945 | PINK 946 | PIONEER 947 | PIZZA 948 | PK 949 | PL 950 | PLACE 951 | PLAY 952 | PLAYSTATION 953 | PLUMBING 954 | PLUS 955 | PM 956 | PN 957 | PNC 958 | POHL 959 | POKER 960 | POLITIE 961 | PORN 962 | POST 963 | PR 964 | PRAMERICA 965 | PRAXI 966 | PRESS 967 | PRIME 968 | PRO 969 | PROD 970 | PRODUCTIONS 971 | PROF 972 | PROGRESSIVE 973 | PROMO 974 | PROPERTIES 975 | PROPERTY 976 | PROTECTION 977 | PRU 978 | PRUDENTIAL 979 | PS 980 | PT 981 | PUB 982 | PW 983 | PWC 984 | PY 985 | QA 986 | QPON 987 | QUEBEC 988 | QUEST 989 | QVC 990 | RACING 991 | RAID 992 | RE 993 | READ 994 | REALESTATE 995 | REALTOR 996 | REALTY 997 | RECIPES 998 | RED 999 | REDSTONE 1000 | REDUMBRELLA 1001 | REHAB 1002 | REISE 1003 | REISEN 1004 | REIT 1005 | REN 1006 | RENT 1007 | RENTALS 1008 | REPAIR 1009 | REPORT 1010 | REPUBLICAN 1011 | REST 1012 | RESTAURANT 1013 | REVIEW 1014 | REVIEWS 1015 | REXROTH 1016 | RICH 1017 | RICHARDLI 1018 | RICOH 1019 | RIGHTATHOME 1020 | RIO 1021 | RIP 1022 | RO 1023 | ROCHER 1024 | ROCKS 1025 | RODEO 1026 | ROOM 1027 | RS 1028 | RSVP 1029 | RU 1030 | RUHR 1031 | RUN 1032 | RW 1033 | RWE 1034 | RYUKYU 1035 | SA 1036 | SAARLAND 1037 | SAFE 1038 | SAFETY 1039 | SAKURA 1040 | SALE 1041 | SALON 1042 | SAMSCLUB 1043 | SAMSUNG 1044 | SANDVIK 1045 | SANDVIKCOROMANT 1046 | SANOFI 1047 | SAP 1048 | SAPO 1049 | SARL 1050 | SAS 1051 | SAVE 1052 | SAXO 1053 | SB 1054 | SBI 1055 | SBS 1056 | SC 1057 | SCA 1058 | SCB 1059 | SCHAEFFLER 1060 | SCHMIDT 1061 | 
SCHOLARSHIPS 1062 | SCHOOL 1063 | SCHULE 1064 | SCHWARZ 1065 | SCIENCE 1066 | SCJOHNSON 1067 | SCOR 1068 | SCOT 1069 | SD 1070 | SE 1071 | SEAT 1072 | SECURE 1073 | SECURITY 1074 | SEEK 1075 | SELECT 1076 | SENER 1077 | SERVICES 1078 | SES 1079 | SEVEN 1080 | SEW 1081 | SEX 1082 | SEXY 1083 | SFR 1084 | SG 1085 | SH 1086 | SHANGRILA 1087 | SHARP 1088 | SHAW 1089 | SHELL 1090 | SHIA 1091 | SHIKSHA 1092 | SHOES 1093 | SHOP 1094 | SHOPPING 1095 | SHOUJI 1096 | SHOW 1097 | SHOWTIME 1098 | SHRIRAM 1099 | SI 1100 | SILK 1101 | SINA 1102 | SINGLES 1103 | SITE 1104 | SJ 1105 | SK 1106 | SKI 1107 | SKIN 1108 | SKY 1109 | SKYPE 1110 | SL 1111 | SLING 1112 | SM 1113 | SMART 1114 | SMILE 1115 | SN 1116 | SNCF 1117 | SO 1118 | SOCCER 1119 | SOCIAL 1120 | SOFTBANK 1121 | SOFTWARE 1122 | SOHU 1123 | SOLAR 1124 | SOLUTIONS 1125 | SONG 1126 | SONY 1127 | SOY 1128 | SPACE 1129 | SPIEGEL 1130 | SPOT 1131 | SPREADBETTING 1132 | SR 1133 | SRL 1134 | SRT 1135 | ST 1136 | STADA 1137 | STAPLES 1138 | STAR 1139 | STARHUB 1140 | STATEBANK 1141 | STATEFARM 1142 | STATOIL 1143 | STC 1144 | STCGROUP 1145 | STOCKHOLM 1146 | STORAGE 1147 | STORE 1148 | STREAM 1149 | STUDIO 1150 | STUDY 1151 | STYLE 1152 | SU 1153 | SUCKS 1154 | SUPPLIES 1155 | SUPPLY 1156 | SUPPORT 1157 | SURF 1158 | SURGERY 1159 | SUZUKI 1160 | SV 1161 | SWATCH 1162 | SWIFTCOVER 1163 | SWISS 1164 | SX 1165 | SY 1166 | SYDNEY 1167 | SYMANTEC 1168 | SYSTEMS 1169 | SZ 1170 | TAB 1171 | TAIPEI 1172 | TALK 1173 | TAOBAO 1174 | TARGET 1175 | TATAMOTORS 1176 | TATAR 1177 | TATTOO 1178 | TAX 1179 | TAXI 1180 | TC 1181 | TCI 1182 | TD 1183 | TDK 1184 | TEAM 1185 | TECH 1186 | TECHNOLOGY 1187 | TEL 1188 | TELECITY 1189 | TELEFONICA 1190 | TEMASEK 1191 | TENNIS 1192 | TEVA 1193 | TF 1194 | TG 1195 | TH 1196 | THD 1197 | THEATER 1198 | THEATRE 1199 | TIAA 1200 | TICKETS 1201 | TIENDA 1202 | TIFFANY 1203 | TIPS 1204 | TIRES 1205 | TIROL 1206 | TJ 1207 | TJMAXX 1208 | TJX 1209 | TK 1210 | TKMAXX 1211 | TL 1212 | TM 1213 | TMALL 1214 | TN 1215 | TO 1216 | TODAY 1217 | TOKYO 1218 | TOOLS 1219 | TOP 1220 | TORAY 1221 | TOSHIBA 1222 | TOTAL 1223 | TOURS 1224 | TOWN 1225 | TOYOTA 1226 | TOYS 1227 | TR 1228 | TRADE 1229 | TRADING 1230 | TRAINING 1231 | TRAVEL 1232 | TRAVELCHANNEL 1233 | TRAVELERS 1234 | TRAVELERSINSURANCE 1235 | TRUST 1236 | TRV 1237 | TT 1238 | TUBE 1239 | TUI 1240 | TUNES 1241 | TUSHU 1242 | TV 1243 | TVS 1244 | TW 1245 | TZ 1246 | UA 1247 | UBANK 1248 | UBS 1249 | UCONNECT 1250 | UG 1251 | UK 1252 | UNICOM 1253 | UNIVERSITY 1254 | UNO 1255 | UOL 1256 | UPS 1257 | US 1258 | UY 1259 | UZ 1260 | VA 1261 | VACATIONS 1262 | VANA 1263 | VANGUARD 1264 | VC 1265 | VE 1266 | VEGAS 1267 | VENTURES 1268 | VERISIGN 1269 | VERSICHERUNG 1270 | VET 1271 | VG 1272 | VI 1273 | VIAJES 1274 | VIDEO 1275 | VIG 1276 | VIKING 1277 | VILLAS 1278 | VIN 1279 | VIP 1280 | VIRGIN 1281 | VISA 1282 | VISION 1283 | VISTA 1284 | VISTAPRINT 1285 | VIVA 1286 | VIVO 1287 | VLAANDEREN 1288 | VN 1289 | VODKA 1290 | VOLKSWAGEN 1291 | VOTE 1292 | VOTING 1293 | VOTO 1294 | VOYAGE 1295 | VU 1296 | VUELOS 1297 | WALES 1298 | WALMART 1299 | WALTER 1300 | WANG 1301 | WANGGOU 1302 | WARMAN 1303 | WATCH 1304 | WATCHES 1305 | WEATHER 1306 | WEATHERCHANNEL 1307 | WEBCAM 1308 | WEBER 1309 | WEBSITE 1310 | WED 1311 | WEDDING 1312 | WEIBO 1313 | WEIR 1314 | WF 1315 | WHOSWHO 1316 | WIEN 1317 | WIKI 1318 | WILLIAMHILL 1319 | WIN 1320 | WINDOWS 1321 | WINE 1322 | WINNERS 1323 | WME 1324 | WOLTERSKLUWER 1325 | WOODSIDE 1326 | WORK 1327 | WORKS 1328 | WORLD 1329 | WS 1330 | WTC 1331 | WTF 1332 | XBOX 1333 
| XEROX 1334 | XFINITY 1335 | XIHUAN 1336 | XIN 1337 | XN--11B4C3D 1338 | XN--1CK2E1B 1339 | XN--1QQW23A 1340 | XN--30RR7Y 1341 | XN--3BST00M 1342 | XN--3DS443G 1343 | XN--3E0B707E 1344 | XN--3OQ18VL8PN36A 1345 | XN--3PXU8K 1346 | XN--42C2D9A 1347 | XN--45BRJ9C 1348 | XN--45Q11C 1349 | XN--4GBRIM 1350 | XN--55QW42G 1351 | XN--55QX5D 1352 | XN--5SU34J936BGSG 1353 | XN--5TZM5G 1354 | XN--6FRZ82G 1355 | XN--6QQ986B3XL 1356 | XN--80ADXHKS 1357 | XN--80AO21A 1358 | XN--80ASEHDB 1359 | XN--80ASWG 1360 | XN--8Y0A063A 1361 | XN--90A3AC 1362 | XN--90AE 1363 | XN--90AIS 1364 | XN--9DBQ2A 1365 | XN--9ET52U 1366 | XN--9KRT00A 1367 | XN--B4W605FERD 1368 | XN--BCK1B9A5DRE4C 1369 | XN--C1AVG 1370 | XN--C2BR7G 1371 | XN--CCK2B3B 1372 | XN--CG4BKI 1373 | XN--CLCHC0EA0B2G2A9GCD 1374 | XN--CZR694B 1375 | XN--CZRS0T 1376 | XN--CZRU2D 1377 | XN--D1ACJ3B 1378 | XN--D1ALF 1379 | XN--E1A4C 1380 | XN--ECKVDTC9D 1381 | XN--EFVY88H 1382 | XN--ESTV75G 1383 | XN--FCT429K 1384 | XN--FHBEI 1385 | XN--FIQ228C5HS 1386 | XN--FIQ64B 1387 | XN--FIQS8S 1388 | XN--FIQZ9S 1389 | XN--FJQ720A 1390 | XN--FLW351E 1391 | XN--FPCRJ9C3D 1392 | XN--FZC2C9E2C 1393 | XN--FZYS8D69UVGM 1394 | XN--G2XX48C 1395 | XN--GCKR3F0F 1396 | XN--GECRJ9C 1397 | XN--H2BRJ9C 1398 | XN--HXT814E 1399 | XN--I1B6B1A6A2E 1400 | XN--IMR513N 1401 | XN--IO0A7I 1402 | XN--J1AEF 1403 | XN--J1AMH 1404 | XN--J6W193G 1405 | XN--JLQ61U9W7B 1406 | XN--JVR189M 1407 | XN--KCRX77D1X4A 1408 | XN--KPRW13D 1409 | XN--KPRY57D 1410 | XN--KPU716F 1411 | XN--KPUT3I 1412 | XN--L1ACC 1413 | XN--LGBBAT1AD8J 1414 | XN--MGB9AWBF 1415 | XN--MGBA3A3EJT 1416 | XN--MGBA3A4F16A 1417 | XN--MGBA7C0BBN0A 1418 | XN--MGBAAM7A8H 1419 | XN--MGBAB2BD 1420 | XN--MGBAYH7GPA 1421 | XN--MGBB9FBPOB 1422 | XN--MGBBH1A71E 1423 | XN--MGBC0A9AZCG 1424 | XN--MGBCA7DZDO 1425 | XN--MGBERP4A5D4AR 1426 | XN--MGBPL2FH 1427 | XN--MGBT3DHD 1428 | XN--MGBTX2B 1429 | XN--MGBX4CD0AB 1430 | XN--MIX891F 1431 | XN--MK1BU44C 1432 | XN--MXTQ1M 1433 | XN--NGBC5AZD 1434 | XN--NGBE9E0A 1435 | XN--NODE 1436 | XN--NQV7F 1437 | XN--NQV7FS00EMA 1438 | XN--NYQY26A 1439 | XN--O3CW4H 1440 | XN--OGBPF8FL 1441 | XN--P1ACF 1442 | XN--P1AI 1443 | XN--PBT977C 1444 | XN--PGBS0DH 1445 | XN--PSSY2U 1446 | XN--Q9JYB4C 1447 | XN--QCKA1PMC 1448 | XN--QXAM 1449 | XN--RHQV96G 1450 | XN--ROVU88B 1451 | XN--S9BRJ9C 1452 | XN--SES554G 1453 | XN--T60B56A 1454 | XN--TCKWE 1455 | XN--UNUP4Y 1456 | XN--VERMGENSBERATER-CTB 1457 | XN--VERMGENSBERATUNG-PWB 1458 | XN--VHQUV 1459 | XN--VUQ861B 1460 | XN--W4R85EL8FHU5DNRA 1461 | XN--W4RS40L 1462 | XN--WGBH1C 1463 | XN--WGBL6A 1464 | XN--XHQ521B 1465 | XN--XKC2AL3HYE2A 1466 | XN--XKC2DL3A5EE0H 1467 | XN--Y9A3AQ 1468 | XN--YFRO4I67O 1469 | XN--YGBI2AMMX 1470 | XN--ZFR164B 1471 | XPERIA 1472 | XXX 1473 | XYZ 1474 | YACHTS 1475 | YAHOO 1476 | YAMAXUN 1477 | YANDEX 1478 | YE 1479 | YODOBASHI 1480 | YOGA 1481 | YOKOHAMA 1482 | YOU 1483 | YOUTUBE 1484 | YT 1485 | YUN 1486 | ZA 1487 | ZAPPOS 1488 | ZARA 1489 | ZERO 1490 | ZIP 1491 | ZIPPO 1492 | ZM 1493 | ZONE 1494 | ZUERICH 1495 | ZW 1496 | -------------------------------------------------------------------------------- /data/vulndb.rb: -------------------------------------------------------------------------------- 1 | 2 | # Searches contains each of the services, within each service it contains 3 | # a hash key that will be compared against each of the items in the 4 | # regex hash, and if a hit is returned the value from the regex is inserted 5 | # into the hash with the output_key as the key. 
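# For example (an illustrative walk-through, not part of the shipped data):
# given a document containing 'data.upnp_server' => 'MiniUPnPd/1.0', the
# first :upnp pattern below matches and the vulnmatch filter adds
# 'CVE-2013-0229' under the document's 'vulnerability' key (any other
# patterns that also match contribute their values as well).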
6 | # 7 | SEARCHES = { 8 | :upnp => [{ 9 | :hash_key => 'data.upnp_server', 10 | :output_key => 'vulnerability', 11 | :regex => { 12 | /MiniUPnPd\/1\.0([\.\,\-\~\s]|$)/mi => ['CVE-2013-0229'], 13 | /MiniUPnPd\/1\.[0-3]([\.\,\-\~\s]|$)/mi => ['CVE-2013-0230'], 14 | /Intel SDK for UPnP devices.*|Portable SDK for UPnP devices(\/?\s*$|\/1\.([0-5]\..*|8\.0.*|(6\.[0-9]|6\.1[0-7])([\.\,\-\~\s]|$)))/mi => ['CVE-2012-5958', 'CVE-2012-5959'] 15 | } 16 | }], 17 | 18 | :ipmi => [{ 19 | :hash_key => 'data.ipmi_compat_password', 20 | :output_key => 'vulnerability', 21 | :regex => { 22 | /1/ => ['IPMI-STRAIGHT-PASS'], 23 | } 24 | },{ 25 | :hash_key => 'data.ipmi_compat_md2', 26 | :output_key => 'vulnerability', 27 | :regex => { 28 | /1/ => ['IPMI-MD2'], 29 | } 30 | },{ 31 | :hash_key => 'data.ipmi_compat_none', 32 | :output_key => 'vulnerability', 33 | :regex => { 34 | /1/ => ['IPMI-NOAUTH'], 35 | } 36 | },{ 37 | :hash_key => 'data.ipmi_user_disable_message_auth', 38 | :output_key => 'vulnerability', 39 | :regex => { 40 | /1/ => ['IPMI-PERMSG'], 41 | } 42 | },{ 43 | :hash_key => 'data.ipmi_user_disable_user_auth', 44 | :output_key => 'vulnerability', 45 | :regex => { 46 | /1/ => ['IPMI-USRLVL'], 47 | } 48 | }], 49 | 50 | :mssql => [{ 51 | :hash_key => 'data.version.name', 52 | :output_key => 'vulnerability', 53 | :cvemap => { 54 | #['', ''] => ['CVE-2007-5090'], 55 | ['2000', '-'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232', 'CVE-2008-4110', 'CVE-2008-5416'], 56 | #['2000', '""'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232'], 57 | ['2000', 'sp1'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232'], 58 | ['2000', 'sp2'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232'], 59 | ['2000', 'sp3'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232'], 60 | ['2000', 'sp3a'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232'], 61 | ['2000', 'sp4'] => ['CVE-2008-0085', 'CVE-2008-0086', 'CVE-2008-0106', 'CVE-2008-0107', 'CVE-2012-0158', 'CVE-2012-1856'], 62 | ['2005', '-'] => ['CVE-2008-5416'], 63 | ['2005', 'sp1'] => ['CVE-2008-0085', 'CVE-2008-0107'], 64 | ['2005', 'sp2'] => ['CVE-2007-4814', 'CVE-2007-5348', 'CVE-2008-0085', 'CVE-2008-0086', 'CVE-2008-0106', 'CVE-2008-0107', 'CVE-2008-3012', 'CVE-2008-3013', 'CVE-2008-3014', 'CVE-2008-3015', 'CVE-2009-2500', 'CVE-2009-2501', 'CVE-2009-2502', 'CVE-2009-2503', 'CVE-2009-2504', 'CVE-2009-2518', 'CVE-2009-2528', 'CVE-2009-3126'], 65 | ['2005', 'sp3'] => ['CVE-2009-2500', 'CVE-2009-2501', 'CVE-2009-2502', 'CVE-2009-2503', 'CVE-2009-2504', 'CVE-2009-2518', 'CVE-2009-2528', 'CVE-2009-3126', 'CVE-2011-1280'], 66 | ['2005', 'sp4'] => ['CVE-2011-1280', 'CVE-2012-0158', 'CVE-2012-1856', 'CVE-2012-2552'], 67 | ['2008', 'r2'] => ['CVE-2011-1280', 'CVE-2012-0158', 'CVE-2012-1856'], 68 | ['2008', 'r2 sp1'] => ['CVE-2012-1856', 'CVE-2012-2552'], 69 | ['2008', 'r2 sp2'] => ['CVE-2012-1856', 'CVE-2014-4061'], 70 | ['2008', 'sp1'] => ['CVE-2011-1280'], 71 | ['2008', 'sp2'] => ['CVE-2011-1280', 'CVE-2012-0158', 'CVE-2012-1856', 'CVE-2012-2552'], 72 | ['2008', 'sp3'] => ['CVE-2012-0158', 'CVE-2012-1856', 'CVE-2012-2552', 'CVE-2014-4061'], 73 | ['2012', '-'] => ['CVE-2012-2552'], 74 | ['2012', 'sp1'] => ['CVE-2014-1820', 'CVE-2014-4061'], 75 | ['2014', '-'] => ['CVE-2014-1820'], 76 | ['7.0', '-'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232', 'CVE-2004-1560'], 77 | ['7.0', 'sp1'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232', 'CVE-2004-1560'], 78 | ['7.0', 'sp2'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232', 'CVE-2004-1560'], 79 
| ['7.0', 'sp3'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232', 'CVE-2004-1560'], 80 | ['7.0', 'sp4'] => ['CVE-2003-0230', 'CVE-2003-0231', 'CVE-2003-0232', 'CVE-2004-1560', 'CVE-2008-0085', 'CVE-2008-0086', 'CVE-2008-0106', 'CVE-2008-0107'], 81 | } 82 | }], 83 | 84 | :http => [ 85 | #### ELASTICSEARCH RCE 86 | { 87 | # direct shellcommand elastic rce 88 | :match => [ 89 | ['http.path', '/_search'], 90 | ['http.body', 'script_fields'], 91 | ['http.body', 'java.lang.Runtime'], 92 | ['http.body', 'getRuntime()'], 93 | ], 94 | :cve => ['VULN-ELASTICSEARCH-RCE', 'CVE-2014-3120'] 95 | },{ 96 | # this just adds another tag as it's most likely done with metasploit 97 | :match => [ 98 | ['http.path', '/_search'], 99 | ['http.body', 'script_fields'], 100 | ['http.body', 'metasploit.Payload'], 101 | ], 102 | :cve => ['VULN-ELASTICSEARCH-RCE', 'METASPLOIT'] 103 | }] + [ 104 | #### PHP CGI 105 | { 106 | :match => [ 107 | ['http.path', '/cgi-bin/php'], 108 | ], 109 | :cve => ['VULN-PHPCGI'] 110 | },{ 111 | :match => [ 112 | ['http.path', '/cgi-bin/authLogin.cgi'], 113 | ], 114 | :cve => ['VULN-QNAP-SHELLSHOCK'] 115 | }], 116 | } 117 | -------------------------------------------------------------------------------- /lib/dap.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | 3 | require 'bundler/setup' 4 | 5 | require 'dap/version' 6 | require 'dap/input' 7 | require 'dap/output' 8 | require 'dap/filter' 9 | 10 | class Factory 11 | 12 | @@inputs = {} 13 | @@outputs = {} 14 | @@filters = {} 15 | 16 | def self.create_input(args) 17 | name = args.shift 18 | raise RuntimeError, "Invalid input plugin: #{name}" unless @@inputs[name] 19 | @@inputs[name].new(args) 20 | end 21 | 22 | def self.create_output(args) 23 | name = args.shift 24 | raise RuntimeError, "Invalid output plugin: #{name}" unless @@outputs[name] 25 | @@outputs[name].new(args) 26 | end 27 | 28 | def self.create_filter(args) 29 | name = args.shift 30 | raise RuntimeError, "Invalid filter plugin: #{name}" unless @@filters[name] 31 | @@filters[name].new(args) 32 | end 33 | 34 | # 35 | # Create nice-looking filter names from classes 36 | # Ex: FilterHTTPDecode => http_decode 37 | # Ex: FilterLimitLen => limit_len 38 | # 39 | def self.name_from_class(name) 40 | name.to_s.split('::').last. 41 | gsub(/([A-Z][a-z])/) { |c| "_#{c[0,1].downcase}#{c[1,1]}" }. 42 | gsub(/([a-z][A-Z])/) { |c| "#{c[0,1]}_#{c[1,1].downcase}" }. 43 | gsub(/_+/, '_'). 
44 | sub(/^_(input|filter|output)_/, '').downcase 45 | end 46 | 47 | # 48 | # Load input formats 49 | # 50 | def self.load_inputs 51 | Dap::Input.constants.each do |c| 52 | next unless c.to_s =~ /^Input/ 53 | o = Dap::Input.const_get(c) 54 | @@inputs[ name_from_class(c) ] = o 55 | end 56 | end 57 | 58 | # 59 | # Load output formats 60 | # 61 | def self.load_outputs 62 | Dap::Output.constants.each do |c| 63 | o = Dap::Output.const_get(c) 64 | next unless c.to_s =~ /^Output/ 65 | @@outputs[ name_from_class(c) ] = o 66 | end 67 | end 68 | 69 | # 70 | # Load filters 71 | # 72 | def self.load_filters 73 | Dap::Filter.constants.each do |c| 74 | o = Dap::Filter.const_get(c) 75 | next unless c.to_s =~ /^Filter/ 76 | @@filters[ name_from_class(c) ] = o 77 | end 78 | end 79 | 80 | def self.inputs 81 | @@inputs 82 | end 83 | 84 | def self.outputs 85 | @@outputs 86 | end 87 | 88 | def self.filters 89 | @@filters 90 | end 91 | 92 | def self.load_modules 93 | self.load_inputs 94 | self.load_outputs 95 | self.load_filters 96 | end 97 | end 98 | 99 | Factory.load_modules 100 | 101 | end -------------------------------------------------------------------------------- /lib/dap/filter.rb: -------------------------------------------------------------------------------- 1 | require 'dap/filter/base' 2 | require 'dap/filter/simple' 3 | require 'dap/filter/http' 4 | require 'dap/filter/udp' 5 | require 'dap/filter/openssl' 6 | require 'dap/filter/names' 7 | require 'dap/filter/geoip' 8 | require 'dap/filter/geoip2' 9 | require 'dap/filter/recog' 10 | require 'dap/filter/vulnmatch' 11 | require 'dap/filter/ssh_keyscan' 12 | require 'dap/filter/smbclient' 13 | require 'dap/filter/ldap' 14 | require 'dap/filter/gquic' 15 | -------------------------------------------------------------------------------- /lib/dap/filter/base.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Filter 3 | 4 | module Base 5 | attr_accessor :name, :opts 6 | 7 | def initialize(args) 8 | self.opts = {} 9 | args.each do |arg| 10 | k,v = arg.split("=", 2) 11 | self.opts[k] = v 12 | end 13 | self.name = Dap::Factory.name_from_class(self.class) 14 | end 15 | 16 | def process(doc) 17 | raise RuntimeError, "No process() method defined for filter #{self.name}" 18 | end 19 | 20 | end 21 | 22 | module BaseDecoder 23 | include Base 24 | def process(doc) 25 | self.opts.each_pair do |k,v| 26 | next unless doc.has_key?(k) 27 | info = decode(doc[k]) || {} 28 | info.each_pair do |x,y| 29 | doc[ "#{k}.#{x}" ] = y 30 | end 31 | end 32 | [ doc ] 33 | end 34 | end 35 | 36 | end 37 | end -------------------------------------------------------------------------------- /lib/dap/filter/geoip.rb: -------------------------------------------------------------------------------- 1 | require 'geoip' 2 | 3 | module Dap 4 | module Filter 5 | 6 | module GeoIPLibrary 7 | GEOIP_DIRS = [ 8 | File.expand_path( File.join( File.dirname(__FILE__), "..", "..", "..", "data")), 9 | "/var/lib/geoip" 10 | ] 11 | GEOIP_CITY = %W{ geoip.dat geoip_city.dat GeoCity.dat IP_V4_CITY.dat GeoCityLite.dat GeoLiteCity.dat } 12 | GEOIP_ORGS = %W{ geoip_org.dat IP_V4_ORG.dat } 13 | GEOIP_ASN = %W{ GeoIPASNum.dat } 14 | 15 | @@geo_city = nil 16 | @@geo_orgs = nil 17 | @@geo_asn = nil 18 | 19 | GEOIP_CITY_DATABASE_PATH = ENV["GEOIP_CITY_DATABASE_PATH"] 20 | GEOIP_ASN_DATABASE_PATH = ENV["GEOIP_ASN_DATABASE_PATH"] 21 | GEOIP_ORG_DATABASE_PATH = ENV["GEOIP_ORG_DATABASE_PATH"] 22 | 23 | if GEOIP_CITY_DATABASE_PATH 24 | if 
::File.exist?(GEOIP_CITY_DATABASE_PATH) 25 | @@geo_city = GeoIP::City.new(GEOIP_CITY_DATABASE_PATH) 26 | end 27 | else 28 | GEOIP_DIRS.each do |d| 29 | GEOIP_CITY.each do |f| 30 | path = File.join(d, f) 31 | if ::File.exist?(path) 32 | @@geo_city = GeoIP::City.new(path) 33 | break 34 | end 35 | end 36 | end 37 | end 38 | 39 | if GEOIP_ORG_DATABASE_PATH 40 | if ::File.exist?(GEOIP_ORG_DATABASE_PATH) 41 | @@geo_orgs = GeoIP::Organization.new(GEOIP_ORG_DATABASE_PATH) 42 | end 43 | else 44 | GEOIP_DIRS.each do |d| 45 | GEOIP_ORGS.each do |f| 46 | path = File.join(d, f) 47 | if ::File.exist?( path ) 48 | @@geo_orgs = GeoIP::Organization.new(path) 49 | break 50 | end 51 | end 52 | end 53 | end 54 | 55 | if GEOIP_ASN_DATABASE_PATH 56 | if ::File.exist?(GEOIP_ASN_DATABASE_PATH) 57 | @@geo_asn = GeoIP::Organization.new(GEOIP_ASN_DATABASE_PATH) 58 | end 59 | else 60 | GEOIP_DIRS.each do |d| 61 | GEOIP_ASN.each do |f| 62 | path = File.join(d, f) 63 | if ::File.exist?(path) 64 | @@geo_asn = GeoIP::Organization.new(path) 65 | break 66 | end 67 | end 68 | end 69 | end 70 | end 71 | 72 | 73 | # 74 | # Add GeoIP tags using the MaxMind GeoIP::City 75 | # 76 | class FilterGeoIP 77 | include BaseDecoder 78 | include GeoIPLibrary 79 | def decode(ip) 80 | unless @@geo_city 81 | raise "No MaxMind GeoIP::City data found" 82 | end 83 | geo_hash = @@geo_city.look_up(ip) 84 | return unless geo_hash 85 | ret = {} 86 | geo_hash.each_pair do |k,v| 87 | next unless k 88 | ret[k.to_s] = v.to_s 89 | end 90 | 91 | ret 92 | end 93 | end 94 | 95 | # 96 | # Add GeoIP tags using the MaxMind GeoIP::Organization database 97 | # 98 | class FilterGeoIPOrg 99 | include BaseDecoder 100 | include GeoIPLibrary 101 | def decode(ip) 102 | unless @@geo_orgs 103 | raise "No MaxMind GeoIP::Organization data found" 104 | end 105 | geo_hash = @@geo_orgs.look_up(ip) 106 | return unless (geo_hash and geo_hash[:name]) 107 | { :org => geo_hash[:name] } 108 | end 109 | end 110 | 111 | # 112 | # Add GeoIP ASN tags using the MaxMind GeoIP::ASN database 113 | # 114 | class FilterGeoIPAsn 115 | include BaseDecoder 116 | include GeoIPLibrary 117 | def decode(ip) 118 | unless @@geo_asn 119 | raise "No MaxMind GeoIP::ASN data found" 120 | end 121 | geo_hash = @@geo_asn.look_up(ip) 122 | return unless (geo_hash and geo_hash[:name]) 123 | { :asn => geo_hash[:name].split(' ')[0] } 124 | end 125 | end 126 | 127 | end 128 | end 129 | -------------------------------------------------------------------------------- /lib/dap/filter/geoip2.rb: -------------------------------------------------------------------------------- 1 | require 'maxmind/db' 2 | 3 | module Dap 4 | module Filter 5 | 6 | require 'dap/utils/misc' 7 | 8 | module GeoIP2Library 9 | GEOIP2_DIRS = [ 10 | File.expand_path( File.join( File.dirname(__FILE__), "..", "..", "..", "data")), 11 | "/var/lib/geoip", 12 | "/var/lib/geoip2" 13 | ] 14 | GEOIP2_CITY = %W{ GeoLite2-City.mmdb } 15 | GEOIP2_ASN = %W{ GeoLite2-ASN.mmdb } 16 | GEOIP2_ISP = %W{ GeoIP2-ISP.mmdb } 17 | 18 | def self.find_db(db_file_names, db_dirs, env_path) 19 | if env_path 20 | if ::File.exist?(env_path) 21 | return MaxMind::DB.new(env_path, mode: MaxMind::DB::MODE_MEMORY) 22 | end 23 | else 24 | db_dirs.each do |d| 25 | db_file_names.each do |f| 26 | path = File.join(d, f) 27 | if ::File.exist?(path) 28 | return MaxMind::DB.new(path, mode: MaxMind::DB::MODE_MEMORY) 29 | end 30 | end 31 | end 32 | end 33 | nil 34 | end 35 | 36 | def get_maxmind_data(db, ip) 37 | begin 38 | db.get(ip) 39 | rescue IPAddr::InvalidAddressError 40 | end 
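# note: MaxMind::DB#get raises IPAddr::InvalidAddressError on a malformed address; the empty rescue above makes this method return nil, which callers already treat as "no data for this IP"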
41 | end 42 | 43 | def remove_empties(hash) 44 | hash.each_pair do |k,v| 45 | if v.empty? 46 | hash.delete(k) 47 | end 48 | end 49 | hash 50 | end 51 | 52 | @@geo_asn = find_db(GEOIP2_ASN, GEOIP2_DIRS, ENV["GEOIP2_ASN_DATABASE_PATH"]) 53 | @@geo_city = find_db(GEOIP2_CITY, GEOIP2_DIRS, ENV["GEOIP2_CITY_DATABASE_PATH"]) 54 | @@geo_isp = find_db(GEOIP2_ISP, GEOIP2_DIRS, ENV["GEOIP2_ISP_DATABASE_PATH"]) 55 | end 56 | 57 | 58 | # 59 | # Add GeoIP2 tags using the MaxMind GeoIP2::City 60 | # 61 | class FilterGeoIP2City 62 | include BaseDecoder 63 | include GeoIP2Library 64 | 65 | GEOIP2_LANGUAGE = ENV["GEOIP2_LANGUAGE"] || "en" 66 | LOCALE_SPECIFIC_NAMES = %w(city.names continent.names country.names registered_country.names represented_country.names) 67 | DESIRED_GEOIP2_KEYS = %w( 68 | city.geoname_id 69 | continent.code continent.geoname_id 70 | country.geoname_id country.iso_code country.is_in_european_union 71 | location.accuracy_radius location.latitude location.longitude location.metro_code location.time_zone 72 | postal.code 73 | registered_country.geoname_id registered_country.iso_code registered_country.is_in_european_union 74 | represented_country.geoname_id represented_country.iso_code represented_country.is_in_european_union represented_country.type 75 | traits.is_anonymous_proxy traits.is_satellite_provider 76 | ) 77 | 78 | attr_reader :locale_specific_names 79 | def initialize(args={}) 80 | @locale_specific_names = LOCALE_SPECIFIC_NAMES.map { |lsn| "#{lsn}.#{GEOIP2_LANGUAGE}" } 81 | super 82 | end 83 | 84 | def decode(ip) 85 | unless @@geo_city 86 | raise "No MaxMind GeoIP2::City data found" 87 | end 88 | 89 | ret = defaults 90 | geo_hash = get_maxmind_data(@@geo_city, ip) 91 | return unless geo_hash 92 | 93 | if geo_hash.include?("subdivisions") 94 | # handle countries that are divided into various subdivisions. 
generally 1, sometimes 2 95 | subdivisions = geo_hash["subdivisions"] 96 | geo_hash.delete("subdivisions") 97 | ret["geoip2.city.subdivisions.length"] = subdivisions.size.to_s 98 | subdivisions.each_index do |i| 99 | subdivision = subdivisions[i] 100 | subdivision.each_pair do |k,v| 101 | if %w(geoname_id iso_code).include?(k) 102 | ret["geoip2.city.subdivisions.#{i}.#{k}"] = v.to_s 103 | elsif k == "names" 104 | if v.include?(GEOIP2_LANGUAGE) 105 | ret["geoip2.city.subdivisions.#{i}.name"] = subdivision["names"][GEOIP2_LANGUAGE] 106 | end 107 | end 108 | end 109 | end 110 | end 111 | 112 | Dap::Utils::Misc.flatten_hash(geo_hash).each_pair do |k,v| 113 | if DESIRED_GEOIP2_KEYS.include?(k) 114 | # these keys we can just copy directly over 115 | ret["geoip2.city.#{k}"] = v 116 | elsif @locale_specific_names.include?(k) 117 | # these keys we need to pick the locale-specific name and set the key accordingly 118 | lsn_renamed = k.gsub(/\.names.#{GEOIP2_LANGUAGE}/, ".name") 119 | ret["geoip2.city.#{lsn_renamed}"] = v 120 | end 121 | end 122 | 123 | remove_empties(ret) 124 | end 125 | 126 | def defaults() 127 | ret = {} 128 | default_int_suffixes = %w(geoname_id metro_code) 129 | default_bool_suffixes = %w(is_in_european_union is_anonymous_proxy is_satellite_provider) 130 | DESIRED_GEOIP2_KEYS.each do |k| 131 | suffix = k.split(/\./)[-1] 132 | if default_int_suffixes.include?(suffix) 133 | ret["geoip2.city.#{k}"] = "0" 134 | elsif default_bool_suffixes.include?(suffix) 135 | ret["geoip2.city.#{k}"] = "false" 136 | else 137 | ret["geoip2.city.#{k}"] = "" 138 | end 139 | end 140 | ret 141 | end 142 | end 143 | 144 | # 145 | # Add GeoIP2 ASN and Org tags using the MaxMind GeoIP2::ASN database 146 | # 147 | class FilterGeoIP2Asn 148 | include BaseDecoder 149 | include GeoIP2Library 150 | 151 | def decode(ip) 152 | unless @@geo_asn 153 | raise "No MaxMind GeoIP2::ASN data found" 154 | end 155 | 156 | geo_hash = get_maxmind_data(@@geo_asn, ip) 157 | return unless geo_hash 158 | ret = {} 159 | 160 | if geo_hash.include?("autonomous_system_number") 161 | ret["geoip2.asn.asn"] = "AS#{geo_hash["autonomous_system_number"]}" 162 | else 163 | ret["geoip2.asn.asn"] = "" 164 | end 165 | 166 | if geo_hash.include?("autonomous_system_organization") 167 | ret["geoip2.asn.asn_org"] = "#{geo_hash["autonomous_system_organization"]}" 168 | else 169 | ret["geoip2.asn.asn_org"] = "" 170 | end 171 | 172 | remove_empties(ret) 173 | end 174 | end 175 | 176 | # 177 | # Add GeoIP2 ISP tags using the MaxMind GeoIP2::ISP database 178 | # 179 | class FilterGeoIP2Isp 180 | include BaseDecoder 181 | include GeoIP2Library 182 | def decode(ip) 183 | unless @@geo_isp 184 | raise "No MaxMind GeoIP2::ISP data found" 185 | end 186 | 187 | geo_hash = get_maxmind_data(@@geo_isp, ip) 188 | return unless geo_hash 189 | ret = {} 190 | 191 | if geo_hash.include?("autonomous_system_number") 192 | ret["geoip2.isp.asn"] = "AS#{geo_hash["autonomous_system_number"]}" 193 | else 194 | ret["geoip2.isp.asn"] = "" 195 | end 196 | 197 | if geo_hash.include?("autonomous_system_organization") 198 | ret["geoip2.isp.asn_org"] = geo_hash["autonomous_system_organization"] 199 | else 200 | ret["geoip2.isp.asn_org"] = "" 201 | end 202 | 203 | if geo_hash.include?("isp") 204 | ret["geoip2.isp.isp"] = geo_hash["isp"] 205 | else 206 | ret["geoip2.isp.isp"] = "" 207 | end 208 | 209 | if geo_hash.include?("organization") 210 | ret["geoip2.isp.org"] = geo_hash["organization"] 211 | else 212 | ret["geoip2.isp.org"] = "" 213 | end 214 | 215 | remove_empties(ret) 216 
| end 217 | end 218 | 219 | # 220 | # Convert GeoIP2 data as closely as possible to the legacy GeoIP data as generated by geo_ip, geo_ip_asn and geo_ip_org 221 | # 222 | class FilterGeoIP2LegacyCompat 223 | include Base 224 | include GeoIP2Library 225 | 226 | attr_accessor :base_field 227 | 228 | def initialize(args) 229 | super 230 | fail "Expected 1 argument to '#{self.name}' but got #{args.size}" unless args.size == 1 231 | self.base_field = args.first 232 | end 233 | 234 | def process(doc) 235 | # all of these values we just take directly and rename 236 | remap = { 237 | # geoip2 name -> geoip name 238 | "city.country.iso_code": "country_code", 239 | "city.country.name": "country_name", 240 | "city.postal.code": "postal_code", 241 | "city.location.latitude": "latitude", 242 | "city.location.longitude": "longitude", 243 | "city.city.name": "city", 244 | "city.subdivisions.0.iso_code": "region", 245 | "city.subdivisions.0.name": "region_name", 246 | "asn.asn": "asn", 247 | "isp.asn": "asn", 248 | } 249 | 250 | ret = {} 251 | remap.each_pair do |geoip2,geoip| 252 | geoip2_key = "#{self.base_field}.geoip2.#{geoip2}" 253 | if doc.include?(geoip2_key) 254 | ret["#{self.base_field}.#{geoip}"] = doc[geoip2_key] 255 | end 256 | end 257 | 258 | # these values all require special handling 259 | 260 | # https://dev.maxmind.com/geoip/geoip2/whats-new-in-geoip2/#Custom_Country_Codes 261 | # which basically says if traits.is_anonymous_proxy is true, previously the 262 | # country_code would have had a special value of A1. Similarly, if 263 | # traits.is_satellite_provider is true, previously the country_code would 264 | # have a special value of A2. 265 | anon_key = "#{self.base_field}.geoip2.city.traits.is_anonymous_proxy" 266 | if doc.include?(anon_key) 267 | anon_value = doc[anon_key] 268 | if anon_value == "true" 269 | ret["#{self.base_field}.country_code"] = "A1" 270 | end 271 | end 272 | 273 | satellite_key = "#{self.base_field}.geoip2.city.traits.is_satellite_provider" 274 | if doc.include?(satellite_key) 275 | satellite_value = doc[satellite_key] 276 | if satellite_value == "true" 277 | ret["#{self.base_field}.country_code"] = "A2" 278 | end 279 | end 280 | 281 | # only set dma_code if location.metro_code was set and not empty or 0 282 | metro_key = "#{self.base_field}.geoip2.city.location.metro_code" 283 | if doc.include?(metro_key) 284 | metro_value = doc[metro_key] 285 | if !metro_value.empty? && metro_value != "0" 286 | ret["#{self.base_field}.dma_code"] = metro_value 287 | end 288 | end 289 | 290 | # get the org key from 3 possible fields in decreasing order of preference 291 | asn_org_key = "#{self.base_field}.geoip2.asn.asn_org" 292 | isp_asn_org_key = "#{self.base_field}.geoip2.isp.asn_org" 293 | isp_org_key = "#{self.base_field}.geoip2.isp.org" 294 | [ isp_org_key, isp_asn_org_key, asn_org_key ].each do |k| 295 | v = doc[k] 296 | if v && !v.empty?
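# the first key with a non-empty value wins; the break below skips the lower-preference keys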
297 | ret["#{self.base_field}.org"] = v 298 | break 299 | end 300 | end 301 | 302 | [ doc.merge(remove_empties(ret)) ] 303 | end 304 | end 305 | 306 | end 307 | end 308 | -------------------------------------------------------------------------------- /lib/dap/filter/gquic.rb: -------------------------------------------------------------------------------- 1 | # Documentation on what the different gquic values are 2 | # https://github.com/quicwg/base-drafts/wiki/QUIC-Versions 3 | module Dap 4 | module Filter 5 | 6 | # 7 | # Decode a Google Quic VersionsRequest probe response 8 | # 9 | class FilterDecodeGquicVersionsResult 10 | include BaseDecoder 11 | 12 | # 13 | # Decode an GQUIC ( Google Quic) versions probe response 14 | # 15 | # @param data [String] Binary string containing raw response from server 16 | # @return [Hash] containing all GQUIC versions supported else nil 17 | # 18 | def decode(data) 19 | return unless data 20 | # need to skip 9 bytes and assume at least one valid version Q044 21 | if data.length > 9 + 4 && (data.length - 9) % 4 22 | versions = [] 23 | i = 9 24 | step = 4 25 | while i < data.length 26 | version = data[i..i+4-1] 27 | # Versions start with the letter Q followed by number e.g. 001 - 043 28 | if version =~ /^Q\d{3}$/ 29 | versions.push(version) 30 | end 31 | i = i + step 32 | end 33 | if versions.length > 0 34 | # examples show versions in descending order, but in case its not reverse sort 35 | info = {'versions' => versions.sort.reverse} 36 | return info 37 | end 38 | end 39 | end 40 | end 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /lib/dap/filter/http.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Filter 3 | 4 | require 'htmlentities' 5 | require 'shellwords' 6 | require 'uri' 7 | require 'zlib' 8 | require 'stringio' 9 | 10 | # Dirty element extractor, works around memory issues with Nokogiri 11 | module HTMLGhetto 12 | def extract_elements(data) 13 | @coder ||= HTMLEntities.new 14 | res = [] 15 | data. 16 | to_s. 17 | encode('UTF-8', invalid: :replace, undef: :replace, replace: ''). 18 | scan(/<([^<>]{1,4096})>/m).each do |e| 19 | 20 | e = e.first 21 | 22 | # Skip closing tags 23 | next if e[0,1] == "/" 24 | 25 | # Get the name vs attributes 26 | name, astr = e.split(/\s+/, 2).map{|x| x.to_s } 27 | astr ||= '' 28 | 29 | # Skip non-alpha elements 30 | next unless name =~ /^[a-zA-Z]/ 31 | 32 | # Convert newlines to spaces & strip trailing /> 33 | astr = astr.gsub(/\n/, ' ').sub(/\/$/, '') 34 | 35 | o = { name: name } 36 | 37 | begin 38 | Shellwords.shellwords(astr).each do |attr_str| 39 | aname, avalue = attr_str.split('=', 2).map{|x| x.to_s.strip } 40 | avalue = avalue.to_s.gsub(/^\"|"$/, '') 41 | o[aname.downcase] = @coder.decode(avalue) 42 | end 43 | rescue ::Interrupt 44 | raise $! 
45 | rescue ::Exception 46 | # If shellwords couldn't parse it, split on space instead 47 | astr.to_s.split(/\s+/).each do |attr_str| 48 | aname, avalue = attr_str.split('=', 2).map{|x| x.to_s.strip } 49 | avalue = avalue.to_s.gsub(/^\"|"$/, '') 50 | o[aname.downcase] = @coder.decode(avalue) 51 | end 52 | end 53 | res << o 54 | end 55 | 56 | res 57 | end 58 | end 59 | 60 | class FilterHTMLIframes 61 | include Base 62 | include HTMLGhetto 63 | 64 | def process(doc) 65 | out = [] 66 | self.opts.each_pair do |k,v| 67 | next unless doc.has_key?(k) 68 | extract(doc[k]).each do |url| 69 | out << doc.merge({ 'iframe' => url }) 70 | end 71 | end 72 | out 73 | end 74 | 75 | def extract(data) 76 | urls = [] ; extract_elements(data).select{|x| x[:name] == 'iframe'}.each do |e| 77 | url = e['src'] 78 | next unless (url && url.length > 0) 79 | urls << url 80 | end 81 | urls 82 | end 83 | end 84 | 85 | 86 | class FilterHTMLLinks 87 | include Base 88 | include HTMLGhetto 89 | 90 | def process(doc) 91 | out = [] 92 | self.opts.each_pair do |k,v| 93 | next unless doc.has_key?(k) 94 | extract(doc[k]).each do |link_info| 95 | out << doc.merge(link_info) 96 | end 97 | end 98 | out 99 | end 100 | 101 | def extract(data) 102 | urls = [] 103 | 104 | extract_elements(data).each do |e| 105 | url = e['href'] || e['src'] 106 | next unless (url && url.length > 0) 107 | urls << { 'link' => url, 'element' => e[:name] } 108 | end 109 | 110 | urls 111 | end 112 | end 113 | 114 | class FilterDecodeURI 115 | include BaseDecoder 116 | def decode(data) 117 | save = {} 118 | uri = URI.parse(data) rescue nil 119 | return unless uri 120 | 121 | save["host"] = uri.host if uri.host 122 | save["port"] = uri.port.to_s if uri.port 123 | save["path"] = uri.path if uri.path 124 | save["query"] = uri.query if uri.query 125 | save["scheme"] = uri.scheme if uri.scheme 126 | save["user"] = uri.user if uri.user 127 | save["password"] = uri.password if uri.password 128 | 129 | save 130 | end 131 | end 132 | 133 | 134 | class FilterDecodeHTTPReply 135 | include BaseDecoder 136 | 137 | def decode(data) 138 | lines = data.split(/\r?\n/) 139 | resp = lines.shift 140 | save = {} 141 | return save if resp !~ /^HTTP\/\d+\.\d+\s+(\d+)(?:\s+(.*))?/ 142 | 143 | save["http_code"] = $1.to_i 144 | save["http_message"] = ($2 ? $2.strip : '') 145 | save["http_raw_headers"] = {} 146 | save.merge!(parse_headers(lines)) 147 | 148 | head, raw_body = data.split(/\r?\n\r?\n/, 2) 149 | 150 | # Some buggy systems exclude the header entirely 151 | raw_body ||= head 152 | 153 | save["http_raw_body"] = [raw_body].pack("m*").gsub(/\s+/n, "") 154 | body = raw_body 155 | 156 | transfer_encoding = save["http_raw_headers"]["transfer-encoding"] 157 | if transfer_encoding && transfer_encoding.include?("chunked") 158 | offset = 0 159 | chunk_num = 1 160 | body = '' 161 | while (true) 162 | # read the chunk size from where we currently are. The chunk size will 163 | # be specified in hex, at the beginning, and is followed by \r\n.
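# e.g. an illustrative (not captured) chunked body of "5\r\nhello\r\n0\r\n\r\n" yields chunk_size_str "5", appends "hello" to body, and the loop ends once no further chunk marker matches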
164 | if /^(?<chunk_size_str>[a-z0-9]+)\r\n/i =~ raw_body.slice(offset, raw_body.size) 165 | # convert chunk size 166 | chunk_size = chunk_size_str.to_i(16) 167 | # advance past this chunk marker and its trailing \r\n 168 | offset += chunk_size_str.size + 2 169 | if offset + chunk_size > raw_body.size 170 | $stderr.puts "Skipping impossibly large #{chunk_size}-byte ##{chunk_num} chunk, at offset #{offset}/#{raw_body.size}" 171 | break 172 | end 173 | # read this chunk, starting from just past the chunk marker and 174 | # stopping at the supposed end of the chunk 175 | body << raw_body.slice(offset, chunk_size) 176 | # advance the offset to past the end of the chunk and its trailing \r\n 177 | offset += chunk_size + 2 178 | chunk_num += 1 179 | else 180 | break 181 | end 182 | end 183 | 184 | # chunked-encoding allows headers to occur after the chunks, so parse those 185 | if offset < raw_body.size 186 | trailing_headers = parse_headers(raw_body.slice(offset, raw_body.size).split(/\r?\n/)) 187 | save.merge!(trailing_headers) { |header, old, new| 188 | if old.kind_of?(String) 189 | [old, new].join(',') 190 | elsif old.kind_of?(Hash) 191 | old.merge(new) { |nheader, nold, nnew| 192 | nold + nnew 193 | } 194 | end 195 | } 196 | end 197 | end 198 | 199 | content_encoding = save["http_raw_headers"]["content-encoding"] 200 | if content_encoding && content_encoding.include?("gzip") 201 | begin 202 | gunzip = Zlib::GzipReader.new(StringIO.new(body)) 203 | body = gunzip.read.encode('UTF-8', :invalid=>:replace, :replace=>'?') 204 | gunzip.close() 205 | rescue 206 | end 207 | end 208 | save["http_body"] = body 209 | 210 | if body =~ /<title[^>]*>([^>]+)</mi 211 | save["http_title"] = $1.strip 212 | end 213 | 214 | save 215 | end 216 | 217 | def valid_header_name?(name) 218 | return name !~ /[\x00-\x1f()<>@,;:\\\"\/\[\]?={}\s]/ 219 | end 220 | 221 | def parse_headers(lines) 222 | headers = {} 223 | 224 | while lines.length > 0 225 | hline = lines.shift 226 | if /^(?<header_name>[^:]+):\s*(?<header_value>.*)$/ =~ hline 227 | header_value.strip! 228 | header_name.downcase! 229 | 230 | if valid_header_name?(header_name) 231 | headers["http_raw_headers"] ||= {} 232 | headers["http_raw_headers"][header_name] ||= [] 233 | headers["http_raw_headers"][header_name] << header_value 234 | 235 | # XXX: warning, all of these mishandle duplicate headers 236 | case header_name 237 | when 'etag' 238 | headers["http_etag"] = header_value 239 | 240 | when 'set-cookie' 241 | bits = header_value.gsub(/\;?\s*path=.*/i, '').gsub(/\;?\s*expires=.*/i, '').gsub(/\;\s*HttpOnly.*/, '') 242 | headers["http_cookie"] = bits 243 | 244 | when 'server' 245 | headers["http_server"] = header_value 246 | 247 | when 'x-powered-by' 248 | headers["http_powered"] = header_value 249 | 250 | when 'date' 251 | begin 252 | d = DateTime.parse(header_value) 253 | headers["http_date"] = d.to_time.utc.strftime("%Y%m%dT%H:%M:%S%z") if d 254 | rescue 255 | end 256 | 257 | when 'last-modified' 258 | begin 259 | d = DateTime.parse(header_value) 260 | headers["http_modified"] = d.to_time.utc.strftime("%Y%m%dT%H:%M:%S%z") if d 261 | rescue 262 | end 263 | 264 | when 'location' 265 | headers["http_location"] = header_value 266 | 267 | when 'www-authenticate' 268 | headers["http_auth"] = header_value 269 | 270 | when 'content-length' 271 | headers["content-length"] = header_value.to_i 272 | end 273 | else 274 | # not a valid header.
XXX, eventually we should log or do something more useful here 275 | end 276 | elsif hline == "" 277 | break 278 | end 279 | end 280 | 281 | return headers 282 | end 283 | end 284 | end 285 | end 286 | -------------------------------------------------------------------------------- /lib/dap/filter/ldap.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Filter 3 | 4 | require 'openssl' 5 | 6 | require 'dap/proto/ldap' 7 | 8 | # 9 | # Decode an LDAP SearchRequest probe response 10 | # 11 | class FilterDecodeLdapSearchResult 12 | include BaseDecoder 13 | 14 | # 15 | # Decode an LDAP SearchRequest probe response 16 | # 17 | # @param data [String] Binary string containing raw response from server 18 | # @return [Hash] Hash containing all LDAP responses 19 | # 20 | def decode(data) 21 | info = {} 22 | 23 | # RFC 4511 - 4.5.2 SearchResult contains zero or more SearchResultEntry or 24 | # SearchResultReference messages followed by a single SearchResultDone 25 | # message. OpenSSL::ASN1.decode doesn't handle the back to back Sequences 26 | # well, so identify the lengths and split them into individual ASN1 elements 27 | messages = Dap::Proto::LDAP.split_messages(data) 28 | 29 | if messages.empty? 30 | err_msg = 'FilterDecodeLdapSearchResult - Unable to parse response' 31 | info['Error'] = { 'errorMessage' => err_msg } 32 | end 33 | 34 | 35 | messages.each do |element| 36 | begin 37 | elem_decoded = OpenSSL::ASN1.decode(element) 38 | parsed_type, parsed_data = Dap::Proto::LDAP.parse_message(elem_decoded) 39 | info[parsed_type] = parsed_data if parsed_type && parsed_data 40 | rescue Exception => e 41 | err_msg = 'FilterDecodeLdapSearchResult - Unable to decode ASN.1 element' 42 | $stderr.puts "#{err_msg}: #{e}" 43 | $stderr.puts e.backtrace 44 | $stderr.puts "Element:\n\t#{element.inspect}" 45 | $stderr.puts "Element hex:\n\t#{element.unpack('H*')}\n\n" 46 | info['Error'] = { 'errorMessage' => err_msg } 47 | next 48 | end 49 | end 50 | 51 | info 52 | end 53 | 54 | end 55 | 56 | end 57 | end 58 | -------------------------------------------------------------------------------- /lib/dap/filter/names.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Filter 3 | 4 | MATCH_FQDN = /^([a-z0-9\_\-]+\.)+[a-z0-9\-]+\.?$/ 5 | VALID_FQDNS_FILE = File.expand_path(File.join(File.dirname(__FILE__), "..", "..", "..", "data", "tlds-alpha-by-domain.txt")) 6 | 7 | 8 | class FilterExtractHostname 9 | include BaseDecoder 10 | 11 | def initialize(*args) 12 | @valid_fqdns = IO.readlines(VALID_FQDNS_FILE).map(&:rstrip).map(&:downcase) 13 | super(*args) 14 | end 15 | 16 | def decode(data) 17 | data = data.strip.gsub(/.*\@/, '').gsub(/^\*+/, '').gsub(/^\.+/, '').gsub(/\.+$/, '').downcase 18 | return unless data =~ MATCH_FQDN 19 | 20 | return unless @valid_fqdns.include?(data.split('.').last) 21 | 22 | { 'hostname' => data } 23 | end 24 | end 25 | 26 | class FilterSplitDomains 27 | include Base 28 | def process(doc) 29 | lines = [ ] 30 | self.opts.each_pair do |k,v| 31 | if doc.has_key?(k) 32 | expand(doc[k]).each do |line| 33 | lines << doc.merge({ "#{k}.domain" => line }) 34 | end 35 | end 36 | end 37 | lines.length == 0 ? 
[ doc ] : [ lines ] 38 | end 39 | 40 | def expand(data) 41 | names = [] 42 | bits = data.split('.') 43 | while (bits.length > 1) 44 | names << bits.join('.') 45 | bits.shift 46 | end 47 | names 48 | end 49 | end 50 | 51 | 52 | class FilterPrependSubdomains 53 | include Base 54 | def process(doc) 55 | lines = [ ] 56 | self.opts.each_pair do |k,v| 57 | if doc.has_key?(k) 58 | expand(doc[k], v).each do |line| 59 | lines << doc.merge({ k => line }) 60 | end 61 | end 62 | end 63 | lines.length == 0 ? [ ] : [ lines ] 64 | end 65 | 66 | def expand(data, names) 67 | outp = [ data ] 68 | bits = data.split(".") 69 | subs = names.split(",") 70 | 71 | # Avoid www.www.domain.tld and mail.www.domain.tld 72 | return outp if subs.include?(bits.first) 73 | subs.each do |sub| 74 | outp << "#{sub}.#{data}" 75 | end 76 | 77 | outp 78 | end 79 | 80 | end 81 | 82 | # 83 | # Acts like SplitDomains but strips out common dynamic IP RDNS formats 84 | # 85 | # XXX - Lots of work left to do 86 | # 87 | 88 | class FilterSplitNonDynamicDomains 89 | include Base 90 | def process(doc) 91 | lines = [ ] 92 | self.opts.each_pair do |k,v| 93 | if doc.has_key?(k) 94 | expand(doc[k]).each do |line| 95 | lines << doc.merge({ "#{k}.domain" => line }) 96 | end 97 | end 98 | end 99 | lines.length == 0 ? [ doc ] : [ lines ] 100 | end 101 | 102 | def expand(data) 103 | names = [] 104 | data = data.unpack("C*").pack("C*"). 105 | gsub(/.*ip\d+\.ip\d+\.ip\d+\.ip\d+\./, ''). 106 | gsub(/.*\d+[\_\-\.x]\d+[\_\-\.x]\d+[\_\-\.x]\d+[^\.]+/, ''). 107 | gsub(/.*node-[a-z0-9]+.*pool.*dynamic\./, ''). 108 | gsub(/.*[a-z][a-z]\d+\.[a-z]as[a-z0-9]+\./, ''). 109 | # cl223.001033200.technowave.ne.jp 110 | gsub(/^cl\d+.[0-9]{6,14}\./, ''). 111 | # n157.s1117.m-zone.jp 112 | gsub(/^n\d+.s\d+\.m-zone.jp/, 'm-zone.jp'). 113 | # u570054.xgsnu2.imtp.tachikawa.mopera.net 114 | # s505207.xgsspn.imtp.tachikawa.spmode.ne.jp 115 | gsub(/^[us]\d+.xgs[a-z0-9]+\.imtp/, 'imtp'). 116 | # tzbm6501209.tobizaru.jp 117 | gsub(/^tzbm[0-9]{6,9}\./, ''). 118 | # ARennes-556-1-256-bdcst.w2-14.abo.wanadoo.fr 119 | gsub(/.*\-\d+\-\d+\-\d+\-(net|bdcst)\./, ''). 120 | # bl19-128-119.dsl.telepac.pt 121 | gsub(/.*\d+\-\d+\-\d+\.dsl/, 'dsl'). 122 | gsub(/.*pool\./, ''). 123 | gsub(/.*dynamic\./, ''). 124 | gsub(/.*static\./, ''). 125 | gsub(/.*dhcp[^\.]+\./, ''). 126 | gsub(/^\d{6,100}\./, ''). 127 | gsub(/^\.+/, ''). 
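# finally, drop any character other than a-z, 0-9, '.' and '-'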
128 | tr('^a-z0-9.-', '') 129 | 130 | bits = data.split('.') 131 | while (bits.length > 1) 132 | names << bits.join('.') 133 | bits.shift 134 | end 135 | names 136 | end 137 | end 138 | 139 | 140 | end 141 | end 142 | -------------------------------------------------------------------------------- /lib/dap/filter/openssl.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Filter 3 | 4 | require 'openssl' 5 | 6 | class FilterDecodeX509 7 | include BaseDecoder 8 | 9 | def decode(data) 10 | save = {} 11 | cert = OpenSSL::X509::Certificate.new(data) rescue nil 12 | return unless cert 13 | 14 | dnames = [] 15 | cert.subject.to_s.split("/").each do |bit| 16 | var,val = bit.split("=", 2) 17 | next unless (var and val) 18 | var = var.to_s.downcase.strip 19 | save["s_#{var}"] = val 20 | if var == "cn" 21 | dnames << val 22 | end 23 | end 24 | 25 | cert.issuer.to_s.split("/").each do |bit| 26 | var,val = bit.split("=", 2) 27 | next unless (var and val) 28 | var = var.to_s.downcase.strip 29 | save["i_#{var}"] = val 30 | end 31 | 32 | cert.extensions.each do |e| 33 | next unless e.to_s =~ /^([^\s]+)\s*=\s*(.*)/ 34 | var,val = $1,$2 35 | var = var.to_s.downcase.strip 36 | save["e_#{var}"] = val.strip 37 | 38 | if var == "subjectaltname" 39 | val.split(",").map{|x| x.gsub("DNS:", "").gsub("IP:", "").gsub("email:", "").strip }.each do |name| 40 | dnames << name 41 | end 42 | end 43 | 44 | end 45 | 46 | save["names"] = dnames 47 | save 48 | end 49 | 50 | end 51 | 52 | end 53 | end -------------------------------------------------------------------------------- /lib/dap/filter/recog.rb: -------------------------------------------------------------------------------- 1 | require 'recog' 2 | 3 | module Dap 4 | module Filter 5 | 6 | class FilterRecog 7 | include Base 8 | 9 | def process(doc) 10 | self.opts.each_pair do |k,v| 11 | next unless doc.has_key?(k) 12 | match = Recog::Nizer.match(v, doc[k]) 13 | next unless match 14 | match.each_pair do |ok, ov| 15 | doc["#{k}.recog.#{ok}"] = ov.to_s 16 | end 17 | end 18 | [ doc ] 19 | end 20 | end 21 | 22 | end 23 | end -------------------------------------------------------------------------------- /lib/dap/filter/simple.rb: -------------------------------------------------------------------------------- 1 | require 'digest/sha1' 2 | require 'digest/md5' 3 | require 'digest/sha2' 4 | require 'json' 5 | 6 | module Dap 7 | module Filter 8 | 9 | class FilterCopy 10 | include Base 11 | 12 | def process(doc) 13 | self.opts.each_pair do |k,v| 14 | if doc.has_key?(k) 15 | doc[v] = doc[k] 16 | end 17 | end 18 | [ doc ] 19 | end 20 | end 21 | 22 | class FilterRename 23 | include Base 24 | 25 | def initialize(args) 26 | super 27 | missing_rename = self.opts.select { |k, v| v.nil? }.keys 28 | unless missing_rename.empty? 29 | fail "Missing new name for renames of #{missing_rename.join(',')}" 30 | end 31 | end 32 | 33 | def process(doc) 34 | self.opts.each_pair do |k,v| 35 | if doc.has_key?(k) 36 | doc[v] = doc[k] 37 | doc.delete(k) 38 | end 39 | end 40 | [ doc ] 41 | end 42 | end 43 | 44 | class FilterFieldReplace 45 | include Base 46 | attr_accessor :all 47 | 48 | def initialize(args, all=false) 49 | self.all = all 50 | super(args) 51 | missing_replace = self.opts.select { |k, v| v.nil? }.keys 52 | unless missing_replace.empty? 
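# each option must look like <field>=<search>=<replace>, e.g. the hypothetical 'field_replace host=\.=_' which rewrites the first '.' in doc['host'] to '_'; a bare field name parses to a nil value and is rejected here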
53 | fail "Missing search/replace for #{missing_replace.join(',')}" 54 | end 55 | end 56 | 57 | def process(doc) 58 | self.opts.each_pair do |k,v| 59 | if doc.has_key?(k) 60 | search, replace = v.split('=', 2) 61 | search = Regexp.new(search) 62 | if self.all 63 | doc[k] = doc[k].gsub(search, replace) 64 | else 65 | doc[k] = doc[k].sub(search, replace) 66 | end 67 | end 68 | end 69 | [ doc ] 70 | end 71 | end 72 | 73 | class FilterFieldReplaceAll < FilterFieldReplace 74 | def initialize(args) 75 | super(args, all=true) 76 | end 77 | end 78 | 79 | # Example below replaces periods with underscores in the names of all keys 80 | # one level below 'my_key' 81 | # rename_subkey_match my_key '.' '_' 82 | class FilterRenameSubkeyMatch 83 | include Base 84 | 85 | def initialize(args) 86 | super 87 | fail "Expected 3 arguments to '#{self.name}' but got #{args.size}" unless args.size == 3 88 | self.opts = args 89 | end 90 | 91 | def process(doc) 92 | temp_field = {} 93 | field, original, updated = self.opts 94 | return [ doc ] unless doc[field].is_a?(::Hash) 95 | doc[field].each_key do |k| 96 | new_k = k.gsub(original, updated) 97 | temp_field[new_k] = doc[field][k] 98 | end 99 | doc[field] = temp_field 100 | [ doc ] 101 | end 102 | end 103 | 104 | class FilterMatchRemove 105 | include Base 106 | def process(doc) 107 | self.opts.each_pair do |re,_| 108 | doc.each_key do |k| 109 | if k.match(re) 110 | doc.delete(k) 111 | end 112 | end 113 | end 114 | [ doc ] 115 | end 116 | end 117 | 118 | class FilterRemove 119 | include Base 120 | def process(doc) 121 | self.opts.each_pair do |k,v| 122 | if doc.has_key?(k) 123 | doc.delete(k) 124 | end 125 | end 126 | [ doc ] 127 | end 128 | end 129 | 130 | class FilterMatchSelect 131 | include Base 132 | def process(doc) 133 | ndoc = {} 134 | self.opts.each_pair do |re,| 135 | doc.each_key do |k| 136 | if k.match(re) 137 | ndoc[k] = doc[k] 138 | end 139 | end 140 | end 141 | (ndoc.keys.length == 0) ? [] : [ ndoc ] 142 | end 143 | end 144 | 145 | class FilterMatchSelectKey < FilterMatchSelect 146 | end 147 | 148 | class FilterMatchSelectValue 149 | include Base 150 | def process(doc) 151 | ndoc = {} 152 | self.opts.each_pair do |re,| 153 | doc.each_key do |k| 154 | if doc[k].match(re) 155 | ndoc[k] = doc[k] 156 | end 157 | end 158 | end 159 | (ndoc.keys.length == 0) ? [] : [ ndoc ] 160 | end 161 | end 162 | 163 | class FilterSelect 164 | include Base 165 | def process(doc) 166 | ndoc = {} 167 | self.opts.each_pair do |k,v| 168 | if doc.has_key?(k) 169 | ndoc[k] = doc[k] 170 | end 171 | end 172 | (ndoc.keys.length == 0) ? [] : [ ndoc ] 173 | end 174 | end 175 | 176 | class FilterInsert 177 | include Base 178 | def process(doc) 179 | self.opts.each_pair do |k,v| 180 | doc[k] = v 181 | end 182 | [ doc ] 183 | end 184 | end 185 | 186 | class FilterInclude 187 | include Base 188 | def process(doc) 189 | self.opts.each_pair do |k,v| 190 | if doc.has_key?(k) and doc[k].to_s.index(v) 191 | return [ doc ] 192 | end 193 | end 194 | [ ] 195 | end 196 | end 197 | 198 | # where 'some.field == some_value' 199 | # where 'some.field != some_value' 200 | # TODO: do something other than basic string comparison. 
Would be nice to have where 'some.field > 2', etc 201 | class FilterWhere 202 | attr_accessor :query 203 | 204 | def initialize(args) 205 | fail "Expected 3 arguments to 'where' but got #{args.size}" unless args.size == 3 206 | self.query = args 207 | end 208 | 209 | def process(doc) 210 | field, operator, expected = self.query 211 | return [ doc ] if doc.has_key?(field) and doc[field].send(operator, expected) 212 | [ ] 213 | end 214 | end 215 | 216 | class FilterExclude 217 | include Base 218 | def process(doc) 219 | self.opts.each_pair do |k,v| 220 | if doc.has_key?(k) and doc[k].to_s.index(v) 221 | return [ ] 222 | end 223 | end 224 | [ doc ] 225 | end 226 | end 227 | 228 | class FilterExists 229 | include Base 230 | def process(doc) 231 | self.opts.each_pair do |k,v| 232 | if doc.has_key?(k) and doc[k].to_s.length > 0 233 | return [ doc ] 234 | end 235 | end 236 | [ ] 237 | end 238 | end 239 | 240 | class FilterNotExists < FilterExists 241 | include Base 242 | def process(doc) 243 | exists_doc = super(doc) 244 | exists_doc.empty? ? [ doc ] : [ ] 245 | end 246 | end 247 | 248 | # Applies some simple annotation to the given fields, adding another 249 | # field name with the appended annotation type, i.e.: 250 | # 251 | # $ echo '{"foo":"blah"}' | dap json stdin + annotate foo=length + json 252 | # {"foo":"blah","foo.length":4} 253 | class FilterAnnotate 254 | include Base 255 | def process(doc) 256 | self.opts.each_pair do |k,v| 257 | if doc.has_key?(k) 258 | case v 259 | when 'length' 260 | doc["#{k}.length"] = doc[k].length 261 | when 'size' 262 | doc["#{k}.size"] = doc[k].size 263 | else 264 | fail "Unsupported annotation '#{v}'" 265 | end 266 | end 267 | end 268 | [ doc ] 269 | end 270 | end 271 | 272 | class FilterTransform 273 | include Base 274 | def process(doc) 275 | self.opts.each_pair do |k,v| 276 | if doc.has_key?(k) 277 | case v 278 | when /^int(?<base>\d+)?$/ 279 | base = $LAST_MATCH_INFO['base'] 280 | if base.nil?
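# 'int' alone means decimal, 'int<base>' converts from that base, e.g. (hypothetical values) transform foo=int turns "42" into 42, while transform foo=int16 turns "ff" into 255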
281 | doc[k] = doc[k].to_s.to_i 282 | else 283 | doc[k] = doc[k].to_s.to_i(base.to_i) 284 | end 285 | when 'float' 286 | doc[k] = doc[k].to_f 287 | when 'reverse' 288 | doc[k] = doc[k].to_s.reverse 289 | when 'downcase' 290 | doc[k] = doc[k].to_s.downcase 291 | when 'upcase' 292 | doc[k] = doc[k].to_s.upcase 293 | when /^(lstrip|ltrim)$/ 294 | doc[k] = doc[k].to_s.lstrip 295 | when /^(rstrip|rtrim)$/ 296 | doc[k] = doc[k].to_s.rstrip 297 | when /^(strip|trim)$/ 298 | doc[k] = doc[k].to_s.strip 299 | when 'ascii' 300 | doc[k] = doc[k].to_s.gsub(/[\x00-\x1f\x7f-\xff]/n, '') 301 | when 'json' 302 | doc[k] = JSON.parse(doc[k].to_s) 303 | when 'utf8encode' 304 | doc[k] = doc[k].to_s.encode!('UTF-8', invalid: :replace, undef: :replace, replace: '') 305 | when 'base64decode' 306 | doc[k] = doc[k].to_s.unpack('m*').first 307 | when 'base64encode' 308 | doc[k] = [doc[k].to_s].pack('m*').gsub(/\s+/n, '') 309 | when 'qprintdecode' 310 | doc[k] = doc[k].to_s.gsub(/=([0-9A-Fa-f]{2})/n){ |x| [x[1,2]].pack("H*") } 311 | when 'qprintencode' 312 | doc[k] = doc[k].to_s.gsub(/[\x00-\x20\x3d\x7f-\xff]/n){|x| ( "=%.2x" % x.unpack("C").first ).upcase } 313 | when 'hexdecode' 314 | doc[k] = [ doc[k].to_s ].pack("H*") 315 | when 'hexencode' 316 | doc[k] = doc[k].to_s.unpack("H*").first 317 | else 318 | fail "Invalid transform '#{v}'" 319 | end 320 | end 321 | end 322 | [ doc ] 323 | end 324 | end 325 | 326 | class FilterFlatten 327 | include Base 328 | def process(doc) 329 | self.opts.each_pair do |k,| 330 | if doc.has_key?(k) and doc[k].is_a?(Hash) 331 | doc[k].each_pair do |fk,fv| 332 | doc["#{k}.#{fk}"] = fv 333 | end 334 | end 335 | end 336 | [ doc ] 337 | end 338 | end 339 | 340 | class FilterExpand 341 | include Base 342 | def process(doc) 343 | new_doc = doc.clone 344 | self.opts.each_pair do |k,| 345 | k_re = /^#{k}\.(?<sub_key>.+)$/ 346 | doc.each do |fk,fv| 347 | if md = k_re.match(fk) 348 | new_doc[k] ||= {} 349 | new_doc[k][md[:sub_key]] = fv 350 | end 351 | end 352 | end 353 | [ new_doc ] 354 | end 355 | end 356 | 357 | class FilterTruncate 358 | include Base 359 | def process(doc) 360 | self.opts.each_pair do |k,v| 361 | if doc.has_key?(k) 362 | doc[k] = doc[k].to_s[0, v.to_i] 363 | end 364 | end 365 | [ doc ] 366 | end 367 | end 368 | 369 | class FilterSplitLine 370 | include Base 371 | def process(doc) 372 | lines = [ ] 373 | self.opts.each_pair do |k,v| 374 | if doc.has_key?(k) 375 | doc[k].to_s.split(/\n/).each do |line| 376 | lines << doc.merge({ "#{k}.line" => line }) 377 | end 378 | end 379 | end 380 | lines.length == 0 ? [ doc ] : [ lines ] 381 | end 382 | end 383 | 384 | class FilterSplitWord 385 | include Base 386 | def process(doc) 387 | lines = [ ] 388 | self.opts.each_pair do |k,v| 389 | if doc.has_key?(k) 390 | doc[k].to_s.split(/\W/).each do |line| 391 | lines << doc.merge({ "#{k}.word" => line }) 392 | end 393 | end 394 | end 395 | lines.length == 0 ? [ doc ] : [ lines ] 396 | end 397 | end 398 | 399 | class FilterSplitTab 400 | include Base 401 | def process(doc) 402 | lines = [ ] 403 | self.opts.each_pair do |k,v| 404 | if doc.has_key?(k) 405 | doc[k].to_s.split(/\t/).each do |line| 406 | lines << doc.merge({ "#{k}.tab" => line }) 407 | end 408 | end 409 | end 410 | lines.length == 0 ? 
[ doc ] : [ lines ] 411 | end 412 | end 413 | 414 | 415 | class FilterSplitComma 416 | include Base 417 | def process(doc) 418 | lines = [ ] 419 | self.opts.each_pair do |k,v| 420 | if doc.has_key?(k) 421 | doc[k].to_s.split(/,/).each do |line| 422 | lines << doc.merge({ "#{k}.word" => line }) 423 | end 424 | end 425 | end 426 | lines.length == 0 ? [ doc ] : [ lines ] 427 | end 428 | end 429 | 430 | class FilterSplitArray 431 | include Base 432 | def process(doc) 433 | lines = [ ] 434 | self.opts.each_pair do |k,v| 435 | if doc.has_key?(k) and doc[k].respond_to?(:each) 436 | doc[k].each do |line| 437 | lines << doc.merge({ "#{k}.item" => line }) 438 | end 439 | end 440 | end 441 | lines.length == 0 ? [ doc ] : [ lines ] 442 | end 443 | end 444 | 445 | class FilterFieldSplit 446 | include Base 447 | def process(doc) 448 | self.opts.each_pair do |k,v| 449 | if doc.has_key?(k) 450 | count = 1 451 | doc[k].to_s.split(Regexp.new(v)).each do |thing| 452 | doc.merge!({ "#{k}.f#{count}" => thing }) 453 | count += 1 454 | end 455 | end 456 | end 457 | [ doc ] 458 | end 459 | end 460 | 461 | class FilterFieldSplitLine < FilterFieldSplit 462 | def initialize(args) 463 | super(args.map { |arg| "#{arg}=\\n" } ) 464 | end 465 | end 466 | 467 | class FilterFieldSplitWord < FilterFieldSplit 468 | def initialize(args) 469 | super(args.map { |arg| "#{arg}=\\W" } ) 470 | end 471 | end 472 | 473 | class FilterFieldSplitTab < FilterFieldSplit 474 | def initialize(args) 475 | super(args.map { |arg| "#{arg}=\\t" } ) 476 | end 477 | end 478 | 479 | class FilterFieldSplitComma < FilterFieldSplit 480 | def initialize(args) 481 | super(args.map { |arg| "#{arg}=," } ) 482 | end 483 | end 484 | 485 | class FilterFieldSplitPeriod < FilterFieldSplit 486 | def initialize(args) 487 | super(args.map { |arg| "#{arg}=\\." 
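# the '.' is escaped because FilterFieldSplit compiles each value with Regexp.new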
} ) 488 | end 489 | end 490 | 491 | class FilterFieldSplitArray 492 | include Base 493 | def process(doc) 494 | self.opts.each_pair do |k,v| 495 | if doc.has_key?(k) and doc[k].respond_to?(:each) 496 | wcount = 1 497 | doc[k].each do |word| 498 | doc.merge!({ "#{k}.f#{wcount}" => word }) 499 | wcount += 1 500 | end 501 | end 502 | end 503 | [ doc ] 504 | end 505 | end 506 | 507 | class FilterFieldArrayJoinComma 508 | include Base 509 | def process(doc) 510 | self.opts.each_pair do |k,v| 511 | if doc.has_key?(v) and doc[v].respond_to?(:each) 512 | doc[k] = doc[v].join(",") 513 | end 514 | end 515 | [ doc ] 516 | end 517 | end 518 | 519 | class FilterFieldArrayJoinWhitespace 520 | include Base 521 | def process(doc) 522 | self.opts.each_pair do |k,v| 523 | if doc.has_key?(v) and doc[v].respond_to?(:each) 524 | doc[k] = doc[v].join(" ") 525 | end 526 | end 527 | [ doc ] 528 | end 529 | end 530 | 531 | class FilterDigest 532 | include Base 533 | def process(doc) 534 | self.opts.each_pair do |k,v| 535 | if doc.has_key?(k) 536 | case v 537 | when 'sha1' 538 | doc["#{k}.sha1"] = Digest::SHA1.hexdigest(doc[k].to_s) 539 | when 'md5' 540 | doc["#{k}.md5"] = Digest::MD5.hexdigest(doc[k].to_s) 541 | when 'sha256' 542 | doc["#{k}.sha256"] = Digest::SHA256.hexdigest(doc[k].to_s) 543 | end 544 | end 545 | end 546 | [ doc ] 547 | end 548 | end 549 | 550 | end 551 | end 552 | -------------------------------------------------------------------------------- /lib/dap/filter/smbclient.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Filter 3 | 4 | require 'digest/md5' 5 | 6 | class FilterDecodeSMBClient 7 | include BaseDecoder 8 | 9 | def decode(data) 10 | save = {} 11 | 12 | data.split(/\n/).each do |line| 13 | case line.strip 14 | when /^Domain=\[([^\]]+)\] OS=\[([^\]]+)\] Server=\[([^\]]+)\]/ 15 | save['smb_domain'] = $1 16 | save['smb_native_os'] = $2 17 | save['smb_native_lm'] = $3 18 | end 19 | end 20 | 21 | save 22 | end 23 | end 24 | 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /lib/dap/filter/ssh_keyscan.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Filter 3 | 4 | require 'digest/md5' 5 | 6 | class FilterDecodeSSHKeyscan 7 | include BaseDecoder 8 | 9 | def decode(data) 10 | save = {} 11 | 12 | data.split(/\n/).each do |line| 13 | case line.strip 14 | when /^# [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+(.*)/m 15 | banner = $1 16 | save['banner'] = banner 17 | if banner =~ /^SSH-([\d\.]+)-([^\s]+)\s+(.*)/m 18 | save['ssh-protocol'] = $1 19 | save['ssh-version'] = $2 20 | save['ssh-vendor'] = $3 21 | save['ssh-recog'] = $2 + " " + $3 22 | end 23 | 24 | when /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+((ssh|ecdsa)[^\s]+)\s+(.*)/m 25 | ktype = $1 26 | kdata = $3 27 | save['hkey-' + ktype] = kdata 28 | save['hkey-' + ktype + '-fp'] = Digest::MD5.hexdigest(kdata.unpack('m*').first).scan(/../).join(':') 29 | end 30 | end 31 | 32 | save 33 | end 34 | end 35 | 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /lib/dap/filter/vulnmatch.rb: -------------------------------------------------------------------------------- 1 | require_relative '../../../data/vulndb' 2 | 3 | module Dap 4 | module Filter 5 | 6 | module BaseVulnMatch 7 | def search(hash, service) 8 | SEARCHES[service].each do | entry | 9 | entry[:regex].each do | regex, value | 10 | if regex =~ 
hash[entry[:hash_key]].force_encoding('BINARY') 11 | # Handle cases that could be multiple hits, not for upnp but could be others. 12 | hash[entry[:output_key]] = ( hash[entry[:output_key]] ? hash[entry[:output_key]] + value : value ) 13 | end 14 | end if hash[entry[:hash_key]] 15 | end 16 | hash 17 | end 18 | 19 | def lookup(hash, service) 20 | SEARCHES[service].each do | entry | 21 | if hash[entry[:hash_key]] 22 | res = entry[:cvemap][hash[entry[:hash_key]]] 23 | if res 24 | hash[entry[:output_key]] = res 25 | end 26 | end 27 | end 28 | hash 29 | end 30 | end 31 | 32 | class FilterVulnMatchUPNP 33 | include Base 34 | include BaseVulnMatch 35 | 36 | def process(doc) 37 | doc = search(doc, :upnp) 38 | [ doc ] 39 | end 40 | end 41 | 42 | class FilterVulnMatchIPMI 43 | include Base 44 | include BaseVulnMatch 45 | 46 | def process(doc) 47 | doc = search(doc, :ipmi) 48 | 49 | if (doc['data.ipmi_user_non_null'] == "0") && (doc['data.ipmi_user_null'] == "0") 50 | doc["vulnerability"] = ( doc["vulnerability"] ? doc["vulnerability"] + ["IPMI-ANON"] : ["IPMI-ANON"] ) 51 | end 52 | 53 | [ doc ] 54 | end 55 | end 56 | 57 | class FilterVulnMatchMSSQL 58 | include Base 59 | include BaseVulnMatch 60 | 61 | def process(doc) 62 | doc = lookup(doc, :mssql) 63 | [ doc ] 64 | end 65 | end 66 | 67 | class FilterVulnMatchHTTP 68 | include Base 69 | include BaseVulnMatch 70 | 71 | def check_shellshock(doc) 72 | if not doc["http.headers"] 73 | return [] 74 | end 75 | 76 | h = doc["http.headers"] 77 | sspattern = /\(\)\s*{\s*:;\s*};/ 78 | 79 | if h["user-agent"] and h["user-agent"] =~ sspattern 80 | return ['VULN-SHELLSHOCK', 'CVE-2014-6271'] 81 | end 82 | 83 | if h["referrer"] and h["referrer"] =~ sspattern 84 | return ['VULN-SHELLSHOCK', 'CVE-2014-6271'] 85 | end 86 | 87 | return [] 88 | end 89 | 90 | def check_elastic(doc) 91 | if not doc['http.path'] 92 | return [] 93 | end 94 | if not doc['http.path'] == '/_search' 95 | return [] 96 | end 97 | 98 | input = doc['http.url'] 99 | if doc['http.method'] == "POST" 100 | input = doc['http.body'] 101 | end 102 | 103 | if not input.match("script_fields") 104 | return [] 105 | end 106 | 107 | out = ['VULN-ELASTICSEARCH-RCE', 'CVE-2014-3120'] 108 | if input.match("Runtime") and input.match("getRuntime()") 109 | out += ["EXEC-SHELLCMD"] 110 | end 111 | 112 | if input.match("FileOutputStream") and input.match("URLClassLoader") 113 | out += ["EXEC-JAVA-CLASS"] 114 | end 115 | 116 | if input.match("getDeclaredConstructor") 117 | out += ['CVE-2015-1427'] 118 | end 119 | 120 | if input.match("metasploit.Payload") 121 | out += ['METASPLOIT'] 122 | end 123 | 124 | return out 125 | end 126 | 127 | def process(doc) 128 | vulns = [] 129 | if doc['vulnerability'] 130 | vulns |= doc['vulnerability'] 131 | end 132 | 133 | vulns |= check_elastic(doc) 134 | vulns |= check_shellshock(doc) 135 | 136 | # see vulndb.rb, allows for simple matches to be added quickly 137 | SEARCHES[:http].each do | entry | 138 | success = true 139 | 140 | # all matches must go through 141 | entry[:match].each do | k, v | 142 | if not doc[k] 143 | success = false 144 | else 145 | m = doc[k].match(v) 146 | if not m 147 | success = false 148 | end 149 | end 150 | 151 | if not success 152 | break 153 | end 154 | end 155 | 156 | if success 157 | vulns |= entry[:cve] 158 | end 159 | end 160 | 161 | if vulns != [] 162 | doc['vulnerability'] = vulns 163 | end 164 | 165 | [ doc ] 166 | end 167 | end 168 | 169 | class FilterGenericSetMatch 170 | include Base 171 | attr_accessor :matchset 172 | 173 | def 
initialize(args) 174 | self.opts = {} 175 | args.each do |arg| 176 | k,v = arg.split("=", 2) 177 | self.opts[k] = v 178 | end 179 | self.name = Dap::Factory.name_from_class(self.class) 180 | 181 | fail "Expected key and set arguments to #{self.name} but got #{self.opts}" unless self.opts.has_key?("key") and self.opts.has_key?("set") 182 | 183 | self.matchset = {} 184 | File.readlines(self.opts["set"]).each do |line| 185 | self.matchset[line.chomp] = nil 186 | end 187 | end 188 | 189 | def process(doc) 190 | if doc.has_key?(self.opts["key"]) 191 | if doc[self.opts["key"]].kind_of?(Array) 192 | doc[self.opts["key"]].each do |val| 193 | if self.matchset.has_key?(val) 194 | return [ doc ] 195 | end 196 | end 197 | else 198 | if self.matchset.has_key?(doc[self.opts["key"]]) 199 | return [ doc ] 200 | end 201 | end 202 | end 203 | [ ] 204 | end 205 | end 206 | 207 | end 208 | end -------------------------------------------------------------------------------- /lib/dap/input.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Input 3 | 4 | require 'oj' 5 | 6 | # 7 | # Error codes for failed reads 8 | # 9 | module Error 10 | EOF = :eof 11 | Empty = :empty 12 | InvalidFormat = :invalid 13 | end 14 | 15 | module FileSource 16 | 17 | attr_accessor :fd 18 | 19 | def open(file_name) 20 | close 21 | self.fd = ['-', 'stdin', nil].include?(file_name) ? 22 | $stdin : ::File.open(file_name, "rb") 23 | end 24 | 25 | def close 26 | self.fd.close if self.fd 27 | self.fd = nil 28 | end 29 | end 30 | 31 | # 32 | # Line Input 33 | # 34 | class InputLines 35 | 36 | include FileSource 37 | 38 | def initialize(args) 39 | self.open(args.first) 40 | end 41 | 42 | def read_record 43 | line = self.fd.readline rescue nil 44 | return Error::EOF unless line 45 | { 'line' => line.chomp("\n") } 46 | end 47 | 48 | end 49 | 50 | # 51 | # JSON Input (line-delimited records) 52 | # 53 | class InputJSON 54 | 55 | include FileSource 56 | 57 | def initialize(args) 58 | self.open(args.first) 59 | end 60 | 61 | def read_record 62 | line = self.fd.readline rescue nil 63 | return Error::EOF unless line 64 | begin 65 | json = Oj.load(line.strip, mode: :strict) 66 | rescue 67 | $stderr.puts "Record is not valid JSON and will be skipped: '#{line.chomp}'" 68 | return Error::InvalidFormat 69 | end 70 | return Error::Empty unless json 71 | json 72 | end 73 | 74 | end 75 | 76 | end 77 | end 78 | 79 | require 'dap/input/warc' 80 | require 'dap/input/csv' 81 | -------------------------------------------------------------------------------- /lib/dap/input/csv.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Input 3 | 4 | require 'csv' 5 | 6 | # 7 | # CSV 8 | # 9 | class InputCSV 10 | 11 | include FileSource 12 | 13 | attr_accessor :has_header, :headers 14 | 15 | def initialize(args) 16 | self.headers = [] 17 | 18 | fname = args.shift 19 | self.open(fname) 20 | 21 | args.each do |arg| 22 | if arg =~ /^header=(.*)/ 23 | val = $1 24 | self.has_header = !! (val =~ /^y|t|1/i) 25 | end 26 | end 27 | 28 | if self.has_header 29 | data = read_record 30 | unless (data == :eof or data == :empty) 31 | self.headers = data.values.map{|x| x.to_s.strip } 32 | end 33 | end 34 | end 35 | 36 | def read_record 37 | res = {} 38 | line = self.fd.readline rescue nil 39 | return Error::EOF unless line 40 | line.force_encoding('BINARY') 41 | 42 | # Short-circuit the slow CSV parser if the data does not contain double quotes 43 | arr = line.index('"') ?
44 | ( CSV.parse(line) rescue nil ) : 45 | [ line.split(',').map{|x| x.strip } ] 46 | 47 | return Error::Empty unless arr 48 | cnt = 0 49 | arr.first.each do |x| 50 | cnt += 1 51 | if x.to_s.length > 0 52 | res[headers[cnt-1] || cnt.to_s] = x 53 | end 54 | end 55 | res 56 | end 57 | 58 | end 59 | 60 | end 61 | end 62 | -------------------------------------------------------------------------------- /lib/dap/input/warc.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Input 3 | 4 | # 5 | # WARC 6 | # 7 | class InputWARC 8 | 9 | include FileSource 10 | 11 | attr_accessor :header, :info 12 | 13 | def initialize(args) 14 | self.open(args.first) 15 | read_warc_header 16 | end 17 | 18 | def read_warc_header 19 | self.header = read_record 20 | 21 | if self.header == Error::EOF 22 | raise RuntimeError, "Invalid WARC header" 23 | end 24 | 25 | unless self.header['warc_type'].to_s == "warcinfo" 26 | raise RuntimeError, "Invalid WARC header (missing warcinfo)" 27 | end 28 | 29 | self.info = {} 30 | self.header['content'].to_s.split("\n").each do |line| 31 | k, v = line.strip.split(/\s*:\s*/, 2) 32 | next unless v 33 | self.info[k] = v 34 | end 35 | end 36 | 37 | def read_record 38 | begin 39 | 40 | version = self.fd.readline 41 | unless version and version =~ /^WARC\/\d+\.\d+/ 42 | return Error::EOF 43 | end 44 | warc = {} 45 | 46 | loop do 47 | line = self.fd.readline 48 | 49 | unless line.strip.length == 0 50 | k, v = line.strip.split(/\s*:\s*/, 2) 51 | k = k.downcase.gsub('-', '_') 52 | warc[k] = v.to_s 53 | next 54 | end 55 | 56 | unless warc['content_length'] 57 | return Error::EOF 58 | end 59 | 60 | warc['content'] = self.fd.read(warc['content_length'].to_i) 61 | skip = self.fd.readline 62 | skip = self.fd.readline 63 | 64 | unless skip.strip.length == 0 65 | return Error::EOF 66 | end 67 | 68 | break 69 | end 70 | 71 | return warc 72 | 73 | rescue ::EOFError 74 | return Error::EOF 75 | end 76 | end 77 | 78 | end 79 | 80 | end 81 | end -------------------------------------------------------------------------------- /lib/dap/output.rb: -------------------------------------------------------------------------------- 1 | require 'oj' 2 | require 'csv' 3 | 4 | 5 | module Dap 6 | module Output 7 | 8 | 9 | module FileDestination 10 | 11 | attr_accessor :fd 12 | 13 | def open(file_name) 14 | close 15 | self.fd = ['-', 'stdout', nil].include?(file_name) ? 16 | $stdout : ::File.open(file_name, "wb") 17 | end 18 | 19 | def close 20 | self.fd.close if self.fd 21 | self.fd = nil 22 | end 23 | 24 | # Overload this to add headers 25 | def start 26 | end 27 | 28 | # Overload this to add footers 29 | def stop 30 | end 31 | 32 | # String sanitizer for UTF-8 33 | def sanitize(o) 34 | 35 | # Handle strings 36 | if o.kind_of? ::String 37 | return o.to_s.encode(o.encoding, "UTF-8", :invalid => :replace, :undef => :replace, :replace => '') 38 | end 39 | 40 | # Handle hashes 41 | if o.kind_of? ::Hash 42 | r = {} 43 | o.each_pair do |k,v| 44 | k = sanitize(k) 45 | v = sanitize(v) 46 | r[k] = v 47 | end 48 | return r 49 | end 50 | 51 | # Handle arrays 52 | if o.kind_of?
::Array 53 | return o.map{|x| sanitize(x) } 54 | end 55 | 56 | # Leave as-is 57 | o 58 | end 59 | end 60 | 61 | 62 | # 63 | # Line Output (CSV, TSV, etc) 64 | # XXX: Quoted field handling is not supported, CSV should be a new output type 65 | # 66 | class OutputLines 67 | 68 | attr_accessor :fields, :delimiter 69 | FIELD_WILDCARD = '_' 70 | 71 | include FileDestination 72 | 73 | def initialize(args) 74 | file = nil 75 | self.delimiter = "," 76 | self.fields = FIELD_WILDCARD 77 | 78 | header = false 79 | 80 | args.each do |str| 81 | k,v = str.split('=', 2) 82 | case k 83 | when 'file' 84 | file = v 85 | when 'header' 86 | header = ( v =~ /^[ty1]/i ? true : false ) 87 | when 'fields' 88 | self.fields = v.split(',') 89 | when 'delimiter' 90 | self.delimiter = 91 | case v.to_s 92 | when 'tab' 93 | "\t" 94 | when 'null' 95 | "\x00" 96 | else 97 | v 98 | end 99 | end 100 | end 101 | self.open(file) 102 | 103 | if header and not fields.include?(FIELD_WILDCARD) 104 | self.fd.puts self.fields.join(self.delimiter) 105 | self.fd.flush 106 | end 107 | 108 | end 109 | 110 | def write_record(doc) 111 | out = [] 112 | 113 | if self.fields.include?(FIELD_WILDCARD) 114 | doc.each_pair do |k,v| 115 | out << sanitize(v.to_s) 116 | end 117 | else 118 | self.fields.each do |k| 119 | out << sanitize(doc[k].to_s) 120 | end 121 | end 122 | 123 | return unless out.length > 0 124 | 125 | self.fd.puts out.join(self.delimiter) 126 | self.fd.flush 127 | end 128 | 129 | end 130 | 131 | # 132 | # JSON Output (line-delimited records) 133 | # 134 | class OutputJSON 135 | 136 | include FileDestination 137 | 138 | def initialize(args) 139 | self.open(args.first) 140 | end 141 | 142 | def write_record(doc) 143 | self.fd.puts Oj.dump(sanitize(doc), mode: :strict) 144 | self.fd.flush 145 | end 146 | 147 | end 148 | 149 | 150 | # 151 | # CSV Output 152 | # 153 | class OutputCSV 154 | 155 | attr_accessor :fields, :delimiter 156 | FIELD_WILDCARD = '_' 157 | 158 | include FileDestination 159 | 160 | def initialize(args) 161 | file = nil 162 | self.delimiter = "," 163 | self.fields = FIELD_WILDCARD 164 | 165 | header = false 166 | 167 | args.each do |str| 168 | k,v = str.split('=', 2) 169 | case k 170 | when 'file' 171 | file = v 172 | when 'header' 173 | header = ( v =~ /^[ty1]/i ? 
true : false ) 174 | when 'fields' 175 | self.fields = v.split(',') 176 | when 'delimiter' 177 | self.delimiter = 178 | case v.to_s 179 | when 'tab' 180 | "\t" 181 | when 'null' 182 | "\x00" 183 | else 184 | v 185 | end 186 | end 187 | end 188 | self.open(file) 189 | 190 | if header and not fields.include?(FIELD_WILDCARD) 191 | self.fd.puts self.fields.to_csv 192 | end 193 | 194 | end 195 | 196 | def write_record(doc) 197 | out = [] 198 | 199 | if self.fields.include?(FIELD_WILDCARD) 200 | doc.each_pair do |k,v| 201 | out << sanitize(v.to_s) 202 | end 203 | else 204 | self.fields.each do |k| 205 | out << sanitize(doc[k].to_s) 206 | end 207 | end 208 | 209 | return unless out.length > 0 210 | 211 | self.fd.puts out.to_csv 212 | end 213 | 214 | end 215 | 216 | end 217 | end 218 | -------------------------------------------------------------------------------- /lib/dap/proto/addp.rb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/lib/dap/proto/addp.rb -------------------------------------------------------------------------------- /lib/dap/proto/dtls.rb: -------------------------------------------------------------------------------- 1 | # -*- coding: binary -*- 2 | module Dap 3 | module Proto 4 | module DTLS 5 | 6 | 7 | class RecordLayer < BitStruct 8 | unsigned :content_type, 8, 'Content type' 9 | unsigned :version, 16, 'Version' 10 | unsigned :epoch, 16, 'Epoch' 11 | unsigned :sequence, 48, 'Sequence number' 12 | unsigned :payload_length, 16, 'Payload length' 13 | rest :payload 14 | 15 | def valid? 16 | payload_length == payload.length 17 | end 18 | end 19 | end 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /lib/dap/proto/ipmi.rb: -------------------------------------------------------------------------------- 1 | # -*- coding: binary -*- 2 | module Dap 3 | module Proto 4 | module IPMI 5 | 6 | class Channel_Auth_Reply < BitStruct 7 | 8 | unsigned :rmcp_version, 8, "RMCP Version" 9 | unsigned :rmcp_padding, 8, "RMCP Padding" 10 | unsigned :rmcp_sequence, 8, "RMCP Sequence" 11 | unsigned :rmcp_mtype, 1, "RMCP Message Type" 12 | unsigned :rmcp_class, 7, "RMCP Message Class" 13 | 14 | unsigned :session_auth_type, 8, "Session Auth Type" 15 | unsigned :session_sequence, 32, "Session Sequence Number" 16 | unsigned :session_id, 32, "Session ID" 17 | unsigned :message_length, 8, "Message Length" 18 | 19 | unsigned :ipmi_tgt_address, 8, "IPMI Target Address" 20 | unsigned :ipmi_tgt_lun, 8, "IPMI Target LUN" 21 | unsigned :ipmi_header_checksum, 8, "IPMI Header Checksum" 22 | unsigned :ipmi_src_address, 8, "IPMI Source Address" 23 | unsigned :ipmi_src_lun, 8, "IPMI Source LUN" 24 | unsigned :ipmi_command, 8, "IPMI Command" 25 | unsigned :ipmi_completion_code, 8, "IPMI Completion Code" 26 | 27 | unsigned :ipmi_channel, 8, "IPMI Channel" 28 | 29 | unsigned :ipmi_compat_20, 1, "IPMI Version Compatibility: IPMI 2.0+" 30 | unsigned :ipmi_compat_reserved1, 1, "IPMI Version Compatibility: Reserved 1" 31 | unsigned :ipmi_compat_oem_auth, 1, "IPMI Version Compatibility: OEM Authentication" 32 | unsigned :ipmi_compat_password, 1, "IPMI Version Compatibility: Straight Password" 33 | unsigned :ipmi_compat_reserved2, 1, "IPMI Version Compatibility: Reserved 2" 34 | unsigned :ipmi_compat_md5, 1, "IPMI Version Compatibility: MD5" 35 | unsigned :ipmi_compat_md2, 1, "IPMI Version Compatibility: MD2" 36 | unsigned :ipmi_compat_none, 1, "IPMI 
Version Compatibility: None" 37 | 38 | unsigned :ipmi_user_reserved1, 2, "IPMI User Compatibility: Reserved 1" 39 | unsigned :ipmi_user_kg, 1, "IPMI User Compatibility: KG Set to Default" 40 | unsigned :ipmi_user_disable_message_auth, 1, "IPMI User Compatibility: Disable Per-Message Authentication" 41 | unsigned :ipmi_user_disable_user_auth, 1, "IPMI User Compatibility: Disable User-Level Authentication" 42 | unsigned :ipmi_user_non_null, 1, "IPMI User Compatibility: Non-Null Usernames Enabled" 43 | unsigned :ipmi_user_null, 1, "IPMI User Compatibility: Null Usernames Enabled" 44 | unsigned :ipmi_user_anonymous, 1, "IPMI User Compatibility: Anonymous Login Enabled" 45 | 46 | unsigned :ipmi_conn_reserved1, 6, "IPMI Connection Compatibility: Reserved 1" 47 | unsigned :ipmi_conn_20, 1, "IPMI Connection Compatibility: 2.0" 48 | unsigned :ipmi_conn_15, 1, "IPMI Connection Compatibility: 1.5" 49 | 50 | unsigned :ipmi_oem_id, 24, "IPMI OEM ID", :endian => 'little' 51 | 52 | rest :ipm_oem_data, "IPMI OEM Data + Checksum Byte" 53 | 54 | 55 | def valid? 56 | (rmcp_version == 6) && (message_length == 16) 57 | end 58 | 59 | def to_banner 60 | info = self 61 | banner = "#{(info.ipmi_compat_20 == 1) ? "IPMI-2.0" : "IPMI-1.5"} " 62 | 63 | pass_info = [] 64 | pass_info << "oem_auth" if info.ipmi_compat_oem_auth == 1 65 | pass_info << "password" if info.ipmi_compat_password == 1 66 | pass_info << "md5" if info.ipmi_compat_md5 == 1 67 | pass_info << "md2" if info.ipmi_compat_md2 == 1 68 | pass_info << "null" if info.ipmi_compat_none == 1 69 | 70 | user_info = [] 71 | user_info << "kg_default" if (info.ipmi_compat_20 == 1 and info.ipmi_user_kg == 1) 72 | user_info << "auth_msg" unless info.ipmi_user_disable_message_auth == 1 73 | user_info << "auth_user" unless info.ipmi_user_disable_user_auth == 1 74 | user_info << "non_null_user" if info.ipmi_user_non_null == 1 75 | user_info << "null_user" if info.ipmi_user_null == 1 76 | user_info << "anonymous_user" if info.ipmi_user_anonymous == 1 77 | 78 | conn_info = [] 79 | conn_info << "1.5" if info.ipmi_conn_15 == 1 80 | conn_info << "2.0" if info.ipmi_conn_20 == 1 81 | 82 | if info.ipmi_oem_id != 0 83 | banner << "OEMID:#{info.ipmi_oem_id} " 84 | end 85 | 86 | banner << "UserAuth(#{user_info.join(", ")}) PassAuth(#{pass_info.join(", ")}) Level(#{conn_info.join(", ")}) " 87 | banner 88 | end 89 | 90 | end 91 | 92 | end 93 | end 94 | end 95 | -------------------------------------------------------------------------------- /lib/dap/proto/ldap.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Proto 3 | class LDAP 4 | 5 | # LDAPResult element resultCode lookup 6 | # Reference: https://tools.ietf.org/html/rfc4511#section-4.1.9 7 | # https://ldapwiki.willeke.com/wiki/LDAP%20Result%20Codes 8 | RESULT_DESC = { 9 | 0 => 'success', 10 | 1 => 'operationsError', 11 | 2 => 'protocolError', 12 | 3 => 'timeLimitExceeded', 13 | 4 => 'sizeLimitExceeded', 14 | 5 => 'compareFalse', 15 | 6 => 'compareTrue', 16 | 7 => 'authMethodNotSupported', 17 | 8 => 'strongerAuthRequired', 18 | 9 => 'reserved', 19 | 10 => 'referral', 20 | 11 => 'adminLimitExceeded', 21 | 12 => 'unavailableCriticalExtension', 22 | 13 => 'confidentialityRequired', 23 | 14 => 'saslBindInProgress', 24 | 16 => 'noSuchAttribute', 25 | 17 => 'undefinedAttributeType', 26 | 18 => 'inappropriateMatching', 27 | 19 => 'constraintViolation', 28 | 20 => 'attributeOrValueExists', 29 | 21 => 'invalidAttributeSyntax', 30 | 32 => 'noSuchObject', 31 | 34 => 
'invalidDNSyntax', 32 | 48 => 'inappropriateAuthentication', 33 | 49 => 'invalidCredentials', 34 | 50 => 'insufficientAccessRights', 35 | 51 => 'busy', 36 | 52 => 'unavailable', 37 | 53 => 'unwillingToPerform', 38 | 64 => 'namingViolation', 39 | 80 => 'other', 40 | 82 => 'localError (client response)', 41 | 94 => 'noResultsReturned (client response)', 42 | } 43 | 44 | # 45 | # Parse an ASN1 element and extract its length. 46 | # See the BER length section here: 47 | # https://blogs.oracle.com/directorymanager/entry/a_quick_introduction_to_asn 48 | # 49 | # @param data [String] Binary string containing ASN1 element(s) 50 | # @return [Integer, nil] Total length of the ASN1 element, nil on error 51 | # 52 | def self.decode_elem_length(data) 53 | return unless data.length > 2 54 | 55 | # Element content starts after the tag byte and the length field 56 | elem_start = 2 57 | 58 | # Unpack the second byte as an integer 59 | length = data.byteslice(1).unpack('C')[0] 60 | 61 | if length > 127 62 | # Length will take more than one byte to store 63 | len_bytes = length - 128 64 | return unless data.length > len_bytes + 2 65 | 66 | # This shouldn't happen... 67 | return unless len_bytes > 0 68 | 69 | length = 0 70 | len_bytes.times do |i| 71 | temp_len = data.byteslice(2 + i).unpack('C')[0] 72 | length = ( length << 8 ) + temp_len 73 | end 74 | 75 | elem_start += len_bytes 76 | end 77 | 78 | elem_start + length 79 | end 80 | 81 | # 82 | # Split binary string into ASN1 elements. 83 | # 84 | # @param data [String] Binary string containing raw response from LDAP server 85 | # @return [Array] Array of binary strings containing ASN1 elements 86 | # 87 | def self.split_messages(data) 88 | messages = [] 89 | return messages unless data.length > 2 90 | pos = 0 91 | while pos < data.length 92 | break unless data.byteslice(pos) == '0' 93 | elem_len = Dap::Proto::LDAP.decode_elem_length(data.byteslice(pos..data.length - 1)) 94 | break unless elem_len 95 | 96 | # Sanity check and then carve out the current element 97 | if data.length >= elem_len + pos 98 | current_elem = data.byteslice(pos, elem_len) 99 | messages.push(current_elem) 100 | end 101 | pos += elem_len 102 | end 103 | messages 104 | end 105 | 106 | # 107 | # Parse an LDAPResult (not SearchResult) ASN.1 structure 108 | # Reference: https://tools.ietf.org/html/rfc4511#section-4.1.9 109 | # 110 | # @param ldap_result [OpenSSL::ASN1::ASN1Data] LDAPResult structure 111 | # @return [Hash] Hash containing decoded LDAP response 112 | # 113 | def self.parse_ldapresult(ldap_result) 114 | results = {} 115 | 116 | # Sanity check the result code element 117 | if ldap_result.value[0] && ldap_result.value[0].value 118 | code_elem = ldap_result.value[0] 119 | return results unless code_elem.tag == 10 && code_elem.tag_class == :UNIVERSAL 120 | results['resultCode'] = code_elem.value.to_i 121 | end 122 | 123 | # These are probably safe if the resultCode validates 124 | results['resultDesc'] = RESULT_DESC[ results['resultCode'] ] if results['resultCode'] 125 | results['resultMatchedDN'] = ldap_result.value[1].value if ldap_result.value[1] && ldap_result.value[1].value 126 | results['resultdiagMessage'] = ldap_result.value[2].value if ldap_result.value[2] && ldap_result.value[2].value 127 | 128 | # Handle optional elements that may be returned by certain 129 | # LDAP application messages 130 | ldap_result.value.each do |element| 131 | next unless element.tag_class && element.tag && element.value 132 | next unless element.tag_class == :CONTEXT_SPECIFIC 133 | 134 | case element.tag 135 |
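# Optional LDAPResult fields use these context-specific tags
# (RFC 4511): 3 = referral, 7 = serverSaslCreds (BindResponse),
# 10 = responseName and 11 = responseValue (ExtendedResponse)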
when 3 136 | results['referral'] = element.value 137 | when 7 138 | results['serverSaslCreds'] = element.value 139 | when 10 140 | results['responseName'] = element.value 141 | when 11 142 | results['responseValue'] = element.value 143 | end 144 | end 145 | 146 | results 147 | end 148 | 149 | # 150 | # Parse an LDAP response message (SearchResultEntry, SearchResultDone, BindResponse, etc.). 151 | # 152 | # @param data [OpenSSL::ASN1::Sequence] LDAP message to parse 153 | # @return [Array] Array containing 154 | # result_type - Message type (SearchResultEntry, SearchResultDone, etc.) 155 | # results - Hash containing nested decoded LDAP response 156 | # 157 | def self.parse_message(data) 158 | # RFC 4511 - Section 4.5.2 159 | 160 | result_type = '' 161 | results = {} 162 | 163 | unless data.class == OpenSSL::ASN1::Sequence 164 | result_type = 'Error' 165 | results['errorMessage'] = 'parse_message: Message is not of type OpenSSL::ASN1::Sequence' 166 | return [result_type, results] 167 | end 168 | 169 | unless data.value && data.value.length > 1 170 | result_type = 'Error' 171 | results['errorMessage'] = 'parse_message: Invalid LDAP response (Empty Sequence)' 172 | return [result_type, results] 173 | end 174 | 175 | if data.value[1].tag == 4 176 | # SearchResultEntry found 177 | result_type = 'SearchResultEntry' 178 | if data.value[1].value[0].tag == 4 179 | results['objectName'] = data.value[1].value[0].value 180 | end 181 | 182 | if data.value[1].value[1] 183 | attrib_hash = {} 184 | 185 | # Handle PartialAttributeValues 186 | data.value[1].value[1].each do |partial_attrib| 187 | 188 | value_array = [] 189 | attrib_type = partial_attrib.value[0].value 190 | 191 | partial_attrib.value[1].each do |part_attrib_value| 192 | value_array.push(part_attrib_value.value) 193 | end 194 | 195 | attrib_hash[attrib_type] = value_array 196 | end 197 | 198 | results['PartialAttributes'] = attrib_hash 199 | end 200 | 201 | elsif data.value[1] && data.value[1].tag == 5 202 | # SearchResultDone found 203 | result_type = 'SearchResultDone' 204 | ldap_result = data.value[1] 205 | 206 | if ldap_result.value[0] && ldap_result.value[0].class == OpenSSL::ASN1::Sequence 207 | # Encoding of the SearchResultDone seems to vary, this is the RFC format 208 | # of an LDAPResult ASN.1 structure in which the data is contained in a 209 | # Sequence 210 | results = parse_ldapresult(ldap_result.value[0]) 211 | elsif ldap_result.value[0] 212 | # LDAPResult w/o outer Sequence wrapper, used by MS Windows 213 | results = parse_ldapresult(ldap_result) 214 | end 215 | if data.value[2] && data.value[2].tag == 10 216 | # Unknown structure for providing a response, looks like LDAPResult 217 | # but placed at a higher level in the response, salvage what we can 218 | results['resultCode'] = data.value[2].value.to_i if data.value[2].value 219 | results['resultDesc'] = RESULT_DESC[ results['resultCode'] ] if results['resultCode'] 220 | results['resultMatchedDN'] = data.value[3].value if data.value[3] && data.value[3].value 221 | results['resultdiagMessage'] = data.value[4].value if data.value[4] && data.value[4].value 222 | end 223 | 224 | elsif data.value[1] && data.value[1].tag == 1 225 | result_type = 'BindResponse' 226 | results = parse_ldapresult(data.value[1]) 227 | 228 | elsif data.value[1] && data.value[1].tag == 2 229 | result_type = 'UnbindRequest' 230 | 231 | elsif data.value[1] && data.value[1].tag == 3 232 | # There is no legitimate use of application tag 3 233 | # in this context per RFC 4511. Try to figure 234 | # out what the intent is.
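# (An LDAPResult leads with an ENUMERATED resultCode, tag 10, and
# carries OCTET STRING fields, tag 4; the check below uses that
# shape to spot a probable mis-tagged BindResponse.)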
235 | resp_data = data.value[1] 236 | if resp_data.value[0].tag == 10 && resp_data.value[2].tag == 4 237 | # Probably an incorrectly tagged BindResponse 238 | result_type = 'BindResponse' 239 | results = parse_ldapresult(resp_data) 240 | else 241 | result_type = 'UnhandledTag' 242 | results['tagNumber'] = data.value[1].tag.to_i if data.value[1].tag 243 | end 244 | 245 | elsif data.value[1] && data.value[1].tag == 24 246 | result_type = 'ExtendedResponse' 247 | results = parse_ldapresult(data.value[1]) 248 | 249 | else 250 | # Unhandled tag 251 | result_type = 'UnhandledTag' 252 | results['tagNumber'] = data.value[1].tag.to_i if data.value[1].tag 253 | end 254 | 255 | [result_type, results] 256 | end 257 | 258 | 259 | end 260 | 261 | end 262 | end 263 | -------------------------------------------------------------------------------- /lib/dap/proto/mssql.rb: -------------------------------------------------------------------------------- 1 | # -*- coding: binary -*- 2 | module Dap 3 | module Proto 4 | module MSSQL 5 | 6 | # 7 | # Data condensed from http://sqlserverbuilds.blogspot.com/ 8 | # Given a version like 8.00.2039, this data structure allows 9 | # us to determine that the year version is 2000 sp4. 10 | # The version_num_to_name method implements this conversion. 11 | # 12 | MSSQL_VERSIONS = { 13 | '7.00'=> { 14 | :year=>'7.0', 15 | :service_packs=> { 16 | 623=>'-', 17 | 699=>'sp1', 18 | 842=>'sp2', 19 | 961=>'sp3', 20 | 1063=>'sp4' 21 | } 22 | }, 23 | '8.00'=> { 24 | :year=>'2000', 25 | :service_packs=> { 26 | 194=>'-', 27 | 384=>'sp1', 28 | 534=>'sp2', 29 | 760=>'sp3', 30 | 2039=>'sp4' 31 | } 32 | }, 33 | '9.00'=> { 34 | :year=>'2005', 35 | :service_packs=> { 36 | 1399=>'-', 37 | 2047=>'sp1', 38 | 3042=>'sp2', 39 | 4035=>'sp3', 40 | 5000=>'sp4' 41 | } 42 | }, 43 | '10.00'=> { 44 | :year=>'2008', 45 | :service_packs=> { 46 | 1600=>'-', 47 | 2531=>'sp1', 48 | 4000=>'sp2', 49 | 5500=>'sp3' 50 | } 51 | }, 52 | '10.50'=> { 53 | :year=>'2008', 54 | :service_packs=> { 55 | 1600=>'r2', 56 | 2500=>'r2 sp1', 57 | 4000=>'r2 sp2' 58 | } 59 | }, 60 | '11.00'=> { 61 | :year=>'2012', 62 | :service_packs=> { 63 | 2100=>'-', 64 | 3000=>'sp1' 65 | } 66 | }, 67 | '12.00'=> { 68 | :year=>'2014', 69 | :service_packs=> { 70 | 2000=>'-' 71 | } 72 | } 73 | } 74 | 75 | # 76 | # Given a XX.YY.ZZ[.AA] version, will attempt to get the sql server 77 | # year/service pack version for it. 
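# For example (illustrative, based on the table above):
#
#   version_num_to_name('9.00.3042') # => ['2005', 'sp2']
#   version_num_to_name('1.2.3')     # => [nil, nil]
#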
78 | def self.version_num_to_name(version) 79 | rx = /(\d+)\.(\d+)\.(\d+).*/ 80 | if version =~ rx 81 | v1 = $1.to_i 82 | v2 = $2.to_i 83 | v3 = $3.to_i 84 | else 85 | return [ nil, nil ] 86 | end 87 | key = sprintf("%d.%02d", v1, v2) 88 | svc_pack = nil 89 | year = nil 90 | if MSSQL_VERSIONS[key] 91 | year = MSSQL_VERSIONS[key][:year] 92 | svc_packs = MSSQL_VERSIONS[key][:service_packs] 93 | svc_packs.each do |k, v| 94 | # Exact build match, or a build below the oldest known release: 95 | # report this entry. Otherwise the build falls between known 96 | # releases, so keep the previous (lower) service pack and stop. 97 | if v3 <= k 98 | svc_pack = v if v3 == k || svc_pack.nil? 99 | break 100 | end 101 | svc_pack = v 102 | end 103 | end 104 | [ year, svc_pack ] 105 | end 106 | 107 | end 108 | end 109 | end 110 | -------------------------------------------------------------------------------- /lib/dap/proto/natpmp.rb: -------------------------------------------------------------------------------- 1 | # -*- coding: binary -*- 2 | module Dap 3 | module Proto 4 | module NATPMP 5 | 6 | # All responses must be exactly this size 7 | REQUIRED_SIZE = 12 8 | 9 | # http://tools.ietf.org/html/rfc6886#page-8 10 | class ExternalAddressResponse < BitStruct 11 | unsigned :version, 8, 'Version' # should always be 0 12 | unsigned :opcode, 8, 'opcode' # 0-128 request, 128+ response 13 | unsigned :result, 16, 'result code' # see Dap::Proto::NATPMP::ResultCode 14 | unsigned :epoch, 32, 'Time elapsed since port mapping table was initialized or reset' 15 | octets :external_ip, 32, 'External IPv4 address' 16 | end 17 | end 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /lib/dap/proto/wdbrpc.rb: -------------------------------------------------------------------------------- 1 | # -*- coding: binary -*- 2 | module Dap 3 | module Proto 4 | class WDBRPC 5 | 6 | def self.wdbrpc_checksum(data) 7 | sum = 0 8 | data.unpack("n*").each {|c| sum += c } 9 | sum = (sum & 0xffff) + (sum >> 16) 10 | (~sum) 11 | end 12 | 13 | def self.wdbrpc_decode_str(data) 14 | return if data.length < 4 15 | slen = data.slice!(0,4).unpack("N")[0] 16 | return "" if slen == 0 17 | while (slen % 4 != 0) 18 | slen += 1 19 | end 20 | 21 | data.slice!(0,slen).to_s.split("\x00")[0] 22 | end 23 | 24 | def self.wdbrpc_decode_int(data) 25 | return if data.length < 4 26 | data.slice!(0,4).unpack("N")[0] 27 | end 28 | 29 | def self.wdbrpc_decode_arr(data, dtype) 30 | return if data.length < 4 31 | res = [] 32 | 33 | alen = data.slice!(0,4).unpack("N")[0] 34 | return res if alen == 0 35 | 36 | 1.upto(alen) do |idx| 37 | case dtype 38 | when :int 39 | res << wdbrpc_decode_int(data) 40 | when :str 41 | res << wdbrpc_decode_str(data) 42 | when :bool 43 | res << wdbrpc_decode_bool(data) 44 | end 45 | end 46 | 47 | res 48 | end 49 | 50 | def self.wdbrpc_decode_bool(data) 51 | return if data.length < 4 52 | (data.slice!(0,4).unpack("N")[0] == 0) ?
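# Booleans are a 4-byte big-endian integer: zero decodes to false,
# anything else to true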
false : true 53 | end 54 | 55 | 56 | end 57 | end 58 | end -------------------------------------------------------------------------------- /lib/dap/utils/misc.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | module Utils 3 | module Misc 4 | 5 | def self.flatten_hash(h) 6 | ret = {} 7 | h.each_pair do |k,v| 8 | next unless k 9 | if v.is_a?(Hash) 10 | flatten_hash(v).each_pair do |fk,fv| 11 | ret["#{k}.#{fk}"] = fv.to_s 12 | end 13 | else 14 | ret[k.to_s] = v.to_s 15 | end 16 | end 17 | ret 18 | end 19 | 20 | end 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /lib/dap/version.rb: -------------------------------------------------------------------------------- 1 | module Dap 2 | VERSION = "1.3.1" 3 | end 4 | -------------------------------------------------------------------------------- /samples/http_get_reply.ic12.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/samples/http_get_reply.ic12.bz2 -------------------------------------------------------------------------------- /samples/http_get_reply.ic12.sh: -------------------------------------------------------------------------------- 1 | bzcat http_get_reply.ic12.bz2 | ../bin/dap lines + field_split_tab line + rename line.f1=ip line.f4=data + select ip data + transform data=qprintdecode + decode_http_reply data + select ip data.http_code data.http_server + json 2 | -------------------------------------------------------------------------------- /samples/http_get_reply_iframes.json.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/samples/http_get_reply_iframes.json.bz2 -------------------------------------------------------------------------------- /samples/http_get_reply_iframes.json.sh: -------------------------------------------------------------------------------- 1 | bzcat http_get_reply_iframes.json.bz2 | ../bin/dap json + transform data=base64decode + include data='<iframe' + html_iframes data + select ip iframe + json 2 | -------------------------------------------------------------------------------- /samples/http_get_reply_links.json.sh: -------------------------------------------------------------------------------- 1 | bzcat http_get_reply_iframes.json.bz2 | ../bin/dap json + transform data=base64decode + html_links data + select ip link element + decode_uri link + json 2 | -------------------------------------------------------------------------------- /samples/iawide.warc.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/samples/iawide.warc.bz2 -------------------------------------------------------------------------------- /samples/iawide_warc.sh: -------------------------------------------------------------------------------- 1 | bzcat iawide.warc.bz2 | ../bin/dap warc + html_links content + select ip link element + decode_uri link + json 2 | -------------------------------------------------------------------------------- /samples/ipmi_chan_auth_replies.crd.bz2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/samples/ipmi_chan_auth_replies.crd.bz2 -------------------------------------------------------------------------------- /samples/ipmi_chan_auth_replies.sh: -------------------------------------------------------------------------------- 1 | bzcat ipmi_chan_auth_replies.crd.bz2 | ../bin/dap lines + field_split_tab line + rename line.f2=ip line.f6=data + select ip data + transform data=hexdecode + decode_ipmi_chan_auth_reply data + remove data + json 2 | -------------------------------------------------------------------------------- /samples/ssl_certs.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/samples/ssl_certs.bz2 -------------------------------------------------------------------------------- /samples/ssl_certs_geo.sh: -------------------------------------------------------------------------------- 1 | bzcat ssl_certs.bz2 | ../bin/dap json + select host_ip ssl_version port cipher + geo_ip host_ip + json 2 | -------------------------------------------------------------------------------- /samples/ssl_certs_names.sh: -------------------------------------------------------------------------------- 1 | bzcat ssl_certs.bz2 | ../bin/dap json + field_split_array certs + transform certs.f1=base64decode + remove certs + decode_x509 certs.f1 + select certs.f1.names + exists certs.f1.names + split_array certs.f1.names + select certs.f1.names.item + exists certs.f1.names.item + lines 2 | -------------------------------------------------------------------------------- /samples/ssl_certs_names_expanded.sh: -------------------------------------------------------------------------------- 1 | bzcat ssl_certs.bz2 | ../bin/dap json + field_split_array certs + transform certs.f1=base64decode + remove certs + decode_x509 certs.f1 + select certs.f1.names + exists certs.f1.names + split_array certs.f1.names + select certs.f1.names.item + exists certs.f1.names.item + rename certs.f1.names.item=hostname + extract_hostname hostname + select hostname.hostname + split_domains hostname.hostname + select hostname.hostname.domain + rename hostname.hostname.domain=name + prepend_subdomains name=www,dns,mail,vpn,secure,ssl + json 2 | -------------------------------------------------------------------------------- /samples/ssl_certs_org.sh: -------------------------------------------------------------------------------- 1 | bzcat ssl_certs.bz2 | ../bin/dap json + select host_ip ssl_version port cipher + geo_ip_org host_ip + json 2 | -------------------------------------------------------------------------------- /samples/udp-netbios.csv.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/samples/udp-netbios.csv.bz2 -------------------------------------------------------------------------------- /samples/udp-netbios.sh: -------------------------------------------------------------------------------- 1 | bzcat udp-netbios.csv.bz2 | ../bin/dap csv - header=y + select saddr data + rename saddr=ip + transform data=hexdecode + decode_netbios_status_reply data + remove data + geo_ip ip + json 2 | -------------------------------------------------------------------------------- /spec/dap/filter/gquic_filter_spec.rb: -------------------------------------------------------------------------------- 1 | 
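# Exercises the GQUIC version extractor: a version-negotiation reply
# carries ASCII tags such as "Q044"; lookalike tokens such as "Qy6j"
# must be rejected.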
require "base64" 2 | 3 | describe Dap::Filter::FilterDecodeGquicVersionsResult do 4 | describe '.decode' do 5 | 6 | let(:filter) { described_class.new(['data']) } 7 | 8 | context 'testing gquic valid input base64 encoded output from the real world' do 9 | let(:decode) { filter.decode(Base64.decode64("DQAAAAECAwQFUTA0NFEwNDNRMDM5UTAzNQ=="))} 10 | it 'returns an hash w/ versions as list of versions' do 11 | expect(decode).to eq({"versions"=> ["Q044","Q043","Q039","Q035"]}) 12 | end 13 | end 14 | 15 | context 'testing gquic valid input artifical example' do 16 | let(:decode) { filter.decode("aaaaaaaaaQ044Q043Q039Q035")} 17 | it 'returns an hash w/ versions as list of versions' do 18 | expect(decode).to eq({"versions"=> ["Q044","Q043","Q039","Q035"]}) 19 | end 20 | end 21 | 22 | context 'testing gquic valid versions with invalid versions' do 23 | let(:decode) { filter.decode("aaaaaaaaaQ044R043R039Q035")} 24 | it 'returns an hash w/ versions as list of versions' do 25 | expect(decode).to eq({"versions"=> ["Q044", "Q035"]}) 26 | end 27 | end 28 | 29 | context 'testing valid string but not gquic versions' do 30 | let(:decode) { filter.decode("H044R043E039L035") } 31 | it 'returns nil' do 32 | expect(decode).to eq(nil) 33 | end 34 | end 35 | 36 | # do not want ["Qy6j","Qrta","Ql3T","QkKf","QTUB"] 37 | context 'testing valid string with Q in it but not gquic versions ' do 38 | let(:decode) { filter.decode("aaaaaaaaaQy6jQrtaQl3TQkKfQTUB") } 39 | it 'returns nil' do 40 | expect(decode).to eq(nil) 41 | end 42 | end 43 | 44 | context 'testing gquic empty string input' do 45 | let(:decode) { filter.decode("") } 46 | it 'returns nil' do 47 | expect(decode).to eq(nil) 48 | end 49 | end 50 | 51 | context 'testing gquic nil input' do 52 | let(:decode) { filter.decode(nil) } 53 | it 'returns nil' do 54 | expect(decode).to eq(nil) 55 | end 56 | end 57 | 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /spec/dap/filter/http_filter_spec.rb: -------------------------------------------------------------------------------- 1 | require 'zlib' 2 | 3 | describe Dap::Filter::FilterDecodeHTTPReply do 4 | describe '.decode' do 5 | 6 | let(:filter) { described_class.new(['data']) } 7 | 8 | 9 | context 'decoding non-HTTP response' do 10 | let(:decode) { filter.decode("This\r\nis\r\nnot\r\nHTTP\r\n\r\n") } 11 | it 'returns an empty hash' do 12 | expect(decode).to eq({}) 13 | end 14 | end 15 | 16 | context 'decoding uncompressed response' do 17 | let(:decode) { filter.decode("HTTP/1.0 200 OK\r\nHeader1: value1\r\nHow(}does<htTp=work?:itdoesn't\r\nHeader2: value2\r\nHEADER2: VALUE2\r\n\r\nstuff") } 18 | let(:decode_date) { filter.decode("HTTP/1.0 200 OK\r\nHeader1: value1\r\nHow(}does<htTp=work?:itdoesn't\r\nDate: Fri, 24 Mar 2017 15:34:04 GMT\r\nHEADER2: VALUE2\r\nLast-Modified: Fri, 24 Mar 2013 15:34:04 GMT\r\n\r\nstuff") } 19 | 20 | it 'correctly sets status code' do 21 | expect(decode['http_code']).to eq(200) 22 | end 23 | 24 | it 'correctly sets status message' do 25 | expect(decode['http_message']).to eq('OK') 26 | end 27 | 28 | it 'correctly sets body' do 29 | expect(decode['http_body']).to eq('stuff') 30 | end 31 | 32 | it 'correctly extracts http_raw_headers' do 33 | expect(decode['http_raw_headers']).to eq({'header1' => ['value1'], 'header2' => ['value2', 'VALUE2']}) 34 | end 35 | 36 | it 'extracts Date http header' do 37 | expect(decode_date['http_raw_headers']['date']).to eq(["Fri, 24 Mar 2017 15:34:04 GMT"]) 38 | expect(decode_date['http_date']).to 
eq("20170324T15:34:04+0000") 39 | end 40 | 41 | it 'extracts Last-Modified http header' do 42 | expect(decode_date['http_raw_headers']['last-modified']).to eq(["Fri, 24 Mar 2013 15:34:04 GMT"]) 43 | expect(decode_date['http_modified']).to eq("20130324T15:34:04+0000") 44 | end 45 | end 46 | 47 | context 'decoding binary response' do 48 | # this represents the HTTP response for an HTTP/1.1 request for 49 | # https://upload.wikimedia.org/wikipedia/commons/c/ca/1x1.png, which you 50 | # can replicate with something like 51 | # echo -n "HTTP/1.1 `lwp-request -sem GET https://upload.wikimedia.org/wikipedia/commons/c/ca/1x1.png`" | base64 52 | let(:decode) { filter.decode("SFRUUC8xLjEgMjAwIE9LCkNvbm5lY3Rpb246IGNsb3NlCkRhdGU6IFR1ZSwgMjggTWFyIDIwMTcgMTc6MTg6NTQgR01UClZpYTogMS4xIHZhcm5pc2gtdjQsIDEuMSB2YXJuaXNoLXY0LCAxLjEgdmFybmlzaC12NCwgMS4xIHZhcm5pc2gtdjQKQWNjZXB0LVJhbmdlczogYnl0ZXMKQWdlOiAxNDc2MTkKRVRhZzogNzFhNTBkYmJhNDRjNzgxMjhiMjIxYjdkZjdiYjUxZjEKQ29udGVudC1MZW5ndGg6IDk1CkNvbnRlbnQtVHlwZTogaW1hZ2UvcG5nCkxhc3QtTW9kaWZpZWQ6IFN1biwgMDYgT2N0IDIwMTMgMjM6NTQ6MjUgR01UCkFjY2Vzcy1Db250cm9sLUFsbG93LU9yaWdpbjogKgpBY2Nlc3MtQ29udHJvbC1FeHBvc2UtSGVhZGVyczogQWdlLCBEYXRlLCBDb250ZW50LUxlbmd0aCwgQ29udGVudC1SYW5nZSwgWC1Db250ZW50LUR1cmF0aW9uLCBYLUNhY2hlLCBYLVZhcm5pc2gKQ2xpZW50LURhdGU6IFR1ZSwgMjggTWFyIDIwMTcgMTc6MTg6NTQgR01UCkNsaWVudC1QZWVyOiAxOTguMzUuMjYuMTEyOjQ0MwpDbGllbnQtUmVzcG9uc2UtTnVtOiAxCkNsaWVudC1TU0wtQ2VydC1Jc3N1ZXI6IC9DPUJFL089R2xvYmFsU2lnbiBudi1zYS9DTj1HbG9iYWxTaWduIE9yZ2FuaXphdGlvbiBWYWxpZGF0aW9uIENBIC0gU0hBMjU2IC0gRzIKQ2xpZW50LVNTTC1DZXJ0LVN1YmplY3Q6IC9DPVVTL1NUPUNhbGlmb3JuaWEvTD1TYW4gRnJhbmNpc2NvL089V2lraW1lZGlhIEZvdW5kYXRpb24sIEluYy4vQ049Ki53aWtpcGVkaWEub3JnCkNsaWVudC1TU0wtQ2lwaGVyOiBBRVMxMjgtU0hBCkNsaWVudC1TU0wtU29ja2V0LUNsYXNzOiBJTzo6U29ja2V0OjpTU0wKQ2xpZW50LVNTTC1XYXJuaW5nOiBQZWVyIGNlcnRpZmljYXRlIG5vdCB2ZXJpZmllZApTdHJpY3QtVHJhbnNwb3J0LVNlY3VyaXR5OiBtYXgtYWdlPTMxNTM2MDAwOyBpbmNsdWRlU3ViRG9tYWluczsgcHJlbG9hZApUaW1pbmctQWxsb3ctT3JpZ2luOiAqClgtQW5hbHl0aWNzOiBodHRwcz0xO25vY29va2llcz0xClgtQ2FjaGU6IGNwMTA3MSBoaXQvOSwgY3AyMDIwIGhpdC8yLCBjcDQwMDUgbWlzcywgY3A0MDE1IGhpdC84ODg3ClgtQ2FjaGUtU3RhdHVzOiBoaXQKWC1DbGllbnQtSVA6IDE3My4xNy43OS41NApYLU9iamVjdC1NZXRhLVNoYTFiYXNlMzY6IDFxNG5hMXhqNnRvcHpsbjUxdHB6cXF4dGR0ZHdvOXAKWC1UaW1lc3RhbXA6IDEzODExMDM2NjQuMDg2NDMKWC1UcmFucy1JZDogdHgzZDZmMmU1NzQ0MGM0NzA4YTI0ZjgtMDA1OGQ4NWE1YgpYLVZhcm5pc2g6IDYwMTIxMjI1IDU4NDE4ODgsIDQ3NzczOTUyIDM4MjY4MDcwLCA3MDcxNjg5NiwgNDM2MzkyNTAwIDE4MjE4MjgwMwoKiVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5CYII".unpack("m*").first) } 53 | 54 | it 'correctly sets http_raw_body base64' do 55 | expect(decode['http_raw_body']).to eq('iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5CYII=') 56 | end 57 | end 58 | 59 | context 'decoding gzip compressed response' do 60 | let(:body) { 61 | io = StringIO.new 62 | io.set_encoding('ASCII-8BIT') 63 | gz = Zlib::GzipWriter.new(io) 64 | gz.write('stuff') 65 | gz.close 66 | io.string 67 | } 68 | let(:decode) { filter.decode("HTTP/1.0 200 OK\r\nContent-encoding: gzip\r\n\r\n#{body}") } 69 | 70 | it 'correctly decompresses body' do 71 | expect(decode['http_body']).to eq('stuff') 72 | end 73 | end 74 | 75 | context 'decoding valid chunked responses' do 76 | let(:body) { "5\r\nabcde\r\n0F\r\nfghijklmnopqrst\r\n06\r\nuvwxyz\r\n0\r\n" } 77 | let(:decode) { filter.decode("HTTP/1.0 200 OK\r\nTransfer-encoding: chunked\r\n\r\n#{body}\r\nSecret: magic\r\n") } 78 | 79 | it 
'correctly dechunks body' do 80 | expect(decode['http_body']).to eq(('a'..'z').to_a.join) 81 | end 82 | 83 | it 'finds normal headers' do 84 | expect(decode['http_raw_headers']['transfer-encoding']).to eq(%w(chunked)) 85 | end 86 | 87 | it 'finds trailing headers' do 88 | expect(decode['http_raw_headers']['secret']).to eq(%w(magic)) 89 | end 90 | end 91 | 92 | context 'decoding bogus chunked responses' do 93 | let(:body) { "5\r\nabcde\r\nFF\r\nfghijklmnopqrst\r\n06\r\n" } 94 | let(:decode) { filter.decode("HTTP/1.0 200 OK\r\nTransfer-encoding: chunked\r\n\r\n#{body}") } 95 | 96 | it 'reads the partial body' do 97 | expect(decode['http_body']).to eq(('a'..'e').to_a.join) 98 | end 99 | 100 | it 'finds normal headers' do 101 | expect(decode['http_raw_headers']['transfer-encoding']).to eq(%w(chunked)) 102 | end 103 | end 104 | 105 | context 'decoding truncated, chunked responses' do 106 | let(:body) { "5\r\nabcde\r\n0F\r\nfghijklmnopqrst\r\n06\r\n" } 107 | let(:decode) { filter.decode("HTTP/1.0 200 OK\r\nTransfer-encoding: chunked\r\n\r\n#{body}") } 108 | 109 | it 'reads the partial body' do 110 | expect(decode['http_body']).to eq(('a'..'t').to_a.join) 111 | end 112 | 113 | it 'finds normal headers' do 114 | expect(decode['http_raw_headers']['transfer-encoding']).to eq(%w(chunked)) 115 | end 116 | end 117 | 118 | context 'decoding responses that are missing the "reason phrase", an RFC anomaly' do 119 | let(:decode) { filter.decode("HTTP/1.1 301\r\nDate: Tue, 28 Mar 2017 20:46:52 GMT\r\nContent-Type: text/html\r\nContent-Length: 177\r\nConnection: close\r\nLocation: http://www.example.com/\r\n\r\nstuff") } 120 | 121 | it 'decodes anyway' do 122 | expect(decode['http_body']).to eq('stuff') 123 | end 124 | end 125 | 126 | end 127 | end 128 | 129 | describe Dap::Filter::FilterHTMLLinks do 130 | describe '.process' do 131 | 132 | let(:filter) { described_class.new(['data']) } 133 | 134 | context 'lowercase' do 135 | let(:processed) { filter.process({'data' => '<a href="a"/><a href="b"/>'}) } 136 | it 'extracted the correct links' do 137 | expect(processed.map { |p| p['link'] }).to eq(%w(a b)) 138 | end 139 | end 140 | 141 | context 'uppercase' do 142 | let(:processed) { filter.process({'data' => '<A HREF="a"/><A HREF="b"/>'}) } 143 | it 'extracted the correct links' do 144 | expect(processed.map { |p| p['link'] }).to eq(%w(a b)) 145 | end 146 | end 147 | 148 | context 'scattercase' do 149 | let(:processed) { filter.process({'data' => '<A HrEf="a"/><A HrEf="b"/>'}) } 150 | it 'extracted the correct links' do 151 | expect(processed.map { |p| p['link'] }).to eq(%w(a b)) 152 | end 153 | end 154 | 155 | context 'repeated less than symbol' do 156 | body = 157 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 158 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 159 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 160 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 161 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 162 | '<a href="a"/>'\ 163 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 164 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 165 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 166 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 167 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 168 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 169 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 170 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 171 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 
172 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 173 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 174 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 175 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 176 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 177 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 178 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 179 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 180 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 181 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 182 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 183 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 184 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 185 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 186 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 187 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 188 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 189 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 190 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 191 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 192 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 193 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 194 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 195 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 196 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 197 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 198 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 199 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 200 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 201 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 202 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 203 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 204 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 205 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 206 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 207 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 208 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 209 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 210 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 211 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 212 | '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'\ 213 | '<a href="b"/>' 214 | let(:processed) { filter.process({'data' => body}) } 215 | it 'extracted the correct links' do 216 | expect(processed.map { |p| p['link'] }).to eq(%w(a b)) 217 | end 218 | end 219 | end 220 | end 221 | -------------------------------------------------------------------------------- /spec/dap/filter/ldap_filter_spec.rb: -------------------------------------------------------------------------------- 1 | describe Dap::Filter::FilterDecodeLdapSearchResult do 2 | describe '.decode' do 3 | 4 | original = ['3030020107642b040030273025040b6f626a656374436c61'\ 5 | '737331160403746f70040f4f70656e4c444150726f6f7444'\ 6 | '5345300c02010765070a010004000400'] 7 | 8 | data = original.pack('H*') 9 | 10 | let(:filter) { described_class.new(['data']) } 11 | 12 | context 'testing full ldap response message' do 13 | let(:decode) { filter.decode(data) } 14 | it 'returns Hash as expected' do 15 | expect(decode.class).to eq(::Hash) 16 | end 17 | 18 | it 'returns expected value' do 19 | test_val = { 'SearchResultDone' => { 20 | 'resultCode' => 0, 21 | 'resultDesc' => 
'success', 22 | 'resultMatchedDN' => '', 23 | 'resultdiagMessage' => '' 24 | }, 25 | 'SearchResultEntry' => { 26 | 'objectName' => '', 27 | 'PartialAttributes' => { 28 | 'objectClass' => ['top', 'OpenLDAProotDSE'] 29 | } 30 | } } 31 | 32 | expect(decode).to eq(test_val) 33 | end 34 | end 35 | 36 | context 'testing invalid ldap response message' do 37 | let(:decode) { filter.decode('303030303030') } 38 | it 'returns error message as expected' do 39 | test_val = { 'Error' => { 40 | 'errorMessage' => 41 | 'FilterDecodeLdapSearchResult - Unable to parse response' } } 42 | expect(decode).to eq(test_val) 43 | end 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /spec/dap/filter/simple_filter_spec.rb: -------------------------------------------------------------------------------- 1 | describe Dap::Filter::FilterCopy do 2 | describe '.process' do 3 | 4 | let(:filter) { described_class.new(["foo=bar"]) } 5 | 6 | context 'copy one json field to another' do 7 | let(:process) { filter.process({"foo" => "bar"}) } 8 | it 'copies and leaves the original field' do 9 | expect(process).to eq([{"foo" => "bar", "bar" => "bar"}]) 10 | end 11 | end 12 | end 13 | end 14 | 15 | describe Dap::Filter::FilterFlatten do 16 | describe '.process' do 17 | 18 | let(:filter) { described_class.new(["foo"]) } 19 | 20 | context 'flatten nested json' do 21 | let(:process) { filter.process({"foo" => {"bar" => "baz"}}) } 22 | it 'has new flattened nested document keys' do 23 | expect(process).to eq([{"foo" => {"bar" => "baz"}, "foo.bar" => "baz"}]) 24 | end 25 | end 26 | 27 | context 'ignore unnested keys' do 28 | let(:process) { filter.process({"foo" => "bar"}) } 29 | it 'is the same as the original document' do 30 | expect(process).to eq([{"foo" => "bar"}]) 31 | end 32 | end 33 | end 34 | end 35 | 36 | describe Dap::Filter::FilterExpand do 37 | describe '.process' do 38 | 39 | let(:filter) { described_class.new(["foo"]) } 40 | 41 | context 'expand unnested json' do 42 | let(:process) { filter.process({"foo.bar" => "baz"}) } 43 | it 'has new expanded keys' do 44 | expect(process).to eq([{"foo" => {"bar" => "baz"}, "foo.bar" => "baz"}]) 45 | end 46 | end 47 | 48 | context 'ignore all but specified unnested json' do 49 | let(:process) { filter.process({"foo.bar" => "baz", "baf.blah" => "baz" }) } 50 | it 'has new expanded keys' do 51 | expect(process).to eq([{"foo" => {"bar" => "baz"}, "foo.bar" => "baz", "baf.blah" => "baz"}]) 52 | end 53 | end 54 | 55 | context 'ignore nested json' do 56 | let(:process) { filter.process({"foo" => "bar"}) } 57 | it 'is the same as the original document' do 58 | expect(process).to eq([{"foo" => "bar"}]) 59 | end 60 | end 61 | end 62 | end 63 | 64 | describe Dap::Filter::FilterRenameSubkeyMatch do 65 | describe '.process' do 66 | 67 | let(:filter) { described_class.new(['foo', '.', '_']) } 68 | 69 | context 'with subkeys' do 70 | let(:process) { filter.process({"foo" => {"bar.one" => "baz", "bar.two" => "baz"}, "foo.bar" => "baz", "bar" => {"bar.one" => "baz", "bar.two" => "baz"}}) } 71 | it 'renames keys as expected' do 72 | expect(process).to eq([{"foo" => {"bar_one" => "baz", "bar_two" => "baz"}, "foo.bar" => "baz", "bar" => {"bar.one" => "baz", "bar.two" => "baz"}}]) 73 | end 74 | end 75 | 76 | context 'without subkeys' do 77 | let(:process) { filter.process({"foo" => "bar", "foo.blah" => "blah", "foo.bar" => "baz"}) } 78 | it 'produces unchanged output without errors' do 79 | expect(process).to eq([{"foo" => "bar", "foo.blah" 
=> "blah", "foo.bar" => "baz"}]) 80 | end 81 | end 82 | end 83 | end 84 | 85 | describe Dap::Filter::FilterMatchRemove do 86 | describe '.process' do 87 | 88 | let(:filter) { described_class.new(["foo."]) } 89 | 90 | context 'with similar keys' do 91 | let(:process) { filter.process({"foo" => "bar", "foo.blah" => "blah", "foo.bar" => "baz"}) } 92 | it 'removes the expected keys' do 93 | expect(process).to eq([{"foo" => "bar"}]) 94 | end 95 | end 96 | end 97 | end 98 | 99 | describe Dap::Filter::FilterMatchSelect do 100 | describe '.process' do 101 | 102 | let(:filter) { described_class.new(["foo."]) } 103 | 104 | context 'with similar keys' do 105 | let(:process) { filter.process({"foo" => "bar", "foo.blah" => "blah", "foo.bar" => "baz"}) } 106 | it 'selects the expected keys' do 107 | expect(process).to eq([{"foo.blah" => "blah", "foo.bar" => "baz"}]) 108 | end 109 | end 110 | end 111 | end 112 | 113 | describe Dap::Filter::FilterSelect do 114 | describe '.process' do 115 | 116 | let(:filter) { described_class.new(["foo"]) } 117 | 118 | context 'with similar keys' do 119 | let(:process) { filter.process({"foo" => "bar", "foobar" => "blah"}) } 120 | it 'selects the expected keys' do 121 | expect(process).to eq([{"foo" => "bar"}]) 122 | end 123 | end 124 | end 125 | end 126 | 127 | describe Dap::Filter::FilterMatchSelectKey do 128 | describe '.process' do 129 | 130 | let(:filter) { described_class.new(["foo."]) } 131 | 132 | context 'with similar keys' do 133 | let(:process) { filter.process({"foo" => "bar", "foo.blah" => "blah", "foo.bar" => "baz"}) } 134 | it 'selects the expected keys' do 135 | expect(process).to eq([{"foo.blah" => "blah", "foo.bar" => "baz"}]) 136 | end 137 | end 138 | end 139 | end 140 | 141 | describe Dap::Filter::FilterMatchSelectValue do 142 | describe '.process' do 143 | 144 | let(:filter) { described_class.new(["ba"]) } 145 | 146 | context 'with similar keys' do 147 | let(:process) { filter.process({"foo" => "bar", "foo.blah" => "blah", "foo.bar" => "baz"}) } 148 | it 'selects the expected keys' do 149 | expect(process).to eq([{"foo" => "bar", "foo.bar" => "baz"}]) 150 | end 151 | end 152 | end 153 | end 154 | 155 | describe Dap::Filter::FilterTransform do 156 | describe '.process' do 157 | 158 | context 'invalid transform' do 159 | let(:filter) { described_class.new(['foo=blahblah']) } 160 | it 'fails' do 161 | expect { filter.process({'foo' => 'abc123'}) }.to raise_error(RuntimeError, /Invalid transform/) 162 | end 163 | end 164 | 165 | context 'reverse' do 166 | let(:filter) { described_class.new(['foo=reverse']) } 167 | 168 | context 'ASCII' do 169 | let(:process) { filter.process({'foo' => 'abc123'}) } 170 | it 'is reversed' do 171 | expect(process).to eq(['foo' => '321cba']) 172 | end 173 | end 174 | 175 | context 'UTF-8' do 176 | let(:process) { filter.process({'foo' => '☹☠'}) } 177 | it 'is reversed' do 178 | expect(process).to eq(['foo' => '☠☹']) 179 | end 180 | end 181 | end 182 | 183 | context 'int default' do 184 | let(:filter) { described_class.new(['val=int']) } 185 | 186 | context 'valid int' do 187 | let(:process) { filter.process({'val' => '1'}) } 188 | it 'is the correct int' do 189 | expect(process).to eq(['val' => 1]) 190 | end 191 | end 192 | 193 | context 'invalid int' do 194 | let(:process) { filter.process({'val' => 'cats'}) } 195 | it 'is the correct int' do 196 | expect(process).to eq(['val' => 0]) 197 | end 198 | end 199 | end 200 | 201 | context 'int different base' do 202 | let(:filter) { described_class.new(['val=int16']) } 203 | 
let(:process) { filter.process({'val' => 'FF'}) } 204 | 205 | it 'is the correct int' do 206 | expect(process).to eq(['val' => 255]) 207 | end 208 | end 209 | 210 | context 'float' do 211 | let(:filter) { described_class.new(['val=float']) } 212 | 213 | context 'valid float' do 214 | let(:process) { filter.process({'val' => '1.0'}) } 215 | it 'is the correct float' do 216 | expect(process).to eq(['val' => 1.0]) 217 | end 218 | end 219 | 220 | context 'invalid float' do 221 | let(:process) { filter.process({'val' => 'cats.0'}) } 222 | it 'is the correct float' do 223 | expect(process).to eq(['val' => 0.0]) 224 | end 225 | end 226 | end 227 | 228 | context 'json' do 229 | let(:filter) { described_class.new(['val=json']) } 230 | 231 | context 'valid json' do 232 | let(:process) { filter.process({'val' => '{"nested": "1"}'}) } 233 | it 'is the correct JSON' do 234 | expect(process).to eq(['val' => { 'nested' => '1' }]) 235 | end 236 | end 237 | 238 | context 'invalid json' do 239 | it 'raises on invalid JSON' do 240 | expect { filter.process({'val' => '{abc123'}) }.to raise_error(JSON::ParserError) 241 | end 242 | end 243 | end 244 | 245 | context 'stripping' do 246 | context 'lstrip' do 247 | let(:filter) { described_class.new(['foo=lstrip']) } 248 | let(:process) { filter.process({'foo' => ' abc123 '}) } 249 | it 'lstripped' do 250 | expect(process).to eq(['foo' => 'abc123 ']) 251 | end 252 | end 253 | 254 | context 'rstrip' do 255 | let(:filter) { described_class.new(['foo=rstrip']) } 256 | let(:process) { filter.process({'foo' => ' abc123 '}) } 257 | it 'rstripped' do 258 | expect(process).to eq(['foo' => ' abc123']) 259 | end 260 | end 261 | 262 | context 'strip' do 263 | let(:filter) { described_class.new(['foo=strip']) } 264 | let(:process) { filter.process({'foo' => ' abc123 '}) } 265 | it 'stripped' do 266 | expect(process).to eq(['foo' => 'abc123']) 267 | end 268 | end 269 | end 270 | end 271 | end 272 | 273 | describe Dap::Filter::FilterFieldReplace do 274 | describe '.process' do 275 | 276 | let(:filter) { described_class.new(["value1=foo=bar"]) } 277 | 278 | let(:process) { filter.process({"value1" => "foo.bar.foo", "value2" => "secret"}) } 279 | it 'replaced correctly' do 280 | expect(process).to eq([{"value1" => "bar.bar.foo", "value2" => "secret"}]) 281 | end 282 | end 283 | end 284 | 285 | describe Dap::Filter::FilterFieldReplaceAll do 286 | describe '.process' do 287 | 288 | let(:filter) { described_class.new(["value1=foo=bar"]) } 289 | 290 | let(:process) { filter.process({"value1" => "foo.bar.foo", "value2" => "secret"}) } 291 | it 'replaced correctly' do 292 | expect(process).to eq([{"value1" => "bar.bar.bar", "value2" => "secret"}]) 293 | end 294 | end 295 | end 296 | 297 | describe Dap::Filter::FilterFieldSplitPeriod do 298 | describe '.process' do 299 | 300 | let(:filter) { described_class.new(["value"]) } 301 | 302 | context 'splitting on period boundary' do 303 | let(:process) { filter.process({"value" => "foo.bar.baf"}) } 304 | it 'splits correctly' do 305 | expect(process).to eq([{"value" => "foo.bar.baf", "value.f1" => "foo", "value.f2" => "bar", "value.f3" => "baf"}]) 306 | end 307 | end 308 | end 309 | end 310 | 311 | describe Dap::Filter::FilterFieldSplitLine do 312 | describe '.process' do 313 | 314 | let(:filter) { described_class.new(["value"]) } 315 | 316 | context 'splitting on newline boundary' do 317 | let(:process) { filter.process({"value" => "foo\nbar\nbaf"}) } 318 | it 'splits correctly' do 319 | expect(process).to eq([{"value" => "foo\nbar\nbaf", 
"value.f1" => "foo", "value.f2" => "bar", "value.f3" => "baf"}]) 320 | end 321 | end 322 | end 323 | end 324 | -------------------------------------------------------------------------------- /spec/dap/filter/udp_filter_spec.rb: -------------------------------------------------------------------------------- 1 | describe Dap::Filter::FilterDecodeDNSVersionReply do 2 | describe '.decode' do 3 | 4 | let(:filter) { described_class.new([]) } 5 | 6 | context 'parsing empty string' do 7 | let(:decode) { filter.decode('') } 8 | it 'returns an empty hash' do 9 | expect(decode).to eq( {} ) 10 | end 11 | end 12 | 13 | base64_string = "AF8074UAAAEAAQABAAAHVkVSU0lPTgRCSU5EAAAQAAPADAAQAAMAAAAAACcmOS44LjJyYzEtUmVkSGF0LTkuOC4yLTAuMzcucmMxLmVsNl83LjXADAACAAMAAAAAAALADA==" 14 | test_string = base64_string.to_s.unpack('m*').first 15 | 16 | context 'parsing a partial response' do 17 | let(:decode) { filter.decode(test_string[2..10]) } 18 | it 'returns an empty hash' do 19 | expect(decode).to eq( {} ) 20 | end 21 | end 22 | 23 | context 'parsing TCP DNS response' do 24 | let(:decode) { filter.decode(test_string) } 25 | it 'returns the correct version' do 26 | expect(decode).to eq({ 'dns_version' => '9.8.2rc1-RedHat-9.8.2-0.37.rc1.el6_7.5' }) 27 | end 28 | end 29 | 30 | # strip the first two bytes from the TCP response to mimic a UDP response 31 | context 'parsing UDP DNS response' do 32 | let(:decode) { filter.decode(test_string[2..-1]) } 33 | it 'returns the correct version' do 34 | expect(decode).to eq({ 'dns_version' => '9.8.2rc1-RedHat-9.8.2-0.37.rc1.el6_7.5' }) 35 | end 36 | end 37 | 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /spec/dap/input/json_spec.rb: -------------------------------------------------------------------------------- 1 | describe Dap::Input::InputJSON do 2 | describe '.read_record' do 3 | context 'decoding input json' do 4 | let(:file_object) { double("fake file") } 5 | let(:input) { described_class.new(['data']) } 6 | let(:record) { input.read_record } 7 | it 'parses values starting with a colon (:) as a string' do 8 | allow(File).to receive(:open).with('data', 'rb').and_return(file_object) 9 | allow(file_object).to receive(:readline).and_return('{"a": ":b"}') 10 | expect(record['a']).to eq(":b") 11 | end 12 | end 13 | end 14 | end -------------------------------------------------------------------------------- /spec/dap/proto/ipmi_spec.rb: -------------------------------------------------------------------------------- 1 | describe Dap::Proto::IPMI::Channel_Auth_Reply do 2 | describe '.valid?' 
do 3 | 4 | context 'testing with valid rmcp version and message length' do 5 | it 'returns true as expected' do 6 | expect(described_class.new(rmcp_version: 6).valid?).to be false 7 | expect(described_class.new(message_length: 16).valid?).to be false 8 | expect(described_class.new(rmcp_version: 6, message_length: 16).valid?).to be true 9 | end 10 | end 11 | 12 | context 'testing with invalid data' do 13 | let(:reply) { described_class.new } 14 | 15 | it 'returns false as expected' do 16 | expect(reply.valid?).to be false 17 | end 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /spec/dap/proto/ldap_proto_spec.rb: -------------------------------------------------------------------------------- 1 | describe Dap::Proto::LDAP do 2 | subject { described_class } 3 | 4 | describe '.decode_elem_length' do 5 | context 'testing short form lengths (less than 128)' do 6 | data = ['301402'].pack('H*') 7 | 8 | let(:decode_len) { subject.decode_elem_length(data) } 9 | it 'returns an Integer' do 10 | expect(decode_len.class).to eq(::Integer) 11 | end 12 | it 'returns value correctly' do 13 | expect(decode_len).to eq(22) 14 | end 15 | end 16 | 17 | context 'testing long form lengths (128 and greater)' do 18 | data = ['308400000bc102010'].pack('H*') 19 | 20 | let(:decode_len) { subject.decode_elem_length(data) } 21 | it 'returns an Integer' do 22 | expect(decode_len.class).to eq(::Integer) 23 | end 24 | it 'returns value correctly' do 25 | expect(decode_len).to eq(3015) 26 | end 27 | end 28 | 29 | context 'testing with 3 byte length' do 30 | data = ['3083015e0802010764'].pack('H*') 31 | 32 | let(:decode_len) { subject.decode_elem_length(data) } 33 | it 'returns an Integer' do 34 | expect(decode_len.class).to eq(::Integer) 35 | end 36 | it 'returns value correctly' do 37 | expect(decode_len).to eq(89613) 38 | end 39 | end 40 | 41 | context 'testing invalid length' do 42 | data = ['308400000bc1'].pack('H*') 43 | 44 | let(:decode_len) { subject.decode_elem_length(data) } 45 | it 'returns nil as expected' do 46 | expect(decode_len).to eq(nil) 47 | end 48 | 49 | end 50 | end 51 | 52 | describe '.split_messages' do 53 | 54 | original = ['3030020107642b040030273025040b6f626a656374436c61'\ 55 | '737331160403746f70040f4f70656e4c444150726f6f7444'\ 56 | '5345300c02010765070a010004000400'] 57 | 58 | data = original.pack('H*') 59 | 60 | excessive_len = ['308480010000000000000000'].pack('H*') 61 | 62 | entry = ['3030020107642b040030273025040b6f626a656374436c6173'\ 63 | '7331160403746f70040f4f70656e4c444150726f6f74445345'] 64 | 65 | done = ['300c02010765070a010004000400'] 66 | 67 | context 'testing full message' do 68 | let(:split_messages) { subject.split_messages(data) } 69 | it 'returns Array as expected' do 70 | expect(split_messages.class).to eq(::Array) 71 | end 72 | 73 | it 'returns SearchResultEntry value as expected' do 74 | expect(split_messages[0].unpack('H*')).to eq(entry) 75 | end 76 | 77 | it 'returns SearchResultDone value as expected' do 78 | expect(split_messages[1].unpack('H*')).to eq(done) 79 | end 80 | end 81 | 82 | context 'testing invalid message' do 83 | let(:split_messages) { subject.split_messages('FF') } 84 | it 'returns Array as expected' do 85 | expect(split_messages.class).to eq(::Array) 86 | end 87 | end 88 | 89 | context 'testing short message' do 90 | let(:split_messages) { subject.split_messages('00') } 91 | it 'returns Array as expected' do 92 | expect(split_messages.class).to eq(::Array) 93 | end 94 | end 95 | 96 | context 'testing message
length greater than total data length' do 97 | let(:split_messages) { subject.split_messages(excessive_len) } 98 | it 'returns Array as expected' do 99 | expect(split_messages.class).to eq(::Array) 100 | end 101 | 102 | it 'returns empty Array as expected' do 103 | expect(split_messages).to eq([]) 104 | end 105 | end 106 | 107 | context 'testing empty ASN.1 Sequence' do 108 | hex = ['308400000000'] 109 | empty_seq = hex.pack('H*') 110 | 111 | let(:split_messages) { subject.split_messages(empty_seq) } 112 | it 'returns Array as expected' do 113 | expect(split_messages.class).to eq(::Array) 114 | end 115 | 116 | it 'returns empty Array as expected' do 117 | expect(split_messages).to eq([]) 118 | end 119 | end 120 | end 121 | 122 | describe '.parse_ldapresult' do 123 | 124 | context 'testing valid data' do 125 | hex = ['300c02010765070a010004000400'] 126 | data = OpenSSL::ASN1.decode(hex.pack('H*')) 127 | 128 | let(:parse_ldapresult) { subject.parse_ldapresult(data.value[1]) } 129 | it 'returns Hash as expected' do 130 | expect(parse_ldapresult.class).to eq(::Hash) 131 | end 132 | 133 | it 'returns results as expected' do 134 | test_val = { 'resultCode' => 0, 135 | 'resultDesc' => 'success', 136 | 'resultMatchedDN' => '', 137 | 'resultdiagMessage' => '' 138 | } 139 | expect(parse_ldapresult).to eq(test_val) 140 | end 141 | end 142 | 143 | context 'testing invalid data' do 144 | hex = ['300702010765020400'] 145 | data = OpenSSL::ASN1.decode(hex.pack('H*')) 146 | 147 | let(:parse_ldapresult) { subject.parse_ldapresult(data.value[1]) } 148 | it 'returns Hash as expected' do 149 | expect(parse_ldapresult.class).to eq(::Hash) 150 | end 151 | 152 | it 'returns empty Hash as expected' do 153 | test_val = {} 154 | expect(parse_ldapresult).to eq(test_val) 155 | end 156 | end 157 | 158 | end 159 | 160 | describe '.parse_message' do 161 | 162 | context 'testing SearchResultEntry' do 163 | hex = ['3030020107642b040030273025040b6f626a656374436c6173'\ 164 | '7331160403746f70040f4f70656e4c444150726f6f74445345'] 165 | data = OpenSSL::ASN1.decode(hex.pack('H*')) 166 | 167 | let(:parse_message) { subject.parse_message(data) } 168 | it 'returns Array as expected' do 169 | expect(parse_message.class).to eq(::Array) 170 | end 171 | 172 | it 'returns SearchResultEntry value as expected' do 173 | test_val = ['SearchResultEntry', { 174 | 'objectName' => '', 175 | 'PartialAttributes' => { 176 | 'objectClass' => [ 177 | 'top', 178 | 'OpenLDAProotDSE' 179 | ] 180 | } 181 | }] 182 | expect(parse_message).to eq(test_val) 183 | end 184 | end 185 | 186 | context 'testing SearchResultDone' do 187 | hex = ['300c02010765070a010004000400'] 188 | data = OpenSSL::ASN1.decode(hex.pack('H*')) 189 | 190 | let(:parse_message) { subject.parse_message(data) } 191 | it 'returns Array as expected' do 192 | expect(parse_message.class).to eq(::Array) 193 | end 194 | 195 | it 'returns SearchResultDone value as expected' do 196 | test_val = ['SearchResultDone', { 197 | 'resultCode' => 0, 198 | 'resultDesc' => 'success', 199 | 'resultMatchedDN' => '', 200 | 'resultdiagMessage' => '' 201 | }] 202 | expect(parse_message).to eq(test_val) 203 | end 204 | end 205 | 206 | context 'testing SearchResultDone - edge case #1' do 207 | hex = ['300802010765000a0101'] 208 | data = OpenSSL::ASN1.decode(hex.pack('H*')) 209 | 210 | let(:parse_message) { subject.parse_message(data) } 211 | it 'returns Array as expected' do 212 | expect(parse_message.class).to eq(::Array) 213 | end 214 | 215 | it 'returns operationsError as expected' do 216 | test_val =
['SearchResultDone', { 217 | 'resultCode' => 1, 218 | 'resultDesc' => 'operationsError' 219 | }] 220 | expect(parse_message).to eq(test_val) 221 | end 222 | end 223 | 224 | context 'testing UnhandledTag' do 225 | hex = ['300c02010767070a010004000400'] 226 | data = OpenSSL::ASN1.decode(hex.pack('H*')) 227 | 228 | let(:parse_message) { subject.parse_message(data) } 229 | it 'returns Array as expected' do 230 | expect(parse_message.class).to eq(::Array) 231 | end 232 | 233 | it 'returns UnhandledTag value as expected' do 234 | test_val = ['UnhandledTag', { 'tagNumber' => 7 }] 235 | expect(parse_message).to eq(test_val) 236 | end 237 | end 238 | 239 | context 'testing empty ASN.1 Sequence' do 240 | 241 | data = OpenSSL::ASN1::Sequence.new([]) 242 | 243 | let(:parse_message) { subject.parse_message(data) } 244 | it 'returns Array as expected' do 245 | expect(parse_message.class).to eq(::Array) 246 | end 247 | 248 | it 'returns error value as expected' do 249 | test_val = ['Error', { 250 | 'errorMessage' => 251 | 'parse_message: Invalid LDAP response (Empty Sequence)' 252 | }] 253 | expect(parse_message).to eq(test_val) 254 | end 255 | end 256 | 257 | end 258 | 259 | end 260 | -------------------------------------------------------------------------------- /spec/dap/utils/misc_spec.rb: -------------------------------------------------------------------------------- 1 | describe Dap::Utils::Misc do 2 | describe '.flatten_hash' do 3 | context 'with mixed nested data' do 4 | let(:test_hash) { {"foo0": "bar0", "foo1": {"bar1": "stuff", "more": 1}, "foo2": {"bar2": "stuff", "more": 1, "morestuff": {"foo1": "thing1"}}} } 5 | let(:expected_flat) { {'foo0'=>'bar0', 'foo1.bar1'=>'stuff', 'foo1.more'=>'1', 'foo2.bar2'=>'stuff', 'foo2.more'=>'1', 'foo2.morestuff.foo1'=>'thing1'} } 6 | let(:actual_flat) { Dap::Utils::Misc.flatten_hash(test_hash) } 7 | it 'flattens properly' do 8 | expect(actual_flat).to eq(expected_flat) 9 | end 10 | end 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'dap' 2 | 3 | # This file was generated by the `rspec --init` command. Conventionally, all 4 | # specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`. 5 | # The generated `.rspec` file contains `--require spec_helper` which will cause this 6 | # file to always be loaded, without a need to explicitly require it in any files. 7 | # 8 | # Given that it is always loaded, you are encouraged to keep this file as 9 | # light-weight as possible. Requiring heavyweight dependencies from this file 10 | # will add to the boot time of your test suite on EVERY test run, even for an 11 | # individual file that may not need all of that loaded. Instead, consider making 12 | # a separate helper file that requires the additional dependencies and performs 13 | # the additional setup, and require it from the spec files that actually need it. 14 | # 15 | # The `.rspec` file also contains a few flags that are not defaults but that 16 | # users commonly want. 17 | # 18 | # See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration 19 | RSpec.configure do |config| 20 | # rspec-expectations config goes here. You can use an alternate 21 | # assertion/expectation library such as wrong or the stdlib/minitest 22 | # assertions if you prefer. 23 | config.expect_with :rspec do |expectations| 24 | # This option will default to `true` in RSpec 4. 
It makes the `description` 25 | # and `failure_message` of custom matchers include text for helper methods 26 | # defined using `chain`, e.g.: 27 | # be_bigger_than(2).and_smaller_than(4).description 28 | # # => "be bigger than 2 and smaller than 4" 29 | # ...rather than: 30 | # # => "be bigger than 2" 31 | expectations.include_chain_clauses_in_custom_matcher_descriptions = true 32 | end 33 | 34 | # rspec-mocks config goes here. You can use an alternate test double 35 | # library (such as bogus or mocha) by changing the `mock_with` option here. 36 | config.mock_with :rspec do |mocks| 37 | # Prevents you from mocking or stubbing a method that does not exist on 38 | # a real object. This is generally recommended, and will default to 39 | # `true` in RSpec 4. 40 | mocks.verify_partial_doubles = true 41 | end 42 | 43 | # The settings below are suggested to provide a good initial experience 44 | # with RSpec, but feel free to customize to your heart's content. 45 | =begin 46 | # These two settings work together to allow you to limit a spec run 47 | # to individual examples or groups you care about by tagging them with 48 | # `:focus` metadata. When nothing is tagged with `:focus`, all examples 49 | # get run. 50 | config.filter_run :focus 51 | config.run_all_when_everything_filtered = true 52 | 53 | # Limits the available syntax to the non-monkey patched syntax that is recommended. 54 | # For more details, see: 55 | # - http://myronmars.to/n/dev-blog/2012/06/rspecs-new-expectation-syntax 56 | # - http://teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/ 57 | # - http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3#new__config_option_to_disable_rspeccore_monkey_patching 58 | config.disable_monkey_patching! 59 | 60 | # This setting enables warnings. It's recommended, but in some cases may 61 | # be too noisy due to issues in dependencies. 62 | config.warnings = true 63 | 64 | # Many RSpec users commonly either run the entire suite or an individual 65 | # file, and it's useful to allow more verbose output when running an 66 | # individual spec file. 67 | if config.files_to_run.one? 68 | # Use the documentation formatter for detailed output, 69 | # unless a formatter has already been configured 70 | # (e.g. via a command-line flag). 71 | config.default_formatter = 'doc' 72 | end 73 | 74 | # Print the 10 slowest examples and example groups at the 75 | # end of the spec run, to help surface which specs are running 76 | # particularly slow. 77 | config.profile_examples = 10 78 | 79 | # Run specs in random order to surface order dependencies. If you find an 80 | # order dependency and want to debug it, you can fix the order by providing 81 | # the seed, which is printed after each run. 82 | # --seed 1234 83 | config.order = :random 84 | 85 | # Seed global randomization in this process using the `--seed` CLI option. 86 | # Setting this allows you to use `--seed` to deterministically reproduce 87 | # test failures related to randomization by passing the same `--seed` value 88 | # as the one that triggered the failure. 
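# For example, a run that prints "Randomized with seed 1234" can be replayed
# in the same order with `bundle exec rspec --seed 1234`.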
89 | Kernel.srand config.seed 90 | =end 91 | end 92 | -------------------------------------------------------------------------------- /test/filters.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load ./test_common 4 | 5 | @test "rename" { 6 | run bash -c 'echo world | $DAP_EXECUTABLE lines + rename line=hello + json' 7 | assert_success 8 | assert_output '{"hello":"world"}' 9 | } 10 | 11 | @test "not_exists" { 12 | run bash -c "echo '{\"foo\":\"bar\"}' | $DAP_EXECUTABLE json + not_exists foo + json" 13 | assert_success 14 | assert_output '' 15 | run bash -c "echo '{\"bar\":\"bar\"}' | $DAP_EXECUTABLE json + not_exists foo + json" 16 | assert_success 17 | assert_output '{"bar":"bar"}' 18 | } 19 | 20 | @test "split_comma" { 21 | run bash -c "echo '{\"foo\":\"bar,baz\"}' | $DAP_EXECUTABLE json + split_comma foo + json | jq -Sc ." 22 | assert_success 23 | assert_line --index 0 '{"foo":"bar,baz","foo.word":"bar"}' 24 | assert_line --index 1 '{"foo":"bar,baz","foo.word":"baz"}' 25 | } 26 | 27 | @test "field_split_line" { 28 | run bash -c "echo '{\"foo\":\"bar\nbaz\"}' | $DAP_EXECUTABLE json + field_split_line foo + json | jq -Sc ." 29 | assert_success 30 | assert_output '{"foo":"bar\nbaz","foo.f1":"bar","foo.f2":"baz"}' 31 | } 32 | 33 | @test "not_empty" { 34 | # only exists in godap currently 35 | skip 36 | run bash -c "echo '{\"foo\":\"bar,baz\"}' | $DAP_EXECUTABLE json + not_empty foo + json | jq -Sc ." 37 | assert_success 38 | assert_output '{"foo":"bar,baz"}' 39 | } 40 | 41 | @test "field_split_tab" { 42 | run bash -c "echo '{\"foo\":\"bar\tbaz\"}' | $DAP_EXECUTABLE json + field_split_tab foo + json | jq -Sc ." 43 | assert_success 44 | assert_output '{"foo":"bar\tbaz","foo.f1":"bar","foo.f2":"baz"}' 45 | } 46 | 47 | @test "truncate" { 48 | run bash -c "echo '{\"foo\":\"bar\tbaz\"}' | $DAP_EXECUTABLE json + truncate foo + json | jq -Sc ." 49 | assert_success 50 | assert_output '{"foo":""}' 51 | } 52 | 53 | @test "insert" { 54 | run bash -c "echo '{\"foo\":\"bar\tbaz\"}' | $DAP_EXECUTABLE json + insert a=b + json | jq -Sc ." 55 | assert_success 56 | assert_output '{"a":"b","foo":"bar\tbaz"}' 57 | } 58 | 59 | @test "field_split_array" { 60 | run bash -c "echo '{\"foo\":[\"a\",2]}' | $DAP_EXECUTABLE json + field_split_array foo + json | jq -Sc ." 61 | assert_success 62 | assert_output '{"foo":["a",2],"foo.f1":"a","foo.f2":2}' 63 | } 64 | 65 | @test "exists" { 66 | run bash -c "echo '{\"foo\":\"bar\tbaz\"}' | $DAP_EXECUTABLE json + exists a + json | jq -Sc ." 67 | assert_success 68 | assert_output '' 69 | run bash -c "echo '{\"foo\":\"bar\tbaz\"}' | $DAP_EXECUTABLE json + exists foo + json | jq -Sc ." 70 | assert_success 71 | assert_output '{"foo":"bar\tbaz"}' 72 | } 73 | 74 | @test "split_line" { 75 | run bash -c "echo '{\"foo\":\"bar\nbaz\"}' | $DAP_EXECUTABLE json + split_line foo + json | jq -Sc ." 76 | assert_success 77 | assert_line --index 0 '{"foo":"bar\nbaz","foo.line":"bar"}' 78 | assert_line --index 1 '{"foo":"bar\nbaz","foo.line":"baz"}' 79 | } 80 | 81 | @test "select" { 82 | run bash -c "echo '{\"foo\":\"bar\", \"baz\":\"qux\", \"a\":\"b\"}' | $DAP_EXECUTABLE json + select foo + json | jq -Sc ." 83 | assert_success 84 | assert_output '{"foo":"bar"}' 85 | run bash -c "echo '{\"foo\":\"bar\", \"baz\":\"qux\", \"a\":\"b\"}' | $DAP_EXECUTABLE json + select foo baz + json | jq -Sc ." 
86 | assert_success 87 | assert_output '{"baz":"qux","foo":"bar"}' 88 | } 89 | 90 | @test "remove" { 91 | run bash -c "echo '{\"foo\":\"bar\", \"baz\":\"qux\", \"a\":\"b\"}' | $DAP_EXECUTABLE json + remove foo baz + json | jq -Sc ." 92 | assert_success 93 | assert_output '{"a":"b"}' 94 | } 95 | 96 | @test "include" { 97 | run bash -c "echo '{\"foo\":\"bar\", \"baz\":\"qux\", \"a\":\"b\"}' | $DAP_EXECUTABLE json + include a=c + json | jq -Sc ." 98 | assert_success 99 | assert_output '' 100 | run bash -c "echo '{\"foo\":\"bar\", \"baz\":\"qux\", \"a\":\"b\"}' | $DAP_EXECUTABLE json + include a=b + json | jq -Sc ." 101 | assert_success 102 | assert_output '{"a":"b","baz":"qux","foo":"bar"}' 103 | } 104 | 105 | @test "transform" { 106 | run bash -c "echo '{\"foo\":\"bar\"}' | $DAP_EXECUTABLE json + transform foo=base64encode + json | jq -Sc ." 107 | assert_success 108 | assert_output '{"foo":"YmFy"}' 109 | } 110 | 111 | @test "recog_match" { 112 | run bash -c "echo '9.8.2rc1-RedHat-9.8.2-0.62.rc1.el6_9.2' | $DAP_EXECUTABLE lines + recog line=dns.versionbind + json | jq -Sc ." 113 | assert_success 114 | assert_output '{"line":"9.8.2rc1-RedHat-9.8.2-0.62.rc1.el6_9.2","line.recog.fingerprint_db":"dns.versionbind","line.recog.matched":"ISC BIND: Red Hat Enterprise Linux","line.recog.os.cpe23":"cpe:/o:redhat:enterprise_linux:6","line.recog.os.family":"Linux","line.recog.os.product":"Enterprise Linux","line.recog.os.vendor":"Red Hat","line.recog.os.version":"6","line.recog.os.version.version":"9","line.recog.service.cpe23":"cpe:/a:isc:bind:9.8.2rc1","line.recog.service.family":"BIND","line.recog.service.product":"BIND","line.recog.service.protocol":"dns","line.recog.service.vendor":"ISC","line.recog.service.version":"9.8.2rc1"}' 115 | } 116 | 117 | @test "recog_nomatch" { 118 | run bash -c "echo 'should not match' | $DAP_EXECUTABLE lines + recog line=dns.versionbind + json | jq -Sc ." 119 | assert_success 120 | assert_output '{"line":"should not match"}' 121 | } 122 | 123 | @test "recog_invalid_arg" { 124 | # currently fails in dap, passes in godap 125 | skip 126 | run bash -c "echo 'test' | $DAP_EXECUTABLE lines + recog + json" 127 | assert_failure 128 | } 129 | 130 | @test "geo_ip yields valid fields" { 131 | run bash -c "echo 66.92.181.240 | GEOIP_CITY_DATABASE_PATH=./test/test_data/geoip/GeoIPCity.dat $DAP_EXECUTABLE lines + geo_ip line + json | jq -Sc ." 132 | assert_success 133 | assert_output '{"line":"66.92.181.240","line.area_code":"510","line.city":"Fremont","line.country_code":"US","line.country_code3":"USA","line.country_name":"United States","line.dma_code":"807","line.latitude":"37.50790023803711","line.longitude":"-121.95999908447266","line.postal_code":"94538","line.region":"CA","line.region_name":"California"}' 134 | } 135 | 136 | @test "geo_ip_org yields valid fields" { 137 | run bash -c "echo 12.87.118.0 | GEOIP_ORG_DATABASE_PATH=./test/test_data/geoip/GeoIPOrg.dat $DAP_EXECUTABLE lines + geo_ip_org line + json | jq -Sc -r ." 138 | assert_success 139 | assert_output '{"line":"12.87.118.0","line.org":"AT&T Worldnet Services"}' 140 | } 141 | 142 | @test "geo_ip_asn" { 143 | run bash -c "echo 12.87.118.0 | GEOIP_ASN_DATABASE_PATH=./test/test_data/geoip/GeoIPASNum.dat $DAP_EXECUTABLE lines + geo_ip_asn line + json | jq -Sc -r ." 
144 | assert_success 145 | assert_output '{"line":"12.87.118.0","line.asn":"AS7018"}' 146 | } 147 | 148 | @test "geo_ip2_city" { 149 | # test with default language 150 | run bash -c "echo 81.2.69.142 | GEOIP2_CITY_DATABASE_PATH=test/test_data/geoip2/GeoIP2-City-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_city line + json | jq -Sc -r ." 151 | assert_success 152 | assert_output '{"line":"81.2.69.142","line.geoip2.city.city.geoname_id":"2643743","line.geoip2.city.city.name":"London","line.geoip2.city.continent.code":"EU","line.geoip2.city.continent.geoname_id":"6255148","line.geoip2.city.continent.name":"Europe","line.geoip2.city.country.geoname_id":"2635167","line.geoip2.city.country.is_in_european_union":"true","line.geoip2.city.country.iso_code":"GB","line.geoip2.city.country.name":"United Kingdom","line.geoip2.city.location.accuracy_radius":"10","line.geoip2.city.location.latitude":"51.5142","line.geoip2.city.location.longitude":"-0.0931","line.geoip2.city.location.metro_code":"0","line.geoip2.city.location.time_zone":"Europe/London","line.geoip2.city.registered_country.geoname_id":"6252001","line.geoip2.city.registered_country.is_in_european_union":"false","line.geoip2.city.registered_country.iso_code":"US","line.geoip2.city.registered_country.name":"United States","line.geoip2.city.represented_country.geoname_id":"0","line.geoip2.city.represented_country.is_in_european_union":"false","line.geoip2.city.subdivisions.0.geoname_id":"6269131","line.geoip2.city.subdivisions.0.iso_code":"ENG","line.geoip2.city.subdivisions.0.name":"England","line.geoip2.city.subdivisions.length":"1","line.geoip2.city.traits.is_anonymous_proxy":"false","line.geoip2.city.traits.is_satellite_provider":"false"}' 153 | 154 | # test with non-default language 155 | run bash -c "echo 67.43.156.0 | GEOIP2_CITY_DATABASE_PATH=test/test_data/geoip2/GeoIP2-City-Test.mmdb GEOIP2_LANGUAGE=fr $DAP_EXECUTABLE lines + geo_ip2_city line + json | jq -Sc -r ." 156 | assert_success 157 | assert_output '{"line":"67.43.156.0","line.geoip2.city.city.geoname_id":"0","line.geoip2.city.continent.code":"AS","line.geoip2.city.continent.geoname_id":"6255147","line.geoip2.city.continent.name":"Asie","line.geoip2.city.country.geoname_id":"1252634","line.geoip2.city.country.is_in_european_union":"false","line.geoip2.city.country.iso_code":"BT","line.geoip2.city.country.name":"Bhutan","line.geoip2.city.location.accuracy_radius":"534","line.geoip2.city.location.latitude":"27.5","line.geoip2.city.location.longitude":"90.5","line.geoip2.city.location.metro_code":"0","line.geoip2.city.location.time_zone":"Asia/Thimphu","line.geoip2.city.registered_country.geoname_id":"798549","line.geoip2.city.registered_country.is_in_european_union":"true","line.geoip2.city.registered_country.iso_code":"RO","line.geoip2.city.registered_country.name":"Roumanie","line.geoip2.city.represented_country.geoname_id":"0","line.geoip2.city.represented_country.is_in_european_union":"false","line.geoip2.city.traits.is_anonymous_proxy":"true","line.geoip2.city.traits.is_satellite_provider":"false"}' 158 | 159 | # test IPv6 160 | run bash -c "echo 2a02:d9c0:: | GEOIP2_CITY_DATABASE_PATH=test/test_data/geoip2/GeoIP2-City-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_city line + json | jq -Sc -r ." 
161 | assert_success 162 | assert_output '{"line":"2a02:d9c0::","line.geoip2.city.city.geoname_id":"0","line.geoip2.city.continent.code":"AS","line.geoip2.city.continent.geoname_id":"6255147","line.geoip2.city.continent.name":"Asia","line.geoip2.city.country.geoname_id":"298795","line.geoip2.city.country.is_in_european_union":"false","line.geoip2.city.country.iso_code":"TR","line.geoip2.city.country.name":"Turkey","line.geoip2.city.location.accuracy_radius":"100","line.geoip2.city.location.latitude":"39.05901","line.geoip2.city.location.longitude":"34.91155","line.geoip2.city.location.metro_code":"0","line.geoip2.city.location.time_zone":"Europe/Istanbul","line.geoip2.city.registered_country.geoname_id":"298795","line.geoip2.city.registered_country.is_in_european_union":"false","line.geoip2.city.registered_country.iso_code":"TR","line.geoip2.city.registered_country.name":"Turkey","line.geoip2.city.represented_country.geoname_id":"0","line.geoip2.city.represented_country.is_in_european_union":"false","line.geoip2.city.traits.is_anonymous_proxy":"false","line.geoip2.city.traits.is_satellite_provider":"false"}' 163 | 164 | # test invalid IP 165 | run bash -c "echo test | GEOIP2_CITY_DATABASE_PATH=test/test_data/geoip2/GeoIP2-City-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_city line + json | jq -Sc -r ." 166 | assert_success 167 | assert_output '{"line":"test"}' 168 | } 169 | 170 | @test "geo_ip2_asn" { 171 | run bash -c "echo 12.81.92.0 | GEOIP2_ASN_DATABASE_PATH=test/test_data/geoip2/GeoLite2-ASN-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_asn line + json | jq -Sc -r ." 172 | assert_success 173 | assert_output '{"line":"12.81.92.0","line.geoip2.asn.asn":"AS7018","line.geoip2.asn.asn_org":"AT&T Services"}' 174 | 175 | # test IPv6 176 | run bash -c "echo 2600:7000:: | GEOIP2_ASN_DATABASE_PATH=test/test_data/geoip2/GeoLite2-ASN-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_asn line + json | jq -Sc -r ." 177 | assert_success 178 | assert_output '{"line":"2600:7000::","line.geoip2.asn.asn":"AS6939","line.geoip2.asn.asn_org":"Hurricane Electric, Inc."}' 179 | 180 | # test invalid IP 181 | run bash -c "echo test | GEOIP2_ASN_DATABASE_PATH=test/test_data/geoip2/GeoLite2-ASN-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_asn line + json | jq -Sc -r ." 182 | assert_success 183 | assert_output '{"line":"test"}' 184 | } 185 | 186 | @test "geo_ip2_isp" { 187 | run bash -c "echo -e '12.81.92.0\n2600:7000::\ntest' | GEOIP2_ISP_DATABASE_PATH=test/test_data/geoip2/GeoIP2-ISP-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_isp line + json | jq -Sc -r ." 188 | assert_line --index 0 '{"line":"12.81.92.0","line.geoip2.isp.asn":"AS7018","line.geoip2.isp.isp":"AT&T Services","line.geoip2.isp.org":"AT&T Services"}' 189 | # test IPv6 190 | assert_line --index 1 '{"line":"2600:7000::","line.geoip2.isp.asn":"AS6939","line.geoip2.isp.asn_org":"Hurricane Electric, Inc."}' 191 | # test invalid IP 192 | assert_line --index 2 '{"line":"test"}' 193 | } 194 | 195 | @test "geo_ip2_legacy_compat" { 196 | run bash -c "echo -e '81.2.69.142\n12.81.92.0\n2a02:d9c0::\n2a01:1000::' | GEOIP2_ASN_DATABASE_PATH=test/test_data/geoip2/GeoLite2-ASN-Test.mmdb GEOIP2_CITY_DATABASE_PATH=test/test_data/geoip2/GeoIP2-City-Test.mmdb GEOIP2_ISP_DATABASE_PATH=test/test_data/geoip2/GeoIP2-ISP-Test.mmdb $DAP_EXECUTABLE lines + geo_ip2_city line + geo_ip2_asn line + geo_ip2_isp line + geo_ip2_legacy_compat line + match_remove line.geoip2 + json | jq -Sc -r ." 
197 | assert_success 198 | # this one only has city data, not ASN/org/ISP 199 | assert_line --index 0 '{"line":"81.2.69.142","line.city":"London","line.country_code":"GB","line.country_name":"United Kingdom","line.latitude":"51.5142","line.longitude":"-0.0931","line.region":"ENG","line.region_name":"England"}' 200 | # this one has ASN/org data in the test databases but none in the city DB 201 | assert_line --index 1 '{"line":"12.81.92.0","line.asn":"AS7018","line.org":"AT&T Services"}' 202 | # this one exists only in the city DB 203 | assert_line --index 2 '{"line":"2a02:d9c0::","line.country_code":"TR","line.country_name":"Turkey","line.latitude":"39.05901","line.longitude":"34.91155"}' 204 | # this one exists only in the ISP DB 205 | assert_line --index 3 '{"line":"2a01:1000::","line.asn":"AS5617","line.org":"Telekomunikacja Polska S.A."}' 206 | 207 | run bash -c "echo '{\"ip\": \"4.2.2.1\", \"something_empty\": \"\", \"some_int\": 80}' | GEOIP2_CITY_DATABASE_PATH=test/test_data/geoip2/GeoIP2-City-Test.mmdb $DAP_EXECUTABLE json + geo_ip2_city ip + geo_ip2_legacy_compat ip + match_remove ip. + json" 208 | assert_success 209 | assert_output '{"ip":"4.2.2.1","something_empty":"","some_int":80}' 210 | } 211 | -------------------------------------------------------------------------------- /test/inputs.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load ./test_common 4 | 5 | @test "reads json" { 6 | run bash -c 'echo "{\"foo\": 1 }" | $DAP_EXECUTABLE json + json' 7 | assert_success 8 | assert_output '{"foo":1}' 9 | } 10 | 11 | @test "reads lines" { 12 | run bash -c 'echo hello world | $DAP_EXECUTABLE lines + json' 13 | assert_success 14 | assert_output '{"line":"hello world"}' 15 | } 16 | -------------------------------------------------------------------------------- /test/test_common.bash: -------------------------------------------------------------------------------- 1 | TEST_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 2 | 3 | load "${TEST_DIR}/test_helper/bats-support/load.bash" 4 | load "${TEST_DIR}/test_helper/bats-assert/load.bash" 5 | 6 | function setup_workdir() { 7 | WORK_DIR=`mktemp -d /tmp/output.XXXXXX` 8 | } 9 | 10 | function teardown_workdir() { 11 | cd 12 | if [ -z "${DISABLE_BATS_TEARDOWN}" ]; then 13 | test -d $WORK_DIR && rm -Rf $WORK_DIR 14 | fi 15 | } 16 | 17 | function setup() { 18 | setup_workdir 19 | } 20 | 21 | function teardown() { 22 | teardown_workdir 23 | } 24 | 25 | -------------------------------------------------------------------------------- /test/test_data/geoip/GeoIPASNum.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/test/test_data/geoip/GeoIPASNum.dat -------------------------------------------------------------------------------- /test/test_data/geoip/GeoIPCity.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/test/test_data/geoip/GeoIPCity.dat -------------------------------------------------------------------------------- /test/test_data/geoip/GeoIPOrg.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/test/test_data/geoip/GeoIPOrg.dat -------------------------------------------------------------------------------- /test/test_data/geoip2/GeoIP2-City-Test.mmdb:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/test/test_data/geoip2/GeoIP2-City-Test.mmdb -------------------------------------------------------------------------------- /test/test_data/geoip2/GeoIP2-ISP-Test.mmdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/test/test_data/geoip2/GeoIP2-ISP-Test.mmdb -------------------------------------------------------------------------------- /test/test_data/geoip2/GeoLite2-ASN-Test.mmdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rapid7/dap/3305e99d505289f884c9f4d0c0fb9af66a2af1e2/test/test_data/geoip2/GeoLite2-ASN-Test.mmdb -------------------------------------------------------------------------------- /tools/geo-ip-summary.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | require 'oj' 3 | require 'optparse' 4 | 5 | class GeoIPSummary 6 | attr_accessor :country_name, :region_name, :city_name, :tree 7 | # 8 | # Pass the hash keys for the country name, region name and 9 | # city name that we'll encounter during the process_hash function. 10 | # 11 | def initialize(country_name, region_name, city_name) 12 | @country_name = country_name 13 | @region_name = region_name 14 | @city_name = city_name 15 | @tree = {} 16 | @tree['count'] = 0 17 | end 18 | 19 | def stringify(o) 20 | if o.kind_of?( ::String ) 21 | o.to_s.encode(o.encoding, "UTF-8", :invalid => :replace, :undef => :replace, :replace => '') 22 | else 23 | o.to_s 24 | end 25 | end 26 | 27 | def process_hash( json_hash ) 28 | country = stringify( json_hash[@country_name] ) 29 | region = stringify( json_hash[@region_name] || 'Undefined Region' ) 30 | city = stringify( json_hash[@city_name] || 'Undefined City' ) 31 | 32 | # Create subhashes and values as needed on down the tree 33 | @tree[country] ||= {} 34 | @tree[country]['count'] ||=0 35 | @tree[country][region] ||= {} 36 | @tree[country][region]['count'] ||= 0 37 | @tree[country][region][city] ||= 0 38 | 39 | # Now increment counters 40 | @tree['count'] += 1 41 | @tree[country]['count'] += 1 42 | @tree[country][region]['count'] +=1 43 | @tree[country][region][city] += 1 44 | end 45 | 46 | # Performs the final sorting of the hash, with descending order of counts 47 | # 48 | def order_tree 49 | @tree.each do | country, country_hash| 50 | if country != 'count' 51 | country_hash.each do | region, region_hash | 52 | @tree[country][region] = order_hash(@tree[country][region]) if region != 'count' 53 | end 54 | @tree[country] = order_hash(@tree[country]) 55 | end 56 | end 57 | @tree = order_hash(@tree) 58 | end 59 | 60 | private 61 | 62 | # Sorts the hash, and returns a copy of the hash in sorted order by their counts, or if 63 | # counts are equal then by their names. 64 | def order_hash(h) 65 | keys = h.keys.sort { | k1,k2 | 66 | if k1 == 'count' 67 | ret = -1 68 | elsif k2 == 'count' 69 | ret = 1 70 | else 71 | # Cities level is slightly different form, if hash at this level then compare 72 | # count value within hash, otherwise just compare values. 
Multiply by -1 to reverse the 73 | # ordering. 74 | if h[k1].class == Hash 75 | ret = ( h[k1]['count'] <=> h[k2]['count'] ) * -1 76 | ret = k1 <=> k2 if ret == 0 && k1!=nil && k2!=nil 77 | else 78 | ret = ( h[k1] <=> h[k2] ) * -1 79 | ret = k1 <=> k2 if ret == 0 && k1!=nil && k2!=nil 80 | end 81 | end 82 | ret 83 | } 84 | 85 | # build up return hash 86 | ret_hash = {} 87 | keys.each do | key | 88 | ret_hash[key] = h[key] 89 | end 90 | 91 | ret_hash 92 | end 93 | end 94 | HELP=<<EOF 95 | This script is used to summarize geoip data from a stream of json records. The name of the json element for 96 | the country, region, and city must be provided. The output is a hash with the country/region/city data and 97 | the count of occurrences from the input file; this output hash is sorted in count descending order so that 98 | the most common country, region within a country, and city within a region are returned first. 99 | 100 | Example with dap: 101 | bzcat ../samples/ssl_certs.bz2 | ../bin/dap json + select host_ip + geo_ip host_ip + json | ./geo-ip-summary.rb --var host_ip > /tmp/ssl_geo.json 102 | EOF 103 | 104 | def parse_command_line(args) 105 | 106 | options={ 107 | :country => nil, 108 | :region => nil, 109 | :city => nil, 110 | :var => nil 111 | } 112 | 113 | OptionParser.new do | opts | 114 | opts.banner = HELP 115 | opts.separator '' 116 | 117 | opts.separator 'GeoIP name options:' 118 | 119 | opts.on( '--country country_key', 'The name of the json key for the country.') do | val | 120 | options[:country] = val 121 | end 122 | 123 | opts.on( '--region region_key', 'The name of the json key for the region.') do | val | 124 | options[:region] = val 125 | end 126 | 127 | opts.on( '--city city_key', 'The name of the json key for the city.' ) do | val | 128 | options[:city] = val 129 | end 130 | 131 | opts.on('--var top-level-var', 'Sets the top level json name, for defining all of country/region/city') do | val | 132 | options[:var] = val 133 | options[:country] = "#{val}.country_name" 134 | options[:region] = "#{val}.region_name" 135 | options[:city] = "#{val}.city" 136 | end 137 | 138 | opts.on_tail('-h', '--help', 'Show this message') do 139 | puts opts 140 | exit(0) 141 | end 142 | opts.parse!(args) 143 | options 144 | end 145 | options 146 | end 147 | opts = parse_command_line(ARGV) 148 | 149 | 150 | raise 'Need json key names for country, region, and city.' if opts[:country].nil? || opts[:region].nil? || opts[:city].nil?
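# A quick sketch of the transformation, using a hypothetical record and the
# keys produced by `--var host_ip` (the values here are illustrative only):
#   input : {"host_ip.country_name":"United States","host_ip.region_name":"California","host_ip.city":"Fremont"}
#   output: {"count":1,"United States":{"count":1,"California":{"count":1,"Fremont":1}}}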
151 | 152 | summarizer = GeoIPSummary.new(opts[:country], opts[:region], opts[:city]) 153 | 154 | 155 | $stdin.each_line do |line| 156 | json = Oj.load(line.unpack("C*").pack("C*").strip) rescue nil 157 | next unless json 158 | summarizer.process_hash(json) 159 | end 160 | 161 | Oj.default_options={:indent=>2} 162 | 163 | puts Oj.dump(summarizer.order_tree) 164 | -------------------------------------------------------------------------------- /tools/ipmi-vulns.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'oj' 4 | 5 | SEARCHES = { 6 | "data.ipmi_compat_password" => { value: "1", name: "straight-pass" }, 7 | "data.ipmi_compat_md2" => { value: "1", name: "md2" }, 8 | "data.ipmi_compat_none" => { value: "1", name: "noauth" }, 9 | "data.ipmi_user_disable_message_auth" => { value: "1", name: "permsg" }, 10 | "data.ipmi_user_disable_user_auth" => { value: "1", name: "usrlvl" } 11 | } 12 | 13 | def search(hash) 14 | SEARCHES.each do | key, vuln | 15 | if hash[key] == vuln[:value] 16 | hash["VULN-IPMI-#{vuln[:name].upcase}"] = "true" 17 | end 18 | end 19 | if (hash['data.ipmi_user_non_null'] == "0") && (hash['data.ipmi_user_null'] == "0") 20 | hash["VULN-IPMI-ANON"] = "true" 21 | end 22 | hash 23 | end 24 | 25 | $stdin.each_line do |line| 26 | json = Oj.load(line.unpack("C*").pack("C*").strip) rescue nil 27 | next unless json 28 | puts Oj.dump(search(json)) 29 | end 30 | -------------------------------------------------------------------------------- /tools/json-summarize.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | require 'oj' 3 | require 'optparse' 4 | 5 | HELP=<<EOF 6 | 7 | This script computes the frequency of each value of a given key across a stream of json 8 | documents. It increments a counter for each instance of the key found in the input, 9 | sorts the counts in descending order, and outputs a json document with the top n 10 | values of the key. 11 | 12 | Note that if passed a key that has unique values, this script can consume a lot of memory.
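For example (hypothetical data), three input records containing {"data.mssql.Version":"10.50.2500.0"}
and one containing {"data.mssql.Version":"9.00.1399.06"} produce {"10.50.2500.0":3,"9.00.1399.06":1}.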
13 | 14 | Sample: 15 | unpigz -c /tmp/2014-05-05-mssql-udp-decoded.json.gz | ruby ~/src/dap/tools/json-summarize.rb --top 20 --key data.mssql.Version 16 | EOF 17 | 18 | 19 | def stringify(o) 20 | if o.kind_of?( ::String ) 21 | o.to_s.encode(o.encoding, "UTF-8", :invalid => :replace, :undef => :replace, :replace => '') 22 | else 23 | o.to_s 24 | end 25 | end 26 | 27 | def parse_command_line(args) 28 | 29 | options = { 30 | :key => nil, 31 | :number => 100, 32 | :subkey => nil, 33 | :subnumber => 100 34 | } 35 | 36 | OptionParser.new do | opts | 37 | opts.banner = HELP 38 | opts.separator '' 39 | 40 | opts.separator 'Summarization options:' 41 | 42 | opts.on( '--key keyname', 'The name of the json key to be summarized.') do | val | 43 | options[:key] = val 44 | end 45 | 46 | opts.on( '--subkey keyname', 'The name of the json subkey to be summarized under each key') do | val | 47 | options[:subkey] = val 48 | end 49 | 50 | opts.on( '--top num_items', 'Return top n occurrences.') do | val | 51 | options[:number] = val.to_i 52 | end 53 | 54 | opts.on( '--subtop num_items', 'Return top n occurrences in each subkey.') do | val | 55 | options[:subnumber] = val.to_i 56 | end 57 | 58 | opts.on_tail('-h', '--help', 'Show this message') do 59 | $stderr.puts opts 60 | exit(1) 61 | end 62 | opts.parse!(args) 63 | 64 | if not options[:key] 65 | $stderr.puts opts 66 | exit(1) 67 | end 68 | end 69 | 70 | options 71 | end 72 | 73 | 74 | summary = {} 75 | opts = parse_command_line(ARGV) 76 | key = opts[:key] 77 | skey = opts[:subkey] 78 | 79 | $stdin.each_line do |line| 80 | json = Oj.load(line.to_s.unpack("C*").pack("C*").strip) rescue nil 81 | next unless ( json && json[key] ) 82 | 83 | if json[key].kind_of?(Array) 84 | vals = json[key] 85 | else 86 | vals = [json[key],] 87 | end 88 | 89 | vals.each do |val| 90 | val = stringify(val) 91 | 92 | summary[val] ||= {} 93 | summary[val][:count] ||= 0 94 | summary[val][:count] += 1 95 | 96 | if skey 97 | if json[skey].kind_of?(Array) 98 | svals = json[skey] 99 | else 100 | svals = [json[skey],] 101 | end 102 | 103 | svals.each do |sval| 104 | sval = stringify(sval) 105 | summary[val][sval] ||= {} 106 | summary[val][sval][:count] ||= 0 107 | summary[val][sval][:count] += 1 108 | end 109 | end 110 | end 111 | 112 | end 113 | 114 | output = {} 115 | summary.keys.sort{|a,b| summary[b][:count] <=> summary[a][:count] }[0, opts[:number]].each do |k| 116 | unless skey 117 | output[k] = summary[k][:count] 118 | else 119 | output[k] = { "count" => summary[k][:count], skey => {} } 120 | summary[k].keys.select{|x| x != :count}.sort{|a,b| summary[k][b][:count] <=> summary[k][a][:count] }[0, opts[:subnumber]].each do |sk| 121 | output[k][skey][sk] = summary[k][sk][:count] 122 | end 123 | end 124 | end 125 | 126 | $stdout.puts Oj.dump(output) -------------------------------------------------------------------------------- /tools/netbios-counts.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'optparse' 4 | require 'ostruct' 5 | require 'oj' 6 | require 'json' 7 | 8 | options = OpenStruct.new 9 | options.top_count = 5 10 | options.exclude_default_counts = false 11 | 12 | OptionParser.new do |opts| 13 | opts.banner = "Usage: netbios-counts.rb [options]" 14 | 15 | opts.on("-c", "--count [NUM]", OptionParser::DecimalInteger, 16 | "Specify the number of top count results") do |count| 17 | options.top_count = count if count > 1 18 | end 19 | 20 | opts.on("--count-hostnames-containing [TEXT]", "Count
hostnames that include the specified text") do |text| 21 | options.hostname_containing = text 22 | end 23 | 24 | opts.on("--exclude-default-counts", "Exclude the default top counts") do 25 | options.exclude_default_counts = true 26 | end 27 | end.parse! 28 | 29 | NUM_TOP_RECORDS = options.top_count 30 | 31 | module Counter 32 | def count(hash) 33 | value = countable_value(hash) 34 | @counts[value] += 1 unless (value.empty? || value == 'UNKNOWN') 35 | end 36 | 37 | def top_counts 38 | [].tap do |counts| 39 | ordered_by_count.to_a.take(NUM_TOP_RECORDS).each do |values| 40 | counts << count_hash(values) 41 | end 42 | end 43 | end 44 | 45 | def ordered_by_count 46 | Hash[@counts.sort_by{|k, v| v}.reverse] 47 | end 48 | end 49 | 50 | class CompanyNameCounter 51 | include Counter 52 | 53 | def initialize 54 | @counts = Hash.new(0) 55 | end 56 | 57 | def countable_value(hash) 58 | hash['data.netbios_mac_company'].to_s 59 | end 60 | 61 | def count_hash(values) 62 | { 'name' => values[0], 'count' => values[1] } 63 | end 64 | 65 | def apply_to(hash) 66 | hash['top_companies'] = top_counts 67 | end 68 | end 69 | 70 | class NetbiosNameCounter 71 | include Counter 72 | 73 | def initialize 74 | @counts = Hash.new(0) 75 | end 76 | 77 | def countable_value(hash) 78 | hash['data.netbios_hname'].to_s 79 | end 80 | 81 | def count_hash(values) 82 | { 'hostname' => values[0], 'count' => values[1] } 83 | end 84 | 85 | def apply_to(hash) 86 | hash['top_netbios_hostnames'] = top_counts 87 | end 88 | end 89 | 90 | class MacAddressCounter 91 | include Counter 92 | 93 | def initialize 94 | @counts = Hash.new(0) 95 | end 96 | 97 | def countable_value(hash) 98 | address = hash['data.netbios_mac'].to_s 99 | [].tap do |data| 100 | unless (address.empty? || address == '00:00:00:00:00:00') 101 | data << address 102 | data << hash['data.netbios_hname'] 103 | data << hash['data.netbios_mac_company'] 104 | end 105 | end 106 | end 107 | 108 | def count_hash(values) 109 | { 110 | 'mac_address' => values[0][0], 111 | 'hostname' => values[0][1], 112 | 'company' => values[0][2], 113 | 'count' => values[1] 114 | } 115 | end 116 | 117 | def apply_to(hash) 118 | hash['top_mac_addresses'] = top_counts 119 | end 120 | end 121 | 122 | class GeoCounter 123 | def initialize 124 | @cities = Hash.new(0) 125 | @countries = Hash.new(0) 126 | @regions = Hash.new(0) 127 | end 128 | 129 | def count(hash) 130 | city = hash['ip.city'].to_s 131 | country_code = hash['ip.country_code'].to_s 132 | country_name = hash['ip.country_name'].to_s 133 | region = hash['ip.region'].to_s 134 | region_name = hash['ip.region_name'].to_s 135 | 136 | @cities[[city, country_code]] += 1 unless city.empty? 137 | @countries[[country_code, country_name]] += 1 unless country_code.empty? 138 | @regions[[region, region_name]] += 1 unless region.empty?
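# Note: cities are keyed by [city, country_code] and regions by [region, region_name]
# so that identically named entries from different places are counted separately.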
139 | end 140 | 141 | def top_cities 142 | [].tap do |counts| 143 | ordered_cities.to_a.take(NUM_TOP_RECORDS).each do |values| 144 | counts << { 145 | 'city' => values[0][0], 146 | 'country_code' => values[0][1], 147 | 'count' => values[1] 148 | } 149 | end 150 | end 151 | end 152 | 153 | def top_countries 154 | [].tap do |counts| 155 | ordered_countries.to_a.take(NUM_TOP_RECORDS).each do |values| 156 | counts << { 157 | 'country_code' => values[0][0], 158 | 'country_name' => values[0][1], 159 | 'count' => values[1] 160 | } 161 | end 162 | end 163 | end 164 | 165 | def top_regions 166 | [].tap do |counts| 167 | ordered_regions.to_a.take(NUM_TOP_RECORDS).each do |values| 168 | counts << { 169 | 'region' => values[0][0], 170 | 'region_name' => values[0][1], 171 | 'count' => values[1] 172 | } 173 | end 174 | end 175 | end 176 | 177 | def ordered_cities 178 | Hash[@cities.sort_by{|k, v| v}.reverse] 179 | end 180 | 181 | def ordered_countries 182 | Hash[@countries.sort_by{|k, v| v}.reverse] 183 | end 184 | 185 | def ordered_regions 186 | Hash[@regions.sort_by{|k, v| v}.reverse] 187 | end 188 | 189 | def apply_to(hash) 190 | hash['top_cities'] = top_cities unless top_cities.empty? 191 | hash['top_countries'] = top_countries unless top_countries.empty? 192 | hash['top_regions'] = top_regions unless top_regions.empty? 193 | end 194 | end 195 | 196 | class SambaCounter 197 | include Counter 198 | 199 | def initialize 200 | @counts = Hash.new(0) 201 | end 202 | 203 | def countable_value(hash) 204 | address = hash['data.netbios_mac'].to_s 205 | if (address == '00:00:00:00:00:00') 206 | hash['data.netbios_hname'] 207 | else 208 | '' 209 | end 210 | end 211 | 212 | def count_hash(values) 213 | { 'name' => values[0], 'count' => values[1] } 214 | end 215 | 216 | def apply_to(hash) 217 | hash['top_samba_names'] = top_counts 218 | end 219 | end 220 | 221 | class HostnameContainingCounter 222 | include Counter 223 | 224 | def initialize(text) 225 | @text = text 226 | @counts = Hash.new(0) 227 | end 228 | 229 | def countable_value(hash) 230 | hostname = hash['data.netbios_hname'].to_s 231 | [].tap do |data| 232 | if hostname.include?(@text) 233 | data << hostname 234 | data << hash['data.netbios_mac_company'] 235 | data << hash['ip.city'].to_s 236 | data << hash['ip.country_code'].to_s 237 | data << hash['ip.country_name'].to_s 238 | end 239 | end 240 | end 241 | 242 | def count_hash(values) 243 | { 244 | 'hostname' => values[0][0], 245 | 'company' => values[0][1], 246 | 'city' => values[0][2], 247 | 'country_code' => values[0][3], 248 | 'country_name' => values[0][4], 249 | 'count' => values[1] 250 | } 251 | end 252 | 253 | def apply_to(hash) 254 | hash["hostnames with '#{@text}'"] = top_counts 255 | end 256 | end 257 | 258 | counters = [] 259 | unless options.exclude_default_counts 260 | counters << CompanyNameCounter.new 261 | counters << NetbiosNameCounter.new 262 | counters << MacAddressCounter.new 263 | counters << GeoCounter.new 264 | counters << SambaCounter.new 265 | end 266 | 267 | counters << HostnameContainingCounter.new(options.hostname_containing) unless options.hostname_containing.nil? 
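# Reads newline-delimited JSON records on stdin (for example, decoded NetBIOS
# replies from a `dap ... + json` pipeline, with keys such as data.netbios_hname
# and data.netbios_mac); lines that fail to parse are skipped.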
268 | 269 | $stdin.each_line do |line| 270 | json = Oj.load(line.unpack("C*").pack("C*").strip) rescue nil 271 | next unless json 272 | counters.each { |counter| counter.count(json) } 273 | end 274 | 275 | summary = {} 276 | counters.each { |counter| counter.apply_to(summary) } 277 | 278 | puts JSON.pretty_generate(summary) -------------------------------------------------------------------------------- /tools/upnp-vulns.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | require 'oj' 3 | 4 | # SEARCHES holds an entry for each service. Each entry names the hash key 5 | # whose value is checked against every regex in its :regex hash; when a 6 | # regex matches, the associated value (a list of CVE identifiers) is 7 | # appended to the record under the :output_key key. 8 | # 9 | SEARCHES = { 10 | :upnp => { 11 | :hash_key => 'data.upnp_server', 12 | :output_key => 'vulnerability', 13 | :regex => { 14 | /MiniUPnPd\/1\.0([\.\,\-\~\s]|$)/mi => ['CVE-2013-0229'], 15 | /MiniUPnPd\/1\.[0-3]([\.\,\-\~\s]|$)/mi => ['CVE-2013-0230'], 16 | /Intel SDK for UPnP devices.*|Portable SDK for UPnP devices(\/?\s*$|\/1\.([0-5]\..*|8\.0.*|(6\.[0-9]|6\.1[0-7])([\.\,\-\~\s]|$)))/mi => ['CVE-2012-5958', 'CVE-2012-5959'] 17 | } 18 | } 19 | } 20 | 21 | def search(hash, service) 22 | SEARCHES[service][:regex].each do | regex, value | 23 | if regex =~ hash[SEARCHES[service][:hash_key]].force_encoding('BINARY') 24 | # Append rather than overwrite so a record hit by multiple regexes keeps every match. 25 | hash[SEARCHES[service][:output_key]] = ( hash[SEARCHES[service][:output_key]] ? hash[SEARCHES[service][:output_key]] + value : value ) 26 | end 27 | end if hash[SEARCHES[service][:hash_key]] 28 | hash 29 | end 30 | 31 | $stdin.each_line do |line| 32 | json = Oj.load(line.unpack("C*").pack("C*").strip) rescue nil 33 | next unless json 34 | puts Oj.dump(search(json, :upnp)) 35 | end 36 | -------------------------------------------------------------------------------- /tools/value-counts-to-md-table.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | info = {} 3 | $stdin.each_line do |line| 4 | line = line.unpack("C*").pack("C*").strip 5 | info[line] ||= 0 6 | info[line] += 1 7 | end 8 | 9 | 10 | puts " 11 | 12 | #### Top Values 13 | | Count | Value | 14 | |:------------- | ------------- |" 15 | 16 | max = 100 17 | cnt = 0 18 | info.keys.sort {|a,b| info[b] <=> info[a] }.each do |k| 19 | puts "| #{info[k]} | #{k} |" 20 | cnt += 1 21 | break if cnt >= max 22 | end 23 | puts "" 24 | --------------------------------------------------------------------------------