├── LICENSE ├── README.md ├── applications ├── README.md ├── application_template │ ├── application_template.py │ └── modules │ │ ├── __init__.py │ │ └── kafkaIO.py ├── detection │ ├── README.md │ ├── ddos │ │ ├── README.md │ │ └── spark │ │ │ └── detection_ddos.py │ ├── dns_external_resolvers │ │ ├── README.md │ │ ├── spark │ │ │ ├── dns_external_resolvers.py │ │ │ └── modules │ │ │ │ ├── DNSResponseConverter.py │ │ │ │ ├── __init__.py │ │ │ │ └── kafkaIO.py │ │ └── web-interface │ │ │ ├── controllers │ │ │ └── dns_external_resolvers.py │ │ │ ├── static │ │ │ ├── css │ │ │ │ └── dns_external_resolvers.css │ │ │ └── js │ │ │ │ └── custom │ │ │ │ └── dns_external_resolvers.js │ │ │ └── views │ │ │ ├── dns_external_resolvers │ │ │ └── dns_external_resolvers.html │ │ │ └── menu │ │ │ └── dns_external_resolvers.html │ ├── dns_open_resolvers │ │ ├── README.md │ │ ├── spark │ │ │ ├── dns_open_resolvers.py │ │ │ ├── modules │ │ │ │ ├── DNSResponseConverter.py │ │ │ │ ├── __init__.py │ │ │ │ └── kafkaIO.py │ │ │ ├── whitelisted_domains.txt │ │ │ └── whitelisted_networks.txt │ │ └── web-interface │ │ │ ├── controllers │ │ │ └── dns_open_resolvers.py │ │ │ ├── static │ │ │ ├── css │ │ │ │ └── dns_open_resolvers.css │ │ │ └── js │ │ │ │ └── custom │ │ │ │ └── dns_open_resolvers.js │ │ │ └── views │ │ │ ├── dns_open_resolvers │ │ │ └── dns_open_resolvers.html │ │ │ └── menu │ │ │ └── dns_open_resolvers.html │ ├── pattern_finder │ │ ├── README.md │ │ ├── additional_data │ │ │ ├── README.md │ │ │ └── SSH_authentication_attack_detection │ │ │ │ ├── configuration.yml │ │ │ │ └── dataset │ │ │ │ ├── README.md │ │ │ │ ├── hydra.zip │ │ │ │ ├── medusa.zip │ │ │ │ └── ncrack.zip │ │ ├── spark │ │ │ ├── configuration.yml │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ ├── distance_functions │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── biflow_quadratic_form.py │ │ │ │ │ └── simple_quadratic_form.py │ │ │ │ ├── kafkaIO.py │ │ │ │ └── vector_definition │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── element_entropy.py │ │ │ └── pattern_finder.py │ │ └── web-interface │ │ │ ├── controllers │ │ │ └── pattern_finder.py │ │ │ ├── static │ │ │ ├── css │ │ │ │ └── pattern_finder.css │ │ │ └── js │ │ │ │ └── custom │ │ │ │ └── pattern_finder.js │ │ │ └── views │ │ │ ├── menu │ │ │ └── pattern_finder.html │ │ │ └── pattern_finder │ │ │ └── pattern_finder.html │ ├── ports_scan │ │ ├── README.md │ │ ├── spark │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ └── kafkaIO.py │ │ │ └── ports_scan.py │ │ └── web-interface │ │ │ ├── controllers │ │ │ └── ports_scan.py │ │ │ ├── static │ │ │ ├── css │ │ │ │ └── ports_scan.css │ │ │ └── js │ │ │ │ └── custom │ │ │ │ └── ports_scan.js │ │ │ └── views │ │ │ ├── menu │ │ │ └── ports_scan.html │ │ │ └── ports_scan │ │ │ └── ports_scan.html │ ├── reflect_ddos │ │ ├── README.md │ │ └── spark │ │ │ └── reflectdos_main.py │ └── ssh_auth_simple │ │ ├── README.md │ │ ├── spark │ │ ├── modules │ │ │ ├── __init__.py │ │ │ └── kafkaIO.py │ │ └── ssh_auth_simple.py │ │ └── web-interface │ │ ├── controllers │ │ └── ssh_auth_simple.py │ │ ├── static │ │ ├── css │ │ │ └── ssh_auth_simple.css │ │ └── js │ │ │ └── custom │ │ │ └── ssh_auth_simple.js │ │ └── views │ │ ├── menu │ │ └── ssh_auth_simple.html │ │ └── ssh_auth_simple │ │ └── ssh_auth_simple.html └── statistics │ ├── dns_statistics │ ├── README.md │ ├── spark │ │ ├── dns_statistics.py │ │ ├── filtered_out_domains.txt │ │ └── modules │ │ │ ├── __init__.py │ │ │ └── kafkaIO.py │ └── web-interface │ │ ├── controllers │ │ └── dns_statistics.py │ │ ├── 
static │ │ ├── css │ │ │ └── dns_statistics.css │ │ └── js │ │ │ └── custom │ │ │ └── dns_statistics.js │ │ └── views │ │ ├── dns_statistics │ │ └── dns_statistics.html │ │ └── menu │ │ └── dns_statistics.html │ ├── hosts_profiling │ ├── README.md │ └── spark │ │ └── host_daily_profile.py │ ├── hosts_statistics │ ├── README.md │ ├── spark │ │ ├── host_stats.py │ │ ├── modules │ │ │ ├── __init__.py │ │ │ └── kafkaIO.py │ │ └── top_n_host_stats.py │ └── web-interface │ │ ├── controllers │ │ └── host_statistics.py │ │ ├── static │ │ ├── css │ │ │ └── host_statistics.css │ │ └── js │ │ │ ├── charts │ │ │ ├── highcharts.data.js │ │ │ ├── highcharts.exporting.js │ │ │ ├── highcharts.js │ │ │ └── highcharts.map.js │ │ │ └── custom │ │ │ └── host_statistics.js │ │ └── views │ │ ├── host_statistics │ │ └── host_statistics.html │ │ └── menu │ │ └── host_statistics.html │ ├── protocols_statistics │ ├── README.md │ └── spark │ │ ├── modules │ │ ├── __init__.py │ │ └── kafkaIO.py │ │ └── protocols_statistics.py │ └── tls_classification │ ├── README.md │ ├── spark │ ├── modules │ │ ├── __init__.py │ │ └── kafkaIO.py │ ├── tls_classification.py │ └── tls_classification_dictionary.csv │ └── web-interface │ ├── controllers │ └── tls_classification_statistics.py │ ├── static │ ├── css │ │ └── tls_classification_statistics.css │ └── js │ │ └── custom │ │ └── tls_classification_statistics.js │ └── views │ ├── menu │ └── tls_classification_statistics.html │ └── tls_classification_statistics │ └── tls_classification_statistics.html ├── images ├── architecture.png └── logo-text-small.png ├── provisioning ├── README.md ├── Vagrantfile ├── ansible │ ├── README.md │ ├── all.yml │ ├── ansible.cfg │ ├── consumer.yml │ ├── group_vars │ │ ├── README.md │ │ ├── all │ │ │ └── main.yml │ │ ├── consumer │ │ │ └── main.yml │ │ ├── producer │ │ │ └── main.yml │ │ ├── sparkMaster │ │ │ └── main.yml │ │ └── sparkSlave │ │ │ └── main.yml │ ├── inventory.ini.example │ ├── producer.yml │ ├── roles │ │ ├── README.md │ │ ├── applications │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ ├── dependencies.yml │ │ │ │ ├── install.yml │ │ │ │ └── main.yml │ │ ├── common │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── hosts.yml │ │ │ │ ├── java-oracle.yml │ │ │ │ ├── main.yml │ │ │ │ └── users.yml │ │ │ └── templates │ │ │ │ └── settings.xml.j2 │ │ ├── elk │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── config.yml │ │ │ │ ├── install.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── curator.yml │ │ │ │ ├── curatorAction.yml │ │ │ │ ├── elasticsearch_elasticsearch.yml.j2 │ │ │ │ ├── elasticsearch_logging.yml.j2 │ │ │ │ ├── kibana.yml.j2 │ │ │ │ ├── logstash_kafka-to-elastic.conf.j2 │ │ │ │ └── logstash_templates_spark-elasticsearch-template.json.j2 │ │ ├── get-vars │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── ipfixcol │ │ │ ├── files │ │ │ │ └── ipfixcol.service │ │ │ ├── tasks │ │ │ │ ├── base.yml │ │ │ │ ├── dependencies.yml │ │ │ │ ├── ipfix-elements.yml │ │ │ │ ├── libfastbit.yml │ │ │ │ ├── main.yml │ │ │ │ ├── start.yml │ │ │ │ ├── storage-plugins.yml │ │ │ │ └── tools.yml │ │ │ ├── templates │ │ │ │ ├── ipfixcol.conf.j2 │ │ │ │ ├── ipfixcol.j2 │ │ │ │ ├── startup.xml.tcp.j2 │ │ │ │ └── startup.xml.udp.j2 │ │ │ └── vars │ │ │ │ ├── main.yml │ │ │ │ ├── packages.Debian-9.yml │ │ │ │ ├── packages.Ubuntu-18.yml │ │ │ │ ├── packages.apt.yml │ │ │ │ └── packages.yum.yml │ │ ├── kafka │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── install.yml │ │ │ │ ├── 
main.yml │ │ │ │ └── start.yml │ │ │ └── templates │ │ │ │ ├── kafka-broker.j2 │ │ │ │ ├── kafka-broker.service.j2 │ │ │ │ ├── kafka-server-start.sh.j2 │ │ │ │ └── kafka-server.properties.j2 │ │ ├── spark │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── dependencies.yml │ │ │ │ ├── download_locally.yml │ │ │ │ ├── install.yml │ │ │ │ ├── main.yml │ │ │ │ ├── prepareMaster.yml │ │ │ │ └── service.yml │ │ │ └── templates │ │ │ │ ├── run-application.sh.j2 │ │ │ │ ├── spark-defaults.conf.j2 │ │ │ │ ├── spark-master.service.j2 │ │ │ │ └── spark-slave.service.j2 │ │ ├── ubuntu-systemd-normalizer │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── deploy.yml │ │ │ │ ├── main.yml │ │ │ │ └── users.yml │ │ │ ├── templates │ │ │ │ └── tcpnormalizer@.service.j2 │ │ │ └── vars │ │ │ │ └── main.yml │ │ └── web │ │ │ ├── defaults │ │ │ └── main.yml │ │ │ ├── main.yml │ │ │ ├── tasks │ │ │ ├── dependencies.yml │ │ │ ├── install_webapp.yml │ │ │ ├── main.yml │ │ │ └── setup.yml │ │ │ └── templates │ │ │ ├── chgpasswd.py.j2 │ │ │ └── web2py.conf.j2 │ ├── site.yml │ ├── sparkMaster.yml │ ├── sparkSlave.yml │ └── tmp │ │ └── sparkMaster.pub ├── configuration.yml └── test │ ├── README.md │ └── integration │ ├── README.md │ ├── integration-test.yml │ ├── inventory.ini.example │ └── roles │ └── integration-test │ ├── defaults │ └── main.yml │ ├── files │ └── test-data.ipfix │ ├── tasks │ ├── main.yml │ ├── prepareMaster.yml │ ├── prepareProducer.yml │ └── query.yml │ └── templates │ └── query_for_data.py └── web-interface ├── README.md ├── Stream4Flow ├── __init__.py ├── controllers │ ├── default.py │ ├── protocols_statistics.py │ └── system.py ├── cron │ ├── crontab │ └── crontab.example ├── databases │ └── stream4flow.sqlite ├── languages │ ├── cs.py │ ├── default.py │ ├── plural-cs.py │ └── plural-en.py ├── models │ ├── db.py │ └── sessions.py ├── modules │ ├── __init__.py │ └── global_functions.py ├── private │ └── appconfig.ini ├── static │ ├── 403.html │ ├── 404.html │ ├── 500.html │ ├── css │ │ ├── animate.css │ │ ├── bootstrap-table.min.css │ │ ├── bootstrap.css │ │ ├── clndr.css │ │ ├── custom.css │ │ ├── font-awesome.css │ │ ├── jquery.datetimepicker.min.css │ │ ├── jqvmap.css │ │ └── style.css │ ├── fonts │ │ ├── FontAwesome.otf │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.svg │ │ ├── fontawesome-webfont.ttf │ │ ├── fontawesome-webfont.woff │ │ ├── fontawesome-webfont.woff2 │ │ ├── glyphicons-halflings-regular.woff │ │ └── glyphicons-halflings-regular.woff2 │ ├── images │ │ ├── architecture.png │ │ ├── favicon.ico │ │ ├── logo_small.png │ │ └── logo_small_lines.png │ └── js │ │ ├── charts │ │ ├── custom.js │ │ └── zingchart.min.js │ │ ├── common │ │ ├── bootstrap-table.min.js │ │ ├── bootstrap.js │ │ ├── classie.js │ │ ├── clndr.js │ │ ├── jquery-1.11.1.min.js │ │ ├── jquery-3.1.0.min.js │ │ ├── jquery.circlechart.js │ │ ├── jquery.datetimepicker.full.min.js │ │ ├── jquery.nicescroll.min.js │ │ ├── jquery.vmap.js │ │ ├── jquery.vmap.sampledata.js │ │ ├── jquery.vmap.world.js │ │ ├── metisMenu.min.js │ │ ├── modernizr.custom.js │ │ ├── moment-2.2.1.js │ │ ├── underscore-min.js │ │ ├── validator.min.js │ │ └── wow.min.js │ │ ├── custom │ │ ├── datetime_interval.js │ │ ├── default.js │ │ └── protocols_statistics.js │ │ └── template │ │ ├── custom.js │ │ ├── scripts.js │ │ ├── site.js │ │ └── skycons.js └── views │ ├── default │ └── index.html │ ├── error.html │ ├── layout.html │ ├── logo.html │ ├── menu.html │ ├── menu │ └── protocols_statistics.html │ 
├── protocols_statistics │ └── protocols_statistics.html │ └── system │ ├── about.html │ └── users_management.html └── routes.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Masaryk University 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /applications/README.md: -------------------------------------------------------------------------------- 1 | ## Applications for Stream4Flow 2 | 3 | Applications for real-time network traffic analysis. 4 | 5 | ### Detection 6 | 7 | Applications aimed at network attack detection using network flows. 8 | 9 | ### Statistics 10 | 11 | Applications for computing statistics from network traffic. 12 | 13 | ### Application Template 14 | 15 | A template for creating new applications that uses the provided modules and Spark operations, with the possibility of adding further modules. -------------------------------------------------------------------------------- /applications/application_template/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/application_template/modules/__init__.py -------------------------------------------------------------------------------- /applications/detection/README.md: -------------------------------------------------------------------------------- 1 | ## Detection methods 2 | 3 | This folder contains applications for various detection methods. 4 | 5 | ### Usage 6 | 7 | 1. Copy the application folder to a machine running the Spark Master into `/home/<username>/applications/` (the default username is **spark**) 2. Run an individual application by executing `/home/<username>/applications/run-application.sh` with the parameters given in the README of that application -------------------------------------------------------------------------------- /applications/detection/ddos/README.md: -------------------------------------------------------------------------------- 1 | ## DDoS Detection 2 | 3 | ### Description 4 | A method for detection of DoS/DDoS attacks based on an evaluation of the incoming/outgoing packet volume ratio and its variance from the long-term ratio.
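To make the ratio idea above concrete, here is a minimal, self-contained Python sketch of the core logic only. It is not the repository's Spark-based `detection_ddos.py`; the flow-record field names, the monitored prefix, and the deviation threshold are illustrative assumptions:

```python
# Minimal illustration of the incoming/outgoing packet-ratio idea (not detection_ddos.py).
# Assumption: each flow record is a dict with 'src_ip', 'dst_ip' and 'packets' keys.
from collections import defaultdict


def packet_ratios(flows, monitored_prefix="10.10."):
    """Return {host: incoming/outgoing packet ratio} for hosts in the monitored prefix."""
    incoming = defaultdict(int)
    outgoing = defaultdict(int)
    for flow in flows:
        if flow["dst_ip"].startswith(monitored_prefix):
            incoming[flow["dst_ip"]] += flow["packets"]
        if flow["src_ip"].startswith(monitored_prefix):
            outgoing[flow["src_ip"]] += flow["packets"]
    # Avoid division by zero for hosts that only receive traffic in this window
    return {host: incoming[host] / float(outgoing[host] or 1) for host in incoming}


def detect(current_ratios, long_term_ratios, deviation_threshold=3.0):
    """Flag hosts whose current ratio exceeds the long-term ratio by the given factor."""
    return [host for host, ratio in current_ratios.items()
            if ratio > deviation_threshold * long_term_ratios.get(host, 1.0)]


# Example: one observation window with a strongly one-sided packet volume
window = [
    {"src_ip": "192.0.2.7", "dst_ip": "10.10.0.5", "packets": 900},
    {"src_ip": "10.10.0.5", "dst_ip": "192.0.2.7", "packets": 40},
]
print(detect(packet_ratios(window), long_term_ratios={}))  # -> ['10.10.0.5']
```

The repository's application runs this kind of per-host comparison on windowed data read from Kafka via Spark Streaming; the sketch only illustrates the arithmetic behind the decision, not the streaming pipeline.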
5 | 6 | ### Usage: 7 | - General 8 | `detection_ddos.py -iz <input-zookeeper>:<port> -it <input-topic> -oz <output-zookeeper>:<port> -ot <output-topic> -nf <regular expression of the monitored network range>` 9 | 10 | - Stream4Flow example (using network range 10.10.0.0/16) 11 | `/home/spark/applications/run-application.sh /home/spark/applications/detection/ddos/detection_ddos.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -nf "10\.10\..+"` -------------------------------------------------------------------------------- /applications/detection/dns_external_resolvers/README.md: -------------------------------------------------------------------------------- 1 | ## External DNS Servers Usage Detection 2 | 3 | ### Description 4 | A method for detecting the usage of external DNS resolvers in the specified local network. 5 | 6 | ### Usage: 7 | - General 8 | `dns_external_resolvers.py -iz <input-zookeeper>:<port> -it <input-topic> 9 | -oz <output-zookeeper>:<port> -ot <output-topic> -ln <local network>/<subnet mask>` 10 | 11 | - Stream4Flow example 12 | `/home/spark/applications/run-application.sh ./detection/dns_external_resolvers/spark/dns_external_resolvers.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -ln 10.10.0.0/16` -------------------------------------------------------------------------------- /applications/detection/dns_external_resolvers/spark/modules/DNSResponseConverter.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # 4 | # MIT License 5 | # 6 | # Copyright (c) 2016 Tomas Pavuk <433592@mail.muni.cz>, Institute of Computer Science, Masaryk University 7 | # 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | # 15 | # The above copyright notice and this permission notice shall be included in all 16 | # copies or substantial portions of the Software. 17 | # 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE.
25 | # 26 | 27 | from netaddr import IPAddress # IP address conversion 28 | 29 | 30 | def convert_dns_rdata(dnsr_data, dnsr_data_type): 31 | """ 32 | Checks the data type version and calls correct parsing function 33 | 34 | :param dnsr_data: Data to parse 35 | :param dnsr_data_type: Type of the data 36 | :return: Parsed data 37 | """ 38 | if dnsr_data_type == 1: 39 | return convert_ipv4(dnsr_data) 40 | if dnsr_data_type == 28: 41 | return convert_ipv6(dnsr_data) 42 | return convert_string(dnsr_data) 43 | 44 | 45 | def convert_ipv4(data_to_convert): 46 | """ 47 | Parse IPv4 address from byte array 48 | 49 | :param data_to_convert: Input byte array 50 | :return: Parsed IPv4 address 51 | """ 52 | return str(IPAddress(int(data_to_convert[:10], 16))) 53 | 54 | 55 | def convert_ipv6(data_to_convert): 56 | """ 57 | Parse IPv6 address from byte array 58 | 59 | :param data_to_convert: Input byte array 60 | :return: Parsed IPv6 address 61 | """ 62 | return str(IPAddress(int(data_to_convert[:34], 16))) 63 | 64 | 65 | def convert_string(data_to_convert): 66 | """ 67 | Parse ASCII string from byte array 68 | 69 | :param data_to_convert: Input byte array 70 | :return: Parsed string 71 | """ 72 | return data_to_convert[2:].decode('hex') 73 | -------------------------------------------------------------------------------- /applications/detection/dns_external_resolvers/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/dns_external_resolvers/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/detection/dns_external_resolvers/web-interface/static/css/dns_external_resolvers.css: -------------------------------------------------------------------------------- 1 | /* DNS Statistics */ 2 | .chart-dns-external-top .chart-status, .zingchart { 3 | height: 400px; 4 | } 5 | .chart-status { 6 | height: 300px; 7 | } 8 | #table-dns-external { 9 | margin-top: -40px; 10 | padding-left: 15px; 11 | padding-right: 15px; 12 | } 13 | .table-status { 14 | text-align: center; 15 | color: #777777; 16 | width: 100%; 17 | margin-top: 170px; 18 | height: 300px; 19 | } 20 | .table-status span { 21 | position: relative; 22 | top: -4px; 23 | font-size: 20px; 24 | font-weight: 600; 25 | margin-left: 0.5em; 26 | } 27 | #load-all-charts-button { 28 | margin-right: 20px; 29 | } -------------------------------------------------------------------------------- /applications/detection/dns_external_resolvers/web-interface/views/menu/dns_external_resolvers.html: -------------------------------------------------------------------------------- 1 | 2 |
  • 3 | DNS External Resolvers 4 |
  • 5 | -------------------------------------------------------------------------------- /applications/detection/dns_open_resolvers/README.md: -------------------------------------------------------------------------------- 1 | ## Open DNS Servers Usage Detection 2 | 3 | ### Description 4 | A method for a detection of open dns resolvers usage in the specified local network. 5 | 6 | ### Usage: 7 | - General 8 | `dns_open_resolvers.py -iz : -it 9 | -oz : -ot -ln /` 10 | 11 | - Stream4Flow example 12 | `/home/spark/applications/run-application.sh ./detection/dns_open_resolvers/spark/dns_open_resolvers.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -ln 10.10.0.0/16` 13 | -------------------------------------------------------------------------------- /applications/detection/dns_open_resolvers/spark/modules/DNSResponseConverter.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # 4 | # MIT License 5 | # 6 | # Copyright (c) 2016 Tomas Pavuk <433592@mail.muni.cz>, Institute of Computer Science, Masaryk University 7 | # 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | # 15 | # The above copyright notice and this permission notice shall be included in all 16 | # copies or substantial portions of the Software. 17 | # 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 
25 | # 26 | 27 | from netaddr import IPAddress # IP address conversion 28 | 29 | 30 | def convert_dns_rdata(dnsr_data, dnsr_data_type): 31 | """ 32 | Checks the data type version and calls correct parsing function 33 | 34 | :param dnsr_data: Data to parse 35 | :param dnsr_data_type: Type of the data 36 | :return: Parsed data 37 | """ 38 | if dnsr_data_type == 1: 39 | return convert_ipv4(dnsr_data) 40 | if dnsr_data_type == 28: 41 | return convert_ipv6(dnsr_data) 42 | return convert_string(dnsr_data) 43 | 44 | 45 | def convert_ipv4(data_to_convert): 46 | """ 47 | Parse IPv4 address from byte array 48 | 49 | :param data_to_convert: Input byte array 50 | :return: Parsed IPv4 address 51 | """ 52 | return str(IPAddress(int(data_to_convert[:10], 16))) 53 | 54 | 55 | def convert_ipv6(data_to_convert): 56 | """ 57 | Parse IPv6 address from byte array 58 | 59 | :param data_to_convert: Input byte array 60 | :return: Parsed IPv6 address 61 | """ 62 | return str(IPAddress(int(data_to_convert[:34], 16))) 63 | 64 | 65 | def convert_string(data_to_convert): 66 | """ 67 | Parse ASCII string from byte array 68 | 69 | :param data_to_convert: Input byte array 70 | :return: Parsed string 71 | """ 72 | return data_to_convert[2:].decode('hex').replace("\x00", "") 73 | -------------------------------------------------------------------------------- /applications/detection/dns_open_resolvers/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/dns_open_resolvers/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/detection/dns_open_resolvers/spark/whitelisted_domains.txt: -------------------------------------------------------------------------------- 1 | google.com -------------------------------------------------------------------------------- /applications/detection/dns_open_resolvers/spark/whitelisted_networks.txt: -------------------------------------------------------------------------------- 1 | 192.168.0.0/16 -------------------------------------------------------------------------------- /applications/detection/dns_open_resolvers/web-interface/static/css/dns_open_resolvers.css: -------------------------------------------------------------------------------- 1 | /* DNS Statistics */ 2 | .chart-status { 3 | height: 300px; 4 | } 5 | #table-dns-open { 6 | margin-top: -40px; 7 | padding-left: 15px; 8 | padding-right: 15px; 9 | } 10 | .table-status { 11 | text-align: center; 12 | color: #777777; 13 | width: 100%; 14 | margin-top: 170px; 15 | height: 300px; 16 | } 17 | .table-status span { 18 | position: relative; 19 | top: -4px; 20 | font-size: 20px; 21 | font-weight: 600; 22 | margin-left: 0.5em; 23 | } 24 | #load-all-charts-button { 25 | margin-right: 20px; 26 | } -------------------------------------------------------------------------------- /applications/detection/dns_open_resolvers/web-interface/views/menu/dns_open_resolvers.html: -------------------------------------------------------------------------------- 1 |
  • 2 | DNS Open Resolvers 3 |
  • -------------------------------------------------------------------------------- /applications/detection/pattern_finder/README.md: -------------------------------------------------------------------------------- 1 | ## Detection of Patterns in IP Flow Data 2 | 3 | ### Description 4 | A highly flexible, easily extensible and modular application, capable of analyzing IP flow data, and comparing known patterns with real measurements in real time. 5 | 6 | 7 | ### Application Usage 8 | 9 | - General 10 | `./pattern_finder.py -iz : -it -oz : -ot -c ` 11 | 12 | - Stream4Flow example 13 | `/home/spark/applications/run-application.sh /home/spark/applications/detection/pattern_finder/spark/pattern_finder.py -iz producer:2181 -it ipfix.entry -oz producer:9092 14 | -ot app.pattern-finder -c configuration.yml` 15 | 16 | 17 | ### Additional Data 18 | 19 | You can find additional materials for detection of authentication attacks on SSH service in [additional_data](additional_data/SSH_authentication_attack_detection) directory. 20 | -------------------------------------------------------------------------------- /applications/detection/pattern_finder/additional_data/README.md: -------------------------------------------------------------------------------- 1 | # Additional Data 2 | 3 | Directory containing all additional data for the application (e.g., datasets or specific configuration files). 4 | -------------------------------------------------------------------------------- /applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/configuration.yml: -------------------------------------------------------------------------------- 1 | # 2 | # MIT License 3 | # 4 | # Copyright (c) 2018 Milan Cermak , Institute of Computer Science, Masaryk University 5 | # 6 | # Permission is hereby granted, free of charge, to any person obtaining a copy 7 | # of this software and associated documentation files (the "Software"), to deal 8 | # in the Software without restriction, including without limitation the rights 9 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | # copies of the Software, and to permit persons to whom the Software is 11 | # furnished to do so, subject to the following conditions: 12 | # 13 | # The above copyright notice and this permission notice shall be included in all 14 | # copies or substantial portions of the Software. 15 | # 16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | # SOFTWARE. 
23 | # 24 | 25 | 26 | # -------------------------------------- Common Application Settings -------------------------------------- # 27 | configuration: 28 | name: SSH Brute-force Attack Detection 29 | window: 300 30 | slice: 5 31 | 32 | 33 | # -------------------------------------- Input Data Filter ----------------------------------------------- # 34 | filter: 35 | - element_names: 36 | - ipfix.sourceIPv4Address 37 | - ipfix.destinationIPv4Address 38 | type: exists 39 | - element_names: 40 | - ipfix.sourceTransportPort 41 | - ipfix.destinationTransportPort 42 | type: int 43 | values: 44 | - 22 45 | - element_names: 46 | - ipfix.protocolIdentifier 47 | type: int 48 | values: 49 | - 6 50 | 51 | # -------------------------------------- Vector Definition ------------------------------------------------ # 52 | vectors: 53 | key: 54 | type: biflow 55 | elements: 56 | src_ip: ipfix.sourceIPv4Address 57 | dst_ip: ipfix.destinationIPv4Address 58 | src_port: ipfix.sourceTransportPort 59 | dst_port: ipfix.destinationTransportPort 60 | flow_start: ipfix.flowStartMilliseconds 61 | time_difference: 500 62 | values: 63 | - type: element 64 | element: ipfix.packetDeltaCount 65 | - type: element 66 | element: ipfix.octetDeltaCount 67 | - type: direct 68 | value: 1 69 | - type: operation 70 | operator: sub 71 | elements: 72 | - ipfix.flowEndMilliseconds 73 | - ipfix.flowStartMilliseconds 74 | 75 | 76 | # -------------------------------------- Additional Output Fields ----------------------------------------- # 77 | output: 78 | - name: src_ip 79 | element: ipfix.sourceIPv4Address 80 | type: request 81 | - name: dst_ip 82 | element: ipfix.destinationIPv4Address 83 | type: request 84 | 85 | 86 | # -------------------------------------- Distance Function and Pattern Definition ------------------------- # 87 | distance: 88 | distance_module: biflow_quadratic_form 89 | patterns: 90 | - name: hydra 91 | request: [16, 1973, 11959.5] 92 | response: [25, 3171, 11959.5] 93 | - name: medusa 94 | request: [18, 2528, 6079] 95 | response: [25, 3715, 6079] 96 | - name: ncrack-1 97 | request: [13, 2860, 2549.5] 98 | response: [14, 2103, 2548.5] 99 | - name: ncrack-2 100 | request: [16, 3340, 10050] 101 | response: [21, 2675, 10048] 102 | distribution: 103 | default: 104 | intervals: [0, 2, 3, 4, 5, 7] 105 | weights: [3, 2, 1, 1, 2, 3] 106 | limit: 7 107 | -------------------------------------------------------------------------------- /applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/dataset/README.md: -------------------------------------------------------------------------------- 1 | # SSH Dictionary Attacks 2 | 3 | Annotated units of SSH dictionary attack performed by `medusa`, `hydra` and `ncrack` tools. 
4 | 5 | ## Hydra 8.4 ([hydra.zip](hydra.zip)) 6 | 7 | Webpage: https://www.thc.org/thc-hydra/ 8 | 9 | **Annotated units:** 10 | - hydra-1_tasks.pcap 11 | - _command:_ `$ ./hydra -l user -x "1:5:a" -t 1 ssh://10.0.0.3/` 12 | - _attacker:_ 240.0.1.2 13 | - _defender:_ 240.125.0.2 14 | - hydra-4_tasks.pcap 15 | - _command:_ `$ ./hydra -l user -x "1:5:a" -t 4 ssh://10.0.0.3/` 16 | - _attacker:_ 240.0.1.3 17 | - _defender:_240.125.0.2 18 | - hydra-8_tasks.pcap 19 | - _command:_ `$ ./hydra -l user -x "1:5:a" -t 8 ssh://10.0.0.3/` 20 | - _attacker:_ 240.0.1.4 21 | - _defender:_ 240.125.0.2 22 | - hydra-16_tasks.pcap 23 | - _command:_ `$ ./hydra -l user -x "1:5:a" -t 16 ssh://10.0.0.3/` 24 | - _attacker:_ 240.0.1.5 25 | - _defender:_ 240.125.0.2 26 | - hydra-24_tasks.pcap 27 | - _command:_ `$ ./hydra -l user -x "1:5:a" -t 24 ssh://10.0.0.3/` 28 | - _attacker:_ 240.0.1.6 29 | - _defender:_ 240.125.0.2 30 | 31 | 32 | ## Medusa 2.2 ([medusa.zip](medusa.zip)) 33 | 34 | Webpage: http://foofus.net/goons/jmk/medusa/medusa.html 35 | 36 | **Annotated units:** 37 | - medusa-1_tasks.pcap 38 | - _command:_ `$ medusa -M ssh -u user -P -h 10.0.0.3 -t 1` 39 | - _attacker:_ 240.0.2.2 40 | - _defender:_ 240.125.0.2 41 | - medusa-4_tasks.pcap 42 | - _command:_ `$ medusa -M ssh -u user -P -h 10.0.0.3 -t 4` 43 | - _attacker:_ 240.0.2.3 44 | - _defender:_ 240.125.0.2 45 | - medusa-8_tasks.pcap 46 | - _command:_ `$ medusa -M ssh -u user -P -h 10.0.0.3 -t 8` 47 | - _attacker:_ 240.0.2.4 48 | - _defender:_ 240.125.0.2 49 | - medusa-16_tasks.pcap 50 | - _command:_ `$ medusa -M ssh -u user -P -h 10.0.0.3 -t 16` 51 | - _attacker:_ 240.0.2.5 52 | - _defender:_ 240.125.0.2 53 | - medusa-24_tasks.pcap 54 | - _command:_ `$ medusa -M ssh -u user -P -h 10.0.0.3 -t 24` 55 | - _attacker:_ 240.0.2.6 56 | - _defender:_ 240.125.0.2 57 | 58 | 59 | ## Ncrack 0.5 ([ncrack.zip](ncrack.zip)) 60 | 61 | Webpage: https://nmap.org/ncrack/ 62 | 63 | **Annotated units:** 64 | - ncrack-paranoid.pcap 65 | - _command:_ `$ ncrack --user user1,user2,user3 10.0.0.3:22 -T paranoid` 66 | - _attacker:_ 240.0.3.2 67 | - _defender:_ 240.125.0.2 68 | - ncrack-sneaky.pcap 69 | - _command:_ `$ ncrack --user user1,user2,user3 10.0.0.3:22 -T sneaky` 70 | - _attacker:_ 240.0.3.3 71 | - _defender:_ 240.125.0.2 72 | - ncrack-polite.pcap 73 | - _command:_ `$ ncrack --user user1,user2,user3 10.0.0.3:22 -T polite` 74 | - _attacker:_ 240.0.3.4 75 | - _defender:_ 240.125.0.2 76 | - ncrack-normal.pcap 77 | - _command:_ `$ ncrack --user user1,user2,user3 10.0.0.3:22 -T normal` 78 | - _attacker:_ 240.0.3.5 79 | - _defender:_ 240.125.0.2 80 | - ncrack-aggressive.pcap 81 | - _command:_ `$ ncrack --user user1,user2,user3 10.0.0.3:22 -T aggressive` 82 | - _attacker:_ 240.0.3.6 83 | - _defender:_ 240.125.0.2 84 | -------------------------------------------------------------------------------- /applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/dataset/hydra.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/dataset/hydra.zip -------------------------------------------------------------------------------- /applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/dataset/medusa.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/dataset/medusa.zip -------------------------------------------------------------------------------- /applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/dataset/ncrack.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/pattern_finder/additional_data/SSH_authentication_attack_detection/dataset/ncrack.zip -------------------------------------------------------------------------------- /applications/detection/pattern_finder/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/pattern_finder/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/detection/pattern_finder/spark/modules/distance_functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/pattern_finder/spark/modules/distance_functions/__init__.py -------------------------------------------------------------------------------- /applications/detection/pattern_finder/spark/modules/distance_functions/biflow_quadratic_form.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # 4 | # MIT License 5 | # 6 | # Copyright (c) 2018 Milan Cermak , Institute of Computer Science, Masaryk University 7 | # 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | # 15 | # The above copyright notice and this permission notice shall be included in all 16 | # copies or substantial portions of the Software. 17 | # 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 25 | # 26 | 27 | import math # Mathematical functions 28 | 29 | 30 | def get_distance(vector, configuration): 31 | """ 32 | Compute Quadratic form distance of given vector and patterns specified in the configuration. 
33 | 34 | :param vector: vector that is compared with patterns specified in configuration 35 | :param configuration: loaded application configuration 36 | :return: Dictionary of pattern names and its distances from given vector 37 | """ 38 | distances = {} 39 | for pattern in configuration['distance']['patterns']: 40 | request_distance = sum([((v - p) / p) ** 2 for v, p in zip(vector['vector']['request'], pattern['request'])]) 41 | response_distance = sum([((v - p) / p) ** 2 for v, p in zip(vector['vector']['response'], pattern['response'])]) 42 | distance = math.sqrt(request_distance + response_distance) 43 | distances[pattern['name']] = distance 44 | return {'distances': distances} 45 | -------------------------------------------------------------------------------- /applications/detection/pattern_finder/spark/modules/distance_functions/simple_quadratic_form.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # 4 | # MIT License 5 | # 6 | # Copyright (c) 2016 Milan Cermak , Institute of Computer Science, Masaryk University 7 | # 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | # 15 | # The above copyright notice and this permission notice shall be included in all 16 | # copies or substantial portions of the Software. 17 | # 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 25 | # 26 | 27 | import math # Mathematical functions 28 | 29 | 30 | def get_distance(vector, configuration): 31 | """ 32 | Compute Quadratic form distance of given vector and patterns specified in the configuration. 
33 | 34 | :param vector: vector that is compared with patterns specified in configuration 35 | :param configuration: loaded application configuration 36 | :return: Dictionary of pattern names and its distances from given vector 37 | """ 38 | distances = {} 39 | for pattern in configuration['distance']['patterns']: 40 | distances[pattern['name']] = math.sqrt(sum([((v - p) / float(p)) ** 2 if p else float("inf") for v, p in zip(vector['vector'], pattern['vector'])])) 41 | return {'distances': distances} 42 | -------------------------------------------------------------------------------- /applications/detection/pattern_finder/spark/modules/vector_definition/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/pattern_finder/spark/modules/vector_definition/__init__.py -------------------------------------------------------------------------------- /applications/detection/pattern_finder/spark/modules/vector_definition/element_entropy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # 4 | # MIT License 5 | # 6 | # Copyright (c) 2018 Milan Cermak , Institute of Computer Science, Masaryk University 7 | # 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | # 15 | # The above copyright notice and this permission notice shall be included in all 16 | # copies or substantial portions of the Software. 17 | # 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 25 | # 26 | 27 | import math 28 | 29 | 30 | def element_entropy(value): 31 | """ 32 | Compute entropy of given element string. 
33 | 34 | :param value: String value to compute entropy 35 | :return: Computed entropy of a given string 36 | """ 37 | # Get probability of chars in string 38 | prob = [ float(value.count(c)) / len(value) for c in dict.fromkeys(list(value)) ] 39 | # Calculate and return the entropy 40 | return - sum([p * math.log(p) / math.log(2.0) for p in prob]) 41 | -------------------------------------------------------------------------------- /applications/detection/pattern_finder/web-interface/static/css/pattern_finder.css: -------------------------------------------------------------------------------- 1 | /* Pattern Finder */ 2 | #filter { 3 | width: 120px; 4 | } 5 | .chart-pattern-finder-top .chart-status, .chart-pattern-finder-top .zingchart { 6 | height: 300px; 7 | } 8 | #table-pattern-finder{ 9 | margin-top: -40px; 10 | padding-left: 15px; 11 | padding-right: 15px; 12 | } 13 | .table-status { 14 | text-align: center; 15 | color: #777777; 16 | width: 100%; 17 | } 18 | .table-status span { 19 | position: relative; 20 | top: -4px; 21 | font-size: 20px; 22 | font-weight: 600; 23 | margin-left: 0.5em; 24 | } -------------------------------------------------------------------------------- /applications/detection/pattern_finder/web-interface/views/menu/pattern_finder.html: -------------------------------------------------------------------------------- 1 |
  • 2 | PatternFinder 3 |
  • -------------------------------------------------------------------------------- /applications/detection/ports_scan/README.md: -------------------------------------------------------------------------------- 1 | ## Horizontal and Vertical TCP Ports Scan Detection 2 | 3 | ### Description 4 | A method for a detection of horizontal and vertical TCP ports scans on the network using adjustable threshold for number of 5 | flows. Default values are: window size = 60, min amount of flows = 20. 6 | 7 | ### Usage: 8 | - General 9 | `ports_scan.py -iz : -it -oh :` 10 | 11 | - Stream4Flow example 12 | `/home/spark/applications/run-application.sh /home/spark/applications/detection/ports_scan/spark/ports_scan.py 13 | -iz producer:2181 -it ipfix.entry -oh consumer:20101` -------------------------------------------------------------------------------- /applications/detection/ports_scan/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/ports_scan/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/detection/ports_scan/spark/modules/kafkaIO.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2016 Tomas Pavuk <433592@mail.muni.cz>, Institute of Computer Science, Masaryk University 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in all 13 | # copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | # 23 | 24 | import ujson as json # Fast JSON parser 25 | import sys # Common system functions 26 | import os # Common operating system functions 27 | 28 | from pyspark import SparkContext # Spark API 29 | from pyspark.streaming import StreamingContext # Spark streaming API 30 | from pyspark.streaming.kafka import KafkaUtils # Spark streaming Kafka receiver 31 | 32 | from kafka import KafkaProducer # Kafka Python client 33 | 34 | 35 | def initialize_and_parse_input_stream(input_zookeeper, input_topic, microbatch_duration): 36 | """ 37 | Initialize spark context, streaming context, input DStream and parse json from DStream. 
38 | :param input_zookeeper: input zookeeper hostname:port 39 | :param input_topic: input kafka topic 40 | :param microbatch_duration: duration of micro batches in seconds 41 | :return ssc, parsed_stream: initialized streaming context and json with data from DStream 42 | """ 43 | # Application name used as identifier 44 | application_name = os.path.basename(sys.argv[0]) 45 | # Spark context initialization 46 | sc = SparkContext(appName=application_name + " " + " ".join(sys.argv[1:])) # Application name used as the appName 47 | ssc = StreamingContext(sc, microbatch_duration) 48 | 49 | # Initialize input DStream of flows from specified Zookeeper server and Kafka topic 50 | input_stream = KafkaUtils.createStream(ssc, input_zookeeper, "spark-consumer-" + application_name, 51 | {input_topic: 1}) 52 | 53 | # Parse input stream in the json format 54 | parsed_stream = input_stream.map(lambda line: json.loads(line[1])) 55 | 56 | return ssc, parsed_stream 57 | 58 | 59 | def initialize_kafka_producer(output_zookeeper): 60 | """ 61 | Initialize Kafka producer for output. 62 | :param: output_zookeper: output zookeeper hostname:port 63 | """ 64 | # Application name used as identifier 65 | application_name = os.path.basename(sys.argv[0]) 66 | return KafkaProducer(bootstrap_servers=output_zookeeper, client_id="spark-producer-" + application_name) 67 | 68 | 69 | def process_data_and_send_result(processed_input, kafka_producer, output_topic, window_duration, processing_function): 70 | """ 71 | For each RDD in processed_input call the processing function. 72 | :param processed_input: input which is being formatted by processing_function and send to output 73 | :param kafka_producer: producer through which output is sent 74 | :param output_topic: output kafka topic 75 | :param processing_function: function which formats output and calls send_data_to_kafka 76 | """ 77 | # Call the processing function 78 | processed_input.foreachRDD(lambda rdd: processing_function(rdd.collectAsMap(), kafka_producer, output_topic, window_duration)) 79 | 80 | # Send any remaining buffered records 81 | kafka_producer.flush() 82 | 83 | 84 | def send_data_to_kafka(data, producer, topic): 85 | """ 86 | Send given data to the specified kafka topic. 
87 | :param data: data to send 88 | :param producer: producer that sends the data 89 | :param topic: name of the receiving kafka topic 90 | """ 91 | producer.send(topic, str(data)) 92 | 93 | 94 | def spark_start(ssc): 95 | """ 96 | Start Spark streaming context 97 | :param ssc: initialized streaming context 98 | """ 99 | ssc.start() 100 | ssc.awaitTermination() 101 | -------------------------------------------------------------------------------- /applications/detection/ports_scan/web-interface/static/css/ports_scan.css: -------------------------------------------------------------------------------- 1 | /* TCP Ports Scan */ 2 | #filter { 3 | width: 120px; 4 | } 5 | .chart-ports-scan-histogram .chart-status, .chart-ports-scan-histogram .zingchart { 6 | height: 420px; 7 | } 8 | .chart-ports-scan-top .chart-status, .chart-ports-scan-top .zingchart { 9 | height: 300px; 10 | } 11 | #table-ports-scan{ 12 | margin-top: -40px; 13 | padding-left: 15px; 14 | padding-right: 15px; 15 | } 16 | .table-status { 17 | text-align: center; 18 | color: #777777; 19 | width: 100%; 20 | height: 300px; 21 | } 22 | .table-status span { 23 | position: relative; 24 | top: -4px; 25 | font-size: 20px; 26 | font-weight: 600; 27 | margin-left: 0.5em; 28 | } 29 | /* Hack to see the whole watermark */ 30 | #chart-ports-scan-histogram-license-text { 31 | padding-top: 20px !important; 32 | margin-left: -20px !important; 33 | } 34 | #chart-ports-scan-histogram-top { 35 | height: 420px !important; 36 | } -------------------------------------------------------------------------------- /applications/detection/ports_scan/web-interface/views/menu/ports_scan.html: -------------------------------------------------------------------------------- 1 |
  • 2 | TCP Ports Scan 3 |
 • -------------------------------------------------------------------------------- /applications/detection/reflect_ddos/README.md: -------------------------------------------------------------------------------- 1 | ## Reflected DDoS Detection 2 | 3 | ### Description 4 | A method for detection of reflected DoS/DDoS attacks based on an evaluation of the incoming/outgoing byte volume ratio. The detection is aimed at protecting known DNS servers in the local network infrastructure (only DNS traffic over UDP is considered). 5 | 6 | ### Usage: 7 | - General 8 | `detection_reflectddos.py -iz <input-zookeeper>:<port> -it <input-topic> 9 | -oz <output-zookeeper>:<port> -ot <output-topic> -dns <comma-separated list of protected DNS server IPs>` 10 | 11 | - Stream4Flow example (using network range 10.10.0.0/16) 12 | `/home/spark/applications/run-application.sh /home/spark/applications/detection/reflected_ddos/spark/detection_reflectddos.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -dns "10.10.0.1,10.10.0.2"` -------------------------------------------------------------------------------- /applications/detection/ssh_auth_simple/README.md: -------------------------------------------------------------------------------- 1 | ## SSH Authentication Attack Detection 2 | 3 | ### Description 4 | A method for detecting attacks on SSH authentication (brute-force or dictionary) based 5 | on simple thresholds on the number of SSH connections. 6 | 7 | ### Usage: 8 | - General 9 | `ssh_auth_simple.py -iz <input-zookeeper>:<port> -it <input-topic> -oz 10 | <output-zookeeper>:<port> -ot <output-topic>` 11 | 12 | - Stream4Flow example 13 | `/home/spark/applications/run-application.sh /home/spark/applications/detection/ssh_auth_simple/spark/ssh_auth_simple.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output` -------------------------------------------------------------------------------- /applications/detection/ssh_auth_simple/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/detection/ssh_auth_simple/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/detection/ssh_auth_simple/web-interface/static/css/ssh_auth_simple.css: -------------------------------------------------------------------------------- 1 | /* SSH Auth Simple */ 2 | .chart-ssh-simple-histogram .chart-status, .chart-ssh-simple-histogram .zingchart { 3 | height: 420px; 4 | } 5 | #filter { 6 | width: 120px; 7 | } 8 | .chart-ssh-simple-top .chart-status, .chart-ssh-simple-top .zingchart { 9 | height: 300px; 10 | } 11 | #table-ssh-simple{ 12 | margin-top: -40px; 13 | padding-left: 15px; 14 | padding-right: 15px; 15 | } 16 | .table-status { 17 | text-align: center; 18 | color: #777777; 19 | width: 100%; 20 | } 21 | .table-status span { 22 | position: relative; 23 | top: -4px; 24 | font-size: 20px; 25 | font-weight: 600; 26 | margin-left: 0.5em; 27 | } 28 | /* Hack to see the whole watermark */ 29 | #chart-ssh-simple-histogram-license-text { 30 | padding-top: 20px !important; 31 | margin-left: -20px !important; 32 | } 33 | #chart-ssh-simple-histogram-top { 34 | height: 420px !important; 35 | } -------------------------------------------------------------------------------- /applications/detection/ssh_auth_simple/web-interface/views/menu/ssh_auth_simple.html: -------------------------------------------------------------------------------- 1 |
  • 2 | SSH Auth Simple 3 |
  • -------------------------------------------------------------------------------- /applications/statistics/dns_statistics/README.md: -------------------------------------------------------------------------------- 1 | ## DNS statistics 2 | 3 | ### Description 4 | 5 | Counts the following statistics from defined input topic every 20 seconds: 6 | - Record types 7 | - Response codes 8 | - Queried domains by domain name 9 | - Queried domains that do not exist 10 | - Queried external dns servers from local network 11 | - Queried dns servers on local network from the outside network 12 | - Domains with hosts which queried them 13 | 14 | ### Usage: 15 | - General 16 | ` dns_statistics.py -iz : -it -oz : -ot -lc /` 17 | 18 | - To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via Kafka topic. You also need a kafka topic to which output will be sent. 19 | Then you can run the example 20 | `/home/spark/applications/run-application.sh /home/spark/applications/statistics/dns_statistics/dns_statistics.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -lc 10.10.0.0/16` 21 | 22 | 23 | -------------------------------------------------------------------------------- /applications/statistics/dns_statistics/spark/filtered_out_domains.txt: -------------------------------------------------------------------------------- 1 | uribl 2 | spameatingmonkey 3 | spamhaus -------------------------------------------------------------------------------- /applications/statistics/dns_statistics/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/statistics/dns_statistics/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/statistics/dns_statistics/web-interface/static/css/dns_statistics.css: -------------------------------------------------------------------------------- 1 | /* DNS Statistics */ 2 | .chart-dns-stats .chart-status, .chart-dns-stats .zingchart { 3 | height: 300px; 4 | } 5 | .chart-status { 6 | height: 300px; 7 | } 8 | #table-dns-stats { 9 | margin-top: -40px; 10 | padding-left: 15px; 11 | padding-right: 15px; 12 | } 13 | .table-status { 14 | text-align: center; 15 | color: #777777; 16 | width: 100%; 17 | margin-top: 170px; 18 | height: 300px; 19 | } 20 | .table-status span { 21 | position: relative; 22 | top: -4px; 23 | font-size: 20px; 24 | font-weight: 600; 25 | margin-left: 0.5em; 26 | } 27 | #load-all-charts-button { 28 | margin-right: 20px; 29 | } -------------------------------------------------------------------------------- /applications/statistics/dns_statistics/web-interface/views/menu/dns_statistics.html: -------------------------------------------------------------------------------- 1 | 2 |
  • 3 | DNS Statistics 4 |
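The DNS statistics application above is essentially a set of counters evaluated over each 20-second window. A minimal, Spark-free sketch of that counting step follows; the record field names (`dns_qtype`, `dns_rcode`, `dns_qname`) are assumptions for illustration, not the exact keys produced by the IPFIXCol/Kafka pipeline.

```python
# Minimal sketch of per-window DNS statistics counting (illustrative only).
from collections import Counter


def dns_window_statistics(records):
    """Aggregate one 20-second window of parsed DNS records into counters."""
    stats = {
        "record_types": Counter(),
        "response_codes": Counter(),
        "queried_domains": Counter(),
    }
    for record in records:
        stats["record_types"][record.get("dns_qtype", "unknown")] += 1
        stats["response_codes"][record.get("dns_rcode", "unknown")] += 1
        stats["queried_domains"][record.get("dns_qname", "unknown")] += 1
    return stats


if __name__ == "__main__":
    window = [
        {"dns_qtype": "A", "dns_rcode": "NOERROR", "dns_qname": "example.org."},
        {"dns_qtype": "AAAA", "dns_rcode": "NXDOMAIN", "dns_qname": "missing.example."},
    ]
    print(dns_window_statistics(window))
```

The remaining statistics listed above (non-existent domains, external and local resolvers) follow the same pattern, with an additional filter on the response code or on the local network range given by `-lc`.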
  • 5 | -------------------------------------------------------------------------------- /applications/statistics/hosts_profiling/README.md: -------------------------------------------------------------------------------- 1 | ## Host profiling 2 | 3 | ### Description 4 | An application for collecting aggregated characteristics of hosts in a longer period of time, typically 24 hours. 5 | The application produces the temporal summary of activity of every host observed over the longer period of time. 6 | 7 | The aggregation and observation intervals can be set by `-lw` (long window), and `-sw` (short window) params, 8 | defining the duration of **aggregations** (-**short window**) and a duration of **observation** (=**long window**) in seconds. 9 | The long window must be a **multiple** of short window. 10 | 11 | By default, the durations are set to `-lw 86400 -sw 3600` to observe and deliver in a daily interval aggregated in hourly intervals. 12 | 13 | The application outputs the data of host logs with temporal identification as keys. The logs contain the aggregated data delivered 14 | from [host_stats](https://github.com/CSIRT-MU/Stream4Flow/blob/master/applications/statistics/hosts_statistics/spark/host_stats.py) 15 | application: 16 | - **Packets**: for each host a sum of packets transferred in each of small windows 17 | - **Bytes**: for each host a sum of bytes transferred in each of small aggregation windows 18 | - **Flows**: for each host a sum of flows transferred in each of small aggregation windows 19 | 20 | Note that the application requires a running **Kafka** and Spark's **host_stats** application with output zookeeper 21 | and output topic **matching** the input zookeeper and input topic of **this** application. 22 | 23 | In addition, the **host_stats** app is supposed to deliver data in **time interval dividing** the **``-sw``**, otherwise the results 24 | of **host_daily_profile** will be **biased**. 25 | 26 | ### Usage: 27 | After setting up the stream data producer with matching ``-oz `` and `` -ot ``, start the application as follows: 28 | 29 | - General: 30 | 31 | ```commandline 32 | host_daily_profile.py -iz : -it -oz : -ot -sw -lw 33 | ``` 34 | 35 | - Stream4Flow example: 36 | 37 | ```commandline 38 | /home/spark/applications/run-application.sh /home/spark/applications/hosts_profiling/host_daily_profile.py -iz producer:2181 -it host.stats -oz producer:9092 -ot results.daily -sw 3600 -lw 86400 39 | ``` 40 | 41 | ...starts the application with **24h** delivery interval of **1h** statistics aggregations. -------------------------------------------------------------------------------- /applications/statistics/hosts_statistics/README.md: -------------------------------------------------------------------------------- 1 | ## Host statistics 2 | 3 | ### Description 4 | An application for computing statistics for all hosts in network. 
Computed statistics for each host in each window are following: 5 | - **Basic Characteristics**: sum of flows, packets and bytes 6 | - **Port Statistics**: number of distinct destination ports 7 | - **Communication Peers**: number of distinct communication peers 8 | - **Average Flow Duration**: average duration of flows 9 | - **TCP Flags Distribution**: number of each individual TCP flags 10 | 11 | ### Usage 12 | - General: 13 | 14 | ` host_stats.py -iz : -it -oz : -ot --ln -w -m ` 15 | 16 | - Stream4Flow example (using network range 10.10.0.0/16): 17 | 18 | `/home/spark/applications/run-application.sh /home/spark/applications/host_statistics/host_stats.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot host.stats -ln "10.0.0.0/24" -w 10 -m 10` 19 | 20 | 21 | ## Top N Host statistics 22 | 23 | ### Description 24 | An application for collecting the top N characteristics for all hosts, particularly: 25 | 26 | - **Top N destination ports**: ports of the highest number of flows on given ports from each source IP 27 | - **Destination IPs**: destination IP addresses with the highest number of flows from each source IP 28 | - **HTTP Hosts**: destination HTTP addresses with the highest number of flows for each source IP 29 | 30 | ## Usage 31 | - General: 32 | 33 | `top_n_host_stats.py --iz : -it -oz : -ot -n -wd -ws -c 34 | ` 35 | 36 | - Stream4Flow example (using network range 10.10.0.0/16): 37 | 38 | `/run-application.sh statistics/hosts_statistics/spark/application_template.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -n 10.10.0.0/16 -wd 10 -ws 10 -c 10` 39 | -------------------------------------------------------------------------------- /applications/statistics/hosts_statistics/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/statistics/hosts_statistics/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/statistics/hosts_statistics/web-interface/static/css/host_statistics.css: -------------------------------------------------------------------------------- 1 | /* Host Statistics */ 2 | #network { 3 | width: 139px; 4 | } 5 | .close-detailed-panel { 6 | float: right; 7 | margin: 0.9em 1.8em 1em 0; 8 | font-size: 0.8em; 9 | cursor: pointer; 10 | } 11 | .chart-type-selector { 12 | padding-left: 1em; 13 | margin-left: 1.1em; 14 | margin-right: 2em; 15 | } 16 | .chart-status, .zingchart { 17 | height: 420px; 18 | } 19 | .zingchart { 20 | display: none; 21 | } 22 | /* Hack to see the whole zingchart watermark */ 23 | div[id$="-license-text"] { 24 | padding-top: 9px !important; 25 | margin-left: -20px !important; 26 | } 27 | -------------------------------------------------------------------------------- /applications/statistics/hosts_statistics/web-interface/views/host_statistics/host_statistics.html: -------------------------------------------------------------------------------- 1 | {{extend 'layout.html'}} 2 | 3 | 10 | 11 | 12 |

    Host Statistics

    13 | 14 | {{if not session.logged:}} 15 | 18 | {{else:}} 19 | 20 | {{if ('alert_type' in globals()) and alert_type != "":}} 21 | 30 | {{pass}} 31 | 32 | 33 | 34 |
    35 |
    36 |

    Options

    37 |
    38 |
    39 |
    40 |
    41 |
    42 | 43 | 50 |
    51 |
    52 | 53 | 54 |
    55 |
    56 | 57 | 58 |
    59 |
    60 | 61 | 69 |
    70 |
    71 | 72 | 73 |
    74 | 75 |
    76 |
    77 |
    78 |
    79 | 80 | 81 | 82 |
    83 |

    Heatmap of Flows Sum per Host

    84 | 85 |
    86 | 87 |
    88 |
    89 | 90 | 91 | 92 |
    93 |
    94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | {{pass}} 104 | -------------------------------------------------------------------------------- /applications/statistics/hosts_statistics/web-interface/views/menu/host_statistics.html: -------------------------------------------------------------------------------- 1 | 2 |
  • 3 | Host Statistics 4 |
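The per-host characteristics listed in the host statistics README (sums of flows, packets, and bytes, distinct destination ports, distinct communication peers) can be pictured as a single pass over the flows of one window. The sketch below shows that aggregation in plain Python; the flow field names are assumptions, and the real `host_stats.py` performs the equivalent reduction on Spark Streaming windows configured by `-w`.

```python
# Illustrative per-host aggregation of flows, packets, bytes, ports and peers.
# Flow field names are assumptions made for this example.
from collections import defaultdict


def aggregate_hosts(flows):
    hosts = defaultdict(lambda: {"flows": 0, "packets": 0, "bytes": 0,
                                 "dst_ports": set(), "peers": set()})
    for flow in flows:
        host = hosts[flow["src_ip"]]
        host["flows"] += 1
        host["packets"] += flow.get("packets", 0)
        host["bytes"] += flow.get("bytes", 0)
        host["dst_ports"].add(flow.get("dst_port"))
        host["peers"].add(flow.get("dst_ip"))
    # Convert the sets to counts so the result is JSON-serialisable.
    return {ip: {**h, "dst_ports": len(h["dst_ports"]), "peers": len(h["peers"])}
            for ip, h in hosts.items()}


if __name__ == "__main__":
    sample = [{"src_ip": "10.10.0.5", "dst_ip": "10.10.0.9", "dst_port": 80,
               "packets": 10, "bytes": 840},
              {"src_ip": "10.10.0.5", "dst_ip": "10.10.0.7", "dst_port": 443,
               "packets": 4, "bytes": 512}]
    print(aggregate_hosts(sample))
```

The TCP flags distribution and average flow duration from the same README are computed analogously, by summing flag counters and durations per source IP within the window.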
• -------------------------------------------------------------------------------- /applications/statistics/protocols_statistics/README.md: -------------------------------------------------------------------------------- 1 | ## Protocol statistics 2 | 3 | ### Description 4 | 5 | Counts the number of flows, packets, and bytes for TCP, UDP, and other flows received from Kafka every 10 seconds. Template application for application developers. 6 | 7 | ### Usage: 8 | - General 9 | ` protocols_statistics.py -iz : -it -oz : -ot ` 10 | 11 | - To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via a Kafka topic. Then 12 | you can run the example 13 | `/home/spark/applications/run-application.sh /home/spark/applications/statistics/protocols_statistics/spark/protocols_statistics.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output` 14 | 15 | 16 | -------------------------------------------------------------------------------- /applications/statistics/protocols_statistics/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/statistics/protocols_statistics/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/statistics/tls_classification/README.md: -------------------------------------------------------------------------------- 1 | ## TLS Classification Statistics 2 | 3 | ### Description 4 | 5 | Classifies TLS clients and counts the following statistics from the defined input topic every 30 seconds: 6 | - Operating system 7 | - Browser type 8 | - Device type and application 9 | 10 | ### Usage: 11 | - General 12 | ` tls_classification.py -iz : -it -oz : -ot -d ` 13 | 14 | - To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via a Kafka topic. You also need a Kafka topic to which output will be sent. 15 | Then you can run the example 16 | `/home/spark/applications/run-application.sh /home/spark/applications/statistics/tls_classification/spark/tls_classification.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -d /home/spark/applications/statistics/tls_classification/spark/tls_classification_dictionary.csv` 17 | -------------------------------------------------------------------------------- /applications/statistics/tls_classification/spark/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/applications/statistics/tls_classification/spark/modules/__init__.py -------------------------------------------------------------------------------- /applications/statistics/tls_classification/web-interface/controllers/tls_classification_statistics.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Import Elasticsearch library 4 | import elasticsearch 5 | from elasticsearch_dsl import Search, Q, A 6 | # Import advanced python collections 7 | import collections 8 | # Import global functions 9 | from global_functions import escape 10 | 11 | #----------------- Main Functions -------------------# 12 | 13 | 14 | def tls_classification_statistics(): 15 | """ 16 | Show the main page of the TLS classification statistics section.
17 | 18 | :return: Empty dictionary 19 | """ 20 | # Use standard view 21 | response.view = request.controller + '/tls_classification_statistics.html' 22 | return dict() 23 | 24 | 25 | #----------------- Chart Functions ------------------# 26 | 27 | 28 | def get_top_n_statistics(): 29 | """ 30 | Obtains TOP N TLS classification statistics. 31 | 32 | :return: JSON with status "ok" or "error" and requested data. 33 | """ 34 | 35 | # Check login 36 | if not session.logged: 37 | json_response = '{"status": "Error", "data": "You must be logged!"}' 38 | return json_response 39 | 40 | # Check mandatory inputs 41 | if not (request.get_vars.beginning and request.get_vars.end and request.get_vars.type and request.get_vars.number): 42 | json_response = '{"status": "Error", "data": "Some mandatory argument is missing!"}' 43 | return json_response 44 | 45 | # Parse inputs and set correct format 46 | beginning = escape(request.get_vars.beginning) 47 | end = escape(request.get_vars.end) 48 | type = escape(request.get_vars.type) 49 | number = int(escape(request.get_vars.number)) 50 | 51 | try: 52 | # Elastic query 53 | client = elasticsearch.Elasticsearch([{'host': myconf.get('consumer.hostname'), 'port': myconf.get('consumer.port')}]) 54 | elastic_bool = [] 55 | elastic_bool.append({'range': {'@timestamp': {'gte': beginning, 'lte': end}}}) 56 | elastic_bool.append({'term': {'@stat_type': type}}) 57 | 58 | # Prepare query 59 | qx = Q({'bool': {'must': elastic_bool}}) 60 | search_ip = Search(using=client, index='_all').query(qx) 61 | search_ip.aggs.bucket('all_nested', 'nested', path='data_array') \ 62 | .bucket('by_key', 'terms', field='data_array.key.raw', size=2147483647) \ 63 | .bucket('stats_sum', 'sum', field='data_array.value') 64 | 65 | # Get result 66 | results = search_ip.execute() 67 | 68 | # Prepare data variable 69 | data = "" 70 | # Prepare ordered collection 71 | counter = collections.Counter() 72 | 73 | for all_buckets in results.aggregations.all_nested.by_key: 74 | counter[all_buckets.key] += int(all_buckets.stats_sum.value) 75 | 76 | # Select top N (number) values 77 | for value, count in counter.most_common(number): 78 | data += value + "," + str(count) + "," 79 | 80 | # Remove trailing comma 81 | data = data[:-1] 82 | 83 | if data == "": 84 | json_response = '{"status": "Empty", "data": "No data found"}' 85 | else: 86 | json_response = '{"status": "Ok", "data": "' + data + '"}' 87 | return json_response 88 | 89 | except Exception as e: 90 | json_response = '{"status": "Error", "data": "Elasticsearch query exception: ' + escape(str(e)) + '"}' 91 | return json_response 92 | -------------------------------------------------------------------------------- /applications/statistics/tls_classification/web-interface/static/css/tls_classification_statistics.css: -------------------------------------------------------------------------------- 1 | /* TLS Classification Statistics */ 2 | .chart-tls-classification-top .chart-status, .chart-tls-classification-top .zingchart { 3 | height: 320px; 4 | } 5 | .chart-status { 6 | height: 300px; 7 | } 8 | #chart-tls-classification-top-application { 9 | margin-top: -30px; 10 | } 11 | -------------------------------------------------------------------------------- /applications/statistics/tls_classification/web-interface/views/menu/tls_classification_statistics.html: -------------------------------------------------------------------------------- 1 | 2 |
  • 3 | TLS Classification 4 |
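The TLS classification application described above maps observed TLS clients onto labels (operating system, browser, device type and application) using the dictionary CSV passed via `-d`, and then counts the labels per 30-second window. The sketch below shows that lookup-and-count step; the two-column `fingerprint,label` CSV layout and the example fingerprint are assumptions made purely for illustration, since the format of `tls_classification_dictionary.csv` is not documented here.

```python
# Minimal sketch of dictionary-based TLS client classification (illustrative).
import csv
from collections import Counter


def load_dictionary(path):
    """Load an assumed two-column CSV of 'fingerprint,label' pairs."""
    with open(path, newline="") as csv_file:
        return {row[0]: row[1] for row in csv.reader(csv_file) if len(row) >= 2}


def classify_window(fingerprints, dictionary):
    """Count classification labels for one 30-second window of TLS fingerprints."""
    return dict(Counter(dictionary.get(fp, "unknown") for fp in fingerprints))


if __name__ == "__main__":
    # Inline dictionary instead of load_dictionary(...) to keep the example self-contained.
    dictionary = {"771,4865-4866,0-11-10": "Firefox / Linux"}  # assumed entry format
    window = ["771,4865-4866,0-11-10", "0000,0000,0000"]
    print(classify_window(window, dictionary))  # {'Firefox / Linux': 1, 'unknown': 1}
```

The aggregated label counts are what the web interface later retrieves from Elasticsearch through the top-N aggregation query in `tls_classification_statistics.py` shown above.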
  • 5 | -------------------------------------------------------------------------------- /images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/images/architecture.png -------------------------------------------------------------------------------- /images/logo-text-small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/images/logo-text-small.png -------------------------------------------------------------------------------- /provisioning/README.md: -------------------------------------------------------------------------------- 1 | # Stream4Flow provisioning 2 | 3 | ## Vagrant Provisioning 4 | 5 | All configuration of guest deployed by vagrant provisioning is in [configuration.yml](./configuration.yml). 6 | 7 | **Configurable options:** 8 | - *common* – Settings common for all guests 9 | - *box, box_url* – used Vagrant boxes (using different boxes can cause malfunction of the provisioning) 10 | - *provision_on_guest* – true value allows to provision guest separately, false will provision all at once but faster 11 | - *producer* – Producer guest settings 12 | - *ip* – used address of the guest 13 | - *memory* – reserved memory (decrease can cause malfunction of the framework) 14 | - *cpu* – a number of virtual CPUs (decrease can cause malfunction of the framework) 15 | - *sparkMaster* – Spark master guest settings 16 | - *ip* – used address of the guest 17 | - *memory* – reserved memory (decrease can cause malfunction of the framework) 18 | - *cpu* – a number of virtual CPUs (decrease can cause malfunction of the framework) 19 | - *sparkSlave* – Slave guests settings (each slave will have the same configuration) 20 | - *count* – a number of slaves that will be provisioned (max. 155) 21 | - *ip_prefix* – IP address prefix of slave guests (suffix starts at 101) 22 | - *memory* – reserved memory (decrease can cause malfunction of the framework) 23 | - *cpu* – a number of virtual CPUs (decrease can cause malfunction of the framework) 24 | - *consumer* – Consumer guest settings 25 | - *ip* – used address of the guest 26 | - *memory* – reserved memory (decrease can cause malfunction of the framework) 27 | - *cpu* – a number of virtual CPUs (decrease can cause malfunction of the framework) 28 | 29 | ### Vagrant commands: 30 | - `vagrant up` – Brings up the whole framework 31 | - `vagrant up ` – Brings up the guest 32 | - `vagrant halt ` – Shutdown the guest 33 | - `vagrant destroy `– Completely delete given guest and its associated resources (virtual hard drives, ...) 34 | - `vagrant provision ` – Run Ansible provisioning on the guest 35 | - `vagrant ssh ` – Connect to the guest via SSH 36 | 37 | Available guest names: *producer*, *sparkMaster*, *sparkSlave101* ... *sparkSlave156*, *consumer*. 38 | 39 | ## Ansible Provisioning 40 | 41 | Stream4Flow framework confogiration and variables are available in [ansible/group_vars/*](./ansible/group_vars/). 
42 | - Templates of configuration files are stored in *ansible/roles//templates/** 43 | -------------------------------------------------------------------------------- /provisioning/ansible/README.md: -------------------------------------------------------------------------------- 1 | # Ansible provisioning 2 | 3 | ## Playbook structure 4 | 5 | ### Files 6 | - site.yml - default playbook to build the Stream4Flow cluster 7 | - all.yml - deploys general configuration for all machines in a cluster 8 | - producer.yml - deploys the producer machine 9 | - consumer.yml - deploys the consumer machine 10 | - sparkMaster.yml - deploys the Spark Master machine 11 | - sparkSlave.yml - deploys the Spark Slave machine 12 | - inventory.ini.example - example inventory file for Ansible, which can be used in [cluster deployment](https://github.com/CSIRT-MU/Stream4Flow#cluster-deployment). 13 | 14 | ### Directories 15 | - group_vars - contains configurable settings for Stream4Flow 16 | - roles - contains individual roles -------------------------------------------------------------------------------- /provisioning/ansible/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | roles: 4 | - get-vars 5 | - common 6 | -------------------------------------------------------------------------------- /provisioning/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | # Config file for ansible -- https://ansible.com/ 2 | # =============================================== 3 | 4 | [defaults] 5 | 6 | # Controls whether Ansible will raise an error or warning if a task has no 7 | # choice but to create world readable temporary files to execute a module on 8 | # the remote machine. This option is False by default for security. Users may 9 | # turn this on to have behaviour more like Ansible prior to 2.1.x. See 10 | # https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user 11 | # for more secure ways to fix this than enabling this option. 12 | allow_world_readable_tmpfiles = True 13 | -------------------------------------------------------------------------------- /provisioning/ansible/consumer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: consumer 3 | roles: 4 | - get-vars 5 | - elk 6 | - web 7 | -------------------------------------------------------------------------------- /provisioning/ansible/group_vars/README.md: -------------------------------------------------------------------------------- 1 | # Ansible variables 2 | This directory stores all configurable variables, divided by the roles that use them. 3 | 4 | ## Variables for all: 5 | Contains variables for user configuration. 6 | - **user**: User for Spark installation 7 | - **user_passwd**: User password 8 | - **maven_proxy**: Enable proxy support {False/True} 9 | - **proxy_id**: The unique identifier for this proxy, used to differentiate between proxy elements. 10 | - **proxy_user,proxy_pass**: These elements appear as a pair denoting the login and password required to authenticate to this proxy server. 11 | - **proxy_host,proxy_port,proxy_protocol**: The protocol://host:port of the proxy, separated into discrete elements. 12 | - **non_proxy_hosts**: This is a list of hosts which should not be proxied. The delimiter of the list depends on the proxy server; pipe-delimited and comma-delimited lists are both common.
13 | 14 | ## Consumer variables 15 | Contains variables for web configuration. 16 | - **cert_subj**: SSL certificate subject - set according to your needs 17 | - **web2py_passwd**: Password for web2py administration through web interface 18 | - **repository_url**: Stream4Flow repository url for installing web 19 | 20 | ## Producer variables 21 | Contains variables for Apache Kafka and Ipfixcol configuration. 22 | 23 | ### Kafka variables 24 | - **kafka_dir**: Kafka home directory 25 | - **kafka_download_url**: Download location of Kafka 26 | - **kafka_filename**: Name of Kafka directory when unarchived 27 | - **retention**: Retention setting for the Kafka topic 28 | - **kafka_maximum_heap_space**: Maximum java heap space for kafka in MB (Default 0.5 of total RAM) 29 | - **kafka_minimum_heap_space**: Minimum java heap space for kafka in MB (Default 0.25 of total RAM) 30 | 31 | ### Ipfixcol variables 32 | - **script_path**: Path to the location of ipfixcol scripts 33 | - **script_filename**: Filename of ipfixcol script to run. Allowed values are: startup.xml.tcp and startup.xml.udp. If you want to change the script later after deployment, set the IPFIXCOL_SCRIPT environment variable accordingly in /etc/default/ipfixcol 34 | 35 | ## SparkMaster and sparkSlave variables 36 | Contains variables for Apache Spark configuration. 37 | - **download mirrors** - URLs from where to download Apache Spark and Kafka Assembly 38 | - **spark_inflated_dir_name**: Name of Spark directory when unarchived 39 | - **spark_batch_size**: size of Spark's Batch 40 | - **spark_worker_cores**: number of Spark Worker's CPU 41 | 42 | -------------------------------------------------------------------------------- /provisioning/ansible/group_vars/all/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # login 3 | user: spark 4 | 5 | # user password created with: 6 | # python -c 'import crypt; print crypt.crypt("Stream4Flow", "$1$wolF4maertS$")' 7 | user_passwd: $1$wolF4mae$jSX8IG9goPtqVfomlbla6. 
8 | 9 | maven_proxy: False 10 | 11 | proxy_id: not_set 12 | proxy_protocol: not_set 13 | proxy_user: not_set 14 | proxy_pass: not_set 15 | proxy_host: not_set 16 | proxy_port: not_set 17 | non_proxy_hosts: not_set -------------------------------------------------------------------------------- /provisioning/ansible/group_vars/consumer/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: roles/web/defaults/main.yml 3 | 4 | # CSR subject 5 | cert_subj: "/C=CZ/ST=CzechRepublic/L=Brno/O=Stream4Flow/OU=Stream4Flow/CN={{ ansible_host }}" 6 | 7 | #Password for web2py administration through web interface 8 | web2py_passwd: Stream4Flow 9 | 10 | #Repository from where web is installled 11 | repository_url: "https://github.com/CSIRT-MU/Stream4Flow" 12 | 13 | #Number of indices to keep, older indices will be deleted 14 | number_of_indices: 30 15 | -------------------------------------------------------------------------------- /provisioning/ansible/group_vars/producer/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #Kafka home directory 3 | kafka_dir: /opt/kafka 4 | #Download location of Kafka 5 | kafka_download_url: https://archive.apache.org/dist/kafka/2.5.0/kafka_2.12-2.5.0.tgz 6 | #Name of Kafka directory when unarchived 7 | kafka_filename: kafka_2.12-2.5.0 8 | #Retention setting for the Kafka topic 9 | retention: 30000 10 | #Maximum java heap space for kafka in MB (Default 0.5 of total RAM) 11 | kafka_maximum_heap_space: "{{(ansible_memtotal_mb * 0.5)|int}}" 12 | #Maximum java heap space for kafka in MB (Default 0.25 of total RAM) 13 | kafka_minimum_heap_space: "{{(ansible_memtotal_mb * 0.25)|int}}" 14 | 15 | # Set the configuration file path for ipfixcol to load here: 16 | # Path to the location of ipfixcol scripts 17 | script_path: /usr/local/etc/ipfixcol/ 18 | #Filename of ipfixcol script to run. Allowed values are: startup.xml.tcp and startup.xml.udp. 
19 | #If you want to change the script later after deployment, set the IPFIXCOL_SCRIPT environment variable accordingly in /etc/default/ipfixcol 20 | script_filename: startup.xml.udp 21 | 22 | -------------------------------------------------------------------------------- /provisioning/ansible/group_vars/sparkMaster/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # download mirrors 3 | url_spark: "https://archive.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz" 4 | url_kafka_assembly: "http://search.maven.org/remotecontent?filepath=org/apache/spark/spark-streaming-kafka-0-8-assembly_2.11/2.1.1/spark-streaming-kafka-0-8-assembly_2.11-2.1.1.jar" 5 | 6 | # Name of spark directory after inflating downloaded archive 7 | spark_inflated_dir_name: spark-2.1.1-bin-hadoop2.7 8 | 9 | # spark test settings 10 | spark_batch_size: 5000 11 | spark_worker_cores: 2 12 | # spark worker memory in MB 13 | spark_worker_memory: "{{ ansible_memtotal_mb }}" 14 | spark_masterurl: spark://{{ masterIP }}:7077 15 | 16 | repository_url: "https://github.com/CSIRT-MU/Stream4Flow" 17 | repository_tmp: "/tmp/Stream4Flow" 18 | dir_applications: /home/{{ user }}/applications/ 19 | -------------------------------------------------------------------------------- /provisioning/ansible/group_vars/sparkSlave/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # download mirrors 3 | url_spark: "https://archive.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz" 4 | url_kafka_assembly: "http://search.maven.org/remotecontent?filepath=org/apache/spark/spark-streaming-kafka-0-8-assembly_2.11/2.1.1/spark-streaming-kafka-0-8-assembly_2.11-2.1.1.jar" 5 | 6 | # Name of spark directory after inflating downloaded archive 7 | spark_inflated_dir_name: spark-2.1.1-bin-hadoop2.7 8 | 9 | # spark test settings 10 | spark_batch_size: 5000 11 | spark_worker_cores: 2 12 | # spark worker memory in MB 13 | spark_worker_memory: "{{ ansible_memtotal_mb }}" 14 | -------------------------------------------------------------------------------- /provisioning/ansible/inventory.ini.example: -------------------------------------------------------------------------------- 1 | [producer] 2 | producer ansible_host=192.168.0.2 3 | 4 | [consumer] 5 | consumer ansible_host=192.168.0.3 6 | 7 | [sparkMaster] 8 | sparkMaster ansible_host=192.168.0.100 9 | 10 | [sparkSlave] 11 | sparkSlave101 ansible_host=192.168.0.101 12 | sparkSlave102 ansible_host=192.168.0.102 13 | -------------------------------------------------------------------------------- /provisioning/ansible/producer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: producer 3 | roles: 4 | - get-vars 5 | - ubuntu-systemd-normalizer 6 | - kafka 7 | - ipfixcol 8 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Roles 2 | 3 | ## Common 4 | 5 | Common tasks done for all manchines in a cluster 6 | 7 | - install java 8 | - add users 9 | - set a host file 10 | 11 | ## Elk 12 | 13 | Install Elastic cluster on the consumer machine 14 | 15 | **Templates:** 16 | - elasticsearch_elasticsearch.yml.j2 - elasticsearch base configuration 17 | - elasticsearch_logging.yml.j2 - elasticsearch logging configuration 18 | - kibana.yml.j2 - Kibana configuration 19 | - 
logstash_kafka-to-elastic.conf.j2 - Logstash configuration file 20 | - logstash_templates_spark-elasticsearch-template.json.j2 - Logstash template file 21 | 22 | ## Example-application 23 | 24 | Install necessary prerequisites on the spark cluster and deploy the example application on the spark master. 25 | 26 | ## IPFICcol 27 | 28 | Install IPFIXcol on the producer machine. 29 | 30 | **Variables:** 31 | - build - set of commands to build IPfixcol binary 32 | - build_fastbit_compile - set to 'true' if you want to compile fastbit from source (should be necessary only if downloaded binary version doesn't work) 33 | - script_path - path to the location of ipfixcol scripts 34 | - script_filename - filename of ipfixcol script to run. Allowed values are: startup.xml.tcp and startup.xml.udp. If you want to change the script later after deployment, set the IPFIXCOL_SCRIPT environment variable accordingly in /etc/default/ipfixcol 35 | - packages.apt.yml: List of required apt packages to download. 36 | 37 | **Templates:** 38 | - ipfixcol.conf.j2: Ubuntu upstart file for ipfixcol service 39 | - ipfixcol.j2: defaults file for ipfixcol service 40 | 41 | ## Kafka 42 | 43 | Installs Aoache Kafka on the producer machine 44 | 45 | **Templates:** 46 | - kafka-broker.service.j2: Ubuntu upstart file for kafka service 47 | - kafka-broker.j2: Defaults file for kafka service 48 | - kafka-server-start.sh.j2: Start script for Kafka server 49 | 50 | ## Spark 51 | 52 | Installs Apache Spark on the spark cluster 53 | 54 | **Templates** 55 | - run-application.sh.j2 - startup script for a spark application 56 | 57 | 58 | **Variables:** 59 | - user: under which user Spark should be installed 60 | - slave{x}IP: IP address of Spark Slave, where {x} is a number from 1.. If adding more slaves, add their IPs here as other entries like slave2IP: slave3IP: ... 61 | - download mirrors: URLs from where to download Apache Spark and Kafka Assembly 62 | - spark_inflated_dir_name: Name of Spark directory when unarchived 63 | - spark test settings: Settings for Apache Spark 64 | 65 | ## Ubuntu-systemd-normalizer 66 | 67 | **Templates** 68 | - tcpnormalizer@.service.j2: systemd service script 69 | 70 | **Variables** 71 | - normalizer_user: set user for normalizer 72 | 73 | ## Web 74 | 75 | Install web interface on the consumer machine 76 | 77 | **Variables:** 78 | - cert_subj: SSL certificate subject - set according to your needs 79 | - phantomjs_url: PhantomJS download URL 80 | - web2py_passwd: Password for web2py administration through web interface 81 | 82 | **Templates**: 83 | - chgpasswd.py.j2: Script to change default web2py password to value set in web2py_passwd (see above). 
84 | - web2py.conf.j2: Web2py configuration file 85 | 86 | **Files** 87 | - routes.py: Default application settings 88 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/applications/defaults/main.yml: -------------------------------------------------------------------------------- 1 | repository_url: "https://github.com/CSIRT-MU/Stream4Flow" 2 | repository_tmp: "/tmp/Stream4Flow" 3 | 4 | dir_applications: /home/{{ user }}/applications/ -------------------------------------------------------------------------------- /provisioning/ansible/roles/applications/tasks/dependencies.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install pip 3 | - name: Install pip and git 4 | apt: name={{ item }} state=latest update_cache=yes 5 | with_items: 6 | - python-pip 7 | - git 8 | # Instal ujson, termcolor and kafka modules for application 9 | - name: Install python modules 10 | pip: name={{item}} executable=pip 11 | with_items: 12 | - ujson 13 | - termcolor 14 | - ipaddress 15 | - kafka 16 | 17 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/applications/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Clone Stream4Flow git repository 3 | git: 4 | repo: "{{ repository_url }}" 5 | dest: "{{ repository_tmp }}" 6 | version: master 7 | 8 | - name: Copy all applications from Stream4Flow git reporsitory 9 | command: cp -r "{{ repository_tmp }}/applications/." "{{ dir_applications }}/" 10 | 11 | - name: Set correct owner and permissions of applications 12 | file: 13 | dest: "{{ dir_applications }}/{{ item }}" 14 | owner: spark 15 | group: spark 16 | mode: 0644 17 | recurse: yes 18 | with_items: ["detection", "statistics", "application_template"] 19 | 20 | - name: Set correct perissions of applications directories 21 | file: 22 | path: "{{ dir_applications }}/" 23 | mode: u=rwX,g=rX,o=rX 24 | recurse: yes -------------------------------------------------------------------------------- /provisioning/ansible/roles/applications/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install dependencies 3 | include: dependencies.yml 4 | become: yes 5 | 6 | - name: Install applications 7 | include: install.yml 8 | become: yes 9 | when: "'sparkMaster' in group_names" 10 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: roles/common/defaults/main.yml 3 | 4 | # login 5 | user: spark 6 | 7 | # user password created with: 8 | # python -c 'import crypt; print crypt.crypt("Stream4Flow", "$1$wolF4maertS$")' 9 | user_passwd: $1$wolF4mae$jSX8IG9goPtqVfomlbla6. 
10 | 11 | maven_proxy: False 12 | 13 | proxy_id: not_set 14 | proxy_protocol: not_set 15 | proxy_user: not_set 16 | proxy_pass: not_set 17 | proxy_host: not_set 18 | proxy_port: not_set 19 | non_proxy_hosts: not_set -------------------------------------------------------------------------------- /provisioning/ansible/roles/common/tasks/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Build /etc/hosts file" 3 | lineinfile: dest=/etc/hosts regexp='.*{{ item }}$' line="{{ hostvars[item].ansible_host }} {{ item }}" state=present 4 | with_inventory_hostnames: all 5 | 6 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/common/tasks/java-oracle.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install Oracle Java 3 | - name: install add-apt-repository 4 | apt: name=software-properties-common state=latest 5 | - name: Add Oracle Java Repository 6 | apt_repository: repo='ppa:linuxuprising/java' 7 | - name: Accept Java 14 License debconf 8 | debconf: name='oracle-java14-installer' question='shared/accepted-oracle-license-v1-2' value='true' vtype='select' 9 | - name: Accept Java 14 License apt 10 | apt_key: keyserver=hkp://keyserver.ubuntu.com:80 id=EEA14886 11 | - name: Install Oracle Java 14 12 | apt: 13 | name: [oracle-java14-installer, ca-certificates, oracle-java14-set-default] 14 | state: latest 15 | update_cache: yes 16 | - name: Install Maven 3 17 | apt: 18 | name: maven 19 | state: latest 20 | update_cache: yes 21 | 22 | - name: Create .m2 directory 23 | file: dest=/home/{{ user }}/.m2 state=directory 24 | when: maven_proxy 25 | 26 | - name: Make this file on producer 27 | template: src=settings.xml.j2 dest=/home/{{ user }}/.m2/settings.xml 28 | when: 29 | - proxy_id != "not_set" 30 | - proxy_protocol != "not_set" 31 | - proxy_user != "not_set" 32 | - proxy_pass != "not_set" 33 | - proxy_host != "not_set" 34 | - proxy_port != "not_set" 35 | - non_proxy_hosts != "not_set" 36 | - maven_proxy -------------------------------------------------------------------------------- /provisioning/ansible/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add all hosts to /etc/hosts 3 | include: hosts.yml 4 | become: yes 5 | 6 | - name: Install Oracle Java 8 7 | include: java-oracle.yml 8 | become: yes 9 | 10 | - name: Add users 11 | include: users.yml 12 | become: yes -------------------------------------------------------------------------------- /provisioning/ansible/roles/common/tasks/users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Add users 3 | 4 | # Create user for running Stream4Flow 5 | - name: Create Spark user. 
6 | user: name={{ user }} comment="Stream4Flow user" shell=/bin/bash groups=sudo generate_ssh_key=yes ssh_key_bits=4096 ssh_key_file=.ssh/id_rsa password={{ user_passwd }} 7 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/defaults/main.yml: -------------------------------------------------------------------------------- 1 | number_of_indices: 30 -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Copy ELK config files 3 | 4 | # Elasticsearch files 5 | - name: Copy elasticsearch.yml (elasticsearch) 6 | template: src=templates/elasticsearch_elasticsearch.yml.j2 dest=/etc/elasticsearch/elasticsearch.yml mode=0644 7 | 8 | - name: Copy logging.yml (elasticsearch) 9 | template: src=templates/elasticsearch_logging.yml.j2 dest=/etc/elasticsearch/logging.yml mode=0644 10 | 11 | - name: Create data directory. 12 | file: path=/data/elasticsearch state=directory recurse=yes owner=elasticsearch group=elasticsearch mode=0775 13 | 14 | - name: Restart Elasticsearch 15 | service: name=elasticsearch state=restarted enabled=yes 16 | 17 | # Kibana files 18 | - name: Copy kibana.yml (kibana) 19 | template: src=templates/kibana.yml.j2 dest=/etc/kibana/kibana.yml mode=0664 20 | 21 | - name: Restart Kibana 22 | service: name=kibana state=restarted enabled=yes 23 | 24 | # Logstash files 25 | - name: Copy kafka-to-elastic.conf (logstash) 26 | template: src=templates/logstash_kafka-to-elastic.conf.j2 dest=/etc/logstash/conf.d/kafka-to-elastic.conf mode=0664 owner=logstash group=logstash 27 | 28 | - name: Create 'templates' directory 29 | file: path=/etc/logstash/conf.d/templates state=directory owner=logstash group=logstash mode=0755 30 | 31 | - name: Copy templates_spark-elasticsearch-template.json (logstash) 32 | template: src=templates/logstash_templates_spark-elasticsearch-template.json.j2 dest=/etc/logstash/conf.d/templates/spark-elasticsearch-template.json mode=0764 owner=logstash group=logstash 33 | 34 | - name: Restart Logstash 35 | service: name=logstash state=restarted enabled=yes 36 | 37 | # Curator files 38 | - name: Make configuration directory 39 | file: path=/home/{{ user }}/.curator state=directory mode=0755 40 | 41 | - name: Copy curator.yml 42 | template: src=templates/curator.yml dest=/home/{{ user }}/.curator mode=0755 43 | 44 | - name: Copy curatorAction.yml 45 | template: src=templates/curatorAction.yml dest=/home/{{ user }}/.curator mode=0755 46 | 47 | - name: Configure cron jobs for Elasticsearch Curator. 
48 | cron: 49 | name: Delete indices older than 30 days 50 | job: " /usr/local/bin/curator --config /home/{{ user }}/.curator/curator.yml /home/{{ user }}/.curator/curatorAction.yml > /var/log/curator.log 2>&1 " 51 | minute: 0 52 | hour: 1 53 | day: '*' 54 | weekday: '*' 55 | month: '*' 56 | 57 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install ELK with dependencies 3 | 4 | # Add keys and repositories 5 | - name: add key for elasticsearch 6 | apt_key: url=https://artifacts.elastic.co/GPG-KEY-elasticsearch 7 | - name: add repository 8 | apt_repository: repo='deb https://artifacts.elastic.co/packages/7.x/apt stable main' 9 | # Install ELK 10 | - name: install elk and dependencies 11 | apt: name={{ item }} state=present update_cache=yes 12 | with_items: 13 | - elasticsearch 14 | - kibana 15 | - logstash 16 | - python-pip 17 | 18 | - name: Install elasticsearch-curator 19 | pip: "name={{ item }}" 20 | with_items: 21 | - elasticsearch-curator 22 | - argparse 23 | 24 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install ELK Stack with dependencies 2 | include: install.yml 3 | become: yes 4 | 5 | - name: Copy ELK config files to host 6 | include: config.yml 7 | become: yes 8 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/templates/curator.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Remember, leave a key empty if there is no value. None will be a string, 3 | # not a Python "NoneType" 4 | client: 5 | hosts: 6 | - {{ ansible_host }} 7 | port: 9200 8 | url_prefix: 9 | use_ssl: False 10 | certificate: 11 | client_cert: 12 | client_key: 13 | ssl_no_validate: False 14 | http_auth: 15 | timeout: 30 16 | master_only: False 17 | 18 | logging: 19 | loglevel: INFO 20 | logfile: 21 | logformat: default 22 | blacklist: ['elasticsearch', 'urllib3'] -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/templates/curatorAction.yml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: delete_indices 4 | description: >- 5 | Delete indices older than 30 days (based on index name), for logstash- 6 | prefixed indices. Ignore the error if the filter does not result in an 7 | actionable list of indices (ignore_empty_list) and exit cleanly. 8 | options: 9 | ignore_empty_list: True 10 | timeout_override: 11 | continue_if_exception: False 12 | filters: 13 | - filtertype: pattern 14 | kind: prefix 15 | value: spark- 16 | exclude: 17 | - filtertype: age 18 | source: name 19 | direction: older 20 | timestring: '%Y.%m.%d' 21 | unit: days 22 | unit_count: {{ number_of_indices}} 23 | exclude: -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/templates/elasticsearch_elasticsearch.yml.j2: -------------------------------------------------------------------------------- 1 | # ======================== Elasticsearch Configuration ========================= 2 | # 3 | # NOTE: Elasticsearch comes with reasonable defaults for most settings. 
4 | # Before you set out to tweak and tune the configuration, make sure you 5 | # understand what are you trying to accomplish and the consequences. 6 | # 7 | # The primary way of configuring a node is via this file. This template lists 8 | # the most important settings you may want to configure for a production cluster. 9 | # 10 | # Please see the documentation for further information on configuration options: 11 | # 12 | # 13 | # ---------------------------------- Cluster ----------------------------------- 14 | # 15 | # Use a descriptive name for your cluster: 16 | # 17 | # cluster.name: my-application 18 | # 19 | # ------------------------------------ Node ------------------------------------ 20 | # 21 | # Use a descriptive name for the node: 22 | # 23 | # node.name: node-1 24 | # 25 | # Add custom attributes to the node: 26 | # 27 | # node.rack: r1 28 | # 29 | # ----------------------------------- Paths ------------------------------------ 30 | # 31 | # Path to directory where to store the data (separate multiple locations by comma): 32 | # 33 | path.data: /data/elasticsearch 34 | # 35 | # Path to log files: 36 | # 37 | path.logs: /var/log/elasticsearch 38 | # 39 | # ----------------------------------- Memory ----------------------------------- 40 | # 41 | # Lock the memory on startup: 42 | # 43 | # bootstrap.mlockall: true 44 | # 45 | # Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory 46 | # available on the system and that the owner of the process is allowed to use this limit. 47 | # 48 | # Elasticsearch performs poorly when the system is swapping the memory. 49 | # 50 | # ---------------------------------- Network ----------------------------------- 51 | # 52 | # Set the bind address to a specific IP (IPv4 or IPv6): 53 | # 54 | # network.host: 192.168.0.1 55 | network.host: {{ ansible_host }} 56 | # 57 | # Set a custom port for HTTP: 58 | # 59 | # http.port: 9200 60 | # 61 | # Single node usage 62 | # 63 | discovery.type: single-node 64 | # 65 | # For more information, see the documentation at: 66 | # 67 | # 68 | # --------------------------------- Discovery ---------------------------------- 69 | # 70 | # Pass an initial list of hosts to perform discovery when new node is started: 71 | # The default list of hosts is ["127.0.0.1", "[::1]"] 72 | # 73 | # discovery.zen.ping.unicast.hosts: ["host1", "host2"] 74 | # 75 | # Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): 76 | # 77 | # discovery.zen.minimum_master_nodes: 3 78 | # 79 | # For more information, see the documentation at: 80 | # 81 | # 82 | # ---------------------------------- Gateway ----------------------------------- 83 | # 84 | # Block initial recovery after a full cluster restart until N nodes are started: 85 | # 86 | # gateway.recover_after_nodes: 3 87 | # 88 | # For more information, see the documentation at: 89 | # 90 | # 91 | # ---------------------------------- Various ----------------------------------- 92 | # 93 | # Disable starting multiple nodes on a single system: 94 | # 95 | # node.max_local_storage_nodes: 1 96 | # 97 | # Require explicit names when deleting indices: 98 | # 99 | # action.destructive_requires_name: true 100 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/templates/elasticsearch_logging.yml.j2: -------------------------------------------------------------------------------- 1 | # you can override this using by setting a system property, for 
example -Des.logger.level=DEBUG 2 | es.logger.level: INFO 3 | rootLogger: ${es.logger.level}, console, file 4 | logger: 5 | # log action execution errors for easier debugging 6 | action: DEBUG 7 | 8 | # deprecation logging, turn to DEBUG to see them 9 | deprecation: INFO, deprecation_log_file 10 | 11 | # reduce the logging for aws, too much is logged under the default INFO 12 | com.amazonaws: WARN 13 | # aws will try to do some sketchy JMX stuff, but its not needed. 14 | com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR 15 | com.amazonaws.metrics.AwsSdkMetrics: ERROR 16 | 17 | org.apache.http: INFO 18 | 19 | # gateway 20 | #gateway: DEBUG 21 | #index.gateway: DEBUG 22 | 23 | # peer shard recovery 24 | #indices.recovery: DEBUG 25 | 26 | # discovery 27 | #discovery: TRACE 28 | 29 | index.search.slowlog: TRACE, index_search_slow_log_file 30 | index.indexing.slowlog: TRACE, index_indexing_slow_log_file 31 | 32 | additivity: 33 | index.search.slowlog: false 34 | index.indexing.slowlog: false 35 | deprecation: false 36 | 37 | appender: 38 | console: 39 | type: console 40 | layout: 41 | type: consolePattern 42 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 43 | 44 | file: 45 | type: dailyRollingFile 46 | file: ${path.logs}/${cluster.name}.log 47 | datePattern: "'.'yyyy-MM-dd" 48 | layout: 49 | type: pattern 50 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n" 51 | 52 | # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. 53 | # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html 54 | #file: 55 | #type: extrasRollingFile 56 | #file: ${path.logs}/${cluster.name}.log 57 | #rollingPolicy: timeBased 58 | #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz 59 | #layout: 60 | #type: pattern 61 | #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 62 | 63 | deprecation_log_file: 64 | type: dailyRollingFile 65 | file: ${path.logs}/${cluster.name}_deprecation.log 66 | datePattern: "'.'yyyy-MM-dd" 67 | layout: 68 | type: pattern 69 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 70 | 71 | index_search_slow_log_file: 72 | type: dailyRollingFile 73 | file: ${path.logs}/${cluster.name}_index_search_slowlog.log 74 | datePattern: "'.'yyyy-MM-dd" 75 | layout: 76 | type: pattern 77 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 78 | 79 | index_indexing_slow_log_file: 80 | type: dailyRollingFile 81 | file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log 82 | datePattern: "'.'yyyy-MM-dd" 83 | layout: 84 | type: pattern 85 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 86 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/templates/kibana.yml.j2: -------------------------------------------------------------------------------- 1 | # Kibana is served by a back end server. This controls which port to use. 2 | # server.port: 5601 3 | 4 | # The host to bind the server to. 5 | # server.host: "0.0.0.0" 6 | server.host: "{{ ansible_host }}" 7 | 8 | # If you are running kibana behind a proxy, and want to mount it at a path, 9 | # specify that path here. The basePath can't end in a slash. 10 | # server.basePath: "" 11 | 12 | # The maximum payload size in bytes on incoming server requests. 13 | # server.maxPayloadBytes: 1048576 14 | 15 | # The Elasticsearch instance to use for all your queries. 
16 | elasticsearch.hosts: "http://{{ ansible_host }}:9200" 17 | 18 | # preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false, 19 | # then the host you use to connect to *this* Kibana instance will be sent. 20 | # elasticsearch.preserveHost: true 21 | 22 | # Kibana uses an index in Elasticsearch to store saved searches, visualizations 23 | # and dashboards. It will create a new index if it doesn't already exist. 24 | # kibana.index: ".kibana" 25 | 26 | # The default application to load. 27 | # kibana.defaultAppId: "discover" 28 | 29 | # If your Elasticsearch is protected with basic auth, these are the user credentials 30 | # used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana 31 | # users will still need to authenticate with Elasticsearch (which is proxied through 32 | # the Kibana server) 33 | # elasticsearch.username: "user" 34 | # elasticsearch.password: "pass" 35 | 36 | # SSL for outgoing requests from the Kibana Server to the browser (PEM formatted) 37 | # server.ssl.cert: /path/to/your/server.crt 38 | # server.ssl.key: /path/to/your/server.key 39 | 40 | # Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted) 41 | # elasticsearch.ssl.cert: /path/to/your/client.crt 42 | # elasticsearch.ssl.key: /path/to/your/client.key 43 | 44 | # If you need to provide a CA certificate for your Elasticsearch instance, put 45 | # the path of the pem file here. 46 | # elasticsearch.ssl.ca: /path/to/your/CA.pem 47 | 48 | # Set to false to have a complete disregard for the validity of the SSL 49 | # certificate. 50 | # elasticsearch.ssl.verify: true 51 | 52 | # Time in milliseconds to wait for elasticsearch to respond to pings, defaults to 53 | # request_timeout setting 54 | # elasticsearch.pingTimeout: 1500 55 | 56 | # Time in milliseconds to wait for responses from the back end or elasticsearch. 57 | # This must be > 0 58 | # elasticsearch.requestTimeout: 300000 59 | 60 | # Time in milliseconds for Elasticsearch to wait for responses from shards. 61 | # Set to 0 to disable. 62 | # elasticsearch.shardTimeout: 0 63 | 64 | # Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying 65 | # elasticsearch.startupTimeout: 5000 66 | 67 | # Set the path to where you would like the process id file to be created. 68 | # pid.file: /var/run/kibana.pid 69 | 70 | # If you would like to send the log output to a file you can set the path below. 71 | # logging.dest: stdout 72 | 73 | # Set this to true to suppress all logging output. 74 | # logging.silent: false 75 | 76 | # Set this to true to suppress all logging output except for error messages. 77 | # logging.quiet: false 78 | 79 | # Set this to true to log all events, including system usage information and all requests. 
80 | # logging.verbose: false 81 | 82 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/templates/logstash_kafka-to-elastic.conf.j2: -------------------------------------------------------------------------------- 1 | input { 2 | kafka{ 3 | topics => ["results.output"] 4 | bootstrap_servers => "{{ producerIP }}:9092" 5 | codec => "json_lines" 6 | } 7 | } 8 | output{ 9 | elasticsearch{ 10 | hosts => "{{ ansible_host }}:9200" 11 | codec => json 12 | index => "spark-%{+YYYY.MM.dd}" 13 | template => "/etc/logstash/conf.d/templates/spark-elasticsearch-template.json" 14 | template_name => "spark" 15 | } 16 | stdout{ 17 | codec => rubydebug 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/elk/templates/logstash_templates_spark-elasticsearch-template.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "template" : "spark-*", 3 | "settings" : { 4 | "index.refresh_interval" : "5s" 5 | }, 6 | "mappings" : { 7 | "_default_" : { 8 | "_all" : {"enabled" : true, "omit_norms" : true}, 9 | "dynamic_templates" : [ { 10 | "message_field" : { 11 | "match" : "message", 12 | "match_mapping_type" : "string", 13 | "mapping" : { 14 | "type" : "string", "index" : "analyzed", "omit_norms" : true, 15 | "fielddata" : { "format" : "disabled" } 16 | } 17 | } 18 | }, { 19 | "string_fields" : { 20 | "match" : "*", 21 | "match_mapping_type" : "string", 22 | "mapping" : { 23 | "type" : "string", "index" : "analyzed", "omit_norms" : true, 24 | "fielddata" : { "format" : "disabled" }, 25 | "fields" : { 26 | "raw" : {"type": "string", "index" : "not_analyzed", "doc_values" : true, "ignore_above" : 256} 27 | } 28 | } 29 | } 30 | }, { 31 | "float_fields" : { 32 | "match" : "*", 33 | "match_mapping_type" : "float", 34 | "mapping" : { "type" : "float", "doc_values" : true } 35 | } 36 | }, { 37 | "double_fields" : { 38 | "match" : "*", 39 | "match_mapping_type" : "double", 40 | "mapping" : { "type" : "double", "doc_values" : true } 41 | } 42 | }, { 43 | "byte_fields" : { 44 | "match" : "*", 45 | "match_mapping_type" : "byte", 46 | "mapping" : { "type" : "byte", "doc_values" : true } 47 | } 48 | }, { 49 | "short_fields" : { 50 | "match" : "*", 51 | "match_mapping_type" : "short", 52 | "mapping" : { "type" : "short", "doc_values" : true } 53 | } 54 | }, { 55 | "integer_fields" : { 56 | "match" : "*", 57 | "match_mapping_type" : "integer", 58 | "mapping" : { "type" : "integer", "doc_values" : true } 59 | } 60 | }, { 61 | "long_fields" : { 62 | "match" : "*", 63 | "match_mapping_type" : "long", 64 | "mapping" : { "type" : "long", "doc_values" : true } 65 | } 66 | }, { 67 | "date_fields" : { 68 | "match" : "*", 69 | "match_mapping_type" : "date", 70 | "mapping" : { "type" : "date", "doc_values" : true } 71 | } 72 | }, { 73 | "geo_point_fields" : { 74 | "match" : "*", 75 | "match_mapping_type" : "geo_point", 76 | "mapping" : { "type" : "geo_point", "doc_values" : true } 77 | } 78 | } ], 79 | "properties" : { 80 | "@timestamp": { "type": "date", "doc_values" : true }, 81 | "@version": { "type": "string", "index": "not_analyzed", "doc_values" : true }, 82 | "geoip" : { 83 | "type" : "object", 84 | "dynamic": true, 85 | "properties" : { 86 | "ip": { "type": "ip", "doc_values" : true }, 87 | "location" : { "type" : "geo_point", "doc_values" : true }, 88 | "latitude" : { "type" : "float", "doc_values" : true }, 89 | "longitude" : { "type" : "float", 
"doc_values" : true } 90 | } 91 | }, 92 | "src_ip" : { "type" : "ip" }, 93 | "dst_ip" : { "type" : "ip" }, 94 | "dns_ip" : { "type" : "ip" }, 95 | "data_array": { 96 | "type": "nested", 97 | "properties": { 98 | "ip": { 99 | "type": "ip" 100 | }, 101 | "key": { 102 | "type": "text", 103 | "norms": false, 104 | "fields": { 105 | "raw": { 106 | "type": "keyword", 107 | "ignore_above": 256 108 | } 109 | } 110 | }, 111 | "value": { 112 | "type": "long" 113 | } 114 | } 115 | } 116 | } 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/get-vars/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: "Gather facts" 2 | setup: 3 | with_items: groups['all'] 4 | 5 | - name: "Setup masterIP variable" 6 | set_fact: masterIP="{{ hostvars[item].ansible_host }}" 7 | with_inventory_hostnames: sparkMaster 8 | 9 | - name: "Setup producerIP variable" 10 | set_fact: producerIP="{{ hostvars[item].ansible_host }}" 11 | with_inventory_hostnames: producer -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/files/ipfixcol.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Ipfixcol service 3 | Documentation=https://github.com/CESNET/ipfixcol 4 | Requires=network.target remote-fs.target 5 | After=network.target remote-fs.target 6 | 7 | [Service] 8 | Type=simple 9 | PIDFile=/var/run/ipfixcol.pid 10 | User=root 11 | Group=root 12 | EnvironmentFile=/etc/default/ipfixcol 13 | ExecStart=/usr/local/bin/ipfixcol -c $IPFIXCOL_SCRIPT -v 2 14 | ExecStop= 15 | Restart=on-failure 16 | SyslogIdentifier=ipfixcol 17 | 18 | [Install] 19 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/tasks/base.yml: -------------------------------------------------------------------------------- 1 | # IPFIXcol build 2 | - name: Clone IPFIXcol git 3 | git: repo={{ build.git }} dest={{ build.dir }} version={{ build.branch }} force=yes 4 | 5 | - name: Build IPFIXcol base 6 | command: > 7 | {{ item }} 8 | chdir={{ build.dir }}/base/ 9 | creates=/tmp/ipfixcol-base.installed 10 | with_items: 11 | - "{{ build.cmds }}" 12 | - ldconfig 13 | - "touch /tmp/ipfixcol-base.installed" 14 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/tasks/dependencies.yml: -------------------------------------------------------------------------------- 1 | # Dependencies for IPFIXcol base 2 | - name: Check for yum packaging system 3 | set_fact: 4 | pkg_type: yum 5 | when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' 6 | tags: always 7 | - name: Check for apt packaging system 8 | set_fact: 9 | pkg_type: apt 10 | when: ansible_pkg_mgr == 'apt' 11 | tags: always 12 | 13 | - name: Include dependency package variables 14 | include_vars: "{{ item }}" 15 | with_first_found: 16 | - "packages.{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml" 17 | - "packages.{{ pkg_type }}.yml" 18 | tags: always 19 | 20 | - name: Install dnf support for Fedora 21 | command: dnf -y install python2-dnf 22 | when: ansible_pkg_mgr == 'dnf' 23 | tags: dependencies 24 | 25 | - name: Install dependencies for IPFIXcol 26 | action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest" 27 | with_items: "{{ packages.base }}" 28 | tags: dependencies 
29 | 30 | - name: Download FastBit compiled library 31 | include: libfastbit.yml 32 | tags: 33 | - fastbit_lib 34 | - fastbit 35 | - fbitmerge 36 | - fbitdump 37 | 38 | 39 | - name: postgres dependencies 40 | action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest" 41 | with_items: "{{ packages.postgres }}" 42 | tags: postgres 43 | 44 | - name: stats dependencies 45 | action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest" 46 | with_items: "{{ packages.stats }}" 47 | tags: 48 | - profile_stats 49 | - stats 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/tasks/libfastbit.yml: -------------------------------------------------------------------------------- 1 | # Install libfastbit by either downloading precompiled 2 | # binaries or compiling from git 3 | 4 | # Install using binaries 5 | - name: Download FastBit compiled library (2/2) 6 | get_url: > 7 | url={{ build.fastbit.url }}/libfastbit-{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.tar.gz 8 | dest=/tmp/{{ build.fastbit.archive }} validate_certs=no 9 | when: not (build_fastbit_compile | bool) 10 | 11 | - name: Install FastBit library 12 | command: "{{ item }} chdir=/tmp/ creates=/tmp/libfastbit.installed" 13 | with_items: 14 | - "tar -xzpf {{ build.fastbit.archive }} -C /" 15 | # move pkgconfig file to path common to all supported distributions 16 | - "mv /usr/local/lib/pkgconfig/fastbit.pc /usr/share/pkgconfig/fastbit.pc" 17 | - "ldconfig" 18 | - "touch /tmp/libfastbit.installed" 19 | when: not (build_fastbit_compile | bool) 20 | 21 | # Install from sources (https://github.com/CESNET/libfastbit) 22 | - name: Clone libfastbit repository 23 | git: repo={{ build.fastbit.git }} dest={{ build.fastbit.dir }} 24 | when: build_fastbit_compile | bool 25 | ignore_errors: yes 26 | 27 | - name: Build libfastbit 28 | command: > 29 | {{ item }} 30 | chdir={{ build.fastbit.dir }} 31 | creates=/tmp/libfastbit.installed 32 | with_items: 33 | - "autoreconf -i" 34 | - "./configure --disable-static" 35 | - "make" 36 | - "make install" 37 | - "mv /usr/local/lib/pkgconfig/fastbit.pc /usr/share/pkgconfig/fastbit.pc" 38 | - "ldconfig" 39 | - "touch /tmp/libfastbit.installed" 40 | when: build_fastbit_compile | bool -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Include dependency tasks 2 | include: dependencies.yml 3 | become: yes 4 | 5 | - name: IPFIXcol base build 6 | include: base.yml 7 | tags: base 8 | become: yes 9 | 10 | - name: IPFIXcol storage plugins 11 | include: storage-plugins.yml 12 | become: yes 13 | 14 | - name: Tools for viewing stored data 15 | include: tools.yml 16 | become: yes 17 | 18 | - name: Additional IPFIX elements 19 | include: ipfix-elements.yml 20 | become: yes 21 | 22 | - name: Start IPFIXcol 23 | include: start.yml 24 | become: yes 25 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/tasks/start.yml: -------------------------------------------------------------------------------- 1 | # Configure and start IPFIXcol 2 | 3 | - name: Copy TCP config file for IPFIXcol 4 | template: src=templates/startup.xml.tcp.j2 dest={{ script_path }}startup.xml.tcp mode=0644 5 | 6 | - name: Copy UDP config file for IPFIXcol 7 | template: src=templates/startup.xml.udp.j2
dest={{ script_path }}startup.xml.udp mode=0644 8 | 9 | - name: Copy upstart files for IPFIXcol (1/2) 10 | template: src=templates/ipfixcol.j2 dest=/etc/default/ipfixcol mode=0644 owner=spark group=root 11 | 12 | - name: Copy upstart files for IPFIXcol (2/2) 13 | copy: src=ipfixcol.service dest=/etc/systemd/system owner=root group=root mode=0644 14 | 15 | - name: Reload systemd daemon 16 | shell: systemctl daemon-reload 17 | 18 | - name: Start IPFIXcol 19 | service: name=ipfixcol state=started enabled=yes 20 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/tasks/storage-plugins.yml: -------------------------------------------------------------------------------- 1 | # Storage plugins 2 | 3 | # fastbit 4 | - name: Build fastbit storage plugin 5 | command: > 6 | {{ item }} 7 | chdir={{ build.dir }}/plugins/storage/fastbit 8 | creates=/usr/local/share/ipfixcol/plugins/ipfixcol-fastbit-output.so 9 | with_items: "{{ build.cmds }}" 10 | tags: fastbit 11 | 12 | # json 13 | - name: Build json storage plugin 14 | command: > 15 | {{ item }} 16 | chdir={{ build.dir }}/plugins/storage/json 17 | creates=/usr/local/share/ipfixcol/plugins/ipfixcol-json-output.so 18 | with_items: "{{ build.cmds }}" 19 | tags: json 20 | 21 | # nfdump 22 | - name: Build nfdump storage plugin 23 | command: > 24 | {{ item }} 25 | chdir={{ build.dir }}/plugins/storage/nfdump 26 | creates=/usr/local/share/ipfixcol/plugins/ipfixcol-nfdump-output.so 27 | with_items: "{{ build.cmds }}" 28 | tags: nfdump-storage 29 | 30 | # postgres 31 | - name: Build postgres storage plugin 32 | command: > 33 | {{ item }} 34 | chdir={{ build.dir }}/plugins/storage/postgres 35 | creates=/usr/local/share/ipfixcol/plugins/ipfixcol-postgres-output.so 36 | with_items: "{{ build.cmds }}" 37 | tags: postgres 38 | 39 | # statistics 40 | - name: Build statistics storage plugin 41 | command: > 42 | {{ item }} 43 | chdir={{ build.dir }}/plugins/storage/statistics 44 | creates=/usr/local/share/ipfixcol/plugins/ipfixcol-statistics-output.so 45 | with_items: "{{ build.cmds }}" 46 | tags: statistics -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/tasks/tools.yml: -------------------------------------------------------------------------------- 1 | # Tools 2 | 3 | # fbitdump 4 | - name: Build fbitdump tool 5 | command: > 6 | {{ item }} 7 | chdir={{ build.dir }}/tools/fbitdump 8 | creates=/usr/local/bin/fbitdump 9 | with_items: "{{ build.cmds }}" 10 | tags: fbitdump -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/templates/ipfixcol.conf.j2: -------------------------------------------------------------------------------- 1 | # Ubuntu upstart file at /etc/init/ipfixcol.conf 2 | 3 | description "ipfixcol" 4 | 5 | start on runlevel [2345] 6 | stop on runlevel [!12345] 7 | 8 | respawn 9 | respawn limit 2 5 10 | 11 | umask 007 12 | 13 | kill timeout 30 14 | 15 | setuid vagrant 16 | setgid root 17 | 18 | script 19 | 20 | [ -f /etc/default/ipfixcol ] && . 
/etc/default/ipfixcol 21 | if [ -z "$IPFIXCOL_SCRIPT" ] ; then 22 | echo "IPFIXCOL_SCRIPT is not set, please set it in /etc/default/ipfixcol" >&2 23 | exit 1 24 | fi 25 | 26 | /usr/local/bin/ipfixcol -c $IPFIXCOL_SCRIPT -v 2 27 | 28 | end script 29 | 30 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/templates/ipfixcol.j2: -------------------------------------------------------------------------------- 1 | IPFIXCOL_SCRIPT="{{ script_path }}{{ script_filename }}" 2 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/templates/startup.xml.tcp.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TCP collector 6 | 7 | Listening port 4739 8 | 4739 9 | {{ ansible_host }} 10 | 11 | json writer 12 | 13 | 14 | 26 | 27 | 28 | 29 | 31 | json writer 32 | 33 | 55 | 56 | 57 | 58 | JSON storage plugin 59 | 60 | json 61 | no 62 | raw 63 | unix 64 | raw 65 | yes 66 | no 67 | 74 | 75 | send 76 | 127.0.0.1 77 | 56789 78 | tcp 79 | 80 | 81 | 82 | 83 | 84 | 85 | 94 | yes 95 | 96 | 97 | 98 | 99 | 100 | 105 | 106 | 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/templates/startup.xml.udp.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 14 | 15 | 16 | UDP collector 17 | 18 | Listening port 4739 19 | 4739 20 | 1800 21 | 1800 22 | {{ ansible_host }} 23 | 24 | json writer 25 | 26 | 27 | 28 | 29 | 31 | json writer 32 | 33 | 55 | 56 | 57 | 58 | JSON storage plugin 59 | 60 | json 61 | no 62 | raw 63 | unix 64 | raw 65 | yes 66 | no 67 | 74 | 75 | send 76 | 127.0.0.1 77 | 56789 78 | tcp 79 | 80 | 81 | 82 | 83 | 84 | 85 | 94 | yes 95 | 96 | 97 | 98 | 99 | 100 | 105 | 106 | 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/vars/main.yml: -------------------------------------------------------------------------------- 1 | build: 2 | dir: /tmp/ipfixcol 3 | git: https://github.com/CESNET/ipfixcol.git 4 | branch: master 5 | cmds: [ 6 | autoreconf -i, 7 | ./configure, 8 | make, 9 | make install 10 | ] 11 | fastbit: 12 | archive: libfastbit.tar.gz 13 | url: https://dior.ics.muni.cz/~velan/libfastbit/ 14 | git: https://github.com/CESNET/libfastbit.git 15 | dir: /tmp/libfastbit 16 | 17 | # this can be easily changed from command line 18 | build_fastbit_compile: false 19 | build_rpms: false 20 | 21 | # Set the configuration file path for ipfixcol to load here: 22 | script_path: /usr/local/etc/ipfixcol/ 23 | script_filename: startup.xml.udp 24 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/vars/packages.Debian-9.yml: -------------------------------------------------------------------------------- 1 | packages: 2 | base: [ 3 | autoconf, 4 | bison, 5 | build-essential, 6 | docbook-xsl, 7 | doxygen, 8 | flex, 9 | git, 10 | liblzo2-dev, 11 | libtool, 12 | libsctp-dev, 13 | libssl1.0-dev, 14 | libxml2, 15 | libxml2-dev, 16 | pkg-config, 17 | xsltproc 18 | ] 19 | udp_cpg: [libcpg-dev, corosync-dev] 20 | geoip: [libgeoip-dev] 21 | stats: [librrd-dev] 22 | uid: [libsqlite3-dev] 23 | dhcp: [libsqlite3-dev] 24 | postgres: [libpq-dev] -------------------------------------------------------------------------------- 
/provisioning/ansible/roles/ipfixcol/vars/packages.Ubuntu-18.yml: -------------------------------------------------------------------------------- 1 | packages: 2 | base: [ 3 | autoconf, 4 | bison, 5 | build-essential, 6 | docbook-xsl, 7 | doxygen, 8 | flex, 9 | git, 10 | liblzo2-dev, 11 | libtool, 12 | libsctp-dev, 13 | libssl1.0-dev, 14 | libxml2, 15 | libxml2-dev, 16 | pkg-config, 17 | xsltproc 18 | ] 19 | udp_cpg: [libcpg-dev, corosync-dev] 20 | geoip: [libgeoip-dev] 21 | stats: [librrd-dev] 22 | uid: [libsqlite3-dev] 23 | dhcp: [libsqlite3-dev] 24 | postgres: [libpq-dev] 25 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/vars/packages.apt.yml: -------------------------------------------------------------------------------- 1 | packages: 2 | base: [ 3 | autoconf, 4 | bison, 5 | build-essential, 6 | docbook-xsl, 7 | doxygen, 8 | flex, 9 | git, 10 | liblzo2-dev, 11 | libtool, 12 | libsctp-dev, 13 | libssl-dev, 14 | libxml2, 15 | libxml2-dev, 16 | pkg-config, 17 | xsltproc 18 | ] 19 | geoip: [libgeoip-dev] 20 | stats: [librrd-dev] 21 | uid: [libsqlite3-dev] 22 | postgres: [libpq-dev] -------------------------------------------------------------------------------- /provisioning/ansible/roles/ipfixcol/vars/packages.yum.yml: -------------------------------------------------------------------------------- 1 | packages: 2 | base: [ 3 | autoconf, 4 | bison, 5 | docbook-style-xsl, 6 | doxygen, 7 | flex, 8 | gcc , 9 | gcc-c++ , 10 | git, 11 | libtool, 12 | libxml2, 13 | libxml2-devel, 14 | libxslt, 15 | lksctp-tools-devel, 16 | lzo-devel, 17 | make, 18 | openssl-devel, 19 | pkgconfig 20 | ] 21 | geoip: [GeoIP-devel] 22 | stats: [rrdtool-devel] 23 | uid: [sqlite-devel] 24 | postgres: [postgresql-devel] -------------------------------------------------------------------------------- /provisioning/ansible/roles/kafka/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: roles/kafka/defaults/main.yml 3 | 4 | kafka_dir: /opt/kafka 5 | kafka_download_url: https://archive.apache.org/dist/kafka/2.3.0/kafka_2.12-2.3.0.tgz 6 | kafka_filename: kafka_2.12-2.3.0 7 | retention: 30000 8 | kafka_maximum_heap_space: "{{(ansible_memtotal_mb * 0.5)|int}}" 9 | kafka_minimum_heap_space: "{{(ansible_memtotal_mb * 0.25)|int}}" -------------------------------------------------------------------------------- /provisioning/ansible/roles/kafka/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install Apache Kafka 3 | 4 | # Install zookeperd 5 | - name: Install zookeper 6 | apt: name=zookeeperd state=latest update_cache=yes 7 | 8 | - name: Download Apache Kafka 9 | unarchive: src={{ kafka_download_url }} dest=/tmp/ copy=no creates=/tmp/{{ kafka_filename }} 10 | 11 | - name: Create kafka group 12 | group: name=kafka state=present 13 | 14 | - name: Create kafka user 15 | user: name=kafka comment="Apache Kafka" group=kafka shell=/usr/sbin/nologin home=/home/kafka 16 | 17 | - name: Create /opt/kafka directory 18 | file: path={{ kafka_dir }} state=directory mode=0755 19 | 20 | - name: Move Kafka files to {{ kafka_dir }} 21 | shell: if ! 
[ "$(ls -A {{ kafka_dir }})" ]; then (mv /tmp/{{ kafka_filename }}/* /opt/kafka) fi 22 | 23 | - name: Set appropriate permissions on /opt/kafka 24 | file: dest=/opt/kafka owner=kafka group=kafka recurse=yes 25 | 26 | - name: Copy Kafka configuration files (1/2) 27 | template: src=templates/kafka-broker.j2 dest=/etc/default/kafka-broker mode=0644 owner=kafka group=kafka 28 | 29 | - name: Copy Kafka configuration files (2/2) 30 | template: src=templates/kafka-broker.service.j2 dest=/etc/systemd/system/kafka-broker.service mode=0644 owner=kafka group=kafka 31 | 32 | - name: Copy modified Kafka server.properties configuration 33 | template: src=templates/kafka-server.properties.j2 dest={{ kafka_dir}}/config/server.properties mode=0755 owner=kafka group=kafka 34 | 35 | - name: Reload systemd daemon 36 | shell: systemctl daemon-reload 37 | 38 | 39 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/kafka/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install Apache Kafka 2 | include: install.yml 3 | become: yes 4 | 5 | - name: Start Apache Kafka 6 | include: start.yml 7 | become: yes 8 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/kafka/tasks/start.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Start and configure Apache Kafka 3 | - name: Ensure zookeper is running 4 | service: name=zookeeper state=started enabled=yes 5 | 6 | - name: Start Kafka server 7 | service: name=kafka-broker state=started enabled=yes 8 | 9 | - name: Wait untill Kafka server starts 10 | pause: seconds=5 11 | 12 | - name: Check if input topic was already added 13 | shell: /opt/kafka/bin/kafka-topics.sh --list --zookeeper localhost:2181 | grep ipfix.entry 14 | register: resultin 15 | failed_when: resultin.rc not in [0,1] 16 | 17 | - name: Check if output topic was already added 18 | shell: /opt/kafka/bin/kafka-topics.sh --list --zookeeper localhost:2181 | grep results.output 19 | register: resultout 20 | failed_when: resultout.rc not in [0,1] 21 | 22 | - name: Add Kafka topic "ipfix.entry" 23 | shell: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic ipfix.entry --partitions 1 --replication-factor 1 24 | when: resultin.rc == 1 25 | 26 | - name: Set retention on the input topic 27 | shell: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic ipfix.entry --config retention.ms={{ retention }} 28 | when: resultin.rc == 1 29 | 30 | - name: Add Kafka topic "results.output" 31 | shell: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic results.output --partitions 1 --replication-factor 1 32 | when: resultout.rc == 1 33 | 34 | - name: Set retention on the output topic 35 | shell: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic results.output --config retention.ms={{ retention }} 36 | when: resultout.rc == 1 -------------------------------------------------------------------------------- /provisioning/ansible/roles/kafka/templates/kafka-broker.j2: -------------------------------------------------------------------------------- 1 | # /etc/default/kafka-broker 2 | 3 | ENABLE="yes" 4 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/kafka/templates/kafka-broker.service.j2: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Apache Kafka server (broker) 3 | Documentation=http://kafka.apache.org/documentation.html 4 | Requires=network.target remote-fs.target 5 | After=network.target remote-fs.target 6 | 7 | [Service] 8 | Type=simple 9 | PIDFile=/var/run/kafka.pid 10 | User=kafka 11 | Group=kafka 12 | ExecStart={{ kafka_dir }}/bin/kafka-server-start.sh {{ kafka_dir }}/config/server.properties 13 | ExecStop={{ kafka_dir }}/bin/kafka-server-stop.sh 14 | Restart=on-failure 15 | SyslogIdentifier=kafka 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/kafka/templates/kafka-server-start.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | if [ $# -lt 1 ]; 18 | then 19 | echo "USAGE: $0 [-daemon] server.properties [--override property=value]*" 20 | exit 1 21 | fi 22 | base_dir=$(dirname $0) 23 | 24 | if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then 25 | export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" 26 | fi 27 | 28 | if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then 29 | export KAFKA_HEAP_OPTS="-Xmx{{kafka_maximum_heap_space}}M -Xms{{kafka_minimum_heap_space}}M" 30 | fi 31 | 32 | EXTRA_ARGS="-name kafkaServer -loggc" 33 | 34 | COMMAND=$1 35 | case $COMMAND in 36 | -daemon) 37 | EXTRA_ARGS="-daemon "$EXTRA_ARGS 38 | shift 39 | ;; 40 | *) 41 | ;; 42 | esac 43 | 44 | exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" 45 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: roles/spark/defaults/main.yml 3 | 4 | # login 5 | user: spark 6 | 7 | # work directories 8 | dir_spark: /opt/spark 9 | dir_applications: /home/{{ user }}/applications/ 10 | 11 | # download mirrors 12 | url_spark: "https://archive.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz" 13 | url_kafka_assembly: "http://search.maven.org/remotecontent?filepath=org/apache/spark/spark-streaming-kafka-0-8-assembly_2.11/2.1.1/spark-streaming-kafka-0-8-assembly_2.11-2.1.1.jar" 14 | 15 | # Name of spark directory after inflating downloaded archive 16 | spark_inflated_dir_name: spark-2.1.1-bin-hadoop2.7 17 | 18 | # spark test settings 19 | spark_batch_size: 5000 20 | spark_worker_cores: 2 21 | # spark worker memory in MB 22 | spark_worker_memory: "{{ ansible_memtotal_mb }}" 23 | spark_masterurl: spark://{{ masterIP }}:7077 24 | 
-------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/tasks/dependencies.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install dependencies for Spark 3 | 4 | # Install Scala 5 | - name: Install Scala 6 | apt: name=scala state=latest update_cache=yes 7 | 8 | # Install Python 9 | - name: Install Python 2.7 10 | apt: name=python2.7 state=latest update_cache=yes 11 | 12 | - name: Download master's public key 13 | fetch: src=/home/{{ user }}/.ssh/id_rsa.pub dest=tmp/sparkMaster.pub flat=yes 14 | when: "'sparkMaster' in group_names" 15 | 16 | - name: Add Spark master's public SSH key to authorized_keys 17 | authorized_key: user={{ user }} key="{{ item }}" 18 | with_file: 19 | - tmp/sparkMaster.pub 20 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/tasks/download_locally.yml: -------------------------------------------------------------------------------- 1 | # Download installation files to localhost 2 | 3 | # Apache Spark 4 | - name: Download Apache Spark 5 | get_url: url={{ url_spark }} dest=/tmp/spark.tgz 6 | delegate_to: 127.0.0.1 7 | 8 | # Spark Kafka 9 | - name: Download Spark Kafka 10 | get_url: url={{ url_kafka_assembly }} dest=/tmp/spark-streaming-kafka-assembly.jar 11 | delegate_to: 127.0.0.1 12 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # install spark on each machine into dir_spark directory 3 | - name: Create directory structure for Spark 4 | file: path={{ dir_spark }}/spark-bin state=directory recurse=yes owner={{ user }} group={{ user }} 5 | 6 | # --strip option cannot be used with Ansible 2.1.0 due to a bug: https://github.com/ansible/ansible-modules-core/issues/2480 7 | - name: Copy and untar Spark package to host 8 | unarchive: src=/tmp/spark.tgz dest=/tmp/ owner={{ user }} group={{ user }} creates=/tmp/{{ spark_inflated_dir_name }} 9 | 10 | - name: Fix directory structure 1/2. 11 | shell: /bin/mv /tmp/{{ spark_inflated_dir_name }}/* {{ dir_spark }}/spark-bin/ creates={{ dir_spark }}/spark-bin/RELEASE 12 | 13 | - name: Fix directory structure 2/2. 
14 | file: path=/tmp/{{ spark_inflated_dir_name }} state=absent 15 | 16 | - name: Prepare log4j.properties config file 17 | copy: remote_src=True src={{ dir_spark }}/spark-bin/conf/log4j.properties.template 18 | dest={{ dir_spark }}/spark-bin/conf/log4j.properties 19 | 20 | - name: Edit log4j.properties config file 21 | replace: dest={{ dir_spark }}/spark-bin/conf/log4j.properties regexp='log4j\.rootCategory=INFO, console' replace='log4j.rootCategory=WARN, console' 22 | 23 | - name: Copy the default spark config to spark directory 24 | template: src=spark-defaults.conf.j2 dest={{ dir_spark }}/spark-bin/conf/spark-defaults.conf owner={{ user }} group={{ user }} mode=0750 25 | 26 | - include: prepareMaster.yml 27 | when: "'sparkMaster' in group_names" 28 | become: yes 29 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Download required Spark packages locally 2 | include: download_locally.yml 3 | 4 | - name: Install dependencies 5 | include: dependencies.yml 6 | become: yes 7 | 8 | - name: Install Apache Spark and Kafka 9 | include: install.yml 10 | become: yes 11 | 12 | - name: Deploy Spark master and slaves service 13 | include: service.yml 14 | become: yes 15 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/tasks/prepareMaster.yml: -------------------------------------------------------------------------------- 1 | - name: Create application directory on Master 2 | file: path={{ dir_applications }} state=directory recurse=yes owner={{ user }} group={{ user }} 3 | tags: 4 | - master 5 | 6 | - name: Copy the run script to the master 7 | template: src=run-application.sh.j2 dest={{ dir_applications }}/run-application.sh owner={{ user }} group={{ user }} mode=0750 8 | tags: 9 | - master 10 | 11 | - name: Copy kafka assembly to the master 12 | copy: src=/tmp/spark-streaming-kafka-assembly.jar dest={{ dir_spark }}/spark-bin/jars/spark-streaming-kafka-assembly.jar owner={{ user }} group={{ user }} 13 | tags: 14 | - master 15 | 16 | - name: Install zip on the master 17 | apt: name=zip state=latest update_cache=yes 18 | tags: 19 | - master 20 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/tasks/service.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy Spark Master service definition 3 | template: 4 | src: templates/spark-master.service.j2 5 | mode: 0644 6 | dest: /etc/systemd/system/spark-master.service 7 | when: "'sparkMaster' in group_names" 8 | 9 | - name: Start Spark Master service 10 | systemd: 11 | name: spark-master 12 | enabled: true 13 | state: started 14 | when: "'sparkMaster' in group_names" 15 | 16 | - name: Copy Spark Slave service definition 17 | template: 18 | src: templates/spark-slave.service.j2 19 | mode: 0644 20 | dest: /etc/systemd/system/spark-slave.service 21 | when: "'sparkSlave' in group_names" 22 | 23 | - name: Start Spark Slave service 24 | systemd: 25 | name: spark-slave 26 | enabled: true 27 | state: started 28 | when: "'sparkSlave' in group_names" 29 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/templates/run-application.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 
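# Usage: ./run-application.sh <path/to/application.py> [application arguments]
# If a modules/ directory exists next to the application, it is zipped and passed to
# spark-submit via --py-files before the application is submitted to the Spark master.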
if [ -z "$1" ] 4 | then 5 | echo "You must specify the application with arguments" 6 | exit 1; 7 | fi 8 | APPLICATION=$@ 9 | MODULESDIR=$(dirname "$1")/modules 10 | 11 | # Output colors 12 | GREEN='\033[0;32m' 13 | NC='\033[0m' # No Color 14 | PYFILES='' 15 | 16 | if [ -d $MODULESDIR ] 17 | then 18 | echo -e "${GREEN}[info] Creating zip with all modules${NC}" 19 | zip -r $MODULESDIR{.zip,} 20 | PYFILES="--py-files $MODULESDIR.zip" 21 | fi 22 | 23 | echo -e "${GREEN}[info] Running application $APPLICATION...${NC}" 24 | 25 | export SPARK_MASTER_HOST={{ masterIP }} 26 | export SPARK_LOCAL_HOST={{ masterIP }} 27 | 28 | {{ dir_spark }}/spark-bin/bin/spark-submit --total-executor-cores {{ groups['sparkSlave'] | length if (groups['sparkSlave'] | length) >= 2 else 2}} --executor-memory {{ spark_worker_memory | string + 'M' if spark_worker_memory < 2048 else '2G' }} $PYFILES $APPLICATION 29 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/templates/spark-defaults.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # Default system properties included when running spark-submit. 19 | # This is useful for setting default environmental settings. 
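# Note: the {{ ... }} expressions below are rendered by Ansible when this template is
# deployed; the executor counts and memory limits are derived from the number of hosts
# in the sparkSlave inventory group and from the spark_worker_memory role variable.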
20 | 21 | 22 | spark.master {{ spark_masterurl }} 23 | spark.eventLog.enabled false 24 | spark.jars {{ dir_spark }}/spark-bin/jars/spark-streaming-kafka-assembly.jar 25 | 26 | # Dynamic executors allocation 27 | spark.streaming.dynamicAllocation.enabled true 28 | spark.streaming.dynamicAllocation.minExecutors {{ 2 if (groups['sparkSlave'] | length) >= 2 else 1}} 29 | spark.streaming.dynamicAllocation.maxExecutors {{ groups['sparkSlave'] | length if (groups['sparkSlave'] | length) >= 2 else 2}} 30 | spark.streaming.dynamicAllocation.initialExecutors {{ groups['sparkSlave'] | length }} 31 | spark.streaming.dynamicAllocation.reserveRate 0.1 32 | spark.cores.max {{ groups['sparkSlave'] | length if (groups['sparkSlave'] | length) >= 2 else 2}} 33 | spark.executor.memory {{ spark_worker_memory | string + 'M' if spark_worker_memory < 2048 else '2G' }} 34 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/templates/spark-master.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Spark Master service 3 | Documentation=https://spark.apache.org/docs/latest/ 4 | Requires=network.target remote-fs.target 5 | After=network.target remote-fs.target 6 | 7 | [Service] 8 | Type=forking 9 | User={{ user }} 10 | Group={{ user }} 11 | Environment=SPARK_MASTER_HOST={{ masterIP }} SPARK_LOCAL_HOST={{ masterIP }} 12 | ExecStart={{ dir_spark }}/spark-bin/sbin/start-master.sh 13 | ExecStop={{ dir_spark }}/spark-bin/sbin/stop-master.sh 14 | Restart=on-failure 15 | SyslogIdentifier=spark-master 16 | 17 | [Install] 18 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /provisioning/ansible/roles/spark/templates/spark-slave.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Spark Slave service 3 | Documentation=https://spark.apache.org/docs/latest/ 4 | Requires=network.target remote-fs.target 5 | After=network.target remote-fs.target 6 | 7 | [Service] 8 | Type=forking 9 | User={{ user }} 10 | Group={{ user }} 11 | Environment=SPARK_MASTER_HOST={{ masterIP }} SPARK_LOCAL_HOST={{ ansible_host }} 12 | ExecStart={{ dir_spark }}/spark-bin/sbin/start-slave.sh {{ spark_masterurl }} -m {{ spark_worker_memory }}M 13 | ExecStop={{ dir_spark }}/spark-bin/sbin/stop-slave.sh 14 | Restart=on-failure 15 | SyslogIdentifier=spark-slave 16 | 17 | [Install] 18 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /provisioning/ansible/roles/ubuntu-systemd-normalizer/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Restart Service Instance 'default' 4 | become: yes 5 | command: systemctl restart tcpnormalizer@default.service 6 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ubuntu-systemd-normalizer/tasks/deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Get From Git 4 | git: 5 | repo: https://github.com/CSIRT-MU/simple_tcp_normalizer.git 6 | dest: /opt/simple_tcp_normalizer 7 | version: master 8 | 9 | - name: Build 10 | command: mvn package 11 | args: 12 | chdir: /opt/simple_tcp_normalizer 13 | 14 | - name: Set Owner 15 | file: 16 | path: /opt/simple_tcp_normalizer 17 | owner: "{{ normalizer_user }}" 18 | group: "{{ 
normalizer_user }}" 19 | recurse: yes 20 | 21 | - name: Copy Systemd Service 22 | template: src=templates/tcpnormalizer@.service.j2 dest=/etc/systemd/system/tcpnormalizer@.service mode=664 owner=root group=root 23 | 24 | - name: Enable Autostart of Service Instance 'default' 25 | command: systemctl enable tcpnormalizer@default.service 26 | notify: Restart Service Instance 'default' -------------------------------------------------------------------------------- /provisioning/ansible/roles/ubuntu-systemd-normalizer/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Include User Creation 4 | include: users.yml 5 | become: yes 6 | 7 | - name: Include Deploy 8 | include: deploy.yml 9 | become: yes -------------------------------------------------------------------------------- /provisioning/ansible/roles/ubuntu-systemd-normalizer/tasks/users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create normalizer user group 4 | group: 5 | name: "{{ normalizer_user }}" 6 | state: present 7 | 8 | - name: Create normalizer user 9 | user: 10 | name: "{{ normalizer_user }}" 11 | group: "{{ normalizer_user }}" 12 | comment: "TCP Normalizer User" 13 | shell: /usr/sbin/nologin 14 | home: /home/{{ normalizer_user }} -------------------------------------------------------------------------------- /provisioning/ansible/roles/ubuntu-systemd-normalizer/templates/tcpnormalizer@.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Simple TCP Normalizer 3 | After=network.target remote-fs.target syslog.target 4 | 5 | [Service] 6 | ExecStart=/opt/simple_tcp_normalizer/bin/tcp_normalizer.sh --config /opt/simple_tcp_normalizer/config/%i.yml 7 | Type=simple 8 | User={{ normalizer_user }} 9 | Restart=on-failure 10 | SuccessExitStatus=143 11 | SyslogIdentifier=tcpnormalizer 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/ubuntu-systemd-normalizer/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | normalizer_user: normalizer -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: roles/web/defaults/main.yml 3 | 4 | # CSR subject 5 | cert_subj: "/C=CZ/ST=CzechRepublic/L=Brno/O=Stream4Flow/OU=Stream4Flow/CN={{ ansible_host }}" 6 | 7 | web2py_passwd: Stream4Flow 8 | 9 | 10 | repository_url: "https://github.com/CSIRT-MU/Stream4Flow" 11 | repository_tmp: "/tmp/Stream4Flow" 12 | 13 | hostname: "{{ ansible_hostname }}" -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install dependencies 2 | include: dependencies.yml 3 | become: yes 4 | 5 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/tasks/dependencies.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install dependencies for running web. 
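# (Apache with mod_wsgi serves the web2py interface; the Python modules installed
#  below provide the Elasticsearch client and JSON handling used by its controllers.)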
3 | 4 | # Install required apt packages 5 | - name: Install required apt packages 6 | apt: name={{ item }} state=latest update_cache=yes 7 | with_items: 8 | - apache2 9 | - apache2-utils 10 | - apache2-bin 11 | - libxml2-dev 12 | - python-dev 13 | - libapache2-mod-wsgi 14 | - xvfb 15 | - git 16 | - build-essential 17 | - chrpath 18 | - libssl-dev 19 | - libxft-dev 20 | - unzip 21 | 22 | 23 | # Install python modules 24 | - name: Install python modules 25 | pip: name={{item}} executable=pip 26 | with_items: 27 | - elasticsearch 28 | - elasticsearch-dsl 29 | - simplejson 30 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/tasks/install_webapp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Clone git repository 3 | git: 4 | repo: "{{ repository_url }}" 5 | dest: "{{ repository_tmp }}" 6 | version: master 7 | 8 | - name: Copy routes.py to /var/www/web2py 9 | command: cp "{{ repository_tmp }}/web-interface/routes.py" /var/www/web2py/ 10 | 11 | - name: Copy Stream4Flow web2py application 12 | command: cp -r "{{ repository_tmp }}/web-interface/Stream4Flow" /var/www/web2py/applications/ 13 | 14 | - name: Set correct owner and permissions of files 15 | file: 16 | dest: /var/www/web2py/applications/ 17 | owner: www-data 18 | group: www-data 19 | mode: 0644 20 | recurse: yes 21 | 22 | - name: Set correct permissions of directories 23 | file: 24 | path: /var/www/web2py/applications/ 25 | mode: u=rwX,g=rX,o=rX 26 | recurse: yes 27 | 28 | - name: Set hostname in appconfig.ini 29 | lineinfile: 30 | dest: /var/www/web2py/applications/Stream4Flow/private/appconfig.ini 31 | regexp: "hostname" 32 | line: "hostname = {{ hostname }} " 33 | 34 | - name: Remove Stream4Flow repository 35 | file: 36 | path: "{{ repository_tmp }}" 37 | state: absent -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Download and install dependencies. 2 | include: dependencies.yml 3 | become: yes 4 | 5 | - name: Set up the environment.
6 | include: setup.yml 7 | become: yes 8 | 9 | - name: Add Stream4Flow application to web2py 10 | include: install_webapp.yml 11 | become: yes 12 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/tasks/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Setup SSH, iptables, Apache, SSL and web2py 3 | 4 | - name: Edit sshd_conf (1/2) 5 | lineinfile: "dest=/etc/ssh/sshd_config state=present regexp='#AuthorizedKeysFile.*' line='AuthorizedKeysFile %h/.ssh/authorized_keys'" 6 | 7 | - name: Edit sshd_conf (2/2) 8 | lineinfile: "dest=/etc/ssh/sshd_config state=present regexp='PasswordAuthentication.*' line='PasswordAuthentication no'" 9 | 10 | - name: Get web2py 11 | unarchive: src=https://mdipierro.pythonanywhere.com/examples/static/web2py_src.zip dest=/var/www copy=no 12 | 13 | - name: Change ownership on web2py folder 14 | file: path=/var/www/web2py owner=www-data group=www-data recurse=yes 15 | 16 | - name: Modify web2py directory structure 17 | shell: mv /var/www/web2py/handlers/wsgihandler.py /var/www/web2py/wsgihandler.py 18 | 19 | - name: Copy web2py Apache site 20 | template: src=templates/web2py.conf.j2 dest=/etc/apache2/sites-available/web2py.conf 21 | 22 | - name: Remove default page from Apache 23 | file: path=/etc/apache2/sites-enabled/000-default.conf state=absent 24 | 25 | - name: Enable Apache modules 26 | shell: a2enmod ssl proxy proxy_http headers expires wsgi rewrite 27 | 28 | - name: Enable web2py site in Apache 29 | shell: a2ensite web2py.conf 30 | 31 | - name: Restart Apache 32 | service: name=apache2 state=restarted enabled=yes 33 | 34 | - name: Copy script to set web2py password 35 | template: src=templates/chgpasswd.py.j2 dest=/var/www/web2py/chgpasswd.py mode=0740 owner=www-data group=www-data 36 | 37 | - name: Run script to set web2py password 38 | shell: ./chgpasswd.py chdir=/var/www/web2py/ 39 | become: True 40 | 41 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/templates/chgpasswd.py.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | 3 | #from gluon.widget import console 4 | from gluon.main import save_password 5 | 6 | #console() 7 | save_password("{{ web2py_passwd }}",443) 8 | -------------------------------------------------------------------------------- /provisioning/ansible/roles/web/templates/web2py.conf.j2: -------------------------------------------------------------------------------- 1 | WSGIDaemonProcess web2py user=www-data group=www-data processes=1 threads=1 2 | 3 | 4 | WSGIProcessGroup web2py 5 | WSGIScriptAlias / /var/www/web2py/wsgihandler.py 6 | WSGIPassAuthorization On 7 | 8 | 9 | AllowOverride None 10 | Require all denied 11 | 12 | Require all granted 13 | 14 | 15 | 16 | AliasMatch ^/([^/]+)/static/(?:_[\d]+.[\d]+.[\d]+/)?(.*) \ 17 | /var/www/web2py/applications/$1/static/$2 18 | 19 | 20 | Options -Indexes 21 | ExpiresActive On 22 | ExpiresDefault "access plus 1 hour" 23 | Require all granted 24 | 25 | 26 | CustomLog /var/log/apache2/ssl-access.log common 27 | ErrorLog /var/log/apache2/error.log 28 | 29 | -------------------------------------------------------------------------------- /provisioning/ansible/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: site.yml 3 | 4 | - include: all.yml 5 | - include: producer.yml 6 | - include: 
sparkMaster.yml 7 | - include: sparkSlave.yml 8 | - include: consumer.yml 9 | -------------------------------------------------------------------------------- /provisioning/ansible/sparkMaster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: sparkMaster 3 | roles: 4 | - get-vars 5 | - spark 6 | - applications 7 | -------------------------------------------------------------------------------- /provisioning/ansible/sparkSlave.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: sparkSlave 3 | roles: 4 | - get-vars 5 | - spark 6 | - applications 7 | -------------------------------------------------------------------------------- /provisioning/ansible/tmp/sparkMaster.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCrzpmlApMsu2bcP+LGBsdTmJax2vjiQ83cqTyL0bzW2JKIyDvBFHUud0NyOGWFuOei0PXt+3JJ1umWLxvpXHv1VwVfyvy88rE8vv3InUgJ5g67i7Tmi9aWJV6iQSI/WWcFmJ4SMmWoFoQ85SfKiU5AESQS3rJDMIi6niCufTfDspduqGNtfNGXdKCvn8FQwWQudoPom41RTUhCJ4clsOGqBH6GE3GTJZf/w/o1WW/+6vsYy+XIaH7CKqjcg87xzC1GQ184EqpJuR/K/JJM2wghoz374OmHkMiM4Pv2adi8BikoUo/YjWkphhXJbVoh6EEv1GlJerHqX6W9uKkMlVHZW8gz9fbzoRToNEBdEerUL/KBMDpKPSR06Hmebijr1oEzKMUgHE5a4VJYprAiAI7aXxY4RespHsl1UzMbGj7QlRI4UfQeh9qmG7LQ6jOIgGZeEnLU3GCsrtdMSaqM6OneKnFSeSczDB5pDX5LrskmA17hSrYH9DgdF8ARU+3v9kqYgmLJ0Uwr3xpF19Nn/PnlJRYL/t251GJJp1mmZGK9LpP94kqg05uPWdDqcB8EuSLjwPx0fnEyqXaW7id/cBhM1/C9I730ScqNel9m6EIE1TP5lPspK7aCSoCumHX24Eb+x0c85qIaO+TBH06pRqISIypyn32keEW4oTSxRCvJFw== ansible-generated on sparkMaster 2 | -------------------------------------------------------------------------------- /provisioning/configuration.yml: -------------------------------------------------------------------------------- 1 | # """ 2 | # This configuration allows to set system properties of Stream4Flow virtual 3 | # guests provisioned by Vagrant. 4 | # 5 | # (Values below represents minimal requirements to run Stream4Flow properly.) 6 | # """ 7 | 8 | # """ 9 | # Common settings for all virtual guest. 10 | # """ 11 | common: 12 | # Select Vagrant guest virtual machine box. You can search for boxes at 13 | # https://atlas.hashicorp.com/search. 14 | box: ubuntu/xenial64 15 | box_url: ubuntu/xenial64 16 | 17 | # Set provision_on_guest to "true" if you want to use ansible provisioning 18 | # on guests (this option will rapidly slow down provisioning time but 19 | # allows provision on Windows and provision of a separate guest). 20 | provision_on_guest: true 21 | 22 | 23 | # """ 24 | # Individual system settings for each of quests. Selected IP addresses should 25 | # not exists in your current network. Size of main memory is in MB. 26 | # """ 27 | 28 | # Producer node with IPFIXcol and Kafka 29 | producer: 30 | ip: 192.168.0.2 31 | memory: 4096 32 | cpu: 2 33 | 34 | # Spark Master node 35 | sparkMaster: 36 | ip: 192.168.0.100 37 | memory: 2048 38 | cpu: 2 39 | 40 | # Spark slaves (workers) 41 | sparkSlave: 42 | # Number of slaves that will be provisioned 43 | count: 1 44 | # Prefix of slaves IP addresses (suffix starts at 101) 45 | ip_prefix: 192.168.0. 
46 | memory: 2048 47 | cpu: 2 48 | 49 | # Consumer node with Elastic Stack and web interface 50 | consumer: 51 | ip: 192.168.0.3 52 | memory: 4096 53 | cpu: 2 54 | -------------------------------------------------------------------------------- /provisioning/test/README.md: -------------------------------------------------------------------------------- 1 | # Stream4Flow tests 2 | This directory contains tests. 3 | ## Integration test 4 | A basic test to find out whether the framework was deployed correctly. -------------------------------------------------------------------------------- /provisioning/test/integration/README.md: -------------------------------------------------------------------------------- 1 | # Integration test for Stream4Flow 2 | 3 | This test is designed to determine the correctness of the cluster deployment. 4 | The test consists of the following stages: 5 | - Run the protocols statistics application on sparkMaster. 6 | - Copy the test data to the producer. 7 | - Send the data once with ipfixsend to the producer. 8 | - After the data are processed, the stored data are read and checked to verify that the expected numbers of flows match the numbers of flows in the stored data. 9 | 10 | The test must be run directly after the cluster's deployment, or there must not be any data already stored in Elasticsearch. 11 | Previously stored data could lead to a false-positive result. 12 | 13 | To run the test, run the ansible-playbook integration-test.yml -i inventory.ini.example -U spark --ask-pass command in the ~/Stream4Flow/provisioning/test/integration directory. -------------------------------------------------------------------------------- /provisioning/test/integration/integration-test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: [sparkMaster,producer,consumer] 3 | remote_user: spark 4 | roles: 5 | - integration-test -------------------------------------------------------------------------------- /provisioning/test/integration/inventory.ini.example: -------------------------------------------------------------------------------- 1 | [producer] 2 | producer ansible_host=192.168.0.2 3 | 4 | [consumer] 5 | consumer ansible_host=192.168.0.3 6 | 7 | [sparkMaster] 8 | sparkMaster ansible_host=192.168.0.100 9 | 10 | [sparkSlave] 11 | sparkSlave101 ansible_host=192.168.0.101 12 | sparkSlave102 ansible_host=192.168.0.102 13 | -------------------------------------------------------------------------------- /provisioning/test/integration/roles/integration-test/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | user: spark 3 | consumer_ip: 192.168.0.3 4 | consumer_port: 9200 5 | producer_ip: 192.168.0.2 6 | producer_port: 4739 7 | time_app_start: 25 8 | time_sending: 35 9 | -------------------------------------------------------------------------------- /provisioning/test/integration/roles/integration-test/files/test-data.ipfix: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/provisioning/test/integration/roles/integration-test/files/test-data.ipfix -------------------------------------------------------------------------------- /provisioning/test/integration/roles/integration-test/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare sparkMaster 3 | include: prepareMaster.yml 4 | when: "'sparkMaster' in group_names" 5 | 6 | - name: Test 7 | include:
prepareProducer.yml 8 | when: "'producer' in group_names" 9 | - name: Query 10 | include: query.yml 11 | when: "'consumer' in group_names" 12 | -------------------------------------------------------------------------------- /provisioning/test/integration/roles/integration-test/tasks/prepareMaster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Start application on sparkMaster 3 | shell: cd /home/{{ user }}/applications; screen -L -d -m ./run-application.sh ./statistics/protocols_statistics/spark/protocols_statistics.py -iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output; sleep 1; 4 | 5 | - name: Wait 25seconds 6 | pause: seconds={{ time_app_start }} 7 | -------------------------------------------------------------------------------- /provisioning/test/integration/roles/integration-test/tasks/prepareProducer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy test Data to producer 3 | copy: src="test-data.ipfix" dest="/tmp/test-data.ipfix" 4 | 5 | 6 | - name: Send data with ipfixsend 7 | command: ipfixsend -i /tmp/test-data.ipfix -d {{ producer_ip }} -p {{ producer_port }} -t UDP -n 10 8 | 9 | - name: Wait for sending 10 | pause: seconds=30 11 | 12 | -------------------------------------------------------------------------------- /provisioning/test/integration/roles/integration-test/tasks/query.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy query script 4 | template: src="query_for_data.py" dest="/tmp/query_for_data.py" 5 | 6 | - name: Wait for sending 7 | wait_for: 8 | host: consumer 9 | state: present 10 | delay: 10 11 | timeout: "{{ time_sending }}" 12 | 13 | - name: Run query test 14 | command: python /tmp/query_for_data.py 15 | register: out 16 | 17 | - debug: var=out.stdout 18 | -------------------------------------------------------------------------------- /provisioning/test/integration/roles/integration-test/templates/query_for_data.py: -------------------------------------------------------------------------------- 1 | from urllib import urlopen 2 | import simplejson as json 3 | 4 | 5 | 6 | def contains(set): 7 | if 40 in set: 8 | if 60 in set: 9 | if 20 in set: 10 | return True 11 | return False 12 | 13 | 14 | def getdata(): 15 | url = urlopen('http://{{ consumer_ip }}:{{ consumer_port }}/spark-*/_search').read() 16 | url = json.loads(url) 17 | a = [] 18 | for i in range(0, len(url.get('hits').get('hits'))): 19 | a.append(url.get('hits').get('hits')[i].get('_source').get('flows')) 20 | return set(a) 21 | 22 | 23 | if __name__ == "__main__": 24 | if contains(getdata()): 25 | print("Number of flows in elasticsearch is as expected") 26 | exit(0) 27 | else: 28 | print("Something went wrong - data are not as expected") 29 | exit(1) -------------------------------------------------------------------------------- /web-interface/README.md: -------------------------------------------------------------------------------- 1 | # Stream4Flow - Web Interface 2 | 3 | Web interface of the Stream4Flow framework. 
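The interface is a web2py application that the web Ansible role deploys to /var/www/web2py/applications/Stream4Flow and serves through Apache over HTTPS on the consumer node. As a quick reachability check (assuming the default Vagrant configuration, where the consumer node is 192.168.0.3), a command such as curl -k https://192.168.0.3/ can be used; adjust the address for other deployments.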
-------------------------------------------------------------------------------- /web-interface/Stream4Flow/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/controllers/protocols_statistics.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Import Elasticsearch library 4 | import elasticsearch 5 | from elasticsearch_dsl import Search, Q, A 6 | # Import global functions 7 | from global_functions import escape 8 | 9 | 10 | #----------------- Main Functions -------------------# 11 | 12 | 13 | def protocols_statistics(): 14 | """ 15 | Show the main page of the Protocols Statistics section. 16 | 17 | :return: Empty dictionary 18 | """ 19 | # Use standard view 20 | response.view = request.controller + '/protocols_statistics.html' 21 | return dict() 22 | 23 | 24 | #----------------- Chart Functions ------------------# 25 | 26 | 27 | def get_statistics(): 28 | """ 29 | Obtains statistics about TCP, UDP a other protocols. 30 | 31 | :return: JSON with status "ok" or "error" and requested data. 32 | """ 33 | 34 | # Check login 35 | if not session.logged: 36 | json_response = '{"status": "Error", "data": "You must be logged!"}' 37 | return json_response 38 | 39 | # Check mandatory inputs 40 | if not (request.get_vars.beginning and request.get_vars.end and request.get_vars.aggregation and request.get_vars.type): 41 | json_response = '{"status": "Error", "data": "Some mandatory argument is missing!"}' 42 | return json_response 43 | 44 | # Parse inputs and set correct format 45 | beginning = escape(request.get_vars.beginning) 46 | end = escape(request.get_vars.end) 47 | aggregation = escape(request.get_vars.aggregation) 48 | type = escape(request.get_vars.type) # name of field to create sum from, one of {flows, packets, bytes } 49 | 50 | try: 51 | # Elastic query 52 | client = elasticsearch.Elasticsearch([{'host': myconf.get('consumer.hostname'), 'port': myconf.get('consumer.port')}]) 53 | elastic_bool = [] 54 | elastic_bool.append({'range': {'@timestamp': {'gte': beginning, 'lte': end}}}) 55 | elastic_bool.append({'term': {'@type': 'protocols_statistics'}}) 56 | 57 | qx = Q({'bool': {'must': elastic_bool}}) 58 | s = Search(using=client, index='_all').query(qx) 59 | s.aggs.bucket('by_time', 'date_histogram', field='@timestamp', interval=aggregation)\ 60 | .bucket('by_type', 'terms', field='protocol.raw')\ 61 | .bucket('sum_of_flows', 'sum', field=type) 62 | s.sort('@timestamp') 63 | result = s.execute() 64 | 65 | # Result Parsing into CSV in format: timestamp, tcp protocol value, udp protocol value, other protocols value 66 | data_raw = {} 67 | data = "Timestamp,TCP protocol,UDP protocol,Other protocols;" # CSV header 68 | for interval in result.aggregations.by_time.buckets: 69 | timestamp = interval.key 70 | timestamp_values = [''] * 3 71 | data_raw[timestamp] = timestamp_values 72 | for bucket in interval.by_type.buckets: 73 | value = bucket.sum_of_flows.value 74 | if bucket.key == "tcp": 75 | data_raw[timestamp][0] = str(int(value)) 76 | elif bucket.key == "udp": 77 | data_raw[timestamp][1] = str(int(value)) 78 | elif bucket.key == "other": 79 | data_raw[timestamp][2] = str(int(value)) 80 | 81 | data += str(timestamp) + "," + str(data_raw[timestamp][0]) + "," + str(data_raw[timestamp][1]) + "," + str(data_raw[timestamp][2]) + ";" 82 | 83 | json_response = '{"status": "Ok", 
"data": "' + data + '"}' 84 | return json_response 85 | 86 | except Exception as e: 87 | json_response = '{"status": "Error", "data": "Elasticsearch query exception: ' + escape(str(e)) + '"}' 88 | return json_response 89 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/cron/crontab: -------------------------------------------------------------------------------- 1 | #crontab -------------------------------------------------------------------------------- /web-interface/Stream4Flow/cron/crontab.example: -------------------------------------------------------------------------------- 1 | #crontab -------------------------------------------------------------------------------- /web-interface/Stream4Flow/databases/stream4flow.sqlite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/databases/stream4flow.sqlite -------------------------------------------------------------------------------- /web-interface/Stream4Flow/languages/plural-cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | { 3 | # "singular form (0)": ["first plural form (1)", "second plural form (2)", ...], 4 | 'vteřina': ['vteřiny', 'vteřin'], 5 | 'vteřinou': ['vteřinami', 'vteřinami'], 6 | 'minuta': ['minuty', 'minut'], 7 | 'minutou': ['minutami', 'minutami'], 8 | 'hodina': ['hodiny','hodin'], 9 | 'hodinou': ['hodinami','hodinami'], 10 | 'den': ['dny','dnů'], 11 | 'dnem': ['dny','dny'], 12 | 'týden': ['týdny','týdnů'], 13 | 'týdnem': ['týdny','týdny'], 14 | 'měsíc': ['měsíce','měsíců'], 15 | 'měsícem': ['měsíci','měsíci'], 16 | 'rok': ['roky','let'], 17 | 'rokem': ['roky','lety'], 18 | 'záznam': ['záznamy', 'záznamů'], 19 | 'soubor': ['soubory', 'souborů'] 20 | } 21 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/languages/plural-en.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | { 3 | # "singular form (0)": ["first plural form (1)", "second plural form (2)", ...], 4 | 'account': ['accounts'], 5 | 'book': ['books'], 6 | 'is': ['are'], 7 | 'man': ['men'], 8 | 'miss': ['misses'], 9 | 'person': ['people'], 10 | 'quark': ['quarks'], 11 | 'shop': ['shops'], 12 | 'this': ['these'], 13 | 'was': ['were'], 14 | 'woman': ['women'], 15 | } 16 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/models/db.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # ------------------------------------------------------------------------- 4 | # if SSL/HTTPS is properly configured and you want all HTTP requests to 5 | # be redirected to HTTPS, uncomment the line below: 6 | # ------------------------------------------------------------------------- 7 | # request.requires_https() 8 | 9 | # ------------------------------------------------------------------------- 10 | # app configuration made easy. 
Look inside private/appconfig.ini 11 | # ------------------------------------------------------------------------- 12 | from gluon.contrib.appconfig import AppConfig 13 | 14 | # ------------------------------------------------------------------------- 15 | # once in production, remove reload=True to gain full speed 16 | # ------------------------------------------------------------------------- 17 | myconf = AppConfig(reload=True) 18 | 19 | # --------------------------------------------------------------------- 20 | # if NOT running on Google App Engine use SQLite or other DB 21 | # --------------------------------------------------------------------- 22 | db = DAL(myconf.get('db.uri'), 23 | pool_size=myconf.get('db.pool_size'), 24 | migrate_enabled=myconf.get('db.migrate'), 25 | check_reserved=['all'], 26 | auto_import=True) 27 | 28 | # ------------------------------------------------------------------------- 29 | # by default give a view/generic.extension to all actions from localhost 30 | # none otherwise. a pattern can be 'controller/function.extension' 31 | # ------------------------------------------------------------------------- 32 | response.generic_patterns = ['*'] if request.is_local else [] 33 | # ------------------------------------------------------------------------- 34 | # choose a style for forms 35 | # ------------------------------------------------------------------------- 36 | response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other 37 | response.form_label_separator = myconf.get('forms.separator') or '' 38 | 39 | # ------------------------------------------------------------------------- 40 | # (optional) optimize handling of static files 41 | # ------------------------------------------------------------------------- 42 | # response.optimize_css = 'concat,minify,inline' 43 | # response.optimize_js = 'concat,minify,inline' 44 | 45 | # ------------------------------------------------------------------------- 46 | # (optional) static assets folder versioning 47 | # ------------------------------------------------------------------------- 48 | # response.static_version = '0.0.0' 49 | 50 | # ------------------------------------------------------------------------- 51 | # Define your tables below (or better in another model file) for example 52 | # 53 | # >>> db.define_table('mytable', Field('myfield', 'string')) 54 | # 55 | # Fields can be 'string','text','password','integer','double','boolean' 56 | # 'date','time','datetime','blob','upload', 'reference TABLENAME' 57 | # There is an implicit 'id integer autoincrement' field 58 | # Consult manual for more options, validators, etc. 
59 | # 60 | # More API examples for controllers: 61 | # 62 | # >>> db.mytable.insert(myfield='value') 63 | # >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL) 64 | # >>> for row in rows: print row.id, row.myfield 65 | # ------------------------------------------------------------------------- 66 | 67 | db.define_table('users', 68 | Field('username', 'string', required=True, notnull=True, unique=True), 69 | Field('name', 'string', required=True, notnull=True), 70 | Field('organization', 'string', required=True, notnull=True), 71 | Field('email', 'string', required=True, notnull=True), 72 | Field('role', 'string', required=True, notnull=True)) 73 | 74 | db.define_table('users_auth', 75 | Field('user_id', 'integer', required=True, notnull=True, unique=True), 76 | Field('salt', 'string', length=20, required=True, notnull=True), 77 | Field('password', 'string', length=64, required=True, notnull=True)) 78 | 79 | db.define_table('users_logins', 80 | Field('user_id', 'integer', required=True, notnull=True, unique=True), 81 | Field('last_login', 'datetime', required=True, notnull=True)) 82 | 83 | # ------------------------------------------------------------------------- 84 | # after defining tables, uncomment below to enable auditing 85 | # ------------------------------------------------------------------------- 86 | # auth.enable_record_versioning(db) 87 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/models/sessions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Enable to get current timestamp 4 | import time 5 | # Enable to get uuid of the web2py user 6 | from gluon.utils import web2py_uuid 7 | 8 | 9 | #----------------- Session Management ---------------# 10 | 11 | 12 | # Store session in the clint cookie 13 | session.secure() 14 | cookie_key = cache.ram('cookie_key',lambda: web2py_uuid(),None) 15 | session.connect(request, response, cookie_key=cookie_key, check_client=True, compression_level=None) 16 | 17 | # Make expired sessions log out 18 | SESSION_TIMEOUT = 30*60 # 30 x 60 seconds 19 | if session.logged and session.lastrequest and (session.lastrequest < time.time()-SESSION_TIMEOUT) and (request.function != "logout"): 20 | redirect("/logout?automatic=true") 21 | # Set last request time 22 | session.lastrequest=time.time() 23 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/modules/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/modules/global_functions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Enable SHA-256 sum 4 | import hashlib 5 | 6 | #----------------- Common Functions -----------------# 7 | 8 | 9 | def escape(html): 10 | """ 11 | Replaces ampresands, quotes and carets in the given HTML with their safe versions. 12 | 13 | :return: Escaped HTML 14 | """ 15 | return html.replace('&', '&').replace('<', '<').replace(' > ', '>').replace('"', '"').replace("'", ''') 16 | 17 | 18 | #----------------- User Functions -------------------# 19 | 20 | 21 | def check_username(db, username): 22 | """ 23 | Checks if given username exists in the database. 
24 | 25 | :param username: username to check 26 | :return: True if username is in the database, False otherwise 27 | """ 28 | 29 | if db(db.users.username == username).select(): 30 | return True 31 | return False 32 | 33 | 34 | def check_password(db, username, password): 35 | """ 36 | Check if given password correspond to given user. 37 | 38 | :param username: Username to check password. 39 | :param password: Password to check 40 | :return: True if password correspond to the user, False otherwise. 41 | """ 42 | 43 | # Check if user exists 44 | if not check_username(db, username): 45 | return False 46 | 47 | # Get user id 48 | user_id = db(db.users.username == username).select(db.users.id)[0].id 49 | # Get salt and corresponding hash 50 | salt = db(db.users_auth.user_id == user_id).select(db.users_auth.salt)[0].salt 51 | hash = hashlib.sha256(salt + password).hexdigest() 52 | 53 | # Verify generated hash 54 | if db(db.users_auth.user_id == user_id and db.users_auth.password == hash).select(): 55 | return True 56 | return False -------------------------------------------------------------------------------- /web-interface/Stream4Flow/private/appconfig.ini: -------------------------------------------------------------------------------- 1 | ; App configuration 2 | [app] 3 | name = Stream4Flow 4 | author = Milan Čermák 5 | description = Stream4Flow Administrator Panel 6 | keywords = Stream4Flow 7 | generator = Web2py Web Framework 8 | 9 | ; Consumer 10 | [consumer] 11 | hostname = consumer 12 | port = 9200 13 | 14 | ; Host configuration 15 | [host] 16 | names = localhost:*, 127.0.0.1:*, *:*, * 17 | 18 | ; db configuration 19 | [db] 20 | uri = sqlite://stream4flow.sqlite 21 | migrate = false 22 | pool_size = 10 ; ignored for sqlite 23 | 24 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/403.html: -------------------------------------------------------------------------------- 1 | 403 2 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/404.html: -------------------------------------------------------------------------------- 1 | 404 2 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/500.html: -------------------------------------------------------------------------------- 1 | 500 2 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/css/animate.css: -------------------------------------------------------------------------------- 1 | @charset "UTF-8"; 2 | 3 | /*! 
4 | Animate.css - http://daneden.me/animate 5 | Licensed under the MIT license - http://opensource.org/licenses/MIT 6 | 7 | Copyright (c) 2013 Daniel Eden 8 | */ 9 | 10 | @-webkit-keyframes flipInX { 11 | 0% { 12 | -webkit-transform: perspective(400px) rotateX(90deg); 13 | transform: perspective(400px) rotateX(90deg); 14 | opacity: 0; 15 | } 16 | 17 | 40% { 18 | -webkit-transform: perspective(400px) rotateX(-10deg); 19 | transform: perspective(400px) rotateX(-10deg); 20 | } 21 | 22 | 70% { 23 | -webkit-transform: perspective(400px) rotateX(10deg); 24 | transform: perspective(400px) rotateX(10deg); 25 | } 26 | 27 | 100% { 28 | -webkit-transform: perspective(400px) rotateX(0deg); 29 | transform: perspective(400px) rotateX(0deg); 30 | opacity: 1; 31 | } 32 | } 33 | 34 | @keyframes flipInX { 35 | 0% { 36 | -webkit-transform: perspective(400px) rotateX(90deg); 37 | -ms-transform: perspective(400px) rotateX(90deg); 38 | transform: perspective(400px) rotateX(90deg); 39 | opacity: 0; 40 | } 41 | 42 | 40% { 43 | -webkit-transform: perspective(400px) rotateX(-10deg); 44 | -ms-transform: perspective(400px) rotateX(-10deg); 45 | transform: perspective(400px) rotateX(-10deg); 46 | } 47 | 48 | 70% { 49 | -webkit-transform: perspective(400px) rotateX(10deg); 50 | -ms-transform: perspective(400px) rotateX(10deg); 51 | transform: perspective(400px) rotateX(10deg); 52 | } 53 | 54 | 100% { 55 | -webkit-transform: perspective(400px) rotateX(0deg); 56 | -ms-transform: perspective(400px) rotateX(0deg); 57 | transform: perspective(400px) rotateX(0deg); 58 | opacity: 1; 59 | } 60 | } 61 | 62 | .flipInX { 63 | -webkit-backface-visibility: visible !important; 64 | -ms-backface-visibility: visible !important; 65 | backface-visibility: visible !important; 66 | -webkit-animation-name: flipInX; 67 | animation-name: flipInX; 68 | } 69 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/css/custom.css: -------------------------------------------------------------------------------- 1 | #wrapper { 2 | width: 100%; 3 | } 4 | #page-wrapper { 5 | padding:5em 2em 2.5em; 6 | background-color: #F1F1F1; 7 | } 8 | .navbar-top-links { 9 | margin-right: 0; 10 | } 11 | 12 | .navbar-top-links li { 13 | display: inline-block; 14 | } 15 | 16 | .navbar-top-links li:last-child { 17 | margin-right: 15px; 18 | } 19 | 20 | .navbar-top-links li a { 21 | padding: 15px; 22 | min-height: 50px; 23 | } 24 | 25 | .navbar-top-links .dropdown-menu li { 26 | display: block; 27 | } 28 | 29 | .navbar-top-links .dropdown-menu li:last-child { 30 | margin-right: 0; 31 | } 32 | 33 | .navbar-top-links .dropdown-menu li a { 34 | padding: 3px 20px; 35 | min-height: 0; 36 | } 37 | 38 | .navbar-top-links .dropdown-menu li a div { 39 | white-space: normal; 40 | } 41 | 42 | .navbar-top-links .dropdown-messages, 43 | .navbar-top-links .dropdown-tasks, 44 | .navbar-top-links .dropdown-alerts { 45 | width: 310px; 46 | min-width: 0; 47 | } 48 | .navbar-top-links .dropdown-messages { 49 | margin-left: 5px; 50 | } 51 | .navbar-top-links .dropdown-tasks { 52 | margin-left: -59px; 53 | } 54 | 55 | .navbar-top-links .dropdown-alerts { 56 | margin-left: -123px; 57 | } 58 | 59 | .navbar-top-links .dropdown-user { 60 | right: 0; 61 | left: auto; 62 | } 63 | 64 | .sidebar .sidebar-nav.navbar-collapse { 65 | padding-right: 0; 66 | padding-left: 0; 67 | } 68 | 69 | .sidebar .sidebar-search { 70 | padding: 15px; 71 | } 72 | .sidebar .arrow { 73 | float: right; 74 | } 75 | 76 | .sidebar .fa.arrow:before { 77 | 
content: "\f104"; 78 | } 79 | 80 | .sidebar .active>a>.fa.arrow:before { 81 | content: "\f107"; 82 | } 83 | 84 | .sidebar .nav-second-level li, 85 | .sidebar .nav-third-level li { 86 | border-bottom: 0!important; 87 | margin-bottom: 0; 88 | } 89 | 90 | .sidebar .nav-second-level li a { 91 | padding-left: 53px; 92 | font-size: .9em; 93 | } 94 | .sidebar .nav-third-level li a { 95 | padding-left: 52px; 96 | } 97 | @media(min-width:768px){ 98 | .navbar-top-links .dropdown-messages, 99 | .navbar-top-links .dropdown-tasks, 100 | .navbar-top-links .dropdown-alerts { 101 | margin-left: auto; 102 | } 103 | } 104 | .chart-status{ 105 | text-align: center; 106 | padding-top: 15%; 107 | color: #777777; 108 | width:100%; 109 | } 110 | .chart-protocols-statistics .chart-status{ 111 | height:540px; 112 | } 113 | .chart-status span{ 114 | position: relative; 115 | top: -4px; 116 | font-size: 20px; 117 | font-weight: 600; 118 | margin-left: 0.5em; 119 | } 120 | #chart-tabs { 121 | padding-left: 1em; 122 | margin: 0 1em; 123 | } 124 | .nav-tabs > li { 125 | margin-bottom: -2px; 126 | } 127 | .nav-tabs a{ 128 | color: #668586; 129 | } 130 | #chart-type-selector{ 131 | padding-left: 1em; 132 | margin-left: 1.1em; 133 | margin-right: 2em; 134 | } 135 | .dashboard { 136 | margin: 1em 0 0; 137 | } 138 | #network-statistics, #framework-architecture { 139 | padding: 1em; 140 | height: 480px; 141 | } 142 | #chart-sum-status { 143 | padding-top: 30%; 144 | } 145 | #framework-architecture img{ 146 | width: 65%; 147 | display: block; 148 | margin: 1em auto; 149 | } 150 | #framework-architecture p{ 151 | text-align: justify; 152 | padding: 1em 1em 0em 1em; 153 | font-size: 15px; 154 | } 155 | /* Datetime */ 156 | #datetime-beginning, #datetime-end { 157 | width: 140px; 158 | } 159 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/css/jqvmap.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * jQVMap Version 1.0 3 | * 4 | * http://jqvmap.com 5 | * Copyright 2012, Peter Schmalfeldt 6 | * Licensed under the MIT license. 
7 | * 8 | * Fork Me @ https://github.com/manifestinteractive/jqvmap 9 | */ 10 | .jqvmap-label 11 | { 12 | position: absolute; 13 | display: none; 14 | -webkit-border-radius: 3px; 15 | -moz-border-radius: 3px; 16 | border-radius: 3px; 17 | background: #292929; 18 | color: white; 19 | font-family: sans-serif, Verdana; 20 | font-size: smaller; 21 | padding: 3px; 22 | } 23 | .jqvmap-zoomin, .jqvmap-zoomout { 24 | position: absolute; 25 | left: 10px; 26 | -webkit-border-radius: 3px; 27 | -moz-border-radius: 3px; 28 | border-radius: 3px; 29 | background: #585858; 30 | padding: 1px; 31 | color: white; 32 | width: 35px; 33 | height: 25px; 34 | cursor: pointer; 35 | line-height: 21px; 36 | text-align: center; 37 | bottom: 10px; 38 | } 39 | .jqvmap-zoomout { 40 | line-height: 23px; 41 | left: 47px; 42 | } 43 | .jqvmap-region 44 | { 45 | cursor: pointer; 46 | } 47 | .jqvmap-ajax_response 48 | { 49 | width: 100%; 50 | height: 500px; 51 | } 52 | @media(min-width:320px) { 53 | .jqvmap-zoomin, .jqvmap-zoomout { 54 | width: 26px; 55 | height: 15px; 56 | line-height: 14px; 57 | font-size: 0.8em; 58 | } 59 | .jqvmap-zoomout { 60 | left: 41px; 61 | } 62 | } -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/fonts/glyphicons-halflings-regular.woff 
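A note on the chart endpoint shown earlier: get_statistics() in controllers/protocols_statistics.py answers the AJAX call with a JSON envelope whose "data" field is a semicolon-separated CSV string ("Timestamp,TCP protocol,UDP protocol,Other protocols;..."), built from a three-level Elasticsearch aggregation (date_histogram on @timestamp, terms on protocol.raw, sum of the requested field). The standalone sketch below mirrors that query outside web2py; the host and port follow the [consumer] section of private/appconfig.ini, while the time range and histogram interval are illustrative assumptions.

# Standalone sketch of the aggregation used in controllers/protocols_statistics.py.
# consumer:9200 mirrors private/appconfig.ini; time bounds and interval are assumptions.
import elasticsearch
from elasticsearch_dsl import Search, Q

client = elasticsearch.Elasticsearch([{'host': 'consumer', 'port': 9200}])
query = Q({'bool': {'must': [
    {'range': {'@timestamp': {'gte': 'now-2h', 'lte': 'now'}}},
    {'term': {'@type': 'protocols_statistics'}},
]}})

s = Search(using=client, index='_all').query(query)
s.aggs.bucket('by_time', 'date_histogram', field='@timestamp', interval='5m') \
      .bucket('by_type', 'terms', field='protocol.raw') \
      .bucket('sum_of_flows', 'sum', field='flows')

result = s.execute()
for interval in result.aggregations.by_time.buckets:
    # One chart row per histogram bucket: timestamp plus per-protocol sums.
    protocols = {b.key: int(b.sum_of_flows.value) for b in interval.by_type.buckets}
    print("%s %s" % (interval.key, protocols))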
-------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/images/architecture.png -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/images/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/images/favicon.ico -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/images/logo_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/images/logo_small.png -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/images/logo_small_lines.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/images/logo_small_lines.png -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/charts/custom.js: -------------------------------------------------------------------------------- 1 | // Reflow zingcharts when main page width is changed 2 | $("#page-wrapper").on('transitionend', function () { 3 | $('.zingchart').each(function() { 4 | zingchart.exec(this.id, 'resize', { 5 | width: '100%' 6 | }); 7 | }); 8 | }); -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/charts/zingchart.min.js: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSIRT-MU/Stream4Flow/897fca153ea2cc80ae7accb1d14a65c67d05c36f/web-interface/Stream4Flow/static/js/charts/zingchart.min.js -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/common/classie.js: -------------------------------------------------------------------------------- 1 | /*! 
2 | * classie - class helper functions 3 | * from bonzo https://github.com/ded/bonzo 4 | * 5 | * classie.has( elem, 'my-class' ) -> true/false 6 | * classie.add( elem, 'my-new-class' ) 7 | * classie.remove( elem, 'my-unwanted-class' ) 8 | * classie.toggle( elem, 'my-class' ) 9 | */ 10 | 11 | /*jshint browser: true, strict: true, undef: true */ 12 | 13 | ( function( window ) { 14 | 15 | 'use strict'; 16 | 17 | // class helper functions from bonzo https://github.com/ded/bonzo 18 | 19 | function classReg( className ) { 20 | return new RegExp("(^|\\s+)" + className + "(\\s+|$)"); 21 | } 22 | 23 | // classList support for class management 24 | // altho to be fair, the api sucks because it won't accept multiple classes at once 25 | var hasClass, addClass, removeClass; 26 | 27 | if ( 'classList' in document.documentElement ) { 28 | hasClass = function( elem, c ) { 29 | return elem.classList.contains( c ); 30 | }; 31 | addClass = function( elem, c ) { 32 | elem.classList.add( c ); 33 | }; 34 | removeClass = function( elem, c ) { 35 | elem.classList.remove( c ); 36 | }; 37 | } 38 | else { 39 | hasClass = function( elem, c ) { 40 | return classReg( c ).test( elem.className ); 41 | }; 42 | addClass = function( elem, c ) { 43 | if ( !hasClass( elem, c ) ) { 44 | elem.className = elem.className + ' ' + c; 45 | } 46 | }; 47 | removeClass = function( elem, c ) { 48 | elem.className = elem.className.replace( classReg( c ), ' ' ); 49 | }; 50 | } 51 | 52 | function toggleClass( elem, c ) { 53 | var fn = hasClass( elem, c ) ? removeClass : addClass; 54 | fn( elem, c ); 55 | } 56 | 57 | window.classie = { 58 | // full names 59 | hasClass: hasClass, 60 | addClass: addClass, 61 | removeClass: removeClass, 62 | toggleClass: toggleClass, 63 | // short names 64 | has: hasClass, 65 | add: addClass, 66 | remove: removeClass, 67 | toggle: toggleClass 68 | }; 69 | 70 | })( window ); -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/common/jquery.circlechart.js: -------------------------------------------------------------------------------- 1 | (function($){ 2 | $.fn.extend({ 3 | //pass the options variable to the function 4 | percentcircle: function(options) { 5 | //Set the default values, use comma to separate the settings, example: 6 | var defaults = { 7 | animate : true, 8 | diameter : 115, 9 | guage: 4, 10 | coverBg: '#fff', 11 | bgColor: '#efefef', 12 | fillColor: '#4F52BA', 13 | percentSize: '15px', 14 | percentWeight: 'normal' 15 | }, 16 | styles = { 17 | cirContainer : { 18 | 'width':defaults.diameter, 19 | 'height':defaults.diameter 20 | }, 21 | cir : { 22 | 'position': 'relative', 23 | 'text-align': 'center', 24 | 'width': defaults.diameter, 25 | 'height': defaults.diameter, 26 | 'border-radius': '100%', 27 | 'background-color': defaults.bgColor, 28 | 'background-image' : 'linear-gradient(91deg, transparent 50%, '+defaults.bgColor+' 50%), linear-gradient(90deg, '+defaults.bgColor+' 50%, transparent 50%)' 29 | }, 30 | cirCover: { 31 | 'position': 'relative', 32 | 'top': defaults.guage, 33 | 'left': defaults.guage, 34 | 'text-align': 'center', 35 | 'width': defaults.diameter - (defaults.guage * 2), 36 | 'height': defaults.diameter - (defaults.guage * 2), 37 | 'border-radius': '100%', 38 | 'background-color': defaults.coverBg 39 | }, 40 | percent: { 41 | 'display':'block', 42 | 'width': defaults.diameter, 43 | 'height': defaults.diameter, 44 | 'line-height': defaults.diameter + 'px', 45 | 'vertical-align': 'middle', 46 | 'font-size': 
defaults.percentSize, 47 | 'font-weight': defaults.percentWeight, 48 | 'color': defaults.fillColor 49 | } 50 | }; 51 | 52 | var that = this, 53 | template = '
    {{percentage}}
    ', 54 | options = $.extend(defaults, options) 55 | 56 | function init(){ 57 | that.each(function(){ 58 | var $this = $(this), 59 | //we need to check for a percent otherwise set to 0; 60 | perc = Math.round($this.data('percent')), //get the percentage from the element 61 | deg = perc * 3.6, 62 | stop = options.animate ? 0 : deg, 63 | $chart = $(template.replace('{{percentage}}',perc+'%')); 64 | //set all of the css properties forthe chart 65 | $chart.css(styles.cirContainer).find('.ab').css(styles.cir).find('.cir').css(styles.cirCover).find('.perc').css(styles.percent); 66 | 67 | $this.append($chart); //add the chart back to the target element 68 | setTimeout(function(){ 69 | animateChart(deg,parseInt(stop),$chart.find('.ab')); //both values set to the same value to keep the function from looping and animating 70 | },250) 71 | }); 72 | } 73 | 74 | var animateChart = function (stop,curr,$elm){ 75 | var deg = curr; 76 | if(curr <= stop){ 77 | if (deg>=180){ 78 | $elm.css('background-image','linear-gradient(' + (90+deg) + 'deg, transparent 50%, '+options.fillColor+' 50%),linear-gradient(90deg, '+options.fillColor+' 50%, transparent 50%)'); 79 | }else{ 80 | $elm.css('background-image','linear-gradient(' + (deg-90) + 'deg, transparent 50%, '+options.bgColor+' 50%),linear-gradient(90deg, '+options.fillColor+' 50%, transparent 50%)'); 81 | } 82 | curr ++; 83 | setTimeout(function(){ 84 | animateChart(stop,curr,$elm); 85 | },1); 86 | } 87 | }; 88 | 89 | init(); //kick off the goodness 90 | } 91 | }); 92 | 93 | })(jQuery); -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/common/jquery.vmap.sampledata.js: -------------------------------------------------------------------------------- 1 | var sample_data = 
{"af":"16.63","al":"11.58","dz":"158.97","ao":"85.81","ag":"1.1","ar":"351.02","am":"8.83","au":"1219.72","at":"366.26","az":"52.17","bs":"7.54","bh":"21.73","bd":"105.4","bb":"3.96","by":"52.89","be":"461.33","bz":"1.43","bj":"6.49","bt":"1.4","bo":"19.18","ba":"16.2","bw":"12.5","br":"2023.53","bn":"11.96","bg":"44.84","bf":"8.67","bi":"1.47","kh":"11.36","cm":"21.88","ca":"1563.66","cv":"1.57","cf":"2.11","td":"7.59","cl":"199.18","cn":"5745.13","co":"283.11","km":"0.56","cd":"12.6","cg":"11.88","cr":"35.02","ci":"22.38","hr":"59.92","cy":"22.75","cz":"195.23","dk":"304.56","dj":"1.14","dm":"0.38","do":"50.87","ec":"61.49","eg":"216.83","sv":"21.8","gq":"14.55","er":"2.25","ee":"19.22","et":"30.94","fj":"3.15","fi":"231.98","fr":"2555.44","ga":"12.56","gm":"1.04","ge":"11.23","de":"3305.9","gh":"18.06","gr":"305.01","gd":"0.65","gt":"40.77","gn":"4.34","gw":"0.83","gy":"2.2","ht":"6.5","hn":"15.34","hk":"226.49","hu":"132.28","is":"12.77","in":"1430.02","id":"695.06","ir":"337.9","iq":"84.14","ie":"204.14","il":"201.25","it":"2036.69","jm":"13.74","jp":"5390.9","jo":"27.13","kz":"129.76","ke":"32.42","ki":"0.15","kr":"986.26","undefined":"5.73","kw":"117.32","kg":"4.44","la":"6.34","lv":"23.39","lb":"39.15","ls":"1.8","lr":"0.98","ly":"77.91","lt":"35.73","lu":"52.43","mk":"9.58","mg":"8.33","mw":"5.04","my":"218.95","mv":"1.43","ml":"9.08","mt":"7.8","mr":"3.49","mu":"9.43","mx":"1004.04","md":"5.36","mn":"5.81","me":"3.88","ma":"91.7","mz":"10.21","mm":"35.65","na":"11.45","np":"15.11","nl":"770.31","nz":"138","ni":"6.38","ne":"5.6","ng":"206.66","no":"413.51","om":"53.78","pk":"174.79","pa":"27.2","pg":"8.81","py":"17.17","pe":"153.55","ph":"189.06","pl":"438.88","pt":"223.7","qa":"126.52","ro":"158.39","ru":"1476.91","rw":"5.69","ws":"0.55","st":"0.19","sa":"434.44","sn":"12.66","rs":"38.92","sc":"0.92","sl":"1.9","sg":"217.38","sk":"86.26","si":"46.44","sb":"0.67","za":"354.41","es":"1374.78","lk":"48.24","kn":"0.56","lc":"1","vc":"0.58","sd":"65.93","sr":"3.3","sz":"3.17","se":"444.59","ch":"522.44","sy":"59.63","tw":"426.98","tj":"5.58","tz":"22.43","th":"312.61","tl":"0.62","tg":"3.07","to":"0.3","tt":"21.2","tn":"43.86","tr":"729.05","tm":0,"ug":"17.12","ua":"136.56","ae":"239.65","gb":"2258.57","us":"14624.18","uy":"40.71","uz":"37.72","vu":"0.72","ve":"285.21","vn":"101.99","ye":"30.02","zm":"15.69","zw":"5.57"}; -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/common/wow.min.js: -------------------------------------------------------------------------------- 1 | /*! 
WOW - v0.1.9 - 2014-05-10 2 | * Copyright (c) 2014 Matthieu Aussaguel; Licensed MIT */(function(){var a,b,c=function(a,b){return function(){return a.apply(b,arguments)}};a=function(){function a(){}return a.prototype.extend=function(a,b){var c,d;for(c in a)d=a[c],null!=d&&(b[c]=d);return b},a.prototype.isMobile=function(a){return/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(a)},a}(),b=this.WeakMap||(b=function(){function a(){this.keys=[],this.values=[]}return a.prototype.get=function(a){var b,c,d,e,f;for(f=this.keys,b=d=0,e=f.length;e>d;b=++d)if(c=f[b],c===a)return this.values[b]},a.prototype.set=function(a,b){var c,d,e,f,g;for(g=this.keys,c=e=0,f=g.length;f>e;c=++e)if(d=g[c],d===a)return void(this.values[c]=b);return this.keys.push(a),this.values.push(b)},a}()),this.WOW=function(){function d(a){null==a&&(a={}),this.scrollCallback=c(this.scrollCallback,this),this.scrollHandler=c(this.scrollHandler,this),this.start=c(this.start,this),this.scrolled=!0,this.config=this.util().extend(a,this.defaults),this.animationNameCache=new b}return d.prototype.defaults={boxClass:"wow",animateClass:"animated",offset:0,mobile:!0},d.prototype.init=function(){var a;return this.element=window.document.documentElement,"interactive"===(a=document.readyState)||"complete"===a?this.start():document.addEventListener("DOMContentLoaded",this.start)},d.prototype.start=function(){var a,b,c,d;if(this.boxes=this.element.getElementsByClassName(this.config.boxClass),this.boxes.length){if(this.disabled())return this.resetStyle();for(d=this.boxes,b=0,c=d.length;c>b;b++)a=d[b],this.applyStyle(a,!0);return window.addEventListener("scroll",this.scrollHandler,!1),window.addEventListener("resize",this.scrollHandler,!1),this.interval=setInterval(this.scrollCallback,50)}},d.prototype.stop=function(){return window.removeEventListener("scroll",this.scrollHandler,!1),window.removeEventListener("resize",this.scrollHandler,!1),null!=this.interval?clearInterval(this.interval):void 0},d.prototype.show=function(a){return this.applyStyle(a),a.className=""+a.className+" "+this.config.animateClass},d.prototype.applyStyle=function(a,b){var c,d,e;return d=a.getAttribute("data-wow-duration"),c=a.getAttribute("data-wow-delay"),e=a.getAttribute("data-wow-iteration"),this.animate(function(f){return function(){return f.customStyle(a,b,d,c,e)}}(this))},d.prototype.animate=function(){return"requestAnimationFrame"in window?function(a){return window.requestAnimationFrame(a)}:function(a){return a()}}(),d.prototype.resetStyle=function(){var a,b,c,d,e;for(d=this.boxes,e=[],b=0,c=d.length;c>b;b++)a=d[b],e.push(a.setAttribute("style","visibility: visible;"));return e},d.prototype.customStyle=function(a,b,c,d,e){return b&&this.cacheAnimationName(a),a.style.visibility=b?"hidden":"visible",c&&this.vendorSet(a.style,{animationDuration:c}),d&&this.vendorSet(a.style,{animationDelay:d}),e&&this.vendorSet(a.style,{animationIterationCount:e}),this.vendorSet(a.style,{animationName:b?"none":this.cachedAnimationName(a)}),a},d.prototype.vendors=["moz","webkit"],d.prototype.vendorSet=function(a,b){var c,d,e,f;f=[];for(c in b)d=b[c],a[""+c]=d,f.push(function(){var b,f,g,h;for(g=this.vendors,h=[],b=0,f=g.length;f>b;b++)e=g[b],h.push(a[""+e+c.charAt(0).toUpperCase()+c.substr(1)]=d);return h}.call(this));return f},d.prototype.vendorCSS=function(a,b){var c,d,e,f,g,h;for(d=window.getComputedStyle(a),c=d.getPropertyCSSValue(b),h=this.vendors,f=0,g=h.length;g>f;f++)e=h[f],c=c||d.getPropertyCSSValue("-"+e+"-"+b);return 
c},d.prototype.animationName=function(a){var b;try{b=this.vendorCSS(a,"animation-name").cssText}catch(c){b=window.getComputedStyle(a).getPropertyValue("animation-name")}return"none"===b?"":b},d.prototype.cacheAnimationName=function(a){return this.animationNameCache.set(a,this.animationName(a))},d.prototype.cachedAnimationName=function(a){return this.animationNameCache.get(a)},d.prototype.scrollHandler=function(){return this.scrolled=!0},d.prototype.scrollCallback=function(){var a;return this.scrolled&&(this.scrolled=!1,this.boxes=function(){var b,c,d,e;for(d=this.boxes,e=[],b=0,c=d.length;c>b;b++)a=d[b],a&&(this.isVisible(a)?this.show(a):e.push(a));return e}.call(this),!this.boxes.length)?this.stop():void 0},d.prototype.offsetTop=function(a){for(var b;void 0===a.offsetTop;)a=a.parentNode;for(b=a.offsetTop;a=a.offsetParent;)b+=a.offsetTop;return b},d.prototype.isVisible=function(a){var b,c,d,e,f;return c=a.getAttribute("data-wow-offset")||this.config.offset,f=window.pageYOffset,e=f+this.element.clientHeight-c,d=this.offsetTop(a),b=d+a.clientHeight,e>=d&&b>=f},d.prototype.util=function(){return this._util||(this._util=new a)},d.prototype.disabled=function(){return!this.config.mobile&&this.util().isMobile(navigator.userAgent)},d}()}).call(this); -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/custom/datetime_interval.js: -------------------------------------------------------------------------------- 1 | // Set interval on select interval change 2 | function setInterval(value) { 3 | // On "custom" value show datetimepicker for "#beginning" 4 | if (value == "custom") { 5 | $('#datetime-beginning').datetimepicker('toggle'); 6 | return; 7 | }; 8 | 9 | // Check NaN 10 | if ( isNaN(value) ) return; 11 | 12 | // Get current datetime with seconds rounded to tens 13 | var datetime = new Date(new Date().getTime() - new Date().getTimezoneOffset()*60*1000); 14 | var secondsRounded = parseInt(datetime.getSeconds() / 10) * 10; 15 | datetime.setSeconds(secondsRounded); 16 | 17 | // Set '#beginning' to current time 18 | $('#datetime-end').val( datetime.toISOString().substr(0, 19).replace('T', ' ') ); 19 | 20 | // Set '#end' to current time subtract given value 21 | datetime.setHours(datetime.getHours() - parseInt(value)); 22 | $('#datetime-beginning').val( datetime.toISOString().substr(0, 19).replace('T', ' ') ); 23 | } 24 | 25 | // Set default interval to 2 hours 26 | $(window).load(setInterval(2)); 27 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/custom/default.js: -------------------------------------------------------------------------------- 1 | // Generate a chart and set it to the given div 2 | function generateSumChart(data) { 3 | // Elements ID 4 | var chartId = 'chart-sum'; 5 | var chartIdStatus = chartId + '-status'; 6 | 7 | // Hide status element 8 | $('#' + chartIdStatus).hide(); 9 | // Show chart element 10 | $('#' + chartId).show(); 11 | 12 | // ZingChart configuration 13 | var myConfig = { 14 | type: "bar", 15 | backgroundColor:'#fff', 16 | legend:{ 17 | align: 'center', 18 | verticalAlign: 'bottom', 19 | backgroundColor:'none', 20 | borderWidth: 0, 21 | item:{ 22 | fontColor:'#444444', 23 | cursor: 'hand' 24 | }, 25 | marker:{ 26 | type:'circle', 27 | borderWidth: 0, 28 | cursor: 'hand' 29 | }, 30 | toggleAction: 'remove' 31 | }, 32 | plotarea:{ 33 | margin: '15px' 34 | }, 35 | plot:{ 36 | valueBox:{ 37 | visible: true, 38 | thousandsSeparator: ',' 
39 | }, 40 | animation:{ 41 | effect: 4, 42 | speed: 1 43 | }, 44 | barWidth: '66%' 45 | }, 46 | scaleX:{ 47 | label:{ 48 | visible: false 49 | } 50 | }, 51 | scaleY:{ 52 | visible: false, 53 | progression: 'log', 54 | logBase: 10, 55 | minValue: 0 56 | }, 57 | tooltip:{ 58 | visible: false, 59 | }, 60 | csv:{ 61 | dataString: data, 62 | rowSeparator: ';', 63 | separator: ',', 64 | verticalLabels: true 65 | } 66 | }; 67 | 68 | // Render ZingChart with width based on the whole panel 69 | zingchart.render({ 70 | id: chartId, 71 | data: myConfig, 72 | height: $('#network-statistics').height() - 60, 73 | width: $('#network-statistics').width(), 74 | }); 75 | }; 76 | 77 | 78 | // Obtain chart data and generate chart 79 | function loadSumChart() { 80 | // Elements ID 81 | var chartId = '#chart-sum'; 82 | var chartIdStatus = chartId + '-status'; 83 | 84 | // Hide chart element 85 | $(chartId).hide(); 86 | // Show status element 87 | $(chartIdStatus).show(); 88 | 89 | // Set loading status 90 | $(chartIdStatus).html( 91 | '\ 92 | Loading...' 93 | ) 94 | 95 | // Get Elasticsearch data 96 | $.ajax({ 97 | async: true, 98 | type: 'GET', 99 | url: './get-summary-statistics', 100 | success: function(raw) { 101 | var response = jQuery.parseJSON(raw); 102 | if (response.status == "Ok") { 103 | // Replace separator ';' to new line to create a CSV string and generate a chart 104 | generateSumChart(response.data); 105 | } else { 106 | // Show error message 107 | $(chartIdStatus).html( 108 | '\ 109 | ' + response.status + ': ' + response.data + '' 110 | ) 111 | } 112 | } 113 | }); 114 | }; 115 | 116 | 117 | // Load all charts when page loaded 118 | $(window).load(loadSumChart()); 119 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/template/custom.js: -------------------------------------------------------------------------------- 1 | $(function() { 2 | $('#side-menu').metisMenu(); 3 | }); 4 | 5 | //Loads the correct sidebar on window load, 6 | //collapses the sidebar on window resize. 7 | // Sets the min-height of #page-wrapper to window size 8 | $(function() { 9 | $(window).bind("load resize", function() { 10 | topOffset = 50; 11 | width = (this.window.innerWidth > 0) ? this.window.innerWidth : this.screen.width; 12 | if (width < 768) { 13 | $('div.navbar-collapse').addClass('collapse'); 14 | topOffset = 100; // 2-row-menu 15 | } else { 16 | $('div.navbar-collapse').removeClass('collapse'); 17 | } 18 | 19 | height = ((this.window.innerHeight > 0) ? 
this.window.innerHeight : this.screen.height) - 1; 20 | height = height - topOffset; 21 | if (height < 1) height = 1; 22 | if (height > topOffset) { 23 | $("#page-wrapper").css("min-height", (height) + "px"); 24 | } 25 | }); 26 | 27 | var url = window.location; 28 | var element = $('ul.nav a').filter(function() { 29 | return this.href == url || url.href.indexOf(this.href) == 0; 30 | }).addClass('active').parent().parent().addClass('in').parent(); 31 | if (element.is('li')) { 32 | element.addClass('active'); 33 | } 34 | }); 35 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/template/scripts.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | "use strict"; 3 | 4 | // custom scrollbar 5 | 6 | $("html").niceScroll({styler:"fb",cursorcolor:"#82BFBF", cursorwidth: '6', cursorborderradius: '10px', background: '#373f47', spacebarenabled:false, cursorborder: '0', zindex: '1000'}); 7 | 8 | $(".scrollbar1").niceScroll({styler:"fb",cursorcolor:"rgba(97, 100, 193, 0.78)", cursorwidth: '6', cursorborderradius: '0',autohidemode: 'false', background: '#F1F1F1', spacebarenabled:false, cursorborder: '0'}); 9 | 10 | 11 | 12 | $(".scrollbar1").getNiceScroll(); 13 | if ($('body').hasClass('scrollbar1-collapsed')) { 14 | $(".scrollbar1").getNiceScroll().hide(); 15 | } 16 | 17 | })(jQuery); 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /web-interface/Stream4Flow/static/js/template/site.js: -------------------------------------------------------------------------------- 1 | // call this from the developer console and you can control both instances 2 | var calendars = {}; 3 | 4 | $(document).ready( function() { 5 | 6 | // assuming you've got the appropriate language files, 7 | // clndr will respect whatever moment's language is set to. 8 | // moment.lang('ru'); 9 | 10 | // here's some magic to make sure the dates are happening this month. 11 | var thisMonth = moment().format('YYYY-MM'); 12 | 13 | var eventArray = [ 14 | { startDate: thisMonth + '-10', endDate: thisMonth + '-14', title: 'Multi-Day Event' }, 15 | { startDate: thisMonth + '-23', endDate: thisMonth + '-26', title: 'Another Multi-Day Event' } 16 | ]; 17 | 18 | // the order of the click handlers is predictable. 19 | // direct click action callbacks come first: click, nextMonth, previousMonth, nextYear, previousYear, or today. 20 | // then onMonthChange (if the month changed). 21 | // finally onYearChange (if the year changed). 
22 | 23 | calendars.clndr1 = $('.cal1').clndr({ 24 | events: eventArray, 25 | // constraints: { 26 | // startDate: '2013-11-01', 27 | // endDate: '2013-11-15' 28 | // }, 29 | clickEvents: { 30 | click: function(target) { 31 | console.log(target); 32 | if($(target.element).hasClass('inactive')) { 33 | console.log('not a valid datepicker date.'); 34 | } else { 35 | console.log('VALID datepicker date.'); 36 | } 37 | }, 38 | nextMonth: function() { 39 | console.log('next month.'); 40 | }, 41 | previousMonth: function() { 42 | console.log('previous month.'); 43 | }, 44 | onMonthChange: function() { 45 | console.log('month changed.'); 46 | }, 47 | nextYear: function() { 48 | console.log('next year.'); 49 | }, 50 | previousYear: function() { 51 | console.log('previous year.'); 52 | }, 53 | onYearChange: function() { 54 | console.log('year changed.'); 55 | } 56 | }, 57 | multiDayEvents: { 58 | startDate: 'startDate', 59 | endDate: 'endDate' 60 | }, 61 | showAdjacentMonths: true, 62 | adjacentDaysChangeMonth: false 63 | }); 64 | 65 | // calendars.clndr2 = $('.cal2').clndr({ 66 | // template: $('#template-calendar').html(), 67 | // events: eventArray, 68 | // startWithMonth: moment().add('month', 1), 69 | // clickEvents: { 70 | // click: function(target) { 71 | // console.log(target); 72 | // } 73 | // } 74 | // }); 75 | 76 | // bind both clndrs to the left and right arrow keys 77 | $(document).keydown( function(e) { 78 | if(e.keyCode == 37) { 79 | // left arrow 80 | calendars.clndr1.back(); 81 | calendars.clndr2.back(); 82 | } 83 | if(e.keyCode == 39) { 84 | // right arrow 85 | calendars.clndr1.forward(); 86 | calendars.clndr2.forward(); 87 | } 88 | }); 89 | 90 | }); -------------------------------------------------------------------------------- /web-interface/Stream4Flow/views/default/index.html: -------------------------------------------------------------------------------- 1 | {{extend 'layout.html'}} 2 | 3 | 7 | 8 | {{if ('alert_type' in globals()) and alert_type != "":}} 9 | 18 | {{pass}} 19 | 20 |
21 | [dashboard markup stripped in extraction; panel headings: "Current Network Statistics", "Framework Architecture"]
40 | The basis of the Stream4Flow framework is formed by the IPFIXCol collector, which enables incoming IP flow records to be transformed into the JSON format provided to the Kafka messaging system. The selection of Kafka was based on its scalability and partitioning possibilities, which provide sufficient data throughput. Apache Spark was selected as the data stream processing framework for its quick IP flow data throughput, available programming languages (Scala, Java, or Python) and MapReduce programming model. The analysis results are stored in Elastic Stack containing Kibana, which enables browsing and visualizing the results. The Stream4Flow framework also contains an additional web interface in order to make administration easier and visualize complex results of the analysis.
51 | [closing markup and an inline script block stripped in extraction]
--------------------------------------------------------------------------------
/web-interface/Stream4Flow/views/error.html:
--------------------------------------------------------------------------------
1 | {{extend 'layout.html'}}
3 | {{if ('alert_type' in globals()) and alert_type != "":}}
4 | [alert markup stripped in extraction]
13 | {{pass}}
--------------------------------------------------------------------------------
/web-interface/Stream4Flow/views/logo.html:
--------------------------------------------------------------------------------
1 | [logo markup stripped in extraction]
--------------------------------------------------------------------------------
/web-interface/Stream4Flow/views/menu.html:
--------------------------------------------------------------------------------
1 | [navigation menu markup stripped in extraction]
--------------------------------------------------------------------------------
/web-interface/Stream4Flow/views/menu/protocols_statistics.html:
--------------------------------------------------------------------------------
1 | [menu item markup stripped in extraction; visible label: "Protocols Statistics"]
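The framework description in views/default/index.html above outlines the processing pipeline: IPFIXCol converts incoming IP flow records to JSON and publishes them to Kafka, Apache Spark consumes and analyses the stream, and the results end up in Elastic Stack and in this web interface. Purely as an illustration (this is not a file from the repository; the topic and broker names are assumptions), a minimal Spark Streaming consumer for those JSON flow records could look like this:

# Minimal, illustrative sketch of the pipeline described in views/default/index.html:
# reading the JSON flow records that IPFIXCol publishes to Kafka with Spark Streaming.
# The topic name "ipfix.entry" and broker "producer:9092" are assumptions.
import json

from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils

sc = SparkContext(appName="flow-consumer-sketch")
ssc = StreamingContext(sc, 5)  # 5-second micro-batches

# createDirectStream yields (key, value) pairs; the value is the JSON flow record.
stream = KafkaUtils.createDirectStream(
    ssc, ["ipfix.entry"], {"metadata.broker.list": "producer:9092"})
flows = stream.map(lambda kv: json.loads(kv[1]))

# Example analysis step: count records per micro-batch.
flows.count().pprint()

ssc.start()
ssc.awaitTermination()

The output of such streaming jobs, once indexed into Elasticsearch, is what controllers such as protocols_statistics.py query back out for the charts.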
--------------------------------------------------------------------------------
/web-interface/Stream4Flow/views/system/about.html:
--------------------------------------------------------------------------------
1 | {{extend 'layout.html'}}
3 | [page header markup stripped in extraction; page heading: "About"]
9 | [panel markup stripped in extraction; panel heading: "Acknowledgement"]
17 | Development of this system and corresponding research was supported by the Technology Agency of the Czech Republic under No. TA04010062 "Technology for processing and analysis of network data in big data concept".
18 | [closing markup stripped in extraction]
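A note on the user model defined earlier: models/db.py declares the users, users_auth and users_logins tables, and modules/global_functions.py verifies a login by hashing the stored per-user salt concatenated with the submitted password using SHA-256. The sketch below shows how a record accepted by check_password() could be created from the web2py shell (python web2py.py -S Stream4Flow -M). It is illustrative only: it assumes the 'db' object from models/db.py, the salt generation is an assumption rather than the repository's own tooling, and the string concatenation follows the code base's Python 2 style.

# Illustrative only: create a user whose credentials check_password() would accept.
# Assumes the web2py 'db' object from models/db.py; salt generation is an assumption.
import hashlib
import uuid

salt = uuid.uuid4().hex[:20]                                  # users_auth.salt: 20 chars
password_hash = hashlib.sha256(salt + 'secret').hexdigest()   # matches check_password()

user_id = db.users.insert(username='admin', name='Administrator',
                          organization='Example', email='admin@example.org',
                          role='administrator')
db.users_auth.insert(user_id=user_id, salt=salt, password=password_hash)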
    23 | -------------------------------------------------------------------------------- /web-interface/routes.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # ---------------------------------------------------------------------------------------------------------------------- 4 | # This is an app-specific example router 5 | # 6 | # This simple router is used for setting languages from app/languages directory 7 | # as a part of the application path: app//controller/function 8 | # Language from default.py or 'en' (if the file is not found) is used as 9 | # a default_language 10 | # 11 | # See /examples/routes.parametric.example.py for parameter's detail 12 | # ---------------------------------------------------------------------------------------------------------------------- 13 | 14 | # ---------------------------------------------------------------------------------------------------------------------- 15 | # To enable this route file you must do the steps: 16 | # 1. rename /examples/routes.parametric.example.py to routes.py 17 | # 2. rename this APP/routes.example.py to APP/routes.py (where APP - is your application directory) 18 | # 3. restart web2py (or reload routes in web2py admin interface) 19 | # 20 | # YOU CAN COPY THIS FILE TO ANY APPLICATION'S ROOT DIRECTORY WITHOUT CHANGES! 21 | # ---------------------------------------------------------------------------------------------------------------------- 22 | 23 | from gluon.fileutils import abspath 24 | from gluon.languages import read_possible_languages 25 | 26 | # ---------------------------------------------------------------------------------------------------------------------- 27 | # NOTE! app - is an application based router's parameter with name of an application. E.g.'welcome' 28 | # ---------------------------------------------------------------------------------------------------------------------- 29 | 30 | routers = dict( 31 | BASE=dict( 32 | default_application='Stream4Flow', 33 | map_hyphen=True 34 | ) 35 | ) 36 | 37 | # ---------------------------------------------------------------------------------------------------------------------- 38 | # NOTE! To change language in your application using these rules add this line in one of your models files: 39 | # ---------------------------------------------------------------------------------------------------------------------- 40 | # if request.uri_language: T.force(request.uri_language) 41 | --------------------------------------------------------------------------------
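Finally, routes.py above makes Stream4Flow the default application and enables hyphen mapping. As an illustration of what that means for the endpoints shown earlier (this is not a repository file, and the exact URL is only an example):

# Illustrative URL resolution under routers = dict(BASE=dict(
#     default_application='Stream4Flow', map_hyphen=True)):
#
#   GET /protocols-statistics/get-statistics?beginning=...&end=...&aggregation=5m&type=flows
#       -> application 'Stream4Flow'          (default_application, omitted from the URL)
#       -> controller  protocols_statistics   (hyphens map back to underscores)
#       -> function    get_statistics()       (the chart endpoint defined above)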