├── .gitignore ├── .travis.yml ├── spec └── codecs │ ├── ipfix.dat │ ├── netflow5.dat │ ├── ipfix_test_mikrotik_tpl.dat │ ├── ipfix_test_yaf_tpl45841.dat │ ├── netflow5_test_invalid01.dat │ ├── netflow5_test_invalid02.dat │ ├── netflow5_test_microtik.dat │ ├── netflow9_test_invalid01.dat │ ├── netflow9_test_valid01.dat │ ├── ipfix_test_barracuda_tpl.dat │ ├── ipfix_test_netscaler_data.dat │ ├── ipfix_test_netscaler_tpl.dat │ ├── ipfix_test_nokia_bras_tpl.dat │ ├── ipfix_test_viptela_tpl257.dat │ ├── ipfix_test_vmware_vds_tpl.dat │ ├── ipfix_test_yaf_data45841.dat │ ├── ipfix_test_yaf_data45873.dat │ ├── ipfix_test_yaf_data53248.dat │ ├── netflow9_test_h3c_tpl3281.dat │ ├── netflow9_test_nprobe_data.dat │ ├── netflow9_test_nprobe_dpi.dat │ ├── netflow9_test_nprobe_tpl.dat │ ├── ipfix_test_barracuda_data256.dat │ ├── ipfix_test_ixia_tpldata256.dat │ ├── ipfix_test_ixia_tpldata271.dat │ ├── ipfix_test_mikrotik_data258.dat │ ├── ipfix_test_mikrotik_data259.dat │ ├── ipfix_test_openbsd_pflow_tpl.dat │ ├── ipfix_test_procera_data52935.dat │ ├── ipfix_test_procera_tpl52935.dat │ ├── ipfix_test_viptela_data257.dat │ ├── netflow5_test_juniper_mx80.dat │ ├── netflow9_test_cisco_1941K9.dat │ ├── netflow9_test_cisco_wlc_tpl.dat │ ├── netflow9_test_h3c_data3281.dat │ ├── netflow9_test_macaddr_data.dat │ ├── ipfix_test_nokia_bras_data256.dat │ ├── ipfix_test_openbsd_pflow_data.dat │ ├── ipfix_test_vmware_vds_data264.dat │ ├── ipfix_test_vmware_vds_data266.dat │ ├── ipfix_test_yaf_tpls_option_tpl.dat │ ├── netflow9_cisco_asr1001x_tpl259.dat │ ├── netflow9_test_cisco_asa_1_data.dat │ ├── netflow9_test_cisco_asa_1_tpl.dat │ ├── netflow9_test_cisco_asa_2_data.dat │ ├── netflow9_test_macaddr_tpl.dat │ ├── ipfix_test_vmware_vds_data266_267.dat │ ├── netflow9_test_cisco_aci_data256.dat │ ├── netflow9_test_cisco_asa_2_tpl_26x.dat │ ├── netflow9_test_cisco_asa_2_tpl_27x.dat │ ├── netflow9_test_cisco_asr9k_data256.dat │ ├── netflow9_test_cisco_asr9k_data260.dat │ ├── 
netflow9_test_cisco_asr9k_tpl260.dat │ ├── netflow9_test_cisco_asr9k_tpl266.dat │ ├── netflow9_test_cisco_nbar_data262.dat │ ├── netflow9_test_cisco_nbar_tpl262.dat │ ├── netflow9_test_cisco_wlc_data261.dat │ ├── netflow9_test_juniper_srx_tplopt.dat │ ├── netflow9_test_paloalto_panos_data.dat │ ├── netflow9_test_paloalto_panos_tpl.dat │ ├── netflow9_test_softflowd_tpl_data.dat │ ├── netflow9_test_ubnt_edgerouter_tpl.dat │ ├── netflow9_test_cisco_aci_tpl256-258.dat │ ├── netflow9_test_cisco_asr9k_opttpl256.dat │ ├── netflow9_test_cisco_asr9k_opttpl257.dat │ ├── netflow9_test_cisco_asr9k_opttpl334.dat │ ├── netflow9_test_cisco_nbar_opttpl260.dat │ ├── netflow9_test_huawei_netstream_data.dat │ ├── netflow9_test_huawei_netstream_tpl.dat │ ├── netflow9_test_0length_fields_tpl_data.dat │ ├── netflow9_test_cisco_wlc_8510_tpl_262.dat │ ├── netflow9_test_paloalto_81_tpl256-263.dat │ ├── netflow9_test_streamcore_tpl_data256.dat │ ├── netflow9_test_streamcore_tpl_data260.dat │ ├── netflow9_test_ubnt_edgerouter_data1024.dat │ ├── netflow9_test_ubnt_edgerouter_data1025.dat │ ├── netflow9_test_unknown_tpl266_292_data.dat │ ├── netflow9_test_field_layer2segmentid_data.dat │ ├── netflow9_test_field_layer2segmentid_tpl.dat │ ├── netflow9_test_fortigate_fortios_521_tpl.dat │ ├── netflow9_test_fortigate_fortios_521_data256.dat │ ├── netflow9_test_fortigate_fortios_521_data257.dat │ ├── ipfix_test_barracuda_extended_uniflow_data256.dat │ ├── ipfix_test_barracuda_extended_uniflow_tpl256.dat │ ├── ipfix_test_juniper_mx240_junos151r6s3_data512.dat │ ├── netflow9_test_h3c_netstream_varstring_tpl3281.dat │ ├── ipfix_test_juniper_mx240_junos151r6s3_opttpl512.dat │ ├── netflow9_test_h3c_netstream_varstring_data3281.dat │ ├── netflow9_test_fortigate_fortios_542_appid_data258_262.dat │ ├── netflow9_test_fortigate_fortios_542_appid_tpl258-269.dat │ ├── netflow9_test_iptnetflow_reduced_size_encoding_tpldata260.dat │ ├── netflow9_test_paloalto_81_data257_1flowset_in_large_zerofilled_packet.dat │ 
├── benchmarks │ ├── IPAddr.rb │ ├── MacAddr.rb │ ├── ACLidASA.rb │ ├── IP6Addr.rb │ ├── benchmark_fields.rb │ ├── flowStartMilliseconds.rb │ ├── netflow_bench_cisco_asr.py │ ├── netflow_bench_cisco_asa.py │ └── ipfix_bench_sonicwall.py │ ├── ipfix_stress.py │ └── netflow_stress.py ├── Rakefile ├── NOTICE.TXT ├── .github ├── PULL_REQUEST_TEMPLATE.md ├── ISSUE_TEMPLATE.md └── CONTRIBUTING.md ├── Gemfile ├── logstash-codec-netflow.gemspec ├── CONTRIBUTORS ├── lib └── logstash │ └── codecs │ ├── netflow │ ├── iana2yaml.rb │ ├── util.rb │ └── netflow.yaml │ └── netflow.rb ├── README.md ├── RFC_COMPLIANCE_NETFLOW_v9.md ├── CHANGELOG.md ├── docs └── index.asciidoc ├── LICENSE └── RFC_COMPLIANCE_IPFIX.md /.gitignore: -------------------------------------------------------------------------------- 1 | *.gem 2 | Gemfile.lock 3 | .bundle 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | import: 2 | - logstash-plugins/.ci:travis/travis.yml@1.x -------------------------------------------------------------------------------- /spec/codecs/ipfix.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix.dat -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | @files=[] 2 | 3 | task :default do 4 | system("rake -T") 5 | end 6 | 7 | require "logstash/devutils/rake" 8 | -------------------------------------------------------------------------------- /spec/codecs/netflow5.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow5.dat 
-------------------------------------------------------------------------------- /spec/codecs/ipfix_test_mikrotik_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_mikrotik_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_yaf_tpl45841.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_yaf_tpl45841.dat -------------------------------------------------------------------------------- /spec/codecs/netflow5_test_invalid01.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow5_test_invalid01.dat -------------------------------------------------------------------------------- /spec/codecs/netflow5_test_invalid02.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow5_test_invalid02.dat -------------------------------------------------------------------------------- /spec/codecs/netflow5_test_microtik.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow5_test_microtik.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_invalid01.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_invalid01.dat 
-------------------------------------------------------------------------------- /spec/codecs/netflow9_test_valid01.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_valid01.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_barracuda_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_barracuda_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_netscaler_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_netscaler_data.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_netscaler_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_netscaler_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_nokia_bras_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_nokia_bras_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_viptela_tpl257.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_viptela_tpl257.dat 
-------------------------------------------------------------------------------- /spec/codecs/ipfix_test_vmware_vds_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_vmware_vds_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_yaf_data45841.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_yaf_data45841.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_yaf_data45873.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_yaf_data45873.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_yaf_data53248.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_yaf_data53248.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_h3c_tpl3281.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_h3c_tpl3281.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_nprobe_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_nprobe_data.dat 
-------------------------------------------------------------------------------- /spec/codecs/netflow9_test_nprobe_dpi.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_nprobe_dpi.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_nprobe_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_nprobe_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_barracuda_data256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_barracuda_data256.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_ixia_tpldata256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_ixia_tpldata256.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_ixia_tpldata271.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_ixia_tpldata271.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_mikrotik_data258.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_mikrotik_data258.dat 
-------------------------------------------------------------------------------- /spec/codecs/ipfix_test_mikrotik_data259.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_mikrotik_data259.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_openbsd_pflow_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_openbsd_pflow_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_procera_data52935.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_procera_data52935.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_procera_tpl52935.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_procera_tpl52935.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_viptela_data257.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_viptela_data257.dat -------------------------------------------------------------------------------- /spec/codecs/netflow5_test_juniper_mx80.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow5_test_juniper_mx80.dat 
-------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_1941K9.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_1941K9.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_wlc_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_wlc_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_h3c_data3281.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_h3c_data3281.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_macaddr_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_macaddr_data.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_nokia_bras_data256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_nokia_bras_data256.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_openbsd_pflow_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_openbsd_pflow_data.dat 
-------------------------------------------------------------------------------- /spec/codecs/ipfix_test_vmware_vds_data264.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_vmware_vds_data264.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_vmware_vds_data266.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_vmware_vds_data266.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_yaf_tpls_option_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_yaf_tpls_option_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_cisco_asr1001x_tpl259.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_cisco_asr1001x_tpl259.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asa_1_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asa_1_data.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asa_1_tpl.dat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asa_1_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asa_2_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asa_2_data.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_macaddr_tpl.dat: -------------------------------------------------------------------------------- 1 | uHVЀaD  8P 8P*) -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_vmware_vds_data266_267.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_vmware_vds_data266_267.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_aci_data256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_aci_data256.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asa_2_tpl_26x.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asa_2_tpl_26x.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asa_2_tpl_27x.dat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asa_2_tpl_27x.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asr9k_data256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asr9k_data256.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asr9k_data260.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asr9k_data260.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asr9k_tpl260.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asr9k_tpl260.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asr9k_tpl266.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asr9k_tpl266.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_nbar_data262.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_nbar_data262.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_nbar_tpl262.dat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_nbar_tpl262.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_wlc_data261.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_wlc_data261.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_juniper_srx_tplopt.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_juniper_srx_tplopt.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_paloalto_panos_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_paloalto_panos_data.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_paloalto_panos_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_paloalto_panos_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_softflowd_tpl_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_softflowd_tpl_data.dat -------------------------------------------------------------------------------- 
/spec/codecs/netflow9_test_ubnt_edgerouter_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_ubnt_edgerouter_tpl.dat -------------------------------------------------------------------------------- /NOTICE.TXT: -------------------------------------------------------------------------------- 1 | Elasticsearch 2 | Copyright 2012-2015 Elasticsearch 3 | 4 | This product includes software developed by The Apache Software 5 | Foundation (http://www.apache.org/). -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_aci_tpl256-258.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_aci_tpl256-258.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asr9k_opttpl256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asr9k_opttpl256.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asr9k_opttpl257.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asr9k_opttpl257.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_asr9k_opttpl334.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_asr9k_opttpl334.dat 
-------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_nbar_opttpl260.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_nbar_opttpl260.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_huawei_netstream_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_huawei_netstream_data.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_huawei_netstream_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_huawei_netstream_tpl.dat -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Thanks for contributing to Logstash! 
If you haven't already signed our CLA, here's a handy link: https://www.elastic.co/contributor-agreement/ 2 | -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_0length_fields_tpl_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_0length_fields_tpl_data.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_cisco_wlc_8510_tpl_262.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_cisco_wlc_8510_tpl_262.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_paloalto_81_tpl256-263.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_paloalto_81_tpl256-263.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_streamcore_tpl_data256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_streamcore_tpl_data256.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_streamcore_tpl_data260.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_streamcore_tpl_data260.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_ubnt_edgerouter_data1024.dat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_ubnt_edgerouter_data1024.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_ubnt_edgerouter_data1025.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_ubnt_edgerouter_data1025.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_unknown_tpl266_292_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_unknown_tpl266_292_data.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_field_layer2segmentid_data.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_field_layer2segmentid_data.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_field_layer2segmentid_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_field_layer2segmentid_tpl.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_fortigate_fortios_521_tpl.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_fortigate_fortios_521_tpl.dat 
-------------------------------------------------------------------------------- /spec/codecs/netflow9_test_fortigate_fortios_521_data256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_fortigate_fortios_521_data256.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_fortigate_fortios_521_data257.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_fortigate_fortios_521_data257.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_barracuda_extended_uniflow_data256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_barracuda_extended_uniflow_data256.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_barracuda_extended_uniflow_tpl256.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_barracuda_extended_uniflow_tpl256.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_juniper_mx240_junos151r6s3_data512.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_juniper_mx240_junos151r6s3_data512.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_h3c_netstream_varstring_tpl3281.dat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_h3c_netstream_varstring_tpl3281.dat -------------------------------------------------------------------------------- /spec/codecs/ipfix_test_juniper_mx240_junos151r6s3_opttpl512.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/ipfix_test_juniper_mx240_junos151r6s3_opttpl512.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_h3c_netstream_varstring_data3281.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_h3c_netstream_varstring_data3281.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_fortigate_fortios_542_appid_data258_262.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_fortigate_fortios_542_appid_data258_262.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_fortigate_fortios_542_appid_tpl258-269.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_fortigate_fortios_542_appid_tpl258-269.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_iptnetflow_reduced_size_encoding_tpldata260.dat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_iptnetflow_reduced_size_encoding_tpldata260.dat -------------------------------------------------------------------------------- /spec/codecs/netflow9_test_paloalto_81_data257_1flowset_in_large_zerofilled_packet.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/logstash-plugins/logstash-codec-netflow/HEAD/spec/codecs/netflow9_test_paloalto_81_data257_1flowset_in_large_zerofilled_packet.dat -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gemspec 4 | 5 | logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash" 6 | use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1" 7 | 8 | if Dir.exist?(logstash_path) && use_logstash_source 9 | gem 'logstash-core', :path => "#{logstash_path}/logstash-core" 10 | gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api" 11 | end 12 | 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Please post all product and debugging questions on our [forum](https://discuss.elastic.co/c/logstash). Your questions will reach our wider community members there, and if we confirm that there is a bug, then we can open a new issue here. 
2 | 3 | For all general issues, please provide the following details for fast resolution: 4 | 5 | - Version: 6 | - Operating System: 7 | - Config File (if you have sensitive info, please remove it): 8 | - Sample Data: 9 | - Steps to Reproduce: 10 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/IPAddr.rb: -------------------------------------------------------------------------------- 1 | require 'benchmark' 2 | require 'ipaddr' 3 | 4 | Benchmark.bm do |x| 5 | x.report { 6 | # Implementation pre v3.11.0 7 | ip = 3232235521 8 | 2000000.times do 9 | IPAddr.new_ntoh([ip].pack('N')).to_s 10 | end } 11 | 12 | x.report { 13 | # Implementation as of v3.11.2 14 | ip = 3232235521 15 | 2000000.times do 16 | [ip].pack('N').unpack('C4').join('.') 17 | end } 18 | 19 | x.report { 20 | ip = 3232235521 21 | 2000000.times do 22 | b = "%08x" % ip 23 | "%d.%d.%d.%d" % [b[0..1].to_i(16), b[2..3].to_i(16), b[4..5].to_i(16), b[6..7].to_i(16)] 24 | end } 25 | 26 | end 27 | 28 | # user system total real 29 | # 21.330000 0.000000 21.330000 ( 21.348559) 30 | # 4.410000 0.000000 4.410000 ( 4.411973) 31 | # 6.450000 0.000000 6.450000 ( 6.446321) 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/MacAddr.rb: -------------------------------------------------------------------------------- 1 | require 'benchmark' 2 | 3 | Benchmark.bm do |x| 4 | x.report { 5 | # Implementation pre v3.11.0 6 | bytes=[41, 41, 41, 41, 41, 41] 7 | 2000000.times do 8 | bytes.collect { |byte| 9 | unless byte.nil? 
10 | byte.to_s(16).rjust(2,'0') 11 | end 12 | }.join(":") 13 | end } 14 | 15 | x.report { 16 | # Implementation as of v3.11.1 17 | bytes='AAAAAA' 18 | 2000000.times do 19 | b = bytes.unpack('H*')[0] 20 | b.scan(/../).collect { |byte| byte }.join(":") 21 | end } 22 | 23 | x.report { 24 | bytes='AAAAAA' 25 | 2000000.times do 26 | b = bytes.unpack('H*')[0] 27 | b[0..1] + ":" + b[2..3] + ":" + b[4..5] + ":" + b[6..7] + ":" + b[8..9] + ":" + b[10..11] 28 | end } 29 | end 30 | 31 | # user system total real 32 | # 8.400000 0.000000 8.400000 ( 8.408549) 33 | # 10.960000 0.000000 10.960000 ( 10.959357) 34 | # 5.600000 0.000000 5.600000 ( 5.597817) 35 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/ACLidASA.rb: -------------------------------------------------------------------------------- 1 | require 'benchmark' 2 | 3 | Benchmark.bm do |x| 4 | x.report { 5 | # Implementation pre v3.11.0 6 | bytes=[41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41] 7 | 2000000.times do 8 | b = bytes.collect { |byte| 9 | unless byte.nil? 
10 | byte.to_s(16).rjust(2,'0') 11 | end 12 | }.join 13 | b.scan(/......../).collect { |aclid| aclid }.join("-") 14 | end } 15 | 16 | x.report { 17 | # Implementation as of v3.11.1 18 | bytes='AAAAAAAAAAAA' 19 | 2000000.times do 20 | b = bytes.unpack('H*')[0] 21 | b.scan(/......../).collect { |aclid| aclid }.join("-") 22 | end } 23 | 24 | x.report { 25 | # Implementation as of v3.11.2 26 | bytes='AAAAAAAAAAAA' 27 | 2000000.times do 28 | b = bytes.unpack('H*')[0] 29 | b[0..7] + "-" + b[8..15] + "-" + b[16..23] 30 | end } 31 | end 32 | 33 | # user system total real 34 | # 19.710000 0.000000 19.710000 ( 19.717288) 35 | # 7.000000 0.000000 7.000000 ( 7.003011) 36 | # 3.500000 0.000000 3.500000 ( 3.501547) 37 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/IP6Addr.rb: -------------------------------------------------------------------------------- 1 | require 'benchmark' 2 | require 'ipaddr' 3 | require 'bindata' 4 | 5 | Benchmark.bm do |x| 6 | x.report { 7 | # Implementation since v0.1 8 | ip = 85060308944708794891899627827609206785 9 | 2000000.times do 10 | IPAddr.new_ntoh((0..7).map { |i| 11 | (ip >> (112 - 16 * i)) & 0xffff 12 | }.pack('n8')).to_s 13 | end } 14 | 15 | x.report { 16 | # Implementation since v4.2.0 17 | ip = 85060308944708794891899627827609206785 18 | 2000000.times do 19 | b = "%032x" % ip 20 | c = b[0..3] + ":" + b[4..7] + ":" + b[8..11] + ":" + b[12..15] + ":" + b[16..19] + ":" + b[20..23] + ":" + b[24..27] + ":" + b[28..31] 21 | IPAddr.new(c).to_s 22 | end } 23 | 24 | x.report { 25 | # Alternative. 
Loses compressed IPv6 notation 26 | ip = 85060308944708794891899627827609206785 27 | 2000000.times do 28 | b = "%032x" % ip 29 | b[0..3] + ":" + b[4..7] + ":" + b[8..11] + ":" + b[12..15] + ":" + b[16..19] + ":" + b[20..23] + ":" + b[24..27] + ":" + b[28..31] 30 | end } 31 | 32 | end 33 | 34 | # user system total real 35 | # 81.500000 0.000000 81.500000 ( 81.498991) 36 | # 78.210000 0.000000 78.210000 ( 78.252662) 37 | # 11.710000 0.010000 11.720000 ( 11.712025) 38 | 39 | -------------------------------------------------------------------------------- /logstash-codec-netflow.gemspec: -------------------------------------------------------------------------------- 1 | Gem::Specification.new do |s| 2 | 3 | s.name = 'logstash-codec-netflow' 4 | s.version = '4.3.2' 5 | s.licenses = ['Apache License (2.0)'] 6 | s.summary = "Reads Netflow v5, Netflow v9 and IPFIX data" 7 | s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. 
This gem is not a stand-alone program" 8 | s.authors = ["Elastic"] 9 | s.email = 'info@elastic.co' 10 | s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html" 11 | s.require_paths = ["lib"] 12 | 13 | # Files 14 | s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE.TXT", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"] 15 | 16 | # Tests 17 | s.test_files = s.files.grep(%r{^(test|spec|features)/}) 18 | 19 | # Special flag to let us know this is actually a logstash plugin 20 | s.metadata = { "logstash_plugin" => "true", "logstash_group" => "codec" } 21 | 22 | # Gem dependencies 23 | s.add_runtime_dependency "logstash-core-plugin-api", "~> 2.0" 24 | s.add_runtime_dependency 'logstash-mixin-event_support', '~> 1.0' 25 | 26 | s.add_runtime_dependency 'bindata', ['>= 1.5.0'] 27 | s.add_development_dependency 'logstash-devutils', ['>= 1.0.0'] 28 | end 29 | 30 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | The following is a list of people who have contributed ideas, code, bug 2 | reports, or in general have helped logstash along its way. 3 | 4 | Contributors: 5 | * Aaron Mildenstein (untergeek) 6 | * Adam Kaminski (thimslugga) 7 | * Ana (janniten) 8 | * Andrew Cholakian (andrewvc) 9 | * Ayden Beeson (abeeson) 10 | * Bjørn Ruberg (bruberg) 11 | * Colin Surprenant (colinsurprenant) 12 | * Daniel Nägele (analogbyte) 13 | * Dan Hermann (danhermann) 14 | * Diyaldine Maoulida 15 | * Evgeniy Sudyr (ejectck) 16 | * G.J. 
Moed (gjmoed) 17 | * Gmoz Shih 18 | * Jason Liu (JasonLZJ) 19 | * James Park-Watt (jimmypw) 20 | * Jason Keller (jasonkeller) 21 | * Jayme Johnston 22 | * Jeremy Foran (jeremyforan) 23 | * Jordan Sissel (jordansissel) 24 | * Jorrit Folmer (jorritfolmer) 25 | * Keenan Tims (ktims) 26 | * Magnus Kessler (kesslerm) 27 | * Marian Craciunescu (marian-craciunescu) 28 | * Matt Dainty (bodgit) 29 | * Max Caines (maxcaines) 30 | * Paul Warren (pwarren) 31 | * Pedro de Oliveira 32 | * Philipp Kahr 33 | * Philippe Veys 34 | * Pier-Hugues Pellerin (ph) 35 | * Pulkit Agrawal (propulkit) 36 | * Raju Nair (rajutech76) 37 | * Richard Pijnenburg (electrical) 38 | * Rob Cowart (robcowart) 39 | * Ry Biesemeyer (yaauie) 40 | * Salvador Ferrer (salva-ferrer) 41 | * Vishal Solanki 42 | * Will Rigby (wrigby) 43 | * Yehonatan Devorkin (Devorkin) 44 | * Rojuinex 45 | * Sjaak01 46 | * debadair 47 | * HenryTheSir 48 | * hkshirish 49 | * hhindlem 50 | * niempy 51 | * jstopinsek 52 | * sliddjur 53 | * szhong12 54 | * zwirk 55 | 56 | Maintainer: 57 | * - 58 | 59 | Note: If you've sent us patches, bug reports, or otherwise contributed to 60 | Logstash, and you aren't on the list above and want to be, please let us know 61 | and we'll make sure you're here. Contributions from folks like you are what make 62 | open source awesome. 
63 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/benchmark_fields.rb: -------------------------------------------------------------------------------- 1 | require 'benchmark' 2 | require 'bindata' 3 | require '../../../lib/logstash/codecs/netflow/util.rb' 4 | 5 | Benchmark.bm(16) do |x| 6 | x.report("IP4Addr") { 7 | data = ["344c01f9"].pack("H*") 8 | 200000.times do 9 | IP4Addr.read(data) 10 | end } 11 | 12 | x.report("IP6Addr") { 13 | data = ["fe80000000000000e68d8cfffe20ede6"].pack("H*") 14 | 200000.times do 15 | IP6Addr.read(data) 16 | end } 17 | 18 | x.report("IP6Addr_Test") { 19 | data = ["fe80000000000000e68d8cfffe20ede6"].pack("H*") 20 | 200000.times do 21 | IP6Addr_Test.read(data) 22 | end } 23 | 24 | x.report("MacAddr") { 25 | data = ["005056c00001"].pack("H*") 26 | 200000.times do 27 | MacAddr.read(data) 28 | end } 29 | 30 | x.report("ACLIdASA") { 31 | data = ["433a1af1be9efe9600000000"].pack("H*") 32 | 200000.times do 33 | ACLIdASA.read(data) 34 | end } 35 | 36 | x.report("Application_Id64") { 37 | data = ["140000304400003dc8"].pack("H*") 38 | 200000.times do 39 | Application_Id64.read(data) 40 | end } 41 | 42 | x.report("VarString") { 43 | data = ["184c534e34344031302e3233312e3232332e31313300000000"].pack("H*") 44 | 200000.times do 45 | VarString.read(data) 46 | end } 47 | 48 | x.report("VarString_Test") { 49 | data = ["184c534e34344031302e3233312e3232332e31313300000000"].pack("H*") 50 | 200000.times do 51 | VarString_Test.read(data) 52 | end } 53 | 54 | end 55 | 56 | # user system total real 57 | # IP4Addr 24.120000 0.000000 24.120000 ( 24.123782) 58 | # IP6Addr 37.940000 0.010000 37.950000 ( 37.950464) 59 | # MacAddr 25.270000 0.000000 25.270000 ( 25.282082) 60 | # ACLIdASA 24.870000 0.000000 24.870000 ( 24.882335) 61 | # Application_Id64 41.270000 0.000000 41.270000 ( 41.305001) 62 | # VarString 39.030000 0.000000 39.030000 ( 39.062235) 63 | 64 | 65 | 66 | 
-------------------------------------------------------------------------------- /spec/codecs/benchmarks/flowStartMilliseconds.rb: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | require 'benchmark' 3 | require 'bindata' 4 | 5 | 6 | Benchmark.bm do |x| 7 | x.report { 8 | # Original IPFIX version, simplified 9 | k = 'flowStartMilliseconds' 10 | data = '1000' 11 | v = BinData::String.new(:read_length => 4) 12 | v.read(data) 13 | 2000000.times do 14 | case k.to_s 15 | when /^flow(?:Start|End)Seconds$/ 16 | event = 'blah' 17 | when /^flow(?:Start|End)(Milli|Micro|Nano)seconds$/ 18 | case $1 19 | when 'Milli' 20 | event = v.snapshot.to_f / 1_000 21 | end 22 | end 23 | end } 24 | 25 | x.report { 26 | # Verion that omits v.snapshot, simplified 27 | k = 'flowStartMilliseconds' 28 | data = '1000' 29 | v = BinData::String.new(:read_length => 4) 30 | v.read(data) 31 | 2000000.times do 32 | case k.to_s 33 | when /^flow(?:Start|End)Seconds$/ 34 | event = 'blah' 35 | when /^flow(?:Start|End)(Milli|Micro|Nano)seconds$/ 36 | case $1 37 | when 'Milli' 38 | event = data.to_f / 1_000 39 | end 40 | end 41 | end } 42 | 43 | x.report { 44 | # Original Netflow9 version, simplified 45 | class MockFlowset < BinData::Record 46 | endian :little 47 | uint8 :uptime 48 | uint8 :unix_sec 49 | end 50 | SWITCHED = /_switched$/ 51 | data1 = 'AB' 52 | flowset = MockFlowset.read(data1) 53 | k = 'first_switched' 54 | v = 20 55 | 2000000.times do 56 | case k.to_s 57 | when SWITCHED 58 | millis = flowset.uptime - v 59 | seconds = flowset.unix_sec - (millis / 1000) 60 | # v9 did away with the nanosecs field 61 | micros = 1000000 - (millis % 1000) 62 | event = v 63 | else 64 | event = 'blah' 65 | end 66 | end } 67 | 68 | end 69 | 70 | # user system total real 71 | # 4.730000 0.000000 4.730000 ( 4.731333) 72 | # 2.400000 0.000000 2.400000 ( 2.401072) 73 | # 2.750000 0.000000 2.750000 ( 2.747525) 74 | 
-------------------------------------------------------------------------------- /lib/logstash/codecs/netflow/iana2yaml.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'open-uri' 4 | require 'csv' 5 | require 'yaml' 6 | 7 | # Convert IANA types to those used by BinData or created by ourselves 8 | def iana2bindata(type) 9 | case type 10 | when /^unsigned(\d+)$/ 11 | return 'uint' + $1 12 | when /^signed(\d+)$/ 13 | return 'int' + $1 14 | when 'float32' 15 | return 'float' 16 | when 'float64' 17 | return 'double' 18 | when 'ipv4Address' 19 | return 'ip4_addr' 20 | when 'ipv6Address' 21 | return 'ip6_addr' 22 | when 'macAddress' 23 | return 'mac_addr' 24 | when 'octetArray', 'string' 25 | return 'string' 26 | when 'dateTimeSeconds' 27 | return 'uint32' 28 | when 'dateTimeMilliseconds', 'dateTimeMicroseconds', 'dateTimeNanoseconds' 29 | return 'uint64' 30 | when 'boolean' 31 | return 'uint8' 32 | when 'basicList', 'subTemplateList', 'subTemplateMultiList' 33 | return 'skip' 34 | else 35 | raise "Unknown type #{type}" 36 | end 37 | end 38 | 39 | def iana2hash(url) 40 | fields = { 0 => {} } 41 | 42 | # Read in IANA-registered Information Elements (PEN 0) 43 | CSV.new(open(url), :headers => :first_row, :converters => :numeric).each do |line| 44 | # If it's not a Fixnum it's something like 'x-y' used to mark reserved blocks 45 | next if line['ElementID'].class != Fixnum 46 | 47 | # Blacklisted ID's 48 | next if [0].include?(line['ElementID']) 49 | 50 | # Skip any elements with no name 51 | next unless line['Name'] and line['Data Type'] 52 | 53 | fields[0][line['ElementID']] = [iana2bindata(line['Data Type']).to_sym] 54 | if fields[0][line['ElementID']][0] != :skip 55 | fields[0][line['ElementID']] << line['Name'].to_sym 56 | end 57 | end 58 | 59 | # Overrides 60 | fields[0][210][0] = :skip # 210 is PaddingOctets so skip them properly 61 | fields[0][210].delete_at(1) 62 | 63 | # Generate the 
reverse PEN (PEN 29305) 64 | reversed = fields[0].reject { |k| 65 | # Excluded according to RFC 5103 66 | [40,41,42,130,131,137,145,148,149,163,164,165,166,167,168,173,210,211,212,213,214,215,216,217,239].include?(k) 67 | }.map { |k,v| 68 | [k, v.size > 1 ? [v[0], ('reverse' + v[1].to_s.slice(0,1).capitalize + v[1].to_s.slice(1..-1)).to_sym] : [v[0]]] 69 | } 70 | fields[29305] = Hash[reversed] 71 | 72 | return fields 73 | end 74 | 75 | ipfix_fields = iana2hash('http://www.iana.org/assignments/ipfix/ipfix-information-elements.csv') 76 | 77 | puts YAML.dump(ipfix_fields) 78 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Logstash 2 | 3 | All contributions are welcome: ideas, patches, documentation, bug reports, 4 | complaints, etc! 5 | 6 | Programming is not a required skill, and there are many ways to help out! 7 | It is more important to us that you are able to contribute. 8 | 9 | That said, some basic guidelines, which you are free to ignore :) 10 | 11 | ## Want to learn? 12 | 13 | Want to lurk about and see what others are doing with Logstash? 14 | 15 | * The irc channel (#logstash on irc.freenode.org) is a good place for this 16 | * The [forum](https://discuss.elastic.co/c/logstash) is also 17 | great for learning from others. 18 | 19 | ## Got Questions? 20 | 21 | Have a problem you want Logstash to solve for you? 22 | 23 | * You can ask a question in the [forum](https://discuss.elastic.co/c/logstash) 24 | * Alternately, you are welcome to join the IRC channel #logstash on 25 | irc.freenode.org and ask for help there! 26 | 27 | ## Have an Idea or Feature Request? 28 | 29 | * File a ticket on [GitHub](https://github.com/elastic/logstash/issues). Please remember that GitHub is used only for issues and feature requests. 
If you have a general question, the [forum](https://discuss.elastic.co/c/logstash) or IRC would be the best place to ask. 30 | 31 | ## Something Not Working? Found a Bug? 32 | 33 | If you think you found a bug, it probably is a bug. 34 | 35 | * If it is a general Logstash or a pipeline issue, file it in [Logstash GitHub](https://github.com/elasticsearch/logstash/issues) 36 | * If it is specific to a plugin, please file it in the respective repository under [logstash-plugins](https://github.com/logstash-plugins) 37 | * or ask the [forum](https://discuss.elastic.co/c/logstash). 38 | 39 | # Contributing Documentation and Code Changes 40 | 41 | If you have a bugfix or new feature that you would like to contribute to 42 | logstash, and you think it will take more than a few minutes to produce the fix 43 | (ie; write code), it is worth discussing the change with the Logstash users and developers first! You can reach us via [GitHub](https://github.com/elastic/logstash/issues), the [forum](https://discuss.elastic.co/c/logstash), or via IRC (#logstash on freenode irc) 44 | Please note that Pull Requests without tests will not be merged. If you would like to contribute but do not have experience with writing tests, please ping us on IRC/forum or create a PR and ask our help. 45 | 46 | ## Contributing to plugins 47 | 48 | Check our [documentation](https://www.elastic.co/guide/en/logstash/current/contributing-to-logstash.html) on how to contribute to plugins or write your own! It is super easy! 49 | 50 | ## Contribution Steps 51 | 52 | 1. Test your changes! [Run](https://github.com/elastic/logstash#testing) the test suite 53 | 2. Please make sure you have signed our [Contributor License 54 | Agreement](https://www.elastic.co/contributor-agreement/). We are not 55 | asking you to assign copyright to us, but to give us the right to distribute 56 | your code without restriction. 
We ask this of all contributors in order to 57 | assure our users of the origin and continuing existence of the code. You 58 | only need to sign the CLA once. 59 | 3. Send a pull request! Push your changes to your fork of the repository and 60 | [submit a pull 61 | request](https://help.github.com/articles/using-pull-requests). In the pull 62 | request, describe what your changes do and mention any bugs/issues related 63 | to the pull request. 64 | 65 | 66 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Logstash Plugin 2 | 3 | [![Travis Build Status](https://travis-ci.com/logstash-plugins/logstash-codec-netflow.svg)](https://travis-ci.com/logstash-plugins/logstash-codec-netflow) 4 | 5 | This is a plugin for [Logstash](https://github.com/elastic/logstash). 6 | 7 | It is fully free and fully open source. The license is Apache 2.0, meaning you are pretty much free to use it however you want in whatever way. 8 | 9 | ## Documentation 10 | 11 | Logstash provides infrastructure to automatically generate documentation for this plugin. We use the asciidoc format to write documentation so any comments in the source code will be first converted into asciidoc and then into html. All plugin documentation are placed under one [central location](http://www.elastic.co/guide/en/logstash/current/). 12 | 13 | - For formatting code or config example, you can use the asciidoc `[source,ruby]` directive 14 | - For more asciidoc formatting tips, see the excellent reference here https://github.com/elastic/docs#asciidoc-guide 15 | 16 | ## Need Help? 17 | 18 | Need help? Try #logstash on freenode IRC or the https://discuss.elastic.co/c/logstash discussion forum. 19 | 20 | ## Developing 21 | 22 | ### 1. Plugin Developement and Testing 23 | 24 | #### Code 25 | - To get started, you'll need JRuby with the Bundler gem installed. 
26 | 27 | - Create a new plugin or clone and existing from the GitHub [logstash-plugins](https://github.com/logstash-plugins) organization. We also provide [example plugins](https://github.com/logstash-plugins?query=example). 28 | 29 | - Install dependencies 30 | ```sh 31 | bundle install 32 | ``` 33 | 34 | #### Test 35 | 36 | - Update your dependencies 37 | 38 | ```sh 39 | bundle install 40 | ``` 41 | 42 | - Run tests 43 | 44 | ```sh 45 | bundle exec rspec 46 | ``` 47 | 48 | ### 2. Running your unpublished Plugin in Logstash 49 | 50 | #### 2.1 Run in a local Logstash clone 51 | 52 | - Edit Logstash `Gemfile` and add the local plugin path, for example: 53 | ```ruby 54 | gem "logstash-filter-awesome", :path => "/your/local/logstash-filter-awesome" 55 | ``` 56 | - Install plugin 57 | ```sh 58 | # Logstash 2.3 and higher 59 | bin/logstash-plugin install --no-verify 60 | 61 | # Prior to Logstash 2.3 62 | bin/plugin install --no-verify 63 | 64 | ``` 65 | - Run Logstash with your plugin 66 | ```sh 67 | bin/logstash -e 'filter {awesome {}}' 68 | ``` 69 | At this point any modifications to the plugin code will be applied to this local Logstash setup. After modifying the plugin, simply rerun Logstash. 
70 | 71 | #### 2.2 Run in an installed Logstash 72 | 73 | You can use the same **2.1** method to run your plugin in an installed Logstash by editing its `Gemfile` and pointing the `:path` to your local plugin development directory or you can build the gem and install it using: 74 | 75 | - Build your plugin gem 76 | ```sh 77 | gem build logstash-filter-awesome.gemspec 78 | ``` 79 | - Install the plugin from the Logstash home 80 | ```sh 81 | # Logstash 2.3 and higher 82 | bin/logstash-plugin install --no-verify 83 | 84 | # Prior to Logstash 2.3 85 | bin/plugin install --no-verify 86 | 87 | ``` 88 | - Start Logstash and proceed to test the plugin 89 | 90 | ## Contributing 91 | 92 | All contributions are welcome: ideas, patches, documentation, bug reports, complaints, and even something you drew up on a napkin. 93 | 94 | Programming is not a required skill. Whatever you've seen about open source and maintainers or community members saying "send patches or die" - you will not see that here. 95 | 96 | It is more important to the community that you are able to contribute. 97 | 98 | For more information about contributing, see the [CONTRIBUTING](https://github.com/elastic/logstash/blob/master/CONTRIBUTING.md) file. 
-------------------------------------------------------------------------------- /spec/codecs/ipfix_stress.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import sys 3 | import time 4 | import random 5 | 6 | ## Standalone IPFIX stressor 7 | ## Used to reproduce issue 134 https://github.com/logstash-plugins/logstash-codec-netflow/issues/134 8 | 9 | host = 'host02' 10 | port = 2055 11 | 12 | tpl = '\x00\n\x00\xa4Z\xd2\xc6\xfc\x00\x00K\xce\xabfn\xab\x00\x02\x00\x94\xce\xc7\x00\x17\x00\x08\x00\x04\x00\x1b\x00\x10\x00\x07\x00\x02\x00\x0c\x00\x04\x00\x1c\x00\x10\x00\x0b\x00\x02\x00\x10\x00\x04\x00\x11\x00\x04\x00\x04\x00\x01\x80\x01\xff\xff\x00\x00<%\x80\x1c\xff\xff\x00\x00<%\x00\x96\x00\x04\x00\x97\x00\x04\x80\x03\x00\x08\x00\x00<%\x80\x04\x00\x08\x00\x00<%\x80\x15\xff\xff\x00\x00<%\x80\x19\x00\x04\x00\x00<%\x80\x1a\xff\xff\x00\x00<%\x80\x16\xff\xff\x00\x00<%\x80\x0f\xff\xff\x00\x00<%\x80\x02\xff\xff\x00\x00<%\x80\x10\xff\xff\x00\x00<%\x80/\xff\xff\x00\x00<%' 13 | 14 | # 8 flows: 15 | data = '\x00\n\x05KZ\xd2\xc78\x00\x00K\xd4\xabfn\xab\xce\xc7\x05;\xb5\xd6WG\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd2\x1b\x8a,\xa1\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xba\xde\x00\x00\x1d\x97\x00\x00\x1d\x97\x06\x0eBeing analyzed\x00Z\xd2\xc6zZ\xd2\xc6\xfe\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!INITIAL,SERVER_IS_LOCAL,BEGINNING\x0eBeing analyzed\x00\x05IPFIX\x00\x00\x00\x00 \x01\x03\x88\xcf\n\x00\x06\x00\x00\x00\x00\x00\x00\x00\x01\x00\x88\x00\x00\x00\x00 \x01\x03\x88\xcf\n\x00\x06\x00\x00\x00\x00\x00\x00\x00\x02\x00\x87\x00\x00\x00\x00\x00\x00\x00\x00:\x1aIP protocol 58 (IPv6-ICMP)\x00Z\xd2\xc6\xecZ\xd2\xc6\xfe\x00\x00\x00\x00\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\x00\x00\x00-INITIAL,SERVER_IS_LOCAL,BEGINNING,ESTABLISHED\x1aIP protocol 58 
(IPv6-ICMP)\x00\x05IPFIX\x05\xbc\x0b#\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac{\x8a,\xa1\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00V\xec\x00\x00\x1d\x97\x00\x00\x1d\x97\x06\x0eBeing analyzed\x00Z\xd2\xc6\x84Z\xd2\xc7\x02\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!INITIAL,SERVER_IS_LOCAL,BEGINNING\x0eBeing analyzed\x00\x05IPFIX\xceu\x19Y\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8a,\xa1\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\xe2\x00\x00\x1d\x97\x01\x14IP protocol 1 (ICMP)\x00Z\xd2\xc6\xfeZ\xd2\xc7*\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!INITIAL,SERVER_IS_LOCAL,BEGINNING\x14IP protocol 1 (ICMP)\x00\x05IPFIX\x00\x00\x00\x00 \x01\x03\x88\xcf\n\x00\x06\x00\x00\x00\x00\x00\x00\x00\x01\x00\x88\x00\x00\x00\x00 \x01\x03\x88\xcf\n\x00\x06\x00\x00\x00\x00\x00\x00\x00\x02\x00\x87\x00\x00\x00\x00\x00\x00\x00\x00:\x1aIP protocol 58 (IPv6-ICMP)\x00Z\xd2\xc7\nZ\xd2\xc7*\x00\x00\x00\x00\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\x00\x00\x00-INITIAL,SERVER_IS_LOCAL,BEGINNING,ESTABLISHED\x1aIP protocol 58 (IPv6-ICMP)\x00\x05IPFIX\xb9\xe8\x1d\xc7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xda=\x8a,\xa1\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1d\x1b\x00\x00\x1d\x97\x00\x00\x1d\x97\x06\x0eBeing analyzed\x00Z\xd2\xc6\xfbZ\xd2\xc78\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!INITIAL,SERVER_IS_LOCAL,BEGINNING\x0eBeing analyzed\x00\x05IPFIX\xb1\xbc\xe4\x89\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\xd6\x8a,\xa1\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xd0\x00\x00\x1d\x97\x00\x00\x1d\x97\x06\x0eBeing 
analyzed\x00Z\xd2\xc7\x1aZ\xd2\xc78\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!INITIAL,SERVER_IS_LOCAL,BEGINNING\x0eBeing analyzed\x00\x05IPFIX\x8a,\xa1\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x83\x99\x8a,\xa1\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb3\x00\x00\x1d\x97\x00\x00\x1d\x97\x06\x05BGP-4\x00Z\xd2\xc6\x0cZ\xd2\xc78\x00\x00\x00\x00\x00\x00\x1b\xa4\x00\x00\x00\x00\x00\x00\x0c\xee\x00\x00\x00\x00\x00\x00\x006INTERACTIVE,CLIENT_IS_LOCAL,INBOUND,ESTABLISHED,ACTIVE\x05BGP-4\x00\x05IPFIX' 16 | 17 | 18 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 19 | 20 | print("IPFIX v9: sending 1 template 1 data packet in an infinite loop") 21 | 22 | duration = 0.0 23 | while True: 24 | for i in range(0,400): 25 | sock.sendto(tpl, (host, port)) 26 | sock.sendto(data, (host, port)) 27 | sys.stdout.write('.') 28 | sys.stdout.flush() 29 | time.sleep(random.random()) 30 | print 31 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/netflow_bench_cisco_asr.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import sys 3 | import time 4 | 5 | 6 | # Netflow v9 template 7 | tpl = '\x00\t\x00\x01e\x9c\xc0_XF\x8eU\x01u\xc7\x03\x00\x00\x08\x81\x00\x00\x00d\x01\x04\x00\x17\x00\x02\x00\x04\x00\x01\x00\x04\x00\x08\x00\x04\x00\x0c\x00\x04\x00\n\x00\x04\x00\x0e\x00\x04\x00\x15\x00\x04\x00\x16\x00\x04\x00\x07\x00\x02\x00\x0b\x00\x02\x00\x10\x00\x04\x00\x11\x00\x04\x00\x12\x00\x04\x00\t\x00\x01\x00\r\x00\x01\x00\x04\x00\x01\x00\x06\x00\x01\x00\x05\x00\x01\x00=\x00\x01\x00Y\x00\x01\x000\x00\x02\x00\xea\x00\x04\x00\xeb\x00\x04' 8 | 9 | # Netflow v9 data, 21 flows: 10 | data = 
'\x00\t\x00\x15e\x9c\xbcqXF\x8eT\x01u\xc6\xa1\x00\x00\x08\x81\x01\x04\x05\\\x00\x00\x00\x01\x00\x00\x00(\n\x00\t\x92\n\x00\x1fQ\x00\x00\x00n\x00\x00\x00\x9ee\x9cG\x05e\x9cG\x05\xd3\x01\x01\xbb\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x10\x14\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00h\n\x00\x11*\n\x00#\x04\x00\x00\x00W\x00\x00\x00\x9ee\x9cI\x88e\x9cG\x07\x8e\x84\x01\xbb\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x15\x10\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x004\n\x00\x16o\n\x00"\x8d\x00\x00\x00h\x00\x00\x00\x9ee\x9cG\ne\x9cG\nA\xae\x01\xbb\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x18\x10\x06\x11\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x01\xb3\n\x00\x17;\n\x00$\xaa\x00\x00\x00V\x00\x00\x00\x9ee\x9cG\x0ce\x9cG\x0c\x005\xfd,\x00\x00\x00\x00\x00\x00\xfb\xf1\n\x00\x0e\x1f\x19\x13\x11\x00\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x03\xc9\n\x00"G\n\x00\x14\xf2\x00\x00\x00\x9e\x00\x00\x00je\x9cG\re\x9cG\r\x01\xbb\x07\xdd\x00\x00\xfb\xf0\x00\x00\xff\xa2\n\x00\x12\x05\x10\x15\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00h\n\x00\n\x85\n\x00\x1ef\x00\x00\x00n\x00\x00\x00\x9ee\x9cG\re\x9cF\xba\x89\xc9\x00P\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x10\x10\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x004\n\x00%\x1d\n\x00\x06\x18\x00\x00\x00f\x00\x00\x00\xa2e\x9cG\x10e\x9cG\x10\x00P\xdd\xc3\x00\x00;\x1d\x00\x00\xff\x97\n\x00\x00\xf2\x18\x10\x06\x10 \x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x02f\n\x00 
\xb0\n\x00\x0bq\x00\x00\x00\x9e\x00\x00\x00.e\x9cG\x10e\x9cG\x10\x01\xbb\xdd\xfe\x00\x00\xfb\xf0\x00\x00\xff\x98\n\x00\x12i\x14\x10\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x03\x00\x00\x10\xfe\n\x00\x0c\x15\n\x00\x0f&\x00\x00\x00W\x00\x00\x00\x9ee\x9cG\x11e\x9c1\xe7\x01\xbb\x9c\x8e\x00\x00\x80\xa6\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x02\x15\n\x00\x04\xd4\n\x00\x03n\x00\x00\x00\xa2\x00\x00\x00fe\x9cT\x07e\x9cG\x12\xc6\x03\x01\xbb\x00\x00\xff\x97\x00\x00\x00F\n\x00\x10e\x10\x11\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x01E\x00\x005\\\n\x00!z\n\x00\x01\x88\x00\x00\x00\x9e\x00\x00\x00he\x9co\xd0e\x9c"\x1a\xe5\xbe\x00P\x00\x00\xfb\xf1\x00\x00\x00\x00\x00\x00\x00\x00\x15\x1b\x06\x10\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00Y\n\x00\x14\xf2\n\x00"G\x00\x00\x00j\x00\x00\x00\x9ee\x9cG\x14e\x9cG\x14\x07\xdd\x01\xbb\x00\x00\xff\xa2\x00\x00\xfb\xf0\n\x00\x0e!\x15\x10\x06\x18`\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x03A\n\x00\r\x19\n\x00\x0f&\x00\x00\x00W\x00\x00\x00\x9ee\x9cG\x16e\x9cG\x16\x01\xbb\xc9\xa5\x00\x00\x80\xa6\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x06Y\n\x00\x19;\n\x00\x02\x12\x00\x00\x00\x9e\x00\x00\x00ne\x9cG\x18e\x9cF\xbf\x01\xbb\xf4\x00\x00\x00\xfb\xf0\x00\x00\xff\x9d\n\x00\x12~\x10\x10\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00a\x00\x02+h\n\x00\x07I\n\x00\x1b\xa8\x00\x00\x00V\x00\x00\x00\x9ee\x9cu\xabe\x9c1\xfe\xeb\x98\x01\xd1\x00\x00\xff\x9c\x00\x00\xfb\xf0\n\x00\x0e!\x10\x10\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00:\x00\x00\x0b\xc8\n\x00\x132\n\x00\x1b\xa9\x00\x00\x00j\x00\x00\x00\x9ee\x9cO\xcbe\x9cE:\x86\x94\x03\xe3\x00\x00\xff\xb7\x00\x00\xfb\xf0\n\x00\x0e!\x12\x10\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x15\x00\x00{\x0c\n\x00\x1c\x96\n\x00\x18\r\x00\x00\
x00\x9e\x00\x00\x00he\x9cHYe\x9cF\xf0\x01\xbb\xc2\xfd\x00\x00\xfb\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x10\x19\x06\x10\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x03\x00\x00\x0bg\n\x00\x1a\xbc\n\x00\x15\xc8\x00\x00\x00\x9e\x00\x00\x00We\x9cGfe\x9cE\xec\x03\xe1\xc4N\x00\x00\xfb\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x10\x19\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x05\x00\x00\x11\xa2\n\x00\x1d"\n\x00\x0f&\x00\x00\x00K\x00\x00\x00\x9ee\x9cm`e\x9cA\xfe\x01\xbb\x8c\x8f\x00\x00;A\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x01F\n\x00\x08\xc8\n\x00\x05\xe0\x00\x00\x00f\x00\x00\x00\xa2e\x9cG\x1de\x9cG\x1dZX\xc9\xd7\x00\x00\x03\x15\x00\x00\xff\x97\n\x00\x00\xf2\x10\x10\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00p\n\x00\x1d.\n\x00\x0f&\x00\x00\x00K\x00\x00\x00\x9ee\x9cG\x1de\x9c@\xea\x01\xbb\xcc\x8c\x00\x00;A\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x12\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00' 11 | 12 | host = 'host02' 13 | port = 2055 14 | N = 10000 15 | 16 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 17 | sock.sendto(tpl, (host, port)) 18 | time.sleep(0.2) 19 | 20 | 21 | print("%d: started sending %d Cisco ASR 9000 flows in %d packets totaling %d bytes" % (time.time(),N*21, N, N*len(data))) 22 | for i in range(0, N): 23 | sock.sendto(data, (host, port)) 24 | -------------------------------------------------------------------------------- /spec/codecs/netflow_stress.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import sys 3 | import time 4 | import random 5 | 6 | ## Standalone Netflow v9 stressor 7 | ## Used to reproduce issue 91 https://github.com/logstash-plugins/logstash-codec-netflow/issues/91 8 | 9 | host = 'host02' 10 | port = 2055 11 | 12 | tpl = 
'\x00\t\x00\x01e\x9c\xc0_XF\x8eU\x01u\xc7\x03\x00\x00\x08\x81\x00\x00\x00d\x01\x04\x00\x17\x00\x02\x00\x04\x00\x01\x00\x04\x00\x08\x00\x04\x00\x0c\x00\x04\x00\n\x00\x04\x00\x0e\x00\x04\x00\x15\x00\x04\x00\x16\x00\x04\x00\x07\x00\x02\x00\x0b\x00\x02\x00\x10\x00\x04\x00\x11\x00\x04\x00\x12\x00\x04\x00\t\x00\x01\x00\r\x00\x01\x00\x04\x00\x01\x00\x06\x00\x01\x00\x05\x00\x01\x00=\x00\x01\x00Y\x00\x01\x000\x00\x02\x00\xea\x00\x04\x00\xeb\x00\x04' 13 | 14 | # 21 flows: 15 | data = '\x00\t\x00\x15e\x9c\xbcqXF\x8eT\x01u\xc6\xa1\x00\x00\x08\x81\x01\x04\x05\\\x00\x00\x00\x01\x00\x00\x00(\n\x00\t\x92\n\x00\x1fQ\x00\x00\x00n\x00\x00\x00\x9ee\x9cG\x05e\x9cG\x05\xd3\x01\x01\xbb\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x10\x14\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00h\n\x00\x11*\n\x00#\x04\x00\x00\x00W\x00\x00\x00\x9ee\x9cI\x88e\x9cG\x07\x8e\x84\x01\xbb\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x15\x10\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x004\n\x00\x16o\n\x00"\x8d\x00\x00\x00h\x00\x00\x00\x9ee\x9cG\ne\x9cG\nA\xae\x01\xbb\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x18\x10\x06\x11\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x01\xb3\n\x00\x17;\n\x00$\xaa\x00\x00\x00V\x00\x00\x00\x9ee\x9cG\x0ce\x9cG\x0c\x005\xfd,\x00\x00\x00\x00\x00\x00\xfb\xf1\n\x00\x0e\x1f\x19\x13\x11\x00\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x03\xc9\n\x00"G\n\x00\x14\xf2\x00\x00\x00\x9e\x00\x00\x00je\x9cG\re\x9cG\r\x01\xbb\x07\xdd\x00\x00\xfb\xf0\x00\x00\xff\xa2\n\x00\x12\x05\x10\x15\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00h\n\x00\n\x85\n\x00\x1ef\x00\x00\x00n\x00\x00\x00\x9ee\x9cG\re\x9cF\xba\x89\xc9\x00P\x00\x00\x00\x00\x00\x00\xfb\xf0\n\x00\x0e!\x10\x10\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x004\n\x00%\x1d\n\x00\x06\x18\x00\x00\x00f\x00\x00\x00\xa2e\x9cG\x10e\x9cG\x10\x00P\xdd\xc3\x00\x00;\x1d\x00\x00\xff\x97\
n\x00\x00\xf2\x18\x10\x06\x10 \x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x02f\n\x00 \xb0\n\x00\x0bq\x00\x00\x00\x9e\x00\x00\x00.e\x9cG\x10e\x9cG\x10\x01\xbb\xdd\xfe\x00\x00\xfb\xf0\x00\x00\xff\x98\n\x00\x12i\x14\x10\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x03\x00\x00\x10\xfe\n\x00\x0c\x15\n\x00\x0f&\x00\x00\x00W\x00\x00\x00\x9ee\x9cG\x11e\x9c1\xe7\x01\xbb\x9c\x8e\x00\x00\x80\xa6\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x10\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x02\x15\n\x00\x04\xd4\n\x00\x03n\x00\x00\x00\xa2\x00\x00\x00fe\x9cT\x07e\x9cG\x12\xc6\x03\x01\xbb\x00\x00\xff\x97\x00\x00\x00F\n\x00\x10e\x10\x11\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x01E\x00\x005\\\n\x00!z\n\x00\x01\x88\x00\x00\x00\x9e\x00\x00\x00he\x9co\xd0e\x9c"\x1a\xe5\xbe\x00P\x00\x00\xfb\xf1\x00\x00\x00\x00\x00\x00\x00\x00\x15\x1b\x06\x10\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00Y\n\x00\x14\xf2\n\x00"G\x00\x00\x00j\x00\x00\x00\x9ee\x9cG\x14e\x9cG\x14\x07\xdd\x01\xbb\x00\x00\xff\xa2\x00\x00\xfb\xf0\n\x00\x0e!\x15\x10\x06\x18`\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x03A\n\x00\r\x19\n\x00\x0f&\x00\x00\x00W\x00\x00\x00\x9ee\x9cG\x16e\x9cG\x16\x01\xbb\xc9\xa5\x00\x00\x80\xa6\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x06Y\n\x00\x19;\n\x00\x02\x12\x00\x00\x00\x9e\x00\x00\x00ne\x9cG\x18e\x9cF\xbf\x01\xbb\xf4\x00\x00\x00\xfb\xf0\x00\x00\xff\x9d\n\x00\x12~\x10\x10\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00a\x00\x02+h\n\x00\x07I\n\x00\x1b\xa8\x00\x00\x00V\x00\x00\x00\x9ee\x9cu\xabe\x9c1\xfe\xeb\x98\x01\xd1\x00\x00\xff\x9c\x00\x00\xfb\xf0\n\x00\x0e!\x10\x10\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00:\x00\x00\x0b\xc8\n\x00\x132\n\x00\x1b\xa9\x00\x00\x00j\x00\x00\x00\x9ee\x9cO\xcbe\x9cE:\x86\x94\x03\xe3\x00\x00\xff\xb7\x00\x00\xfb\xf0\n\x00\x0e!\x12\x10\x06\x10\x
00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x15\x00\x00{\x0c\n\x00\x1c\x96\n\x00\x18\r\x00\x00\x00\x9e\x00\x00\x00he\x9cHYe\x9cF\xf0\x01\xbb\xc2\xfd\x00\x00\xfb\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x10\x19\x06\x10\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x03\x00\x00\x0bg\n\x00\x1a\xbc\n\x00\x15\xc8\x00\x00\x00\x9e\x00\x00\x00We\x9cGfe\x9cE\xec\x03\xe1\xc4N\x00\x00\xfb\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x10\x19\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x05\x00\x00\x11\xa2\n\x00\x1d"\n\x00\x0f&\x00\x00\x00K\x00\x00\x00\x9ee\x9cm`e\x9cA\xfe\x01\xbb\x8c\x8f\x00\x00;A\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x18\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x01\x00\x00\x01F\n\x00\x08\xc8\n\x00\x05\xe0\x00\x00\x00f\x00\x00\x00\xa2e\x9cG\x1de\x9cG\x1dZX\xc9\xd7\x00\x00\x03\x15\x00\x00\xff\x97\n\x00\x00\xf2\x10\x10\x06\x18\x00\x00@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00p\n\x00\x1d.\n\x00\x0f&\x00\x00\x00K\x00\x00\x00\x9ee\x9cG\x1de\x9c@\xea\x01\xbb\xcc\x8c\x00\x00;A\x00\x00\xfb\xf2\n\x00\x0e\x1b\x18\x18\x06\x12\x00\x01@\x00\x01`\x00\x00\x00`\x00\x00\x00\x00\x00\x00' 16 | 17 | 18 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 19 | 20 | print("NETFLOW v9: sending 1 template 1 data packet in an infinite loop") 21 | 22 | duration = 0.0 23 | while True: 24 | for i in range(0,400): 25 | sock.sendto(tpl, (host, port)) 26 | sock.sendto(data, (host, port)) 27 | sys.stdout.write('.') 28 | sys.stdout.flush() 29 | time.sleep(random.random()) 30 | print 31 | -------------------------------------------------------------------------------- /RFC_COMPLIANCE_NETFLOW_v9.md: -------------------------------------------------------------------------------- 1 | # Netflow v9 compliance 2 | 3 | The level of RFC compliance reached for collector-relevant requirements: 4 | 5 | | RFC | Level | 6 | |-----------|----------------------------------------------| 7 | | RFC 3954 | 100% of RFC "MUST" requirements 
implemented | 8 | | RFC 3954 | 0% of RFC "SHOULD" requirements implemented | 9 | | RFC 3954 | 83% of IEs 1-127 supported | 10 | | RFC 3954 | 90% of IEs 127-32768 supported | 11 | 12 | ## RFC 3954 collector compliance summary 13 | 14 | Summary of collector-relevant requirements implemented versus the total collector-relevant requirements: 15 | 16 | | Chapter |MUST |SHOULD| MAY| 17 | |----------------------------------------------|-----|-----|-----| 18 | | 1. Introduction | | | | 19 | | 2. Terminology | | | | 20 | | 3. NetFlow High-Level Picture on the Exporter| | | | 21 | | 4. Packet layout | | | | 22 | | 5. Export packet format | 1/1 | 0/2 | | 23 | | 6. Options | 1/1 | | | 24 | | 7. Template management | 3/3 | | | 25 | | 8. Field type definitions | | | | 26 | | 9. The collector side | 5/5 | 0/3 | | 27 | | 10. Security considerations | | | | 28 | 29 | ## RFC 3954 collector compliance details 30 | 31 | The tables below detail the collector-relevant requirements, and whether or not they are implemented: 32 | 33 | ### 5. Export packet format 34 | 35 | | Requirement |MUST |SHOULD| MAY| 36 | |---------------------------------------|-----|-----|-----| 37 | | 5.1 Incremental sequence counter of all Export Packets sent from the current Observation Domain by the Exporter. This value MUST be cumulative, and SHOULD be used by the Collector to identify whether any Export Packets have been missed. | | NO | | 38 | | 5.1 NetFlow Collectors SHOULD use the combination of the source IP address and the Source ID field to separate different export streams originating from the same Exporter. | | NO | | 39 | | 5.3 The Collector MUST use the FlowSet ID to find the corresponding Template Record and decode the Flow Records from the FlowSet. | YES | | | 40 | 41 | ### 6. 
Options 42 | 43 | | Requirement |MUST |SHOULD| MAY| 44 | |---------------------------------------|-----|-----|-----| 45 | | 6.2 The Collector MUST use the FlowSet ID to map the appropriate type and length to any field values that follow. | YES | | | 46 | 47 | ### 7. Template management 48 | 49 | | Requirement |MUST |SHOULD| MAY| 50 | |---------------------------------------|-----|-----|-----| 51 | | 7. the NetFlow Collector MUST store the Template Record to interpret the corresponding Flow Data Records that are received in subsequent data packets. | YES | | | 52 | | 7. A NetFlow Collector that receives Export Packets from several Observation Domains from the same Exporter MUST be aware that the uniqueness of the Template ID is not guaranteed across Observation Domains. | YES | | | 53 | | 7. If a Collector should receive a new definition for an already existing Template ID, it MUST discard the previous template definition and use the new one. | YES | | | 54 | 55 | ### 9. The collector side 56 | 57 | | Requirement |MUST |SHOULD| MAY| 58 | |---------------------------------------|-----|-----|-----| 59 | | 9. If the Template Records have not been received at the time Flow Data Records (or Options Data Records) are received, the Collector SHOULD store the Flow Data Records (or Options Data Records) and decode them after the Template Records are received. | | NO | | 60 | | 9. A Collector device MUST NOT assume that the Data FlowSet and the associated Template FlowSet (or Options Template FlowSet) are exported in the same Export Packet. | YES | | | 61 | | 9. The Collector MUST NOT assume that one and only one Template FlowSet is present in an Export Packet. | YES | | | 62 | | 9. The Collector MUST NOT attempt to decode the Flow or Options Data Records with an expired Template. | YES | | | 63 | | 9. 
At any given time the Collector SHOULD maintain the following for all the current Template Records and Options Template Records: Exporter, Observation Domain, Template ID, Template Definition, Last Received. | | NO | | 64 | | 9. In the event of a clock configuration change on the Exporter, the Collector SHOULD discard all Template Records and Options Template Records associated with that Exporter, in order for Collector to learn the new set of fields: Exporter, Observation Domain, Template ID, Template Definition, Last Received. | | NO | | 65 | | 9. If the Collector receives a new Template Record (for example, in the case of an Exporter restart) it MUST immediately override the existing Template Record. | YES | | | 66 | | 9. Finally, note that the Collector MUST accept padding in the Data FlowSet and Options Template FlowSet, which means for the Flow Data Records, the Options Data Records and the Template Records. | YES | | | 67 | 68 | 69 | 70 | ## RFC 3954 Information Elements support details 71 | 72 | From the IEs 1-127, these are not yet supported: 73 | 74 | |id | name 75 | |---|-------------- 76 | |70 |MPLS_LABEL_1 77 | |71 |MPLS_LABEL_2 78 | |72 |MPLS_LABEL_3 79 | |73 |MPLS_LABEL_4 80 | |74 |MPLS_LABEL_5 81 | |75 |MPLS_LABEL_6 82 | |76 |MPLS_LABEL_7 83 | |77 |MPLS_LABEL_8 84 | |78 |MPLS_LABEL_9 85 | |79 |MPLS_LABEL_10 86 | |90 | MPLS PAL RD 87 | |91 | MPLS PREFIX LEN 88 | |92 | SRC TRAFFIC INDEX 89 | |93 | DST TRAFFIC INDEX 90 | |95 | APPLICATION TAG 91 | |99 | replication factor 92 | |102| layer2packetSectionOffset 93 | |103| layer2packetSectionSize 94 | |104| layer2packetSectionData 95 | 96 | From the IEs 128-, these are not yet supported: 97 | 98 | |id | name |data type 99 | |---|--------------|----- 100 | |434|mibObjectValueInteger|signed32 101 | |435|mibObjectValueOctetString|octetArray 102 | |436|mibObjectValueOID|octetArray 103 | |437|mibObjectValueBits|octetArray 104 | |438|mibObjectValueIPAddress|ipv4Address 105 | |439|mibObjectValueCounter|unsigned64 
106 | |440|mibObjectValueGauge|unsigned32 107 | |441|mibObjectValueTimeTicks|unsigned32 108 | |442|mibObjectValueUnsigned|unsigned32 109 | |443|mibObjectValueTable|subTemplateList 110 | |444|mibObjectValueRow|subTemplateList 111 | |445|mibObjectIdentifier|octetArray 112 | |446|mibSubIdentifier|unsigned32 113 | |447|mibIndexIndicator|unsigned64 114 | |448|mibCaptureTimeSemantics|unsigned8 115 | |449|mibContextEngineID|octetArray 116 | |450|mibContextName|string 117 | |451|mibObjectName|string 118 | |452|mibObjectDescription|string 119 | |453|mibObjectSyntax|string 120 | |454|mibModuleName|string 121 | |455|mobileIMSI|string 122 | |456|mobileMSISDN|string 123 | |457|httpStatusCode|unsigned16 124 | |458|sourceTransportPortsLimit|unsigned16 125 | |459|httpRequestMethod|string 126 | |460|httpRequestHost|string 127 | |461|httpRequestTarget|string 128 | |462|httpMessageVersion|string 129 | |463|natInstanceID|unsigned32 130 | |464|internalAddressRealm|octetArray 131 | |465|externalAddressRealm|octetArray 132 | |466|natQuotaExceededEvent|unsigned32 133 | |467|natThresholdEvent|unsigned32 134 | |468|httpUserAgent|string 135 | |469|httpContentType|string 136 | |470|httpReasonPhrase|string 137 | 138 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 4.3.2 2 | - Updates the milliseconds rounding for IPFIX start/end milliseconds fields. 3 | - Fix the test to run on Logstash 8 with microseconds precision. 
[#206](https://github.com/logstash-plugins/logstash-codec-netflow/pull/206) 4 | 5 | ## 4.3.1 6 | - Fixed unable to initialize the plugin with Logstash 8.10+ [#205](https://github.com/logstash-plugins/logstash-codec-netflow/pull/205) 7 | 8 | ## 4.3.0 9 | - Added Gigamon ipfix definitions [#199](https://github.com/logstash-plugins/logstash-codec-netflow/pull/199) 10 | 11 | ## 4.2.2 12 | - Feat: leverage event_factory support [#195](https://github.com/logstash-plugins/logstash-codec-netflow/pull/195) 13 | - Test: remove redundant asserts (to get the CI green) 14 | 15 | ## 4.2.1 16 | 17 | - Fix sub-second timestamp math 18 | 19 | ## 4.2.0 20 | 21 | - Added Cisco ACI to list of known working Netflow v9 exporters 22 | - Added support for IXIA Packet Broker IPFIX 23 | - Fixed issue with Procera float fields 24 | 25 | ## 4.1.2 26 | 27 | - Fixed issue where TTL in template registry was not being respected. 28 | 29 | ## 4.1.1 30 | 31 | - Reduced complexity of creating, persisting, loading an retrieving template caches. 32 | 33 | ## 4.1.0 34 | 35 | - Added support for Netflow v9 devices with VarString fields (H3C Netstream) 36 | 37 | ## 4.0.2 38 | 39 | - Fixed incorrect parsing of zero-filled Netflow 9 packets from Palo Alto 40 | 41 | ## 4.0.1 42 | 43 | - Fixed IPFIX options template parsing for Juniper MX240 JunOS 15.1 44 | 45 | ## 4.0.0 46 | 47 | - Added support for RFC6759 decoding of application_id. **This is a breaking change to the way application_id is decoded. The format changes from e.g. 
0:40567 to 0..12356..40567** 48 | 49 | ## 3.14.1 50 | 51 | - Fixes exception when receiving Netflow 9 from H3C devices 52 | 53 | ## 3.14.0 54 | 55 | - Added support for Netflow 9 from H3C devices 56 | 57 | ## 3.13.2 58 | 59 | - Fixes incorrect definitions of IE 231 and IE 232 60 | 61 | ## 3.13.1 62 | 63 | - Fixes exceptions due to concurrent access of IPFIX templates, see issue #134 64 | 65 | ## 3.13.0 66 | 67 | - Added support for Netflow 9 reduced-size encoding support 68 | - Added support for Barracuda IPFIX Extended Uniflow 69 | 70 | ## 3.12.0 71 | 72 | - Added support for IPFIX from Procera/NetIntact/Sandvine 15.1 73 | 74 | ## 3.11.4 75 | 76 | - Workaround for breaking change in Netflow-Input-UDP > 3.2.0, see issue #122 77 | 78 | ## 3.11.3 79 | 80 | - Renamed some unknown VMware VDS fields 81 | 82 | ## 3.11.2 83 | 84 | - Further improved decoding performance of ASA ACL ids 85 | - Further improved decoding performance of MAC addresses 86 | - Improved decoding performance of IPv4 addresses 87 | 88 | ## 3.11.1 89 | 90 | - Improved decoding performance of ASA ACL ids 91 | - Improved decoding performance of mac addresses 92 | 93 | ## 3.11.0 94 | 95 | - Updated Netflow v9 IE coverage from 10% to 90% 96 | - Added support for Huawei Netstream 97 | 98 | ## 3.10.0 99 | 100 | - Added support for Nokia BRAS 101 | 102 | ## 3.9.1 103 | 104 | - Added Netflow v9 IE150 IE151, IE154, IE155 105 | 106 | ## 3.9.0 107 | 108 | - Added vIPtela support 109 | - Added fields for Cisco ASR1k 110 | 111 | ## 3.8.3 112 | 113 | - Fixed a race condition that could cause some errors when running in a multithreaded input 114 | 115 | ## 3.8.2 116 | 117 | - Fixed exceptions due to NilClass in util.rb and netflow.rb 118 | 119 | ## 3.8.1 120 | 121 | - Prevent Netflow and IPFIX templates from being modified concurrently 122 | - Improved Palo Alto support and added rspec test 123 | 124 | ## 3.8.0 125 | 126 | - Added initial YAF support with applabel and silk (but without DPI plugins because of 
complex data types) 127 | 128 | ## 3.7.1 129 | 130 | - Update gemspec summary 131 | - Added support for CISCO1941/K9 software 15.1 132 | - Added undocumented Netscaler fields 133 | 134 | ## 3.7.0 135 | 136 | - Added support for Cisco WLC 8510 software 8.2 137 | 138 | ## 3.6.0 139 | 140 | - Added support for nprobe L7 DPI 141 | - Added support for Fortigate FortiOS 5.4.x (application_id) 142 | 143 | ## 3.5.2 144 | 145 | - Fix some documentation issues 146 | 147 | ## 3.5.1 148 | 149 | - Added test for Fortigate FortiOS 5.2 (Netflow v9) 150 | - Added permission check to templates cache (Issue #80) 151 | - Clarified confusing warning about missing templates 152 | - Added test for Barracuda firewall (IPFIX) 153 | 154 | ## 3.5.0 155 | 156 | - Added support for Cisco WLC (Netflow v9) 157 | 158 | ## 3.4.0 159 | 160 | - Added support for Cisco NBAR (Netflow v9) 161 | 162 | ## 3.3.0 163 | 164 | - Added support for Cisco ASR 9000 (Netflow v9) 165 | 166 | ## 3.2.5 167 | 168 | - Added support for Streamcore StreamGroomer (Netflow v9) 169 | - Fixed docs so they can generate 170 | 171 | ## 3.2.4 172 | 173 | - Fixed 0-length template field length (Netflow 9) 174 | 175 | ## 3.2.3 176 | 177 | - Fixed 0-length scope field length (Netflow 9, Juniper SRX) 178 | - Fixed JRuby 9K compatibility 179 | 180 | ## 3.2.2 181 | 182 | - Added support for VMware VDS IPFIX although field definitions are unknown 183 | 184 | ## 3.2.1 185 | 186 | - Fix/Refactor IPFIX microsecond/nanosecond interpretation (NTP Timestamp based) 187 | - Note a possible bug in Netscaler implementation where the fraction is proabably output as microseconds 188 | - Correct rspec testing for new/correct implementation of microseconds, never noticed the insane values before, mea culpa 189 | 190 | ## 3.2.0 191 | 192 | - Add Netflow v9/v10 template caching, configurable TTL 193 | - Add option for including flowset_id for Netflow v10 194 | - Refactor/simplify Netflow v9/v10 templates processing 195 | - Add variable length field 
support 196 | - Add OctetArray support 197 | - Add Citrix Netscaler (IPFIX) support 198 | - Add spec tests and anonymized test data for all of the above 199 | 200 | ## 3.1.4 201 | 202 | - Added support for MPLS labels 203 | - Added support for decoding forwarded status field (Netflow 9) 204 | 205 | ## 3.1.3 206 | 207 | - Confirmed support and tests added for 4 Netflow/IPFIX exporters 208 | 209 | ## 3.1.2 210 | 211 | - Relax constraint on logstash-core-plugin-api to >= 1.60 <= 2.99 212 | 213 | ## 3.1.1 214 | 215 | - Small update due to breaking change in BinData gem (issue #41) 216 | 217 | ## 3.1.0 218 | 219 | - Added IPFIX support 220 | 221 | ## 3.0.1 222 | 223 | - Republish all the gems under jruby. 224 | 225 | ## 3.0.0 226 | 227 | - Update the plugin to the version 2.0 of the plugin api, this change is required for Logstash 5.0 compatibility. See https://github.com/elastic/logstash/issues/5141 228 | - Fixed exception if Netflow data contains MAC addresses (issue #26, issue #34) 229 | - Fixed exceptions when receiving invalid Netflow v5 and v9 data (issue #17, issue 18) 230 | - Fixed decoding Netflow templates from multiple (non-identical) exporters 231 | - Add support for Cisco ASA fields 232 | - Add support for Netflow 9 options template with scope fields 233 | 234 | # 2.0.5 235 | 236 | - Depend on logstash-core-plugin-api instead of logstash-core, removing the need to mass update plugins on major releases of logstash 237 | 238 | # 2.0.4 239 | 240 | - New dependency requirements for logstash-core for the 5.0 release 241 | 242 | ## 2.0.3 243 | 244 | - Fixed JSON compare flaw in specs 245 | 246 | ## 2.0.0 247 | 248 | - Plugins were updated to follow the new shutdown semantic, this mainly allows Logstash to instruct input plugins to terminate gracefully, 249 | instead of using Thread.raise on the plugins' threads. 
Ref: https://github.com/elastic/logstash/pull/3895 250 | - Dependency on logstash-core update to 2.0 251 | 252 | -------------------------------------------------------------------------------- /docs/index.asciidoc: -------------------------------------------------------------------------------- 1 | :plugin: netflow 2 | :type: codec 3 | 4 | /////////////////////////////////////////// 5 | START - GENERATED VARIABLES, DO NOT EDIT! 6 | /////////////////////////////////////////// 7 | :version: %VERSION% 8 | :release_date: %RELEASE_DATE% 9 | :changelog_url: %CHANGELOG_URL% 10 | :include_path: ../../../../logstash/docs/include 11 | /////////////////////////////////////////// 12 | END - GENERATED VARIABLES, DO NOT EDIT! 13 | /////////////////////////////////////////// 14 | 15 | [id="plugins-{type}s-{plugin}"] 16 | 17 | === Netflow codec plugin 18 | 19 | include::{include_path}/plugin_header.asciidoc[] 20 | 21 | ==== Description 22 | 23 | The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
24 | 25 | ==== Supported Netflow/IPFIX exporters 26 | 27 | This codec supports: 28 | 29 | * Netflow v5 30 | * Netflow v9 31 | * IPFIX 32 | 33 | The following Netflow/IPFIX exporters have been seen and tested with the most recent version of the Netflow Codec: 34 | 35 | [cols="6,^2,^2,^2,12",options="header"] 36 | |=========================================================================================== 37 | |Netflow exporter | v5 | v9 | IPFIX | Remarks 38 | |Barracuda Firewall | | | y | With support for Extended Uniflow 39 | |Cisco ACI | | y | | 40 | |Cisco ASA | | y | | 41 | |Cisco ASR 1k | | | N | Fails because of duplicate fields 42 | |Cisco ASR 9k | | y | | 43 | |Cisco IOS 12.x | | y | | 44 | |Cisco ISR w/ HSL | | N | | Fails because of duplicate fields, see: https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 45 | |Cisco WLC | | y | | 46 | |Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown 47 | |fprobe | y | | | 48 | |Fortigate FortiOS | | y | | 49 | |Huawei Netstream | | y | | 50 | |ipt_NETFLOW | y | y | y | 51 | |IXIA packet broker | | | y | 52 | |Juniper MX | y | | y | SW > 12.3R8. Fails to decode IPFIX from Junos 16.1 due to duplicate field names which we currently don't support. 53 | |Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow 54 | |nProbe | y | y | y | L7 DPI fields now also supported 55 | |Nokia BRAS | | | y | 56 | |OpenBSD pflow | y | N | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 57 | |Riverbed | | N | | Not supported due to field ID conflicts. 
Workaround available in the definitions directory over at Elastiflow 58 | |Sandvine Procera PacketLogic| | | y | v15.1 59 | |Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd 60 | |Sophos UTM | | | y | 61 | |Streamcore Streamgroomer | | y | | 62 | |Palo Alto PAN-OS | | y | | 63 | |Ubiquiti Edgerouter X | | y | | With MPLS labels 64 | |VMware VDS | | | y | Still some unknown fields 65 | |YAF | | | y | With silk and applabel, but no DPI plugin support 66 | |vIPtela | | | y | 67 | |=========================================================================================== 68 | 69 | ==== Usage 70 | 71 | Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: 72 | 73 | [source, ruby] 74 | -------------------------- 75 | input { 76 | udp { 77 | port => 2055 78 | codec => netflow 79 | } 80 | } 81 | -------------------------- 82 | 83 | For high-performance production environments the configuration below will decode up to 15000 flows/sec from a Cisco ASR 9000 router on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. 84 | 85 | Note that for richer flows from a Cisco ASA firewall this number will be at least 3x lower. 
86 | 87 | [source, ruby] 88 | -------------------------- 89 | input { 90 | udp { 91 | port => 2055 92 | codec => netflow 93 | receive_buffer_bytes => 16777216 94 | workers => 16 95 | } 96 | -------------------------- 97 | 98 | To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: 99 | 100 | # sysctl -w net.core.rmem_max=$((1024*1024*16)) 101 | 102 | [id="plugins-{type}s-{plugin}-options"] 103 | ==== Netflow Codec Configuration Options 104 | 105 | [cols="<,<,<",options="header",] 106 | |======================================================================= 107 | |Setting |Input type|Required 108 | | <> |a valid filesystem path|No 109 | | <> |<>|No 110 | | <> |<>|No 111 | | <> |a valid filesystem path|No 112 | | <> |a valid filesystem path|No 113 | | <> |<>|No 114 | | <> |<>|No 115 | |======================================================================= 116 | 117 |   118 | 119 | [id="plugins-{type}s-{plugin}-cache_save_path"] 120 | ===== `cache_save_path` 121 | 122 | * Value type is <> 123 | * There is no default value for this setting. 124 | 125 | Enables the template cache and saves it in the specified directory. This 126 | minimizes data loss after Logstash restarts because the codec doesn't have to 127 | wait for the arrival of templates, but instead reload already received 128 | templates received during previous runs. 129 | 130 | Template caches are saved as: 131 | 132 | * <>/netflow_templates.cache for Netflow v9 templates. 133 | * <>/ipfix_templates.cache for IPFIX templates. 
134 | 135 | [id="plugins-{type}s-{plugin}-cache_ttl"] 136 | ===== `cache_ttl` 137 | 138 | * Value type is <> 139 | * Default value is `4000` 140 | 141 | Netflow v9/v10 template cache TTL (seconds) 142 | 143 | [id="plugins-{type}s-{plugin}-include_flowset_id"] 144 | ===== `include_flowset_id` 145 | 146 | * Value type is <> 147 | * Default value is `false` 148 | 149 | Only makes sense for ipfix, v9 already includes this 150 | Setting to true will include the flowset_id in events 151 | Allows you to work with sequences, for instance with the aggregate filter 152 | 153 | [id="plugins-{type}s-{plugin}-ipfix_definitions"] 154 | ===== `ipfix_definitions` 155 | 156 | * Value type is <> 157 | * There is no default value for this setting. 158 | 159 | Override YAML file containing IPFIX field definitions 160 | 161 | Very similar to the Netflow version except there is a top level Private 162 | Enterprise Number (PEN) key added: 163 | 164 | [source,yaml] 165 | -------------------------- 166 | pen: 167 | id: 168 | - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string 169 | - :name 170 | id: 171 | - :skip 172 | -------------------------- 173 | 174 | There is an implicit PEN 0 for the standard fields. 175 | 176 | See for the base set. 177 | 178 | [id="plugins-{type}s-{plugin}-netflow_definitions"] 179 | ===== `netflow_definitions` 180 | 181 | * Value type is <> 182 | * There is no default value for this setting. 183 | 184 | Override YAML file containing Netflow field definitions 185 | 186 | Each Netflow field is defined like so: 187 | 188 | [source,yaml] 189 | -------------------------- 190 | id: 191 | - default length in bytes 192 | - :name 193 | id: 194 | - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string 195 | - :name 196 | id: 197 | - :skip 198 | -------------------------- 199 | 200 | See for the base set. 
201 | 202 | [id="plugins-{type}s-{plugin}-target"] 203 | ===== `target` 204 | 205 | * Value type is <> 206 | * Default value is `"netflow"` 207 | 208 | Specify into what field you want the Netflow data. 209 | 210 | [id="plugins-{type}s-{plugin}-versions"] 211 | ===== `versions` 212 | 213 | * Value type is <> 214 | * Default value is `[5, 9, 10]` 215 | 216 | Specify which Netflow versions you will accept. 217 | 218 | 219 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/netflow_bench_cisco_asa.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import sys 3 | import time 4 | 5 | 6 | # Netflow v9 template 7 | tpl = '\x00\t\x00\r\x00\x1fz\xc4V\x17\x8dE\x00\x00\x02\x95\x00\x00\x00\x00\x00\x00\x03\xe0\x01\x00\x00\x15\x00\x94\x00\x04\x00\x08\x00\x04\x00\x07\x00\x02\x00\n\x00\x02\x00\x0c\x00\x04\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb0\x00\x01\x00\xb1\x00\x01\x9cA\x00\x04\x9cB\x00\x04\x9cC\x00\x02\x9cD\x00\x02\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x9c@\x00\x14\x01\x01\x00\x15\x00\x94\x00\x04\x00\x08\x00\x04\x00\x07\x00\x02\x00\n\x00\x02\x00\x0c\x00\x04\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb0\x00\x01\x00\xb1\x00\x01\x9cA\x00\x04\x9cB\x00\x04\x9cC\x00\x02\x9cD\x00\x02\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x9c@\x00A\x01\x02\x00\x11\x00\x94\x00\x04\x00\x1b\x00\x10\x00\x07\x00\x02\x00\n\x00\x02\x00\x1c\x00\x10\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb2\x00\x01\x00\xb3\x00\x01\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x9c@\x00\x14\x01\x03\x00\x11\x00\x94\x00\x04\x00\x1b\x00\x10\x00\x07\x00\x02\x00\n\x00\x02\x00\x1c\x00\x10\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb2\x00\x01\x00\xb3\x00\x01\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x80\xe8\x00\x0c\x80\xe9\x00\x0
c\x9c@\x00A\x01\x04\x00\x12\x00\x08\x00\x04\x00\x07\x00\x02\x00\n\x00\x02\x00\x0c\x00\x04\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb0\x00\x01\x00\xb1\x00\x01\x9cA\x00\x04\x9cB\x00\x04\x9cC\x00\x02\x9cD\x00\x02\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x01\x05\x00\x0e\x00\x08\x00\x04\x00\x07\x00\x02\x00\n\x00\x02\x00\x0c\x00\x04\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb0\x00\x01\x00\xb1\x00\x01\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x01\x06\x00\x0e\x00\x1b\x00\x10\x00\x07\x00\x02\x00\n\x00\x02\x00\x1c\x00\x10\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb2\x00\x01\x00\xb3\x00\x01\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x01\x07\x00\x12\x00\x94\x00\x04\x00\x08\x00\x04\x00\x07\x00\x02\x00\n\x00\x02\x00\x0c\x00\x04\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb0\x00\x01\x00\xb1\x00\x01\x9cA\x00\x04\x9cB\x00\x04\x9cC\x00\x02\x9cD\x00\x02\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x01\x08\x00\x0e\x00\x94\x00\x04\x00\x1b\x00\x10\x00\x07\x00\x02\x00\n\x00\x02\x00\x1c\x00\x10\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb2\x00\x01\x00\xb3\x00\x01\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x01\t\x00\x16\x00\x94\x00\x04\x00\x08\x00\x04\x00\x07\x00\x02\x00\n\x00\x02\x00\x0c\x00\x04\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb0\x00\x01\x00\xb1\x00\x01\x9cA\x00\x04\x9cB\x00\x04\x9cC\x00\x02\x9cD\x00\x02\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x00\x98\x00\x08\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x9c@\x00\x14\x01\n\x00\x16\x00\x94\x00\x04\x00\x08\x00\x04\x00\x07\x00\x02\x00\n\x00\x02\x00\x0c\x00\x04\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb0\x00\x01\x00\xb1\x00\x01\x9cA\x00\x04\x9cB\x00\x04\x9cC\x00\x02\x9cD\x00\x02\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x00\x98\x00\x08\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x9c@\x00A\x01\x0b\x00\x12\x00\x94\x00\x04\x00\x1b\x00\x10\x00\x
07\x00\x02\x00\n\x00\x02\x00\x1c\x00\x10\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb2\x00\x01\x00\xb3\x00\x01\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x00\x98\x00\x08\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x9c@\x00\x14\x01\x0c\x00\x12\x00\x94\x00\x04\x00\x1b\x00\x10\x00\x07\x00\x02\x00\n\x00\x02\x00\x1c\x00\x10\x00\x0b\x00\x02\x00\x0e\x00\x02\x00\x04\x00\x01\x00\xb2\x00\x01\x00\xb3\x00\x01\x9cE\x00\x01\x80\xea\x00\x02\x01C\x00\x08\x00U\x00\x04\x00\x98\x00\x08\x80\xe8\x00\x0c\x80\xe9\x00\x0c\x9c@\x00A' 8 | 9 | # Cisco ASA 14 flows: 10 | data = '\x00\t\x00\x0e\x00\x1f\x80\xfdV\x17\x8dG\x00\x00\x02\x96\x00\x00\x00\x00\x01\t\x05\x98\x00\x00!4\xc0\xa8\x0e\x01\x00\x00\x00\x03\x02\x02\x02\x0bD\x8d\x00\x02\x01\x00\x00\xc0\xa8\x0e\x01\x02\x02\x02\x0b\x00\x00D\x8d\x02\x07\xe9\x00\x00\x01PK\xff\xd7\xdf\x00\x00\x008\x00\x00\x01PK\xff\xcf\xf1\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!5\xc0\xa8\x17\x16D\x8d\x00\x02\xa4\xa4%\x0b\x00\x00\x00\x03\x01\x08\x00\xc0\xa8\x17\x16\xa4\xa4%\x0bD\x8d\x00\x00\x02\x07\xe9\x00\x00\x01PK\xff\xda#\x00\x00\x008\x00\x00\x01PK\xff\xd2I\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!6\xa4\xa4%\x0b\x00\x00\x00\x03\xc0\xa8\x17\x16D\x8d\x00\x02\x01\x00\x00\xa4\xa4%\x0b\xc0\xa8\x17\x16\x00\x00D\x8d\x02\x07\xe9\x00\x00\x01PK\xff\xdaK\x00\x00\x008\x00\x00\x01PK\xff\xd2S\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!7\xc0\xa8\x17\x14E\x8d\x00\x02\xa4\xa4%\x0b\x00\x00\x00\x03\x01\x08\x00\xc0\xa8\x17\x14\xa4\xa4%\x0bE\x8d\x00\x00\x02\x07\xe9\x00\x00\x01PK\xff\xdb\x13\x00\x00\x008\x00\x00\x01PK\xff\xd3/\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0
f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!8\xa4\xa4%\x0b\x00\x00\x00\x03\xc0\xa8\x17\x14E\x8d\x00\x02\x01\x00\x00\xa4\xa4%\x0b\xc0\xa8\x17\x14\x00\x00E\x8d\x02\x07\xe9\x00\x00\x01PK\xff\xdb\x1d\x00\x00\x008\x00\x00\x01PK\xff\xd39\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!9\xc0\xa8\x0e\x0bE\x8d\x00\x03\x02\x02\x02\x0b\x00\x00\x00\x02\x01\x08\x00\xc0\xa8\x0e\x0b\x02\x02\x02\x0bE\x8d\x00\x00\x02\x07\xe9\x00\x00\x01PK\xff\xdb\xdb\x00\x00\x008\x00\x00\x01PK\xff\xd3\xed\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!:\x02\x02\x02\x0b\x00\x00\x00\x02\xc0\xa8\x0e\x0bE\x8d\x00\x03\x01\x00\x00\x02\x02\x02\x0b\xc0\xa8\x0e\x0b\x00\x00E\x8d\x02\x07\xe9\x00\x00\x01PK\xff\xdb\xef\x00\x00\x008\x00\x00\x01PK\xff\xd3\xf7\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!;\x02\x02\x02\x0bE\x8d\x00\x02\xc0\xa8\x0e\x01\x00\x00\x00\x03\x01\x08\x00\x02\x02\x02\x0b\xc0\xa8\x0e\x01E\x8d\x00\x00\x02\x07\xe9\x00\x00\x01PK\xff\xdb\xef\x00\x00\x008\x00\x00\x01PK\xff\xd4\x01\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!<\xc0\xa8\x0e\x01\x00\x00\x00\x03\x02\x02\x02\x0bE\x8d\x00\x02\x01\x00\x00\xc0\xa8\x0e\x01\x02\x02\x02\x0b\x00\x00E\x8d\x02\x07\xe9\x00\x00\x01PK\xff\xdb\xef\x00\x00\x008\x00\x00\x01PK\xff\xd4\x0b\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!M\xa4\xa4%\x0b\x00\x00\x00\x03\xc0\xa8\x17\x01\x00\x00\x00\x02\x01\x03\x03\xa4\xa4%\x0b\xc0\xa8\x17\x01\x00\x00\x00\x00\x02\x07\xe0\x00\x00\x01PK\xff\xdee\x00\x00\x00\xa0\x00\x00\x01PK\xff\xdee\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!=\xc0\xa8\x17\x16F\x8d\x00\x02\xa4\xa4%\x0b\x00\x00\x00\x03\x01\x08\x00\xc0\xa8\x17\x16\xa4\xa4%\x0bF\x8d\x00\x00\x02\x07\xe9\x00\x00\x01PK\xff\xdee\x00\x00\x008\x00\x00\x01PK\xff\xd6\x81\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!>\xa4\xa4%\x0b\x00\x00\x00\x03\xc0\xa8\x17\x16F\x8d\x00\x02\x01\x00\x00\xa4\xa4%\x0b\xc0\xa8\x17\x16\x00\x00F\x8d\x02\x07\xe9\x00\x00\x01PK\xff\xdey\x00\x00\x008\x00\x00\x01PK\xff\xd6\x8b\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!?\xc0\xa8\x17\x14F\x8d\x00\x02\xa4\xa4%\x0b\x00\x00\x00\x03\x01\x08\x00\xc0\xa8\x17\x14\xa4\xa4%\x0bF\x8d\x00\x00\x02\x07\xe9\x00\x00\x01PK\xff\xdfA\x00\x00\x008\x00\x00\x01PK\xff\xd7]\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!@\xa4\xa4%\x0b\x00\x00\x00\x03\xc0\xa8\x17\x14F\x8d\x00\x02\x01\x00\x00\xa4\xa4%\x0b\xc0\xa8\x17\x14\x00\x00F\x8d\x02\x07\xe9\x00\x00\x01PK\xff\xdfU\x00\x00\x008\x00\x00\x01PK\xff\xd7g\x0f\x8e\x7f\xf3\xfc\x1a\x03\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' 11 | 12 | host = 'host02' 13 | port = 2055 14 | N = 15000 15 | 16 | sock = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 17 | sock.sendto(tpl, (host, port)) 18 | time.sleep(0.2) 19 | 20 | 21 | print("%d: started sending %d Cisco ASA flows in %d packets totaling %d bytes" % (time.time(),N*14, N, N*len(data))) 22 | for i in range(0, N): 23 | sock.sendto(data, (host, port)) 24 | -------------------------------------------------------------------------------- /spec/codecs/benchmarks/ipfix_bench_sonicwall.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | import socket 3 | import sys 4 | import time 5 | 6 | 7 | # IPFIX template 8 | tpl = "000a00585b6b5242000010bda07e8c0000020048010000100001000400020004000400010008000400070002000a0004000b0002000c0004000e0004000f0004001500040016000400e1000400e2000400e3000200e40002".decode("hex") 9 | ''' 10 | Cisco NetFlow/IPFIX 11 | Version: 10 12 | Length: 88 13 | Timestamp: Aug 8, 2018 14:27:46.000000000 MDT 14 | ExportTime: 1533760066 15 | FlowSequence: 4285 16 | Observation Domain Id: 2692647936 17 | Set 1 [id=2] (Data Template): 256 18 | FlowSet Id: Data Template (V10 [IPFIX]) (2) 19 | FlowSet Length: 72 20 | Template (Id = 256, Count = 16) 21 | Template Id: 256 22 | Field Count: 16 23 | Field (1/16): BYTES 24 | 0... .... .... .... = Pen provided: No 25 | .000 0000 0000 0001 = Type: BYTES (1) 26 | Length: 4 27 | Field (2/16): PKTS 28 | 0... .... .... .... = Pen provided: No 29 | .000 0000 0000 0010 = Type: PKTS (2) 30 | Length: 4 31 | Field (3/16): PROTOCOL 32 | 0... .... .... .... = Pen provided: No 33 | .000 0000 0000 0100 = Type: PROTOCOL (4) 34 | Length: 1 35 | Field (4/16): IP_SRC_ADDR 36 | 0... .... .... .... = Pen provided: No 37 | .000 0000 0000 1000 = Type: IP_SRC_ADDR (8) 38 | Length: 4 39 | Field (5/16): L4_SRC_PORT 40 | 0... .... .... .... = Pen provided: No 41 | .000 0000 0000 0111 = Type: L4_SRC_PORT (7) 42 | Length: 2 43 | Field (6/16): INPUT_SNMP 44 | 0... .... .... .... 
= Pen provided: No 45 | .000 0000 0000 1010 = Type: INPUT_SNMP (10) 46 | Length: 4 47 | Field (7/16): L4_DST_PORT 48 | 0... .... .... .... = Pen provided: No 49 | .000 0000 0000 1011 = Type: L4_DST_PORT (11) 50 | Length: 2 51 | Field (8/16): IP_DST_ADDR 52 | 0... .... .... .... = Pen provided: No 53 | .000 0000 0000 1100 = Type: IP_DST_ADDR (12) 54 | Length: 4 55 | Field (9/16): OUTPUT_SNMP 56 | 0... .... .... .... = Pen provided: No 57 | .000 0000 0000 1110 = Type: OUTPUT_SNMP (14) 58 | Length: 4 59 | Field (10/16): IP_NEXT_HOP 60 | 0... .... .... .... = Pen provided: No 61 | .000 0000 0000 1111 = Type: IP_NEXT_HOP (15) 62 | Length: 4 63 | Field (11/16): LAST_SWITCHED 64 | 0... .... .... .... = Pen provided: No 65 | .000 0000 0001 0101 = Type: LAST_SWITCHED (21) 66 | Length: 4 67 | Field (12/16): FIRST_SWITCHED 68 | 0... .... .... .... = Pen provided: No 69 | .000 0000 0001 0110 = Type: FIRST_SWITCHED (22) 70 | Length: 4 71 | Field (13/16): postNATSourceIPv4Address 72 | 0... .... .... .... = Pen provided: No 73 | .000 0000 1110 0001 = Type: postNATSourceIPv4Address (225) 74 | Length: 4 75 | Field (14/16): postNATDestinationIPv4Address 76 | 0... .... .... .... = Pen provided: No 77 | .000 0000 1110 0010 = Type: postNATDestinationIPv4Address (226) 78 | Length: 4 79 | Field (15/16): postNAPTSourceTransportPort 80 | 0... .... .... .... = Pen provided: No 81 | .000 0000 1110 0011 = Type: postNAPTSourceTransportPort (227) 82 | Length: 2 83 | Field (16/16): postNAPTDestinationTransportPort 84 | 0... .... .... .... 
= Pen provided: No 85 | .000 0000 1110 0100 = Type: postNAPTDestinationTransportPort (228) 86 | Length: 2 87 | ''' 88 | 89 | data = "000a011d5b6b86c50000acf0a07e8c000100010d0010d49a000002fa06acd9022501bb00000002a3290a0000ed000000010a00000100dedac800debb88acd90225c0a8a84101bbdd690000009d00000001114b4b4b4b003500000002222c0a0000ed000000010a00000100de715000de71504b4b4b4bc0a8a8410035398f0000024600000005114a7d8a7101bb00000002d3d10a0000ed000000010a00000100de715000de71504a7d8a71c0a8a84101bb9ca40000038300000004114a7d8a7101bb0000000268920a0000ed000000010a00000100de715000de71504a7d8a71c0a8a84101bba46b000001c6000000040623a8ede501bb000000023d1b0a0000ed000000010a00000100de753800023a5023a8ede5c0a8a84101bbeb10".decode("hex") 90 | 91 | ''' 92 | Cisco NetFlow/IPFIX 93 | Version: 10 94 | Length: 285 95 | Timestamp: Aug 8, 2018 18:11:49.000000000 MDT 96 | ExportTime: 1533773509 97 | FlowSequence: 44272 98 | Observation Domain Id: 2692647936 99 | Set 1 [id=256] (5 flows) 100 | FlowSet Id: (Data) (256) 101 | FlowSet Length: 269 102 | [Template Frame: 54] 103 | Flow 1 104 | Octets: 1103002 105 | Packets: 762 106 | Protocol: TCP (6) 107 | SrcAddr: 172.217.2.37 108 | SrcPort: 443 (443) 109 | InputInt: 2 110 | DstPort: 41769 (41769) 111 | DstAddr: 10.0.0.237 112 | OutputInt: 1 113 | NextHop: 10.0.0.1 114 | [Duration: 8.000000000 seconds (switched)] 115 | StartTime: 14597.000000000 seconds 116 | EndTime: 14605.000000000 seconds 117 | Post NAT Source IPv4 Address: 172.217.2.37 118 | Post NAT Destination IPv4 Address: 192.168.168.65 119 | Post NAPT Source Transport Port: 443 120 | Post NAPT Destination Transport Port: 56681 121 | Flow 2 122 | Octets: 157 123 | Packets: 1 124 | Protocol: UDP (17) 125 | SrcAddr: 75.75.75.75 126 | SrcPort: 53 (53) 127 | InputInt: 2 128 | DstPort: 8748 (8748) 129 | DstAddr: 10.0.0.237 130 | OutputInt: 1 131 | NextHop: 10.0.0.1 132 | [Duration: 0.000000000 seconds (switched)] 133 | StartTime: 14578.000000000 seconds 134 | EndTime: 14578.000000000 seconds 135 | Post 
NAT Source IPv4 Address: 75.75.75.75 136 | Post NAT Destination IPv4 Address: 192.168.168.65 137 | Post NAPT Source Transport Port: 53 138 | Post NAPT Destination Transport Port: 14735 139 | Flow 3 140 | Octets: 582 141 | Packets: 5 142 | Protocol: UDP (17) 143 | SrcAddr: 74.125.138.113 144 | SrcPort: 443 (443) 145 | InputInt: 2 146 | DstPort: 54225 (54225) 147 | DstAddr: 10.0.0.237 148 | OutputInt: 1 149 | NextHop: 10.0.0.1 150 | [Duration: 0.000000000 seconds (switched)] 151 | StartTime: 14578.000000000 seconds 152 | EndTime: 14578.000000000 seconds 153 | Post NAT Source IPv4 Address: 74.125.138.113 154 | Post NAT Destination IPv4 Address: 192.168.168.65 155 | Post NAPT Source Transport Port: 443 156 | Post NAPT Destination Transport Port: 40100 157 | Flow 4 158 | Octets: 899 159 | Packets: 4 160 | Protocol: UDP (17) 161 | SrcAddr: 74.125.138.113 162 | SrcPort: 443 (443) 163 | InputInt: 2 164 | DstPort: 26770 (26770) 165 | DstAddr: 10.0.0.237 166 | OutputInt: 1 167 | NextHop: 10.0.0.1 168 | [Duration: 0.000000000 seconds (switched)] 169 | StartTime: 14578.000000000 seconds 170 | EndTime: 14578.000000000 seconds 171 | Post NAT Source IPv4 Address: 74.125.138.113 172 | Post NAT Destination IPv4 Address: 192.168.168.65 173 | Post NAPT Source Transport Port: 443 174 | Post NAPT Destination Transport Port: 42091 175 | Flow 5 176 | Octets: 454 177 | Packets: 4 178 | Protocol: TCP (6) 179 | SrcAddr: 35.168.237.229 180 | SrcPort: 443 (443) 181 | InputInt: 2 182 | DstPort: 15643 (15643) 183 | DstAddr: 10.0.0.237 184 | OutputInt: 1 185 | NextHop: 10.0.0.1 186 | [Duration: 14433.000000000 seconds (switched)] 187 | StartTime: 146.000000000 seconds 188 | EndTime: 14579.000000000 seconds 189 | Post NAT Source IPv4 Address: 35.168.237.229 190 | Post NAT Destination IPv4 Address: 192.168.168.65 191 | Post NAPT Source Transport Port: 443 192 | Post NAPT Destination Transport Port: 60176 193 | ''' 194 | 195 | host = sys.argv[1] 196 | port = 2055 197 | N = 150000 198 | 
flowsPerPacket = 5 199 | 200 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 201 | sock.sendto(tpl, (host, port)) 202 | time.sleep(0.2) 203 | 204 | ts = time.time() 205 | print("%d: started sending %d SonicWALL IPFIX flows in %d packets totaling %d bytes" % (ts,N*flowsPerPacket, N, N*len(data))) 206 | print("%d: flow size %d, packet size %d" % (ts, len(data) / flowsPerPacket, len(data))) 207 | 208 | for i in range(0, N): 209 | sock.sendto(data, (host, port)) 210 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2020 Elastic and contributors 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /lib/logstash/codecs/netflow/util.rb: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | require "bindata" 3 | require "ipaddr" 4 | 5 | class IP4Addr < BinData::Primitive 6 | endian :big 7 | uint32 :storage 8 | 9 | def set(val) 10 | unless val.nil? 11 | self.storage = val.split('.').inject(0) {|total,value| (total << 8 ) + value.to_i} 12 | end 13 | end 14 | 15 | def get 16 | # This is currently the fastest implementation 17 | # For benchmarks see spec/codecs/benchmarks/IPaddr.rb 18 | unless self.storage.nil? 19 | [self.storage].pack('N').unpack('C4').join('.') 20 | end 21 | end 22 | end 23 | 24 | class IP6Addr < BinData::Primitive 25 | endian :big 26 | uint128 :storage 27 | 28 | def set(val) 29 | unless val.nil? 30 | ip = IPAddr.new(val) 31 | if ! ip.ipv6? 32 | raise ArgumentError, "invalid IPv6 address `#{val}'" 33 | end 34 | self.storage = ip.to_i 35 | end 36 | end 37 | 38 | def get 39 | # There are faster implementations, however they come with the 40 | # loss of compressed IPv6 notation. 
41 | # For benchmarks see spec/codecs/benchmarks/IP6Addr.rb 42 | unless self.storage.nil? 43 | b = "%032x" % self.storage 44 | c = b[0..3] + ":" + b[4..7] + ":" + b[8..11] + ":" + b[12..15] + ":" + b[16..19] + ":" + b[20..23] + ":" + b[24..27] + ":" + b[28..31] 45 | IPAddr.new(c).to_s 46 | end 47 | end 48 | end 49 | 50 | class MacAddr < BinData::Primitive 51 | string :bytes, :length => 6 52 | 53 | def set(val) 54 | unless val.nil? 55 | ints = val.split(/:/).collect { |int| int.to_i(16) } 56 | self.bytes = ints 57 | end 58 | end 59 | 60 | def get 61 | # This is currently the fastest implementation 62 | # For benchmarks see spec/codecs/benchmarks/MacAddr.rb 63 | b = self.bytes.unpack('H*')[0] 64 | b[0..1] + ":" + b[2..3] + ":" + b[4..5] + ":" + b[6..7] + ":" + b[8..9] + ":" + b[10..11] 65 | end 66 | end 67 | 68 | class VarSkip < BinData::Primitive 69 | endian :big 70 | uint8 :length_1 71 | uint16 :length_2, :onlyif => lambda { length_1 == 255 } 72 | skip :length => lambda { (length_1 == 255) ? length_2 : length_1 } 73 | 74 | def get 75 | "" 76 | end 77 | end 78 | 79 | class VarString < BinData::Primitive 80 | endian :big 81 | uint8 :length_1 82 | uint16 :length_2, :onlyif => lambda { length_1 == 255 } 83 | string :data, :trim_padding => true, :length => lambda { (length_1 == 255) ? length_2 : length_1 } 84 | 85 | def set(val) 86 | self.data = val 87 | end 88 | 89 | def get 90 | self.data 91 | end 92 | 93 | def snapshot 94 | super.encode("ASCII-8BIT", "UTF-8", invalid: :replace, undef: :replace) 95 | end 96 | end 97 | 98 | class ACLIdASA < BinData::Primitive 99 | string :bytes, :length => 12 100 | 101 | def set(val) 102 | unless val.nil? 
103 | self.bytes = val.split("-").collect { |aclid| aclid.scan(/../).collect { |hex| hex.to_i(16)} }.flatten 104 | end 105 | end 106 | 107 | def get 108 | # This is currently the fastest implementation 109 | # For benchmarks see spec/codecs/benchmarks/ACLIdASA.rb 110 | b = self.bytes.unpack('H*')[0] 111 | b[0..7] + "-" + b[8..15] + "-" + b[16..23] 112 | end 113 | end 114 | 115 | class MPLSLabelStackOctets < BinData::Record 116 | endian :big 117 | bit20 :label 118 | bit3 :experimental 119 | bit1 :bottom_of_stack 120 | uint8 :ttl 121 | end 122 | 123 | class Forwarding_Status < BinData::Record 124 | endian :big 125 | bit2 :status 126 | bit6 :reason 127 | end 128 | 129 | class Application_Id16 < BinData::Primitive 130 | endian :big 131 | uint8 :classification_id 132 | uint24 :selector_id 133 | 134 | def set(val) 135 | unless val.nil? 136 | self.classification_id=val.to_i<<24 137 | self.selector_id = val.to_i-((val.to_i>>24)<<24) 138 | end 139 | end 140 | 141 | def get 142 | self.classification_id.to_s + ".." + self.selector_id.to_s 143 | end 144 | end 145 | 146 | 147 | class Application_Id24 < BinData::Primitive 148 | endian :big 149 | uint8 :classification_id 150 | uint16 :selector_id 151 | 152 | def set(val) 153 | unless val.nil? 154 | self.classification_id=val.to_i<<16 155 | self.selector_id = val.to_i-((val.to_i>>16)<<16) 156 | end 157 | end 158 | 159 | def get 160 | self.classification_id.to_s + ".." + self.selector_id.to_s 161 | end 162 | end 163 | 164 | 165 | class Application_Id32 < BinData::Primitive 166 | endian :big 167 | uint8 :classification_id 168 | uint24 :selector_id 169 | 170 | def set(val) 171 | unless val.nil? 172 | self.classification_id=val.to_i<<24 173 | self.selector_id = val.to_i-((val.to_i>>24)<<24) 174 | end 175 | end 176 | 177 | def get 178 | self.classification_id.to_s + ".." 
+ self.selector_id.to_s 179 | end 180 | end 181 | 182 | 183 | class Application_Id40 < BinData::Primitive 184 | endian :big 185 | uint8 :classification_id 186 | uint32 :selector_id 187 | 188 | def set(val) 189 | unless val.nil? 190 | self.classification_id=val.to_i<<32 191 | self.selector_id = val.to_i-((val.to_i>>32)<<32) 192 | end 193 | end 194 | 195 | def get 196 | self.classification_id.to_s + ".." + self.selector_id.to_s 197 | end 198 | end 199 | 200 | 201 | class Appid56PanaL7Pen < BinData::Record 202 | # RFC6759 chapter 4.1: PANA-L7-PEN 203 | # This implements the "application ids MAY be encoded in a smaller number of bytes" 204 | # Used in Application_Id56 choice statement 205 | endian :big 206 | uint32 :pen_id 207 | uint16 :selector_id 208 | end 209 | 210 | 211 | class Application_Id56 < BinData::Primitive 212 | endian :big 213 | uint8 :classification_id 214 | choice :selector_id, :selection => :classification_id do 215 | # for classification engine id 20 we switch to Appid64PanaL7Pen to decode 216 | appid56_pana_l7_pen 20 217 | uint48 :default 218 | end 219 | 220 | def set(val) 221 | unless val.nil? 222 | self.classification_id=val.to_i<<48 223 | if self.classification_id == 20 224 | # classification engine id 20 (PANA_L7_PEN) contains a 4-byte PEN: 225 | self.pen_id = val.to_i-((val.to_i>>48)<<48)>>16 226 | self.selector_id = val.to_i-((val.to_i>>16)<<16) 227 | else 228 | self.selector_id = val.to_i-((val.to_i>>48)<<48) 229 | end 230 | end 231 | end 232 | 233 | def get 234 | if self.classification_id == 20 235 | self.classification_id.to_s + ".." + self.selector_id[:pen_id].to_s + ".." + self.selector_id[:selector_id].to_s 236 | else 237 | self.classification_id.to_s + ".." 
+ self.selector_id.to_s 238 | end 239 | end 240 | end 241 | 242 | 243 | class Appid64PanaL7Pen < BinData::Record 244 | # RFC6759 chapter 4.1: PANA-L7-PEN 245 | # This implements the 3 bytes default selector id length 246 | # Used in Application_Id64 choice statement 247 | endian :big 248 | uint32 :pen_id 249 | uint24 :selector_id 250 | end 251 | 252 | class Application_Id64 < BinData::Primitive 253 | endian :big 254 | uint8 :classification_id 255 | choice :selector_id, :selection => :classification_id do 256 | # for classification engine id 20 we switch to Appid64PanaL7Pen to decode 257 | appid64_pana_l7_pen 20 258 | uint56 :default 259 | end 260 | 261 | def set(val) 262 | unless val.nil? 263 | self.classification_id=val.to_i<<56 264 | if self.classification_id == 20 265 | # classification engine id 20 (PANA_L7_PEN) contains a 4-byte PEN: 266 | self.pen_id = val.to_i-((val.to_i>>56)<<56)>>24 267 | self.selector_id = val.to_i-((val.to_i>>24)<<24) 268 | else 269 | self.selector_id = val.to_i-((val.to_i>>56)<<56) 270 | end 271 | end 272 | end 273 | 274 | def get 275 | if self.classification_id == 20 276 | self.classification_id.to_s + ".." + self.selector_id[:pen_id].to_s + ".." + self.selector_id[:selector_id].to_s 277 | else 278 | self.classification_id.to_s + ".." + self.selector_id.to_s 279 | end 280 | end 281 | end 282 | 283 | class Appid72PanaL7Pen < BinData::Record 284 | # RFC6759 chapter 4.1: PANA-L7-PEN 285 | # This implements the "application ids MAY be encoded with a larger length" 286 | # Used in Application_Id72 choice statement 287 | endian :big 288 | uint32 :pen_id 289 | uint32 :selector_id 290 | end 291 | 292 | class Application_Id72 < BinData::Primitive 293 | endian :big 294 | uint8 :classification_id 295 | choice :selector_id, :selection => :classification_id do 296 | # for classification engine id 20 we switch to Appid72PanaL7Pen to decode 297 | appid72_pana_l7_pen 20 298 | uint64 :default 299 | end 300 | 301 | def set(val) 302 | unless val.nil? 
303 | self.classification_id = val.to_i<<64 304 | if self.classification_id == 20 305 | # classification engine id 20 (PANA_L7_PEN) contains a 4-byte PEN: 306 | self.pen_id = val.to_i-((val.to_i>>64)<<64)>>32 307 | self.selector_id = val.to_i-((val.to_i>>32)<<32) 308 | else 309 | self.selector_id = val.to_i-((val.to_i>>64)<<64) 310 | end 311 | end 312 | end 313 | 314 | def get 315 | if self.classification_id == 20 316 | self.classification_id.to_s + ".." + self.selector_id[:pen_id].to_s + ".." + self.selector_id[:selector_id].to_s 317 | else 318 | self.classification_id.to_s + ".." + self.selector_id.to_s 319 | end 320 | end 321 | end 322 | 323 | class OctetArray < BinData::Primitive 324 | # arg_processor :octetarray 325 | mandatory_parameter :initial_length 326 | array :bytes, :type => :uint8, :initial_length => :initial_length 327 | 328 | def set(val) 329 | unless val.nil? 330 | self.bytes = val.scan(/../).collect { |hex| hex.to_i(16)} 331 | end 332 | end 333 | 334 | def get 335 | self.bytes.collect { |byte| byte.value.to_s(16).rjust(2,'0') }.join 336 | end 337 | end 338 | 339 | class Header < BinData::Record 340 | endian :big 341 | uint16 :version 342 | end 343 | 344 | class Netflow5PDU < BinData::Record 345 | endian :big 346 | uint16 :version 347 | uint16 :flow_records, :assert => lambda { flow_records.value.between?(1,30) } 348 | uint32 :uptime 349 | uint32 :unix_sec 350 | uint32 :unix_nsec 351 | uint32 :flow_seq_num 352 | uint8 :engine_type 353 | uint8 :engine_id 354 | bit2 :sampling_algorithm 355 | bit14 :sampling_interval 356 | array :records, :initial_length => :flow_records do 357 | ip4_addr :ipv4_src_addr 358 | ip4_addr :ipv4_dst_addr 359 | ip4_addr :ipv4_next_hop 360 | uint16 :input_snmp 361 | uint16 :output_snmp 362 | uint32 :in_pkts 363 | uint32 :in_bytes 364 | uint32 :first_switched 365 | uint32 :last_switched 366 | uint16 :l4_src_port 367 | uint16 :l4_dst_port 368 | skip :length => 1 369 | uint8 :tcp_flags # Split up the TCP flags maybe? 
370 | uint8 :protocol 371 | uint8 :src_tos 372 | uint16 :src_as 373 | uint16 :dst_as 374 | uint8 :src_mask 375 | uint8 :dst_mask 376 | skip :length => 2 377 | end 378 | end 379 | 380 | class NetflowTemplateFlowset < BinData::Record 381 | endian :big 382 | array :templates, :read_until => lambda { flowset_length == 0 || array.num_bytes == flowset_length - 4 } do 383 | uint16 :template_id 384 | uint16 :field_count 385 | array :record_fields, :initial_length => :field_count do 386 | uint16 :field_type 387 | uint16 :field_length 388 | end 389 | end 390 | rest :rest, :onlyif => lambda { flowset_length == 0 } 391 | end 392 | 393 | class NetflowOptionFlowset < BinData::Record 394 | endian :big 395 | array :templates, :read_until => lambda { array.num_bytes == flowset_length - 4 } do 396 | uint16 :template_id 397 | uint16 :scope_length, :assert => lambda { scope_length > 0 } 398 | uint16 :option_length, :assert => lambda { option_length > 0 } 399 | array :scope_fields, :initial_length => lambda { scope_length / 4 } do 400 | uint16 :field_type 401 | uint16 :field_length 402 | end 403 | array :option_fields, :initial_length => lambda { option_length / 4 } do 404 | uint16 :field_type 405 | uint16 :field_length, :assert => lambda { field_length > 0 } 406 | end 407 | string :padding, :read_length => lambda { flowset_length - 4 - scope_length - option_length - 2 - 2 - 2 } 408 | end 409 | end 410 | 411 | class Netflow9PDU < BinData::Record 412 | endian :big 413 | uint16 :version 414 | uint16 :flow_records 415 | uint32 :uptime 416 | uint32 :unix_sec 417 | uint32 :flow_seq_num 418 | uint32 :source_id 419 | array :records, :read_until => :eof do 420 | uint16 :flowset_id, :assert => lambda { [0, 1, *(256..65535)].include?(flowset_id) } 421 | uint16 :flowset_length, :assert => lambda { flowset_length == 0 || flowset_length > 4 } 422 | choice :flowset_data, :selection => :flowset_id do 423 | netflow_template_flowset 0 424 | netflow_option_flowset 1 425 | string :default, :read_length 
=> lambda { flowset_length - 4 } 426 | end 427 | end 428 | end 429 | 430 | class IpfixTemplateFlowset < BinData::Record 431 | endian :big 432 | array :templates, :read_until => lambda { flowset_length - 4 - array.num_bytes <= 2 } do 433 | uint16 :template_id 434 | uint16 :field_count 435 | array :record_fields, :initial_length => :field_count do 436 | bit1 :enterprise 437 | bit15 :field_type 438 | uint16 :field_length 439 | uint32 :enterprise_id, :onlyif => lambda { enterprise != 0 } 440 | end 441 | end 442 | # skip :length => lambda { flowset_length - 4 - set.num_bytes } ? 443 | end 444 | 445 | class IpfixOptionFlowset < BinData::Record 446 | endian :big 447 | array :templates, :read_until => lambda { flowset_length - 4 } do 448 | uint16 :template_id 449 | uint16 :field_count 450 | uint16 :scope_count, :assert => lambda { scope_count > 0 } 451 | array :scope_fields, :initial_length => lambda { scope_count } do 452 | bit1 :enterprise 453 | bit15 :field_type 454 | uint16 :field_length 455 | uint32 :enterprise_id, :onlyif => lambda { enterprise != 0 } 456 | end 457 | array :option_fields, :initial_length => lambda { field_count - scope_count } do 458 | bit1 :enterprise 459 | bit15 :field_type 460 | uint16 :field_length 461 | uint32 :enterprise_id, :onlyif => lambda { enterprise != 0 } 462 | end 463 | string :padding, :read_length => lambda { flowset_length - 4 - 2 - 2 - 2 - scope_fields.num_bytes - option_fields.num_bytes } 464 | end 465 | end 466 | 467 | class IpfixPDU < BinData::Record 468 | endian :big 469 | uint16 :version 470 | uint16 :pdu_length 471 | uint32 :unix_sec 472 | uint32 :flow_seq_num 473 | uint32 :observation_domain_id 474 | array :records, :read_until => lambda { array.num_bytes == pdu_length - 16 } do 475 | uint16 :flowset_id, :assert => lambda { [2, 3, *(256..65535)].include?(flowset_id) } 476 | uint16 :flowset_length, :assert => lambda { flowset_length > 4 } 477 | choice :flowset_data, :selection => :flowset_id do 478 | ipfix_template_flowset 2 
479 | ipfix_option_flowset 3 480 | string :default, :read_length => lambda { flowset_length - 4 } 481 | end 482 | end 483 | end 484 | 485 | # https://gist.github.com/joshaven/184837 486 | class Vash < Hash 487 | def initialize(constructor = {}) 488 | @register ||= {} 489 | if constructor.is_a?(Hash) 490 | super() 491 | merge(constructor) 492 | else 493 | super(constructor) 494 | end 495 | end 496 | 497 | alias_method :regular_writer, :[]= unless method_defined?(:regular_writer) 498 | alias_method :regular_reader, :[] unless method_defined?(:regular_reader) 499 | 500 | def [](key) 501 | sterilize(key) 502 | clear(key) if expired?(key) 503 | regular_reader(key) 504 | end 505 | 506 | def []=(key, *args) 507 | if args.length == 2 508 | value, ttl = args[1], args[0] 509 | elsif args.length == 1 510 | value, ttl = args[0], 60 511 | else 512 | raise ArgumentError, "Wrong number of arguments, expected 2 or 3, received: #{args.length+1}\n"+ 513 | "Example Usage: volatile_hash[:key]=value OR volatile_hash[:key, ttl]=value" 514 | end 515 | sterilize(key) 516 | ttl(key, ttl) 517 | regular_writer(key, value) 518 | end 519 | 520 | def merge(hsh) 521 | hsh.map {|key,value| self[sterile(key)] = hsh[key]} 522 | self 523 | end 524 | 525 | def cleanup! 526 | now = Time.now.to_i 527 | @register.map {|k,v| clear(k) if v < now} 528 | end 529 | 530 | def clear(key) 531 | sterilize(key) 532 | @register.delete key 533 | self.delete key 534 | end 535 | 536 | private 537 | def expired?(key) 538 | Time.now.to_i > @register[key].to_i 539 | end 540 | 541 | def ttl(key, secs=60) 542 | @register[key] = Time.now.to_i + secs.to_i 543 | end 544 | 545 | def sterile(key) 546 | String === key ? 
key.chomp('!').chomp('=') : key.to_s.chomp('!').chomp('=').to_sym 547 | end 548 | 549 | def sterilize(key) 550 | key = sterile(key) 551 | end 552 | end 553 | 554 | -------------------------------------------------------------------------------- /RFC_COMPLIANCE_IPFIX.md: -------------------------------------------------------------------------------- 1 | # IPFIX RFC compliance 2 | 3 | The level of RFC compliance reached for collector-relevant requirements: 4 | 5 | | RFC | Level | 6 | |-----------|----------------------------------------------| 7 | | RFC 7011 | 42% of RFC "MUST" requirements implemented | 8 | | RFC 7011 | 19% of RFC "SHOULD" requirements implemented | 9 | | RFC 7012 | 83% of IE data types supported 10 | | RFC 7012 | 90% of IEs supported 11 | 12 | ## RFC 7011 collector compliance summary 13 | 14 | Summary of collector-relevant requirements implemented versus the total collector-relevant requirements: 15 | 16 | | Chapter |MUST |SHOULD| MAY| 17 | |---------------------------------------|-----|-----|-----| 18 | | 1. Introduction | | | | 19 | | 2. Terminology | | | | 20 | | 3. IPFIX message format | 2/2 | 0/2 | | 21 | | 4. Specific reporting requirements | 0/1 | | | 22 | | 5. Timing considerations | | 0/2 | | 23 | | 6. Linkage with the Information Model | | 0/1 | 0/1 | 24 | | 7. Variable Length IE | | | | 25 | | 8. Template management | 3/9 | 1/5 | 1/2 | 26 | | 9. The collecting process's side | 4/5 | 1/3 | 0/4 | 27 | | 10. Transport protocol | 5/8 | 1/3 | 3/3 | 28 | | 11. Security considerations | 0/8 | 1/5 | 2/3 | 29 | | 12. Management considerations | | | | 30 | | 13. IANA considerations | | | | 31 | 32 | ## RFC 7012 collector compliance summary 33 | 34 | | Chapter | MUST |SHOULD| MAY | 35 | |-----------------------------------|------|------|-----| 36 | | 1. Introduction | | | | 37 | | 2. Properties of IPFIX IE | | | | 38 | | 3. Type Space | | | | 39 | | 4. IE identitfiers | | | | 40 | | 5. IE | | | | 41 | | 6. 
Exteding the information model | | | | 42 | | 7. IANA considerations | | | 0/1 | 43 | | 8. Security considerations | | | | 44 | 45 | 46 | ## RFC7012 Information Elements data type support details 47 | 48 | | IE data type | Support | Variable Length support | 49 | |-----------------------|---------|-------------------------| 50 | | octetArray | Yes | Yes | 51 | | unsigned8 | Yes | | 52 | | unsigned16 | Yes | | 53 | | unsigned32 | Yes | | 54 | | unsigned64 | Yes | | 55 | | signed8 | Yes | | 56 | | signed16 | Yes | | 57 | | signed32 | Yes | | 58 | | signed64 | Yes | | 59 | | float32 | Yes | | 60 | | float64 | Yes | | 61 | | boolean | No | | 62 | | macAddress | Yes | | 63 | | string | Yes | Yes | 64 | | dateTimeSeconds | Yes | | 65 | | dateTimeMilliseconds | Yes | | 66 | | dateTimeMicroseconds | Yes | | 67 | | dateTimeNanoseconds | Yes | | 68 | | ipv4Address | Yes | | 69 | | ipv6Address | Yes | | 70 | | basicList | No | | 71 | | subTemplateList | No | | 72 | | subTemplateMultiList | No | | 73 | 74 | ## RFC 7011 collector compliance details 75 | 76 | The tables below detail the collector-relevant requirements, and whether or not they are implemented: 77 | 78 | ### 3. IPFIX Message Format 79 | 80 | | Requirement |MUST |SHOULD| MAY| 81 | |---------------------------------------|-----|-----|-----| 82 | |3.1 Collecting Processes SHOULD use the Transport Session and the Observation Domain ID field to separate different export streams originating from the same Exporter.| | NO | | 83 | |3.4.1 Collecting Processes MUST NOT assume incremental Template IDs | YES | | | 84 | |3.4.2.1 At a minimum, Collecting Processes SHOULD support as scope the observationDomainId, exportingProcessId, meteringProcessId, templateId, lineCardId, exporterIPv4Address, exporterIPv6Address, and ingressInterface Information Elements. | | ? 
| | 85 | | 3.4.2.2 As Exporting Processes are free to allocate Template IDs as they see fit, Collecting Processes MUST NOT assume incremental Template IDs, or anything about the contents of an Options Template based on its Template ID alone | YES | | | 86 | 87 | ### 4. Specific Reporting Requirements 88 | 89 | | Requirement |MUST |SHOULD| MAY| 90 | |---------------------------------------|-----|-----|-----| 91 | | The Collecting Process MUST check the possible combinations of Information Elements within the Options Template Records to correctly interpret the following Options Templates. | NO | | | 92 | 93 | ### 5. Timing considerations 94 | 95 | | Requirement |MUST |SHOULD| MAY| 96 | |---------------------------------------|-----|-----|-----| 97 | | 5.2 Collecting Processes SHOULD use the current date, or other contextual information, to properly interpret dateTimeSeconds values and the Export Time Message Header field. | | NO | | 98 | | 5.2 Collecting Processes SHOULD use the current date, or other contextual information, to determine the NTP era in order to properly interpret dateTimeMicroseconds and dateTimeNanoseconds values in received Data Records | | NO | | 99 | 100 | ### 6. Linkage with the Information Model 101 | 102 | | Requirement |MUST |SHOULD| MAY| 103 | |---------------------------------------|-----|-----|-----| 104 | | 6.1.6 Collecting Processes SHOULD detect and ignore IPFIX Messages containing ill-formed UTF-8 string values for Information Elements | | NO | | 105 | | 6.2. Reduced-size encoding of signed, unsigned, or float data types | | | NO | 106 | 107 | ### 8. Template Management 108 | 109 | | Requirement |MUST |SHOULD| MAY| 110 | |---------------------------------------|-----|-----|-----| 111 | |8. 
The Collecting Process MUST store all received Template Record information for the duration of each Transport Session until reuse or withdrawal as described in Section 8.1, or expiry over UDP as described in Section 8.4, so that it can interpret the corresponding Data Records.| YES | | | 112 | |8. The Collecting Process MUST NOT assume that the Template IDs from a given Exporting Process refer to the same Templates as they did in previous Transport Sessions from the same Exporting Process| NO | | | 113 | |8. Collecting Process MUST NOT use Templates from one Transport Session to decode Data Sets in a subsequent Transport Session.| NO | | | 114 | |8. Collecting Processes MUST properly handle Templates with multiple identical Information Elements.| NO | | | 115 | |8. a Collecting Process MUST NOT assume that the Data Set and the associated Template Set (or Options Template Set) are exported in the same IPFIX Message| YES | | | 116 | |8. Though a Collecting Process normally receives Template Records from the Exporting Process before receiving Data Records, this is not always the case, e.g., in the case of reordering or Collecting Process restart over UDP. In these cases, the Collecting Process MAY buffer Data Records for which it has no Templates, to wait for Template Records describing them; however, note that in the presence of Template withdrawal and redefinition (Section 8.1) this may lead to incorrect interpretation of Data Records.| | | NO | 117 | | 8.Different Observation Domains within a Transport Session MAY use the same Template ID value to refer to different Templates; Collecting Processes MUST properly handle this case.| NO | | | 118 | | 8.1 After receiving a Template Withdrawal, a Collecting Process MUST stop using the Template to interpret subsequently exported Data Sets. 
Note that this mechanism does not apply when UDP is used to transport IPFIX Messages; for that case, see Section 8.4.| NO | | | 119 | |8.1 If a Collecting Process receives a Template Withdrawal for a Template or Options Template it does not presently have stored, this indicates a malfunctioning or improperly implemented Exporting Process. The continued receipt and interpretation of Data Records are still possible, but the Collecting Process MUST ignore the Template Withdrawal and SHOULD log the error.| | NO | | 120 | | 8.1 If a Collecting Process receives a new Template Record or Options Template Record for an already-allocated Template ID, and that Template or Options Template is identical to the already-received Template or Options Template, it SHOULD log the retransmission | | NO | | 121 | |8.1 If a Collecting Process receives a new Template Record or Options Template Record for an already-allocated Template ID, and that Template or Options Template is different from the already-received Template or Options Template, this indicates a malfunctioning or improperly implemented Exporting Process. The continued receipt and unambiguous interpretation of Data Records for this Template ID are no longer possible, and the Collecting Process SHOULD log the error. | | NO | | 122 | |8.4 The Collecting Process MAY associate a lifetime with each Template received in a Transport Session. Templates not refreshed by the Exporting Process within the lifetime can then be discarded by the Collecting Process. The Template lifetime at the Collecting Process MAY be exposed by a configuration parameter or MAY be derived from observation of the interval of periodic Template retransmissions from the Exporting Process. In this latter case, the Template lifetime SHOULD default to at least 3 times the observed retransmission rate. 
| | | PARTIAL| 123 | |8.4 Template Withdrawals (Section 8.1) MUST NOT be sent by Exporting Processes exporting via UDP and MUST be ignored by Collecting Processes collecting via UDP | NO | | | 124 | |8.4 When a Collecting Process receives a new Template Record or Options Template Record via UDP for an already-allocated Template ID, and that Template or Options Template is identical to the already received Template or Options Template, it SHOULD NOT log the retransmission, as this is the normal operation of Template refresh over UDP.| | YES| | 125 | |8.4 The Collecting Process MUST replace the Template or Options Template for that Template ID with the newly received Template or Options Template. This is the normal operation of Template ID reuse over UDP. | YES | | | 126 | |8.4 The Collecting Process SHOULD maintain the following for all the current Template Records and Options Template Records: . | | NO| | 127 | 128 | ### 9. The collecting process's side 129 | 130 | | Requirement |MUST |SHOULD| MAY| 131 | |---------------------------------------|-----|-----|-----| 132 | |9. The Collecting Process MUST listen for association requests / connections to start new Transport Sessions from the Exporting Process. | YES | | | 133 | |9. The Collecting Process MUST note the Information Element identifier of any Information Element that it does not understand and MAY discard that Information Element from received Data Records.| YES | | | 134 | |9. The Collecting Process MUST accept padding in Data Records and Template Records. | YES | | | 135 | | 9. A Collector can detect out-of-sequence, dropped, or duplicate IPFIX Messages by tracking the Sequence Number. A Collector SHOULD provide a logging mechanism for tracking out-of- sequence IPFIX Messages. | | NO | | 136 | | 9.1 If the Collecting Process receives a malformed IPFIX Message, it MUST discard the IPFIX Message and SHOULD log the error. 
| YES | YES | | 137 | | 9.1 The Collecting Process MAY attempt to rectify the situation any way it sees fit, including: | | | NO | 138 | | 9.1 On the other hand, the Collecting Process SHOULD stop processing IPFIX Messages from clearly malfunctioning Exporting Processes (e.g., those from which the last few IPFIX Messages have been malformed). | | NO | | 139 | | 9.2 The Collecting Process MUST support the opening of multiple SCTP Streams | NO | | | 140 | | 9.3 The Collecting Process MAY discard all Transport Session state after no IPFIX Messages are received from a given Exporting Process within a given Transport Session during a configurable idle timeout. | | | NO | 141 | | 9.3 The Collecting Process SHOULD accept Data Records without the associated Template Record (or other definitions such as Common Properties) required to decode the Data Record. | | NO | | 142 | | 9.3 If the Template Records or other definitions have not been received at the time Data Records are received, the Collecting Process MAY store the Data Records for a short period of time and decode them after the Template Records or other definitions are received | | | NO | 143 | 144 | ### 10. Transport protocol 145 | 146 | | Requirement |MUST |SHOULD| MAY| 147 | |---------------------------------------|-----|-----|-----| 148 | | 10. A Collecting Process MUST be able to handle IPFIX Message lengths of up to 65535 octets. | YES (LS>v5.1)| | | 149 | |10. Transport Session state MUST NOT be migrated by an Exporting Process or Collecting Process among Transport Sessions using different transport protocols between the same Exporting Process and Collecting Process pair | NO | | | 150 | |10.1 SCTP [RFC4960] using the Partially Reliable SCTP (PR-SCTP) extension as specified in [RFC3758] MUST be implemented by all compliant implementations. 
| NO | | | 151 | |10.1 UDP [UDP] MAY also be implemented by compliant implementations | | | YES | 152 | |10.1 TCP [TCP] MAY also be implemented by compliant implementations. | | | YES | 153 | |10.1 It MUST be possible to configure both the Exporting and Collecting Processes to use different ports than the default. | YES | | | 154 | | 10.1 By default, the Collecting Process listens for secure connections on SCTP, TCP, and/or UDP port 4740 | | | NO | 155 | | 10.2.4 When a Collecting Process no longer wants to receive IPFIX Messages, it SHOULD shut down its end of the association. The Collecting Process SHOULD continue to receive and process IPFIX Messages until the Exporting Process has closed its end of the association. | | NO | | 156 | |10.2.4 When a Collecting Process detects that the SCTP association has been abnormally terminated, it MUST continue to listen for a new association establishment. | NO | | | 157 | | 10.2.4 When an Exporting Process detects that the SCTP association to the Collecting Process is abnormally terminated, it SHOULD try to re-establish the association. | | NO | | 158 | | 10.3 UDP MAY be used in deployments where Exporters and Collectors always communicate over dedicated links that are not susceptible to congestion | | | YES | 159 | | 10.3.2 UDP MUST NOT be used unless the application can tolerate some loss of IPFIX Messages. | | | | 160 | | 10.4 When a Collecting Process detects that the TCP connection to the Exporting Process has terminated abnormally, it MUST continue to listen for a new connection. | YES | | | 161 | |10.4 When a Collecting Process no longer wants to receive IPFIX Messages, it SHOULD close its end of the connection. The Collecting Process SHOULD continue to read IPFIX Messages until the Exporting Process has closed its end. | | YES | | 162 | 163 | ### 11. Security Considerations 164 | 165 | | Requirement |MUST |SHOULD| MAY| 166 | |---------------------------------------|-----|-----|-----| 167 | | 11. 
IPFIX Exporting Processes and Collecting Processes using UDP or SCTP MUST support DTLS version 1.0 and SHOULD support DTLS version 1.2 [RFC6347], including the mandatory ciphersuite(s) specified in each version. | NO | | | 168 | | 11. Exporting and Collecting Processes MUST NOT request, offer, or use any version of the Secure Socket Layer (SSL), or any version of TLS prior to 1.1, due to known security vulnerabilities in prior versions of TLS| NO | | | 11.3 When using TLS or DTLS, IPFIX Exporting Processes and IPFIX Collecting Processes SHOULD be identified by a certificate containing the DNS-ID | | NO | | 169 | | 11.3 The inclusion of Common Names (CN-IDs) in certificates identifying IPFIX Exporting Processes or Collecting Processes is NOT RECOMMENDED. | | NO | | 170 | |11.3 To prevent man-in-the-middle attacks from impostor Exporting or Collecting Processes, the acceptance of data from an unauthorized Exporting Process, or the export of data to an unauthorized Collecting Process, mutual authentication MUST be used for both TLS and DTLS. | NO | | | 171 | | 11.3 Collecting Processes MUST verify the reference identifiers of the Exporting Processes from which they are receiving IPFIX Messages against those stored in the certificates | NO | | | 172 | | 11.3 Collecting Processes MUST NOT accept IPFIX Messages from non-verified Exporting Processes. | NO | | | 173 | | 11.3 Exporting Processes and Collecting Processes MUST support the verification of certificates against an explicitly authorized list of peer certificates identified by Common Name and SHOULD support the verification of reference identifiers by matching the DNS-ID or CN-ID with a DNS lookup of the peer. | NO | | | 174 | | 11.3 IPFIX Exporting Processes and Collecting Processes MUST use non-NULL ciphersuites for authentication, integrity, and confidentiality. 
| NO | | | 175 | | 11.4 Collector rate limiting SHOULD be used to protect TLS and DTLS| |NO | | 176 | | 11.4 SYN cookies SHOULD be used by any Collecting Process accepting TCP connections. | | YES | | 177 | | 11.4 These rate and state limits MAY be provided by a Collecting Process, and if provided, the limits SHOULD be user configurable. | | | NO | 178 | | 11.5 IPFIX Message traffic transported via UDP and not secured via DTLS SHOULD be protected via segregation to a dedicated network. | | | | 179 | | 11.6 IPFIX Collecting Processes MUST detect potential IPFIX Message insertion or loss conditions by tracking the IPFIX Sequence Number and SHOULD provide a logging mechanism for reporting out-of-sequence messages. | NO | | | 180 | | 11.6 IPFIX Exporting and Collecting Processes SHOULD log any connection attempt that fails due to authentication failure | | NO | | 181 | | 11.6 IPFIX Exporting and Collecting Processes SHOULD detect and log any SCTP association reset or TCP connection reset. | | NO | | 182 | | 11.7 As IPFIX uses length-prefix encodings, Collector implementors should take care to ensure the detection of inconsistent values that could impact IPFIX Message decoding, and proper operation in the presence of such inconsistent values. | | | YES | 183 | | 11.7 Specifically, IPFIX Message, Set, and variable-length Information Element lengths must be checked for consistency to avoid buffer-sizing vulnerabilities. 
| | | YES | 184 | 185 | 186 | ## RFC7012 Information Elements support details 187 | 188 | IE 1-433 are supported 189 | 190 | These are not yet supported: 191 | 192 | |id | name | data type 193 | |---|---------------------|------------------------- 194 | |434|mibObjectValueInteger|signed32 195 | |435|mibObjectValueOctetString|octetArray 196 | |436|mibObjectValueOID|octetArray 197 | |437|mibObjectValueBits|octetArray 198 | |438|mibObjectValueIPAddress|ipv4Address 199 | |439|mibObjectValueCounter|unsigned64 200 | |440|mibObjectValueGauge|unsigned32 201 | |441|mibObjectValueTimeTicks|unsigned32 202 | |442|mibObjectValueUnsigned|unsigned32 203 | |443|mibObjectValueTable|subTemplateList 204 | |444|mibObjectValueRow|subTemplateList 205 | |445|mibObjectIdentifier|octetArray 206 | |446|mibSubIdentifier|unsigned32 207 | |447|mibIndexIndicator|unsigned64 208 | |448|mibCaptureTimeSemantics|unsigned8 209 | |449|mibContextEngineID|octetArray 210 | |450|mibContextName|string 211 | |451|mibObjectName|string 212 | |452|mibObjectDescription|string 213 | |453|mibObjectSyntax|string 214 | |454|mibModuleName|string 215 | |455|mobileIMSI|string 216 | |456|mobileMSISDN|string 217 | |457|httpStatusCode|unsigned16 218 | |458|sourceTransportPortsLimit|unsigned16 219 | |459|httpRequestMethod|string 220 | |460|httpRequestHost|string 221 | |461|httpRequestTarget|string 222 | |462|httpMessageVersion|string 223 | |463|natInstanceID|unsigned32 224 | |464|internalAddressRealm|octetArray 225 | |465|externalAddressRealm|octetArray 226 | |466|natQuotaExceededEvent|unsigned32 227 | |467|natThresholdEvent|unsigned32 228 | |468|httpUserAgent|string 229 | |469|httpContentType|string 230 | |470|httpReasonPhrase|string 231 | 232 | 233 | -------------------------------------------------------------------------------- /lib/logstash/codecs/netflow/netflow.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 0: 3 | - :skip 4 | 1: 5 | - 4 6 | - :in_bytes 7 | 2: 8 | 
- 4 9 | - :in_pkts 10 | 3: 11 | - 4 12 | - :flows 13 | 4: 14 | - :uint8 15 | - :protocol 16 | 5: 17 | - :uint8 18 | - :src_tos 19 | 6: 20 | - :uint8 21 | - :tcp_flags 22 | 7: 23 | - :uint16 24 | - :l4_src_port 25 | 8: 26 | - :ip4_addr 27 | - :ipv4_src_addr 28 | 9: 29 | - :uint8 30 | - :src_mask 31 | 10: 32 | - 2 33 | - :input_snmp 34 | 11: 35 | - :uint16 36 | - :l4_dst_port 37 | 12: 38 | - :ip4_addr 39 | - :ipv4_dst_addr 40 | 13: 41 | - :uint8 42 | - :dst_mask 43 | 14: 44 | - 2 45 | - :output_snmp 46 | 15: 47 | - :ip4_addr 48 | - :ipv4_next_hop 49 | 16: 50 | - 2 51 | - :src_as 52 | 17: 53 | - 2 54 | - :dst_as 55 | 18: 56 | - :ip4_addr 57 | - :bgp_ipv4_next_hop 58 | 19: 59 | - 4 60 | - :mul_dst_pkts 61 | 20: 62 | - 4 63 | - :mul_dst_bytes 64 | 21: 65 | - :uint32 66 | - :last_switched 67 | 22: 68 | - :uint32 69 | - :first_switched 70 | 23: 71 | - 4 72 | - :out_bytes 73 | 24: 74 | - 4 75 | - :out_pkts 76 | 25: 77 | - :uint16 78 | - :min_pkt_length 79 | 26: 80 | - :uint16 81 | - :max_pkt_length 82 | 27: 83 | - :ip6_addr 84 | - :ipv6_src_addr 85 | 28: 86 | - :ip6_addr 87 | - :ipv6_dst_addr 88 | 29: 89 | - :uint8 90 | - :ipv6_src_mask 91 | 30: 92 | - :uint8 93 | - :ipv6_dst_mask 94 | 31: 95 | - :uint24 96 | - :ipv6_flow_label 97 | 32: 98 | - :uint16 99 | - :icmp_type 100 | 33: 101 | - :uint8 102 | - :mul_igmp_type 103 | 34: 104 | - :uint32 105 | - :sampling_interval 106 | 35: 107 | - :uint8 108 | - :sampling_algorithm 109 | 36: 110 | - :uint16 111 | - :flow_active_timeout 112 | 37: 113 | - :uint16 114 | - :flow_inactive_timeout 115 | 38: 116 | - :uint8 117 | - :engine_type 118 | 39: 119 | - :uint8 120 | - :engine_id 121 | 40: 122 | - 4 123 | - :total_bytes_exp 124 | 41: 125 | - 4 126 | - :total_pkts_exp 127 | 42: 128 | - 4 129 | - :total_flows_exp 130 | 43: 131 | - :skip 132 | 44: 133 | - :ip4_addr 134 | - :ipv4_src_prefix 135 | 45: 136 | - :ip4_addr 137 | - :ipv4_dst_prefix 138 | 46: 139 | - :uint8 140 | - :mpls_top_label_type 141 | 47: 142 | - :uint32 143 | - 
:mpls_top_label_ip_addr 144 | 48: 145 | - 4 146 | - :flow_sampler_id 147 | 49: 148 | - :uint8 149 | - :flow_sampler_mode 150 | 50: 151 | - :uint32 152 | - :flow_sampler_random_interval 153 | 51: 154 | - :skip 155 | 52: 156 | - :uint8 157 | - :min_ttl 158 | 53: 159 | - :uint8 160 | - :max_ttl 161 | 54: 162 | - :uint16 163 | - :ipv4_ident 164 | 55: 165 | - :uint8 166 | - :dst_tos 167 | 56: 168 | - :mac_addr 169 | - :in_src_mac 170 | 57: 171 | - :mac_addr 172 | - :out_dst_mac 173 | 58: 174 | - :uint16 175 | - :src_vlan 176 | 59: 177 | - :uint16 178 | - :dst_vlan 179 | 60: 180 | - :uint8 181 | - :ip_protocol_version 182 | 61: 183 | - :uint8 184 | - :direction 185 | 62: 186 | - :ip6_addr 187 | - :ipv6_next_hop 188 | 63: 189 | - :ip6_addr 190 | - :bgp_ipv6_next_hop 191 | 64: 192 | - :uint32 193 | - :ipv6_option_headers 194 | 65: 195 | - :skip 196 | 66: 197 | - :skip 198 | 67: 199 | - :skip 200 | 68: 201 | - :skip 202 | 69: 203 | - :skip 204 | 80: 205 | - :mac_addr 206 | - :in_dst_mac 207 | 81: 208 | - :mac_addr 209 | - :out_src_mac 210 | 82: 211 | - :string 212 | - :if_name 213 | 83: 214 | - :string 215 | - :if_desc 216 | 84: 217 | - :string 218 | - :sampler_name 219 | 85: 220 | - :uint32 221 | - :in_permanent_bytes 222 | 86: 223 | - :uint32 224 | - :in_permanent_pkts 225 | 89: 226 | - :forwarding_status 227 | - :forwarding_status 228 | 92: 229 | - :uint32 230 | - :src_traffic_index 231 | 93: 232 | - :uint32 233 | - :dst_traffic_index 234 | 94: 235 | - :string 236 | - :application_description 237 | 95: 238 | - :application_id 239 | - :application_id 240 | 96: 241 | - :string 242 | - :application_name 243 | 98: 244 | - :uint8 245 | - :postIpDiffServCodePoint 246 | 99: 247 | - :uint32 248 | - :multicastReplicationFactor 249 | 101: 250 | - :uint8 251 | - :classificationEngineId 252 | 128: 253 | - :uint32 254 | - :bgpNextAdjacentAsNumber 255 | 129: 256 | - :uint32 257 | - :bgpPrevAdjacentAsNumber 258 | 130: 259 | - :ip4_addr 260 | - :exporterIPv4Address 261 | 131: 262 | - 
:ip6_addr 263 | - :exporterIPv6Address 264 | 132: 265 | - :uint64 266 | - :droppedOctetDeltaCount 267 | 133: 268 | - :uint64 269 | - :droppedPacketDeltaCount 270 | 134: 271 | - :uint64 272 | - :droppedOctetTotalCount 273 | 135: 274 | - :uint64 275 | - :droppedPacketTotalCount 276 | 136: 277 | - :uint8 278 | - :flow_end_reason 279 | 137: 280 | - :uint64 281 | - :commonPropertiesId 282 | 138: 283 | - :uint64 284 | - :observationPointId 285 | 139: 286 | - :uint16 287 | - :icmpTypeCodeIPv6 288 | 140: 289 | - :ip6_addr 290 | - :mplsTopLabelIPv6Address 291 | 141: 292 | - :uint32 293 | - :lineCardId 294 | 142: 295 | - :uint32 296 | - :portId 297 | 143: 298 | - :uint32 299 | - :meteringProcessId 300 | 144: 301 | - :uint32 302 | - :exportingProcessId 303 | 145: 304 | - :uint16 305 | - :templateId 306 | 146: 307 | - :uint8 308 | - :wlanChannelId 309 | 147: 310 | - :string 311 | - :wlanSSID 312 | 148: 313 | - :uint32 314 | - :conn_id 315 | 149: 316 | - :uint32 317 | - :observationDomainId 318 | 150: 319 | - :uint32 320 | - :flowStartSeconds 321 | 151: 322 | - :uint32 323 | - :flowEndSeconds 324 | 152: 325 | - 8 326 | - :flow_start_msec 327 | 153: 328 | - 8 329 | - :flow_end_msec 330 | 154: 331 | - :uint64 332 | - :flowStartMicroseconds 333 | 155: 334 | - :uint64 335 | - :flowEndMicroseconds 336 | 156: 337 | - :uint64 338 | - :flowStartNanoseconds 339 | 157: 340 | - :uint64 341 | - :flowEndNanoseconds 342 | 158: 343 | - :uint32 344 | - :flowStartDeltaMicroseconds 345 | 159: 346 | - :uint32 347 | - :flowEndDeltaMicroseconds 348 | 160: 349 | - :uint64 350 | - :systemInitTimeMilliseconds 351 | 161: 352 | - :uint32 353 | - :flowDurationMilliseconds 354 | 162: 355 | - :uint32 356 | - :flowDurationMicroseconds 357 | 163: 358 | - :uint64 359 | - :observedFlowTotalCount 360 | 164: 361 | - :uint64 362 | - :ignoredPacketTotalCount 363 | 165: 364 | - :uint64 365 | - :ignoredOctetTotalCount 366 | 166: 367 | - :uint64 368 | - :notSentFlowTotalCount 369 | 167: 370 | - :uint64 371 | - 
:notSentPacketTotalCount 372 | 168: 373 | - :uint64 374 | - :notSentOctetTotalCount 375 | 169: 376 | - :ip6_addr 377 | - :destinationIPv6Prefix 378 | 170: 379 | - :ip6_addr 380 | - :sourceIPv6Prefix 381 | 171: 382 | - :uint64 383 | - :postOctetTotalCount 384 | 172: 385 | - :uint64 386 | - :postPacketTotalCount 387 | 173: 388 | - :uint64 389 | - :flowKeyIndicator 390 | 174: 391 | - :uint64 392 | - :postMCastPacketTotalCount 393 | 175: 394 | - :uint64 395 | - :postMCastOctetTotalCount 396 | 176: 397 | - :uint8 398 | - :icmp_type 399 | 177: 400 | - :uint8 401 | - :icmp_code 402 | 178: 403 | - :uint8 404 | - :icmp_type_ipv6 405 | 179: 406 | - :uint8 407 | - :icmp_code_ipv6 408 | 180: 409 | - :uint16 410 | - :udp_src_port 411 | 181: 412 | - :uint16 413 | - :udp_dst_port 414 | 182: 415 | - :uint16 416 | - :tcp_src_port 417 | 183: 418 | - :uint16 419 | - :tcp_dst_port 420 | 184: 424 | - :uint32 425 | - :tcpSequenceNumber 426 | 185: 427 | - :uint32 428 | - :tcpAcknowledgementNumber 429 | 186: 430 | - :uint16 431 | - :tcpWindowSize 432 | 187: 433 | - :uint16 434 | - :tcpUrgentPointer 435 | 188: 436 | - :uint8 437 | - :tcpHeaderLength 438 | 189: 439 | - :uint8 440 | - :ipHeaderLength 441 | 190: 442 | - :uint16 443 | - :totalLengthIPv4 444 | 191: 445 | - :uint16 446 | - :payloadLengthIPv6 447 | 192: 448 | - :uint8 449 | - :ipTTL 450 | 193: 451 | - :uint8 452 | - :nextHeaderIPv6 453 | 194: 454 | - :uint8 455 | - :ip_tos 456 | 195: 457 | - :uint8 458 | - :ip_dscp 459 | 196: 460 | - :uint8 461 | - :ipPrecedence 462 | 197: 463 | - :uint8 464 | - :fragmentFlags 465 | 198: 466 | - :uint64 467 | - :octetDeltaSumOfSquares 468 | 199: 469 | - :uint64 470 | - :octetTotalSumOfSquares 471 | 200: 472 | - :uint8 473 | - :mplsTopLabelTTL 474 | 201: 475 | - :mpls_label_stack_octets 476 | - :mpls_label_stack_octets 477 | 202: 478 | - :uint32 479 | - :mplsLabelStackDepth 480 | 203: 481 | - :uint8 482 | - :mplsTopLabelExp 483 | 204: 484 | - 
:uint32 485 | - :ipPayloadLength 486 | 205: 487 | - :uint16 488 | - :udpMessageLength 489 | 206: 490 | - :uint8 491 | - :isMulticast 492 | 207: 493 | - :uint8 494 | - :ipv4IHL 495 | 208: 496 | - :uint32 497 | - :ipv4Options 498 | 209: 499 | - :uint64 500 | - :tcpOptions 501 | 210: 502 | - :skip 503 | 211: 504 | - :ip4_addr 505 | - :collectorIPv4Address 506 | 212: 507 | - :ip6_addr 508 | - :collectorIPv6Address 509 | 213: 510 | - :uint32 511 | - :exportInterface 512 | 214: 513 | - :uint8 514 | - :exportProtocolVersion 515 | 215: 516 | - :uint8 517 | - :exportTransportProtocol 518 | 216: 519 | - :uint16 520 | - :collectorTransportPort 521 | 217: 522 | - :uint16 523 | - :exporterTransportPort 524 | 218: 525 | - :uint64 526 | - :tcpSynTotalCount 527 | 219: 528 | - :uint64 529 | - :tcpFinTotalCount 530 | 220: 531 | - :uint64 532 | - :tcpRstTotalCount 533 | 221: 534 | - :uint64 535 | - :tcpPshTotalCount 536 | 222: 537 | - :uint64 538 | - :tcpAckTotalCount 539 | 223: 540 | - :uint64 541 | - :tcpUrgTotalCount 542 | 224: 543 | - :uint64 544 | - :ipTotalLength 545 | 225: 546 | - :ip4_addr 547 | - :xlate_src_addr_ipv4 548 | 226: 549 | - :ip4_addr 550 | - :xlate_dst_addr_ipv4 551 | 227: 552 | - :uint16 553 | - :xlate_src_port 554 | 228: 555 | - :uint16 556 | - :xlate_dst_port 557 | 229: 558 | - :uint8 559 | - :natOriginatingAddressRealm 560 | 230: 561 | - :uint8 562 | - :natEvent 563 | 231: 564 | - :uint64 565 | - :fwd_flow_delta_bytes 566 | 232: 567 | - :uint64 568 | - :rev_flow_delta_bytes 569 | 233: 570 | - :uint8 571 | - :fw_event 572 | 234: 573 | - :uint32 574 | - :ingressVRFID 575 | 235: 576 | - :uint32 577 | - :egressVRFID 578 | 236: 579 | - :string 580 | - :VRFname 581 | 237: 582 | - :uint8 583 | - :postMplsTopLabelExp 584 | 238: 585 | - :uint16 586 | - :tcpWindowScale 587 | 239: 588 | - :uint8 589 | - :biflowDirection 590 | 240: 591 | - :uint8 592 | - :ethernetHeaderLength 593 | 241: 594 | - :uint16 595 | - :ethernetPayloadLength 596 | 242: 597 | - :uint16 598 | - 
:ethernetTotalLength 599 | 243: 600 | - :uint16 601 | - :dot1qVlanId 602 | 244: 603 | - :uint8 604 | - :dot1qPriority 605 | 245: 606 | - :uint16 607 | - :dot1qCustomerVlanId 608 | 246: 609 | - :uint8 610 | - :dot1qCustomerPriority 611 | 247: 612 | - :string 613 | - :metroEvcId 614 | 248: 615 | - :uint8 616 | - :metroEvcType 617 | 249: 618 | - :uint32 619 | - :pseudoWireId 620 | 250: 621 | - :uint16 622 | - :pseudoWireType 623 | 251: 624 | - :uint32 625 | - :pseudoWireControlWord 626 | 252: 627 | - :uint32 628 | - :ingressPhysicalInterface 629 | 253: 630 | - :uint32 631 | - :egressPhysicalInterface 632 | 254: 633 | - :uint16 634 | - :postDot1qVlanId 635 | 255: 636 | - :uint16 637 | - :postDot1qCustomerVlanId 638 | 256: 639 | - :uint16 640 | - :ethernetType 641 | 257: 642 | - :uint8 643 | - :postIpPrecedence 644 | 258: 645 | - :uint64 646 | - :collectionTimeMilliseconds 647 | 259: 648 | - :uint16 649 | - :exportSctpStreamId 650 | 260: 651 | - :uint32 652 | - :maxExportSeconds 653 | 261: 654 | - :uint32 655 | - :maxFlowEndSeconds 656 | 262: 657 | - :string 658 | - :messageMD5Checksum 659 | 263: 660 | - :uint8 661 | - :messageScope 662 | 264: 663 | - :uint32 664 | - :minExportSeconds 665 | 265: 666 | - :uint32 667 | - :minFlowStartSeconds 668 | 266: 669 | - :string 670 | - :opaqueOctets 671 | 267: 672 | - :uint8 673 | - :sessionScope 674 | 268: 675 | - :uint64 676 | - :maxFlowEndMicroseconds 677 | 269: 678 | - :uint64 679 | - :maxFlowEndMilliseconds 680 | 270: 681 | - :uint64 682 | - :maxFlowEndNanoseconds 683 | 271: 684 | - :uint64 685 | - :minFlowStartMicroseconds 686 | 272: 687 | - :uint64 688 | - :minFlowStartMilliseconds 689 | 273: 690 | - :uint64 691 | - :minFlowStartNanoseconds 692 | 274: 693 | - :string 694 | - :collectorCertificate 695 | 275: 696 | - :string 697 | - :exporterCertificate 698 | 276: 699 | - :uint8 700 | - :dataRecordsReliability 701 | 277: 702 | - :uint8 703 | - :observationPointType 704 | 278: 705 | - :uint32 706 | - :newConnectionDeltaCount 
707 | 279: 708 | - :uint64 709 | - :connectionSumDurationSeconds 710 | 280: 711 | - :uint64 712 | - :connectionTransactionId 713 | 281: 714 | - :ip6_addr 715 | - :xlate_src_addr_ipv6 716 | 282: 717 | - :ip6_addr 718 | - :xlate_dst_addr_ipv6 719 | 283: 720 | - :uint32 721 | - :natPoolId 722 | 284: 723 | - :string 724 | - :natPoolName 725 | 285: 726 | - :uint16 727 | - :anonymizationFlags 728 | 286: 729 | - :uint16 730 | - :anonymizationTechnique 731 | 287: 732 | - :uint16 733 | - :informationElementIndex 734 | 288: 735 | - :string 736 | - :p2pTechnology 737 | 289: 738 | - :string 739 | - :tunnelTechnology 740 | 290: 741 | - :string 742 | - :encryptedTechnology 743 | 291: 744 | - :skip 745 | 292: 746 | - :skip 747 | 293: 748 | - :skip 749 | 294: 750 | - :uint8 751 | - :bgpValidityState 752 | 295: 753 | - :uint32 754 | - :IPSecSPI 755 | 296: 756 | - :uint32 757 | - :greKey 758 | 297: 759 | - :uint8 760 | - :natType 761 | 298: 762 | - :uint64 763 | - :initiatorPackets 764 | 299: 765 | - :uint64 766 | - :responderPackets 767 | 300: 768 | - :string 769 | - :observationDomainName 770 | 301: 771 | - :uint64 772 | - :selectionSequenceId 773 | 302: 774 | - :uint64 775 | - :selectorId 776 | 303: 777 | - :uint16 778 | - :informationElementId 779 | 304: 780 | - :uint16 781 | - :selectorAlgorithm 782 | 305: 783 | - :uint32 784 | - :samplingPacketInterval 785 | 306: 786 | - :uint32 787 | - :samplingPacketSpace 788 | 307: 789 | - :uint32 790 | - :samplingTimeInterval 791 | 308: 792 | - :uint32 793 | - :samplingTimeSpace 794 | 309: 795 | - :uint32 796 | - :samplingSize 797 | 310: 798 | - :uint32 799 | - :samplingPopulation 800 | 311: 801 | - :double 802 | - :samplingProbability 803 | 312: 804 | - :uint16 805 | - :dataLinkFrameSize 806 | 313: 807 | - :string 808 | - :ipHeaderPacketSection 809 | 314: 810 | - :string 811 | - :ipPayloadPacketSection 812 | 315: 813 | - :string 814 | - :dataLinkFrameSection 815 | 316: 816 | - :string 817 | - :mplsLabelStackSection 818 | 317: 819 | - 
:string 820 | - :mplsPayloadPacketSection 821 | 318: 822 | - :uint64 823 | - :selectorIdTotalPktsObserved 824 | 319: 825 | - :uint64 826 | - :selectorIdTotalPktsSelected 827 | 320: 828 | - :double 829 | - :absoluteError 830 | 321: 831 | - :double 832 | - :relativeError 833 | 322: 834 | - :uint32 835 | - :observationTimeSeconds 836 | 323: 837 | - 8 838 | - :event_time_msec 839 | 324: 840 | - :uint64 841 | - :observationTimeMicroseconds 842 | 325: 843 | - :uint64 844 | - :observationTimeNanoseconds 845 | 326: 846 | - :uint64 847 | - :digestHashValue 848 | 327: 849 | - :uint64 850 | - :hashIPPayloadOffset 851 | 328: 852 | - :uint64 853 | - :hashIPPayloadSize 854 | 329: 855 | - :uint64 856 | - :hashOutputRangeMin 857 | 330: 858 | - :uint64 859 | - :hashOutputRangeMax 860 | 331: 861 | - :uint64 862 | - :hashSelectedRangeMin 863 | 332: 864 | - :uint64 865 | - :hashSelectedRangeMax 866 | 333: 867 | - :uint8 868 | - :hashDigestOutput 869 | 334: 870 | - :uint64 871 | - :hashInitialiserValue 872 | 335: 873 | - :string 874 | - :selectorName 875 | 336: 876 | - :double 877 | - :upperCILimit 878 | 337: 879 | - :double 880 | - :lowerCILimit 881 | 338: 882 | - :double 883 | - :confidenceLevel 884 | 339: 885 | - :uint8 886 | - :informationElementDataType 887 | 340: 888 | - :string 889 | - :informationElementDescription 890 | 341: 891 | - :string 892 | - :informationElementName 893 | 342: 894 | - :uint64 895 | - :informationElementRangeBegin 896 | 343: 897 | - :uint64 898 | - :informationElementRangeEnd 899 | 344: 900 | - :uint8 901 | - :informationElementSemantics 902 | 345: 903 | - :uint16 904 | - :informationElementUnits 905 | 346: 906 | - :uint32 907 | - :privateEnterpriseNumber 908 | 347: 909 | - :string 910 | - :virtualStationInterfaceId 911 | 348: 912 | - :string 913 | - :virtualStationInterfaceName 914 | 349: 915 | - :string 916 | - :virtualStationUUID 917 | 350: 918 | - :string 919 | - :virtualStationName 920 | 351: 921 | - :uint64 922 | - :layer2SegmentId 923 | 352: 924 | 
- :uint64 925 | - :layer2OctetDeltaCount 926 | 353: 927 | - :uint64 928 | - :layer2OctetTotalCount 929 | 354: 930 | - :uint64 931 | - :ingressUnicastPacketTotalCount 932 | 355: 933 | - :uint64 934 | - :ingressMulticastPacketTotalCount 935 | 356: 936 | - :uint64 937 | - :ingressBroadcastPacketTotalCount 938 | 357: 939 | - :uint64 940 | - :egressUnicastPacketTotalCount 941 | 358: 942 | - :uint64 943 | - :egressBroadcastPacketTotalCount 944 | 359: 945 | - :uint64 946 | - :monitoringIntervalStartMilliSeconds 947 | 360: 948 | - :uint64 949 | - :monitoringIntervalEndMilliSeconds 950 | 361: 951 | - :uint16 952 | - :postNATPortBlockStart 953 | 362: 954 | - :uint16 955 | - :postNATPortBlockEnd 956 | 363: 957 | - :uint16 958 | - :portRangeStepSize 959 | 364: 960 | - :uint16 961 | - :portRangeNumPorts 962 | 365: 963 | - :mac_addr 964 | - :staMacAddress 965 | 366: 966 | - :ip4_addr 967 | - :staIPv4Address 968 | 367: 969 | - :mac_addr 970 | - :wtpMacAddress 971 | 368: 972 | - :uint32 973 | - :ingressInterfaceType 974 | 369: 975 | - :uint32 976 | - :egressInterfaceType 977 | 370: 978 | - :uint16 979 | - :rtpSequenceNumber 980 | 371: 981 | - :string 982 | - :userName 983 | 372: 984 | - :string 985 | - :applicationCategoryName 986 | 373: 987 | - :string 988 | - :applicationSubCategoryName 989 | 374: 990 | - :string 991 | - :applicationGroupName 992 | 375: 993 | - :uint64 994 | - :originalFlowsPresent 995 | 376: 996 | - :uint64 997 | - :originalFlowsInitiated 998 | 377: 999 | - :uint64 1000 | - :originalFlowsCompleted 1001 | 378: 1002 | - :uint64 1003 | - :distinctCountOfSourceIPAddress 1004 | 379: 1005 | - :uint64 1006 | - :distinctCountOfDestinationIPAddress 1007 | 380: 1008 | - :uint32 1009 | - :distinctCountOfSourceIPv4Address 1010 | 381: 1011 | - :uint32 1012 | - :distinctCountOfDestinationIPv4Address 1013 | 382: 1014 | - :uint64 1015 | - :distinctCountOfSourceIPv6Address 1016 | 383: 1017 | - :uint64 1018 | - :distinctCountOfDestinationIPv6Address 1019 | 384: 1020 | - :uint8 
1021 | - :valueDistributionMethod 1022 | 385: 1023 | - :uint32 1024 | - :rfc3550JitterMilliseconds 1025 | 386: 1026 | - :uint32 1027 | - :rfc3550JitterMicroseconds 1028 | 387: 1029 | - :uint32 1030 | - :rfc3550JitterNanoseconds 1031 | 388: 1032 | - :uint8 1033 | - :dot1qDEI 1034 | 389: 1035 | - :uint8 1036 | - :dot1qCustomerDEI 1037 | 390: 1038 | - :uint16 1039 | - :flowSelectorAlgorithm 1040 | 391: 1041 | - :uint64 1042 | - :flowSelectedOctetDeltaCount 1043 | 392: 1044 | - :uint64 1045 | - :flowSelectedPacketDeltaCount 1046 | 393: 1047 | - :uint64 1048 | - :flowSelectedFlowDeltaCount 1049 | 394: 1050 | - :uint64 1051 | - :selectorIDTotalFlowsObserved 1052 | 395: 1053 | - :uint64 1054 | - :selectorIDTotalFlowsSelected 1055 | 396: 1056 | - :uint64 1057 | - :samplingFlowInterval 1058 | 397: 1059 | - :uint64 1060 | - :samplingFlowSpacing 1061 | 398: 1062 | - :uint64 1063 | - :flowSamplingTimeInterval 1064 | 399: 1065 | - :uint64 1066 | - :flowSamplingTimeSpacing 1067 | 400: 1068 | - :uint16 1069 | - :hashFlowDomain 1070 | 401: 1071 | - :uint64 1072 | - :transportOctetDeltaCount 1073 | 402: 1074 | - :uint64 1075 | - :transportPacketDeltaCount 1076 | 403: 1077 | - :ip4_addr 1078 | - :originalExporterIPv4Address 1079 | 404: 1080 | - :ip6_addr 1081 | - :originalExporterIPv6Address 1082 | 405: 1083 | - :uint32 1084 | - :originalObservationDomainId 1085 | 406: 1086 | - :uint32 1087 | - :intermediateProcessId 1088 | 407: 1089 | - :uint64 1090 | - :ignoredDataRecordTotalCount 1091 | 408: 1092 | - :uint16 1093 | - :dataLinkFrameType 1094 | 409: 1095 | - :uint16 1096 | - :sectionOffset 1097 | 410: 1098 | - :uint16 1099 | - :sectionExportedOctets 1100 | 411: 1101 | - :string 1102 | - :dot1qServiceInstanceTag 1103 | 412: 1104 | - :uint32 1105 | - :dot1qServiceInstanceId 1106 | 413: 1107 | - :uint8 1108 | - :dot1qServiceInstancePriority 1109 | 414: 1110 | - :mac_addr 1111 | - :dot1qCustomerSourceMacAddress 1112 | 415: 1113 | - :mac_addr 1114 | - :dot1qCustomerDestinationMacAddress 
1115 | 417: 1116 | - :uint64 1117 | - :postLayer2OctetDeltaCount 1118 | 418: 1119 | - :uint64 1120 | - :postMCastLayer2OctetDeltaCount 1121 | 420: 1122 | - :uint64 1123 | - :postLayer2OctetTotalCount 1124 | 421: 1125 | - :uint64 1126 | - :postMCastLayer2OctetTotalCount 1127 | 422: 1128 | - :uint64 1129 | - :minimumLayer2TotalLength 1130 | 423: 1131 | - :uint64 1132 | - :maximumLayer2TotalLength 1133 | 424: 1134 | - :uint64 1135 | - :droppedLayer2OctetDeltaCount 1136 | 425: 1137 | - :uint64 1138 | - :droppedLayer2OctetTotalCount 1139 | 426: 1140 | - :uint64 1141 | - :ignoredLayer2OctetTotalCount 1142 | 427: 1143 | - :uint64 1144 | - :notSentLayer2OctetTotalCount 1145 | 428: 1146 | - :uint64 1147 | - :layer2OctetDeltaSumOfSquares 1148 | 429: 1149 | - :uint64 1150 | - :layer2OctetTotalSumOfSquares 1151 | 430: 1152 | - :uint64 1153 | - :layer2FrameDeltaCount 1154 | 431: 1155 | - :uint64 1156 | - :layer2FrameTotalCount 1157 | 432: 1158 | - :ip4_addr 1159 | - :pseudoWireDestinationIPv4Address 1160 | 433: 1161 | - :uint64 1162 | - :ignoredLayer2FrameTotalCount 1163 | 8192: 1164 | - :uint32 1165 | - :streamcore_wan_rtt 1166 | 8193: 1167 | - :uint32 1168 | - :streamcore_net_app_resp_time 1169 | 8194: 1170 | - :uint32 1171 | - :streamcore_total_app_resp_time 1172 | 8195: 1173 | - :uint16 1174 | - :streamcore_tcp_retrans_rate 1175 | 8196: 1176 | - :uint8 1177 | - :streamcore_call_direction 1178 | 8256: 1179 | - :string 1180 | - :streamcore_hostname 1181 | 8257: 1182 | - :string 1183 | - :streamcore_url 1184 | 8258: 1185 | - :string 1186 | - :streamcore_ssl_cn 1187 | 8259: 1188 | - :string 1189 | - :streamcore_ssl_org 1190 | 8320: 1191 | - :uint16 1192 | - :streamcore_mos_lq 1193 | 8321: 1194 | - :uint16 1195 | - :streamcore_net_delay 1196 | 8322: 1197 | - :uint16 1198 | - :streamcore_net_loss 1199 | 8323: 1200 | - :uint16 1201 | - :streamcore_net_jitter 1202 | 8324: 1203 | - :uint16 1204 | - :streamcore_net_discard 1205 | 8325: 1206 | - :uint8 1207 | - 
:streamcore_rtp_clockrate_in 1208 | 8326: 1209 | - :uint8 1210 | - :streamcore_rtp_clockrate_out 1211 | 8327: 1212 | - :uint8 1213 | - :streamcore_codec_in 1214 | 8328: 1215 | - :uint8 1216 | - :streamcore_codec_out 1217 | 8384: 1218 | - :uint32 1219 | - :streamcore_id_rule_1 1220 | 8385: 1221 | - :uint32 1222 | - :streamcore_id_rule_2 1223 | 8386: 1224 | - :uint32 1225 | - :streamcore_id_rule_3 1226 | 8387: 1227 | - :uint32 1228 | - :streamcore_id_rule_4 1229 | 8388: 1230 | - :uint32 1231 | - :streamcore_id_rule_5 1232 | 8389: 1233 | - :uint32 1234 | - :streamcore_id_rule_6 1235 | 8390: 1236 | - :uint32 1237 | - :streamcore_id_rule_7 1238 | 8391: 1239 | - :uint32 1240 | - :streamcore_id_rule_8 1241 | 8392: 1242 | - :uint32 1243 | - :streamcore_id_rule_9 1244 | 8393: 1245 | - :uint32 1246 | - :streamcore_id_rule_10 1247 | 20000: 1248 | - :uint16 1249 | - :wlan_id 1250 | 33000: 1251 | - :acl_id_asa 1252 | - :ingress_acl_id 1253 | 33001: 1254 | - :acl_id_asa 1255 | - :egress_acl_id 1256 | 33002: 1257 | - :uint16 1258 | - :fw_ext_event 1259 | 40000: 1260 | - :string 1261 | - :username 1262 | 40001: 1263 | - :ip4_addr 1264 | - :xlate_src_addr_ipv4 1265 | 40002: 1266 | - :ip4_addr 1267 | - :xlate_dst_addr_ipv4 1268 | 40003: 1269 | - :uint16 1270 | - :xlate_src_port 1271 | 40004: 1272 | - :uint16 1273 | - :xlate_dst_port 1274 | 40005: 1275 | - :uint8 1276 | - :fw_event 1277 | 56701: 1278 | - :string 1279 | - :app_id 1280 | 56702: 1281 | - :string 1282 | - :user_id 1283 | 57590: 1284 | - :uint16 1285 | - :nprobe_proto 1286 | 57591: 1287 | - :string 1288 | - :nprobe_proto_name 1289 | -------------------------------------------------------------------------------- /lib/logstash/codecs/netflow.rb: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | require "logstash/codecs/base" 3 | require "logstash/namespace" 4 | require "logstash/timestamp" 5 | #require "logstash/json" 6 | require "json" 7 | 8 | require 
'logstash/plugin_mixins/event_support/event_factory_adapter' 9 | 10 | class LogStash::Codecs::Netflow < LogStash::Codecs::Base 11 | 12 | include LogStash::PluginMixins::EventSupport::EventFactoryAdapter 13 | 14 | config_name "netflow" 15 | 16 | # Netflow v9/v10 template cache TTL (minutes) 17 | config :cache_ttl, :validate => :number, :default => 4000 18 | 19 | # Where to save the template cache 20 | # This helps speed up processing when restarting logstash 21 | # (So you don't have to await the arrival of templates) 22 | # cache will save as path/netflow_templates.cache and/or path/ipfix_templates.cache 23 | config :cache_save_path, :validate => :path 24 | 25 | # Specify into what field you want the Netflow data. 26 | config :target, :validate => :string, :default => "netflow" 27 | 28 | # Only makes sense for ipfix, v9 already includes this 29 | # Setting to true will include the flowset_id in events 30 | # Allows you to work with sequences, for instance with the aggregate filter 31 | config :include_flowset_id, :validate => :boolean, :default => false 32 | 33 | # Specify which Netflow versions you will accept. 
34 | config :versions, :validate => :array, :default => [5, 9, 10] 35 | 36 | # Override YAML file containing Netflow field definitions 37 | config :netflow_definitions, :validate => :path 38 | 39 | # Override YAML file containing IPFIX field definitions 40 | config :ipfix_definitions, :validate => :path 41 | 42 | NETFLOW5_FIELDS = ['version', 'flow_seq_num', 'engine_type', 'engine_id', 'sampling_algorithm', 'sampling_interval', 'flow_records'] 43 | NETFLOW9_FIELDS = ['version', 'flow_seq_num'] 44 | NETFLOW9_SCOPES = { 45 | 1 => :scope_system, 46 | 2 => :scope_interface, 47 | 3 => :scope_line_card, 48 | 4 => :scope_netflow_cache, 49 | 5 => :scope_template, 50 | } 51 | IPFIX_FIELDS = ['version'] 52 | SWITCHED = /_switched$/ 53 | FLOWSET_ID = "flowset_id" 54 | 55 | def initialize(params = {}) 56 | super(params) 57 | @threadsafe = true 58 | end 59 | 60 | def clone(*args) 61 | self 62 | end 63 | 64 | def register 65 | require "logstash/codecs/netflow/util" 66 | 67 | @netflow_templates = TemplateRegistry.new(logger, @cache_ttl, @cache_save_path && "#{@cache_save_path}/netflow_templates.cache") 68 | @ipfix_templates = TemplateRegistry.new(logger, @cache_ttl, @cache_save_path && "#{@cache_save_path}/ipfix_templates.cache") 69 | 70 | # Path to default Netflow v9 field definitions 71 | filename = ::File.expand_path('netflow/netflow.yaml', ::File.dirname(__FILE__)) 72 | @netflow_fields = load_definitions(filename, @netflow_definitions) 73 | 74 | # Path to default IPFIX field definitions 75 | filename = ::File.expand_path('netflow/ipfix.yaml', ::File.dirname(__FILE__)) 76 | @ipfix_fields = load_definitions(filename, @ipfix_definitions) 77 | end # def register 78 | 79 | def decode(payload, metadata = nil, &block) 80 | # BinData::trace_reading do 81 | header = Header.read(payload) 82 | 83 | unless @versions.include?(header.version) 84 | @logger.warn("Ignoring Netflow version v#{header.version}") 85 | return 86 | end 87 | 88 | if header.version == 5 89 | flowset = 
Netflow5PDU.read(payload) 90 | flowset.records.each do |record| 91 | yield(decode_netflow5(flowset, record)) 92 | end 93 | elsif header.version == 9 94 | # BinData::trace_reading do 95 | flowset = Netflow9PDU.read(payload) 96 | flowset.records.each do |record| 97 | if metadata != nil 98 | decode_netflow9(flowset, record, metadata).each{|event| yield(event)} 99 | else 100 | decode_netflow9(flowset, record).each{|event| yield(event)} 101 | end 102 | # end 103 | end 104 | elsif header.version == 10 105 | # BinData::trace_reading do 106 | flowset = IpfixPDU.read(payload) 107 | flowset.records.each do |record| 108 | decode_ipfix(flowset, record).each { |event| yield(event) } 109 | end 110 | # end 111 | else 112 | @logger.warn("Unsupported Netflow version v#{header.version}") 113 | end 114 | # end 115 | rescue BinData::ValidityError, IOError => e 116 | @logger.warn("Invalid netflow packet received (#{e})") 117 | end 118 | 119 | private 120 | 121 | def decode_netflow5(flowset, record) 122 | event = { 123 | LogStash::Event::TIMESTAMP => LogStash::Timestamp.at(flowset.unix_sec.snapshot, flowset.unix_nsec.snapshot / 1000), 124 | @target => {} 125 | } 126 | 127 | # Copy some of the pertinent fields in the header to the event 128 | NETFLOW5_FIELDS.each do |f| 129 | event[@target][f] = flowset[f].snapshot 130 | end 131 | 132 | # Create fields in the event from each field in the flow record 133 | record.each_pair do |k, v| 134 | case k.to_s 135 | when SWITCHED 136 | # The flow record sets the first and last times to the device 137 | # uptime in milliseconds. 
Given the actual uptime is provided 138 | # in the flowset header along with the epoch seconds we can 139 | # convert these into absolute times 140 | millis = flowset.uptime - v 141 | seconds = flowset.unix_sec - (millis / 1000) 142 | micros = (flowset.unix_nsec / 1000) - ((millis % 1000) * 1000) 143 | if micros < 0 144 | seconds -= 1 145 | micros += 1000000 146 | end 147 | event[@target][k.to_s] = LogStash::Timestamp.at(seconds, micros).to_iso8601 148 | else 149 | event[@target][k.to_s] = v.snapshot 150 | end 151 | end 152 | 153 | event_factory.new_event(event) 154 | rescue BinData::ValidityError, IOError => e 155 | @logger.warn("Invalid netflow packet received (#{e})") 156 | end 157 | 158 | def decode_netflow9(flowset, record, metadata = nil) 159 | events = [] 160 | 161 | # Check for block of trailing padding 162 | if record.flowset_length == 0 163 | return events 164 | end 165 | 166 | case record.flowset_id 167 | when 0..1 168 | # Template flowset 169 | record.flowset_data.templates.each do |template| 170 | catch (:field) do 171 | fields = [] 172 | template_length = 0 173 | # Template flowset (0) or Options template flowset (1) ? 174 | if record.flowset_id == 0 175 | @logger.debug? and @logger.debug("Start processing template") 176 | template.record_fields.each do |field| 177 | if field.field_length > 0 178 | entry = netflow_field_for(field.field_type, field.field_length, template.template_id) 179 | throw :field unless entry 180 | fields += entry 181 | template_length += field.field_length 182 | end 183 | end 184 | else 185 | @logger.debug? 
and @logger.debug("Start processing options template") 186 | template.scope_fields.each do |field| 187 | if field.field_length > 0 188 | fields << [uint_field(0, field.field_length), NETFLOW9_SCOPES[field.field_type]] 189 | end 190 | template_length += field.field_length 191 | end 192 | template.option_fields.each do |field| 193 | entry = netflow_field_for(field.field_type, field.field_length, template.template_id) 194 | throw :field unless entry 195 | fields += entry 196 | template_length += field.field_length 197 | end 198 | end 199 | # We get this far, we have a list of fields 200 | #key = "#{flowset.source_id}|#{event["source"]}|#{template.template_id}" 201 | if metadata != nil 202 | key = "#{flowset.source_id}|#{template.template_id}|#{metadata["host"]}|#{metadata["port"]}" 203 | else 204 | key = "#{flowset.source_id}|#{template.template_id}" 205 | end 206 | @netflow_templates.register(key, fields) do |bindata| 207 | @logger.debug("Received template #{template.template_id} with fields #{fields.inspect}") 208 | @logger.debug("Received template #{template.template_id} of size #{template_length} bytes. Representing in #{bindata.num_bytes} BinData bytes") 209 | if template_length != bindata.num_bytes 210 | @logger.warn("Received template #{template.template_id} of size #{template_length} bytes doesn't match BinData representation we built (#{bindata.num_bytes} bytes)") 211 | end 212 | end 213 | end 214 | end 215 | when 256..65535 216 | # Data flowset 217 | #key = "#{flowset.source_id}|#{event["source"]}|#{record.flowset_id}" 218 | @logger.debug? 
and @logger.debug("Start processing data flowset #{record.flowset_id}") 219 | if metadata != nil 220 | key = "#{flowset.source_id}|#{record.flowset_id}|#{metadata["host"]}|#{metadata["port"]}" 221 | else 222 | key = "#{flowset.source_id}|#{record.flowset_id}" 223 | end 224 | 225 | template = @netflow_templates.fetch(key) 226 | 227 | if !template 228 | @logger.warn("Can't (yet) decode flowset id #{record.flowset_id} from source id #{flowset.source_id}, because no template to decode it with has been received. This message will usually go away after 1 minute.") 229 | return events 230 | end 231 | 232 | length = record.flowset_length - 4 233 | 234 | # Template shouldn't be longer than the record 235 | # As fas as padding is concerned, the RFC defines a SHOULD for 4-word alignment 236 | # so we won't complain about that. 237 | if template.num_bytes != nil 238 | if template.num_bytes > length 239 | @logger.warn("Template length exceeds flowset length, skipping", :template_id => record.flowset_id, :template_length => template.num_bytes, :record_length => length) 240 | return events 241 | end 242 | end 243 | 244 | array = BinData::Array.new(:type => template, :initial_length => length / template.num_bytes) 245 | records = array.read(record.flowset_data) 246 | 247 | flowcounter = 1 248 | records.each do |r| 249 | @logger.debug? 
and @logger.debug("Start processing flow #{flowcounter} from data flowset id #{record.flowset_id}") 250 | event = { 251 | LogStash::Event::TIMESTAMP => LogStash::Timestamp.at(flowset.unix_sec), 252 | @target => {} 253 | } 254 | 255 | # Fewer fields in the v9 header 256 | NETFLOW9_FIELDS.each do |f| 257 | event[@target][f] = flowset[f].snapshot 258 | end 259 | 260 | event[@target][FLOWSET_ID] = record.flowset_id.snapshot 261 | 262 | r.each_pair do |k, v| 263 | case k.to_s 264 | when SWITCHED 265 | millis = flowset.uptime - v 266 | seconds = flowset.unix_sec - (millis / 1000) 267 | # v9 did away with the nanosecs field 268 | micros = 1000000 - ((millis % 1000) * 1000) 269 | event[@target][k.to_s] = LogStash::Timestamp.at(seconds, micros).to_iso8601 270 | else 271 | event[@target][k.to_s] = v.snapshot 272 | end 273 | end 274 | 275 | events << event_factory.new_event(event) 276 | flowcounter += 1 277 | end 278 | else 279 | @logger.warn("Unsupported flowset id #{record.flowset_id}") 280 | end 281 | 282 | events 283 | rescue BinData::ValidityError, IOError => e 284 | @logger.warn("Invalid netflow packet received (#{e})") 285 | end 286 | 287 | def decode_ipfix(flowset, record) 288 | events = [] 289 | 290 | case record.flowset_id 291 | when 2..3 292 | record.flowset_data.templates.each do |template| 293 | catch (:field) do 294 | fields = [] 295 | # Template flowset (2) or Options template flowset (3) ? 296 | template_fields = (record.flowset_id == 2) ? template.record_fields : (template.scope_fields.to_ary + template.option_fields.to_ary) 297 | template_fields.each do |field| 298 | field_type = field.field_type 299 | field_length = field.field_length 300 | enterprise_id = field.enterprise ? 
field.enterprise_id : 0 301 | 302 | entry = ipfix_field_for(field_type, enterprise_id, field.field_length) 303 | throw :field unless entry 304 | fields += entry 305 | end 306 | # FIXME Source IP address required in key 307 | key = "#{flowset.observation_domain_id}|#{template.template_id}" 308 | 309 | @ipfix_templates.register(key, fields) 310 | end 311 | end 312 | when 256..65535 313 | # Data flowset 314 | key = "#{flowset.observation_domain_id}|#{record.flowset_id}" 315 | template = @ipfix_templates.fetch(key) 316 | 317 | if !template 318 | @logger.warn("Can't (yet) decode flowset id #{record.flowset_id} from observation domain id #{flowset.observation_domain_id}, because no template to decode it with has been received. This message will usually go away after 1 minute.") 319 | return events 320 | end 321 | 322 | array = BinData::Array.new(:type => template, :read_until => :eof) 323 | records = array.read(record.flowset_data) 324 | 325 | records.each do |r| 326 | event = { 327 | LogStash::Event::TIMESTAMP => LogStash::Timestamp.at(flowset.unix_sec), 328 | @target => {} 329 | } 330 | 331 | IPFIX_FIELDS.each do |f| 332 | event[@target][f] = flowset[f].snapshot 333 | end 334 | 335 | if @include_flowset_id 336 | event[@target][FLOWSET_ID] = record.flowset_id.snapshot 337 | end 338 | 339 | r.each_pair do |k, v| 340 | case k.to_s 341 | when /^flow(?:Start|End)Seconds$/ 342 | event[@target][k.to_s] = LogStash::Timestamp.at(v.snapshot).to_iso8601 343 | when /^flow(?:Start|End)(Milli|Micro|Nano)seconds$/ 344 | case $1 345 | when 'Milli' 346 | secs = v.snapshot.to_i / 1000 347 | micros = (v.snapshot.to_i % 1000) * 1000 348 | # Use the 2 args Timestamp.at to avoid the precision under milliseconds. 
Doing math division (like /1000 on a float) 349 | # could introduce error in representation that makes 0.192 millis to be expressed like 0.192000001 nanoseconds, 350 | # so here we cut to millis, but there is a rounding when representing to to_iso8601, so 191998 micros becomes 351 | # 192 millis in LogStash 8 while in previous versions it appears truncated like 191. 352 | event[@target][k.to_s] = LogStash::Timestamp.at(secs, micros).to_iso8601 353 | when 'Micro', 'Nano' 354 | # For now we'll stick to assuming ntp timestamps, 355 | # Netscaler implementation may be buggy though: 356 | # https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=11047 357 | # This only affects the fraction though 358 | ntp_seconds = (v.snapshot >> 32) & 0xFFFFFFFF 359 | ntp_fraction = (v.snapshot & 0xFFFFFFFF).to_f / 2**32 360 | event[@target][k.to_s] = LogStash::Timestamp.at(Time.utc(1900,1,1).to_i + ntp_seconds, ntp_fraction * 1000000).to_iso8601 361 | end 362 | else 363 | event[@target][k.to_s] = v.snapshot 364 | end 365 | end 366 | 367 | events << event_factory.new_event(event) 368 | end 369 | else 370 | @logger.warn("Unsupported flowset id #{record.flowset_id}") 371 | end 372 | 373 | events 374 | rescue BinData::ValidityError => e 375 | @logger.warn("Invalid IPFIX packet received (#{e})") 376 | end 377 | 378 | def load_definitions(defaults, extra) 379 | begin 380 | fields = YAML.load_file(defaults) 381 | rescue Exception => e 382 | raise "#{self.class.name}: Bad syntax in definitions file #{defaults}" 383 | end 384 | 385 | # Allow the user to augment/override/rename the default fields 386 | if extra 387 | raise "#{self.class.name}: definitions file #{extra} does not exist" unless File.exists?(extra) 388 | begin 389 | fields.merge!(YAML.load_file(extra)) 390 | rescue Exception => e 391 | raise "#{self.class.name}: Bad syntax in definitions file #{extra}" 392 | end 393 | end 394 | 395 | fields 396 | end 397 | 398 | def uint_field(length, default) 399 | # If length is 4, return :uint32, 
etc. and use default if length is 0 400 | ("uint" + (((length > 0) ? length : default) * 8).to_s).to_sym 401 | end # def uint_field 402 | 403 | def skip_field(field, type, length) 404 | if length == 65535 405 | field[0] = :VarSkip 406 | else 407 | field += [nil, {:length => length.to_i}] 408 | end 409 | 410 | field 411 | end # def skip_field 412 | 413 | def string_field(field, type, length) 414 | if length == 65535 415 | field[0] = :VarString 416 | else 417 | field[0] = :string 418 | field += [{ :length => length.to_i, :trim_padding => true }] 419 | end 420 | 421 | field 422 | end # def string_field 423 | 424 | def get_rfc6759_application_id_class(field,length) 425 | case length 426 | when 2 427 | field[0] = :Application_Id16 428 | when 3 429 | field[0] = :Application_Id24 430 | when 4 431 | field[0] = :Application_Id32 432 | when 5 433 | field[0] = :Application_Id40 434 | when 7 435 | field[0] = :Application_Id56 436 | when 8 437 | field[0] = :Application_Id64 438 | when 9 439 | field[0] = :Application_Id72 440 | else 441 | @logger.warn("Unsupported application_id length encountered, skipping", :field => field, :length => length) 442 | nil 443 | end 444 | field[0] 445 | end 446 | 447 | def netflow_field_for(type, length, template_id) 448 | if @netflow_fields.include?(type) 449 | field = @netflow_fields[type].clone 450 | if field.is_a?(Array) 451 | 452 | field[0] = uint_field(length, field[0]) if field[0].is_a?(Integer) 453 | 454 | # Small bit of fixup for: 455 | # - skip or string field types where the length is dynamic 456 | # - uint(8|16|24|32|64} where we use the length as specified by the 457 | # template instead of the YAML (e.g. ipv6_flow_label is 3 bytes in 458 | # the YAML and Cisco doc, but Cisco ASR9k sends 4 bytes). 459 | # Another usecase is supporting reduced-size encoding as per RFC7011 6.2 460 | # - application_id where we use the length as specified by the 461 | # template and map it to custom types for handling. 
462 | # 463 | case field[0] 464 | when :uint8 465 | field[0] = uint_field(length, field[0]) 466 | when :uint16 467 | if length>2 468 | @logger.warn("Reduced-size encoding for uint16 is larger than uint16", :field => field, :length => length) 469 | end 470 | field[0] = uint_field(length, field[0]) 471 | when :uint24 472 | field[0] = uint_field(length, field[0]) 473 | when :uint32 474 | if length>4 475 | @logger.warn("Reduced-size encoding for uint32 is larger than uint32", :field => field, :length => length) 476 | end 477 | field[0] = uint_field(length, field[0]) 478 | when :uint64 479 | if length>8 480 | @logger.warn("Reduced-size encoding for uint64 is larger than uint64", :field => field, :length => length) 481 | end 482 | field[0] = uint_field(length, field[0]) 483 | when :application_id 484 | field[0] = get_rfc6759_application_id_class(field,length) 485 | when :skip 486 | field += [nil, {:length => length.to_i}] 487 | when :string 488 | field = string_field(field, type, length.to_i) 489 | end 490 | 491 | @logger.debug? 
and @logger.debug("Field definition complete for template #{template_id}", :field => field) 492 | 493 | [field] 494 | else 495 | @logger.warn("Definition should be an array", :field => field) 496 | nil 497 | end 498 | else 499 | @logger.warn("Unsupported field in template #{template_id}", :type => type, :length => length) 500 | nil 501 | end 502 | end # def netflow_field_for 503 | 504 | def ipfix_field_for(type, enterprise, length) 505 | if @ipfix_fields.include?(enterprise) 506 | if @ipfix_fields[enterprise].include?(type) 507 | field = @ipfix_fields[enterprise][type].clone 508 | else 509 | @logger.warn("Unsupported enterprise field", :type => type, :enterprise => enterprise, :length => length) 510 | end 511 | else 512 | @logger.warn("Unsupported enterprise", :enterprise => enterprise) 513 | end 514 | 515 | return nil unless field 516 | 517 | if field.is_a?(Array) 518 | case field[0] 519 | when :skip 520 | field = skip_field(field, type, length.to_i) 521 | when :string 522 | field = string_field(field, type, length.to_i) 523 | when :octetarray 524 | field[0] = :OctetArray 525 | field += [{:initial_length => length.to_i}] 526 | when :uint64 527 | field[0] = uint_field(length, 8) 528 | when :uint32 529 | field[0] = uint_field(length, 4) 530 | when :uint16 531 | field[0] = uint_field(length, 2) 532 | when :application_id 533 | field[0] = get_rfc6759_application_id_class(field,length) 534 | end 535 | 536 | @logger.debug("Definition complete", :field => field) 537 | [field] 538 | else 539 | @logger.warn("Definition should be an array", :field => field) 540 | end 541 | end 542 | 543 | class TemplateRegistry 544 | ## 545 | # @param logger [Logger] 546 | # @param ttl [Integer] 547 | # @param file_path [String] (optional) 548 | def initialize(logger, ttl, file_path=nil) 549 | @logger = logger 550 | @ttl = Integer(ttl) 551 | @file_path = file_path 552 | 553 | @mutex = Mutex.new 554 | 555 | @bindata_struct_cache = Vash.new 556 | @bindata_spec_cache = Vash.new 557 | 558 | 
do_load unless file_path.nil? 559 | end 560 | 561 | ## 562 | # Register a Template by name using an array of type/name tuples. 563 | # 564 | # @param key [String]: the key under which to save this template 565 | # @param field_tuples [Array>]: an array of [type,name] tuples, e.g., ["uint32","fieldName"] 566 | # @return [BinData::Struct] 567 | # 568 | # If a block is given, the template is yielded to the block _before_ being saved in the cache. 569 | # 570 | # @yieldparam [BinData::Struct] 571 | # @yieldreturn [void] 572 | # @yieldthrow :invalid_template : if the template is deemed invalid within the block, throwing this symbol causes 573 | # the template to not be cached. 574 | # 575 | # @threadsafe 576 | def register(key, field_tuples, &block) 577 | @mutex.synchronize do 578 | do_register(key, field_tuples, &block) 579 | end 580 | end 581 | 582 | ## 583 | # Fetch a Template by name 584 | # 585 | # @param key [String] 586 | # @return [BinData::Struct] 587 | # 588 | # @threadsafe 589 | def fetch(key) 590 | @mutex.synchronize do 591 | do_fetch(key) 592 | end 593 | end 594 | 595 | ## 596 | # Force persist, potentially cleaning up elements from the file-based cache that have already been evicted from 597 | # the memory-based cache 598 | def persist() 599 | @mutex.synchronize do 600 | do_persist 601 | end 602 | end 603 | 604 | private 605 | attr_reader :logger 606 | attr_reader :file_path 607 | 608 | ## 609 | # @see `TemplateRegistry#register(String,Array<>)` 610 | # @api private 611 | def do_register(key, field_tuples) 612 | template = BinData::Struct.new(:fields => field_tuples, :endian => :big) 613 | 614 | catch(:invalid_template) do 615 | yield(template) if block_given? 
616 | 617 | @bindata_spec_cache[key, @ttl] = field_tuples 618 | @bindata_struct_cache[key, @ttl] = template 619 | 620 | do_persist 621 | 622 | template 623 | end 624 | end 625 | 626 | ## 627 | # @api private 628 | def do_load 629 | unless File.exists?(file_path) 630 | logger.warn('Template Cache does not exist', :file_path => file_path) 631 | return 632 | end 633 | 634 | logger.debug? and logger.debug('Loading templates from template cache', :file_path => file_path) 635 | file_data = File.read(file_path) 636 | templates_cache = JSON.parse(file_data) 637 | templates_cache.each do |key, fields| 638 | do_register(key, fields) 639 | end 640 | 641 | logger.warn('Template Cache not writable', file_path: file_path) unless File.writable?(file_path) 642 | rescue => e 643 | logger.error('Template Cache could not be loaded', :file_path => file_path, :exception => e.message) 644 | end 645 | 646 | ## 647 | # @see `TemplateRegistry#persist` 648 | # @api private 649 | def do_persist 650 | return if file_path.nil? 651 | 652 | logger.debug? and logger.debug('Writing templates to template cache', :file_path => file_path) 653 | 654 | fail('Template Cache not writable') if File.exists?(file_path) && !File.writable?(file_path) 655 | 656 | do_cleanup! 657 | 658 | templates_cache = @bindata_spec_cache 659 | 660 | File.open(file_path, 'w') do |file| 661 | file.write(templates_cache.to_json) 662 | end 663 | rescue Exception => e 664 | logger.error('Template Cache could not be saved', :file_path => file_path, :exception => e.message) 665 | end 666 | 667 | ## 668 | # @see `TemplateRegistry#cleanup` 669 | # @api private 670 | def do_cleanup! 671 | @bindata_spec_cache.cleanup! 672 | @bindata_struct_cache.cleanup! 
673 | end 674 | 675 | ## 676 | # @see `TemplateRegistry#fetch(String)` 677 | # @api private 678 | def do_fetch(key) 679 | @bindata_struct_cache[key] 680 | end 681 | end 682 | end # class LogStash::Filters::Netflow 683 | --------------------------------------------------------------------------------