├── .gitignore
├── .travis.yml
├── Gemfile
├── README.md
├── Rakefile
├── VERSION
├── example
└── fluentd.conf
├── fluent-plugin-netflow.gemspec
├── lib
└── fluent
│ └── plugin
│ ├── in_netflow.rb
│ ├── netflow_fields.yaml
│ ├── netflow_records.rb
│ ├── parser_netflow.rb
│ └── vash.rb
└── test
├── dump
├── netflow.v5.dump
├── netflow.v9.dump
├── netflow.v9.flowStartMilliseconds.dump
├── netflow.v9.mpls-data.dump
├── netflow.v9.mpls-template.dump
├── netflow.v9.sampler.dump
├── netflow.v9.sampler_template.dump
├── netflow.v9.template.as2.dump
├── netflow.v9.template.dump
└── netflow.v9.template.flowStartMilliseconds.dump
├── helper.rb
├── test_in_netflow.rb
├── test_parser_netflow.rb
└── test_parser_netflow9.rb
/.gitignore:
--------------------------------------------------------------------------------
1 | *.gem
2 | *.rbc
3 | .bundle
4 | .config
5 | .yardoc
6 | Gemfile.lock
7 | InstalledFiles
8 | _yardoc
9 | coverage
10 | doc/
11 | lib/bundler/man
12 | pkg
13 | rdoc
14 | spec/reports
15 | test/tmp
16 | test/version_tmp
17 | tmp
18 | # For TextMate, emacs, vim
19 | *.tmproj
20 | tmtags
21 | *~
22 | \#*
23 | .\#*
24 | *.swp
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: ruby
2 |
3 | rvm:
4 | - 2.1
5 | - 2.2
6 | - 2.3.1
7 | - 2.4.0
8 | - ruby-head
9 | - rbx
10 |
11 | matrix:
12 | allow_failures:
13 | - rvm: ruby-head
14 | - rvm: rbx
15 |
16 | before_install: gem update bundler
17 |
18 | script: bundle exec rake test
19 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source "https://rubygems.org"
2 |
3 | gemspec
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Netflow plugin for Fluentd
2 |
3 | [](https://travis-ci.org/repeatedly/fluent-plugin-netflow)
4 |
5 |
6 | ## Overview
7 |
8 | [Fluentd](http://fluentd.org/) input plugin that acts as Netflow v5/v9 collector.
9 |
10 | ## Requirements
11 |
12 | | fluent-plugin-netflow | fluentd | ruby |
13 | |------------------------|---------|------|
14 | | >= 1.0.0 | >= v0.14.0 | >= 2.1 |
15 | | < 1.0.0 | >= v0.12.0 | >= 1.9 |
16 |
17 |
18 | ## Installation
19 |
20 | Use RubyGems:
21 |
22 | fluent-gem install fluent-plugin-netflow
23 |
24 |
25 | ## Configuration
26 |
27 |
28 | type netflow
29 | tag netflow.event
30 |
31 | # optional parameters
32 | bind 192.168.0.1
33 | port 2055
34 | cache_ttl 6000
35 | versions [5, 9]
36 | definitions /path/to/custom_fields.yaml
37 |
38 |
39 | **bind**
40 |
41 | IP address on which the plugin will accept Netflow.
42 | (Default: '0.0.0.0')
43 |
44 | **port**
45 |
46 | UDP port number on which the plugin will accept Netflow.
47 | (Default: 5140)
48 |
49 | **cache_ttl**
50 |
51 | Template cache TTL for Netflow v9 in seconds. Templates not refreshed from the Netflow v9 exporter within the TTL are expired at the plugin.
52 | (Default: 4000)
53 |
54 | **versions**
55 |
56 | Netflow versions which are acceptable.
57 | (Default: [5, 9])
58 |
59 | **switched_times_from_uptime**
60 |
61 | When set to true, the plugin stores system uptime for ```first_switched``` and ```last_switched``` instead of ISO8601-formatted absolute time.
62 | (Default: false)
63 |
64 | **definitions**
65 |
66 | YAML file containing Netflow field definitions to override pre-defined templates. An example is shown below:
67 |
68 | ```yaml
69 | ---
70 | option:
71 | 4: # field value
72 | - :uint8 # field length
73 | - :protocol # field type
74 | ```
75 |
76 |
77 | ## Performance Evaluation
78 |
79 | Benchmark for v5 protocol on Macbook Air (Early 2014, 1.7 GHz Intel Core i7):
80 | * 0 packets dropped in 32,000 records/second (for 3,000,000 packets)
81 | * 45,000 records/second in maximum (for flooding netflow packets)
82 |
83 | Tested with the packet generator below:
84 |
85 | * https://github.com/mshindo/NetFlow-Generator
86 | * `./flowgen -n3000000 -i50 -w1 -p5140 localhost`
87 |
88 | And configuration:
89 |
90 |
91 | @type netflow
92 | tag netflow.event
93 | bind 0.0.0.0
94 | port 5140
95 | switched_times_from_uptime yes
96 |
97 |
98 | @type flowcounter
99 | unit minute
100 | count_keys count # missing column for counting events only
101 | tag flowcount
102 |
103 |
104 | @type stdout
105 |
106 |
107 |
108 | ## Tips
109 |
110 | ### Use netflow parser in other plugins
111 |
112 | ```ruby
113 | require 'fluent/plugin/parser_netflow'
114 |
115 | parser = Fluent::Plugin::NetflowParser.new
116 | parser.configure(conf)
117 |
118 | # Netflow v5
119 | parser.call(payload) do |time, record|
120 | # do something
121 | end
122 |
123 | # Netflow v9
124 | parser.call(payload, source_ip_address) do |time, record|
125 | # do something
126 | end
127 | ```
128 |
129 | **NOTE:**
130 | If the plugin receives Netflow v9 from multiple sources, provide ```source_ip_address``` argument to parse correctly.
131 |
132 | ### Field definition for Netflow v9
133 |
134 | Both option and scope fields for Netflow v9 are defined in YAML, where two parameters are described for each field value like:
135 |
136 | ```yaml
137 | option:
138 | ...
139 | 4: # field value
140 | - :uint8 # field length
141 | - :protocol # field type
142 | ```
143 |
144 | See [RFC3954 document](https://www.ietf.org/rfc/rfc3954.txt) for more details.
145 |
146 | When int value specified for field length, the template parser in this plugin will prefer a field length in received template flowset over YAML. The int value in YAML will be used as a default value only when the length in received flowset is invalid.
147 |
148 | ```yaml
149 | option:
150 | 1:
151 | - 4 # means :uint32, which is just a default
152 | - :in_bytes
153 | ```
154 |
155 | When ```:skip``` is described for a field, the template parser will learn the length from received template flowset and skip the field when data flowsets are processed.
156 |
157 | ```yaml
158 | option:
159 | ...
160 | 43:
161 | - :skip
162 | ```
163 |
164 | **NOTE:**
165 | The definitions don't exactly reflect RFC3954 in order to cover some illegal implementations which export Netflow v9 in bad field length.
166 |
167 | ```yaml
168 | 31:
169 | - 3 # Some system exports in 4 bytes despite of RFC
170 | - :ipv6_flow_label
171 | ...
172 | 48:
173 | - 1 # Some system exports in 2 bytes despite of RFC
174 | - :flow_sampler_id
175 | ```
176 |
177 | ### PaloAlto Netflow
178 |
179 | PaloAlto Netflow has different field definitions.
180 | See this definitions for PaloAlto Netflow: https://github.com/repeatedly/fluent-plugin-netflow/issues/27#issuecomment-269197495
181 |
182 | ### More speed ?
183 |
184 | :bullettrain_side: Try ```switched_times_from_uptime true``` option !
185 |
186 |
187 | ## TODO
188 |
189 | * Netflow v9 protocol parser optimization
190 | * Use Fluentd feature instead of own handlers
191 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 |
2 | require 'bundler'
3 | Bundler::GemHelper.install_tasks
4 |
5 | require 'rake/testtask'
6 |
7 | Rake::TestTask.new(:test) do |test|
8 | test.libs << 'lib' << 'test'
9 | test.test_files = FileList['test/**/test_*.rb']
10 | test.verbose = true
11 | end
12 |
13 | task :default => [:build]
14 |
15 |
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 1.1.0
2 |
--------------------------------------------------------------------------------
/example/fluentd.conf:
--------------------------------------------------------------------------------
1 |
2 | @type netflow
3 | bind 127.0.0.1
4 | tag example.netflow
5 |
6 |
7 |
8 | @type stdout
9 |
10 |
--------------------------------------------------------------------------------
/fluent-plugin-netflow.gemspec:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | $:.push File.expand_path('../lib', __FILE__)
3 |
4 | Gem::Specification.new do |gem|
5 | gem.name = "fluent-plugin-netflow"
6 | gem.description = "Netflow plugin for Fluentd"
7 | gem.homepage = "https://github.com/repeatedly/fluent-plugin-netflow"
8 | gem.summary = gem.description
9 | gem.version = File.read("VERSION").strip
10 | gem.authors = ["Masahiro Nakagawa"]
11 | gem.email = "repeatedly@gmail.com"
12 | #gem.platform = Gem::Platform::RUBY
13 | gem.license = 'Apache License (2.0)'
14 | gem.files = `git ls-files`.split("\n")
15 | gem.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
16 | gem.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
17 | gem.require_paths = ['lib']
18 |
19 | gem.add_dependency "fluentd", [">= 0.14.10", "< 2"]
20 | gem.add_dependency "bindata", "~> 2.1"
21 | gem.add_development_dependency "rake", ">= 0.9.2"
22 | gem.add_development_dependency "test-unit", "~> 3.0"
23 | end
24 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/in_netflow.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Fluent
3 | #
4 | # Copyright (C) 2014 Masahiro Nakagawa
5 | #
6 | # Licensed under the Apache License, Version 2.0 (the "License");
7 | # you may not use this file except in compliance with the License.
8 | # You may obtain a copy of the License at
9 | #
10 | # http://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing, software
13 | # distributed under the License is distributed on an "AS IS" BASIS,
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | # See the License for the specific language governing permissions and
16 | # limitations under the License.
17 | #
18 |
19 | require 'fluent/plugin/input'
20 | require 'fluent/plugin/parser_netflow'
21 |
22 | module Fluent::Plugin
23 | class NetflowInput < Input
24 | Fluent::Plugin.register_input('netflow', self)
25 |
26 | helpers :server
27 |
28 | config_param :port, :integer, default: 5140
29 | config_param :bind, :string, default: '0.0.0.0'
30 | config_param :tag, :string
31 | config_param :protocol_type, default: :udp do |val|
32 | case val.downcase
33 | when 'udp'
34 | :udp
35 | else
36 | raise Fluent::ConfigError, "netflow input protocol type should be 'udp'"
37 | end
38 | end
39 | config_param :max_bytes, :integer, default: 2048
40 |
41 | def multi_workers_ready?
42 | true
43 | end
44 |
45 | def configure(conf)
46 | super
47 |
48 | @parser = Fluent::Plugin::NetflowParser.new
49 | @parser.configure(conf)
50 | end
51 |
52 | def start
53 | super
54 | server_create(:in_netflow_server, @port, bind: @bind, proto: @protocol_type, max_bytes: @max_bytes) do |data, sock|
55 | receive_data(sock.remote_host, data)
56 | end
57 | end
58 |
59 | def shutdown
60 | super
61 | end
62 |
63 | protected
64 |
65 | def receive_data(host, data)
66 | log.on_debug { log.debug "received logs", :host => host, :data => data }
67 |
68 | @parser.call(data, host) { |time, record|
69 | unless time && record
70 | log.warn "pattern not match: #{data.inspect}"
71 | return
72 | end
73 |
74 | record['host'] = host
75 | router.emit(@tag, Integer(time), record)
76 | }
77 | rescue => e
78 | log.warn "unexpected error on parsing", data: data.dump, error_class: e.class, error: e.message
79 | log.warn_backtrace
80 | end
81 | end
82 | end
83 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/netflow_fields.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | option:
3 | 1:
4 | - 4
5 | - :in_bytes
6 | 2:
7 | - 4
8 | - :in_pkts
9 | 3:
10 | - 4
11 | - :flows
12 | 4:
13 | - :uint8
14 | - :protocol
15 | 5:
16 | - :uint8
17 | - :src_tos
18 | 6:
19 | - :uint8
20 | - :tcp_flags
21 | 7:
22 | - :uint16
23 | - :l4_src_port
24 | 8:
25 | - :ip4_addr
26 | - :ipv4_src_addr
27 | 9:
28 | - :uint8
29 | - :src_mask
30 | 10:
31 | - 2
32 | - :input_snmp
33 | 11:
34 | - :uint16
35 | - :l4_dst_port
36 | 12:
37 | - :ip4_addr
38 | - :ipv4_dst_addr
39 | 13:
40 | - :uint8
41 | - :dst_mask
42 | 14:
43 | - 2
44 | - :output_snmp
45 | 15:
46 | - :ip4_addr
47 | - :ipv4_next_hop
48 | 16:
49 | - 2
50 | - :src_as
51 | 17:
52 | - 2
53 | - :dst_as
54 | 18:
55 | - :ip4_addr
56 | - :bgp_ipv4_next_hop
57 | 19:
58 | - 4
59 | - :mul_dst_pkts
60 | 20:
61 | - 4
62 | - :mul_dst_bytes
63 | 21:
64 | - :uint32
65 | - :last_switched
66 | 22:
67 | - :uint32
68 | - :first_switched
69 | 23:
70 | - 4
71 | - :out_bytes
72 | 24:
73 | - 4
74 | - :out_pkts
75 | 25:
76 | - :uint16
77 | - :min_pkt_length
78 | 26:
79 | - :uint16
80 | - :max_pkt_length
81 | 27:
82 | - :ip6_addr
83 | - :ipv6_src_addr
84 | 28:
85 | - :ip6_addr
86 | - :ipv6_dst_addr
87 | 29:
88 | - :uint8
89 | - :ipv6_src_mask
90 | 30:
91 | - :uint8
92 | - :ipv6_dst_mask
93 | 31:
94 | - 3
95 | - :ipv6_flow_label
96 | 32:
97 | - :uint16
98 | - :icmp_type
99 | 33:
100 | - :uint8
101 | - :mul_igmp_type
102 | 34:
103 | - :uint32
104 | - :sampling_interval
105 | 35:
106 | - :uint8
107 | - :sampling_algorithm
108 | 36:
109 | - :uint16
110 | - :flow_active_timeout
111 | 37:
112 | - :uint16
113 | - :flow_inactive_timeout
114 | 38:
115 | - :uint8
116 | - :engine_type
117 | 39:
118 | - :uint8
119 | - :engine_id
120 | 40:
121 | - 4
122 | - :total_bytes_exp
123 | 41:
124 | - 4
125 | - :total_pkts_exp
126 | 42:
127 | - 4
128 | - :total_flows_exp
129 | 43:
130 | - :skip
131 | 44:
132 | - :ip4_addr
133 | - :ipv4_src_prefix
134 | 45:
135 | - :ip4_addr
136 | - :ipv4_dst_prefix
137 | 46:
138 | - :uint8
139 | - :mpls_top_label_type
140 | 47:
141 | - :uint32
142 | - :mpls_top_label_ip_addr
143 | 48:
144 | - 1
145 | - :flow_sampler_id
146 | 49:
147 | - :uint8
148 | - :flow_sampler_mode
149 | 50:
150 | - :uint32
151 | - :flow_sampler_random_interval
152 | 51:
153 | - :skip
154 | 52:
155 | - :uint8
156 | - :min_ttl
157 | 53:
158 | - :uint8
159 | - :max_ttl
160 | 54:
161 | - :uint16
162 | - :ipv4_ident
163 | 55:
164 | - :uint8
165 | - :dst_tos
166 | 56:
167 | - :mac_addr
168 | - :in_src_mac
169 | 57:
170 | - :mac_addr
171 | - :out_dst_mac
172 | 58:
173 | - :uint16
174 | - :src_vlan
175 | 59:
176 | - :uint16
177 | - :dst_vlan
178 | 60:
179 | - :uint8
180 | - :ip_protocol_version
181 | 61:
182 | - :uint8
183 | - :direction
184 | 62:
185 | - :ip6_addr
186 | - :ipv6_next_hop
187 | 63:
188 | - :ip6_addr
189 | - :bgp_ipv6_next_hop
190 | 64:
191 | - :uint32
192 | - :ipv6_option_headers
193 | 65:
194 | - :skip
195 | 66:
196 | - :skip
197 | 67:
198 | - :skip
199 | 68:
200 | - :skip
201 | 69:
202 | - :skip
203 | 70:
204 | - :mpls_label
205 | - :mpls_label_1
206 | 71:
207 | - :mpls_label
208 | - :mpls_label_2
209 | 72:
210 | - :mpls_label
211 | - :mpls_label_3
212 | 73:
213 | - :mpls_label
214 | - :mpls_label_4
215 | 74:
216 | - :mpls_label
217 | - :mpls_label_5
218 | 75:
219 | - :mpls_label
220 | - :mpls_label_6
221 | 76:
222 | - :mpls_label
223 | - :mpls_label_7
224 | 77:
225 | - :mpls_label
226 | - :mpls_label_8
227 | 78:
228 | - :mpls_label
229 | - :mpls_label_9
230 | 79:
231 | - :mpls_label
232 | - :mpls_label_10
233 | 80:
234 | - :mac_addr
235 | - :in_dst_mac
236 | 81:
237 | - :mac_addr
238 | - :out_src_mac
239 | 82:
240 | - :string
241 | - :if_name
242 | 83:
243 | - :string
244 | - :if_desc
245 | 84:
246 | - :string
247 | - :sampler_name
248 | 89:
249 | - :uint8
250 | - :forwarding_status
251 | 91:
252 | - :uint8
253 | - :mpls_prefix_len
254 | 95:
255 | - 4
256 | - :app_id
257 | 150:
258 | - :uint32
259 | - :flowStartSeconds
260 | 151:
261 | - :uint32
262 | - :flowEndSeconds
263 | 152:
264 | - :uint64
265 | - :flowStartMilliseconds
266 | 153:
267 | - :uint64
268 | - :flowEndMilliseconds
269 | 154:
270 | - :uint64
271 | - :flowStartMicroseconds
272 | 155:
273 | - :uint64
274 | - :flowEndMicroseconds
275 | 156:
276 | - :uint64
277 | - :flowStartNanoseconds
278 | 157:
279 | - :uint64
280 | - :flowEndNanoseconds
281 | 234:
282 | - :uint32
283 | - :ingress_vrf_id
284 | 235:
285 | - :uint32
286 | - :egress_vrf_id
287 | 236:
288 | - :string
289 | - :vrf_name
290 |
291 | scope:
292 | 1:
293 | - :ip4_addr
294 | - :system
295 | 2:
296 | - :skip
297 | 3:
298 | - :skip
299 | 4:
300 | - :skip
301 | 5:
302 | - :skip
303 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/netflow_records.rb:
--------------------------------------------------------------------------------
1 | require "bindata"
2 |
3 | module Fluent
4 | module Plugin
5 | class NetflowParser < Parser
6 | class IP4Addr < BinData::Primitive
7 | endian :big
8 | uint32 :storage
9 |
10 | def set(val)
11 | ip = IPAddr.new(val)
12 | if ! ip.ipv4?
13 | raise ArgumentError, "invalid IPv4 address '#{val}'"
14 | end
15 | self.storage = ip.to_i
16 | end
17 |
18 | def get
19 | IPAddr.new_ntoh([self.storage].pack('N')).to_s
20 | end
21 | end
22 |
23 | class IP6Addr < BinData::Primitive
24 | endian :big
25 | uint128 :storage
26 |
27 | def set(val)
28 | ip = IPAddr.new(val)
29 | if ! ip.ipv6?
30 | raise ArgumentError, "invalid IPv6 address `#{val}'"
31 | end
32 | self.storage = ip.to_i
33 | end
34 |
35 | def get
36 | IPAddr.new_ntoh((0..7).map { |i|
37 | (self.storage >> (112 - 16 * i)) & 0xffff
38 | }.pack('n8')).to_s
39 | end
40 | end
41 |
42 | class MacAddr < BinData::Primitive
43 | array :bytes, type: :uint8, initial_length: 6
44 |
45 | def set(val)
46 | ints = val.split(/:/).collect { |int| int.to_i(16) }
47 | self.bytes = ints
48 | end
49 |
50 | def get
51 | self.bytes.collect { |byte| byte.value.to_s(16).rjust(2,'0') }.join(":")
52 | end
53 | end
54 |
55 | class MplsLabel < BinData::Primitive
56 | bit20 :label
57 | bit3 :exp
58 | bit1 :bottom
59 | def set(val)
60 | self.label = val >> 4
61 | self.exp = (val & 0b1111) >> 1
62 | self.bottom = val & 0b1
63 | end
64 | def get
65 | self.label
66 | end
67 | end
68 |
69 | class Header < BinData::Record
70 | endian :big
71 | uint16 :version
72 | end
73 |
74 | class Netflow5PDU < BinData::Record
75 | endian :big
76 | uint16 :version
77 | uint16 :flow_records
78 | uint32 :uptime
79 | uint32 :unix_sec
80 | uint32 :unix_nsec
81 | uint32 :flow_seq_num
82 | uint8 :engine_type
83 | uint8 :engine_id
84 | bit2 :sampling_algorithm
85 | bit14 :sampling_interval
86 | array :records, initial_length: :flow_records do
87 | ip4_addr :ipv4_src_addr
88 | ip4_addr :ipv4_dst_addr
89 | ip4_addr :ipv4_next_hop
90 | uint16 :input_snmp
91 | uint16 :output_snmp
92 | uint32 :in_pkts
93 | uint32 :in_bytes
94 | uint32 :first_switched
95 | uint32 :last_switched
96 | uint16 :l4_src_port
97 | uint16 :l4_dst_port
98 | skip length: 1
99 | uint8 :tcp_flags # Split up the TCP flags maybe?
100 | uint8 :protocol
101 | uint8 :src_tos
102 | uint16 :src_as
103 | uint16 :dst_as
104 | uint8 :src_mask
105 | uint8 :dst_mask
106 | skip length: 2
107 | end
108 | end
109 |
110 | class TemplateFlowset < BinData::Record
111 | endian :big
112 | array :templates, read_until: lambda { array.num_bytes == flowset_length - 4 } do
113 | uint16 :template_id
114 | uint16 :field_count
115 | array :template_fields, initial_length: :field_count do
116 | uint16 :field_type
117 | uint16 :field_length
118 | end
119 | end
120 | end
121 |
122 | class OptionFlowset < BinData::Record
123 | endian :big
124 | array :templates, read_until: lambda { flowset_length - 4 - array.num_bytes <= 2 } do
125 | uint16 :template_id
126 | uint16 :scope_length
127 | uint16 :option_length
128 | array :scope_fields, initial_length: lambda { scope_length / 4 } do
129 | uint16 :field_type
130 | uint16 :field_length
131 | end
132 | array :option_fields, initial_length: lambda { option_length / 4 } do
133 | uint16 :field_type
134 | uint16 :field_length
135 | end
136 | # 10 is byte length of fields. flowset_id, floset_length, template_id, option_scope_length, option_length
137 | skip length: lambda { flowset_length - 10 - templates[0][:scope_length] - templates[0][:option_length] }
138 | end
139 | end
140 |
141 | class Netflow9PDU < BinData::Record
142 | endian :big
143 | uint16 :version
144 | uint16 :flow_records
145 | uint32 :uptime
146 | uint32 :unix_sec
147 | uint32 :flow_seq_num
148 | uint32 :source_id
149 | array :records, read_until: :eof do
150 | uint16 :flowset_id
151 | uint16 :flowset_length
152 | choice :flowset_data, selection: :flowset_id do
153 | template_flowset 0
154 | option_flowset 1
155 | string :default, read_length: lambda { flowset_length - 4 }
156 | end
157 | end
158 | end
159 | end
160 | end
161 | end
162 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/parser_netflow.rb:
--------------------------------------------------------------------------------
1 | require "ipaddr"
2 | require 'yaml'
3 |
4 | require 'fluent/plugin/parser'
5 |
6 | require_relative 'netflow_records'
7 | require_relative 'vash'
8 |
9 | module Fluent
10 | module Plugin
11 | # port from logstash's netflow parser
12 | class NetflowParser < Parser
13 | Fluent::Plugin.register_parser('netflow', self)
14 |
15 | config_param :switched_times_from_uptime, :bool, default: false
16 | config_param :cache_ttl, :integer, default: 4000
17 | config_param :versions, :array, default: [5, 9]
18 | config_param :definitions, :string, default: nil
19 |
20 | # Cisco NetFlow Export Datagram Format
21 | # http://www.cisco.com/c/en/us/td/docs/net_mgmt/netflow_collection_engine/3-6/user/guide/format.html
22 | # Cisco NetFlow Version 9 Flow-Record Format
23 | # http://www.cisco.com/en/US/technologies/tk648/tk362/technologies_white_paper09186a00800a3db9.html
24 |
# Set up template/sampler caches and load Netflow v9 field definitions.
# Raises Fluent::ConfigError when either the bundled YAML or the
# user-supplied `definitions` file is missing or malformed.
def configure(conf)
  super

  @templates = Vash.new()
  @samplers_v9 = Vash.new()
  # Path to default Netflow v9 field definitions
  filename = File.expand_path('../netflow_fields.yaml', __FILE__)

  begin
    @template_fields = YAML.load_file(filename)
  rescue => e
    # BUG FIX: the message used to contain the literal text "#(unknown)"
    # because the interpolation of the bundled definitions path was broken.
    raise Fluent::ConfigError, "Bad syntax in definitions file #{filename}, error_class = #{e.class.name}, error = #{e.message}"
  end

  # Allow the user to augment/override/rename the supported Netflow fields
  if @definitions
    raise Fluent::ConfigError, "definitions file #{@definitions} doesn't exist" unless File.exist?(@definitions)
    begin
      template_fields_custom = YAML.load_file(@definitions)
      if template_fields_custom.first.last.is_a?(Array) # compatibility for older definition files
        # Old format: a flat {field_value => [length, name]} hash applying to
        # 'option' fields only.
        @template_fields['option'].merge!(template_fields_custom)
      else
        # New format: {'option' => {...}, 'scope' => {...}} — merge per category.
        @template_fields.each do |key, _|
          if template_fields_custom.key?(key)
            @template_fields[key].merge!(template_fields_custom[key])
          end
        end
      end
    rescue => e
      raise Fluent::ConfigError, "Bad syntax in definitions file #{@definitions}, error_class = #{e.class.name}, error = #{e.message}"
    end
  end
end
58 |
# Entry point: dispatch a raw Netflow packet by its version field and yield
# (time, record) pairs via the given block. `host` is needed for v9 so that
# templates from different exporters don't collide.
def call(payload, host=nil, &block)
  version = payload[0, 2].unpack('n').first
  if version == 5
    forV5(payload, block)
  elsif version == 9
    handle_v9(host, Netflow9PDU.read(payload), block)
  else
    $log.warn "Unsupported Netflow version v#{version}: #{version.class}"
  end
end
72 |
73 | private
74 |
# Render an unsigned 32-bit integer as dotted-quad IPv4 notation,
# e.g. 0xC0A80001 -> "192.168.0.1".
def ipv4_addr_to_string(uint32)
  [uint32].pack('N').unpack('C4').join('.')
end
78 |
# Convert a "milliseconds since device boot" value (msec) into an absolute
# Time, anchored at the export timestamp given as current_unix_time seconds
# plus current_nsec nanoseconds, with the exporter's uptime at export.
def msec_from_boot_to_time(msec, uptime, current_unix_time, current_nsec)
  elapsed_ms = uptime - msec
  seconds = current_unix_time - (elapsed_ms / 1000)
  micros  = (current_nsec / 1000) - ((elapsed_ms % 1000) * 1000)
  # Borrow one second when the microsecond part underflows.
  seconds, micros = seconds - 1, micros + 1_000_000 if micros < 0
  Time.at(seconds, micros)
end
89 |
# ISO8601 (UTC) timestamp formatters, one per precision used by the
# Netflow timestamp fields.

# Millisecond precision, used for first_switched/last_switched.
def format_for_switched(time)
  time.utc.strftime('%Y-%m-%dT%H:%M:%S.%3NZ'.freeze)
end

# Whole-second precision (no fractional part, no zone suffix).
def format_for_flowSeconds(time)
  time.utc.strftime('%Y-%m-%dT%H:%M:%S'.freeze)
end

# Millisecond precision for flow{Start,End}Milliseconds.
def format_for_flowMilliSeconds(time)
  time.utc.strftime('%Y-%m-%dT%H:%M:%S.%3NZ'.freeze)
end

# Microsecond precision for flow{Start,End}Microseconds.
def format_for_flowMicroSeconds(time)
  time.utc.strftime('%Y-%m-%dT%H:%M:%S.%6NZ'.freeze)
end

# Nanosecond precision for flow{Start,End}Nanoseconds.
def format_for_flowNanoSeconds(time)
  time.utc.strftime('%Y-%m-%dT%H:%M:%S.%9NZ'.freeze)
end
109 |
110 | NETFLOW_V5_HEADER_FORMAT = 'nnNNNNnn'
111 | NETFLOW_V5_HEADER_BYTES = 24
112 | NETFLOW_V5_RECORD_FORMAT = 'NNNnnNNNNnnnnnnnxx'
113 | NETFLOW_V5_RECORD_BYTES = 48
114 |
115 | # V5 header
116 | # uint16 :version # n
117 | # uint16 :flow_records # n
118 | # uint32 :uptime # N
119 | # uint32 :unix_sec # N
120 | # uint32 :unix_nsec # N
121 | # uint32 :flow_seq_num # N
122 | # uint8 :engine_type # n -> 0xff00
123 | # uint8 :engine_id # -> 0x00ff
124 | # bit2 :sampling_algorithm # n -> 0b1100000000000000
125 | # bit14 :sampling_interval # -> 0b0011111111111111
126 |
127 | # V5 records
128 | # array :records, initial_length: :flow_records do
129 | # ip4_addr :ipv4_src_addr # uint32 N
130 | # ip4_addr :ipv4_dst_addr # uint32 N
131 | # ip4_addr :ipv4_next_hop # uint32 N
132 | # uint16 :input_snmp # n
133 | # uint16 :output_snmp # n
134 | # uint32 :in_pkts # N
135 | # uint32 :in_bytes # N
136 | # uint32 :first_switched # N
137 | # uint32 :last_switched # N
138 | # uint16 :l4_src_port # n
139 | # uint16 :l4_dst_port # n
140 | # skip length: 1 # n -> (ignored)
141 | # uint8 :tcp_flags # -> 0x00ff
142 | # uint8 :protocol # n -> 0xff00
143 | # uint8 :src_tos # -> 0x00ff
144 | # uint16 :src_as # n
145 | # uint16 :dst_as # n
146 | # uint8 :src_mask # n -> 0xff00
147 | # uint8 :dst_mask # -> 0x00ff
148 | # skip length: 2 # xx
149 | # end
# Parse a Netflow v5 datagram with a single String#unpack pass (faster than
# BinData for this fixed layout) and yield one (time, record) pair per flow
# record via `block`. Logs a warning and yields nothing on size mismatch.
def forV5(payload, block)
  version, flow_records, uptime, unix_sec, unix_nsec, flow_seq_num, engine, sampling = payload.unpack(NETFLOW_V5_HEADER_FORMAT)
  # 'n' unpacks 16 bits at a time, so the packed byte/bit sub-fields are
  # split out with masks and shifts (see the layout comments above).
  engine_type = (engine & 0xff00) >> 8
  engine_id = engine & 0x00ff
  sampling_algorithm = (sampling & 0b1100000000000000) >> 14
  sampling_interval = sampling & 0b0011111111111111

  # Event time comes from the export header, shared by all records in the PDU.
  time = Fluent::EventTime.new(unix_sec.to_i, unix_nsec.to_i)

  records_bytes = payload.bytesize - NETFLOW_V5_HEADER_BYTES

  # Sanity check: the body must contain exactly flow_records 48-byte records.
  if records_bytes / NETFLOW_V5_RECORD_BYTES != flow_records
    $log.warn "bytesize mismatch, records_bytes:#{records_bytes}, records:#{flow_records}"
    return
  end

  # Unpack every record in one call; each record contributes 16 values
  # which are consumed per iteration below.
  format_full = NETFLOW_V5_RECORD_FORMAT * flow_records
  objects = payload[NETFLOW_V5_HEADER_BYTES, records_bytes].unpack(format_full)

  while objects.size > 0
    src_addr, dst_addr, next_hop, input_snmp, output_snmp,
    in_pkts, in_bytes, first_switched, last_switched, l4_src_port, l4_dst_port,
    tcp_flags_16, protocol_src_tos, src_as, dst_as, src_dst_mask = objects.shift(16)
    record = {
      # Header fields copied into every record.
      "version" => version,
      "uptime" => uptime,
      "flow_records" => flow_records,
      "flow_seq_num" => flow_seq_num,
      "engine_type" => engine_type,
      "engine_id" => engine_id,
      "sampling_algorithm" => sampling_algorithm,
      "sampling_interval" => sampling_interval,

      "ipv4_src_addr" => ipv4_addr_to_string(src_addr),
      "ipv4_dst_addr" => ipv4_addr_to_string(dst_addr),
      "ipv4_next_hop" => ipv4_addr_to_string(next_hop),
      "input_snmp" => input_snmp,
      "output_snmp" => output_snmp,
      "in_pkts" => in_pkts,
      "in_bytes" => in_bytes,
      "first_switched" => first_switched,
      "last_switched" => last_switched,
      "l4_src_port" => l4_src_port,
      "l4_dst_port" => l4_dst_port,
      # tcp_flags/protocol/src_tos and src/dst mask pairs arrive packed in
      # 16-bit words; split them here.
      "tcp_flags" => tcp_flags_16 & 0x00ff,
      "protocol" => (protocol_src_tos & 0xff00) >> 8,
      "src_tos" => (protocol_src_tos & 0x00ff),
      "src_as" => src_as,
      "dst_as" => dst_as,
      "src_mask" => (src_dst_mask & 0xff00) >> 8,
      "dst_mask" => (src_dst_mask & 0x00ff)
    }
    # By default, convert uptime-relative switch times into absolute
    # ISO8601 strings; keep raw uptime values when the option is set.
    unless @switched_times_from_uptime
      record["first_switched"] = format_for_switched(msec_from_boot_to_time(record["first_switched"], uptime, unix_sec, unix_nsec))
      record["last_switched"] = format_for_switched(msec_from_boot_to_time(record["last_switched"] , uptime, unix_sec, unix_nsec))
    end

    block.call(time, record)
  end
end
210 |
# Walk every flowset in a v9 PDU and dispatch by flowset id:
# 0 = templates, 1 = options templates, 256..65535 = data records.
def handle_v9(host, pdu, block)
  pdu.records.each do |flowset|
    id = flowset.flowset_id
    if id == 0
      handle_v9_flowset_template(host, pdu, flowset)
    elsif id == 1
      handle_v9_flowset_options_template(host, pdu, flowset)
    elsif id >= 256 && id <= 65535
      handle_v9_flowset_data(host, pdu, flowset, block)
    else
      # ids 2..255 are reserved and not handled here.
      $log.warn 'Unsupported flowset', flowset_id: id
    end
  end
end
225 |
# Register every template in a template flowset (id 0) so later data
# flowsets from the same exporter can be decoded.
def handle_v9_flowset_template(host, pdu, flowset)
  flowset.flowset_data.templates.each do |template|
    catch (:field) do
      template_fields = []
      template.template_fields.each do |field|
        entry = netflow_field_for(field.field_type, field.field_length)
        # Unknown/unsupported field: abandon this whole template — a
        # partial template would mis-align every subsequent data record.
        throw :field unless entry

        template_fields << entry
      end
      # We get this far, we have a list of fields
      # Templates are keyed per exporter host and observation domain
      # (source_id); template ids are only unique within that scope.
      key = "#{host}|#{pdu.source_id}|#{template.template_id}"
      @templates[key, @cache_ttl] = BinData::Struct.new(endian: :big, fields: template_fields)
      # Purge any expired templates
      @templates.cleanup!
    end
  end
end
244 |
# Options templates carry two field categories; both are flattened into a
# single record layout in this order.
NETFLOW_V9_FIELD_CATEGORIES = ['scope', 'option']

# Register every options template in an options-template flowset (id 1).
# Scope fields and option fields are concatenated into one BinData::Struct.
def handle_v9_flowset_options_template(host, pdu, flowset)
  flowset.flowset_data.templates.each do |template|
    catch (:field) do
      template_fields = []

      NETFLOW_V9_FIELD_CATEGORIES.each do |category|
        template["#{category}_fields"].each do |field|
          entry = netflow_field_for(field.field_type, field.field_length, category)
          # Bail out on any unknown field; a partial template is useless.
          throw :field unless entry

          template_fields << entry
        end
      end

      # We get this far, we have a list of fields
      # Same cache keying as regular templates: host + source_id + template id.
      key = "#{host}|#{pdu.source_id}|#{template.template_id}"
      @templates[key, @cache_ttl] = BinData::Struct.new(endian: :big, fields: template_fields)
      # Purge any expired templates
      @templates.cleanup!
    end
  end
end
269 |
270 | FIELDS_FOR_COPY_V9 = ['version', 'flow_seq_num']
271 |
272 | def handle_v9_flowset_data(host, pdu, flowset, block)
273 | template_key = "#{host}|#{pdu.source_id}|#{flowset.flowset_id}"
274 | template = @templates[template_key]
275 | if ! template
276 | $log.warn 'No matching template for',
277 | host: host, source_id: pdu.source_id, flowset_id: flowset.flowset_id
278 | return
279 | end
280 |
281 | length = flowset.flowset_length - 4
282 |
283 | # Template shouldn't be longer than the flowset and there should
284 | # be at most 3 padding bytes
285 | if template.num_bytes > length or ! (length % template.num_bytes).between?(0, 3)
286 | $log.warn "Template length doesn't fit cleanly into flowset",
287 | template_id: flowset.flowset_id, template_length: template.num_bytes, flowset_length: length
288 | return
289 | end
290 |
291 | array = BinData::Array.new(type: template, initial_length: length / template.num_bytes)
292 |
293 | template_fields = array.read(flowset.flowset_data)
294 | template_fields.each do |r|
295 | if is_sampler?(r)
296 | sampler_key = "#{host}|#{pdu.source_id}|#{r.flow_sampler_id}"
297 | register_sampler_v9 sampler_key, r
298 | next
299 | end
300 |
301 | time = Fluent::EventTime.new(pdu.unix_sec.to_i)
302 | event = {}
303 |
304 | # Fewer fields in the v9 header
305 | FIELDS_FOR_COPY_V9.each do |f|
306 | event[f] = pdu[f]
307 | end
308 |
309 | event['flowset_id'] = flowset.flowset_id
310 |
311 | r.each_pair do |k, v|
312 | case k
313 | when :first_switched
314 | unless @switched_times_from_uptime
315 | event[k.to_s] = format_for_switched(msec_from_boot_to_time(v.snapshot, pdu.uptime, time, 0))
316 | end
317 | when :last_switched
318 | unless @switched_times_from_uptime
319 | event[k.to_s] = format_for_switched(msec_from_boot_to_time(v.snapshot, pdu.uptime, time, 0))
320 | end
321 | when :flowStartSeconds
322 | event[k.to_s] = format_for_flowSeconds(Time.at(v.snapshot, 0))
323 | when :flowEndSeconds
324 | event[k.to_s] = format_for_flowSeconds(Time.at(v.snapshot, 0))
325 | when :flowStartMilliseconds
326 | divisor = 1_000
327 | microseconds = (v.snapshot % 1_000) * 1_000
328 | event[k.to_s] = format_for_flowMilliSeconds(Time.at(v.snapshot / divisor, microseconds))
329 | when :flowEndMilliseconds
330 | divisor = 1_000
331 | microseconds = (v.snapshot % 1_000) * 1_000
332 | event[k.to_s] = format_for_flowMilliSeconds(Time.at(v.snapshot / divisor, microseconds))
333 | when :flowStartMicroseconds
334 | divisor = 1_000_000
335 | microseconds = (v.snapshot % 1_000_000)
336 | event[k.to_s] = format_for_flowMicroSeconds(Time.at(v.snapshot / divisor, microseconds))
337 | when :flowEndMicroseconds
338 | divisor = 1_000_000
339 | microseconds = (v.snapshot % 1_000_000)
340 | event[k.to_s] = format_for_flowMicroSeconds(Time.at(v.snapshot / divisor, microseconds))
341 | when :flowStartNanoseconds
342 | divisor = 1_000_000_000
343 | microseconds = (v.snapshot % 1_000_000_000) / 1_000
344 | nanoseconds = v.snapshot % 1_000_000_000
345 | time_with_nano = Time.at(v.snapshot / divisor, microseconds)
346 | time_with_nano.nsec = nanoseconds
347 | event[k.to_s] = format_for_flowNanoSeconds(time_with_nano)
348 | when :flowEndNanoseconds
349 | divisor = 1_000_000_000
350 | microseconds = (v.snapshot % 1_000_000_000) / 1_000
351 | nanoseconds = v.snapshot % 1_000_000_000
352 | time_with_nano = Time.at(v.snapshot / divisor, microseconds)
353 | time_with_nano.nsec = nanoseconds
354 | event[k.to_s] = format_for_flowNanoSeconds(time_with_nano)
355 | else
356 | event[k.to_s] = v.snapshot
357 | end
358 | end
359 |
360 | if sampler_id = r['flow_sampler_id']
361 | sampler_key = "#{host}|#{pdu.source_id}|#{sampler_id}"
362 | if sampler = @samplers_v9[sampler_key]
363 | event['sampling_algorithm'] ||= sampler['flow_sampler_mode']
364 | event['sampling_interval'] ||= sampler['flow_sampler_random_interval']
365 | end
366 | end
367 |
368 | block.call(time, event)
369 | end
370 | end
371 |
372 | def uint_field(length, default)
373 | # If length is 4, return :uint32, etc. and use default if length is 0
374 | ("uint" + (((length > 0) ? length : default) * 8).to_s).to_sym
375 | end
376 |
377 | def netflow_field_for(type, length, category = 'option'.freeze)
378 | unless field = @template_fields[category][type]
379 | $log.warn "Skip unsupported field", type: type, length: length
380 | return [:skip, nil, {length: length}]
381 | end
382 |
383 | unless field.is_a?(Array)
384 | $log.warn "Skip non-Array definition", field: field
385 | return [:skip, nil, {length: length}]
386 | end
387 |
388 | # Small bit of fixup for numeric value, :skip or :string field length, which are dynamic
389 | case field[0]
390 | when Integer
391 | [uint_field(length, field[0]), field[1]]
392 | when :skip
393 | field + [nil, {length: length}]
394 | when :string
395 | field + [{length: length, trim_padding: true}]
396 | else
397 | field
398 | end
399 | end
400 |
401 | # covers Netflow v9 and v10 (a.k.a IPFIX)
402 | def is_sampler?(record)
403 | record['flow_sampler_id'] && record['flow_sampler_mode'] && record['flow_sampler_random_interval']
404 | end
405 |
      # Cache a sampler options record so subsequent data records can be
      # annotated with sampling_algorithm / sampling_interval.
      # The Vash []= form takes the TTL as an extra key argument;
      # cleanup! evicts any entries whose TTL has expired.
      def register_sampler_v9(key, sampler)
        @samplers_v9[key, @cache_ttl] = sampler
        @samplers_v9.cleanup!
      end
410 | end
411 | end
412 | end
413 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/vash.rb:
--------------------------------------------------------------------------------
module Fluent
  module Plugin
    class NetflowParser < Parser
      # Hash with per-key expiry (TTL), used to cache NetFlow templates and
      # sampler records. Based on https://gist.github.com/joshaven/184837
      #
      # Fixes over the original gist:
      # * sterilize() reassigned a local variable and therefore never had any
      #   effect; keys are now actually normalized via sterile() at each
      #   call site.
      # * cleanup! deleted from @register while iterating it (via map);
      #   it now iterates a snapshot of the keys.
      # * merge ignored its block's value parameter and re-read hsh[key].
      class Vash < Hash
        def initialize(constructor = {})
          @register = {}
          if constructor.is_a?(Hash)
            super()
            merge(constructor)
          else
            super(constructor)
          end
        end

        alias_method :regular_writer, :[]= unless method_defined?(:regular_writer)
        alias_method :regular_reader, :[] unless method_defined?(:regular_reader)

        # Read a value; an expired entry is removed first and reads as nil.
        def [](key)
          key = sterile(key)
          clear(key) if expired?(key)
          regular_reader(key)
        end

        # Write a value, optionally with a per-key TTL (seconds):
        #   vash[key] = value        # default TTL of 60s
        #   vash[key, ttl] = value
        def []=(key, *args)
          if args.length == 2
            value, ttl = args[1], args[0]
          elsif args.length == 1
            value, ttl = args[0], 60
          else
            raise ArgumentError, "Wrong number of arguments, expected 2 or 3, received: #{args.length+1}\n"+
              "Example Usage: volatile_hash[:key]=value OR volatile_hash[:key, ttl]=value"
          end
          key = sterile(key)
          ttl(key, ttl)
          regular_writer(key, value)
        end

        # Merge another hash in through []= so every key is normalized and
        # receives the default TTL. Returns self.
        def merge(hsh)
          hsh.each { |key, value| self[key] = value }
          self
        end

        # Drop every expired entry. Iterates over a snapshot of the keys so
        # clear()'s deletions don't mutate @register mid-iteration.
        def cleanup!
          now = Time.now.to_i
          @register.keys.each do |key|
            clear(key) if @register[key] < now
          end
        end

        # Remove a single entry and its TTL registration.
        # NOTE: intentionally shadows Hash#clear with a one-argument variant.
        def clear(key)
          key = sterile(key)
          @register.delete key
          self.delete key
        end

        private

        # A key with no TTL registered is treated as expired (nil.to_i == 0).
        def expired?(key)
          Time.now.to_i > @register[key].to_i
        end

        def ttl(key, secs=60)
          @register[key] = Time.now.to_i + secs.to_i
        end

        # Normalize a key: a String loses one trailing '!' and '='; anything
        # else is converted to a similarly-trimmed Symbol.
        def sterile(key)
          String === key ? key.chomp('!').chomp('=') : key.to_s.chomp('!').chomp('=').to_sym
        end

        # Kept for backward compatibility; returns the normalized key.
        def sterilize(key)
          sterile(key)
        end
      end
    end
  end
end
76 |
--------------------------------------------------------------------------------
/test/dump/netflow.v5.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v5.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.flowStartMilliseconds.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.flowStartMilliseconds.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.mpls-data.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.mpls-data.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.mpls-template.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.mpls-template.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.sampler.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.sampler.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.sampler_template.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.sampler_template.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.template.as2.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.template.as2.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.template.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.template.dump
--------------------------------------------------------------------------------
/test/dump/netflow.v9.template.flowStartMilliseconds.dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repeatedly/fluent-plugin-netflow/a12092128406b25bcee6952779f87ecbaf0bafc0/test/dump/netflow.v9.template.flowStartMilliseconds.dump
--------------------------------------------------------------------------------
/test/helper.rb:
--------------------------------------------------------------------------------
require 'rubygems'
require 'bundler'
# Wire up the bundled gems before any test dependency is loaded; a missing
# bundle is reported with a hint instead of a raw backtrace.
begin
  Bundler.setup(:default, :development)
rescue Bundler::BundlerError => e
  $stderr.puts e.message
  $stderr.puts "Run `bundle install` to install missing gems"
  exit e.status_code
end
require 'test/unit'

# Make lib/ and test/ requirable without installing the gem first.
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift(File.dirname(__FILE__))
require 'fluent/test'

# $log = Fluent::Log.new(Fluent::Test::DummyLogDevice.new, Fluent::Log::LEVEL_INFO)

require 'fluent/plugin/parser_netflow'
require 'fluent/plugin/in_netflow'
20 |
# Grab an ephemeral TCP port number that is currently free.
# NOTE(review): the port is released before it is used, so another process
# could take it in the meantime; the tests accept that small race.
def unused_port
  server = TCPServer.open(0)
  begin
    server.addr[1]
  ensure
    server.close
  end
end
27 |
--------------------------------------------------------------------------------
/test/test_in_netflow.rb:
--------------------------------------------------------------------------------
1 | require 'helper'
2 | require 'fluent/test/driver/input'
3 |
# Configuration tests for the netflow input plugin: defaults, explicit
# settings, and rejection of unsupported transport protocols.
class NetflowInputTest < Test::Unit::TestCase
  def setup
    Fluent::Test.setup
  end

  PORT = unused_port
  CONFIG = %[
    port #{PORT}
    bind 127.0.0.1
    tag test.netflow
  ]

  # Build an input test driver configured from +conf+.
  def create_driver(conf=CONFIG)
    Fluent::Test::Driver::Input.new(Fluent::Plugin::NetflowInput).configure(conf)
  end

  def test_configure
    driver = create_driver

    assert_equal PORT, driver.instance.port
    assert_equal '127.0.0.1', driver.instance.bind
    assert_equal 'test.netflow', driver.instance.tag
    assert_equal :udp, driver.instance.protocol_type
    assert_equal 2048, driver.instance.max_bytes

    # Only UDP is supported; TCP must be rejected at configure time.
    assert_raise Fluent::ConfigError do
      create_driver(CONFIG + %[
        protocol_type tcp
      ])
    end
  end
end
35 |
--------------------------------------------------------------------------------
/test/test_parser_netflow.rb:
--------------------------------------------------------------------------------
1 | require 'helper'
2 | require 'fluent/test/driver/parser'
3 |
# Unit tests for the NetFlow v5 parser: one test against a captured binary
# dump, plus tests against packets built in-memory from the Netflow5PDU
# record definition, and a relative-performance check of the *_switched
# time-conversion strategies.
class NetflowParserTest < Test::Unit::TestCase
  def setup
    Fluent::Test.setup
  end

  # Build a parser configured from a plain options hash.
  def create_parser(conf={})
    parser = Fluent::Plugin::NetflowParser.new
    parser.configure(Fluent::Config::Element.new('ROOT', '', conf, []))
    parser
  end

  test 'configure' do
    assert_nothing_raised do
      parser = create_parser
    end
  end

  test 'parse v5 binary data, dumped by netflow-generator' do
    # generated by https://github.com/mshindo/NetFlow-Generator
    parser = create_parser
    raw_data = File.binread(File.join(__dir__, "dump/netflow.v5.dump"))
    bytes_for_1record = 72
    assert_equal bytes_for_1record, raw_data.size
    parsed = []
    parser.call(raw_data) do |time, data|
      parsed << [time, data]
    end
    assert_equal 1, parsed.size
    assert_equal Time.parse('2016-02-29 11:14:00 -0800').to_i, parsed.first[0]
    expected_record = {
      # header
      "version" => 5,
      "uptime" => 1785097000,
      "flow_records" => 1,
      "flow_seq_num" => 1,
      "engine_type" => 1,
      "engine_id" => 1,
      "sampling_algorithm" => 0,
      "sampling_interval" => 0,

      # record
      "ipv4_src_addr" => "10.0.0.11",
      "ipv4_dst_addr" => "20.0.0.187",
      "ipv4_next_hop" => "30.0.0.254",
      "input_snmp" => 1,
      "output_snmp" => 2,
      "in_pkts" => 173,
      "in_bytes" => 4581,
      "first_switched" => "2016-02-29T19:13:59.215Z",
      "last_switched" => "2016-02-29T19:14:00.090Z",
      "l4_src_port" => 1001,
      "l4_dst_port" => 3001,
      "tcp_flags" => 27,
      "protocol" => 6,
      "src_tos" => 0,
      "src_as" => 101,
      "dst_as" => 201,
      "src_mask" => 24,
      "dst_mask" => 24,
    }
    assert_equal expected_record, parsed.first[1]
  end

  DEFAULT_UPTIME = 1048383625 # == (((12 * 24 + 3) * 60 + 13) * 60 + 3) * 1000 + 625
  # 12days 3hours 13minutes 3seconds 625 milliseconds

  DEFAULT_TIME = Time.parse('2016-02-29 11:14:00 -0800').to_i
  # NOTE(review): a random nsec makes the suite non-deterministic between
  # runs; the conversion under test should be insensitive to its value.
  DEFAULT_NSEC = rand(1_000_000_000)

  # Reference conversion: uptime-relative msec -> absolute Time, computed
  # exactly with Rational arithmetic.
  def msec_from_boot_to_time_by_rational(msec, uptime: DEFAULT_UPTIME, sec: DEFAULT_TIME, nsec: DEFAULT_NSEC)
    current_time = Rational(sec) + Rational(nsec, 1_000_000_000)
    diff_msec = uptime - msec
    target_time = current_time - Rational(diff_msec, 1_000)
    Time.at(target_time)
  end

  # Integer-only variant of the same conversion (mirrors the parser's logic),
  # with manual borrow when the microsecond part goes negative.
  def msec_from_boot_to_time(msec, uptime: DEFAULT_UPTIME, sec: DEFAULT_TIME, nsec: DEFAULT_NSEC)
    millis = uptime - msec
    seconds = sec - (millis / 1000)
    micros = (nsec / 1000) - ((millis % 1000) * 1000)
    if micros < 0
      seconds -= 1
      micros += 1000000
    end
    Time.at(seconds, micros)
  end

  # ISO8601 with millisecond precision, as emitted by the parser.
  def format_for_switched(time)
    time.utc.strftime("%Y-%m-%dT%H:%M:%S.%3NZ")
  end

  test 'converting msec from boottime to time works correctly' do
    assert_equal msec_from_boot_to_time(300).to_i, msec_from_boot_to_time_by_rational(300).to_i
    assert_equal msec_from_boot_to_time(300).usec, msec_from_boot_to_time_by_rational(300).usec
  end

  test 'check performance degradation about stringifying *_switched times' do
    parser = create_parser({"switched_times_from_uptime" => true})
    data = v5_data(
      version: 5,
      flow_records: 50,
      uptime: DEFAULT_UPTIME,
      unix_sec: DEFAULT_TIME,
      unix_nsec: DEFAULT_NSEC,
      flow_seq_num: 1,
      engine_type: 1,
      engine_id: 1,
      sampling_algorithm: 0,
      sampling_interval: 0,
      records: [
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
      ]
    )

    bench_data = data.to_binary_s # 50 records

    # configure to leave uptime-based value as-is
    count = 0
    GC.start
    t1 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        # do nothing
        count += 1
      end
    end
    t2 = Time.now
    uptime_based_switched = t2 - t1

    assert{ count == 50000 }

    # make time conversion to use Rational
    count = 0
    GC.start
    t3 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        record["first_switched"] = format_for_switched(msec_from_boot_to_time_by_rational(record["first_switched"]))
        record["last_switched"] = format_for_switched(msec_from_boot_to_time_by_rational(record["last_switched"]))
        count += 1
      end
    end
    t4 = Time.now
    using_rational = t4 - t3

    assert{ count == 50000 }

    # skip time formatting
    count = 0
    GC.start
    t5 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        record["first_switched"] = msec_from_boot_to_time(record["first_switched"])
        record["last_switched"] = msec_from_boot_to_time(record["last_switched"])
        count += 1
      end
    end
    t6 = Time.now
    skip_time_formatting = t6 - t5

    assert{ count == 50000 }

    # with full time conversion (default)
    parser = create_parser
    count = 0
    GC.start
    t7 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        count += 1
      end
    end
    t8 = Time.now
    default_formatting = t8 - t7

    assert{ count == 50000 }

    # NOTE(review): relative-timing assertions like these can be flaky on
    # loaded CI machines; they assert ordering only, not absolute durations.
    assert{ using_rational > default_formatting }
    assert{ default_formatting > skip_time_formatting }
    assert{ skip_time_formatting > uptime_based_switched }
  end

  test 'parse v5 binary data contains 1 record, generated from definition' do
    parser = create_parser
    parsed = []

    time1 = DEFAULT_TIME
    data1 = v5_data(
      version: 5,
      flow_records: 1,
      uptime: DEFAULT_UPTIME,
      unix_sec: DEFAULT_TIME,
      unix_nsec: DEFAULT_NSEC,
      flow_seq_num: 1,
      engine_type: 1,
      engine_id: 1,
      sampling_algorithm: 0,
      sampling_interval: 0,
      records: [
        v5_record,
      ]
    )

    parser.call(data1.to_binary_s) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
    assert_instance_of Fluent::EventTime, parsed.first[0]
    assert_equal time1, parsed.first[0]

    event = parsed.first[1]

    assert_equal 5, event["version"]
    assert_equal 1, event["flow_records"]
    assert_equal 1, event["flow_seq_num"]
    assert_equal 1, event["engine_type"]
    assert_equal 1, event["engine_id"]
    assert_equal 0, event["sampling_algorithm"]
    assert_equal 0, event["sampling_interval"]

    assert_equal "10.0.1.122", event["ipv4_src_addr"]
    assert_equal "192.168.0.3", event["ipv4_dst_addr"]
    assert_equal "10.0.0.3", event["ipv4_next_hop"]
    assert_equal 1, event["input_snmp"]
    assert_equal 2, event["output_snmp"]
    assert_equal 156, event["in_pkts"]
    assert_equal 1024, event["in_bytes"]
    assert_equal format_for_switched(msec_from_boot_to_time(DEFAULT_UPTIME - 13000)), event["first_switched"]
    assert_equal format_for_switched(msec_from_boot_to_time(DEFAULT_UPTIME - 12950)), event["last_switched"]
    assert_equal 1048, event["l4_src_port"]
    assert_equal 80, event["l4_dst_port"]
    assert_equal 27, event["tcp_flags"]
    assert_equal 6, event["protocol"]
    assert_equal 0, event["src_tos"]
    assert_equal 101, event["src_as"]
    assert_equal 201, event["dst_as"]
    assert_equal 24, event["src_mask"]
    assert_equal 24, event["dst_mask"]
  end

  test 'parse v5 binary data contains 1 record, generated from definition, leaving switched times as using uptime' do
    parser = create_parser({"switched_times_from_uptime" => true})
    parsed = []

    time1 = DEFAULT_TIME
    data1 = v5_data(
      version: 5,
      flow_records: 1,
      uptime: DEFAULT_UPTIME,
      unix_sec: DEFAULT_TIME,
      unix_nsec: DEFAULT_NSEC,
      flow_seq_num: 1,
      engine_type: 1,
      engine_id: 1,
      sampling_algorithm: 0,
      sampling_interval: 0,
      records: [
        v5_record,
      ]
    )

    parser.call(data1.to_binary_s) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
    assert_equal time1, parsed.first[0]

    event = parsed.first[1]

    assert_equal 5, event["version"]
    assert_equal 1, event["flow_records"]
    assert_equal 1, event["flow_seq_num"]
    assert_equal 1, event["engine_type"]
    assert_equal 1, event["engine_id"]
    assert_equal 0, event["sampling_algorithm"]
    assert_equal 0, event["sampling_interval"]

    assert_equal "10.0.1.122", event["ipv4_src_addr"]
    assert_equal "192.168.0.3", event["ipv4_dst_addr"]
    assert_equal "10.0.0.3", event["ipv4_next_hop"]
    assert_equal 1, event["input_snmp"]
    assert_equal 2, event["output_snmp"]
    assert_equal 156, event["in_pkts"]
    assert_equal 1024, event["in_bytes"]
    # With switched_times_from_uptime the raw uptime-relative values survive
    assert_equal (DEFAULT_UPTIME - 13000), event["first_switched"]
    assert_equal (DEFAULT_UPTIME - 12950), event["last_switched"]
    assert_equal 1048, event["l4_src_port"]
    assert_equal 80, event["l4_dst_port"]
    assert_equal 27, event["tcp_flags"]
    assert_equal 6, event["protocol"]
    assert_equal 0, event["src_tos"]
    assert_equal 101, event["src_as"]
    assert_equal 201, event["dst_as"]
    assert_equal 24, event["src_mask"]
    assert_equal 24, event["dst_mask"]
  end

  # Helpers for constructing BinData record primitives used by v5_data.
  require 'fluent/plugin/netflow_records'
  def ipv4addr(v)
    addr = Fluent::Plugin::NetflowParser::IP4Addr.new
    addr.set(v)
    addr
  end

  def ipv6addr(v)
    addr = Fluent::Plugin::NetflowParser::IP6Addr.new
    addr.set(v)
    addr
  end

  def macaddr(v)
    addr = Fluent::Plugin::NetflowParser::MacAddr.new
    addr.set(v)
    addr
  end

  def mplslabel(v)
    label = Fluent::Plugin::NetflowParser::MplsLabel.new
    label.set(v)
    label
  end

  # A single v5 flow record as a plain hash; override fields via +hash+.
  def v5_record(hash={})
    {
      ipv4_src_addr: "10.0.1.122",
      ipv4_dst_addr: "192.168.0.3",
      ipv4_next_hop: "10.0.0.3",
      input_snmp: 1,
      output_snmp: 2,
      in_pkts: 156,
      in_bytes: 1024,
      first_switched: DEFAULT_UPTIME - 13000, # 13seconds ago
      last_switched: DEFAULT_UPTIME - 12950, # 50msec later after first switched
      l4_src_port: 1048,
      l4_dst_port: 80,
      tcp_flags: 27,
      protocol: 6,
      src_tos: 0,
      src_as: 101,
      dst_as: 201,
      src_mask: 24,
      dst_mask: 24,
    }.merge(hash)
  end

  # Build a Netflow5PDU from a header/records hash, converting the IPv4
  # string fields of each record into IP4Addr BinData values.
  def v5_data(hash={})
    hash = hash.dup
    hash[:records] = (hash[:records] || []).map{|r|
      r = r.dup
      [:ipv4_src_addr, :ipv4_dst_addr, :ipv4_next_hop].each do |key|
        r[key] = ipv4addr(r[key]) if r[key]
      end
      r
    }
    Fluent::Plugin::NetflowParser::Netflow5PDU.new(hash)
  end

  # TODO: v9 fixture builders were never implemented; v9 tests use captured
  # dumps in test_parser_netflow9.rb instead.
  def v9_template(hash)
  end

  def v9_option(hash)
  end

  def v9_data(hash)
  end
end
382 |
--------------------------------------------------------------------------------
/test/test_parser_netflow9.rb:
--------------------------------------------------------------------------------
1 | require 'helper'
2 |
# Tests for the NetFlow v9 parsing path, driven by captured binary dumps:
# templates, sampler (options) records, MPLS data, and plain data records.
# Exercises the per-host template cache and sampler annotation.
class Netflow9ParserTest < Test::Unit::TestCase
  def setup
    Fluent::Test.setup
  end

  def create_parser(conf={})
    parser = Fluent::Plugin::NetflowParser.new
    parser.configure(Fluent::Config::Element.new('ROOT', '', conf, []))
    parser
  end

  # Captured packets, memoized per test instance.
  # NOTE(review): these use File.read rather than File.binread — presumably
  # fine on Linux CI, but binary dumps could be mangled on Windows; confirm.
  def raw_template
    @raw_template ||= File.read(File.expand_path('../dump/netflow.v9.template.dump', __FILE__))
  end

  def raw_flowStartMilliseconds_template
    @raw_flowStartMilliseconds_template ||= File.read(File.expand_path('../dump/netflow.v9.template.flowStartMilliseconds.dump', __FILE__))
  end

  def raw_mpls_template
    @raw_mpls_template ||= File.read(File.expand_path('../dump/netflow.v9.mpls-template.dump', __FILE__))
  end

  def raw_data
    @raw_data ||= File.read(File.expand_path('../dump/netflow.v9.dump', __FILE__))
  end

  def raw_flowStartMilliseconds_data
    @raw_flowStartMilliseconds_data ||= File.read(File.expand_path('../dump/netflow.v9.flowStartMilliseconds.dump', __FILE__))
  end

  def raw_mpls_data
    @raw_mpls_data ||= File.read(File.expand_path('../dump/netflow.v9.mpls-data.dump', __FILE__))
  end

  def raw_sampler_template
    @raw_sampler_template ||= File.read(File.expand_path('../dump/netflow.v9.sampler_template.dump', __FILE__))
  end

  def raw_sampler_data
    @raw_sampler_data ||= File.read(File.expand_path('../dump/netflow.v9.sampler.dump', __FILE__))
  end

  def raw_2byte_as_template
    @raw_2byte_as_template ||= File.read(File.expand_path('../dump/netflow.v9.template.as2.dump', __FILE__))
  end

  DEFAULT_HOST = '127.0.0.1'

  test 'parse netflow v9 binary data before loading corresponding template' do
    parser = create_parser

    # Data records without a cached template must be dropped, not emitted
    assert_equal 92, raw_data.size
    parser.call(raw_data, DEFAULT_HOST) do |time, record|
      assert false, 'nothing emitted'
    end
  end

  test 'parse netflow v9 binary data' do
    parser = create_parser

    parsed = []
    parser.call raw_template, DEFAULT_HOST
    parser.call(raw_data, DEFAULT_HOST) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
    assert_instance_of Fluent::EventTime, parsed.first[0]
    assert_equal Time.parse('2016-02-12T04:02:25Z').to_i, parsed.first[0]
    expected_record = {
      # header
      'version' => 9,
      'flow_seq_num' => 4645895,
      'flowset_id' => 260,

      # flowset
      'in_pkts' => 1,
      'in_bytes' => 60,
      'ipv4_src_addr' => '192.168.0.1',
      'ipv4_dst_addr' => '192.168.0.2',
      'input_snmp' => 54,
      'output_snmp' => 29,
      'last_switched' => '2016-02-12T04:02:09.053Z',
      'first_switched' => '2016-02-12T04:02:09.053Z',
      'l4_src_port' => 80,
      'l4_dst_port' => 32822,
      'src_as' => 0,
      'dst_as' => 65000,
      'bgp_ipv4_next_hop' => '192.168.0.3',
      'src_mask' => 24,
      'dst_mask' => 24,
      'protocol' => 6,
      'tcp_flags' => 0x12,
      'src_tos' => 0x0,
      'direction' => 0,
      'forwarding_status' => 0b01000000,
      'flow_sampler_id' => 1,
      'ingress_vrf_id' => 1610612736,
      'egress_vrf_id' => 1610612736
    }
    assert_equal expected_record, parsed.first[1]
  end

  test 'parse netflow v9 binary data (flowStartMilliseconds)' do
    parser = create_parser

    parsed = []
    parser.call raw_flowStartMilliseconds_template, DEFAULT_HOST
    parser.call(raw_flowStartMilliseconds_data, DEFAULT_HOST) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
    assert_equal Time.parse('2016-02-12T04:02:25Z').to_i, parsed.first[0]
    expected_record = {
      # header
      'version' => 9,
      'flow_seq_num' => 4645895,
      'flowset_id' => 261,

      # flowset
      'in_pkts' => 1,
      'in_bytes' => 60,
      'ipv4_src_addr' => '192.168.0.1',
      'ipv4_dst_addr' => '192.168.0.2',
      'input_snmp' => 54,
      'output_snmp' => 29,
      'flowEndMilliseconds' => '2016-02-12T04:02:09.053Z',
      'flowStartMilliseconds' => '2016-02-12T04:02:09.053Z',
      'l4_src_port' => 80,
      'l4_dst_port' => 32822,
      'src_as' => 0,
      'dst_as' => 65000,
      'bgp_ipv4_next_hop' => '192.168.0.3',
      'src_mask' => 24,
      'dst_mask' => 24,
      'protocol' => 6,
      'tcp_flags' => 0x12,
      'src_tos' => 0x0,
      'direction' => 0,
      'forwarding_status' => 0b01000000,
      'flow_sampler_id' => 1,
      'ingress_vrf_id' => 1610612736,
      'egress_vrf_id' => 1610612736
    }
    assert_equal expected_record, parsed.first[1]
  end

  test 'parse netflow v9 binary data after sampler data is cached' do
    parser = create_parser

    parsed = []
    [raw_sampler_template, raw_sampler_data, raw_template].each {|raw| parser.call(raw, DEFAULT_HOST){} }
    parser.call(raw_data, DEFAULT_HOST) do |time, record|
      parsed << [time, record]
    end

    assert_equal 2, parsed.first[1]['sampling_algorithm']
    assert_equal 5000, parsed.first[1]['sampling_interval']
  end

  test 'parse netflow v9 binary data with host-based template cache' do
    parser = create_parser
    another_host = DEFAULT_HOST.next

    # A template learned from one host must not apply to another host
    parsed = []
    parser.call raw_template, DEFAULT_HOST
    parser.call(raw_data, another_host) do |time, record|
      assert false, 'nothing emitted'
    end
    parser.call raw_template, another_host
    parser.call(raw_data, another_host) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
  end

  test 'parse netflow v9 binary data with host-based sampler cache' do
    parser = create_parser
    another_host = DEFAULT_HOST.next

    # Sampler info cached for one host must not annotate another host's flows
    parsed = []
    [raw_sampler_template, raw_sampler_data, raw_template].each {|raw| parser.call(raw, DEFAULT_HOST){} }
    parser.call(raw_template, another_host){}
    parser.call(raw_data, another_host) do |time, record|
      parsed << [time, record]
    end

    assert_equal nil, parsed.first[1]['sampling_algorithm']
    assert_equal nil, parsed.first[1]['sampling_interval']
  end

  test 'parse netflow v9 binary data with templates whose AS field length varies' do
    parser = create_parser

    # The later 4-byte-AS template must supersede the 2-byte-AS one
    parsed = []
    [raw_2byte_as_template, raw_template].each {|raw| parser.call(raw, DEFAULT_HOST){} }
    parser.call(raw_data, DEFAULT_HOST) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
    assert_equal 0, parsed.first[1]['src_as']
    assert_equal 65000, parsed.first[1]['dst_as']
  end

  test 'parse netflow v9 binary data contains mpls information' do
    parser = create_parser

    parsed = []
    [raw_sampler_template, raw_sampler_data, raw_mpls_template].each {|raw| parser.call(raw, DEFAULT_HOST){} }
    parser.call(raw_mpls_data, DEFAULT_HOST) do |time, record|
      parsed << [time, record]
    end

    assert_equal 24002, parsed.first[1]['mpls_label_1']
    assert_equal '192.168.32.100', parsed.first[1]['ipv4_src_addr']
    assert_equal '172.16.32.2', parsed.first[1]['ipv4_dst_addr']
  end
end
225 |
--------------------------------------------------------------------------------