├── .github
└── workflows
│ ├── issue-auto-closer.yml
│ ├── linux.yml
│ └── windows.yml
├── .gitignore
├── Gemfile
├── ISSUE_TEMPLATE.md
├── LICENSE.txt
├── README.md
├── Rakefile
├── example
└── fluentd.conf
├── fluent-plugin-cloudwatch-logs.gemspec
├── lib
└── fluent
│ └── plugin
│ ├── cloudwatch
│ ├── logs.rb
│ └── logs
│ │ └── version.rb
│ ├── in_cloudwatch_logs.rb
│ └── out_cloudwatch_logs.rb
└── test
├── plugin
├── test_in_cloudwatch_logs.rb
└── test_out_cloudwatch_logs.rb
└── test_helper.rb
/.github/workflows/issue-auto-closer.yml:
--------------------------------------------------------------------------------
1 | name: Autocloser
2 | on: [issues]
3 | jobs:
4 | autoclose:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - name: Autoclose issues that did not follow issue template
8 | uses: roots/issue-closer-action@v1.1
9 | with:
10 | repo-token: ${{ secrets.GITHUB_TOKEN }}
11 | issue-close-message: "@${issue.user.login} this issue was automatically closed because it did not follow the issue template."
12 | issue-pattern: "(.*Problem.*)|(.*Expected Behavior or What you need to ask.*)|(.*Using Fluentd and CloudWatchLogs plugin versions.*)"
13 |
--------------------------------------------------------------------------------
/.github/workflows/linux.yml:
--------------------------------------------------------------------------------
1 | name: Testing on Ubuntu
2 | on:
3 | push:
4 | branches:
5 | - master
6 | pull_request:
7 | jobs:
8 | build:
9 | runs-on: ${{ matrix.os }}
10 | strategy:
11 | fail-fast: false
12 | matrix:
13 | ruby: [ '3.2', '3.3', '3.4' ]
14 | os:
15 | - ubuntu-latest
16 | name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
17 | steps:
18 | - uses: actions/checkout@v4
19 | - uses: ruby/setup-ruby@v1
20 | with:
21 | ruby-version: ${{ matrix.ruby }}
22 | - name: unit testing
23 | env:
24 | CI: true
25 | run: |
26 | bundle install --jobs 4 --retry 3
27 | bundle exec rake test
28 |
--------------------------------------------------------------------------------
/.github/workflows/windows.yml:
--------------------------------------------------------------------------------
1 | name: Testing on Windows
2 | on:
3 | push:
4 | branches:
5 | - master
6 | pull_request:
7 | jobs:
8 | build:
9 | runs-on: ${{ matrix.os }}
10 | strategy:
11 | fail-fast: false
12 | matrix:
13 | ruby: [ '3.2', '3.3', '3.4' ]
14 | os:
15 | - windows-latest
16 | name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
17 | steps:
18 | - uses: actions/checkout@v4
19 | - uses: ruby/setup-ruby@v1
20 | with:
21 | ruby-version: ${{ matrix.ruby }}
22 | - name: unit testing
23 | env:
24 | CI: true
25 | run: |
26 | bundle install --jobs 4 --retry 3
27 | bundle exec rake test
28 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.gem
2 | *.rbc
3 | .bundle
4 | .config
5 | .yardoc
6 | Gemfile.lock
7 | InstalledFiles
8 | _yardoc
9 | coverage
10 | doc/
11 | lib/bundler/man
12 | pkg
13 | rdoc
14 | spec/reports
15 | test/tmp
16 | test/version_tmp
17 | tmp
18 | *.bundle
19 | *.so
20 | *.o
21 | *.a
22 | mkmf.log
23 | .idea
24 | *.iml
25 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 |
3 | # Specify your gem's dependencies in fluent-plugin-cloudwatch-logs.gemspec
4 | gemspec
5 |
--------------------------------------------------------------------------------
/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | #### Problem
2 |
3 | ...
4 |
5 | #### Steps to replicate
6 |
7 | Provide example config and message
8 |
9 | #### Expected Behavior or What you need to ask
10 |
11 | ...
12 |
13 | #### Using Fluentd and CloudWatchLogs plugin versions
14 |
15 | * OS version
16 | * Bare Metal or within Docker or Kubernetes or others?
17 | * Fluentd v0.12 or v0.14/v1.0
18 | * paste result of ``fluentd --version`` or ``td-agent --version``
19 | * Dependent gem versions
20 | * paste boot log of fluentd or td-agent
21 | * paste result of ``fluent-gem list``, ``td-agent-gem list`` or your Gemfile.lock
22 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014 Ryota Arai
2 |
3 | MIT License
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining
6 | a copy of this software and associated documentation files (the
7 | "Software"), to deal in the Software without restriction, including
8 | without limitation the rights to use, copy, modify, merge, publish,
9 | distribute, sublicense, and/or sell copies of the Software, and to
10 | permit persons to whom the Software is furnished to do so, subject to
11 | the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be
14 | included in all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # fluent-plugin-cloudwatch-logs
2 |
3 | [](http://badge.fury.io/rb/fluent-plugin-cloudwatch-logs)
4 |
5 | [CloudWatch Logs](http://aws.amazon.com/blogs/aws/cloudwatch-log-service/) Plugin for Fluentd
6 |
7 | ## Requirements
8 |
9 | |fluent-plugin-cloudwatch-logs| fluentd | ruby |
10 | |-----------------------------|------------------|--------|
11 | | >= 0.8.0 | >= 1.8.0 | >= 2.4 |
12 | | >= 0.5.0 && < 0.8.0 | >= 0.14.15 | >= 2.1 |
13 | | <= 0.4.5 | ~> 0.12.0 * | >= 1.9 |
14 |
15 | \* May not support all future fluentd features
16 |
17 | ## Installation
18 |
19 | ### For Fluentd
20 |
21 | ```sh
22 | gem install fluent-plugin-cloudwatch-logs
23 | ```
24 |
25 | ### For fluent-package
26 |
27 | ```sh
28 | fluent-gem install fluent-plugin-cloudwatch-logs
29 | ```
30 |
31 | ### For td-agent
32 |
33 | ```sh
34 | td-agent-gem install fluent-plugin-cloudwatch-logs
35 | ```
36 |
37 | ## Preparation
38 |
39 | Create IAM user with a policy like the following:
40 |
41 | ```json
42 | {
43 | "Version": "2012-10-17",
44 | "Statement": [
45 | {
46 | "Effect": "Allow",
47 | "Action": [
48 | "logs:*",
49 | "s3:GetObject"
50 | ],
51 | "Resource": [
52 | "arn:aws:logs:us-east-1:*:*",
53 | "arn:aws:s3:::*"
54 | ]
55 | }
56 | ]
57 | }
58 | ```
59 |
60 | More restricted IAM policy for `out_cloudwatch_logs` is:
61 |
62 | ```json
63 | {
64 | "Version": "2012-10-17",
65 | "Statement": [
66 | {
67 | "Action": [
68 | "logs:PutLogEvents",
69 | "logs:CreateLogGroup",
70 | "logs:PutRetentionPolicy",
71 | "logs:CreateLogStream",
72 | "logs:DescribeLogGroups",
73 | "logs:DescribeLogStreams"
74 | ],
75 | "Effect": "Allow",
76 | "Resource": "*"
77 | }
78 | ]
79 | }
80 | ```
81 |
82 | Also, more restricted IAM policy for `in_cloudwatch_logs` is:
83 |
84 | ```json
85 | {
86 | "Version": "2012-10-17",
87 | "Statement": [
88 | {
89 | "Action": [
90 | "logs:GetLogEvents",
91 | "logs:DescribeLogStreams"
92 | ],
93 | "Effect": "Allow",
94 | "Resource": "*"
95 | }
96 | ]
97 | }
98 | ```
99 |
100 | ## Authentication
101 |
102 | There are several methods to provide authentication credentials. Be aware that there are various tradeoffs for these methods,
103 | although most of these tradeoffs are highly dependent on the specific environment.
104 |
105 | ### Environment
106 |
107 | Set region and credentials via the environment:
108 |
109 | ```sh
110 | export AWS_REGION=us-east-1
111 | export AWS_ACCESS_KEY_ID="YOUR_ACCESS_KEY"
112 | export AWS_SECRET_ACCESS_KEY="YOUR_SECRET_ACCESS_KEY"
113 | ```
114 |
115 | Note: For this to work persistently the environment will need to be set in the startup scripts or docker variables.
116 |
117 | ### AWS Configuration
118 |
119 | The plugin will look for the `$HOME/.aws/config` and `$HOME/.aws/credentials` for configuration information. To setup, as the
120 | fluentd user, run:
121 |
122 | ```sh
123 | aws configure
124 | ```
125 |
126 | ### Configuration Parameters
127 |
128 | The authentication information can also be set
129 |
130 | ## Example
131 |
132 | Start fluentd:
133 |
134 | ```sh
135 | fluentd -c example/fluentd.conf
136 | ```
137 |
138 | Send sample log to CloudWatch Logs:
139 |
140 | ```sh
141 | echo '{"hello":"world"}' | fluent-cat test.cloudwatch_logs.out
142 | ```
143 |
144 | Fetch sample log from CloudWatch Logs:
145 |
146 | ```sh
147 | # stdout
148 | 2014-07-17 00:28:02 +0900 test.cloudwatch_logs.in: {"hello":"world"}
149 | ```
150 |
151 | ## Configuration
152 |
153 | ### out_cloudwatch_logs
154 |
155 | ```aconf
156 |
157 | @type cloudwatch_logs
158 | log_group_name log-group-name
159 | log_stream_name log-stream-name
160 | auto_create_stream true
161 | #message_keys key1,key2,key3,...
162 | #max_message_length 32768
163 | #use_tag_as_group false
164 | #use_tag_as_stream false
165 | #include_time_key true
166 | #localtime true
167 | #log_group_name_key group_name_key
168 | #log_stream_name_key stream_name_key
169 | #remove_log_group_name_key true
170 | #remove_log_stream_name_key true
171 | #put_log_events_retry_wait 1s
172 | #put_log_events_retry_limit 17
173 | #put_log_events_disable_retry_limit false
174 | #endpoint http://localhost:5000/
175 | #json_handler json
176 | #log_rejected_request true
177 | #
178 | # role_arn "#{ENV['AWS_ROLE_ARN']}"
179 | # role_session_name ROLE_SESSION_NAME
180 | # web_identity_token_file "#{ENV['AWS_WEB_IDENTITY_TOKEN_FILE']}"
181 | #
182 | #
183 | # @type ltsv
184 | #
185 |
186 | ```
187 |
188 | * `auto_create_stream`: to create log group and stream automatically. (defaults to false)
189 | * `aws_key_id`: AWS Access Key. See [Authentication](#authentication) for more information.
190 | * `aws_sec_key`: AWS Secret Access Key. See [Authentication](#authentication) for more information.
191 | * `concurrency`: use to set the number of threads pushing data to CloudWatch. (default: 1)
192 | * `endpoint`: use this parameter to connect to the local API endpoint (for testing)
193 | * `ssl_verify_peer`: when `true` (default), SSL peer certificates are verified when establishing a connection. Setting to `false` can be useful for testing.
194 | * `http_proxy`: use to set an optional HTTP proxy
195 | * `include_time_key`: include time key as part of the log entry (defaults to UTC)
196 | * `json_handler`: name of the library to be used to handle JSON data. For now, supported libraries are `json` (default) and `yajl`.
197 | * `localtime`: use localtime timezone for `include_time_key` output (overrides UTC default)
198 | * `log_group_aws_tags`: set a hash with keys and values to tag the log group resource
199 | * `log_group_aws_tags_key`: use specified field of records as AWS tags for the log group
200 | * `log_group_name`: name of log group to store logs
201 | * `log_group_name_key`: use specified field of records as log group name
202 | * `log_rejected_request`: output `rejected_log_events_info` request log. (defaults to false)
203 | * `log_stream_name`: name of log stream to store logs
204 | * `log_stream_name_key`: use specified field of records as log stream name
205 | * `max_events_per_batch`: maximum number of events to send at once (default 10000)
206 | * `max_message_length`: maximum length of the message
207 | * `message_keys`: keys to send messages as events
208 | * `put_log_events_disable_retry_limit`: if true, `put_log_events_retry_limit` will be ignored
209 | * `put_log_events_retry_limit`: maximum count of retry (if exceeding this, the events will be discarded)
210 | * `put_log_events_retry_wait`: time before retrying PutLogEvents (retry interval increases exponentially like `put_log_events_retry_wait * (2 ^ retry_count)`)
211 | * `region`: AWS Region. See [Authentication](#authentication) for more information.
212 | * `remove_log_group_aws_tags_key`: remove field specified by `log_group_aws_tags_key`
213 | * `remove_log_group_name_key`: remove field specified by `log_group_name_key`
214 | * `remove_log_stream_name_key`: remove field specified by `log_stream_name_key`
215 | * `remove_retention_in_days_key`: remove field specified by `retention_in_days_key`
216 | * `retention_in_days`: use to set the expiry time for log group when created with `auto_create_stream`. (default to no expiry)
217 | * `retention_in_days_key`: use specified field of records as retention period
218 | * `use_tag_as_group`: to use tag as a group name
219 | * `use_tag_as_stream`: to use tag as a stream name
220 | * `<web_identity_credentials>`: For EKS authentication.
221 | * `role_arn`: The Amazon Resource Name (ARN) of the role to assume. This parameter is required when using `<web_identity_credentials>`.
222 | * `role_session_name`: An identifier for the assumed role session. This parameter is required when using `<web_identity_credentials>`.
223 | * `web_identity_token_file`: The absolute path to the file on disk containing the OIDC token. This parameter is required when using `<web_identity_credentials>`.
224 | * `policy`: An IAM policy in JSON format. (default `nil`)
225 | * `duration_seconds`: The duration, in seconds, of the role session. The value can range from
226 | 900 seconds (15 minutes) to 43200 seconds (12 hours). By default, the value
227 | is set to 3600 seconds (1 hour). (default `nil`)
228 | * `<format>`: For specifying records format. See [formatter overview](https://docs.fluentd.org/formatter) and [formatter section overview](https://docs.fluentd.org/configuration/format-section) on the official documentation.
229 |
230 | **NOTE:** `retention_in_days` requests additional IAM permission `logs:PutRetentionPolicy` for log_group.
231 | Please refer to [the PutRetentionPolicy column in documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/permissions-reference-cwl.html) for details.
232 |
233 | ### in_cloudwatch_logs
234 |
235 | ```aconf
236 |
237 | @type cloudwatch_logs
238 | tag cloudwatch.in
239 | log_group_name group
240 | #add_log_group_name true
241 | #log_group_name_key group_name_key
242 | #use_log_group_name_prefix true
243 | log_stream_name stream
244 | #use_log_stream_name_prefix true
245 | state_file /var/lib/fluent/group_stream.in.state
246 | #endpoint http://localhost:5000/
247 | #json_handler json
248 | # start_time "2020-03-01 00:00:00Z"
249 | # end_time "2020-04-30 15:00:00Z"
250 | # time_range_format "%Y-%m-%d %H:%M:%S%z"
251 | # Users can use `format` or `<parse>` directive to parse non-JSON CloudwatchLogs' log
252 | # format none # or csv, tsv, regexp etc.
253 | #
254 | # @type none # or csv, tsv, regexp etc.
255 | #
256 | #
257 | # @type local # or redis, memcached, etc.
258 | #
259 | #
260 | # role_arn "#{ENV['AWS_ROLE_ARN']}"
261 | # role_session_name ROLE_SESSION_NAME
262 | # web_identity_token_file "#{ENV['AWS_WEB_IDENTITY_TOKEN_FILE']}"
263 | #
264 |
265 | ```
266 |
267 | * `aws_key_id`: AWS Access Key. See [Authentication](#authentication) for more information.
268 | * `aws_sec_key`: AWS Secret Access Key. See [Authentication](#authentication) for more information.
269 | * `aws_sts_role_arn`: the role ARN to assume when using cross-account sts authentication
270 | * `aws_sts_session_name`: the session name to use with sts authentication (default: `fluentd`)
271 | * `aws_use_sts`: use [AssumeRoleCredentials](http://docs.aws.amazon.com/sdkforruby/api/Aws/AssumeRoleCredentials.html) to authenticate, rather than the [default credential hierarchy](http://docs.aws.amazon.com/sdkforruby/api/Aws/CloudWatchLogs/Client.html#initialize-instance_method). See 'Cross-Account Operation' below for more detail.
272 | * `endpoint`: use this parameter to connect to the local API endpoint (for testing)
273 | * `ssl_verify_peer`: when `true` (default), SSL peer certificates are verified when establishing a connection. Setting to `false` can be useful for testing.
274 | * `fetch_interval`: time period in seconds between checking CloudWatch for new logs. (default: 60)
275 | * `http_proxy`: use to set an optional HTTP proxy
276 | * `json_handler`: name of the library to be used to handle JSON data. For now, supported libraries are `json` (default) and `yajl`.
277 | * `log_group_name`: name of log group to fetch logs
278 | * `add_log_group_name`: add record into the name of log group (default `false`)
279 | * `log_group_name_key`: specify the key where adding record into the name of log group (default `'log_group'`)
280 | * `use_log_group_name_prefix`: to use `log_group_name` as log group name prefix (default `false`)
281 | * `log_stream_name`: name of log stream to fetch logs
282 | * `region`: AWS Region. See [Authentication](#authentication) for more information.
283 | * `throttling_retry_seconds`: time period in seconds to retry a request when aws CloudWatch rate limit exceeds (default: nil)
284 | * `include_metadata`: include metadata such as `log_group_name` and `log_stream_name`. (default: false)
285 | * `state_file`: file to store current state (e.g. next\_forward\_token). This parameter is deprecated. Use `<storage>` instead.
286 | * `tag`: fluentd tag
287 | * `use_log_stream_name_prefix`: to use `log_stream_name` as log stream name prefix (default false)
288 | * `use_todays_log_stream`: use today's and yesterday's dates as log stream name prefix (formatted YYYY/MM/DD). (default: `false`)
289 | * `use_aws_timestamp`: get timestamp from Cloudwatch event for non json logs, otherwise fluentd will parse the log to get the timestamp (default `false`)
290 | * `start_time`: specify starting time range for obtaining logs. (default: `nil`)
291 | * `end_time`: specify ending time range for obtaining logs. (default: `nil`)
292 | * `time_range_format`: specify time format for time range. (default: `%Y-%m-%d %H:%M:%S`)
293 | * `format`: specify CloudWatchLogs' log format. (default `nil`)
294 | * `<parse>`: specify parser plugin configuration. see also: https://docs.fluentd.org/v/1.0/parser#how-to-use
295 | * `<storage>`: specify storage plugin configuration. see also: https://docs.fluentd.org/v/1.0/storage#how-to-use
296 | * `<web_identity_credentials>`: For EKS authentication.
297 | * `role_arn`: The Amazon Resource Name (ARN) of the role to assume. This parameter is required when using `<web_identity_credentials>`.
298 | * `role_session_name`: An identifier for the assumed role session. This parameter is required when using `<web_identity_credentials>`.
299 | * `web_identity_token_file`: The absolute path to the file on disk containing the OIDC token. This parameter is required when using `<web_identity_credentials>`.
300 | * `policy`: An IAM policy in JSON format. (default `nil`)
301 | * `duration_seconds`: The duration, in seconds, of the role session. The value can range from
302 | 900 seconds (15 minutes) to 43200 seconds (12 hours). By default, the value
303 | is set to 3600 seconds (1 hour). (default `nil`)
304 |
305 | ## Test
306 |
307 | Set credentials:
308 |
309 | ```aconf
310 | $ export AWS_REGION=us-east-1
311 | $ export AWS_ACCESS_KEY_ID="YOUR_ACCESS_KEY"
312 | $ export AWS_SECRET_ACCESS_KEY="YOUR_SECRET_KEY"
313 | ```
314 |
315 | Run tests:
316 |
317 | ```sh
318 | rake test
319 | ```
320 |
321 | Or, if you do not want to use an IAM role or ENV vars (this is just like writing them to the configuration file):
322 |
323 | ```sh
324 | rake aws_key_id=YOUR_ACCESS_KEY aws_sec_key=YOUR_SECRET_KEY region=us-east-1 test
325 | ```
326 |
327 | If you want to run the test suite against a mock server, set `endpoint` as below:
328 |
329 | ```sh
330 | export endpoint='http://localhost:5000/'
331 | rake test
332 | ```
333 |
334 |
335 | ## Caution
336 |
337 | If an event message exceeds API limit (1MB), the event will be discarded.
338 |
339 | ## Cross-Account Operation
340 |
341 | In order to have an instance of this plugin running in one AWS account to fetch logs from another account cross-account IAM authentication is required. Whilst this can be accomplished by configuring specific instances of the plugin manually with credentials for the source account in question this is not desirable for a number of reasons.
342 |
343 | In this case IAM can be used to allow the fluentd instance in one account ("A") to ingest Cloudwatch logs from another ("B") via the following mechanism:
344 |
345 | * plugin instance running in account "A" has an IAM instance role assigned to the underlying EC2 instance
346 | * The IAM instance role and associated policies permit the EC2 instance to assume a role in another account
347 | * An IAM role in account "B" and associated policies allow read access to the Cloudwatch Logs service, as appropriate.
348 |
349 | ### IAM Detail: Consuming Account "A"
350 |
351 | * Create an IAM role `cloudwatch`
352 | * Attach a policy to allow the role holder to assume another role (where `ACCOUNT-B` is substituted for the appropriate account number):
353 |
354 | ```json
355 | {
356 | "Version": "2012-10-17",
357 | "Statement": [
358 | {
359 | "Effect": "Allow",
360 | "Action": [
361 | "sts:*"
362 | ],
363 | "Resource": [
364 | "arn:aws:iam::ACCOUNT-B:role/fluentd"
365 | ]
366 | }
367 | ]
368 | }
369 | ```
370 |
371 | * Ensure the EC2 instance on which this plugin is executing has role `cloudwatch` as its assigned IAM instance role.
372 |
373 | ### IAM Detail: Log Source Account "B"
374 |
375 | * Create an IAM role `fluentd`
376 | * Ensure the `fluentd` role has account "A" as a trusted entity:
377 |
378 | ```json
379 | {
380 | "Version": "2012-10-17",
381 | "Statement": [
382 | {
383 | "Effect": "Allow",
384 | "Principal": {
385 | "AWS": "arn:aws:iam::ACCOUNT-A:root"
386 | },
387 | "Action": "sts:AssumeRole"
388 | }
389 | ]
390 | }
391 | ```
392 |
393 | * Attach a policy:
394 |
395 | ```json
396 | {
397 | "Version": "2012-10-17",
398 | "Statement": [
399 | {
400 | "Effect": "Allow",
401 | "Action": [
402 | "logs:DescribeDestinations",
403 | "logs:DescribeExportTasks",
404 | "logs:DescribeLogGroups",
405 | "logs:DescribeLogStreams",
406 | "logs:DescribeMetricFilters",
407 | "logs:DescribeSubscriptionFilters",
408 | "logs:FilterLogEvents",
409 | "logs:GetLogEvents"
410 | ],
411 | "Resource": [
412 | "arn:aws:logs:eu-west-1:ACCOUNT-B:log-group:LOG_GROUP_NAME_FOR_CONSUMPTION:*"
413 | ]
414 | }
415 | ]
416 | }
417 | ```
418 |
419 | ### Configuring the plugin for STS authentication
420 |
421 | ```aconf
422 |
423 | @type cloudwatch_logs
424 | region us-east-1 # You must supply a region
425 | aws_use_sts true
426 | aws_sts_role_arn arn:aws:iam::ACCOUNT-B:role/fluentd
427 | log_group_name LOG_GROUP_NAME_FOR_CONSUMPTION
428 | log_stream_name SOME_PREFIX
429 | use_log_stream_name_prefix true
430 | state_file /path/to/state_file
431 | format /(?.+)/
432 |
433 | ```
434 |
435 | ### Using built-in placeholders, but they don't replace placeholders with actual values, why?
436 |
437 | Built-in placeholders use buffer metadata when replacing placeholders with actual values.
438 | So, you should specify buffer attributes what you want to replace placeholders with.
439 |
440 | Using `${tag}` placeholders, you should specify `tag` attributes in buffer:
441 |
442 | ```aconf
443 |
444 | @type memory
445 |
446 | ```
447 |
448 | Using `%Y%m%d` placeholders, you should specify `time` attributes in buffer:
449 |
450 | ```aconf
451 |
452 | @type memory
453 | timekey 3600
454 |
455 | ```
456 |
457 | In more detail, please refer to [the official document for built-in placeholders](https://docs.fluentd.org/v1.0/articles/buffer-section#placeholders).
458 |
459 | ## TODO
460 |
461 | * out_cloudwatch_logs
462 | * if the data is too big for API, split into multiple requests
463 | * check data size
464 | * in_cloudwatch_logs
465 | * fallback to start_time because next_token expires after 24 hours
466 |
467 | ## Contributing
468 |
469 | 1. Fork it ( https://github.com/[my-github-username]/fluent-plugin-cloudwatch-logs/fork )
470 | 2. Create your feature branch (`git checkout -b my-new-feature`)
471 | 3. Commit your changes (`git commit -am 'Add some feature'`)
472 | 4. Push to the branch (`git push origin my-new-feature`)
473 | 5. Create a new Pull Request
474 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | require "bundler/gem_tasks"
2 |
3 | require 'rake/testtask'
4 |
5 | Rake::TestTask.new(:test) do |test|
6 | test.libs << 'test'
7 | test.test_files = FileList['test/plugin/*.rb']
8 | end
9 |
10 | task :default => :test
11 |
--------------------------------------------------------------------------------
/example/fluentd.conf:
--------------------------------------------------------------------------------
1 |
2 | @type forward
3 |
4 |
5 |
6 | @type cloudwatch_logs
7 | tag test.cloudwatch_logs.in
8 | log_group_name fluent-plugin-cloudwatch-example
9 | log_stream_name fluent-plugin-cloudwatch-example
10 | state_file /tmp/fluent-plugin-cloudwatch-example.state
11 |
12 |
13 |
14 | @type cloudwatch_logs
15 | log_group_name fluent-plugin-cloudwatch-example
16 | log_stream_name fluent-plugin-cloudwatch-example
17 | auto_create_stream true
18 |
19 |
20 |
21 | @type stdout
22 |
23 |
24 |
--------------------------------------------------------------------------------
/fluent-plugin-cloudwatch-logs.gemspec:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | lib = File.expand_path('../lib', __FILE__)
3 | $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
4 | require 'fluent/plugin/cloudwatch/logs/version'
5 |
6 | Gem::Specification.new do |spec|
7 | spec.name = "fluent-plugin-cloudwatch-logs"
8 | spec.version = Fluent::Plugin::Cloudwatch::Logs::VERSION
9 | spec.authors = ["Ryota Arai"]
10 | spec.email = ["ryota.arai@gmail.com"]
11 | spec.summary = %q{CloudWatch Logs Plugin for Fluentd}
12 | spec.homepage = "https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs"
13 | spec.license = "MIT"
14 |
15 | spec.files = `git ls-files -z`.split("\x0")
16 | spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
17 | spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
18 | spec.require_paths = ["lib"]
19 |
20 | spec.add_dependency 'fluentd', '>= 1.8.0'
21 | spec.add_dependency 'aws-sdk-cloudwatchlogs', '~> 1.0'
22 |
23 | spec.add_development_dependency "bundler"
24 | spec.add_development_dependency "rake"
25 | spec.add_development_dependency "test-unit"
26 | spec.add_development_dependency "test-unit-rr"
27 | spec.add_development_dependency "mocha"
28 | spec.add_development_dependency "nokogiri"
29 | end
30 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/cloudwatch/logs.rb:
--------------------------------------------------------------------------------
1 | require "fluent/plugin/cloudwatch/logs/version"
2 |
3 | module Fluent
4 | module Plugin
5 | module Cloudwatch
6 | module Logs
7 | # Your code goes here...
8 | end
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/cloudwatch/logs/version.rb:
--------------------------------------------------------------------------------
1 | module Fluent
2 | module Plugin
3 | module Cloudwatch
4 | module Logs
5 | VERSION = "0.15.0"
6 | end
7 | end
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/in_cloudwatch_logs.rb:
--------------------------------------------------------------------------------
1 | require 'date'
2 | require 'time'
3 | require 'fluent/plugin/input'
4 | require 'fluent/plugin/parser'
5 | require 'yajl'
6 |
7 | module Fluent::Plugin
8 | class CloudwatchLogsInput < Input
9 | Fluent::Plugin.register_input('cloudwatch_logs', self)
10 |
11 | helpers :parser, :thread, :compat_parameters, :storage
12 |
13 | DEFAULT_STORAGE_TYPE = 'local'
14 |
15 | config_param :aws_key_id, :string, default: nil, secret: true
16 | config_param :aws_sec_key, :string, default: nil, secret: true
17 | config_param :aws_use_sts, :bool, default: false
18 | config_param :aws_sts_role_arn, :string, default: nil
19 | config_param :aws_sts_session_name, :string, default: 'fluentd'
20 | config_param :aws_sts_external_id, :string, default: nil
21 | config_param :aws_sts_policy, :string, default: nil
22 | config_param :aws_sts_duration_seconds, :time, default: nil
23 | config_param :aws_sts_endpoint_url, :string, default: nil
24 | config_param :aws_ecs_authentication, :bool, default: false
25 | config_param :region, :string, default: nil
26 | config_param :endpoint, :string, default: nil
27 | config_param :ssl_verify_peer, :bool, :default => true
28 | config_param :tag, :string
29 | config_param :log_group_name, :string
30 | config_param :add_log_group_name, :bool, default: false
31 | config_param :log_group_name_key, :string, default: 'log_group'
32 | config_param :use_log_group_name_prefix, :bool, default: false
33 | config_param :log_stream_name, :string, default: nil
34 | config_param :use_log_stream_name_prefix, :bool, default: false
35 | config_param :state_file, :string, default: nil,
36 | deprecated: "Use instead."
37 | config_param :fetch_interval, :time, default: 60
38 | config_param :http_proxy, :string, default: nil
39 | config_param :json_handler, :enum, list: [:yajl, :json], default: :yajl
40 | config_param :use_todays_log_stream, :bool, default: false
41 | config_param :use_aws_timestamp, :bool, default: false
42 | config_param :start_time, :string, default: nil
43 | config_param :end_time, :string, default: nil
44 | config_param :time_range_format, :string, default: "%Y-%m-%d %H:%M:%S"
45 | config_param :throttling_retry_seconds, :time, default: nil
46 | config_param :include_metadata, :bool, default: false
47 | config_section :web_identity_credentials, multi: false do
48 | config_param :role_arn, :string
49 | config_param :role_session_name, :string
50 | config_param :web_identity_token_file, :string, default: nil #required
51 | config_param :policy, :string, default: nil
52 | config_param :duration_seconds, :time, default: nil
53 | end
54 |
55 | config_section :parse do
56 | config_set_default :@type, 'none'
57 | end
58 |
59 | config_section :storage do
60 | config_set_default :usage, 'store_next_tokens'
61 | config_set_default :@type, DEFAULT_STORAGE_TYPE
62 | config_set_default :persistent, false
63 | end
64 |
65 | def initialize
66 | super
67 |
68 | @parser = nil
69 | require 'aws-sdk-cloudwatchlogs'
70 | end
71 |
72 | def configure(conf)
73 | compat_parameters_convert(conf, :parser)
74 | super
75 | configure_parser(conf)
76 |
77 | @start_time = (Time.strptime(@start_time, @time_range_format).to_f * 1000).floor if @start_time
78 | @end_time = (Time.strptime(@end_time, @time_range_format).to_f * 1000).floor if @end_time
79 | if @start_time && @end_time && (@end_time < @start_time)
80 | raise Fluent::ConfigError, "end_time(#{@end_time}) should be greater than start_time(#{@start_time})."
81 | end
82 | @next_token_storage = storage_create(usage: 'store_next_tokens', conf: config, default_type: DEFAULT_STORAGE_TYPE)
83 | end
84 |
# Builds AWS credentials per the configured auth mode, creates the
# CloudWatch Logs client and starts the background fetch thread.
#
# FIX: resolve @json_handler from its symbol to the actual JSON module
# BEFORE spawning the runner thread. Previously the thread was created
# first, so a fast first fetch could call `@json_handler.load` while it
# was still the Symbol :yajl/:json, raising NoMethodError.
def start
  super
  options = {}
  options[:region] = @region if @region
  options[:endpoint] = @endpoint if @endpoint
  options[:ssl_verify_peer] = @ssl_verify_peer
  options[:http_proxy] = @http_proxy if @http_proxy

  if @aws_use_sts
    # Assume a role via STS; an explicit STS client is built when a
    # region (and optionally a custom STS endpoint) is configured.
    Aws.config[:region] = options[:region]
    credentials_options = {
      role_arn: @aws_sts_role_arn,
      role_session_name: @aws_sts_session_name,
      external_id: @aws_sts_external_id,
      policy: @aws_sts_policy,
      duration_seconds: @aws_sts_duration_seconds
    }
    credentials_options[:sts_endpoint_url] = @aws_sts_endpoint_url if @aws_sts_endpoint_url
    if @region and @aws_sts_endpoint_url
      credentials_options[:client] = Aws::STS::Client.new(:region => @region, endpoint: @aws_sts_endpoint_url)
    elsif @region
      credentials_options[:client] = Aws::STS::Client.new(:region => @region)
    end
    options[:credentials] = Aws::AssumeRoleCredentials.new(credentials_options)
  elsif @web_identity_credentials
    # Web-identity (OIDC token file) based role assumption.
    c = @web_identity_credentials
    credentials_options = {}
    credentials_options[:role_arn] = c.role_arn
    credentials_options[:role_session_name] = c.role_session_name
    credentials_options[:web_identity_token_file] = c.web_identity_token_file
    credentials_options[:policy] = c.policy if c.policy
    credentials_options[:duration_seconds] = c.duration_seconds if c.duration_seconds
    if @region
      credentials_options[:client] = Aws::STS::Client.new(:region => @region)
    end
    options[:credentials] = Aws::AssumeRoleWebIdentityCredentials.new(credentials_options)
  elsif @aws_ecs_authentication
    # collect AWS credential from ECS relative uri ENV variable
    aws_container_credentials_relative_uri = ENV["AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"]
    options[:credentials] = Aws::ECSCredentials.new({credential_path: aws_container_credentials_relative_uri}).credentials
  else
    # Static keys; when absent, the SDK default provider chain is used.
    options[:credentials] = Aws::Credentials.new(@aws_key_id, @aws_sec_key) if @aws_key_id && @aws_sec_key
  end

  @logs = Aws::CloudWatchLogs::Client.new(options)

  # Must happen before thread_create: the runner thread uses @json_handler.
  @json_handler = case @json_handler
                  when :yajl
                    Yajl
                  when :json
                    JSON
                  end

  @finished = false
  thread_create(:in_cloudwatch_logs_runner, &method(:run))
end
141 |
# Signals the runner thread to stop (it checks @finished each loop),
# then lets the base class finish shutdown.
def shutdown
  @finished = true
  super
end
146 |
# Kept public so unit tests can call it directly.
# Builds the storage key for a stream (optionally scoped by group),
# replacing path separators so the key is filesystem-safe; with no
# stream name the bare @state_file is returned.
def state_key_for(log_stream_name, log_group_name = nil)
  return @state_file unless log_stream_name

  parts = [@state_file]
  parts << log_group_name.gsub(File::SEPARATOR, '-') if log_group_name
  parts << log_stream_name.gsub(File::SEPARATOR, '-')
  parts.join('_')
end
157 |
private
# Creates the record parser. The legacy top-level 'format' parameter wins
# over a <parse> section; when neither is present @parser stays nil and
# emit() falls back to raw JSON decoding.
def configure_parser(conf)
  if conf['format']
    @parser = parser_create
  elsif parser_config = conf.elements('parse').first
    @parser = parser_create(conf: parser_config)
  end
end
166 |
# One-time migration: copies a legacy on-disk state file's token into the
# storage plugin (keyed by the same name), then removes the file.
def migrate_state_file_to_storage(log_stream_name)
  @next_token_storage.put(:"#{state_key_for(log_stream_name)}", File.read(state_key_for(log_stream_name)).chomp)
  File.delete(state_key_for(log_stream_name))
end
171 |
# Returns the stored next_forward_token for the stream (and group).
# NOTE(review): the migration check keys on the stream name only — this
# matches the pre-storage state-file layout, which never included a
# group name; confirm if group-scoped migration is ever needed.
def next_token(log_stream_name, log_group_name = nil)
  if @next_token_storage.persistent && File.exist?(state_key_for(log_stream_name))
    migrate_state_file_to_storage(log_stream_name)
  end
  @next_token_storage.get(:"#{state_key_for(log_stream_name, log_group_name)}")
end
178 |
# Persists the next_forward_token for the stream (and group) so a later
# fetch resumes where this one stopped.
def store_next_token(token, log_stream_name = nil, log_group_name = nil)
  @next_token_storage.put(:"#{state_key_for(log_stream_name, log_group_name)}", token)
end
182 |
# Fetch-thread main loop: every @fetch_interval seconds, resolve the set
# of log groups (prefix expansion when use_log_group_name_prefix) and
# streams (prefix / today-yesterday expansion), pull new events and emit
# them. Polls @finished once per second so shutdown stays responsive.
#
# FIX: in the single-stream branch, metadata previously reported
# @log_group_name — the configured *prefix* — instead of the group
# actually being read; wrong whenever use_log_group_name_prefix matched
# more than one group. It now uses the per-iteration log_group_name.
def run
  @next_fetch_time = Time.now

  until @finished
    if Time.now > @next_fetch_time
      @next_fetch_time += @fetch_interval

      if @use_log_group_name_prefix
        log_group_names = describe_log_groups(@log_group_name).map{|log_group|
          log_group.log_group_name
        }
      else
        log_group_names = [@log_group_name]
      end
      log_group_names.each do |log_group_name|
        if @use_log_stream_name_prefix || @use_todays_log_stream
          log_stream_name_prefix = @use_todays_log_stream ? get_todays_date : @log_stream_name
          begin
            log_streams = describe_log_streams(log_stream_name_prefix, nil, nil, log_group_name)
            # Daily streams: also read yesterday's stream to catch events
            # written around midnight.
            log_streams.concat(describe_log_streams(get_yesterdays_date, nil, nil, log_group_name)) if @use_todays_log_stream
            log_streams.each do |log_stream|
              log_stream_name = log_stream.log_stream_name
              events = get_events(log_group_name, log_stream_name)
              metadata = if @include_metadata
                           {
                             "log_stream_name" => log_stream_name,
                             "log_group_name" => log_group_name
                           }
                         else
                           {}
                         end
              events.each do |event|
                emit(log_group_name, log_stream_name, event, metadata)
              end
            end
          rescue Aws::CloudWatchLogs::Errors::ResourceNotFoundException
            log.warn "'#{@log_stream_name}' prefixed log stream(s) are not found"
            next
          end
        else
          events = get_events(log_group_name, @log_stream_name)
          metadata = if @include_metadata
                       {
                         "log_stream_name" => @log_stream_name,
                         # Per-iteration group (was @log_group_name, the prefix).
                         "log_group_name" => log_group_name
                       }
                     else
                       {}
                     end
          events.each do |event|
            emit(log_group_name, @log_stream_name, event, metadata)
          end
        end
      end
    end
    sleep 1
  end
end
241 |
# Emits one CloudWatch event as a Fluentd record.
# With a configured parser, the parsed record is emitted (optionally
# overriding the parser's time with the AWS event timestamp). Without a
# parser the message is decoded as JSON; undecodable messages go to the
# error stream via emit_error_event.
# NOTE(review): the `stream` parameter is currently unused here.
def emit(group, stream, event, metadata)
  if @parser
    @parser.parse(event.message) {|time,record|
      if @use_aws_timestamp
        # Event timestamps are epoch milliseconds; Fluentd time is seconds.
        time = (event.timestamp / 1000).floor
      end
      if @add_log_group_name
        record[@log_group_name_key] = group
      end
      unless metadata.empty?
        record.merge!("metadata" => metadata)
      end
      router.emit(@tag, time, record)
    }
  else
    time = (event.timestamp / 1000).floor
    begin
      record = @json_handler.load(event.message)
      if @add_log_group_name
        record[@log_group_name_key] = group
      end
      unless metadata.empty?
        record.merge!("metadata" => metadata)
      end
      router.emit(@tag, time, record)
    rescue JSON::ParserError, Yajl::ParseError => error # Catch parser errors
      log.error "Invalid JSON encountered while parsing event.message"
      router.emit_error_event(@tag, time, { message: event.message }, error)
    end
  end
end
273 |
# Fetches one page of log events for the stream via GetLogEvents, resuming
# from the stored next_forward_token and storing the new token when it
# advanced. Wrapped in throttling_handler for optional throttle retries.
def get_events(log_group_name, log_stream_name)
  throttling_handler('get_log_events') do
    request = {
      log_group_name: log_group_name,
      log_stream_name: log_stream_name
    }
    request.merge!(start_time: @start_time) if @start_time
    request.merge!(end_time: @end_time) if @end_time
    # Tokens are stored per (stream, group) only when groups are expanded
    # from a prefix; otherwise per stream.
    if @use_log_group_name_prefix
      log_next_token = next_token(log_stream_name, log_group_name)
    else
      log_next_token = next_token(log_stream_name)
    end
    request[:next_token] = log_next_token if !log_next_token.nil? && !log_next_token.empty?
    request[:start_from_head] = true if read_from_head?(log_next_token)
    response = @logs.get_log_events(request)
    if valid_next_token(log_next_token, response.next_forward_token)
      if @use_log_group_name_prefix
        store_next_token(response.next_forward_token, log_stream_name, log_group_name)
      else
        store_next_token(response.next_forward_token, log_stream_name)
      end
    end

    response.events
  end
end
301 |
# Whether GetLogEvents should read from the oldest event: true when we
# already hold a resume token or when an explicit time range is set.
def read_from_head?(next_token)
  has_token = !next_token.nil? && !next_token.empty?
  has_token || @start_time || @end_time
end
305 |
# Lists log streams matching the optional prefix, recursing through
# pagination via next_token and accumulating into log_streams.
# Falls back to @log_group_name when no explicit group is given.
def describe_log_streams(log_stream_name_prefix, log_streams = nil, next_token = nil, log_group_name=nil)
  throttling_handler('describe_log_streams') do
    request = {
      log_group_name: log_group_name != nil ? log_group_name : @log_group_name
    }
    request[:next_token] = next_token if next_token
    request[:log_stream_name_prefix] = log_stream_name_prefix if log_stream_name_prefix
    response = @logs.describe_log_streams(request)
    if log_streams
      log_streams.concat(response.log_streams)
    else
      log_streams = response.log_streams
    end
    if response.next_token
      # Recurse for the next page; the accumulated array is threaded through.
      log_streams = describe_log_streams(log_stream_name_prefix, log_streams, response.next_token, log_group_name)
    end
    log_streams
  end
end
325 |
# Runs the given block; on a ThrottlingException, waits
# throttling_retry_seconds and retries (recursively). When the option is
# unset the exception is re-raised.
# NOTE(review): retries are unbounded while throttling persists — each
# recursion re-enters this handler; confirm that is the intended policy.
def throttling_handler(method_name)
  yield
rescue Aws::CloudWatchLogs::Errors::ThrottlingException => err
  if throttling_retry_seconds
    log.warn "ThrottlingException #{method_name}. Waiting #{throttling_retry_seconds} seconds to retry."
    sleep throttling_retry_seconds

    throttling_handler(method_name) { yield }
  else
    raise err
  end
end
338 |
# Lists log groups whose names start with the prefix, recursing through
# pagination and accumulating into log_groups.
# NOTE(review): unlike describe_log_streams/get_events this call is not
# wrapped in throttling_handler — confirm whether that is intentional.
def describe_log_groups(log_group_name_prefix, log_groups = nil, next_token = nil)
  request = {
    log_group_name_prefix: log_group_name_prefix
  }
  request[:next_token] = next_token if next_token
  response = @logs.describe_log_groups(request)
  if log_groups
    log_groups.concat(response.log_groups)
  else
    log_groups = response.log_groups
  end
  if response.next_token
    log_groups = describe_log_groups(log_group_name_prefix, log_groups, response.next_token)
  end
  log_groups
end
355 |
# A forward token is worth storing only when present and different from
# the one we sent (an unchanged token means no new events).
def valid_next_token(prev_token, next_token)
  return next_token if next_token.nil?

  prev_token != next_token.chomp
end
359 |
# Today's local date in the "YYYY/MM/DD" form used by daily stream names.
def get_todays_date
  today = Date.today
  today.strftime("%Y/%m/%d")
end
363 |
# Yesterday's local date in the same "YYYY/MM/DD" form.
def get_yesterdays_date
  yesterday = Date.today.prev_day
  yesterday.strftime("%Y/%m/%d")
end
367 | end
368 | end
369 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/out_cloudwatch_logs.rb:
--------------------------------------------------------------------------------
1 | require 'fluent/plugin/output'
2 | require 'fluent/msgpack_factory'
3 | require 'thread'
4 | require 'yajl'
5 |
module Fluent::Plugin
  # Output plugin that ships Fluentd events to Amazon CloudWatch Logs
  # via the PutLogEvents API.
  class CloudwatchLogsOutput < Output
    Fluent::Plugin.register_output('cloudwatch_logs', self)

    # Raised (and never retried) when a single event exceeds the
    # per-event API size limit.
    class TooLargeEventError < Fluent::UnrecoverableError; end

    helpers :compat_parameters, :inject, :formatter

    DEFAULT_BUFFER_TYPE = "memory"

    config_param :aws_key_id, :string, :default => nil, :secret => true
    config_param :aws_sec_key, :string, :default => nil, :secret => true
    config_param :aws_instance_profile_credentials_retries, :integer, default: nil
    config_param :aws_use_sts, :bool, default: false
    config_param :aws_sts_role_arn, :string, default: nil
    config_param :aws_sts_session_name, :string, default: 'fluentd'
    config_param :aws_sts_external_id, :string, default: nil
    config_param :aws_sts_policy, :string, default: nil
    config_param :aws_sts_duration_seconds, :time, default: nil
    config_param :aws_sts_endpoint_url, :string, default: nil
    config_param :aws_ecs_authentication, :bool, default: false
    config_param :region, :string, :default => nil
    config_param :endpoint, :string, :default => nil
    config_param :ssl_verify_peer, :bool, :default => true
    config_param :log_group_name, :string, :default => nil
    config_param :log_stream_name, :string, :default => nil
    config_param :auto_create_stream, :bool, default: false
    config_param :message_keys, :array, :default => [], value_type: :string
    config_param :max_message_length, :integer, :default => nil
    config_param :max_events_per_batch, :integer, :default => 10000
    config_param :use_tag_as_group, :bool, :default => false # TODO: Rename to use_tag_as_group_name ?
    config_param :use_tag_as_stream, :bool, :default => false # TODO: Rename to use_tag_as_stream_name ?
    config_param :log_group_name_key, :string, :default => nil
    config_param :log_stream_name_key, :string, :default => nil
    config_param :remove_log_group_name_key, :bool, :default => false
    config_param :remove_log_stream_name_key, :bool, :default => false
    config_param :http_proxy, :string, default: nil
    config_param :put_log_events_retry_wait, :time, default: 1.0
    config_param :put_log_events_retry_limit, :integer, default: 17
    config_param :put_log_events_disable_retry_limit, :bool, default: false
    config_param :concurrency, :integer, default: 1
    config_param :log_group_aws_tags, :hash, default: nil
    config_param :log_group_aws_tags_key, :string, default: nil
    config_param :remove_log_group_aws_tags_key, :bool, default: false
    config_param :retention_in_days, :integer, default: nil
    config_param :retention_in_days_key, :string, default: nil
    config_param :remove_retention_in_days_key, :bool, default: false
    config_param :json_handler, :enum, list: [:yajl, :json], :default => :yajl
    config_param :log_rejected_request, :bool, :default => false
    # Credentials via STS AssumeRoleWithWebIdentity (e.g. EKS IRSA).
    config_section :web_identity_credentials, multi: false do
      config_param :role_arn, :string
      config_param :role_session_name, :string
      config_param :web_identity_token_file, :string, default: nil #required
      config_param :policy, :string, default: nil
      config_param :duration_seconds, :time, default: nil
    end

    config_section :buffer do
      config_set_default :@type, DEFAULT_BUFFER_TYPE
    end
    config_section :format do
      config_set_default :@type, 'json'
    end

    # PutLogEvents API limits: 1 MiB per batch, 1 MiB per event, plus
    # 26 bytes of per-event overhead counted toward the batch size.
    MAX_EVENTS_SIZE = 1_048_576
    MAX_EVENT_SIZE = 1024 * 1024
    EVENT_HEADER_SIZE = 26
73 |
    # Requires the AWS SDK at instantiation time so merely loading this
    # file does not need the gem to be present.
    def initialize
      super

      require 'aws-sdk-cloudwatchlogs'
    end
79 |
80 | def configure(conf)
81 | compat_parameters_convert(conf, :buffer, :inject)
82 | super
83 |
84 | unless [conf['log_group_name'], conf['use_tag_as_group'], conf['log_group_name_key']].compact.size == 1
85 | raise Fluent::ConfigError, "Set only one of log_group_name, use_tag_as_group and log_group_name_key"
86 | end
87 |
88 | unless [conf['log_stream_name'], conf['use_tag_as_stream'], conf['log_stream_name_key']].compact.size == 1
89 | raise Fluent::ConfigError, "Set only one of log_stream_name, use_tag_as_stream and log_stream_name_key"
90 | end
91 |
92 | if [conf['log_group_aws_tags'], conf['log_group_aws_tags_key']].compact.size > 1
93 | raise ConfigError, "Set only one of log_group_aws_tags, log_group_aws_tags_key"
94 | end
95 |
96 | if [conf['retention_in_days'], conf['retention_in_days_key']].compact.size > 1
97 | raise ConfigError, "Set only one of retention_in_days, retention_in_days_key"
98 | end
99 |
100 | formatter_conf = conf.elements('format').first
101 | @formatter_proc = unless formatter_conf
102 | unless @message_keys.empty?
103 | Proc.new { |tag, time, record|
104 | @message_keys.map{|k| record[k].to_s }.reject{|e| e.empty? }.join(' ')
105 | }
106 | else
107 | Proc.new { |tag, time, record|
108 | @json_handler.dump(record)
109 | }
110 | end
111 | else
112 | formatter = formatter_create(usage: 'cloudwatch-logs-plugin', conf: formatter_conf)
113 | formatter.method(:format)
114 | end
115 | end
116 |
    # Builds AWS credentials per the configured auth mode, creates the
    # CloudWatch Logs client (unless injected, e.g. by tests — hence ||=),
    # and initializes the per-group/stream sequence-token cache.
    def start
      super

      options = {}
      options[:logger] = log if log
      options[:log_level] = :debug if log
      options[:region] = @region if @region
      options[:endpoint] = @endpoint if @endpoint
      options[:ssl_verify_peer] = @ssl_verify_peer
      options[:instance_profile_credentials_retries] = @aws_instance_profile_credentials_retries if @aws_instance_profile_credentials_retries

      if @aws_use_sts
        # Assume a role via STS; an explicit STS client is built when a
        # region (and optionally a custom STS endpoint) is configured.
        Aws.config[:region] = options[:region]
        credentials_options = {
          role_arn: @aws_sts_role_arn,
          role_session_name: @aws_sts_session_name,
          external_id: @aws_sts_external_id,
          policy: @aws_sts_policy,
          duration_seconds: @aws_sts_duration_seconds
        }
        credentials_options[:sts_endpoint_url] = @aws_sts_endpoint_url if @aws_sts_endpoint_url
        if @region and @aws_sts_endpoint_url
          credentials_options[:client] = Aws::STS::Client.new(:region => @region, endpoint: @aws_sts_endpoint_url)
        elsif @region
          credentials_options[:client] = Aws::STS::Client.new(:region => @region)
        end
        options[:credentials] = Aws::AssumeRoleCredentials.new(credentials_options)
      elsif @web_identity_credentials
        # Web-identity (OIDC token file) based role assumption.
        c = @web_identity_credentials
        credentials_options = {}
        credentials_options[:role_arn] = c.role_arn
        credentials_options[:role_session_name] = c.role_session_name
        credentials_options[:web_identity_token_file] = c.web_identity_token_file
        credentials_options[:policy] = c.policy if c.policy
        credentials_options[:duration_seconds] = c.duration_seconds if c.duration_seconds
        if @region
          credentials_options[:client] = Aws::STS::Client.new(:region => @region)
        end
        options[:credentials] = Aws::AssumeRoleWebIdentityCredentials.new(credentials_options)
      elsif @aws_ecs_authentication
        # collect AWS credential from ECS relative uri ENV variable
        aws_container_credentials_relative_uri = ENV["AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"]
        options[:credentials] = Aws::ECSCredentials.new({credential_path: aws_container_credentials_relative_uri}).credentials
      else
        # Static keys; when absent, the SDK default provider chain is used.
        options[:credentials] = Aws::Credentials.new(@aws_key_id, @aws_sec_key) if @aws_key_id && @aws_sec_key
      end
      options[:http_proxy] = @http_proxy if @http_proxy
      @logs ||= Aws::CloudWatchLogs::Client.new(options)
      # {group_name => {stream_name => sequence_token}}, guarded by mutex.
      @sequence_tokens = {}
      @store_next_sequence_token_mutex = Mutex.new

      log.debug "Aws::CloudWatchLogs::Client initialized: log.level #{log.level} => #{options[:log_level]}"

      # Resolve the JSON backend symbol to an actual module.
      @json_handler = case @json_handler
                      when :yajl
                        Yajl
                      when :json
                        JSON
                      end
    end
177 |
    # Buffer-stage serialization: injects configured values into the record
    # and packs [tag, time, record] as msgpack.
    def format(tag, time, record)
      record = inject_values_to_record(tag, time, record)
      Fluent::MessagePackFactory.msgpack_packer.pack([tag, time, record]).to_s
    end
182 |
    # Tells the buffer layer that #format emits msgpack binary.
    def formatted_to_msgpack_binary?
      true
    end
186 |
    # This output is safe to run under Fluentd multi-worker mode.
    def multi_workers_ready?
      true
    end
190 |
    # Flushes one buffer chunk: groups records by (log group, log stream),
    # auto-creates missing groups/streams when configured, formats each
    # record into a timestamped event, and hands (group, stream, events)
    # batches to @concurrency worker threads that call put_events_by_chunk.
    def write(chunk)
      log_group_name = extract_placeholders(@log_group_name, chunk) if @log_group_name
      log_stream_name = extract_placeholders(@log_stream_name, chunk) if @log_stream_name
      # NOTE(review): this mutates @log_group_aws_tags while iterating it,
      # and `each` returns the (mutated) hash itself — placeholder-expanded
      # keys are added alongside the originals. Confirm this accumulation
      # across chunks is intended.
      aws_tags = @log_group_aws_tags.each {|k, v|
        @log_group_aws_tags[extract_placeholders(k, chunk)] = extract_placeholders(v, chunk)
      } if @log_group_aws_tags

      queue = Thread::Queue.new

      # Drop nil records, then bucket by destination (group, stream).
      chunk.enum_for(:msgpack_each).select {|tag, time, record|
        if record.nil?
          log.warn "record is nil (tag=#{tag})"
          false
        else
          true
        end
      }.group_by {|tag, time, record|
        group = case
                when @use_tag_as_group
                  tag
                when @log_group_name_key
                  if @remove_log_group_name_key
                    record.delete(@log_group_name_key)
                  else
                    record[@log_group_name_key]
                  end
                else
                  log_group_name
                end

        stream = case
                 when @use_tag_as_stream
                   tag
                 when @log_stream_name_key
                   if @remove_log_stream_name_key
                     record.delete(@log_stream_name_key)
                   else
                     record[@log_stream_name_key]
                   end
                 else
                   log_stream_name
                 end

        [group, stream]
      }.each {|group_stream, rs|
        group_name, stream_name = group_stream

        if stream_name.nil?
          log.warn "stream_name is nil (group_name=#{group_name})"
          next
        end

        unless log_group_exists?(group_name)
          #rs = [[name, timestamp, record],[name,timestamp,record]]
          #get tags and retention from first record
          #as we create log group only once, values from first record will persist
          record = rs[0][2]

          awstags = aws_tags
          unless @log_group_aws_tags_key.nil?
            if @remove_log_group_aws_tags_key
              awstags = record.delete(@log_group_aws_tags_key)
            else
              awstags = record[@log_group_aws_tags_key]
            end
          end

          retention_in_days = @retention_in_days
          unless @retention_in_days_key.nil?
            if @remove_retention_in_days_key
              retention_in_days = record.delete(@retention_in_days_key)
            else
              retention_in_days = record[@retention_in_days_key]
            end
          end

          if @auto_create_stream
            create_log_group(group_name, awstags, retention_in_days)
          else
            log.warn "Log group '#{group_name}' does not exist"
            next
          end
        end

        unless log_stream_exists?(group_name, stream_name)
          if @auto_create_stream
            create_log_stream(group_name, stream_name)
          else
            log.warn "Log stream '#{stream_name}' does not exist"
            next
          end
        end

        events = []
        rs.each do |t, time, record|
          # Strip bookkeeping keys from every record (group creation above
          # only consumed them from the first one).
          if @log_group_aws_tags_key && @remove_log_group_aws_tags_key
            record.delete(@log_group_aws_tags_key)
          end

          if @retention_in_days_key && @remove_retention_in_days_key
            record.delete(@retention_in_days_key)
          end

          record = drop_empty_record(record)

          # CloudWatch event timestamps are epoch milliseconds.
          time_ms = (time.to_f * 1000).floor

          scrub_record!(record)
          message = @formatter_proc.call(t, time, record)

          if message.empty?
            log.warn "Within specified message_key(s): (#{@message_keys.join(',')}) do not have non-empty record. Skip."
            next
          end

          if @max_message_length
            message = message.slice(0, @max_message_length)
          end

          events << {timestamp: time_ms, message: message}
        end
        # The log events in the batch must be in chronological ordered by their timestamp.
        # http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
        events = events.sort_by {|e| e[:timestamp] }

        queue << [group_name, stream_name, events]
      }

      # One nil sentinel per worker terminates its loop.
      @concurrency.times do
        queue << nil
      end
      threads = @concurrency.times.map do |i|
        Thread.start do
          while job = queue.shift
            group_name, stream_name, events = job
            put_events_by_chunk(group_name, stream_name, events)
          end
        end
      end
      threads.each(&:join)
    end
332 |
333 | private
334 |
335 | def drop_empty_record(record)
336 | new_record = record.dup
337 | new_record.each_key do |k|
338 | if new_record[k] == ""
339 | new_record.delete(k)
340 | end
341 | end
342 | new_record
343 | end
344 |
345 | def scrub_record!(record)
346 | case record
347 | when Hash
348 | record.each_value {|v| scrub_record!(v) }
349 | when Array
350 | record.each {|v| scrub_record!(v) }
351 | when String
352 | # The AWS API requires UTF-8 encoding
353 | # https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogsConcepts.html
354 | record.force_encoding('UTF-8')
355 | record.scrub!
356 | end
357 | end
358 |
    # Forgets the cached sequence token for a stream (e.g. after the stream
    # was recreated).
    def delete_sequence_token(group_name, stream_name)
      @sequence_tokens[group_name].delete(stream_name)
    end
362 |
    # Cached sequence token to send with the next PutLogEvents call
    # (nil for a fresh stream).
    def next_sequence_token(group_name, stream_name)
      @sequence_tokens[group_name][stream_name]
    end
366 |
    # Caches the sequence token returned by PutLogEvents; mutex-guarded
    # because worker threads update the shared hash concurrently.
    def store_next_sequence_token(group_name, stream_name, token)
      @store_next_sequence_token_mutex.synchronize do
        @sequence_tokens[group_name][stream_name] = token
      end
    end
372 |
    # Splits +events+ (consumed destructively via shift) into PutLogEvents
    # batches and sends each. A batch is cut when it would exceed the 1 MiB
    # size limit, span 24 hours, or hold max_events_per_batch events.
    # Raises TooLargeEventError for any single event over the per-event cap.
    def put_events_by_chunk(group_name, stream_name, events)
      chunk = []

      # The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
      # http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
      total_bytesize = 0
      while event = events.shift
        event_bytesize = event[:message].bytesize + EVENT_HEADER_SIZE
        if MAX_EVENT_SIZE < event_bytesize
          raise TooLargeEventError, "Log event in #{group_name} is discarded because it is too large: #{event_bytesize} bytes exceeds limit of #{MAX_EVENT_SIZE}"
        end

        new_chunk = chunk + [event]

        # Batches may not span more than 24 hours (events are pre-sorted).
        chunk_span_too_big = new_chunk.size > 1 && new_chunk[-1][:timestamp] - new_chunk[0][:timestamp] >= 1000 * 60 * 60 * 24
        chunk_too_big = total_bytesize + event_bytesize > MAX_EVENTS_SIZE
        chunk_too_long = @max_events_per_batch && chunk.size >= @max_events_per_batch
        if chunk_too_big or chunk_span_too_big or chunk_too_long
          put_events(group_name, stream_name, chunk, total_bytesize)
          chunk = [event]
          total_bytesize = event_bytesize
        else
          chunk << event
          total_bytesize += event_bytesize
        end
      end

      unless chunk.empty?
        put_events(group_name, stream_name, chunk, total_bytesize)
      end
    end
404 |
405 | def put_events(group_name, stream_name, events, events_bytesize)
406 | response = nil
407 | retry_count = 0
408 |
409 | until response
410 | args = {
411 | log_events: events,
412 | log_group_name: group_name,
413 | log_stream_name: stream_name,
414 | }
415 |
416 | token = next_sequence_token(group_name, stream_name)
417 | args[:sequence_token] = token if token
418 |
419 | begin
420 | t = Time.now
421 | response = @logs.put_log_events(args)
422 | request = {
423 | "group" => group_name,
424 | "stream" => stream_name,
425 | "events_count" => events.size,
426 | "events_bytesize" => events_bytesize,
427 | "sequence_token" => token,
428 | "thread" => Thread.current.object_id,
429 | "request_sec" => Time.now - t,
430 | }
431 | if response.rejected_log_events_info != nil && @log_rejected_request
432 | log.warn response.rejected_log_events_info
433 | log.warn "Called PutLogEvents API", request
434 | else
435 | log.debug "Called PutLogEvents API", request
436 | end
437 | rescue Aws::CloudWatchLogs::Errors::InvalidSequenceTokenException, Aws::CloudWatchLogs::Errors::DataAlreadyAcceptedException => err
438 | sleep 1 # to avoid too many API calls
439 | store_next_sequence_token(group_name, stream_name, err.expected_sequence_token)
440 | log.warn "updating upload sequence token forcefully because unrecoverable error occured", {
441 | "error" => err,
442 | "log_group" => group_name,
443 | "log_stream" => stream_name,
444 | "new_sequence_token" => token,
445 | }
446 | retry_count += 1
447 | rescue Aws::CloudWatchLogs::Errors::ResourceNotFoundException => err
448 | if @auto_create_stream && err.message == 'The specified log stream does not exist.'
449 | log.warn 'Creating log stream because "The specified log stream does not exist." error is got', {
450 | "error" => err,
451 | "log_group" => group_name,
452 | "log_stream" => stream_name,
453 | }
454 | create_log_stream(group_name, stream_name)
455 | delete_sequence_token(group_name, stream_name)
456 | retry_count += 1
457 | else
458 | raise err
459 | end
460 | rescue Aws::CloudWatchLogs::Errors::ThrottlingException => err
461 | if @put_log_events_retry_limit < 1
462 | log.warn "failed to PutLogEvents and discard logs because put_log_events_retry_limit is less than 1", {
463 | "error_class" => err.class.to_s,
464 | "error" => err.message,
465 | }
466 | return
467 | elsif !@put_log_events_disable_retry_limit && @put_log_events_retry_limit < retry_count
468 | log.error "failed to PutLogEvents and discard logs because retry count exceeded put_log_events_retry_limit", {
469 | "error_class" => err.class.to_s,
470 | "error" => err.message,
471 | }
472 | return
473 | else
474 | sleep_sec = @put_log_events_retry_wait * (2 ** retry_count)
475 | sleep_sec += sleep_sec * (0.25 * (rand - 0.5))
476 | log.warn "failed to PutLogEvents", {
477 | "next_retry" => Time.now + sleep_sec,
478 | "error_class" => err.class.to_s,
479 | "error" => err.message,
480 | }
481 | sleep(sleep_sec)
482 | retry_count += 1
483 | end
484 | end
485 | end
486 |
487 | if 0 < retry_count
488 | log.warn "retry succeeded"
489 | end
490 |
491 | store_next_sequence_token(group_name, stream_name, response.next_sequence_token)
492 | end
493 |
    # Creates the log group (with optional tags and retention policy) and
    # initializes its sequence-token cache. Racing creators are tolerated
    # via the ResourceAlreadyExists rescue.
    def create_log_group(group_name, log_group_aws_tags = nil, retention_in_days = nil)
      begin
        @logs.create_log_group(log_group_name: group_name, tags: log_group_aws_tags)
        unless retention_in_days.nil?
          put_retention_policy(group_name, retention_in_days)
        end
        @sequence_tokens[group_name] = {}
      rescue Aws::CloudWatchLogs::Errors::ResourceAlreadyExistsException
        log.debug "Log group '#{group_name}' already exists"
      end
    end
505 |
    # Best-effort: applies the retention policy, logging (not raising) when
    # the value is rejected so log delivery itself is not interrupted.
    def put_retention_policy(group_name, retention_in_days)
      begin
        @logs.put_retention_policy({
          log_group_name: group_name,
          retention_in_days: retention_in_days
        })
      rescue Aws::CloudWatchLogs::Errors::InvalidParameterException => error
        log.warn "failed to set retention policy for Log group '#{group_name}' with error #{error.backtrace}"
      end
    end
516 |
    # Creates the log stream and seeds its sequence-token cache entry with
    # nil (a new stream needs no token). Racing creators are tolerated.
    def create_log_stream(group_name, stream_name)
      begin
        @logs.create_log_stream(log_group_name: group_name, log_stream_name: stream_name)
        @sequence_tokens[group_name] ||= {}
        @sequence_tokens[group_name][stream_name] = nil
      rescue Aws::CloudWatchLogs::Errors::ResourceAlreadyExistsException
        log.debug "Log stream '#{stream_name}' already exists"
      end
    end
526 |
527 | def log_group_exists?(group_name)
528 | if @sequence_tokens[group_name]
529 | true
530 | elsif check_log_group_existence(group_name)
531 | @sequence_tokens[group_name] = {}
532 | true
533 | else
534 | false
535 | end
536 | end
537 |
    # Paginates DescribeLogGroups (prefix match) looking for an exact name
    # match; returns true as soon as one is found.
    def check_log_group_existence(group_name)
      response = @logs.describe_log_groups(log_group_name_prefix: group_name)
      response.each {|page|
        if page.log_groups.find {|i| i.log_group_name == group_name }
          return true
        end
      }

      false
    end
548 |
549 | def log_stream_exists?(group_name, stream_name)
550 | if not @sequence_tokens[group_name]
551 | false
552 | elsif @sequence_tokens[group_name].has_key?(stream_name)
553 | true
554 | elsif (log_stream = find_log_stream(group_name, stream_name))
555 | @sequence_tokens[group_name][stream_name] = log_stream.upload_sequence_token
556 | true
557 | else
558 | false
559 | end
560 | end
561 |
562 | def find_log_stream(group_name, stream_name)
563 | response = @logs.describe_log_streams(log_group_name: group_name, log_stream_name_prefix: stream_name)
564 | response.each {|page|
565 | if (log_stream = page.log_streams.find {|i| i.log_stream_name == stream_name })
566 | return log_stream
567 | end
568 | sleep 0.1
569 | }
570 | end
571 |
572 | nil
573 | end
574 | end
575 |
--------------------------------------------------------------------------------
/test/plugin/test_in_cloudwatch_logs.rb:
--------------------------------------------------------------------------------
1 | require 'test_helper'
2 | require 'fluent/test/driver/input'
3 | require 'fluent/test/helpers'
4 | require 'date'
5 | require 'fluent/plugin/in_cloudwatch_logs'
6 | require 'ostruct'
7 |
8 | class CloudwatchLogsInputTest < Test::Unit::TestCase
9 | include CloudwatchLogsTestHelper
10 | include Fluent::Test::Helpers
11 |
  # Standard Fluentd test bootstrap before each test case.
  def setup
    Fluent::Test.setup
  end
15 |
  sub_test_case "configure" do
    # Parses a full configuration and checks each option, including the
    # conversion of start_time/end_time strings to epoch milliseconds.
    def test_configure
      d = create_driver(<<-EOC)
        @type cloudwatch_logs
        aws_key_id test_id
        aws_sec_key test_key
        region us-east-1
        tag test
        log_group_name group
        log_stream_name stream
        use_log_stream_name_prefix true
        state_file /tmp/state
        use_aws_timestamp true
        start_time "2019-06-18 00:00:00Z"
        end_time "2020-01-18 00:00:00Z"
        time_range_format "%Y-%m-%d %H:%M:%S%z"
        throttling_retry_seconds 30
      EOC

      assert_equal('test_id', d.instance.aws_key_id)
      assert_equal('test_key', d.instance.aws_sec_key)
      assert_equal('us-east-1', d.instance.region)
      assert_equal('test', d.instance.tag)
      assert_equal('group', d.instance.log_group_name)
      assert_equal('stream', d.instance.log_stream_name)
      assert_equal(true, d.instance.use_log_stream_name_prefix)
      assert_equal('/tmp/state', d.instance.state_file)
      assert_equal(:yajl, d.instance.json_handler)
      assert_equal(true, d.instance.use_aws_timestamp)
      assert_equal(1560816000000, d.instance.start_time)
      assert_equal(1579305600000, d.instance.end_time)
      assert_equal("%Y-%m-%d %H:%M:%S%z", d.instance.time_range_format)
      assert_equal(30, d.instance.throttling_retry_seconds)
    end

    # end_time before start_time must be rejected at configure time.
    test 'invalid time range' do
      assert_raise(Fluent::ConfigError) do
        create_driver(<<-EOC)
          @type cloudwatch_logs
          aws_key_id test_id
          aws_sec_key test_key
          region us-east-1
          tag test
          log_group_name group
          log_stream_name stream
          use_log_stream_name_prefix true
          state_file /tmp/state
          use_aws_timestamp true
          start_time "2019-06-18 00:00:00Z"
          end_time "2019-01-18 00:00:00Z"
          time_range_format "%Y-%m-%d %H:%M:%S%z"
        EOC
      end
    end
  end
71 |
72 | sub_test_case "real world" do
73 | def setup
74 | omit if ENV["CI"] == "true"
75 | end
76 |
77 | def teardown
78 | return if ENV["CI"] == "true"
79 |
80 | clear_log_group
81 | end
82 |
83 | def test_emit
84 | create_log_stream
85 |
86 | time_ms = (Time.now.to_f * 1000).floor
87 | put_log_events([
88 | {timestamp: time_ms, message: '{"cloudwatch":"logs1"}'},
89 | {timestamp: time_ms, message: '{"cloudwatch":"logs2"}'},
90 | ])
91 |
92 | sleep 5
93 |
94 | d = create_driver
95 | d.run(expect_emits: 2, timeout: 5)
96 |
97 | emits = d.events
98 | assert_equal(2, emits.size)
99 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs1'}], emits[0])
100 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs2'}], emits[1])
101 | end
102 |
103 | sub_test_case "use_log_group_name_prefix true" do
104 | test "emit" do
105 | set_log_group_name("fluent-plugin-cloudwatch-group-prefix-test-#{Time.now.to_f}")
106 | create_log_stream
107 |
108 | time_ms = (Time.now.to_f * 1000).floor
109 | put_log_events([
110 | {timestamp: time_ms, message: '{"cloudwatch":"logs1"}'},
111 | {timestamp: time_ms, message: '{"cloudwatch":"logs2"}'},
112 | ])
113 |
114 | sleep 5
115 |
116 | config = <<-EOC
117 | tag test
118 | @type cloudwatch_logs
119 | log_group_name fluent-plugin-cloudwatch-group-prefix-test
120 | use_log_group_name_prefix true
121 | log_stream_name #{log_stream_name}
122 | state_file /tmp/state
123 | fetch_interval 1
124 | #{aws_key_id}
125 | #{aws_sec_key}
126 | #{region}
127 | #{endpoint}
128 | EOC
129 |
130 | d = create_driver(config)
131 | d.run(expect_emits: 2, timeout: 5)
132 |
133 | emits = d.events
134 | assert_equal(2, emits.size)
135 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs1'}], emits[0])
136 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs2'}], emits[1])
137 | end
138 |
139 | test "emit with add_log_group_name" do
140 | set_log_group_name("fluent-plugin-cloudwatch-add-log-group-#{Time.now.to_f}")
141 | create_log_stream
142 |
143 | time_ms = (Time.now.to_f * 1000).floor
144 | put_log_events([
145 | {timestamp: time_ms, message: '{"cloudwatch":"logs1"}'},
146 | {timestamp: time_ms, message: '{"cloudwatch":"logs2"}'},
147 | ])
148 |
149 | sleep 5
150 |
151 | log_group_name_key = 'log_group_key'
152 | config = <<-EOC
153 | tag test
154 | @type cloudwatch_logs
155 | log_group_name fluent-plugin-cloudwatch-add-log-group
156 | use_log_group_name_prefix true
157 | add_log_group_name true
158 | log_group_name_key #{log_group_name_key}
159 | log_stream_name #{log_stream_name}
160 | state_file /tmp/state
161 | fetch_interval 1
162 | #{aws_key_id}
163 | #{aws_sec_key}
164 | #{region}
165 | #{endpoint}
166 | EOC
167 |
168 | d = create_driver(config)
169 | d.run(expect_emits: 2, timeout: 5)
170 |
171 | emits = d.events
172 | assert_equal(2, emits.size)
173 | assert_true emits[0][2].has_key?(log_group_name_key)
174 | emits[0][2].delete(log_group_name_key)
175 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs1'}], emits[0])
176 | assert_true emits[1][2].has_key?(log_group_name_key)
177 | emits[1][2].delete(log_group_name_key)
178 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs2'}], emits[1])
179 | end
180 |
181 | test "emit with add_log_group_name and csv" do
182 | cloudwatch_config = {'tag' => "test",
183 | '@type' => 'cloudwatch_logs',
184 | 'log_group_name' => "fluent-plugin-cloudwatch-with-csv-format",
185 | 'log_stream_name' => "#{log_stream_name}",
186 | 'use_log_group_name_prefix' => true,
187 | }
188 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_key_id)) if ENV['aws_key_id']
189 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_sec_key)) if ENV['aws_sec_key']
190 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(region)) if ENV['region']
191 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(endpoint)) if ENV['endpoint']
192 |
193 | csv_format_config = config_element('ROOT', '', cloudwatch_config, [
194 | config_element('parse', '', {'@type' => 'csv',
195 | 'keys' => 'time,message',
196 | 'time_key' => 'time'}),
197 | config_element('storage', '', {'@type' => 'local',
198 | 'path' => '/tmp/state'})
199 | ])
200 | log_group_name = "fluent-plugin-cloudwatch-with-csv-format-#{Time.now.to_f}"
201 | set_log_group_name(log_group_name)
202 | create_log_stream
203 |
204 | time_ms = (Time.now.to_f * 1000).floor
205 | log_time_ms = time_ms - 10000
206 | put_log_events([
207 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs1"},
208 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs2"},
209 | ])
210 |
211 | sleep 5
212 |
213 | d = create_driver(csv_format_config)
214 | d.run(expect_emits: 2, timeout: 5)
215 | next_token = d.instance.instance_variable_get(:@next_token_storage)
216 | assert_true next_token.get(d.instance.state_key_for(log_stream_name, log_group_name)).is_a?(String)
217 |
218 | emits = d.events
219 | assert_equal(2, emits.size)
220 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
221 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
222 | end
223 | end
224 |
225 | def test_emit_with_metadata
226 | create_log_stream
227 |
228 | time_ms = (Time.now.to_f * 1000).floor
229 | put_log_events([
230 | {timestamp: time_ms, message: '{"cloudwatch":"logs1"}'},
231 | {timestamp: time_ms, message: '{"cloudwatch":"logs2"}'},
232 | ])
233 |
234 | sleep 5
235 |
236 | d = create_driver(default_config + %[include_metadata true])
237 | d.run(expect_emits: 2, timeout: 5)
238 |
239 | emits = d.events
240 | assert_true(emits[0][2].has_key?("metadata"))
241 | assert_true(emits[1][2].has_key?("metadata"))
242 | emits[0][2].delete_if {|k, v|
243 | k == "metadata"
244 | }
245 | emits[1][2].delete_if {|k, v|
246 | k == "metadata"
247 | }
248 | assert_equal(2, emits.size)
249 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs1'}], emits[0])
250 | assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs2'}], emits[1])
251 | end
252 |
253 | def test_emit_with_aws_timestamp
254 | create_log_stream
255 |
256 | time_ms = (Time.now.to_f * 1000).floor
257 | log_time_ms = time_ms - 10000
258 | put_log_events([
259 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs1"},
260 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs2"},
261 | ])
262 |
263 | sleep 5
264 |
265 | d = create_driver(csv_format_config_aws_timestamp)
266 | d.run(expect_emits: 2, timeout: 5)
267 |
268 | emits = d.events
269 | assert_equal(2, emits.size)
270 | assert_equal(['test', (time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
271 | assert_equal(['test', (time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
272 | end
273 |
274 | def test_emit_with_aws_timestamp_and_time_range
275 | create_log_stream
276 |
277 | time_ms = (Time.now.to_f * 1000).floor
278 | before_6h_time_ms = ((Time.now.to_f - 60*60*6) * 1000).floor
279 | log_time_ms = time_ms - 10000
280 | put_log_events([
281 | {timestamp: before_6h_time_ms, message: Time.at((before_6h_time_ms - 10000)/1000.floor).to_s + ",Cloudwatch non json logs1"},
282 | {timestamp: before_6h_time_ms, message: Time.at((before_6h_time_ms - 10000)/1000.floor).to_s + ",Cloudwatch non json logs2"},
283 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs3"},
284 | ])
285 |
286 | sleep 5
287 |
288 | d = create_driver(csv_format_config_aws_timestamp + %[
289 | start_time #{Time.at(Time.now.to_f - 60*60*8).to_s}
290 | end_time #{Time.at(Time.now.to_f - 60*60*4).to_s}
291 | time_range_format "%Y-%m-%d %H:%M:%S %z"
292 | ])
293 | d.run(expect_emits: 2, timeout: 5)
294 |
295 | emits = d.events
296 | assert_equal(2, emits.size)
297 | assert_equal(['test', (before_6h_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
298 | assert_equal(['test', (before_6h_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
299 | end
300 |
301 | def test_emit_with_log_timestamp
302 | create_log_stream
303 |
304 | time_ms = (Time.now.to_f * 1000).floor
305 | log_time_ms = time_ms - 10000
306 | put_log_events([
307 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs1"},
308 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs2"},
309 | ])
310 |
311 | sleep 5
312 |
313 | d = create_driver(csv_format_config)
314 | d.run(expect_emits: 2, timeout: 5)
315 |
316 | emits = d.events
317 | assert_equal(2, emits.size)
318 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
319 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
320 | end
321 |
322 | test "emit with csv" do
323 | cloudwatch_config = {'tag' => "test",
324 | '@type' => 'cloudwatch_logs',
325 | 'log_group_name' => "#{log_group_name}",
326 | 'log_stream_name' => "#{log_stream_name}",
327 | }
328 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_key_id)) if ENV['aws_key_id']
329 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_sec_key)) if ENV['aws_sec_key']
330 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(region)) if ENV['region']
331 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(endpoint)) if ENV['endpoint']
332 |
333 | csv_format_config = config_element('ROOT', '', cloudwatch_config, [
334 | config_element('parse', '', {'@type' => 'csv',
335 | 'keys' => 'time,message',
336 | 'time_key' => 'time'}),
337 | config_element('storage', '', {'@type' => 'local',
338 | 'path' => '/tmp/state'})
339 | ])
340 | create_log_stream
341 |
342 | time_ms = (Time.now.to_f * 1000).floor
343 | log_time_ms = time_ms - 10000
344 | put_log_events([
345 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs1"},
346 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs2"},
347 | ])
348 |
349 | sleep 5
350 |
351 | d = create_driver(csv_format_config)
352 | d.run(expect_emits: 2, timeout: 5)
353 |
354 | emits = d.events
355 | assert_equal(2, emits.size)
356 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
357 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
358 | end
359 |
360 | test "emit with csv with metadata" do
361 | cloudwatch_config = {'tag' => "test",
362 | '@type' => 'cloudwatch_logs',
363 | 'log_group_name' => "#{log_group_name}",
364 | 'log_stream_name' => "#{log_stream_name}",
365 | 'include_metadata' => true,
366 | }
367 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_key_id)) if ENV['aws_key_id']
368 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_sec_key)) if ENV['aws_sec_key']
369 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(region)) if ENV['region']
370 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(endpoint)) if ENV['endpoint']
371 |
372 | csv_format_config = config_element('ROOT', '', cloudwatch_config, [
373 | config_element('parse', '', {'@type' => 'csv',
374 | 'keys' => 'time,message',
375 | 'time_key' => 'time'}),
376 | config_element('storage', '', {'@type' => 'local',
377 | 'path' => '/tmp/state'})
378 |
379 | ])
380 | create_log_stream
381 |
382 | time_ms = (Time.now.to_f * 1000).floor
383 | log_time_ms = time_ms - 10000
384 | put_log_events([
385 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs1"},
386 | {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs2"},
387 | ])
388 |
389 | sleep 5
390 |
391 | d = create_driver(csv_format_config)
392 | d.run(expect_emits: 2, timeout: 5)
393 |
394 | emits = d.events
395 | assert_true(emits[0][2].has_key?("metadata"))
396 | assert_true(emits[1][2].has_key?("metadata"))
397 | emits[0][2].delete_if {|k, v|
398 | k == "metadata"
399 | }
400 | emits[1][2].delete_if {|k, v|
401 | k == "metadata"
402 | }
403 | assert_equal(2, emits.size)
404 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
405 | assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
406 | end
407 |
408 | def test_emit_width_format
409 | create_log_stream
410 |
411 | time_ms = (Time.now.to_f * 1000).floor
412 | put_log_events([
413 | {timestamp: time_ms, message: 'logs1'},
414 | {timestamp: time_ms, message: 'logs2'},
415 | ])
416 |
417 | sleep 5
418 |
419 | d = create_driver(<<-EOC)
420 | tag test
421 | @type cloudwatch_logs
422 | log_group_name #{log_group_name}
423 | log_stream_name #{log_stream_name}
424 | state_file /tmp/state
425 | format /^(?<cloudwatch>[^ ]*)?/
426 | #{aws_key_id}
427 | #{aws_sec_key}
428 | #{region}
429 | #{endpoint}
430 | EOC
431 |
432 | d.run(expect_emits: 2, timeout: 5)
433 |
434 | emits = d.events
435 | assert_equal(2, emits.size)
436 | assert_equal('test', emits[0][0])
437 | assert_in_delta((time_ms / 1000).floor, emits[0][1], 10)
438 | assert_equal({'cloudwatch' => 'logs1'}, emits[0][2])
439 | assert_equal('test', emits[1][0])
440 | assert_in_delta((time_ms / 1000).floor, emits[1][1], 10)
441 | assert_equal({'cloudwatch' => 'logs2'}, emits[1][2])
442 | end
443 |
444 | test "emit with regexp" do
445 | cloudwatch_config = {'tag' => "test",
446 | '@type' => 'cloudwatch_logs',
447 | 'log_group_name' => "#{log_group_name}",
448 | 'log_stream_name' => "#{log_stream_name}",
449 | }
450 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_key_id)) if ENV['aws_key_id']
451 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_sec_key)) if ENV['aws_sec_key']
452 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(region)) if ENV['region']
453 | cloudwatch_config = cloudwatch_config.merge!(config_elementify(endpoint)) if ENV['endpoint']
454 |
455 | regex_format_config = config_element('ROOT', '', cloudwatch_config, [
456 | config_element('parse', '', {'@type' => 'regexp',
457 | 'expression' => "/^(?<cloudwatch>[^ ]*)?/",
458 | }),
459 | config_element('storage', '', {'@type' => 'local',
460 | 'path' => '/tmp/state'})
461 | ])
462 | create_log_stream
463 |
464 | time_ms = (Time.now.to_f * 1000).floor
465 | put_log_events([
466 | {timestamp: time_ms, message: 'logs1'},
467 | {timestamp: time_ms, message: 'logs2'},
468 | ])
469 |
470 | sleep 5
471 |
472 | d = create_driver(regex_format_config)
473 |
474 | d.run(expect_emits: 2, timeout: 5)
475 |
476 | emits = d.events
477 | assert_equal(2, emits.size)
478 | assert_equal('test', emits[0][0])
479 | assert_in_delta((time_ms / 1000).floor, emits[0][1], 10)
480 | assert_equal({'cloudwatch' => 'logs1'}, emits[0][2])
481 | assert_equal('test', emits[1][0])
482 | assert_in_delta((time_ms / 1000).floor, emits[1][1], 10)
483 | assert_equal({'cloudwatch' => 'logs2'}, emits[1][2])
484 | end
485 |
486 | def test_emit_with_prefix
487 | new_log_stream("testprefix")
488 | create_log_stream
489 |
490 | time_ms = (Time.now.to_f * 1000).floor
491 | put_log_events([
492 | {timestamp: time_ms + 1000, message: '{"cloudwatch":"logs1"}'},
493 | {timestamp: time_ms + 2000, message: '{"cloudwatch":"logs2"}'},
494 | ])
495 |
496 | new_log_stream("testprefix")
497 | create_log_stream
498 | put_log_events([
499 | {timestamp: time_ms + 3000, message: '{"cloudwatch":"logs3"}'},
500 | {timestamp: time_ms + 4000, message: '{"cloudwatch":"logs4"}'},
501 | ])
502 |
503 | sleep 5
504 |
505 | d = create_driver(<<-EOC)
506 | tag test
507 | @type cloudwatch_logs
508 | log_group_name #{log_group_name}
509 | log_stream_name testprefix
510 | use_log_stream_name_prefix true
511 | state_file /tmp/state
512 | #{aws_key_id}
513 | #{aws_sec_key}
514 | #{region}
515 | #{endpoint}
516 | EOC
517 | d.run(expect_emits: 4, timeout: 5)
518 |
519 | emits = d.events
520 | assert_equal(4, emits.size)
521 | assert_true(emits.include? ['test', ((time_ms + 1000) / 1000).floor, {'cloudwatch' => 'logs1'}])
522 | assert_true(emits.include? ['test', ((time_ms + 2000) / 1000).floor, {'cloudwatch' => 'logs2'}])
523 | assert_true(emits.include? ['test', ((time_ms + 3000) / 1000).floor, {'cloudwatch' => 'logs3'}])
524 | assert_true(emits.include? ['test', ((time_ms + 4000) / 1000).floor, {'cloudwatch' => 'logs4'}])
525 | end
526 |
527 | def test_emit_with_todays_log_stream
528 | new_log_stream("testprefix")
529 | create_log_stream
530 |
531 | today = DateTime.now.strftime("%Y/%m/%d")
532 | yesterday = (Date.today - 1).strftime("%Y/%m/%d")
533 | tomorrow = (Date.today + 1).strftime("%Y/%m/%d")
534 |
535 |
536 | time_ms = (Time.now.to_f * 1000).floor
537 | put_log_events([
538 | {timestamp: time_ms + 1000, message: '{"cloudwatch":"logs1"}'},
539 | {timestamp: time_ms + 2000, message: '{"cloudwatch":"logs2"}'},
540 | ])
541 |
542 | new_log_stream(today)
543 | create_log_stream
544 | put_log_events([
545 | {timestamp: time_ms + 3000, message: '{"cloudwatch":"logs3"}'},
546 | {timestamp: time_ms + 4000, message: '{"cloudwatch":"logs4"}'},
547 | ])
548 |
549 | new_log_stream(yesterday)
550 | create_log_stream
551 | put_log_events([
552 | {timestamp: time_ms + 5000, message: '{"cloudwatch":"logs5"}'},
553 | {timestamp: time_ms + 6000, message: '{"cloudwatch":"logs6"}'},
554 | ])
555 |
556 | new_log_stream(tomorrow)
557 | create_log_stream
558 | put_log_events([
559 | {timestamp: time_ms + 7000, message: '{"cloudwatch":"logs7"}'},
560 | {timestamp: time_ms + 8000, message: '{"cloudwatch":"logs8"}'},
561 | ])
562 |
563 | new_log_stream(today)
564 | create_log_stream
565 | put_log_events([
566 | {timestamp: time_ms + 9000, message: '{"cloudwatch":"logs9"}'},
567 | {timestamp: time_ms + 10000, message: '{"cloudwatch":"logs10"}'},
568 | ])
569 |
570 | new_log_stream(yesterday)
571 | create_log_stream
572 | put_log_events([
573 | {timestamp: time_ms + 11000, message: '{"cloudwatch":"logs11"}'},
574 | {timestamp: time_ms + 12000, message: '{"cloudwatch":"logs12"}'},
575 | ])
576 |
577 | sleep 15
578 |
579 | d = create_driver(<<-EOC)
580 | tag test
581 | @type cloudwatch_logs
582 | log_group_name #{log_group_name}
583 | use_todays_log_stream true
584 | state_file /tmp/state
585 | #{aws_key_id}
586 | #{aws_sec_key}
587 | #{region}
588 | #{endpoint}
589 | EOC
590 | d.run(expect_emits: 8, timeout: 15)
591 |
592 | emits = d.events
593 | assert_equal(8, emits.size)
594 | assert_false(emits.include? ['test', ((time_ms + 1000) / 1000).floor, {'cloudwatch' => 'logs1'}])
595 | assert_false(emits.include? ['test', ((time_ms + 2000) / 1000).floor, {'cloudwatch' => 'logs2'}])
596 | assert_true(emits.include? ['test', ((time_ms + 3000) / 1000).floor, {'cloudwatch' => 'logs3'}])
597 | assert_true(emits.include? ['test', ((time_ms + 4000) / 1000).floor, {'cloudwatch' => 'logs4'}])
598 | assert_true(emits.include? ['test', ((time_ms + 5000) / 1000).floor, {'cloudwatch' => 'logs5'}])
599 | assert_true(emits.include? ['test', ((time_ms + 6000) / 1000).floor, {'cloudwatch' => 'logs6'}])
600 | assert_false(emits.include? ['test', ((time_ms + 7000) / 1000).floor, {'cloudwatch' => 'logs7'}])
601 | assert_false(emits.include? ['test', ((time_ms + 8000) / 1000).floor, {'cloudwatch' => 'logs8'}])
602 | assert_true(emits.include? ['test', ((time_ms + 9000) / 1000).floor, {'cloudwatch' => 'logs9'}])
603 | assert_true(emits.include? ['test', ((time_ms + 10000) / 1000).floor, {'cloudwatch' => 'logs10'}])
604 | assert_true(emits.include? ['test', ((time_ms + 11000) / 1000).floor, {'cloudwatch' => 'logs11'}])
605 | assert_true(emits.include? ['test', ((time_ms + 12000) / 1000).floor, {'cloudwatch' => 'logs12'}])
606 | end
607 | end
608 |
609 | sub_test_case "stub responses" do
610 | setup do
611 | @client = Aws::CloudWatchLogs::Client.new(stub_responses: true)
612 | mock(Aws::CloudWatchLogs::Client).new(anything) do
613 | @client
614 | end
615 | end
616 |
617 | test "emit" do
618 | time_ms = (Time.now.to_f * 1000).floor
619 | log_stream = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
620 | @client.stub_responses(:describe_log_streams, { log_streams: [log_stream], next_token: nil })
621 | cloudwatch_logs_events = [
622 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: { cloudwatch: "logs1" }.to_json, ingestion_time: time_ms),
623 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: { cloudwatch: "logs2" }.to_json, ingestion_time: time_ms)
624 | ]
625 | @client.stub_responses(:get_log_events, { events: cloudwatch_logs_events, next_forward_token: nil })
626 |
627 | d = create_driver
628 | d.run(expect_emits: 2, timeout: 5)
629 |
630 | events = d.events
631 | assert_equal(2, events.size)
632 | assert_equal(["test", (time_ms / 1000), { "cloudwatch" => "logs1" }], events[0])
633 | assert_equal(["test", (time_ms / 1000), { "cloudwatch" => "logs2" }], events[1])
634 | end
635 |
636 | test "emit with aws_timestamp" do
637 | time_ms = (Time.now.to_f * 1000).floor
638 | log_time_ms = time_ms - 10000
639 | log_time_str = Time.at(log_time_ms / 1000.floor).to_s
640 | log_stream = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
641 | @client.stub_responses(:describe_log_streams, { log_streams: [log_stream], next_token: nil })
642 | cloudwatch_logs_events = [
643 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: "#{log_time_str},Cloudwatch non json logs1"),
644 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: "#{log_time_str},Cloudwatch non json logs2")
645 | ]
646 | @client.stub_responses(:get_log_events, { events: cloudwatch_logs_events, next_forward_token: nil })
647 |
648 | d = create_driver(csv_format_config_aws_timestamp)
649 | d.run(expect_emits: 2, timeout: 5)
650 |
651 | events = d.events
652 | assert_equal(2, events.size)
653 | assert_equal(["test", (time_ms / 1000).floor, { "message" => "Cloudwatch non json logs1" }], events[0])
654 | assert_equal(["test", (time_ms / 1000).floor, { "message" => "Cloudwatch non json logs2" }], events[1])
655 | end
656 |
657 | test "emit with log_timestamp" do
658 | time_ms = (Time.now.to_f * 1000).floor
659 | log_time_ms = time_ms - 10000
660 | log_time_str = Time.at(log_time_ms / 1000.floor).to_s
661 | log_stream = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
662 | @client.stub_responses(:describe_log_streams, { log_streams: [log_stream], next_token: nil })
663 | cloudwatch_logs_events = [
664 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: "#{log_time_str},Cloudwatch non json logs1"),
665 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: "#{log_time_str},Cloudwatch non json logs2")
666 | ]
667 | @client.stub_responses(:get_log_events, { events: cloudwatch_logs_events, next_forward_token: nil })
668 |
669 | d = create_driver(csv_format_config)
670 | d.run(expect_emits: 2, timeout: 5)
671 |
672 | emits = d.events
673 | assert_equal(2, emits.size)
674 | assert_equal(["test", (log_time_ms / 1000).floor, { "message" => "Cloudwatch non json logs1" }], emits[0])
675 | assert_equal(["test", (log_time_ms / 1000).floor, { "message" => "Cloudwatch non json logs2" }], emits[1])
676 | end
677 |
678 | test "emit with format" do
679 | config = <<-CONFIG
680 | tag test
681 | @type cloudwatch_logs
682 | log_group_name #{log_group_name}
683 | log_stream_name #{log_stream_name}
684 | state_file /tmp/state
685 | format /^(?<cloudwatch>[^ ]*)?/
686 | #{aws_key_id}
687 | #{aws_sec_key}
688 | #{region}
689 | #{endpoint}
690 | CONFIG
691 | time_ms = (Time.now.to_f * 1000).floor
692 |
693 | log_stream = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
694 | @client.stub_responses(:describe_log_streams, { log_streams: [log_stream], next_token: nil })
695 | cloudwatch_logs_events = [
696 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: "logs1", ingestion_time: time_ms),
697 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms, message: "logs2", ingestion_time: time_ms)
698 | ]
699 | @client.stub_responses(:get_log_events, { events: cloudwatch_logs_events, next_forward_token: nil })
700 |
701 | d = create_driver(config)
702 | d.run(expect_emits: 2, timeout: 5)
703 |
704 | events = d.events
705 | assert_equal(2, events.size)
706 | assert_equal("test", events[0][0])
707 | assert_in_delta(time_ms / 1000.0, events[0][1], 1.0)
708 | assert_equal({ "cloudwatch" => "logs1" }, events[0][2])
709 | assert_equal("test", events[1][0])
710 | assert_in_delta(time_ms / 1000.0, events[1][1], 1.0)
711 | assert_equal({ "cloudwatch" => "logs2" }, events[1][2])
712 | end
713 |
714 | test "emit with prefix" do
715 | config = <<-CONFIG
716 | tag test
717 | @type cloudwatch_logs
718 | log_group_name #{log_group_name}
719 | log_stream_name testprefix
720 | use_log_stream_name_prefix true
721 | state_file /tmp/state
722 | #{aws_key_id}
723 | #{aws_sec_key}
724 | #{region}
725 | #{endpoint}
726 | CONFIG
727 | time_ms = (Time.now.to_f * 1000).floor
728 | log_stream1 = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
729 | log_stream2 = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
730 | @client.stub_responses(:describe_log_streams, { log_streams: [log_stream1, log_stream2], next_token: nil })
731 | cloudwatch_logs_events1 = [
732 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms + 1000, message: { cloudwatch: "logs1" }.to_json, ingestion_time: time_ms),
733 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms + 2000, message: { cloudwatch: "logs2" }.to_json, ingestion_time: time_ms)
734 | ]
735 | cloudwatch_logs_events2 = [
736 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms + 3000, message: { cloudwatch: "logs3" }.to_json, ingestion_time: time_ms),
737 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms + 4000, message: { cloudwatch: "logs4" }.to_json, ingestion_time: time_ms)
738 | ]
739 | @client.stub_responses(:get_log_events, [
740 | { events: cloudwatch_logs_events1, next_forward_token: nil },
741 | { events: cloudwatch_logs_events2, next_forward_token: nil },
742 | ])
743 |
744 | d = create_driver(config)
745 | d.run(expect_emits: 4, timeout: 5)
746 |
747 | events = d.events
748 | assert_equal(4, events.size)
749 | assert_equal(["test", (time_ms + 1000) / 1000, { "cloudwatch" => "logs1" }], events[0])
750 | assert_equal(["test", (time_ms + 2000) / 1000, { "cloudwatch" => "logs2" }], events[1])
751 | assert_equal(["test", (time_ms + 3000) / 1000, { "cloudwatch" => "logs3" }], events[2])
752 | assert_equal(["test", (time_ms + 4000) / 1000, { "cloudwatch" => "logs4" }], events[3])
753 | end
754 |
755 | test "emit with today's log stream" do
756 | omit "This testcase is unstable in CI." if ENV["CI"] == "true"
757 |
758 | config = <<-CONFIG
759 | tag test
760 | @type cloudwatch_logs
761 | log_group_name #{log_group_name}
762 | use_todays_log_stream true
763 | state_file /tmp/state
764 | fetch_interval 0.1
765 | #{aws_key_id}
766 | #{aws_sec_key}
767 | #{region}
768 | #{endpoint}
769 | CONFIG
770 |
771 | today = Date.today.strftime("%Y/%m/%d")
772 | yesterday = (Date.today - 1).strftime("%Y/%m/%d")
773 | time_ms = (Time.now.to_f * 1000).floor
774 |
775 | log_stream = ->(name) { Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "#{name}_#{SecureRandom.uuid}") }
776 | @client.stub_responses(:describe_log_streams, ->(context) {
777 | if context.params[:log_stream_name_prefix].start_with?(today)
778 | { log_streams: [log_stream.call(today)], next_token: nil }
779 | elsif context.params[:log_stream_name_prefix].start_with?(yesterday)
780 | { log_streams: [log_stream.call(yesterday)], next_token: nil }
781 | else
782 | { log_streams: [], next_token: nil }
783 | end
784 | })
785 | count = 0
786 | @client.stub_responses(:get_log_events, ->(context) {
787 | n = count * 2 + 1
788 | cloudwatch_logs_events = [
789 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms + n * 1000, message: { cloudwatch: "logs#{n}" }.to_json, ingestion_time: time_ms),
790 | Aws::CloudWatchLogs::Types::OutputLogEvent.new(timestamp: time_ms + (n + 1) * 1000, message: { cloudwatch: "logs#{n + 1}" }.to_json, ingestion_time: time_ms)
791 | ]
792 | count += 1
793 | if context.params[:log_stream_name].start_with?(today)
794 | { events: cloudwatch_logs_events, next_forward_token: nil }
795 | elsif context.params[:log_stream_name].start_with?(yesterday)
796 | { events: cloudwatch_logs_events, next_forward_token: nil }
797 | else
798 | flunk("Failed log_stream_name: #{context.params[:log_stream_name]}")
799 | end
800 | })
801 |
802 | d = create_driver(config)
803 | d.run(expect_emits: 8, timeout: 15)
804 |
805 | events = d.events
806 | assert_equal(8, events.size)
807 | assert_equal(["test", ((time_ms + 1000) / 1000), { "cloudwatch" => "logs1" }], events[0])
808 | assert_equal(["test", ((time_ms + 2000) / 1000), { "cloudwatch" => "logs2" }], events[1])
809 | assert_equal(["test", ((time_ms + 3000) / 1000), { "cloudwatch" => "logs3" }], events[2])
810 | assert_equal(["test", ((time_ms + 4000) / 1000), { "cloudwatch" => "logs4" }], events[3])
811 | assert_equal(["test", ((time_ms + 5000) / 1000), { "cloudwatch" => "logs5" }], events[4])
812 | assert_equal(["test", ((time_ms + 6000) / 1000), { "cloudwatch" => "logs6" }], events[5])
813 | assert_equal(["test", ((time_ms + 7000) / 1000), { "cloudwatch" => "logs7" }], events[6])
814 | assert_equal(["test", ((time_ms + 8000) / 1000), { "cloudwatch" => "logs8" }], events[7])
815 | end
816 |
817 | test "retry on Aws::CloudWatchLogs::Errors::ThrottlingException in get_log_events" do
818 | config = <<-CONFIG
819 | tag test
820 | @type cloudwatch_logs
821 | log_group_name #{log_group_name}
822 | state_file /tmp/state
823 | fetch_interval 0.1
824 | throttling_retry_seconds 0.2
825 | CONFIG
826 |
827 | # it will raise the error 2 times
828 | counter = 0
829 | times = 2
830 | stub(@client).get_log_events(anything) {
831 | counter += 1
832 | counter <= times ? raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error")) : OpenStruct.new(events: [], next_forward_token: nil)
833 | }
834 |
835 | d = create_driver(config)
836 |
837 | # so, it is expected to call valid_next_token once
838 | mock(d.instance).valid_next_token(nil, nil).once
839 |
840 | d.run
841 | assert_equal(2, d.logs.select {|l| l =~ /ThrottlingException get_log_events. Waiting 0.2 seconds to retry/ }.size)
842 | end
843 |
844 | test "retry on Aws::CloudWatchLogs::Errors::ThrottlingException in describe_log_streams" do
845 | config = <<-CONFIG
846 | tag test
847 | @type cloudwatch_logs
848 | log_group_name #{log_group_name}
849 | use_log_stream_name_prefix true
850 | state_file /tmp/state
851 | fetch_interval 0.1
852 | throttling_retry_seconds 0.2
853 | CONFIG
854 |
855 | # it will raise the error 2 times
856 | log_stream = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
857 | counter = 0
858 | times = 2
859 | stub(@client).describe_log_streams(anything) {
860 | counter += 1
861 | counter <= times ? raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error")) : OpenStruct.new(log_streams: [log_stream], next_token: nil)
862 | }
863 |
864 | d = create_driver(config)
865 |
866 | d.run
867 | assert_equal(2, d.logs.select {|l| l =~ /ThrottlingException describe_log_streams. Waiting 0.2 seconds to retry/ }.size)
868 | end
869 | end
870 |
871 | private
872 |
# Base configuration string for the input-plugin driver.
# aws_key_id / aws_sec_key / region / endpoint are helper methods (presumably
# from CloudwatchLogsTestHelper — defined outside this chunk) and may expand
# to empty lines when the matching environment variables are unset.
def default_config
  <<-EOC
    tag test
    @type cloudwatch_logs
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    state_file /tmp/state
    fetch_interval 1
    #{aws_key_id}
    #{aws_sec_key}
    #{region}
    #{endpoint}
  EOC
end
887 |
# Configuration string for CSV-formatted input: parses each message as
# `time,message` CSV and takes the event time from the `time` column.
def csv_format_config
  <<-EOC
    tag test
    @type cloudwatch_logs
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    state_file /tmp/state
    fetch_interval 1
    #{aws_key_id}
    #{aws_sec_key}
    #{region}
    #{endpoint}
    format csv
    keys time,message
    time_key time
  EOC
end
905 |
# CSV config variant that uses AWS-side ingestion timestamps instead of the
# parsed `time` column (the heredoc ends with a newline, so appending a bare
# directive line yields a valid config).
def csv_format_config_aws_timestamp
  base = csv_format_config
  base + "use_aws_timestamp true"
end
909 |
# Builds an input-plugin test driver configured with `conf`
# (defaults to default_config).
def create_driver(conf = default_config)
  driver = Fluent::Test::Driver::Input.new(Fluent::Plugin::CloudwatchLogsInput)
  driver.configure(conf)
end
913 | end
914 |
--------------------------------------------------------------------------------
/test/plugin/test_out_cloudwatch_logs.rb:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | require_relative '../test_helper'
3 | require 'fileutils'
4 | require 'fluent/test/driver/output'
5 | require 'fluent/test/helpers'
6 | require 'fluent/plugin/out_cloudwatch_logs'
7 |
8 | class CloudwatchLogsOutputTest < Test::Unit::TestCase
9 | include CloudwatchLogsTestHelper
10 | include Fluent::Test::Helpers
11 |
# Initializes fluentd's test environment before each test case.
def setup
  Fluent::Test.setup
end
15 |
sub_test_case "configure" do
  # Pure configuration-parsing test: feeds a config document and asserts each
  # parsed parameter, including JSON-typed tags, comma-separated message_keys,
  # and the default JSON handler (:yajl). No AWS calls are made.
  def test_configure
    d = create_driver(<<-EOC)
      @type cloudwatch_logs
      aws_key_id test_id
      aws_sec_key test_key
      region us-east-1
      log_group_name test_group
      log_stream_name test_stream
      auto_create_stream false
      log_group_aws_tags { "tagkey": "tagvalue", "tagkey_2": "tagvalue_2"}
      retention_in_days 5
      message_keys fluentd, aws, cloudwatch
    EOC

    assert_equal('test_id', d.instance.aws_key_id)
    assert_equal('test_key', d.instance.aws_sec_key)
    assert_equal('us-east-1', d.instance.region)
    assert_equal('test_group', d.instance.log_group_name)
    assert_equal('test_stream', d.instance.log_stream_name)
    assert_equal(false, d.instance.auto_create_stream)
    assert_equal("tagvalue", d.instance.log_group_aws_tags.fetch("tagkey"))
    assert_equal("tagvalue_2", d.instance.log_group_aws_tags.fetch("tagkey_2"))
    assert_equal(5, d.instance.retention_in_days)
    assert_equal(:yajl, d.instance.json_handler)
    assert_equal(["fluentd","aws","cloudwatch"], d.instance.message_keys)
  end
end
44 |
45 | sub_test_case "real world" do
# The "real world" tests talk to live CloudWatch Logs; skip them on CI.
def setup
  omit if ENV["CI"] == "true"
end
49 |
# Clean up the log group created by a test; nothing to do on CI (tests omitted).
def teardown
  return if ENV["CI"] == "true"
  clear_log_group
end
54 |
# End-to-end write test against live CloudWatch Logs: two events are fed,
# flushed, and then read back via get_log_events; timestamps must round-trip
# at millisecond precision and messages serialize as JSON.
def test_write
  new_log_stream

  d = create_driver
  time = event_time
  d.run(default_tag: fluentd_tag, flush: true) do
    d.feed(time, {'cloudwatch' => 'logs1'})
    # Addition converts EventTime to seconds
    d.feed(time + 1, {'cloudwatch' => 'logs2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  logs = d.logs
  events = get_log_events
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('{"cloudwatch":"logs1"}', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('{"cloudwatch":"logs2"}', events[1].message)

  assert(logs.any?{|log| log.include?("Called PutLogEvents API") })
end
78 |
# Live tests covering the <format> section: each test wires a different
# formatter (@type csv / ltsv / single_value) into the output plugin and
# asserts the exact formatted message that lands in CloudWatch Logs.
sub_test_case "formatter" do
  test "csv" do
    new_log_stream

    config = {'@type' => 'cloudwatch_logs',
              'auto_create_stream' => true,
              'log_stream_name' => log_stream_name,
              'log_group_name' => log_group_name,
              '@log_level' => 'debug'}
    # Credentials/region/endpoint are appended only when the helper provides them.
    config.merge!(config_elementify(aws_key_id)) if aws_key_id
    config.merge!(config_elementify(aws_sec_key)) if aws_sec_key
    config.merge!(config_elementify(region)) if region
    config.merge!(config_elementify(endpoint)) if endpoint

    d = create_driver(
      Fluent::Config::Element.new('ROOT', '', config, [
        Fluent::Config::Element.new('buffer', 'tag, time', {
          '@type' => 'memory',
          'timekey' => 3600
        }, []),
        Fluent::Config::Element.new('format', '', {
          '@type' => 'csv',
          'fields' => ["message","cloudwatch"],
        }, []),
      ]))

    time = event_time
    d.run(default_tag: fluentd_tag, flush: true) do
      d.feed(time, {'cloudwatch' => 'logs1'})
      # Addition converts EventTime to seconds
      d.feed(time + 1, {'cloudwatch' => 'logs2'})
    end

    # Give CloudWatch time to make the events readable.
    sleep 10

    logs = d.logs
    events = get_log_events
    assert_equal(2, events.size)
    assert_equal((time.to_f * 1000).floor, events[0].timestamp)
    # No 'message' field in the record, so the first CSV column is empty.
    assert_equal('"","logs1"', events[0].message.strip)
    assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
    assert_equal('"","logs2"', events[1].message.strip)

    assert(logs.any?{|log| log.include?("Called PutLogEvents API") })
  end

  test "ltsv" do
    new_log_stream

    config = {'@type' => 'cloudwatch_logs',
              'auto_create_stream' => true,
              'log_stream_name' => log_stream_name,
              'log_group_name' => log_group_name,
              '@log_level' => 'debug'}
    config.merge!(config_elementify(aws_key_id)) if aws_key_id
    config.merge!(config_elementify(aws_sec_key)) if aws_sec_key
    config.merge!(config_elementify(region)) if region
    config.merge!(config_elementify(endpoint)) if endpoint

    d = create_driver(
      Fluent::Config::Element.new('ROOT', '', config, [
        Fluent::Config::Element.new('buffer', 'tag, time', {
          '@type' => 'memory',
          'timekey' => 3600
        }, []),
        Fluent::Config::Element.new('format', '', {
          '@type' => 'ltsv',
          'fields' => ["message","cloudwatch"],
        }, []),
      ]))

    time = event_time
    d.run(default_tag: fluentd_tag, flush: true) do
      d.feed(time, {'cloudwatch' => 'logs1'})
      # Addition converts EventTime to seconds
      d.feed(time + 1, {'cloudwatch' => 'logs2'})
    end

    # Give CloudWatch time to make the events readable.
    sleep 10

    logs = d.logs
    events = get_log_events
    assert_equal(2, events.size)
    assert_equal((time.to_f * 1000).floor, events[0].timestamp)
    assert_equal('cloudwatch:logs1', events[0].message.strip)
    assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
    assert_equal('cloudwatch:logs2', events[1].message.strip)

    assert(logs.any?{|log| log.include?("Called PutLogEvents API") })
  end

  test "single_value" do
    new_log_stream

    config = {'@type' => 'cloudwatch_logs',
              'auto_create_stream' => true,
              'log_stream_name' => log_stream_name,
              'log_group_name' => log_group_name,
              '@log_level' => 'debug'}
    config.merge!(config_elementify(aws_key_id)) if aws_key_id
    config.merge!(config_elementify(aws_sec_key)) if aws_sec_key
    config.merge!(config_elementify(region)) if region
    config.merge!(config_elementify(endpoint)) if endpoint

    d = create_driver(
      Fluent::Config::Element.new('ROOT', '', config, [
        Fluent::Config::Element.new('buffer', 'tag, time', {
          '@type' => 'memory',
          'timekey' => 3600
        }, []),
        Fluent::Config::Element.new('format', '', {
          '@type' => 'single_value',
          'message_key' => "cloudwatch",
        }, []),
      ]))

    time = event_time
    d.run(default_tag: fluentd_tag, flush: true) do
      d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'Hi!'})
      # Addition converts EventTime to seconds
      d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'Hi!'})
    end

    # Give CloudWatch time to make the events readable.
    sleep 10

    logs = d.logs
    events = get_log_events
    assert_equal(2, events.size)
    assert_equal((time.to_f * 1000).floor, events[0].timestamp)
    # Only the configured message_key ('cloudwatch') is emitted.
    assert_equal('logs1', events[0].message.strip)
    assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
    assert_equal('logs2', events[1].message.strip)

    assert(logs.any?{|log| log.include?("Called PutLogEvents API") })
  end
end
215 |
# Non-ASCII payloads must survive the JSON serialization and the PutLogEvents
# round trip unchanged.
def test_write_utf8
  new_log_stream

  d = create_driver
  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, { 'cloudwatch' => 'これは日本語です'.force_encoding('UTF-8')})
  end

  # Give CloudWatch time to make the event readable.
  sleep 10

  events = get_log_events
  assert_equal(1, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('{"cloudwatch":"これは日本語です"}', events[0].message)
end
232 |
# Events spanning more than 24 hours must all be delivered (PutLogEvents
# rejects batches spanning >24h, so the plugin presumably splits them —
# behavior under test, not visible here).
def test_write_24h_apart
  new_log_stream

  # NOTE(review): log_group_name / log_stream_name are already present in
  # default_config; the repeated lines appear redundant — confirm which
  # occurrence fluentd's config parser keeps.
  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    utc
  EOC
  time = event_time
  d.run(default_tag: fluentd_tag) do
    # 25 hours earlier than the other two events.
    d.feed(time - 60 * 60 * 25, {'cloudwatch' => 'logs0'})
    d.feed(time, {'cloudwatch' => 'logs1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events
  assert_equal(3, events.size)
  assert_equal((time.to_i - 60 * 60 * 25) * 1000, events[0].timestamp)
  assert_equal('{"cloudwatch":"logs0"}', events[0].message)
  assert_equal((time.to_f * 1000).floor, events[1].timestamp)
  assert_equal('{"cloudwatch":"logs1"}', events[1].message)
  assert_equal((time.to_i + 1) * 1000, events[2].timestamp)
  assert_equal('{"cloudwatch":"logs2"}', events[2].message)
end
260 |
# With message_keys set, the emitted message is the listed record values
# joined by a space, in the configured key order.
def test_write_with_message_keys
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    message_keys message,cloudwatch
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
  EOC

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'message2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('message1 logs1', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2 logs2', events[1].message)
end
286 |
# max_message_length truncates the joined message to the given byte/char
# length ('message1 logs1' -> 'message1 l' at length 10).
def test_write_with_max_message_length
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    message_keys message,cloudwatch
    max_message_length 10
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
  EOC

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'message2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('message1 l', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2 l', events[1].message)
end
313 |
# use_tag_as_group: the fluentd tag becomes the log group name, so events are
# read back with get_log_events(fluentd_tag).
def test_write_use_tag_as_group
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    message_keys message,cloudwatch
    use_tag_as_group true
    log_stream_name #{log_stream_name}
  EOC

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'message2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events(fluentd_tag)
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('message1 logs1', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2 logs2', events[1].message)
end
339 |
# use_tag_as_stream: the fluentd tag becomes the log stream name within the
# configured log group.
def test_write_use_tag_as_stream
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    message_keys message,cloudwatch
    use_tag_as_stream true
    log_group_name #{log_group_name}
  EOC

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'message2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events(log_group_name, fluentd_tag)
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('message1 logs1', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2 logs2', events[1].message)
end
365 |
# ${tag} placeholder in log_stream_name is expanded from the chunk's tag
# (requires a buffer chunked by tag).
def test_write_use_placeholders
  new_log_stream

  config = {'@type' => 'cloudwatch_logs',
            'auto_create_stream' => true,
            'message_keys' => ["message","cloudwatch"],
            'log_stream_name' => "${tag}",
            'log_group_name' => log_group_name}
  config.merge!(config_elementify(aws_key_id)) if aws_key_id
  config.merge!(config_elementify(aws_sec_key)) if aws_sec_key
  config.merge!(config_elementify(region)) if region
  config.merge!(config_elementify(endpoint)) if endpoint

  d = create_driver(
    Fluent::Config::Element.new('ROOT', '', config,[
      Fluent::Config::Element.new('buffer', 'tag, time', {
        '@type' => 'memory',
        'timekey' => 3600
      }, [])
    ])
  )

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'message2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  # The stream name should have resolved to the event tag.
  events = get_log_events(log_group_name, fluentd_tag)
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('message1 logs1', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2 logs2', events[1].message)
end
403 |
# ${tag[N]} placeholders select dot-separated tag parts; the expected resolved
# stream name 'fluent-plugin-cloudwatch-test' implies fluentd_tag has at least
# four parts joined here with '-'.
def test_write_use_placeholders_parts
  new_log_stream

  config = {'@type' => 'cloudwatch_logs',
            'auto_create_stream' => true,
            'message_keys' => ["message","cloudwatch"],
            'log_stream_name' => "${tag[0]}-${tag[1]}-${tag[2]}-${tag[3]}",
            'log_group_name' => log_group_name}
  config.merge!(config_elementify(aws_key_id)) if aws_key_id
  config.merge!(config_elementify(aws_sec_key)) if aws_sec_key
  config.merge!(config_elementify(region)) if region
  config.merge!(config_elementify(endpoint)) if endpoint

  d = create_driver(
    Fluent::Config::Element.new('ROOT', '', config, [
      Fluent::Config::Element.new('buffer', 'tag, time', {
        '@type' => 'memory',
        'timekey' => 3600
      }, [])
    ])
  )

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'message2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events(log_group_name, 'fluent-plugin-cloudwatch-test')
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('message1 logs1', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2 logs2', events[1].message)
end
441 |
# strftime placeholders (%Y%m%d) in log_stream_name resolve from the chunk's
# timekey; the test reads back from the date-suffixed stream.
def test_write_use_time_placeholders
  new_log_stream

  config = {'@type' => 'cloudwatch_logs',
            'auto_create_stream' => true,
            'message_keys' => ["message","cloudwatch"],
            'log_stream_name' => "fluent-plugin-cloudwatch-test-%Y%m%d",
            'log_group_name' => log_group_name}
  config.merge!(config_elementify(aws_key_id)) if aws_key_id
  config.merge!(config_elementify(aws_sec_key)) if aws_sec_key
  config.merge!(config_elementify(region)) if region
  config.merge!(config_elementify(endpoint)) if endpoint

  d = create_driver(
    Fluent::Config::Element.new('ROOT', '', config,[
      Fluent::Config::Element.new('buffer', 'tag, time', {
        '@type' => 'memory',
        'timekey' => 3600
      }, [])
    ])
  )

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2', 'message' => 'message2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events(log_group_name, "fluent-plugin-cloudwatch-test-#{Time.at(time).strftime("%Y%m%d")}")
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('message1 logs1', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2 logs2', events[1].message)
end
479 |
# include_time_key adds a 'time' field to the JSON message, formatted in UTC
# (ISO-8601 with 'Z') because 'utc' is set.
def test_include_time_key
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    include_time_key true
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    utc
  EOC

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal("{\"cloudwatch\":\"logs1\",\"time\":\"#{Time.at(time.to_r).utc.strftime("%Y-%m-%dT%H:%M:%SZ")}\"}", events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal("{\"cloudwatch\":\"logs2\",\"time\":\"#{Time.at((time+1).to_r).utc.strftime("%Y-%m-%dT%H:%M:%SZ")}\"}", events[1].message)
end
506 |
# Same as test_include_time_key but with localtime: the 'time' field carries a
# numeric UTC offset (%:z) instead of 'Z'.
def test_include_time_key_localtime
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    include_time_key true
    localtime true
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
  EOC

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1'})
    d.feed(time + 1, {'cloudwatch' => 'logs2'})
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal("{\"cloudwatch\":\"logs1\",\"time\":\"#{Time.at(time.to_r).strftime("%Y-%m-%dT%H:%M:%S%:z")}\"}", events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal("{\"cloudwatch\":\"logs2\",\"time\":\"#{Time.at((time+1).to_r).to_time.strftime("%Y-%m-%dT%H:%M:%S%:z")}\"}", events[1].message)
end
533 |
# Per-record routing: group/stream names come from record fields
# (group_name_key / stream_name_key). Records routed to the same stream share
# one PutLogEvents call; ordering within each stream is preserved.
def test_log_group_name_key_and_log_stream_name_key
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name_key group_name_key
    log_stream_name_key stream_name_key
    @log_level debug
  EOC

  stream1 = new_log_stream
  stream2 = new_log_stream

  # Records 0 and 2 target stream1; record 1 targets stream2.
  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name, 'stream_name_key' => stream1},
    {'cloudwatch' => 'logs2', 'message' => 'message1', 'group_name_key' => log_group_name, 'stream_name_key' => stream2},
    {'cloudwatch' => 'logs3', 'message' => 'message1', 'group_name_key' => log_group_name, 'stream_name_key' => stream1},
  ]

  time = event_time
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time + i, record)
    end
  end

  logs = d.logs
  # Call API once for each stream
  assert_equal(2, logs.select {|l| l =~ /Called PutLogEvents API/ }.size)

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events(log_group_name, stream1)
  assert_equal(2, events.size)
  assert_equal(time.to_i * 1000, events[0].timestamp)
  assert_equal((time.to_i + 2) * 1000, events[1].timestamp)
  assert_equal(records[0], JSON.parse(events[0].message))
  assert_equal(records[2], JSON.parse(events[1].message))

  events = get_log_events(log_group_name, stream2)
  assert_equal(1, events.size)
  assert_equal((time.to_i + 1) * 1000, events[0].timestamp)
  assert_equal(records[1], JSON.parse(events[0].message))
end
578 |
# With remove_* enabled, the routing keys are stripped from the emitted record
# after being used for group/stream selection.
def test_remove_log_group_name_key_and_remove_log_stream_name_key
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name_key group_name_key
    log_stream_name_key stream_name_key
    remove_log_group_name_key true
    remove_log_stream_name_key true
  EOC

  time = event_time
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name, 'stream_name_key' => log_stream_name})
  end

  # Give CloudWatch time to make the event readable.
  sleep 10

  events = get_log_events(log_group_name, log_stream_name)
  assert_equal(1, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  # Routing keys must not appear in the stored message.
  assert_equal({'cloudwatch' => 'logs1', 'message' => 'message1'}, JSON.parse(events[0].message))
end
602 |
# log_group_aws_tags (static JSON) should be applied to the log group created
# via auto_create_stream.
def test_log_group_aws_tags
  clear_log_group

  d = create_driver(<<-EOC)
    #{default_config}
    auto_create_stream true
    use_tag_as_stream true
    log_group_name_key group_name_key
    log_group_aws_tags {"tag1": "value1", "tag2": "value2"}
  EOC

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name},
    {'cloudwatch' => 'logs2', 'message' => 'message1', 'group_name_key' => log_group_name},
    {'cloudwatch' => 'logs3', 'message' => 'message1', 'group_name_key' => log_group_name},
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  awstags = get_log_group_tags
  assert_equal("value1", awstags.fetch("tag1"))
  assert_equal("value2", awstags.fetch("tag2"))
end
631 |
# Placeholders inside log_group_aws_tags values resolve from chunk keys:
# ${tag} from the event tag and ${namespace_name} from the record field the
# buffer is chunked on.
def test_log_group_aws_tags_with_placeholders
  clear_log_group

  config = {
    "@type" => "cloudwatch_logs",
    "auto_create_stream" => true,
    "use_tag_as_stream" => true,
    "log_group_name_key" => "group_name_key",
    "log_group_aws_tags" => '{"tag1": "${tag}", "tag2": "${namespace_name}"}',
  }
  config.merge!(config_elementify(aws_key_id)) if aws_key_id
  config.merge!(config_elementify(aws_sec_key)) if aws_sec_key
  config.merge!(config_elementify(region)) if region
  config.merge!(config_elementify(endpoint)) if endpoint

  d = create_driver(
    Fluent::Config::Element.new('ROOT', '', config, [
      Fluent::Config::Element.new('buffer', 'tag, namespace_name', {
        '@type' => 'memory',
      }, [])
    ])
  )

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name, "namespace_name" => "fluentd"},
    {'cloudwatch' => 'logs2', 'message' => 'message1', 'group_name_key' => log_group_name, "namespace_name" => "fluentd"},
    {'cloudwatch' => 'logs3', 'message' => 'message1', 'group_name_key' => log_group_name, "namespace_name" => "fluentd"},
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  awstags = get_log_group_tags
  assert_equal(fluentd_tag, awstags.fetch("tag1"))
  assert_equal("fluentd", awstags.fetch("tag2"))
end
672 |
# A valid retention_in_days (7 is an accepted CloudWatch value) should be set
# as the retention policy of the auto-created log group.
def test_retention_in_days
  clear_log_group

  d = create_driver(<<-EOC)
    #{default_config}
    auto_create_stream true
    use_tag_as_stream true
    log_group_name_key group_name_key
    retention_in_days 7
  EOC

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name},
    {'cloudwatch' => 'logs2', 'message' => 'message1', 'group_name_key' => log_group_name},
    {'cloudwatch' => 'logs3', 'message' => 'message1', 'group_name_key' => log_group_name},
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  retention = get_log_group_retention_days
  assert_equal(d.instance.retention_in_days, retention)
end
700 |
# retention_in_days 4 is not an accepted CloudWatch retention value; the
# plugin should log a failure rather than crash.
def test_invalid_retention_in_days
  clear_log_group

  d = create_driver(<<-EOC)
    #{default_config}
    auto_create_stream true
    use_tag_as_stream true
    log_group_name_key group_name_key
    retention_in_days 4
  EOC

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name},
    {'cloudwatch' => 'logs2', 'message' => 'message1', 'group_name_key' => log_group_name},
    {'cloudwatch' => 'logs3', 'message' => 'message1', 'group_name_key' => log_group_name},
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  assert(d.logs.any?{|log| log.include?("failed to set retention policy for Log group")})
end
727 |
# retention_in_days_key reads retention from each record; with
# remove_retention_in_days_key the field is stripped from the stored message.
def test_remove_retention_in_days_key
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    retention_in_days_key retention_in_days
    remove_retention_in_days_key true
  EOC

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'retention_in_days' => '7'},
    {'cloudwatch' => 'logs2', 'message' => 'message2', 'retention_in_days' => '7'},
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events
  assert_equal(2, events.size)
  assert_equal({'cloudwatch' => 'logs1', 'message' => 'message1'}, JSON.parse(events[0].message))
  assert_equal({'cloudwatch' => 'logs2', 'message' => 'message2'}, JSON.parse(events[1].message))
end
758 |
# log_group_aws_tags_key takes the tag map from a record field and applies it
# to the auto-created log group.
def test_log_group_aws_tags_key
  clear_log_group

  d = create_driver(<<-EOC)
    #{default_config}
    auto_create_stream true
    use_tag_as_stream true
    log_group_name_key group_name_key
    log_group_aws_tags_key aws_tags
  EOC

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name, 'aws_tags' => {"tag1" => "value1", "tag2" => "value2"}},
    {'cloudwatch' => 'logs2', 'message' => 'message1', 'group_name_key' => log_group_name, 'aws_tags' => {"tag1" => "value1", "tag2" => "value2"}},
    {'cloudwatch' => 'logs3', 'message' => 'message1', 'group_name_key' => log_group_name, 'aws_tags' => {"tag1" => "value1", "tag2" => "value2"}}
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  awstags = get_log_group_tags
  assert_equal("value1", awstags.fetch("tag1"))
  assert_equal("value2", awstags.fetch("tag2"))
end
787 |
# remove_log_group_aws_tags_key strips the tag-map field from the stored
# message after it has been consumed for group tagging.
def test_remove_log_group_aws_tags_key
  new_log_stream

  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    log_group_aws_tags_key log_group_tags
    remove_log_group_aws_tags_key true
  EOC

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'log_group_tags' => {"tag1" => "value1", "tag2" => "value2"}},
    {'cloudwatch' => 'logs2', 'message' => 'message2', 'log_group_tags' => {"tag1" => "value1", "tag2" => "value2"}},
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  events = get_log_events
  assert_equal(2, events.size)
  assert_equal({'cloudwatch' => 'logs1', 'message' => 'message1'}, JSON.parse(events[0].message))
  assert_equal({'cloudwatch' => 'logs2', 'message' => 'message2'}, JSON.parse(events[1].message))
end
818 |
# When two records target the same group with different tag maps, only the
# tags from the record that created the group are applied; the later map is
# ignored (tag3/tag4 absent).
def test_log_group_aws_tags_key_same_group_diff_tags
  clear_log_group

  d = create_driver(<<-EOC)
    #{default_config}
    auto_create_stream true
    use_tag_as_stream true
    log_group_name_key group_name_key
    log_group_aws_tags_key aws_tags
  EOC

  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name, 'aws_tags' => {"tag1" => "value1", "tag2" => "value2"}},
    {'cloudwatch' => 'logs3', 'message' => 'message1', 'group_name_key' => log_group_name, 'aws_tags' => {"tag3" => "value3", "tag4" => "value4"}}
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  awstags = get_log_group_tags
  assert_equal("value1", awstags.fetch("tag1"))
  assert_equal("value2", awstags.fetch("tag2"))
  assert_raise KeyError do
    awstags.fetch("tag3")
  end
  assert_raise KeyError do
    awstags.fetch("tag4")
  end
end
852 |
# When log_group_aws_tags_key is configured but records carry no tag field,
# the group is created untagged and the events are still delivered.
def test_log_group_aws_tags_key_no_tags
  clear_log_group

  d = create_driver(<<-EOC)
    #{default_config}
    auto_create_stream true
    log_group_name_key group_name_key
    log_stream_name_key stream_name_key
    remove_log_group_name_key true
    remove_log_stream_name_key true
    log_group_aws_tags_key aws_tags
  EOC

  stream = log_stream_name
  records = [
    {'cloudwatch' => 'logs1', 'message' => 'message1', 'group_name_key' => log_group_name, 'stream_name_key' => stream},
    {'cloudwatch' => 'logs2', 'message' => 'message2', 'group_name_key' => log_group_name, 'stream_name_key' => stream}
  ]

  time = Time.now
  d.run(default_tag: fluentd_tag) do
    records.each_with_index do |record, i|
      d.feed(time.to_i + i, record)
    end
  end

  # Give CloudWatch time to make the events readable.
  sleep 10

  awstags = get_log_group_tags

  # No tags should have been applied.
  assert_raise KeyError do
    awstags.fetch("tag1")
  end

  events = get_log_events(log_group_name, stream)
  assert_equal(2, events.size)
  assert_equal(time.to_i * 1000, events[0].timestamp)
  assert_equal({'cloudwatch' => 'logs1', 'message' => 'message1'}, JSON.parse(events[0].message))
  assert_equal({'cloudwatch' => 'logs2', 'message' => 'message2'}, JSON.parse(events[1].message))
end
893 |
# With put_log_events_retry_limit 0, a ThrottlingException must not be
# retried: no successful "Called PutLogEvents API" log, exactly one failure
# log, and no "retry succeeded" log.
# Fix: removed the dead `@called = false` assignment — the variable was never
# read anywhere in this test (leftover from an earlier stub style).
def test_retrying_on_throttling_exception_with_put_log_events_retry_limit_as_zero
  client = Aws::CloudWatchLogs::Client.new
  # The single allowed attempt is throttled; with retry limit 0 the plugin
  # must give up immediately.
  stub(client).put_log_events(anything) {
    raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error"))
  }.once.ordered

  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    @log_level debug
    put_log_events_retry_limit 0
  EOC
  time = event_time
  # Inject the stubbed client in place of the real AWS SDK client.
  d.instance.instance_variable_set(:@logs, client)
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'message' => 'message1'})
  end

  logs = d.logs
  assert_equal(0, logs.select {|l| l =~ /Called PutLogEvents API/ }.size)
  assert_equal(1, logs.select {|l| l =~ /failed to PutLogEvents/ }.size)
  assert_equal(0, logs.select {|l| l =~ /retry succeeded/ }.size)
end
919 |
# A ThrottlingException on the first PutLogEvents call must be retried;
# the second call succeeds and the retry success must be logged.
# (Fix: removed `@called = false`, which was never read by this test.)
def test_retrying_on_throttling_exception
  resp = Object.new
  mock(resp).rejected_log_events_info {}
  mock(resp).next_sequence_token {}
  client = Aws::CloudWatchLogs::Client.new
  # RR ordered stubs: first call throttles, second call succeeds.
  stub(client).put_log_events(anything) {
    raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error"))
  }.once.ordered
  stub(client).put_log_events(anything) { resp }.once.ordered

  d = create_driver
  time = event_time
  # Inject the stubbed client in place of the real AWS client.
  d.instance.instance_variable_set(:@logs, client)
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'message' => 'message1'})
  end

  logs = d.logs
  # One successful call, one failure before it, and one logged retry success.
  assert_equal(1, logs.select {|l| l =~ /Called PutLogEvents API/ }.size)
  assert_equal(1, logs.select {|l| l =~ /failed to PutLogEvents/ }.size)
  assert_equal(1, logs.select {|l| l =~ /retry succeeded/ }.size)
end
943 |
# When PutLogEvents keeps throttling past put_log_events_retry_limit,
# the chunk must eventually be discarded rather than retried forever.
def test_retrying_on_throttling_exception_and_throw_away
  client = Aws::CloudWatchLogs::Client.new
  # Every PutLogEvents attempt raises a throttling error.
  mock(client).put_log_events(anything).times(any_times) {
    raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error"))
  }
  time = Fluent::Engine.now
  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    put_log_events_retry_limit 1
    @log_level debug
  EOC
  d.instance.instance_variable_set(:@logs, client)
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'message' => 'message1'})
  end

  emitted = d.logs
  # Never succeeds; the discard line also matches /failed to PutLogEvents/,
  # hence 3 total failure lines for retry_limit 1.
  assert_equal(0, emitted.count { |line| line =~ /Called PutLogEvents API/ })
  assert_equal(3, emitted.count { |line| line =~ /failed to PutLogEvents/ })
  assert_equal(1, emitted.count { |line| line =~ /failed to PutLogEvents and discard logs/ })
end
967 |
# An event exceeding the CloudWatch per-event size limit must be dropped
# with a debug-level message rather than sent.
def test_too_large_event
  time = Fluent::Engine.now
  d = create_driver(<<-EOC)
    #{default_config}
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    @log_level debug
  EOC
  oversized = '*' * 1024 * 1024  # 1 MiB payload, over the event size cap
  d.run(default_tag: fluentd_tag) do
    d.feed(time, {'message' => oversized})
  end

  assert(d.logs.any? { |line| line =~ /Log event in .* discarded because it is too large/ })
end
983 |
# With message_keys configured, records whose selected keys are all empty
# must be skipped; non-empty values are emitted as the message.
def test_do_not_emit_empty_record
  new_log_stream

  d = create_driver(<<-CONF)
    #{default_config}
    message_keys cloudwatch,message
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
  CONF

  time = event_time
  d.run(default_tag: fluentd_tag) do
    # First two records each have one non-empty key; the third has none.
    d.feed(time, {'cloudwatch' => 'logs1', 'message' => ''})
    d.feed(time + 1, {'cloudwatch' => '', 'message' => 'message2'})
    d.feed(time + 2, {'cloudwatch' => '', 'message' => ''})
  end

  # Give CloudWatch time to make the events visible.
  sleep 10

  events = get_log_events
  assert_equal(2, events.size)
  assert_equal((time.to_f * 1000).floor, events[0].timestamp)
  assert_equal('logs1', events[0].message)
  assert_equal((time.to_i + 1) * 1000, events[1].timestamp)
  assert_equal('message2', events[1].message)

  # The all-empty record must have produced a skip notice.
  assert(d.logs.any? { |line| line =~ /Within specified message_key\(s\): \(cloudwatch,message\) do not have non-empty record. Skip./ })
end
1013 | end
1014 |
# scrub_record! must replace invalid UTF-8 bytes with the Unicode
# replacement character, recursing into nested hashes and arrays.
def test_scrub_record
  invalid_utf8 = "\xAE"  # a bare byte that is not valid UTF-8
  record = {
    "hash" => { "str" => invalid_utf8.dup },
    "array" => [invalid_utf8.dup],
    "str" => invalid_utf8.dup,
  }

  d = create_driver
  d.instance.send(:scrub_record!, record)

  replacement = "\u{FFFD}"
  assert_equal(replacement, record["hash"]["str"])
  assert_equal(replacement, record["array"][0])
  assert_equal(replacement, record["str"])
end
1033 |
private
# Minimal shared plugin configuration: output type, stream auto-creation,
# and optional credential/region/endpoint lines. The interpolated helpers
# return nil when the matching environment variable is unset, which
# interpolates as an empty line in the heredoc.
def default_config
<<-EOC
@type cloudwatch_logs
auto_create_stream true
#{aws_key_id}
#{aws_sec_key}
#{region}
#{endpoint}
EOC
end
1045 |
# Build an output-plugin test driver. When no configuration string is
# given, a default one targeting the helper-generated log group and
# stream (at debug log level) is used instead.
def create_driver(conf = nil)
  conf ||= <<-EOC
    #{default_config}
    log_group_name #{log_group_name}
    log_stream_name #{log_stream_name}
    @log_level debug
  EOC
  Fluent::Test::Driver::Output.new(Fluent::Plugin::CloudwatchLogsOutput).configure(conf)
end
1057 | end
1058 |
--------------------------------------------------------------------------------
/test/test_helper.rb:
--------------------------------------------------------------------------------
1 | require 'test/unit'
2 | require 'test/unit/rr'
3 | # require 'mocha/test_unit'
4 | require 'fluent/test'
5 | require 'securerandom'
6 |
7 | require 'aws-sdk-cloudwatchlogs'
8 |
# Shared helpers for the CloudWatch Logs plugin tests: a memoized AWS
# client, per-test-run unique group/stream/tag names, and thin wrappers
# around the AWS API used for fixtures and assertions.
# (Fix: `logs` used to rebuild the client options hash on every call even
# though the client itself is memoized; options are now built only once.)
module CloudwatchLogsTestHelper
  private

  # Memoized CloudWatch Logs client. Credentials, region, endpoint and
  # proxy are read from the environment when the client is first built.
  def logs
    @logs ||= begin
      options = {}
      options[:credentials] = Aws::Credentials.new(ENV['aws_key_id'], ENV['aws_sec_key']) if ENV['aws_key_id'] && ENV['aws_sec_key']
      options[:region] = ENV['region'] if ENV['region']
      options[:endpoint] = ENV['endpoint'] if ENV['endpoint']
      options[:http_proxy] = ENV['http_proxy'] if ENV['http_proxy']
      Aws::CloudWatchLogs::Client.new(options)
    end
  end

  # Override the generated log group name for the current test.
  def set_log_group_name(log_group_name)
    @log_group_name = log_group_name
  end

  # Unique-per-run log group name (memoized).
  def log_group_name
    @log_group_name ||= "fluent-plugin-cloudwatch-test-#{Time.now.to_f}"
  end

  # The next four helpers render optional plugin config lines; each
  # returns nil when its environment variable is unset.
  def aws_key_id
    "aws_key_id #{ENV['aws_key_id']}" if ENV['aws_key_id']
  end

  def aws_sec_key
    "aws_sec_key #{ENV['aws_sec_key']}" if ENV['aws_sec_key']
  end

  def region
    "region #{ENV['region']}" if ENV['region']
  end

  def endpoint
    "endpoint #{ENV['endpoint']}" if ENV['endpoint']
  end

  # Turn the first "key value" pair of a space-separated config line into
  # a one-entry hash, e.g. "region us-east-1" => {"region" => "us-east-1"}.
  def config_elementify(conf)
    conf.split(' ').each_slice(2).map { |k, v| { k => v } }.first
  end

  # Memoized log stream name; generated on first use.
  def log_stream_name(log_stream_name_prefix = nil)
    @log_stream_name ||= new_log_stream(log_stream_name_prefix)
  end

  # Generate (and remember) a fresh unique stream name, optionally prefixed.
  def new_log_stream(log_stream_name_prefix = nil)
    uuid = SecureRandom.uuid
    @log_stream_name = log_stream_name_prefix ? log_stream_name_prefix + uuid : uuid
  end

  def get_log_group_tags(name = nil)
    name ||= log_group_name
    logs.list_tags_log_group(log_group_name: name).tags
  end

  # NOTE(review): raises NoMethodError on nil if no group matches the
  # prefix; callers are expected to have created the group first.
  def get_log_group_retention_days(name = nil)
    name ||= log_group_name
    logs.describe_log_groups(log_group_name_prefix: name, limit: 1).log_groups.first.retention_in_days
  end

  # Delete the test log groups, ignoring ones that were never created.
  def clear_log_group
    [log_group_name, fluentd_tag].each do |name|
      begin
        logs.delete_log_group(log_group_name: name)
      rescue Aws::CloudWatchLogs::Errors::ResourceNotFoundException
        # pass
      end
    end
  end

  # Unique-per-run fluentd tag (some tests also use it as a group name).
  def fluentd_tag
    @fluentd_tag ||= "fluent.plugin.cloudwatch.test.#{Time.now.to_f}"
  end

  # Ensure the current log group and stream exist, tolerating races with
  # auto-creation by the plugin under test.
  def create_log_stream
    begin
      logs.create_log_group(log_group_name: log_group_name)
    rescue Aws::CloudWatchLogs::Errors::ResourceAlreadyExistsException
      # pass
    end

    begin
      logs.create_log_stream(log_group_name: log_group_name, log_stream_name: log_stream_name)
    rescue Aws::CloudWatchLogs::Errors::ResourceAlreadyExistsException
      # pass
    end
  end

  def get_log_events(group = log_group_name, stream = log_stream_name)
    logs.get_log_events(log_group_name: group, log_stream_name: stream).events
  end

  def put_log_events(events)
    args = {
      log_events: events,
      log_group_name: log_group_name,
      log_stream_name: log_stream_name,
    }
    logs.put_log_events(args)
  end
end
108 |
--------------------------------------------------------------------------------