├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml └── workflows │ └── build.yml ├── .gitignore ├── CHANGELOG.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── COPYRIGHT ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── THIRD-PARTY ├── VERSION ├── aggregate ├── Readme.md ├── aggregate.pb.go ├── aggregate.proto ├── aggregator.go └── aggregator_test.go ├── fluent-bit-kinesis.go ├── go.mod ├── go.sum ├── kinesis ├── kinesis.go ├── kinesis_test.go └── mock_kinesis │ └── mock_kinesis.go └── util └── random.go /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @aws/aws-firelens 2 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available:* 2 | 3 | *Description of changes:* 4 | 5 | 6 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: "/" # Location of package manifests 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ mainline ] 6 | pull_request: 7 | branches: [ mainline ] 8 | 9 | jobs: 10 | 11 | build: 12 | name: Build 13 | runs-on: ubuntu-latest 14 | steps: 15 | 16 | - name: Set up Go 1.20 17 | uses: actions/setup-go@v2 18 | with: 19 | go-version: "1.20" 20 | id: go 21 | 22 | - name: Install cross-compiler for Windows 23 | run: sudo apt-get install -y gcc-multilib gcc-mingw-w64 24 | 25 | - name: Check out code into the Go module directory 26 | uses: actions/checkout@v2 27 | 28 | - name: golint 29 | run: go install golang.org/x/lint/golint@latest 30 | 31 | - name: Build 32 | run: make build windows-release test 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # build output dir 12 | bin 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 1.10.2 4 | * Enhancement - Upgrade Go version to 1.20 5 | 6 | ## 1.10.1 7 | * Enhancement - Added different base user agent for Linux and Windows 8 | 9 | ## 1.10.0 10 | * Feature - Add support for building this plugin on Windows. 
*Note that this only adds support in this plugin repo for Windows compilation.*
11 | 
12 | ## 1.9.0
13 | * Feature - Add timeout config for AWS SDK Go HTTP calls (#178)
14 | * Bug - Fix message loss issue using concurrency feature with 0 retries (#179)
15 | 
16 | ## 1.8.1
17 | * Bug - Fix truncation issue after compression (#183)
18 | 
19 | ## 1.8.0
20 | * Feature - Add support for gzip compression of records (#162)
21 | 
22 | ## 1.7.3
23 | * Enhancement - Upgrade Go version to 1.17
24 | 
25 | ## 1.7.2
26 | * Bug - Fix aggregator size estimation (#155)
27 | * Bug - Fix partition key computation for aggregation (#158)
28 | 
29 | ## 1.7.0
30 | * Feature - Add new option replace_dots to replace dots in key names (#79)
31 | * Enhancement - Add support for nested partition_key in log record (#30)
32 | * Enhancement - Change the log severity from `info` to `debug` for aggregation log statements (#78)
33 | 
34 | ## 1.6.1
35 | * Bug - Truncate records to max size (#74)
36 | 
37 | ## 1.6.0
38 | * Feature - Add support for zlib compression of records (#26)
39 | * Feature - Add KPL aggregation support (#16)
40 | 
41 | ## 1.5.0
42 | * Feature - Add log_key option for kinesis output plugin (#40)
43 | 
44 | ## 1.4.0
45 | * Feature - Add sts_endpoint param for custom STS API endpoint (#39)
46 | 
47 | ## 1.3.0
48 | * Feature - Add experimental concurrency feature (#33)
49 | 
50 | ## 1.2.2
51 | * Bug - Remove exponential backoff code (#27)
52 | 
53 | ## 1.2.1
54 | * Bug Fix - Updated logic to calculate the individual and maximum record size (#22)
55 | 
56 | ## 1.2.0
57 | * Feature - Add time_key and time_key_format config options to add timestamp to records (#17)
58 | 
59 | ## 1.1.0
60 | * Feature - Support IAM Roles for Service Accounts in Amazon EKS (#6)
61 | * Enhancement - Change the log severity from `error` to `warning` for retryable API errors (#7)
62 | 
63 | ## 1.0.0
64 | Initial release of the Amazon Kinesis Streams for Fluent Bit Plugin
65 | 
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | / @aws/aws-firelens
2 | 
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 | 
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 | 
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 | 
9 | 
10 | ## Reporting Bugs/Feature Requests
11 | 
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 | 
14 | When filing an issue, please check [existing open](https://github.com/awslabs/amazon-kinesis-streams-for-fluent-bit/issues) or [recently closed](https://github.com/awslabs/amazon-kinesis-streams-for-fluent-bit/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20) issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 | 
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 | 
22 | 
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 | 
26 | 1. You are working against the latest source on the *master* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 | 
30 | To send us a pull request, please:
31 | 
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass (see the example commands at the end of this document).
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 | 
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 | 
42 | 
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/amazon-kinesis-streams-for-fluent-bit/labels/help%20wanted) issues is a great place to start.
45 | 
46 | 
47 | ## Code of Conduct
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
50 | opensource-codeofconduct@amazon.com with any additional questions or comments.
51 | 
52 | 
53 | ## Security issue notifications
54 | If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
55 | 
56 | 
57 | ## Licensing
58 | 
59 | See the [LICENSE](https://github.com/awslabs/amazon-kinesis-streams-for-fluent-bit/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 | 
61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
62 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | not use this file except in compliance with the License. A copy of the 5 | License is located at 6 | 7 | http://aws.amazon.com/apache2.0/ 8 | 9 | or in the "license" file accompanying this file. This file is distributed 10 | on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 | express or implied. See the License for the specific language governing 12 | permissions and limitations under the License. -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License. 13 | 14 | # Build settings. 15 | GOARCH ?= amd64 16 | COMPILER ?= x86_64-w64-mingw32-gcc # Cross-compiler for Windows 17 | 18 | all: build 19 | 20 | SOURCES := $(shell find . -name '*.go') 21 | PLUGIN_BINARY := ./bin/kinesis.so 22 | PLUGIN_VERSION := $(shell cat VERSION) 23 | 24 | .PHONY: release 25 | release: 26 | mkdir -p ./bin 27 | go build -buildmode c-shared -o ./bin/kinesis.so ./ 28 | @echo "Built Amazon Kinesis Data Streams Fluent Bit Plugin v$(PLUGIN_VERSION)" 29 | 30 | .PHONY: windows-release 31 | windows-release: 32 | mkdir -p ./bin 33 | GOOS=windows GOARCH=$(GOARCH) CGO_ENABLED=1 CC=$(COMPILER) go build -buildmode c-shared -o ./bin/kinesis.dll ./ 34 | @echo "Built Amazon Kinesis Data Streams Fluent Bit Plugin v$(PLUGIN_VERSION) for Windows" 35 | 36 | 37 | .PHONY: build 38 | build: $(PLUGIN_BINARY) release 39 | 40 | $(PLUGIN_BINARY): $(SOURCES) 41 | PATH=${PATH} golint ./kinesis 42 | 43 | .PHONY: generate 44 | generate: $(SOURCES) 45 | go generate ./... 46 | 47 | .PHONY: test 48 | test: 49 | go test -timeout=120s -v -cover ./... 
50 | 
51 | .PHONY: clean
52 | clean:
53 | 	rm -rf ./bin/*
54 | 
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | Fluent Bit Plugin for Kinesis Streams
2 | Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Test Actions Status](https://github.com/aws/amazon-kinesis-streams-for-fluent-bit/workflows/Build/badge.svg)](https://github.com/aws/amazon-kinesis-streams-for-fluent-bit/actions)
2 | ## Fluent Bit Plugin for Amazon Kinesis Data Streams
3 | 
4 | **NOTE: A new higher performance Fluent Bit Kinesis Plugin has been released.** Check out our [official guidance](#new-higher-performance-core-fluent-bit-plugin).
5 | 
6 | A Fluent Bit output plugin for Amazon Kinesis Data Streams.
7 | 
8 | #### Security disclosures
9 | 
10 | If you think you’ve found a potential security issue, please do not post it in the Issues. Instead, please follow the instructions [here](https://aws.amazon.com/security/vulnerability-reporting/) or email AWS security directly at [aws-security@amazon.com](mailto:aws-security@amazon.com).
11 | 
12 | ### Usage
13 | 
14 | Run `make` to build `./bin/kinesis.so`. Then use with Fluent Bit:
15 | ```
16 | ./fluent-bit -e ./kinesis.so -i cpu \
17 | -o kinesis \
18 | -p "region=us-west-2" \
19 | -p "stream=test-stream"
20 | ```
21 | 
22 | For building Windows binaries, we need to install the `mingw-w64` cross-compiler. This can be done using:
23 | ```
24 | sudo apt-get install -y gcc-multilib gcc-mingw-w64
25 | ```
26 | After this step, run `make windows-release`. Then use with Fluent Bit on Windows:
27 | ```
28 | ./fluent-bit.exe -e ./kinesis.dll -i dummy `
29 | -o kinesis `
30 | -p "region=us-west-2" `
31 | -p "stream=test-stream"
32 | ```
33 | 
34 | ### Plugin Options
35 | 
36 | * `region`: The region your Kinesis Data Stream is in.
37 | * `stream`: The name of the Kinesis Data Stream that you want log records sent to.
38 | * `partition_key`: A partition key is used to group data by shard within a stream. A Kinesis Data Stream uses the partition key that is associated with each data record to determine which shard a given data record belongs to. For example, if your logs come from Docker containers, you can use `container_id` as the partition key, and the logs will be grouped and stored on different shards depending upon the id of the container they were generated from. As the data within a shard are coarsely ordered, you will get all your logs from one container in one shard roughly in order. Nested partition keys are supported; you can use `->` to point to your target key when it is nested under another key. For example, your `partition_key` could be `kubernetes->pod_name`. If you don't set a partition key or set an invalid one, a random key will be generated, and the logs will be directed to random shards. If the partition key is invalid, the plugin will print a warning message.
39 | * `data_keys`: By default, the whole log record will be sent to Kinesis. If you specify key name(s) with this option, then only those keys and values will be sent to Kinesis. For example, if you are using the Fluentd Docker log driver, you can specify `data_keys log` and only the log message will be sent to Kinesis. If you specify multiple keys, they should be comma delimited, as in the snippet below.
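  A minimal illustrative snippet (the `log` and `container_id` keys here are just example key names taken from the Docker log driver case above):

  ```
  [OUTPUT]
      Name kinesis
      Match *
      region us-west-2
      stream test-stream
      data_keys log,container_id
  ```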
40 | * `log_key`: By default, the whole log record will be sent to Kinesis. If you specify a key name with this option, then only the value of that key will be sent to Kinesis. For example, if you are using the Fluentd Docker log driver, you can specify `log_key log` and only the log message will be sent to Kinesis.
41 | * `role_arn`: ARN of an IAM role to assume (for cross account access).
42 | * `endpoint`: Specify a custom endpoint for the Kinesis Streams API.
43 | * `sts_endpoint`: Specify a custom endpoint for the STS API; used to assume your custom role provided with `role_arn`.
44 | * `append_newline`: If you set `append_newline` to true, a newline will be added after each log record.
45 | * `time_key`: Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will not be added to records sent to Kinesis. The timestamp inserted comes from the timestamp that Fluent Bit associates with the log record, which is set by the input that collected it. For example, if you are reading a log file with the [tail input](https://docs.fluentbit.io/manual/pipeline/inputs/tail), then the timestamp for each log line/record can be obtained/parsed by using a Fluent Bit parser on the log line.
46 | * `time_key_format`: [strftime](http://man7.org/linux/man-pages/man3/strftime.3.html) compliant format string for the timestamp; for example, `%Y-%m-%dT%H:%M:%S%z`. This option is used with `time_key`. You can also use `%L` for milliseconds and `%f` for microseconds. Remember that the `time_key` option only inserts the timestamp that Fluent Bit already has for each record into the record, so the record must have been collected with a sub-second precision timestamp in order for the sub-second formatters to be meaningful. If you are using ECS FireLens, make sure you are running Amazon ECS Container Agent v1.42.0 or later; otherwise, the timestamps associated with your stdout & stderr container logs will only have second precision.
47 | * `experimental_concurrency`: Specify a limit on the number of concurrent goroutines used to flush records to Kinesis. By default `experimental_concurrency` is set to 0 and records are flushed in Fluent Bit's single thread. This means that requests to Kinesis will block the execution of Fluent Bit. If this value is set to `4`, for example, then calls from Fluent Bit to flush records will spawn concurrent goroutines until the limit of `4` concurrent goroutines is reached. Once the `experimental_concurrency` limit is reached, calls to Flush will return a retry code. The upper limit of the `experimental_concurrency` option is `10`. WARNING: Enabling `experimental_concurrency` can lead to data loss if the retry count is reached. Enabling concurrency will increase resource usage (memory and CPU).
48 | * `experimental_concurrency_retries`: Specify a limit to the number of retries concurrent goroutines will attempt. By default `4` retries will be attempted before records are dropped.
49 | * `aggregation`: Setting `aggregation` to `true` will enable KPL aggregation of records sent to Kinesis. This feature changes the behavior of the `partition_key` feature. See the KPL aggregation section below for more details.
50 | * `compression`: Specify an algorithm for compression of each record. Supported compression algorithms are `zlib` and `gzip`. By default this feature is disabled and records are not compressed.
51 | * `replace_dots`: Replace dot characters in key names with the value of this option.
For example, if you add `replace_dots _` in your config then all occurrences of `.` will be replaced with an underscore. By default, dots will not be replaced. 52 | * `http_request_timeout`: Specify a timeout (in seconds) for the underlying AWS SDK Go HTTP call when sending records to Kinesis. By default, a timeout of `0` is used, indicating no timeout. Note that even with no timeout, the default behavior of the AWS SDK Go library may still lead to an eventual timeout. 53 | 54 | ### Permissions 55 | 56 | The plugin requires `kinesis:PutRecords` permissions. 57 | 58 | ### Credentials 59 | 60 | This plugin uses the AWS SDK Go, and uses its [default credential provider chain](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html). If you are using the plugin on Amazon EC2 or Amazon ECS or Amazon EKS, the plugin will use your EC2 instance role or [ECS Task role permissions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) or [EKS IAM Roles for Service Accounts for pods](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). The plugin can also retrieve credentials from a [shared credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html), or from the standard `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN` environment variables. 61 | 62 | ### Environment Variables 63 | 64 | * `FLB_LOG_LEVEL`: Set the log level for the plugin. Valid values are: `debug`, `info`, and `error` (case insensitive). Default is `info`. **Note**: Setting log level in the Fluent Bit Configuration file using the Service key will not affect the plugin log level (because the plugin is external). 65 | * `SEND_FAILURE_TIMEOUT`: Allows you to configure a timeout if the plugin can not send logs to Kinesis Streams. The timeout is specified as a [Golang duration](https://golang.org/pkg/time/#ParseDuration), for example: `5m30s`. If the plugin has failed to make any progress for the given period of time, then it will exit and kill Fluent Bit. This is useful in scenarios where you want your logging solution to fail fast if it has been misconfigured (i.e. network or credentials have not been set up to allow it to send to Kinesis Streams). 66 | 67 | ### Fluent Bit Versions 68 | 69 | This plugin has been tested with Fluent Bit 1.2.0+. It may not work with older Fluent Bit versions. We recommend using the latest version of Fluent Bit as it will contain the newest features and bug fixes. 70 | 71 | ### Example Fluent Bit Config File 72 | 73 | ``` 74 | [INPUT] 75 | Name forward 76 | Listen 0.0.0.0 77 | Port 24224 78 | 79 | [OUTPUT] 80 | Name kinesis 81 | Match * 82 | region us-west-2 83 | stream my-kinesis-stream-name 84 | partition_key container_id 85 | append_newline true 86 | replace_dots _ 87 | ``` 88 | 89 | ### AWS for Fluent Bit 90 | 91 | We distribute a container image with Fluent Bit and this plugin. 92 | 93 | ##### GitHub 94 | 95 | [github.com/aws/aws-for-fluent-bit](https://github.com/aws/aws-for-fluent-bit) 96 | 97 | ##### Amazon ECR Public Gallery 98 | 99 | [aws-for-fluent-bit](https://gallery.ecr.aws/aws-observability/aws-for-fluent-bit) 100 | 101 | Our images are available in Amazon ECR Public Gallery. 
You can download images with different tags using the following command:
102 | 
103 | ```
104 | docker pull public.ecr.aws/aws-observability/aws-for-fluent-bit:<tag>
105 | ```
106 | 
107 | For example, you can pull the image with the latest version by:
108 | 
109 | ```
110 | docker pull public.ecr.aws/aws-observability/aws-for-fluent-bit:latest
111 | ```
112 | 
113 | If you see errors for image pull limits, try logging in to public ECR with your AWS credentials:
114 | 
115 | ```
116 | aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws
117 | ```
118 | 
119 | You can check the [Amazon ECR Public official doc](https://docs.aws.amazon.com/AmazonECR/latest/public/get-set-up-for-amazon-ecr.html) for more details.
120 | 
121 | ##### Docker Hub
122 | 
123 | [amazon/aws-for-fluent-bit](https://hub.docker.com/r/amazon/aws-for-fluent-bit/tags)
124 | 
125 | ##### Amazon ECR
126 | 
127 | You can use our SSM Public Parameters to find the Amazon ECR image URI in your region:
128 | 
129 | ```
130 | aws ssm get-parameters-by-path --path /aws/service/aws-for-fluent-bit/
131 | ```
132 | 
133 | For more see [our docs](https://github.com/aws/aws-for-fluent-bit#public-images).
134 | 
135 | 
136 | ### KPL aggregation
137 | 
138 | KPL aggregation can be enabled by setting the `aggregation` parameter to `true` (default is false). With aggregation enabled, each Record in the PutRecords request can contain multiple serialized records in the KPL protobuf structure. This batch of records will only count as a single record towards the Kinesis records per second limit (currently 1000 records/sec per shard).
139 | 
140 | The advantages of enabling KPL aggregation are:
141 | 
142 | - Increased throughput, and decreased Kinesis costs for smaller records (records less than 1K).
143 | - Less overhead in error checking PutRecords results (fewer PutRecords results to verify).
144 | - Firehose will de-aggregate the records automatically (free de-aggregation if Firehose is leveraged).
145 | 
146 | The disadvantages are:
147 | - The flush time (or buffer size) will need to be tuned to take advantage of aggregation (more on that below).
148 | - You must use the KCL library to read data from Kinesis and de-aggregate the protobuf serialization (if Firehose isn't the consumer).
149 | - The `partition_key` feature isn't fully compatible with aggregation, given that multiple records are packed into each entry in the PutRecords request. The `partition_key` value of the first record in the batch will be used to route the entire batch to a given shard. Given this limitation, using both `partition_key` and `aggregation` simultaneously requires careful consideration. In most container log use cases, all logs from a single container/pod are sent in the same stream, thus if you use the pod/container as the partition key, it should still work as expected since all records in an aggregated batch can use the same partition key. In other use cases, aggregation will cause records that should have had different partition keys to have the same partition key.
150 | 
151 | KPL Aggregated Record Reference: https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md
152 | 
153 | #### Tuning for aggregation
154 | 
155 | When using `aggregation`, the buffers and flush time may need to be tuned. For low volume use cases, a longer flush time may be preferable to take full advantage of the aggregation cost savings.
156 | 
157 | More specifically, increasing the flush value will ensure that the most records are aggregated, taking full advantage of the cost savings.
158 | 
159 | ```
160 | [SERVICE]
161 |     Flush 20
162 | ```
163 | 
164 | 
165 | ### Example Fluent Bit Aggregation Config File
166 | 
167 | ```
168 | [SERVICE]
169 |     Flush 20
170 | 
171 | [INPUT]
172 |     Name forward
173 |     Listen 0.0.0.0
174 |     Port 24224
175 | 
176 | [OUTPUT]
177 |     Name kinesis
178 |     Match *
179 |     region us-west-2
180 |     stream my-kinesis-stream-name
181 |     aggregation true
182 |     append_newline true
183 | ```
184 | 
185 | ### ZLIB Compression
186 | 
187 | Enabling `zlib` compression will compress each record individually, reducing the network bandwidth required to send logs. Using this feature in conjunction with `aggregation` can greatly reduce the number of Kinesis shards required.
188 | 
189 | Compression Advantages:
190 | 
191 | - Reduces network bandwidth required
192 | - Reduces Kinesis shard count in some scenarios
193 | 
194 | Compression Disadvantages:
195 | 
196 | - Fluent Bit will require more CPU and memory to send records
197 | - A consumer must decompress the records
198 | 
199 | 
200 | Example config:
201 | 
202 | ```
203 | [SERVICE]
204 |     Flush 20
205 | 
206 | [INPUT]
207 |     Name forward
208 |     Listen 0.0.0.0
209 |     Port 24224
210 | 
211 | [OUTPUT]
212 |     Name kinesis
213 |     Match *
214 |     region us-west-2
215 |     stream my-kinesis-stream-name
216 |     compression zlib
217 |     append_newline true
218 | ```
219 | 
220 | ### New Higher Performance Core Fluent Bit Plugin
221 | 
222 | We have released a [new higher performance Kinesis Streams plugin](https://docs.fluentbit.io/manual/pipeline/outputs/kinesis) named `kinesis_streams`.
223 | 
224 | That plugin has many of the features of this older, lower-performance plugin. Please compare this document with its [documentation](https://docs.fluentbit.io/manual/pipeline/outputs/kinesis) for an up-to-date feature set comparison between the two plugins.
225 | 
226 | #### Do you plan to deprecate this older plugin?
227 | 
228 | This plugin will continue to be supported. However, we are pausing development on it and will focus on the high performance version instead.
229 | 
230 | #### Which plugin should I use?
231 | 
232 | If the features of the higher performance plugin are sufficient for your use cases, please use it. It can achieve higher throughput and will consume less CPU and memory.
233 | 
234 | As time goes on, we expect new features to be added to the C plugin only; however, this is determined on a case-by-case basis. There is some feature gap between the two plugins. Please consult the [C plugin documentation](https://docs.fluentbit.io/manual/pipeline/outputs/firehose) and this document for the features offered by each plugin.
235 | 
236 | #### How can I migrate to the higher performance plugin?
237 | 
238 | For many users, you can simply replace the plugin name `kinesis` with the new name `kinesis_streams` (see the sketch at the end of this README).
239 | 
240 | #### Do you accept contributions to both plugins?
241 | 
242 | Yes. The high performance plugin is written in C, and this plugin is written in Golang. We understand that Go is an easier language for newer contributors to write code in; that is one of the primary reasons we are continuing to maintain this repo.
243 | 
244 | However, if you can write code in C, please consider contributing new features to the [higher performance plugin](https://github.com/fluent/fluent-bit/tree/master/plugins/out_kinesis_firehose).
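As an illustrative sketch of the migration described above (assuming only the basic `region` and `stream` options used in this README; consult the `kinesis_streams` [documentation](https://docs.fluentbit.io/manual/pipeline/outputs/kinesis) for its full option set), the change can be as small as renaming the output plugin:

```
[OUTPUT]
    Name kinesis_streams
    Match *
    region us-west-2
    stream my-kinesis-stream-name
```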
245 | -------------------------------------------------------------------------------- /THIRD-PARTY: -------------------------------------------------------------------------------- 1 | ** github.com/aws/amazon-kinesis-firehose-for-fluent-bit; version 0883cb76f511 2 | -- https://github.com/aws/amazon-kinesis-firehose-for-fluent-bit 3 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | ** github.com/aws/aws-sdk-go; version v1.25.1 -- 5 | https://github.com/aws/aws-sdk-go 6 | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 7 | Copyright 2014-2015 Stripe, Inc. 8 | ** github.com/fluent/fluent-bit-go; version ea13c021720c -- 9 | https://github.com/fluent/fluent-bit-go 10 | Copyright (C) 2015-2017 Treasure Data Inc. 11 | ** github.com/golang/mock; version v1.3.1 -- https://github.com/golang/mock 12 | Copyright 2010 Google Inc. 13 | ** github.com/google/gofuzz; version v1.0.0 -- https://github.com/google/gofuzz 14 | Copyright 2014 Google Inc. All rights reserved. 15 | ** github.com/jmespath/go-jmespath; version c2b33e8439af -- 16 | https://github.com/jmespath/go-jmespath 17 | Copyright 2015 James Saryerwinnie 18 | ** github.com/modern-go/concurrent; version e0a39a4cb421 -- 19 | https://github.com/modern-go/concurrent 20 | None 21 | ** github.com/modern-go/reflect2; version 4b7aa43c6742 -- 22 | https://github.com/modern-go/reflect2/blob/master/LICENSE 23 | None 24 | 25 | Apache License 26 | 27 | Version 2.0, January 2004 28 | 29 | http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 30 | DISTRIBUTION 31 | 32 | 1. Definitions. 33 | 34 | "License" shall mean the terms and conditions for use, reproduction, and 35 | distribution as defined by Sections 1 through 9 of this document. 36 | 37 | "Licensor" shall mean the copyright owner or entity authorized by the 38 | copyright owner that is granting the License. 39 | 40 | "Legal Entity" shall mean the union of the acting entity and all other 41 | entities that control, are controlled by, or are under common control 42 | with that entity. For the purposes of this definition, "control" means 43 | (i) the power, direct or indirect, to cause the direction or management 44 | of such entity, whether by contract or otherwise, or (ii) ownership of 45 | fifty percent (50%) or more of the outstanding shares, or (iii) 46 | beneficial ownership of such entity. 47 | 48 | "You" (or "Your") shall mean an individual or Legal Entity exercising 49 | permissions granted by this License. 50 | 51 | "Source" form shall mean the preferred form for making modifications, 52 | including but not limited to software source code, documentation source, 53 | and configuration files. 54 | 55 | "Object" form shall mean any form resulting from mechanical 56 | transformation or translation of a Source form, including but not limited 57 | to compiled object code, generated documentation, and conversions to 58 | other media types. 59 | 60 | "Work" shall mean the work of authorship, whether in Source or Object 61 | form, made available under the License, as indicated by a copyright 62 | notice that is included in or attached to the work (an example is 63 | provided in the Appendix below). 64 | 65 | "Derivative Works" shall mean any work, whether in Source or Object form, 66 | that is based on (or derived from) the Work and for which the editorial 67 | revisions, annotations, elaborations, or other modifications represent, 68 | as a whole, an original work of authorship. 
For the purposes of this 69 | License, Derivative Works shall not include works that remain separable 70 | from, or merely link (or bind by name) to the interfaces of, the Work and 71 | Derivative Works thereof. 72 | 73 | "Contribution" shall mean any work of authorship, including the original 74 | version of the Work and any modifications or additions to that Work or 75 | Derivative Works thereof, that is intentionally submitted to Licensor for 76 | inclusion in the Work by the copyright owner or by an individual or Legal 77 | Entity authorized to submit on behalf of the copyright owner. For the 78 | purposes of this definition, "submitted" means any form of electronic, 79 | verbal, or written communication sent to the Licensor or its 80 | representatives, including but not limited to communication on electronic 81 | mailing lists, source code control systems, and issue tracking systems 82 | that are managed by, or on behalf of, the Licensor for the purpose of 83 | discussing and improving the Work, but excluding communication that is 84 | conspicuously marked or otherwise designated in writing by the copyright 85 | owner as "Not a Contribution." 86 | 87 | "Contributor" shall mean Licensor and any individual or Legal Entity on 88 | behalf of whom a Contribution has been received by Licensor and 89 | subsequently incorporated within the Work. 90 | 91 | 2. Grant of Copyright License. Subject to the terms and conditions of this 92 | License, each Contributor hereby grants to You a perpetual, worldwide, 93 | non-exclusive, no-charge, royalty-free, irrevocable copyright license to 94 | reproduce, prepare Derivative Works of, publicly display, publicly perform, 95 | sublicense, and distribute the Work and such Derivative Works in Source or 96 | Object form. 97 | 98 | 3. Grant of Patent License. Subject to the terms and conditions of this 99 | License, each Contributor hereby grants to You a perpetual, worldwide, 100 | non-exclusive, no-charge, royalty-free, irrevocable (except as stated in 101 | this section) patent license to make, have made, use, offer to sell, sell, 102 | import, and otherwise transfer the Work, where such license applies only to 103 | those patent claims licensable by such Contributor that are necessarily 104 | infringed by their Contribution(s) alone or by combination of their 105 | Contribution(s) with the Work to which such Contribution(s) was submitted. 106 | If You institute patent litigation against any entity (including a 107 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a 108 | Contribution incorporated within the Work constitutes direct or contributory 109 | patent infringement, then any patent licenses granted to You under this 110 | License for that Work shall terminate as of the date such litigation is 111 | filed. 112 | 113 | 4. Redistribution. 
You may reproduce and distribute copies of the Work or 114 | Derivative Works thereof in any medium, with or without modifications, and 115 | in Source or Object form, provided that You meet the following conditions: 116 | 117 | (a) You must give any other recipients of the Work or Derivative Works a 118 | copy of this License; and 119 | 120 | (b) You must cause any modified files to carry prominent notices stating 121 | that You changed the files; and 122 | 123 | (c) You must retain, in the Source form of any Derivative Works that You 124 | distribute, all copyright, patent, trademark, and attribution notices 125 | from the Source form of the Work, excluding those notices that do not 126 | pertain to any part of the Derivative Works; and 127 | 128 | (d) If the Work includes a "NOTICE" text file as part of its 129 | distribution, then any Derivative Works that You distribute must include 130 | a readable copy of the attribution notices contained within such NOTICE 131 | file, excluding those notices that do not pertain to any part of the 132 | Derivative Works, in at least one of the following places: within a 133 | NOTICE text file distributed as part of the Derivative Works; within the 134 | Source form or documentation, if provided along with the Derivative 135 | Works; or, within a display generated by the Derivative Works, if and 136 | wherever such third-party notices normally appear. The contents of the 137 | NOTICE file are for informational purposes only and do not modify the 138 | License. You may add Your own attribution notices within Derivative Works 139 | that You distribute, alongside or as an addendum to the NOTICE text from 140 | the Work, provided that such additional attribution notices cannot be 141 | construed as modifying the License. 142 | 143 | You may add Your own copyright statement to Your modifications and may 144 | provide additional or different license terms and conditions for use, 145 | reproduction, or distribution of Your modifications, or for any such 146 | Derivative Works as a whole, provided Your use, reproduction, and 147 | distribution of the Work otherwise complies with the conditions stated in 148 | this License. 149 | 150 | 5. Submission of Contributions. Unless You explicitly state otherwise, any 151 | Contribution intentionally submitted for inclusion in the Work by You to the 152 | Licensor shall be under the terms and conditions of this License, without 153 | any additional terms or conditions. Notwithstanding the above, nothing 154 | herein shall supersede or modify the terms of any separate license agreement 155 | you may have executed with Licensor regarding such Contributions. 156 | 157 | 6. Trademarks. This License does not grant permission to use the trade 158 | names, trademarks, service marks, or product names of the Licensor, except 159 | as required for reasonable and customary use in describing the origin of the 160 | Work and reproducing the content of the NOTICE file. 161 | 162 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in 163 | writing, Licensor provides the Work (and each Contributor provides its 164 | Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 165 | KIND, either express or implied, including, without limitation, any 166 | warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or 167 | FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining 168 | the appropriateness of using or redistributing the Work and assume any risks 169 | associated with Your exercise of permissions under this License. 170 | 171 | 8. Limitation of Liability. In no event and under no legal theory, whether 172 | in tort (including negligence), contract, or otherwise, unless required by 173 | applicable law (such as deliberate and grossly negligent acts) or agreed to 174 | in writing, shall any Contributor be liable to You for damages, including 175 | any direct, indirect, special, incidental, or consequential damages of any 176 | character arising as a result of this License or out of the use or inability 177 | to use the Work (including but not limited to damages for loss of goodwill, 178 | work stoppage, computer failure or malfunction, or any and all other 179 | commercial damages or losses), even if such Contributor has been advised of 180 | the possibility of such damages. 181 | 182 | 9. Accepting Warranty or Additional Liability. While redistributing the Work 183 | or Derivative Works thereof, You may choose to offer, and charge a fee for, 184 | acceptance of support, warranty, indemnity, or other liability obligations 185 | and/or rights consistent with this License. However, in accepting such 186 | obligations, You may act only on Your own behalf and on Your sole 187 | responsibility, not on behalf of any other Contributor, and only if You 188 | agree to indemnify, defend, and hold each Contributor harmless for any 189 | liability incurred by, or claims asserted against, such Contributor by 190 | reason of your accepting any such warranty or additional liability. END OF 191 | TERMS AND CONDITIONS 192 | 193 | APPENDIX: How to apply the Apache License to your work. 194 | 195 | To apply the Apache License to your work, attach the following boilerplate 196 | notice, with the fields enclosed by brackets "[]" replaced with your own 197 | identifying information. (Don't include the brackets!) The text should be 198 | enclosed in the appropriate comment syntax for the file format. We also 199 | recommend that a file or class name and description of purpose be included on 200 | the same "printed page" as the copyright notice for easier identification 201 | within third-party archives. 202 | 203 | Copyright [yyyy] [name of copyright owner] 204 | 205 | Licensed under the Apache License, Version 2.0 (the "License"); 206 | 207 | you may not use this file except in compliance with the License. 208 | 209 | You may obtain a copy of the License at 210 | 211 | http://www.apache.org/licenses/LICENSE-2.0 212 | 213 | Unless required by applicable law or agreed to in writing, software 214 | 215 | distributed under the License is distributed on an "AS IS" BASIS, 216 | 217 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 218 | 219 | See the License for the specific language governing permissions and 220 | 221 | limitations under the License. 222 | 223 | * For github.com/aws/amazon-kinesis-firehose-for-fluent-bit see also this 224 | required NOTICE: 225 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 226 | * For github.com/aws/aws-sdk-go see also this required NOTICE: 227 | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 228 | Copyright 2014-2015 Stripe, Inc. 229 | * For github.com/fluent/fluent-bit-go see also this required NOTICE: 230 | Copyright (C) 2015-2017 Treasure Data Inc. 
231 | * For github.com/golang/mock see also this required NOTICE: 232 | Copyright 2010 Google Inc. 233 | * For github.com/google/gofuzz see also this required NOTICE: 234 | Copyright 2014 Google Inc. All rights reserved. 235 | * For github.com/jmespath/go-jmespath see also this required NOTICE: 236 | Copyright 2015 James Saryerwinnie 237 | * For github.com/modern-go/concurrent see also this required NOTICE: 238 | None 239 | * For github.com/modern-go/reflect2 see also this required NOTICE: 240 | None 241 | 242 | ------ 243 | 244 | ** golang.org/x/crypto; version c2843e01d9a2 -- 245 | https://godoc.org/golang.org/x/crypto 246 | Copyright (c) 2009 The Go Authors. All rights reserved. 247 | ** golang.org/x/net; version d8887717615a -- https://godoc.org/golang.org/x/net 248 | Copyright (c) 2009 The Go Authors. All rights reserved. 249 | ** golang.org/x/sync; version 112230192c58 -- 250 | https://godoc.org/golang.org/x/sync 251 | Copyright (c) 2009 The Go Authors. All rights reserved. 252 | ** golang.org/x/text; version v0.3.0 -- https://godoc.org/golang.org/x/text 253 | Copyright (c) 2009 The Go Authors. All rights reserved. 254 | ** golang.org/x/tools; version 36563e24a262 -- 255 | https://godoc.org/golang.org/x/tools 256 | Copyright (c) 2009 The Go Authors. All rights reserved. 257 | 258 | Copyright (c) 2009 The Go Authors. All rights reserved. 259 | 260 | Redistribution and use in source and binary forms, with or without 261 | modification, are permitted provided that the following conditions are 262 | met: 263 | 264 | * Redistributions of source code must retain the above copyright 265 | notice, this list of conditions and the following disclaimer. 266 | * Redistributions in binary form must reproduce the above 267 | copyright notice, this list of conditions and the following disclaimer 268 | in the documentation and/or other materials provided with the 269 | distribution. 270 | * Neither the name of Google Inc. nor the names of its 271 | contributors may be used to endorse or promote products derived from 272 | this software without specific prior written permission. 273 | 274 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 275 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 276 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 277 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 278 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 279 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 280 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 281 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 282 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 283 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 284 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 285 | 286 | ------ 287 | 288 | ** github.com/pmezard/go-difflib; version v1.0.0 -- 289 | https://github.com/pmezard/go-difflib 290 | Copyright (c) 2013, Patrick Mezard 291 | 292 | Copyright (c) 2013, Patrick Mezard 293 | All rights reserved. 294 | 295 | Redistribution and use in source and binary forms, with or without 296 | modification, are permitted provided that the following conditions are 297 | met: 298 | 299 | Redistributions of source code must retain the above copyright 300 | notice, this list of conditions and the following disclaimer. 
301 | Redistributions in binary form must reproduce the above copyright 302 | notice, this list of conditions and the following disclaimer in the 303 | documentation and/or other materials provided with the distribution. 304 | The names of its contributors may not be used to endorse or promote 305 | products derived from this software without specific prior written 306 | permission. 307 | 308 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 309 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 310 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 311 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 312 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 313 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 314 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 315 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 316 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 317 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 318 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 319 | 320 | ------ 321 | 322 | ** github.com/davecgh/go-spew; version v1.1.0 -- 323 | https://github.com/davecgh/go-spew 324 | Copyright (c) 2015-2016 Dave Collins 325 | 326 | ISC License 327 | 328 | Copyright (c) 2012-2016 Dave Collins 329 | 330 | Permission to use, copy, modify, and/or distribute this software for any 331 | purpose with or without fee is hereby granted, provided that the above 332 | copyright notice and this permission notice appear in all copies. 333 | 334 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 335 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 336 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 337 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 338 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 339 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 340 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 341 | 342 | ------ 343 | 344 | ** github.com/stretchr/testify; version v1.2.2 -- 345 | https://github.com/stretchr/testify 346 | Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell 347 | 348 | MIT License 349 | 350 | Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell 351 | 352 | Permission is hereby granted, free of charge, to any person obtaining a copy 353 | of this software and associated documentation files (the "Software"), to deal 354 | in the Software without restriction, including without limitation the rights 355 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 356 | copies of the Software, and to permit persons to whom the Software is 357 | furnished to do so, subject to the following conditions: 358 | 359 | The above copyright notice and this permission notice shall be included in all 360 | copies or substantial portions of the Software. 361 | 362 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 363 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 364 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 365 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 366 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 367 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 368 | SOFTWARE. 369 | 370 | ------ 371 | 372 | ** github.com/sirupsen/logrus; version v1.4.2 -- 373 | https://github.com/sirupsen/logrus 374 | Copyright (c) 2014 Simon Eskildsen 375 | 376 | The MIT License (MIT) 377 | 378 | Copyright (c) 2014 Simon Eskildsen 379 | 380 | Permission is hereby granted, free of charge, to any person obtaining a copy 381 | of this software and associated documentation files (the "Software"), to deal 382 | in the Software without restriction, including without limitation the rights 383 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 384 | copies of the Software, and to permit persons to whom the Software is 385 | furnished to do so, subject to the following conditions: 386 | 387 | The above copyright notice and this permission notice shall be included in 388 | all copies or substantial portions of the Software. 389 | 390 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 391 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 392 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 393 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 394 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 395 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 396 | THE SOFTWARE. 397 | 398 | ------ 399 | 400 | ** github.com/konsorten/go-windows-terminal-sequences; version v1.0.1 -- 401 | https://github.com/konsorten/go-windows-terminal-sequences 402 | Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) 403 | 404 | (The MIT License) 405 | 406 | Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) 407 | 408 | Permission is hereby granted, free of charge, to any person obtaining a copy of 409 | this software and associated documentation files (the 'Software'), to deal in 410 | the Software without restriction, including without limitation the rights to 411 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 412 | of the Software, and to permit persons to whom the Software is furnished to do 413 | so, subject to the following conditions: 414 | 415 | The above copyright notice and this permission notice shall be included in all 416 | copies or substantial portions of the Software. 417 | 418 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 419 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 420 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 421 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 422 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 423 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 424 | SOFTWARE. 425 | 426 | ------ 427 | 428 | ** github.com/ugorji/go; version v1.1.4 -- https://github.com/ugorji/go 429 | Copyright (c) 2012-2015 Ugorji Nwoke. 430 | 431 | The MIT License (MIT) 432 | 433 | Copyright (c) 2012-2015 Ugorji Nwoke. 434 | All rights reserved. 
435 | 
436 | Permission is hereby granted, free of charge, to any person obtaining a copy
437 | of this software and associated documentation files (the "Software"), to deal
438 | in the Software without restriction, including without limitation the rights
439 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
440 | copies of the Software, and to permit persons to whom the Software is
441 | furnished to do so, subject to the following conditions:
442 | 
443 | The above copyright notice and this permission notice shall be included in all
444 | copies or substantial portions of the Software.
445 | 
446 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
447 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
448 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
449 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
450 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
451 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
452 | SOFTWARE.
453 | 
454 | ------
455 | 
456 | ** github.com/json-iterator/go; version v1.1.6 --
457 | https://github.com/json-iterator/go
458 | Copyright (c) 2016 json-iterator
459 | 
460 | MIT License
461 | 
462 | Copyright (c) 2016 json-iterator
463 | 
464 | Permission is hereby granted, free of charge, to any person obtaining a copy
465 | of this software and associated documentation files (the "Software"), to deal
466 | in the Software without restriction, including without limitation the rights
467 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
468 | copies of the Software, and to permit persons to whom the Software is
469 | furnished to do so, subject to the following conditions:
470 | 
471 | The above copyright notice and this permission notice shall be included in all
472 | copies or substantial portions of the Software.
473 | 
474 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
475 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
476 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
477 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
478 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
479 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
480 | SOFTWARE.
481 | 
482 | ------
483 | 
484 | ** github.com/cenkalti/backoff; version v2.1.1 --
485 | https://github.com/cenkalti/backoff
486 | Copyright (c) 2014 Cenk Altı
487 | 
488 | The MIT License (MIT)
489 | 
490 | Copyright (c) 2014 Cenk Altı
491 | 
492 | Permission is hereby granted, free of charge, to any person obtaining a copy of
493 | this software and associated documentation files (the "Software"), to deal in
494 | the Software without restriction, including without limitation the rights to
495 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
496 | of
497 | the Software, and to permit persons to whom the Software is furnished to do so,
498 | subject to the following conditions:
499 | 
500 | The above copyright notice and this permission notice shall be included in all
501 | copies or substantial portions of the Software.
502 | 
503 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
504 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
505 | FITNESS
506 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
507 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
508 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
509 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
510 | 
511 | ------
512 | 
513 | ** github.com/stretchr/objx; version v0.1.0 -- https://github.com/stretchr/objx
514 | Copyright (c) 2014 Stretchr, Inc.
515 | Copyright (c) 2017-2018 objx contributors
516 | 
517 | The MIT License
518 | 
519 | Copyright (c) 2014 Stretchr, Inc.
520 | Copyright (c) 2017-2018 objx contributors
521 | 
522 | Permission is hereby granted, free of charge, to any person obtaining a copy
523 | of this software and associated documentation files (the "Software"), to deal
524 | in the Software without restriction, including without limitation the rights
525 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
526 | copies of the Software, and to permit persons to whom the Software is
527 | furnished to do so, subject to the following conditions:
528 | 
529 | The above copyright notice and this permission notice shall be included in all
530 | copies or substantial portions of the Software.
531 | 
532 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
533 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
534 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
535 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
536 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
537 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
538 | SOFTWARE.
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 1.10.2
2 | 
--------------------------------------------------------------------------------
/aggregate/Readme.md:
--------------------------------------------------------------------------------
1 | # Aggregation
2 | 
3 | This module implements KPL record aggregation.
4 | 
5 | https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md
6 | 
7 | 
8 | ## Generating the aggregate.pb.go file from aggregate.proto
9 | 
10 | ### Install protoc
11 | 
12 | https://developers.google.com/protocol-buffers/docs/downloads
13 | 
14 | 
15 | ### Install protoc-gen-go
16 | 
17 | go get google.golang.org/protobuf/cmd/protoc-gen-go
18 | go install google.golang.org/protobuf/cmd/protoc-gen-go
19 | 
20 | 
21 | ### Install protobuf go library
22 | 
23 | go get github.com/golang/protobuf
24 | 
25 | 
26 | ## Generating the protobuf go code
27 | 
28 | protoc -I=. --go_out=. aggregate.proto
29 | 
--------------------------------------------------------------------------------
/aggregate/aggregate.pb.go:
--------------------------------------------------------------------------------
1 | // Code generated by protoc-gen-go. DO NOT EDIT.
2 | // versions: 3 | // protoc-gen-go v1.25.0-devel 4 | // protoc v3.12.3 5 | // source: aggregate/aggregate.proto 6 | 7 | package aggregate 8 | 9 | import ( 10 | proto "github.com/golang/protobuf/proto" 11 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 12 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 13 | reflect "reflect" 14 | sync "sync" 15 | ) 16 | 17 | const ( 18 | // Verify that this generated code is sufficiently up-to-date. 19 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 20 | // Verify that runtime/protoimpl is sufficiently up-to-date. 21 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 22 | ) 23 | 24 | // This is a compile-time assertion that a sufficiently up-to-date version 25 | // of the legacy proto package is being used. 26 | const _ = proto.ProtoPackageIsVersion4 27 | 28 | type AggregatedRecord struct { 29 | state protoimpl.MessageState 30 | sizeCache protoimpl.SizeCache 31 | unknownFields protoimpl.UnknownFields 32 | 33 | PartitionKeyTable []string `protobuf:"bytes,1,rep,name=partition_key_table,json=partitionKeyTable" json:"partition_key_table,omitempty"` 34 | ExplicitHashKeyTable []string `protobuf:"bytes,2,rep,name=explicit_hash_key_table,json=explicitHashKeyTable" json:"explicit_hash_key_table,omitempty"` 35 | Records []*Record `protobuf:"bytes,3,rep,name=records" json:"records,omitempty"` 36 | } 37 | 38 | func (x *AggregatedRecord) Reset() { 39 | *x = AggregatedRecord{} 40 | if protoimpl.UnsafeEnabled { 41 | mi := &file_aggregate_aggregate_proto_msgTypes[0] 42 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 43 | ms.StoreMessageInfo(mi) 44 | } 45 | } 46 | 47 | func (x *AggregatedRecord) String() string { 48 | return protoimpl.X.MessageStringOf(x) 49 | } 50 | 51 | func (*AggregatedRecord) ProtoMessage() {} 52 | 53 | func (x *AggregatedRecord) ProtoReflect() protoreflect.Message { 54 | mi := &file_aggregate_aggregate_proto_msgTypes[0] 55 | if protoimpl.UnsafeEnabled && x != nil { 56 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 57 | if ms.LoadMessageInfo() == nil { 58 | ms.StoreMessageInfo(mi) 59 | } 60 | return ms 61 | } 62 | return mi.MessageOf(x) 63 | } 64 | 65 | // Deprecated: Use AggregatedRecord.ProtoReflect.Descriptor instead. 
66 | func (*AggregatedRecord) Descriptor() ([]byte, []int) { 67 | return file_aggregate_aggregate_proto_rawDescGZIP(), []int{0} 68 | } 69 | 70 | func (x *AggregatedRecord) GetPartitionKeyTable() []string { 71 | if x != nil { 72 | return x.PartitionKeyTable 73 | } 74 | return nil 75 | } 76 | 77 | func (x *AggregatedRecord) GetExplicitHashKeyTable() []string { 78 | if x != nil { 79 | return x.ExplicitHashKeyTable 80 | } 81 | return nil 82 | } 83 | 84 | func (x *AggregatedRecord) GetRecords() []*Record { 85 | if x != nil { 86 | return x.Records 87 | } 88 | return nil 89 | } 90 | 91 | type Tag struct { 92 | state protoimpl.MessageState 93 | sizeCache protoimpl.SizeCache 94 | unknownFields protoimpl.UnknownFields 95 | 96 | Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` 97 | Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` 98 | } 99 | 100 | func (x *Tag) Reset() { 101 | *x = Tag{} 102 | if protoimpl.UnsafeEnabled { 103 | mi := &file_aggregate_aggregate_proto_msgTypes[1] 104 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 105 | ms.StoreMessageInfo(mi) 106 | } 107 | } 108 | 109 | func (x *Tag) String() string { 110 | return protoimpl.X.MessageStringOf(x) 111 | } 112 | 113 | func (*Tag) ProtoMessage() {} 114 | 115 | func (x *Tag) ProtoReflect() protoreflect.Message { 116 | mi := &file_aggregate_aggregate_proto_msgTypes[1] 117 | if protoimpl.UnsafeEnabled && x != nil { 118 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 119 | if ms.LoadMessageInfo() == nil { 120 | ms.StoreMessageInfo(mi) 121 | } 122 | return ms 123 | } 124 | return mi.MessageOf(x) 125 | } 126 | 127 | // Deprecated: Use Tag.ProtoReflect.Descriptor instead. 128 | func (*Tag) Descriptor() ([]byte, []int) { 129 | return file_aggregate_aggregate_proto_rawDescGZIP(), []int{1} 130 | } 131 | 132 | func (x *Tag) GetKey() string { 133 | if x != nil && x.Key != nil { 134 | return *x.Key 135 | } 136 | return "" 137 | } 138 | 139 | func (x *Tag) GetValue() string { 140 | if x != nil && x.Value != nil { 141 | return *x.Value 142 | } 143 | return "" 144 | } 145 | 146 | type Record struct { 147 | state protoimpl.MessageState 148 | sizeCache protoimpl.SizeCache 149 | unknownFields protoimpl.UnknownFields 150 | 151 | PartitionKeyIndex *uint64 `protobuf:"varint,1,req,name=partition_key_index,json=partitionKeyIndex" json:"partition_key_index,omitempty"` 152 | ExplicitHashKeyIndex *uint64 `protobuf:"varint,2,opt,name=explicit_hash_key_index,json=explicitHashKeyIndex" json:"explicit_hash_key_index,omitempty"` 153 | Data []byte `protobuf:"bytes,3,req,name=data" json:"data,omitempty"` 154 | Tags []*Tag `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"` 155 | } 156 | 157 | func (x *Record) Reset() { 158 | *x = Record{} 159 | if protoimpl.UnsafeEnabled { 160 | mi := &file_aggregate_aggregate_proto_msgTypes[2] 161 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 162 | ms.StoreMessageInfo(mi) 163 | } 164 | } 165 | 166 | func (x *Record) String() string { 167 | return protoimpl.X.MessageStringOf(x) 168 | } 169 | 170 | func (*Record) ProtoMessage() {} 171 | 172 | func (x *Record) ProtoReflect() protoreflect.Message { 173 | mi := &file_aggregate_aggregate_proto_msgTypes[2] 174 | if protoimpl.UnsafeEnabled && x != nil { 175 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 176 | if ms.LoadMessageInfo() == nil { 177 | ms.StoreMessageInfo(mi) 178 | } 179 | return ms 180 | } 181 | return mi.MessageOf(x) 182 | } 183 | 184 | // Deprecated: Use Record.ProtoReflect.Descriptor instead. 
185 | func (*Record) Descriptor() ([]byte, []int) { 186 | return file_aggregate_aggregate_proto_rawDescGZIP(), []int{2} 187 | } 188 | 189 | func (x *Record) GetPartitionKeyIndex() uint64 { 190 | if x != nil && x.PartitionKeyIndex != nil { 191 | return *x.PartitionKeyIndex 192 | } 193 | return 0 194 | } 195 | 196 | func (x *Record) GetExplicitHashKeyIndex() uint64 { 197 | if x != nil && x.ExplicitHashKeyIndex != nil { 198 | return *x.ExplicitHashKeyIndex 199 | } 200 | return 0 201 | } 202 | 203 | func (x *Record) GetData() []byte { 204 | if x != nil { 205 | return x.Data 206 | } 207 | return nil 208 | } 209 | 210 | func (x *Record) GetTags() []*Tag { 211 | if x != nil { 212 | return x.Tags 213 | } 214 | return nil 215 | } 216 | 217 | var File_aggregate_aggregate_proto protoreflect.FileDescriptor 218 | 219 | var file_aggregate_aggregate_proto_rawDesc = []byte{ 220 | 0x0a, 0x19, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2f, 0x61, 0x67, 0x67, 0x72, 221 | 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x61, 0x67, 0x67, 222 | 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x22, 0xa6, 0x01, 0x0a, 0x10, 0x41, 0x67, 0x67, 0x72, 0x65, 223 | 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x70, 224 | 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x74, 0x61, 0x62, 225 | 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 226 | 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x65, 227 | 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x6b, 0x65, 0x79, 228 | 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x65, 0x78, 229 | 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4b, 0x65, 0x79, 0x54, 0x61, 0x62, 230 | 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 231 | 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 232 | 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 233 | 0x2d, 0x0a, 0x03, 0x54, 0x61, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 234 | 0x02, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 235 | 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xa7, 236 | 0x01, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x61, 0x72, 237 | 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 238 | 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 239 | 0x6e, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x35, 0x0a, 0x17, 0x65, 0x78, 0x70, 240 | 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 241 | 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x65, 0x78, 0x70, 0x6c, 242 | 0x69, 0x63, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 243 | 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x02, 0x28, 0x0c, 0x52, 0x04, 244 | 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 245 | 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x54, 246 | 0x61, 0x67, 0x52, 0x04, 
0x74, 0x61, 0x67, 0x73, 247 | } 248 | 249 | var ( 250 | file_aggregate_aggregate_proto_rawDescOnce sync.Once 251 | file_aggregate_aggregate_proto_rawDescData = file_aggregate_aggregate_proto_rawDesc 252 | ) 253 | 254 | func file_aggregate_aggregate_proto_rawDescGZIP() []byte { 255 | file_aggregate_aggregate_proto_rawDescOnce.Do(func() { 256 | file_aggregate_aggregate_proto_rawDescData = protoimpl.X.CompressGZIP(file_aggregate_aggregate_proto_rawDescData) 257 | }) 258 | return file_aggregate_aggregate_proto_rawDescData 259 | } 260 | 261 | var file_aggregate_aggregate_proto_msgTypes = make([]protoimpl.MessageInfo, 3) 262 | var file_aggregate_aggregate_proto_goTypes = []interface{}{ 263 | (*AggregatedRecord)(nil), // 0: aggregate.AggregatedRecord 264 | (*Tag)(nil), // 1: aggregate.Tag 265 | (*Record)(nil), // 2: aggregate.Record 266 | } 267 | var file_aggregate_aggregate_proto_depIdxs = []int32{ 268 | 2, // 0: aggregate.AggregatedRecord.records:type_name -> aggregate.Record 269 | 1, // 1: aggregate.Record.tags:type_name -> aggregate.Tag 270 | 2, // [2:2] is the sub-list for method output_type 271 | 2, // [2:2] is the sub-list for method input_type 272 | 2, // [2:2] is the sub-list for extension type_name 273 | 2, // [2:2] is the sub-list for extension extendee 274 | 0, // [0:2] is the sub-list for field type_name 275 | } 276 | 277 | func init() { file_aggregate_aggregate_proto_init() } 278 | func file_aggregate_aggregate_proto_init() { 279 | if File_aggregate_aggregate_proto != nil { 280 | return 281 | } 282 | if !protoimpl.UnsafeEnabled { 283 | file_aggregate_aggregate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 284 | switch v := v.(*AggregatedRecord); i { 285 | case 0: 286 | return &v.state 287 | case 1: 288 | return &v.sizeCache 289 | case 2: 290 | return &v.unknownFields 291 | default: 292 | return nil 293 | } 294 | } 295 | file_aggregate_aggregate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 296 | switch v := v.(*Tag); i { 297 | case 0: 298 | return &v.state 299 | case 1: 300 | return &v.sizeCache 301 | case 2: 302 | return &v.unknownFields 303 | default: 304 | return nil 305 | } 306 | } 307 | file_aggregate_aggregate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { 308 | switch v := v.(*Record); i { 309 | case 0: 310 | return &v.state 311 | case 1: 312 | return &v.sizeCache 313 | case 2: 314 | return &v.unknownFields 315 | default: 316 | return nil 317 | } 318 | } 319 | } 320 | type x struct{} 321 | out := protoimpl.TypeBuilder{ 322 | File: protoimpl.DescBuilder{ 323 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 324 | RawDescriptor: file_aggregate_aggregate_proto_rawDesc, 325 | NumEnums: 0, 326 | NumMessages: 3, 327 | NumExtensions: 0, 328 | NumServices: 0, 329 | }, 330 | GoTypes: file_aggregate_aggregate_proto_goTypes, 331 | DependencyIndexes: file_aggregate_aggregate_proto_depIdxs, 332 | MessageInfos: file_aggregate_aggregate_proto_msgTypes, 333 | }.Build() 334 | File_aggregate_aggregate_proto = out.File 335 | file_aggregate_aggregate_proto_rawDesc = nil 336 | file_aggregate_aggregate_proto_goTypes = nil 337 | file_aggregate_aggregate_proto_depIdxs = nil 338 | } 339 | -------------------------------------------------------------------------------- /aggregate/aggregate.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | package aggregate; 3 | 4 | message AggregatedRecord { 5 | repeated string partition_key_table = 1; 6 | repeated string 
explicit_hash_key_table = 2; 7 | repeated Record records = 3; 8 | } 9 | 10 | message Tag { 11 | required string key = 1; 12 | optional string value = 2; 13 | } 14 | 15 | message Record { 16 | required uint64 partition_key_index = 1; 17 | optional uint64 explicit_hash_key_index = 2; 18 | required bytes data = 3; 19 | repeated Tag tags = 4; 20 | } 21 | -------------------------------------------------------------------------------- /aggregate/aggregator.go: -------------------------------------------------------------------------------- 1 | package aggregate 2 | 3 | import ( 4 | "crypto/md5" 5 | "fmt" 6 | 7 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/util" 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/service/kinesis" 10 | "github.com/sirupsen/logrus" 11 | "google.golang.org/protobuf/encoding/protowire" 12 | "google.golang.org/protobuf/proto" 13 | ) 14 | 15 | var ( 16 | // Magic number for KCL aggregated records. See this for details: https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md 17 | kclMagicNumber = []byte{0xF3, 0x89, 0x9A, 0xC2} 18 | kclMagicNumberLen = len(kclMagicNumber) 19 | ) 20 | 21 | const ( 22 | maximumRecordSize = 1024 * 1024 // 1 MB 23 | defaultMaxAggRecordSize = 20 * 1024 // 20K 24 | initialAggRecordSize = 0 25 | fieldNumberSize = 1 // All field numbers are below 16, meaning they will only take up 1 byte 26 | ) 27 | 28 | // Aggregator kinesis aggregator 29 | type Aggregator struct { 30 | partitionKeys map[string]uint64 31 | records []*Record 32 | aggSize int // Size of both records, and partitionKeys in bytes 33 | maxAggRecordSize int 34 | stringGen *util.RandomStringGenerator 35 | } 36 | 37 | // NewAggregator create a new aggregator 38 | func NewAggregator(stringGen *util.RandomStringGenerator) *Aggregator { 39 | 40 | return &Aggregator{ 41 | partitionKeys: make(map[string]uint64, 0), 42 | records: make([]*Record, 0), 43 | maxAggRecordSize: defaultMaxAggRecordSize, 44 | aggSize: initialAggRecordSize, 45 | stringGen: stringGen, 46 | } 47 | } 48 | 49 | // AddRecord to the aggregate buffer. 50 | // Will return a kinesis PutRecordsRequest once buffer is full, or if the data exceeds the aggregate limit. 51 | func (a *Aggregator) AddRecord(partitionKey string, hasPartitionKey bool, data []byte) (entry *kinesis.PutRecordsRequestEntry, err error) { 52 | 53 | if hasPartitionKey { 54 | partitionKeySize := len([]byte(partitionKey)) 55 | if partitionKeySize < 1 { 56 | return nil, fmt.Errorf("Invalid partition key provided") 57 | } 58 | } 59 | 60 | dataSize := len(data) 61 | 62 | // If this is a very large record, then don't aggregate it. 
63 | 	if dataSize >= a.maxAggRecordSize {
64 | 		if !hasPartitionKey {
65 | 			partitionKey = a.stringGen.RandomString()
66 | 		}
67 | 		return &kinesis.PutRecordsRequestEntry{
68 | 			Data:         data,
69 | 			PartitionKey: aws.String(partitionKey),
70 | 		}, nil
71 | 	}
72 | 
73 | 	if !hasPartitionKey {
74 | 		if len(a.partitionKeys) > 0 {
75 | 			// Take any partition key from the map, as long as one exists
76 | 			for k := range a.partitionKeys {
77 | 				partitionKey = k
78 | 				break
79 | 			}
80 | 		} else {
81 | 			partitionKey = a.stringGen.RandomString()
82 | 		}
83 | 	}
84 | 
85 | 	// Check if we need to add a new partition key, and if we do how much space it will take
86 | 	pKeyIdx, pKeyAddedSize := a.checkPartitionKey(partitionKey)
87 | 
88 | 	// data field size is proto size of data + data field number size
89 | 	// partition key field size is varint of index size + field number size
90 | 	dataFieldSize := protowire.SizeBytes(dataSize) + fieldNumberSize
91 | 	pkeyFieldSize := protowire.SizeVarint(pKeyIdx) + fieldNumberSize
92 | 	// Total size is byte size of data + pkey field + field number of parent proto
93 | 
94 | 	if a.getSize()+protowire.SizeBytes(dataFieldSize+pkeyFieldSize)+fieldNumberSize+pKeyAddedSize >= maximumRecordSize {
95 | 		// Aggregate records, and return if error
96 | 		entry, err = a.AggregateRecords()
97 | 		if err != nil {
98 | 			return entry, err
99 | 		}
100 | 
101 | 		if !hasPartitionKey {
102 | 			// choose a new partition key if needed now that we've aggregated the previous records
103 | 			partitionKey = a.stringGen.RandomString()
104 | 		}
105 | 		// Recompute field size, since it changed
106 | 		pKeyIdx, _ = a.checkPartitionKey(partitionKey)
107 | 		pkeyFieldSize = protowire.SizeVarint(pKeyIdx) + fieldNumberSize
108 | 	}
109 | 
110 | 	// Add new record, and update aggSize
111 | 	partitionKeyIndex := a.addPartitionKey(partitionKey)
112 | 
113 | 	a.records = append(a.records, &Record{
114 | 		Data:              data,
115 | 		PartitionKeyIndex: &partitionKeyIndex,
116 | 	})
117 | 
118 | 	a.aggSize += protowire.SizeBytes(dataFieldSize+pkeyFieldSize) + fieldNumberSize
119 | 
120 | 	return entry, err
121 | }
122 | 
123 | // AggregateRecords will flush proto-buffered records into a put request
124 | func (a *Aggregator) AggregateRecords() (entry *kinesis.PutRecordsRequestEntry, err error) {
125 | 
126 | 	if len(a.records) == 0 {
127 | 		return nil, nil
128 | 	}
129 | 
130 | 	pkeys := a.getPartitionKeys()
131 | 
132 | 	agg := &AggregatedRecord{
133 | 		PartitionKeyTable: pkeys,
134 | 		Records:           a.records,
135 | 	}
136 | 
137 | 	protoBufData, err := proto.Marshal(agg)
138 | 	if err != nil {
139 | 		logrus.Errorf("Failed to encode record: %v", err)
140 | 		return nil, err
141 | 	}
142 | 
143 | 	md5Sum := md5.New()
144 | 	md5Sum.Write(protoBufData)
145 | 	md5CheckSum := md5Sum.Sum(nil)
146 | 
147 | 	kclData := append(kclMagicNumber, protoBufData...)
148 | 	kclData = append(kclData, md5CheckSum...)
149 | 150 | logrus.Debugf("[kinesis ] Aggregated (%d) records of size (%d) with total size (%d), partition key (%s)\n", len(a.records), a.getSize(), len(kclData), pkeys[0]) 151 | 152 | // Clear buffer if aggregation didn't fail 153 | a.clearBuffer() 154 | 155 | return &kinesis.PutRecordsRequestEntry{ 156 | Data: kclData, 157 | PartitionKey: aws.String(pkeys[0]), 158 | }, nil 159 | } 160 | 161 | // GetRecordCount gets number of buffered records 162 | func (a *Aggregator) GetRecordCount() int { 163 | return len(a.records) 164 | } 165 | 166 | func (a *Aggregator) addPartitionKey(partitionKey string) uint64 { 167 | 168 | if idx, ok := a.partitionKeys[partitionKey]; ok { 169 | return idx 170 | } 171 | 172 | idx := uint64(len(a.partitionKeys)) 173 | a.partitionKeys[partitionKey] = idx 174 | 175 | partitionKeyLen := len([]byte(partitionKey)) 176 | a.aggSize += protowire.SizeBytes(partitionKeyLen) + fieldNumberSize 177 | return idx 178 | } 179 | 180 | func (a *Aggregator) checkPartitionKey(partitionKey string) (uint64, int) { 181 | if idx, ok := a.partitionKeys[partitionKey]; ok { 182 | return idx, 0 183 | } 184 | 185 | idx := uint64(len(a.partitionKeys)) 186 | partitionKeyLen := len([]byte(partitionKey)) 187 | return idx, protowire.SizeBytes(partitionKeyLen) + fieldNumberSize 188 | } 189 | 190 | func (a *Aggregator) getPartitionKeys() []string { 191 | keys := make([]string, 0) 192 | for pk := range a.partitionKeys { 193 | keys = append(keys, pk) 194 | } 195 | return keys 196 | } 197 | 198 | // getSize of protobuf records, partitionKeys, magicNumber, and md5sum in bytes 199 | func (a *Aggregator) getSize() int { 200 | return kclMagicNumberLen + md5.Size + a.aggSize 201 | } 202 | 203 | func (a *Aggregator) clearBuffer() { 204 | a.partitionKeys = make(map[string]uint64, 0) 205 | a.records = make([]*Record, 0) 206 | a.aggSize = initialAggRecordSize 207 | } 208 | -------------------------------------------------------------------------------- /aggregate/aggregator_test.go: -------------------------------------------------------------------------------- 1 | package aggregate 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/util" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | const concurrencyRetryLimit = 4 11 | 12 | func TestAddRecordCalculatesCorrectSize(t *testing.T) { 13 | generator := util.NewRandomStringGenerator(18) 14 | aggregator := NewAggregator(generator) 15 | 16 | _, err := aggregator.AddRecord("", false, []byte("test value")) 17 | assert.Equal(t, nil, err, "Expected aggregator not to return error") 18 | assert.Equal(t, 36, aggregator.aggSize, "Expected aggregator to compute correct size") 19 | 20 | _, err = aggregator.AddRecord("test partition key 2", true, []byte("test value 2")) 21 | assert.Equal(t, nil, err, "Expected aggregator not to return error") 22 | assert.Equal(t, 76, aggregator.aggSize, "Expected aggregator to compute correct size") 23 | } 24 | 25 | func TestAddRecordDoesNotAddNewRandomPartitionKey(t *testing.T) { 26 | generator := util.NewRandomStringGenerator(18) 27 | aggregator := NewAggregator(generator) 28 | 29 | _, err := aggregator.AddRecord("", false, []byte("test value")) 30 | assert.Equal(t, nil, err, "Expected aggregator not to return error") 31 | assert.Equal(t, 36, aggregator.aggSize, "Expected aggregator to compute correct size") 32 | 33 | _, err = aggregator.AddRecord("", false, []byte("test value 2")) 34 | assert.Equal(t, nil, err, "Expected aggregator not to return error") 35 | assert.Equal(t, 1, 
len(aggregator.partitionKeys), "Expected aggregator to reuse partitionKey value") 36 | } 37 | -------------------------------------------------------------------------------- /fluent-bit-kinesis.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | // not use this file except in compliance with the License. A copy of the 5 | // License is located at 6 | // 7 | // http://aws.amazon.com/apache2.0/ 8 | // 9 | // or in the "license" file accompanying this file. This file is distributed 10 | // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 | // express or implied. See the License for the specific language governing 12 | // permissions and limitations under the License. 13 | 14 | package main 15 | 16 | import ( 17 | "C" 18 | "fmt" 19 | "strconv" 20 | "strings" 21 | "time" 22 | "unsafe" 23 | 24 | "github.com/aws/amazon-kinesis-firehose-for-fluent-bit/plugins" 25 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/kinesis" 26 | kinesisAPI "github.com/aws/aws-sdk-go/service/kinesis" 27 | "github.com/fluent/fluent-bit-go/output" 28 | "github.com/sirupsen/logrus" 29 | ) 30 | 31 | const ( 32 | // Kinesis API Limit https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.PutRecords 33 | maximumRecordsPerPut = 500 34 | maximumConcurrency = 10 35 | defaultConcurrentRetries = 4 36 | ) 37 | 38 | var ( 39 | pluginInstances []*kinesis.OutputPlugin 40 | ) 41 | 42 | func addPluginInstance(ctx unsafe.Pointer) error { 43 | pluginID := len(pluginInstances) 44 | output.FLBPluginSetContext(ctx, pluginID) 45 | instance, err := newKinesisOutput(ctx, pluginID) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | pluginInstances = append(pluginInstances, instance) 51 | return nil 52 | } 53 | 54 | func getPluginInstance(ctx unsafe.Pointer) *kinesis.OutputPlugin { 55 | pluginID := output.FLBPluginGetContext(ctx).(int) 56 | return pluginInstances[pluginID] 57 | } 58 | 59 | func newKinesisOutput(ctx unsafe.Pointer, pluginID int) (*kinesis.OutputPlugin, error) { 60 | stream := output.FLBPluginConfigKey(ctx, "stream") 61 | logrus.Infof("[kinesis %d] plugin parameter stream = '%s'", pluginID, stream) 62 | region := output.FLBPluginConfigKey(ctx, "region") 63 | logrus.Infof("[kinesis %d] plugin parameter region = '%s'", pluginID, region) 64 | dataKeys := output.FLBPluginConfigKey(ctx, "data_keys") 65 | logrus.Infof("[kinesis %d] plugin parameter data_keys = '%s'", pluginID, dataKeys) 66 | partitionKey := output.FLBPluginConfigKey(ctx, "partition_key") 67 | logrus.Infof("[kinesis %d] plugin parameter partition_key = '%s'", pluginID, partitionKey) 68 | roleARN := output.FLBPluginConfigKey(ctx, "role_arn") 69 | logrus.Infof("[kinesis %d] plugin parameter role_arn = '%s'", pluginID, roleARN) 70 | kinesisEndpoint := output.FLBPluginConfigKey(ctx, "endpoint") 71 | logrus.Infof("[kinesis %d] plugin parameter endpoint = '%s'", pluginID, kinesisEndpoint) 72 | stsEndpoint := output.FLBPluginConfigKey(ctx, "sts_endpoint") 73 | logrus.Infof("[kinesis %d] plugin parameter sts_endpoint = '%s'", pluginID, stsEndpoint) 74 | appendNewline := output.FLBPluginConfigKey(ctx, "append_newline") 75 | logrus.Infof("[kinesis %d] plugin parameter append_newline = %s", pluginID, appendNewline) 76 | timeKey := output.FLBPluginConfigKey(ctx, "time_key") 77 | logrus.Infof("[kinesis %d] plugin parameter 
time_key = '%s'", pluginID, timeKey) 78 | timeKeyFmt := output.FLBPluginConfigKey(ctx, "time_key_format") 79 | logrus.Infof("[kinesis %d] plugin parameter time_key_format = '%s'", pluginID, timeKeyFmt) 80 | concurrency := output.FLBPluginConfigKey(ctx, "experimental_concurrency") 81 | logrus.Infof("[kinesis %d] plugin parameter experimental_concurrency = '%s'", pluginID, concurrency) 82 | concurrencyRetries := output.FLBPluginConfigKey(ctx, "experimental_concurrency_retries") 83 | logrus.Infof("[kinesis %d] plugin parameter experimental_concurrency_retries = '%s'", pluginID, concurrencyRetries) 84 | logKey := output.FLBPluginConfigKey(ctx, "log_key") 85 | logrus.Infof("[kinesis %d] plugin parameter log_key = '%s'", pluginID, logKey) 86 | aggregation := output.FLBPluginConfigKey(ctx, "aggregation") 87 | logrus.Infof("[kinesis %d] plugin parameter aggregation = '%s'", pluginID, aggregation) 88 | compression := output.FLBPluginConfigKey(ctx, "compression") 89 | logrus.Infof("[kinesis %d] plugin parameter compression = '%s'", pluginID, compression) 90 | replaceDots := output.FLBPluginConfigKey(ctx, "replace_dots") 91 | logrus.Infof("[kinesis %d] plugin parameter replace_dots = '%s'", pluginID, replaceDots) 92 | httpRequestTimeout := output.FLBPluginConfigKey(ctx, "http_request_timeout") 93 | logrus.Infof("[kinesis %d] plugin parameter http_request_timeout = '%s'", pluginID, httpRequestTimeout) 94 | 95 | if stream == "" || region == "" { 96 | return nil, fmt.Errorf("[kinesis %d] stream and region are required configuration parameters", pluginID) 97 | } 98 | 99 | if partitionKey == "log" { 100 | return nil, fmt.Errorf("[kinesis %d] 'log' cannot be set as the partition key", pluginID) 101 | } 102 | 103 | if partitionKey == "" { 104 | logrus.Infof("[kinesis %d] no partition key provided. A random one will be generated.", pluginID) 105 | } 106 | 107 | appendNL := false 108 | if strings.ToLower(appendNewline) == "true" { 109 | appendNL = true 110 | } 111 | 112 | isAggregate := false 113 | if strings.ToLower(aggregation) == "true" { 114 | isAggregate = true 115 | } 116 | 117 | if isAggregate && partitionKey != "" { 118 | logrus.Warnf("[kinesis %d] 'partition_key' has different behavior when 'aggregation' enabled. All aggregated records will use a partition key sourced from the first record in the batch", pluginID) 119 | } 120 | 121 | var concurrencyInt, concurrencyRetriesInt int 122 | var err error 123 | if concurrency != "" { 124 | concurrencyInt, err = parseNonNegativeConfig("experimental_concurrency", concurrency, pluginID) 125 | if err != nil { 126 | return nil, err 127 | } 128 | 129 | if concurrencyInt > maximumConcurrency { 130 | return nil, fmt.Errorf("[kinesis %d] Invalid 'experimental_concurrency' value (%s) specified, must be less than or equal to %d", pluginID, concurrency, maximumConcurrency) 131 | } 132 | 133 | if concurrencyInt > 0 { 134 | logrus.Warnf("[kinesis %d] WARNING: Enabling concurrency can lead to data loss. 
If 'experimental_concurrency_retries' is reached data will be lost.", pluginID) 135 | } 136 | } 137 | 138 | if concurrencyRetries != "" { 139 | concurrencyRetriesInt, err = parseNonNegativeConfig("experimental_concurrency_retries", concurrencyRetries, pluginID) 140 | if err != nil { 141 | return nil, err 142 | } 143 | } else { 144 | concurrencyRetriesInt = defaultConcurrentRetries 145 | } 146 | 147 | var comp kinesis.CompressionType 148 | if strings.ToLower(compression) == string(kinesis.CompressionZlib) { 149 | comp = kinesis.CompressionZlib 150 | } else if strings.ToLower(compression) == string(kinesis.CompressionGzip) { 151 | comp = kinesis.CompressionGzip 152 | } else if strings.ToLower(compression) == string(kinesis.CompressionNone) || compression == "" { 153 | comp = kinesis.CompressionNone 154 | } else { 155 | return nil, fmt.Errorf("[kinesis %d] Invalid 'compression' value (%s) specified, must be 'zlib', 'gzip', 'none', or undefined", pluginID, compression) 156 | } 157 | 158 | var httpRequestTimeoutDuration time.Duration 159 | if httpRequestTimeout != "" { 160 | httpRequestTimeoutInt, err := parseNonNegativeConfig("http_request_timeout", httpRequestTimeout, pluginID) 161 | if err != nil { 162 | return nil, err 163 | } 164 | httpRequestTimeoutDuration = time.Duration(httpRequestTimeoutInt) * time.Second 165 | } 166 | 167 | return kinesis.NewOutputPlugin(region, stream, dataKeys, partitionKey, roleARN, kinesisEndpoint, stsEndpoint, timeKey, timeKeyFmt, logKey, replaceDots, concurrencyInt, concurrencyRetriesInt, isAggregate, appendNL, comp, pluginID, httpRequestTimeoutDuration) 168 | } 169 | 170 | func parseNonNegativeConfig(configName string, configValue string, pluginID int) (int, error) { 171 | configValueInt, err := strconv.Atoi(configValue) 172 | if err != nil { 173 | return 0, fmt.Errorf("[kinesis %d] Invalid '%s' value (%s) specified: %v", pluginID, configName, configValue, err) 174 | } 175 | if configValueInt < 0 { 176 | return 0, fmt.Errorf("[kinesis %d] Invalid '%s' value (%s) specified, must be a non-negative number", pluginID, configName, configValue) 177 | } 178 | return configValueInt, nil 179 | } 180 | 181 | // The "export" comments have syntactic meaning 182 | // This is how the compiler knows a function should be callable from the C code 183 | 184 | //export FLBPluginRegister 185 | func FLBPluginRegister(ctx unsafe.Pointer) int { 186 | return output.FLBPluginRegister(ctx, "kinesis", "Amazon Kinesis Data Streams Fluent Bit Plugin.") 187 | } 188 | 189 | //export FLBPluginInit 190 | func FLBPluginInit(ctx unsafe.Pointer) int { 191 | plugins.SetupLogger() 192 | err := addPluginInstance(ctx) 193 | if err != nil { 194 | logrus.Errorf("[kinesis] Failed to initialize plugin: %v\n", err) 195 | return output.FLB_ERROR 196 | } 197 | return output.FLB_OK 198 | } 199 | 200 | //export FLBPluginFlushCtx 201 | func FLBPluginFlushCtx(ctx, data unsafe.Pointer, length C.int, tag *C.char) int { 202 | kinesisOutput := getPluginInstance(ctx) 203 | 204 | fluentTag := C.GoString(tag) 205 | 206 | events, count, retCode := unpackRecords(kinesisOutput, data, length) 207 | if retCode != output.FLB_OK { 208 | logrus.Errorf("[kinesis %d] failed to unpackRecords with tag: %s\n", kinesisOutput.PluginID, fluentTag) 209 | 210 | return retCode 211 | } 212 | 213 | logrus.Debugf("[kinesis %d] Flushing %d logs with tag: %s\n", kinesisOutput.PluginID, count, fluentTag) 214 | if kinesisOutput.Concurrency > 0 { 215 | return kinesisOutput.FlushConcurrent(count, events) 216 | } 217 | 218 | return 
kinesisOutput.Flush(&events) 219 | } 220 | 221 | func unpackRecords(kinesisOutput *kinesis.OutputPlugin, data unsafe.Pointer, length C.int) ([]*kinesisAPI.PutRecordsRequestEntry, int, int) { 222 | var ret int 223 | var ts interface{} 224 | var timestamp time.Time 225 | var record map[interface{}]interface{} 226 | count := 0 227 | 228 | records := make([]*kinesisAPI.PutRecordsRequestEntry, 0, maximumRecordsPerPut) 229 | 230 | // Create Fluent Bit decoder 231 | dec := output.NewDecoder(data, int(length)) 232 | 233 | for { 234 | //Extract Record 235 | ret, ts, record = output.GetRecord(dec) 236 | if ret != 0 { 237 | break 238 | } 239 | 240 | switch tts := ts.(type) { 241 | case output.FLBTime: 242 | timestamp = tts.Time 243 | case uint64: 244 | // when ts is of type uint64 it appears to 245 | // be the amount of seconds since unix epoch. 246 | timestamp = time.Unix(int64(tts), 0) 247 | default: 248 | timestamp = time.Now() 249 | } 250 | 251 | retCode := kinesisOutput.AddRecord(&records, record, ×tamp) 252 | if retCode != output.FLB_OK { 253 | return nil, 0, retCode 254 | } 255 | 256 | count++ 257 | } 258 | 259 | if kinesisOutput.IsAggregate() { 260 | retCode := kinesisOutput.FlushAggregatedRecords(&records) 261 | if retCode != output.FLB_OK { 262 | return nil, 0, retCode 263 | } 264 | } 265 | 266 | return records, count, output.FLB_OK 267 | } 268 | 269 | //export FLBPluginExit 270 | func FLBPluginExit() int { 271 | 272 | return output.FLB_OK 273 | } 274 | 275 | func main() { 276 | } 277 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/aws/amazon-kinesis-streams-for-fluent-bit 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/aws/amazon-kinesis-firehose-for-fluent-bit v1.7.1 7 | github.com/aws/aws-sdk-go v1.44.229 8 | github.com/fluent/fluent-bit-go v0.0.0-20201210173045-3fd1e0486df2 9 | github.com/golang/mock v1.6.0 10 | github.com/golang/protobuf v1.5.3 11 | github.com/json-iterator/go v1.1.12 12 | github.com/lestrrat-go/strftime v1.0.6 13 | github.com/sirupsen/logrus v1.9.0 14 | github.com/stretchr/testify v1.8.2 15 | google.golang.org/protobuf v1.30.0 16 | ) 17 | 18 | require ( 19 | github.com/davecgh/go-spew v1.1.1 // indirect 20 | github.com/jmespath/go-jmespath v0.4.0 // indirect 21 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect 22 | github.com/modern-go/reflect2 v1.0.2 // indirect 23 | github.com/pkg/errors v0.9.1 // indirect 24 | github.com/pmezard/go-difflib v1.0.0 // indirect 25 | github.com/ugorji/go/codec v1.1.7 // indirect 26 | golang.org/x/net v0.8.0 // indirect 27 | golang.org/x/sys v0.6.0 // indirect 28 | gopkg.in/yaml.v3 v3.0.1 // indirect 29 | ) 30 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/aws/amazon-kinesis-firehose-for-fluent-bit v1.7.1 h1:qkTy1A/157f7AN9bKC/WXyhYpIZpKhJXdcSCGaBLVKY= 2 | github.com/aws/amazon-kinesis-firehose-for-fluent-bit v1.7.1/go.mod h1:260TjZZVmdgzRc/csalI+v+lN7x3qmnmvvl4eAZxfOU= 3 | github.com/aws/aws-sdk-go v1.44.44/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= 4 | github.com/aws/aws-sdk-go v1.44.229 h1:lku0ZSHRzj/qtFVM//QE8VjV6kvJ6CFijDZSsjNaD9A= 5 | github.com/aws/aws-sdk-go v1.44.229/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= 6 | github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/fluent/fluent-bit-go v0.0.0-20201210173045-3fd1e0486df2 h1:G57WNyWS0FQf43hjRXLy5JT1V5LWVsSiEpkUcT67Ugk= 10 | github.com/fluent/fluent-bit-go v0.0.0-20201210173045-3fd1e0486df2/go.mod h1:L92h+dgwElEyUuShEwjbiHjseW410WIcNz+Bjutc8YQ= 11 | github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= 12 | github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= 13 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 14 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 15 | github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 16 | github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= 17 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 18 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 19 | github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 20 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 21 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 22 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 23 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 24 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 25 | github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= 26 | github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= 27 | github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= 28 | github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= 29 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= 30 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 31 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 32 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 33 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 34 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 35 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 36 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 37 | github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 38 | github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= 39 | github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 40 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 41 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 42 | github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 43 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 44 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 45 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 46 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 47 | github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 48 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 49 | github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= 50 | github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 51 | github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= 52 | github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= 53 | github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= 54 | github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= 55 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 56 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 57 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 58 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 59 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 60 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 61 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 62 | golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 63 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 64 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 65 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 66 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 67 | golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 68 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 69 | golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= 70 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 71 | golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= 72 | golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= 73 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 74 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 75 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 76 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 77 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 78 | golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 79 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 80 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 81 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 82 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 83 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 84 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 85 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 86 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 87 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 88 | golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 89 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 90 | golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= 91 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 92 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 93 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 94 | golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 95 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 96 | golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= 97 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 98 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 99 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 100 | golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 101 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 102 | golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= 103 | golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 104 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 105 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 106 | golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 107 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 108 | golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 109 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 110 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 111 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 112 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 113 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 114 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 115 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 116 | google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= 117 | google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 118 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 119 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 120 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 121 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 122 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 123 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 124 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 125 | -------------------------------------------------------------------------------- /kinesis/kinesis.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | // not use this file except in compliance with the License. A copy of the 5 | // License is located at 6 | // 7 | // http://aws.amazon.com/apache2.0/ 8 | // 9 | // or in the "license" file accompanying this file. This file is distributed 10 | // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 | // express or implied. See the License for the specific language governing 12 | // permissions and limitations under the License. 
13 | 14 | //go:generate mockgen -destination mock_kinesis/mock_kinesis.go -copyright_file=../COPYRIGHT github.com/aws/amazon-kinesis-streams-for-fluent-bit/kinesis PutRecordsClient 15 | 16 | // Package kinesis contains the OutputPlugin which sends log records to Kinesis Stream 17 | package kinesis 18 | 19 | import ( 20 | "bytes" 21 | "compress/gzip" 22 | "compress/zlib" 23 | "errors" 24 | "fmt" 25 | "math" 26 | "net/http" 27 | "os" 28 | "strings" 29 | "sync/atomic" 30 | "time" 31 | 32 | "github.com/aws/amazon-kinesis-firehose-for-fluent-bit/plugins" 33 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/aggregate" 34 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/util" 35 | "github.com/aws/aws-sdk-go/aws" 36 | "github.com/aws/aws-sdk-go/aws/awserr" 37 | "github.com/aws/aws-sdk-go/aws/credentials/stscreds" 38 | "github.com/aws/aws-sdk-go/aws/endpoints" 39 | "github.com/aws/aws-sdk-go/aws/session" 40 | "github.com/aws/aws-sdk-go/service/kinesis" 41 | "github.com/fluent/fluent-bit-go/output" 42 | fluentbit "github.com/fluent/fluent-bit-go/output" 43 | jsoniter "github.com/json-iterator/go" 44 | "github.com/lestrrat-go/strftime" 45 | "github.com/sirupsen/logrus" 46 | ) 47 | 48 | const ( 49 | truncatedSuffix = "[Truncated...]" 50 | truncationReductionPercent = 90 51 | truncationCompressionMaxAttempts = 10 52 | ) 53 | 54 | const ( 55 | // Kinesis API Limit https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.PutRecords 56 | maximumRecordsPerPut = 500 57 | maximumPutRecordBatchSize = 1024 * 1024 * 5 // 5 MB 58 | maximumRecordSize = 1024 * 1024 // 1 MB 59 | 60 | partitionKeyMaxLength = 256 61 | ) 62 | 63 | const ( 64 | // We use strftime format specifiers because this will one day be re-written in C 65 | defaultTimeFmt = "%Y-%m-%dT%H:%M:%S" 66 | ) 67 | 68 | // PutRecordsClient contains the kinesis PutRecords method call 69 | type PutRecordsClient interface { 70 | PutRecords(input *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) 71 | } 72 | 73 | // CompressionType indicates the type of compression to apply to each record 74 | type CompressionType string 75 | 76 | const ( 77 | // CompressionNone disables compression 78 | CompressionNone CompressionType = "none" 79 | // CompressionZlib enables zlib compression 80 | CompressionZlib = "zlib" 81 | // CompressionGzip enables gzip compression 82 | CompressionGzip = "gzip" 83 | ) 84 | 85 | // OutputPlugin sends log records to kinesis 86 | type OutputPlugin struct { 87 | // The name of the stream that you want log records sent to 88 | stream string 89 | // If specified, only these keys and values will be send as the log record 90 | dataKeys string 91 | // If specified, the value of that data key will be used as the partition key. 92 | // Otherwise a random string will be used. 
93 | // Partition key decides in which shard of your stream the data belongs to 94 | partitionKey string 95 | // Decides whether to append a newline after each data record 96 | appendNewline bool 97 | timeKey string 98 | fmtStrftime *strftime.Strftime 99 | logKey string 100 | client PutRecordsClient 101 | timer *plugins.Timeout 102 | PluginID int 103 | stringGen *util.RandomStringGenerator 104 | Concurrency int 105 | concurrencyRetryLimit int 106 | // Concurrency is the limit, goroutineCount represents the running goroutines 107 | goroutineCount int32 108 | // Used to implement backoff for concurrent flushes 109 | concurrentRetries uint32 110 | isAggregate bool 111 | aggregator *aggregate.Aggregator 112 | compression CompressionType 113 | // If specified, dots in key names should be replaced with other symbols 114 | replaceDots string 115 | } 116 | 117 | // NewOutputPlugin creates an OutputPlugin object 118 | func NewOutputPlugin(region, stream, dataKeys, partitionKey, roleARN, kinesisEndpoint, stsEndpoint, timeKey, timeFmt, logKey, replaceDots string, concurrency, retryLimit int, isAggregate, appendNewline bool, compression CompressionType, pluginID int, httpRequestTimeout time.Duration) (*OutputPlugin, error) { 119 | client, err := newPutRecordsClient(roleARN, region, kinesisEndpoint, stsEndpoint, pluginID, httpRequestTimeout) 120 | if err != nil { 121 | return nil, err 122 | } 123 | 124 | timer, err := plugins.NewTimeout(func(d time.Duration) { 125 | logrus.Errorf("[kinesis %d] timeout threshold reached: Failed to send logs for %s\n", pluginID, d.String()) 126 | logrus.Errorf("[kinesis %d] Quitting Fluent Bit", pluginID) 127 | os.Exit(1) 128 | }) 129 | 130 | if err != nil { 131 | return nil, err 132 | } 133 | 134 | stringGen := util.NewRandomStringGenerator(8) 135 | 136 | var timeFormatter *strftime.Strftime 137 | if timeKey != "" { 138 | if timeFmt == "" { 139 | timeFmt = defaultTimeFmt 140 | } 141 | timeFormatter, err = strftime.New(timeFmt, strftime.WithMilliseconds('L'), strftime.WithMicroseconds('f')) 142 | if err != nil { 143 | logrus.Errorf("[kinesis %d] Issue with strftime format in 'time_key_format'", pluginID) 144 | return nil, err 145 | } 146 | } 147 | 148 | var aggregator *aggregate.Aggregator 149 | if isAggregate { 150 | aggregator = aggregate.NewAggregator(stringGen) 151 | } 152 | 153 | return &OutputPlugin{ 154 | stream: stream, 155 | client: client, 156 | dataKeys: dataKeys, 157 | partitionKey: partitionKey, 158 | appendNewline: appendNewline, 159 | timeKey: timeKey, 160 | fmtStrftime: timeFormatter, 161 | logKey: logKey, 162 | timer: timer, 163 | PluginID: pluginID, 164 | stringGen: stringGen, 165 | Concurrency: concurrency, 166 | concurrencyRetryLimit: retryLimit, 167 | isAggregate: isAggregate, 168 | aggregator: aggregator, 169 | compression: compression, 170 | replaceDots: replaceDots, 171 | }, nil 172 | } 173 | 174 | // newPutRecordsClient creates the Kinesis client for calling the PutRecords method 175 | func newPutRecordsClient(roleARN string, awsRegion string, kinesisEndpoint string, stsEndpoint string, pluginID int, httpRequestTimeout time.Duration) (*kinesis.Kinesis, error) { 176 | customResolverFn := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { 177 | if service == endpoints.KinesisServiceID && kinesisEndpoint != "" { 178 | return endpoints.ResolvedEndpoint{ 179 | URL: kinesisEndpoint, 180 | }, nil 181 | } else if service == endpoints.StsServiceID && stsEndpoint != "" { 182 | return 
endpoints.ResolvedEndpoint{ 183 | URL: stsEndpoint, 184 | }, nil 185 | } 186 | return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) 187 | } 188 | httpClient := &http.Client{ 189 | Timeout: httpRequestTimeout, 190 | } 191 | 192 | // Fetch base credentials 193 | baseConfig := &aws.Config{ 194 | Region: aws.String(awsRegion), 195 | EndpointResolver: endpoints.ResolverFunc(customResolverFn), 196 | CredentialsChainVerboseErrors: aws.Bool(true), 197 | HTTPClient: httpClient, 198 | } 199 | 200 | sess, err := session.NewSession(baseConfig) 201 | if err != nil { 202 | return nil, err 203 | } 204 | 205 | var svcSess = sess 206 | var svcConfig = baseConfig 207 | eksRole := os.Getenv("EKS_POD_EXECUTION_ROLE") 208 | if eksRole != "" { 209 | logrus.Debugf("[kinesis %d] Fetching EKS pod credentials.\n", pluginID) 210 | eksConfig := &aws.Config{} 211 | creds := stscreds.NewCredentials(svcSess, eksRole) 212 | eksConfig.Credentials = creds 213 | eksConfig.Region = aws.String(awsRegion) 214 | eksConfig.HTTPClient = httpClient 215 | svcConfig = eksConfig 216 | 217 | svcSess, err = session.NewSession(svcConfig) 218 | if err != nil { 219 | return nil, err 220 | } 221 | } 222 | if roleARN != "" { 223 | logrus.Debugf("[kinesis %d] Fetching credentials for %s\n", pluginID, roleARN) 224 | stsConfig := &aws.Config{} 225 | creds := stscreds.NewCredentials(svcSess, roleARN) 226 | stsConfig.Credentials = creds 227 | stsConfig.Region = aws.String(awsRegion) 228 | stsConfig.HTTPClient = httpClient 229 | svcConfig = stsConfig 230 | 231 | svcSess, err = session.NewSession(svcConfig) 232 | if err != nil { 233 | return nil, err 234 | } 235 | } 236 | 237 | client := kinesis.New(svcSess, svcConfig) 238 | client.Handlers.Build.PushBackNamed(plugins.CustomUserAgentHandler()) 239 | return client, nil 240 | } 241 | 242 | // AddRecord accepts a record and adds it to the buffer 243 | // the return value is one of: FLB_OK FLB_RETRY FLB_ERROR 244 | func (outputPlugin *OutputPlugin) AddRecord(records *[]*kinesis.PutRecordsRequestEntry, record map[interface{}]interface{}, timeStamp *time.Time) int { 245 | if outputPlugin.timeKey != "" { 246 | buf := new(bytes.Buffer) 247 | err := outputPlugin.fmtStrftime.Format(buf, *timeStamp) 248 | if err != nil { 249 | logrus.Errorf("[kinesis %d] Could not create timestamp %v\n", outputPlugin.PluginID, err) 250 | return fluentbit.FLB_ERROR 251 | } 252 | record[outputPlugin.timeKey] = buf.String() 253 | } 254 | 255 | partitionKey, hasPartitionKey := outputPlugin.getPartitionKey(record) 256 | var partitionKeyLen = len(partitionKey) 257 | if !hasPartitionKey { 258 | partitionKeyLen = outputPlugin.stringGen.Size 259 | } 260 | data, err := outputPlugin.processRecord(record, partitionKeyLen) 261 | if err != nil { 262 | logrus.Errorf("[kinesis %d] %v\n", outputPlugin.PluginID, err) 263 | // discard this single bad record instead and let the batch continue 264 | return fluentbit.FLB_OK 265 | } 266 | 267 | if !outputPlugin.isAggregate { 268 | if !hasPartitionKey { 269 | partitionKey = outputPlugin.stringGen.RandomString() 270 | } 271 | logrus.Debugf("[kinesis %d] Got value: %s for a given partition key.\n", outputPlugin.PluginID, partitionKey) 272 | *records = append(*records, &kinesis.PutRecordsRequestEntry{ 273 | Data: data, 274 | PartitionKey: aws.String(partitionKey), 275 | }) 276 | } else { 277 | // Use the KPL aggregator to buffer records isAggregate is true 278 | aggRecord, err := outputPlugin.aggregator.AddRecord(partitionKey, hasPartitionKey, data) 279 | if err != nil { 280 | 
logrus.Errorf("[kinesis %d] Failed to aggregate record %v\n", outputPlugin.PluginID, err) 281 | // discard this single bad record instead and let the batch continue 282 | return fluentbit.FLB_OK 283 | } 284 | 285 | // If aggRecord isn't nil, then a full kinesis record has been aggregated 286 | if aggRecord != nil { 287 | *records = append(*records, aggRecord) 288 | } 289 | } 290 | 291 | return fluentbit.FLB_OK 292 | } 293 | 294 | // FlushAggregatedRecords must be called after 295 | // Returns FLB_OK, FLB_RETRY, FLB_ERROR 296 | func (outputPlugin *OutputPlugin) FlushAggregatedRecords(records *[]*kinesis.PutRecordsRequestEntry) int { 297 | 298 | aggRecord, err := outputPlugin.aggregator.AggregateRecords() 299 | if err != nil { 300 | logrus.Errorf("[kinesis %d] Failed to aggregate record %v\n", outputPlugin.PluginID, err) 301 | return fluentbit.FLB_ERROR 302 | } 303 | 304 | if aggRecord != nil { 305 | *records = append(*records, aggRecord) 306 | } 307 | 308 | return fluentbit.FLB_OK 309 | } 310 | 311 | // Flush sends the current buffer of log records 312 | // Returns FLB_OK, FLB_RETRY, FLB_ERROR 313 | func (outputPlugin *OutputPlugin) Flush(records *[]*kinesis.PutRecordsRequestEntry) int { 314 | // Use a different buffer to batch the logs 315 | requestBuf := make([]*kinesis.PutRecordsRequestEntry, 0, maximumRecordsPerPut) 316 | dataLength := 0 317 | 318 | for i, record := range *records { 319 | newRecordSize := len(record.Data) + len(aws.StringValue(record.PartitionKey)) 320 | 321 | if len(requestBuf) == maximumRecordsPerPut || (dataLength+newRecordSize) > maximumPutRecordBatchSize { 322 | retCode, err := outputPlugin.sendCurrentBatch(&requestBuf, &dataLength) 323 | if err != nil { 324 | logrus.Errorf("[kinesis %d] %v\n", outputPlugin.PluginID, err) 325 | } 326 | if retCode != fluentbit.FLB_OK { 327 | unsent := (*records)[i:] 328 | // requestBuf will contain records sendCurrentBatch failed to send, 329 | // combine those with the records yet to be sent/batched 330 | *records = append(requestBuf, unsent...) 
331 | return retCode 332 | } 333 | } 334 | 335 | requestBuf = append(requestBuf, record) 336 | dataLength += newRecordSize 337 | } 338 | 339 | // send any remaining records 340 | retCode, err := outputPlugin.sendCurrentBatch(&requestBuf, &dataLength) 341 | if err != nil { 342 | logrus.Errorf("[kinesis %d] %v\n", outputPlugin.PluginID, err) 343 | } 344 | 345 | if retCode == output.FLB_OK { 346 | logrus.Debugf("[kinesis %d] Flushed %d logs\n", outputPlugin.PluginID, len(*records)) 347 | } 348 | 349 | // requestBuf will contain records sendCurrentBatch failed to send 350 | *records = requestBuf 351 | return retCode 352 | } 353 | 354 | // FlushWithRetries sends the current buffer of log records, with retries 355 | func (outputPlugin *OutputPlugin) FlushWithRetries(count int, records []*kinesis.PutRecordsRequestEntry) { 356 | var retCode, tries int 357 | 358 | currentRetries := outputPlugin.getConcurrentRetries() 359 | outputPlugin.addGoroutineCount(1) 360 | 361 | for tries = 0; tries <= outputPlugin.concurrencyRetryLimit; tries++ { 362 | if currentRetries > 0 { 363 | // Wait if other goroutines are retrying, as well as implement a progressive backoff 364 | if currentRetries > uint32(outputPlugin.concurrencyRetryLimit) { 365 | time.Sleep(time.Duration((1< 0 { 382 | outputPlugin.addConcurrentRetries(-tries) 383 | } 384 | 385 | switch retCode { 386 | case output.FLB_ERROR: 387 | logrus.Errorf("[kinesis %d] Failed to send (%d) records with error", outputPlugin.PluginID, len(records)) 388 | case output.FLB_RETRY: 389 | logrus.Errorf("[kinesis %d] Failed to send (%d) records after retries %d", outputPlugin.PluginID, len(records), outputPlugin.concurrencyRetryLimit) 390 | case output.FLB_OK: 391 | logrus.Debugf("[kinesis %d] Flushed %d records\n", outputPlugin.PluginID, count) 392 | } 393 | } 394 | 395 | // FlushConcurrent sends the current buffer of log records in a goroutine with retries 396 | // Returns FLB_OK, FLB_RETRY 397 | // Will return FLB_RETRY if the limit of concurrency has been reached 398 | func (outputPlugin *OutputPlugin) FlushConcurrent(count int, records []*kinesis.PutRecordsRequestEntry) int { 399 | 400 | runningGoRoutines := outputPlugin.getGoroutineCount() 401 | if runningGoRoutines+1 > int32(outputPlugin.Concurrency) { 402 | logrus.Infof("[kinesis %d] flush returning retry, concurrency limit reached (%d)\n", outputPlugin.PluginID, runningGoRoutines) 403 | return output.FLB_RETRY 404 | } 405 | 406 | curRetries := outputPlugin.getConcurrentRetries() 407 | if curRetries > 0 { 408 | logrus.Infof("[kinesis %d] flush returning retry, kinesis retries in progress (%d)\n", outputPlugin.PluginID, curRetries) 409 | return output.FLB_RETRY 410 | } 411 | 412 | go outputPlugin.FlushWithRetries(count, records) 413 | 414 | return output.FLB_OK 415 | 416 | } 417 | 418 | func replaceDots(obj map[interface{}]interface{}, replacement string) map[interface{}]interface{} { 419 | for k, v := range obj { 420 | var curK = k 421 | switch kt := k.(type) { 422 | case string: 423 | curK = strings.ReplaceAll(kt, ".", replacement) 424 | } 425 | delete(obj, k) 426 | switch vt := v.(type) { 427 | case map[interface{}]interface{}: 428 | v = replaceDots(vt, replacement) 429 | } 430 | 431 | obj[curK] = v 432 | } 433 | 434 | return obj 435 | } 436 | 437 | func (outputPlugin *OutputPlugin) processRecord(record map[interface{}]interface{}, partitionKeyLen int) ([]byte, error) { 438 | if outputPlugin.dataKeys != "" { 439 | record = plugins.DataKeys(outputPlugin.dataKeys, record) 440 | } 441 | 442 | var err error 443 | 
record, err = plugins.DecodeMap(record) 444 | if err != nil { 445 | logrus.Debugf("[kinesis %d] Failed to decode record: %v\n", outputPlugin.PluginID, record) 446 | return nil, err 447 | } 448 | 449 | if outputPlugin.replaceDots != "" { 450 | record = replaceDots(record, outputPlugin.replaceDots) 451 | } 452 | 453 | var json = jsoniter.ConfigCompatibleWithStandardLibrary 454 | var data []byte 455 | 456 | if outputPlugin.logKey != "" { 457 | log, err := plugins.LogKey(record, outputPlugin.logKey) 458 | if err != nil { 459 | return nil, err 460 | } 461 | 462 | data, err = plugins.EncodeLogKey(log) 463 | } else { 464 | data, err = json.Marshal(record) 465 | } 466 | 467 | if err != nil { 468 | logrus.Debugf("[kinesis %d] Failed to marshal record: %v\n", outputPlugin.PluginID, record) 469 | return nil, err 470 | } 471 | 472 | // append a newline after each log record 473 | if outputPlugin.appendNewline { 474 | data = append(data, []byte("\n")...) 475 | } 476 | 477 | // max truncation size 478 | maxDataSize := maximumRecordSize-partitionKeyLen 479 | 480 | switch outputPlugin.compression { 481 | case CompressionZlib: 482 | data, err = compressThenTruncate(zlibCompress, data, maxDataSize, []byte(truncatedSuffix), *outputPlugin) 483 | case CompressionGzip: 484 | data, err = compressThenTruncate(gzipCompress, data, maxDataSize, []byte(truncatedSuffix), *outputPlugin) 485 | default: 486 | } 487 | if err != nil { 488 | return nil, err 489 | } 490 | 491 | if len(data)+partitionKeyLen > maximumRecordSize { 492 | logrus.Warnf("[kinesis %d] Found record with %d bytes, truncating to 1MB, stream=%s\n", outputPlugin.PluginID, len(data)+partitionKeyLen, outputPlugin.stream) 493 | data = data[:maxDataSize-len(truncatedSuffix)] 494 | data = append(data, []byte(truncatedSuffix)...) 495 | } 496 | 497 | return data, nil 498 | } 499 | 500 | func (outputPlugin *OutputPlugin) sendCurrentBatch(records *[]*kinesis.PutRecordsRequestEntry, dataLength *int) (int, error) { 501 | if len(*records) == 0 { 502 | return fluentbit.FLB_OK, nil 503 | } 504 | outputPlugin.timer.Check() 505 | response, err := outputPlugin.client.PutRecords(&kinesis.PutRecordsInput{ 506 | Records: *records, 507 | StreamName: aws.String(outputPlugin.stream), 508 | }) 509 | if err != nil { 510 | logrus.Errorf("[kinesis %d] PutRecords failed with %v\n", outputPlugin.PluginID, err) 511 | outputPlugin.timer.Start() 512 | if aerr, ok := err.(awserr.Error); ok { 513 | if aerr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException { 514 | logrus.Warnf("[kinesis %d] Throughput limits for the stream may have been exceeded.", outputPlugin.PluginID) 515 | } 516 | } 517 | return fluentbit.FLB_RETRY, err 518 | } 519 | logrus.Debugf("[kinesis %d] Sent %d events to Kinesis\n", outputPlugin.PluginID, len(*records)) 520 | 521 | return outputPlugin.processAPIResponse(records, dataLength, response) 522 | } 523 | 524 | // processAPIResponse processes the successful and failed records 525 | // it returns an error iff no records succeeded (i.e.) 
no progress has been made 526 | func (outputPlugin *OutputPlugin) processAPIResponse(records *[]*kinesis.PutRecordsRequestEntry, dataLength *int, response *kinesis.PutRecordsOutput) (int, error) { 527 | 528 | var retCode int = fluentbit.FLB_OK 529 | var limitsExceeded bool 530 | 531 | if aws.Int64Value(response.FailedRecordCount) > 0 { 532 | // start timer if all records failed (no progress has been made) 533 | if aws.Int64Value(response.FailedRecordCount) == int64(len(*records)) { 534 | outputPlugin.timer.Start() 535 | return fluentbit.FLB_RETRY, fmt.Errorf("PutRecords request returned with no records successfully recieved") 536 | } 537 | 538 | logrus.Warnf("[kinesis %d] %d/%d records failed to be delivered. Will retry.\n", outputPlugin.PluginID, aws.Int64Value(response.FailedRecordCount), len(*records)) 539 | failedRecords := make([]*kinesis.PutRecordsRequestEntry, 0, aws.Int64Value(response.FailedRecordCount)) 540 | // try to resend failed records 541 | for i, record := range response.Records { 542 | if record.ErrorMessage != nil { 543 | logrus.Debugf("[kinesis %d] Record failed to send with error: %s\n", outputPlugin.PluginID, aws.StringValue(record.ErrorMessage)) 544 | failedRecords = append(failedRecords, (*records)[i]) 545 | } 546 | 547 | if aws.StringValue(record.ErrorCode) == kinesis.ErrCodeProvisionedThroughputExceededException { 548 | retCode = fluentbit.FLB_RETRY 549 | limitsExceeded = true 550 | } 551 | } 552 | 553 | if limitsExceeded { 554 | logrus.Warnf("[kinesis %d] Throughput limits for the stream may have been exceeded.", outputPlugin.PluginID) 555 | } 556 | 557 | *records = (*records)[:0] 558 | *records = append(*records, failedRecords...) 559 | *dataLength = 0 560 | for _, record := range *records { 561 | *dataLength += len(record.Data) 562 | } 563 | } else { 564 | // request fully succeeded 565 | outputPlugin.timer.Reset() 566 | *records = (*records)[:0] 567 | *dataLength = 0 568 | } 569 | return retCode, nil 570 | } 571 | 572 | func getFromMap(dataKey string, record map[interface{}]interface{}) interface{} { 573 | for k, v := range record { 574 | currentKey := stringOrByteArray(k) 575 | if currentKey == dataKey { 576 | return v 577 | } 578 | } 579 | 580 | return "" 581 | } 582 | 583 | // getPartitionKey returns the value for a given valid key 584 | // if the given key is empty or invalid, it returns empty 585 | // second return value indicates whether a partition key was found or not 586 | func (outputPlugin *OutputPlugin) getPartitionKey(record map[interface{}]interface{}) (string, bool) { 587 | partitionKey := outputPlugin.partitionKey 588 | if partitionKey != "" { 589 | partitionKeys := strings.Split(partitionKey, "->") 590 | num := len(partitionKeys) 591 | for count, dataKey := range partitionKeys { 592 | newRecord := getFromMap(dataKey, record) 593 | if count == num-1 { 594 | value := stringOrByteArray(newRecord) 595 | if value != "" { 596 | if len(value) > partitionKeyMaxLength { 597 | value = value[0:partitionKeyMaxLength] 598 | } 599 | return value, true 600 | } 601 | } 602 | _, ok := newRecord.(map[interface{}]interface{}) 603 | if ok { 604 | record = newRecord.(map[interface{}]interface{}) 605 | } else { 606 | logrus.Errorf("[kinesis %d] The partition key could not be found in the record, using a random string instead", outputPlugin.PluginID) 607 | return "", false 608 | } 609 | } 610 | } 611 | return "", false 612 | } 613 | 614 | // CompressorFunc is a function that compresses a byte slice 615 | type CompressorFunc func([]byte) ([]byte, error) 616 | 617 | 
func zlibCompress(data []byte) ([]byte, error) { 618 | var b bytes.Buffer 619 | 620 | if data == nil { 621 | return nil, fmt.Errorf("No data to compress. 'nil' value passed as data") 622 | } 623 | 624 | zw := zlib.NewWriter(&b) 625 | _, err := zw.Write(data) 626 | if err != nil { 627 | return data, err 628 | } 629 | err = zw.Close() 630 | if err != nil { 631 | return data, err 632 | } 633 | 634 | return b.Bytes(), nil 635 | } 636 | 637 | func gzipCompress(data []byte) ([]byte, error) { 638 | var b bytes.Buffer 639 | 640 | if data == nil { 641 | return nil, fmt.Errorf("No data to compress. 'nil' value passed as data") 642 | } 643 | 644 | zw := gzip.NewWriter(&b) 645 | _, err := zw.Write(data) 646 | if err != nil { 647 | return data, err 648 | } 649 | err = zw.Close() 650 | if err != nil { 651 | return data, err 652 | } 653 | 654 | return b.Bytes(), nil 655 | } 656 | 657 | // Compress Then Truncate 658 | // compresses data with CompressorFunction and iteratively truncates data 659 | // adding the truncation suffix if the CompressorFunction output exceeds maxOutLen. 660 | // The output is compressed and possibly truncated data whose length guaranteed to 661 | // be less than or equal to maxOutLen. 662 | func compressThenTruncate(compressorFunc CompressorFunc, data []byte, maxOutLen int, truncatedSuffix []byte, outputPlugin OutputPlugin) ([]byte, error) { 663 | var compressedData []byte 664 | var truncationBuffer []byte 665 | var originalCompressedLen int 666 | var compressedLen int 667 | var err error 668 | 669 | /* Iterative approach to truncation */ 670 | isTruncated := false 671 | compressedLen = math.MaxInt64 672 | truncatedInLen := len(data) 673 | truncationBuffer = data 674 | truncationCompressionAttempts := 0 675 | for (compressedLen > maxOutLen) { 676 | compressedData, err = compressorFunc(truncationBuffer) 677 | if err != nil { 678 | return nil, err 679 | } 680 | compressedLen = len(compressedData) 681 | 682 | /* Truncation needed */ 683 | if (compressedLen > maxOutLen) { 684 | truncationCompressionAttempts++ 685 | logrus.Debugf("[kinesis %d] iterative truncation round stream=%s\n", 686 | outputPlugin.PluginID, outputPlugin.stream) 687 | 688 | /* Base case: input compressed empty string, output still too large */ 689 | if (truncatedInLen == 0) { 690 | logrus.Errorf("[kinesis %d] truncation failed, compressed empty input too " + 691 | "large stream=%s\n", outputPlugin.PluginID, outputPlugin.stream) 692 | return nil, errors.New("compressed empty to large"); 693 | } 694 | 695 | /* Base case: too many attempts - just to be extra safe */ 696 | if (truncationCompressionAttempts > truncationCompressionMaxAttempts) { 697 | logrus.Errorf("[kinesis %d] truncation failed, too many compression attempts " + 698 | "stream=%s\n", outputPlugin.PluginID, outputPlugin.stream) 699 | return nil, errors.New("too many compression attempts"); 700 | } 701 | 702 | /* Calculate corrected input size */ 703 | truncatedInLenPrev := truncatedInLen; 704 | truncatedInLen = (maxOutLen * truncatedInLen) / compressedLen; 705 | truncatedInLen = (truncatedInLen * truncationReductionPercent) / 100; 706 | 707 | /* Ensure working down */ 708 | if (truncatedInLen >= truncatedInLenPrev) { 709 | truncatedInLen = truncatedInLenPrev - 1; 710 | } 711 | 712 | /* Allocate truncation buffer */ 713 | if (!isTruncated) { 714 | isTruncated = true; 715 | originalCompressedLen = compressedLen 716 | truncationBuffer = make([]byte, truncatedInLen) 717 | copy(truncationBuffer, data[:truncatedInLen]) 718 | } 719 | 720 | /* Slap on 
truncation suffix */ 721 | if (truncatedInLen < len(truncatedSuffix)) { 722 | /* No room for the truncation suffix. Terminal error */ 723 | logrus.Errorf("[kinesis %d] truncation failed, no room for suffix " + 724 | "stream=%s\n", outputPlugin.PluginID, outputPlugin.stream) 725 | return nil, errors.New("no room for suffix"); 726 | } 727 | truncationBuffer = truncationBuffer[:truncatedInLen] 728 | copy(truncationBuffer[len(truncationBuffer)-len(truncatedSuffix):], truncatedSuffix) 729 | } 730 | } 731 | 732 | if (isTruncated) { 733 | logrus.Warnf("[kinesis %d] Found compressed record with %d bytes, " + 734 | "truncating to %d bytes after compression, stream=%s\n", 735 | outputPlugin.PluginID, originalCompressedLen, len(compressedData), outputPlugin.stream) 736 | } 737 | 738 | return compressedData, nil 739 | } 740 | 741 | // stringOrByteArray returns the string value if the input is a string or byte array otherwise an empty string 742 | func stringOrByteArray(v interface{}) string { 743 | switch t := v.(type) { 744 | case []byte: 745 | return string(t) 746 | case string: 747 | return t 748 | default: 749 | return "" 750 | } 751 | } 752 | 753 | // getConcurrentRetries value (goroutine safe) 754 | func (outputPlugin *OutputPlugin) getConcurrentRetries() uint32 { 755 | return atomic.LoadUint32(&outputPlugin.concurrentRetries) 756 | } 757 | 758 | // addConcurrentRetries will update the value (goroutine safe) 759 | func (outputPlugin *OutputPlugin) addConcurrentRetries(val int) uint32 { 760 | return atomic.AddUint32(&outputPlugin.concurrentRetries, uint32(val)) 761 | } 762 | 763 | // getConcurrentRetries value (goroutine safe) 764 | func (outputPlugin *OutputPlugin) getGoroutineCount() int32 { 765 | return atomic.LoadInt32(&outputPlugin.goroutineCount) 766 | } 767 | 768 | // addConcurrentRetries will update the value (goroutine safe) 769 | func (outputPlugin *OutputPlugin) addGoroutineCount(val int) int32 { 770 | return atomic.AddInt32(&outputPlugin.goroutineCount, int32(val)) 771 | } 772 | 773 | // IsAggregate indicates if this instance of the plugin has KCL aggregation enabled. 
774 | func (outputPlugin *OutputPlugin) IsAggregate() bool { 775 | return outputPlugin.isAggregate 776 | } 777 | -------------------------------------------------------------------------------- /kinesis/kinesis_test.go: -------------------------------------------------------------------------------- 1 | package kinesis 2 | 3 | import ( 4 | "encoding/json" 5 | "math/rand" 6 | "os" 7 | "sync" 8 | "testing" 9 | "time" 10 | 11 | "github.com/aws/amazon-kinesis-firehose-for-fluent-bit/plugins" 12 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/aggregate" 13 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/kinesis/mock_kinesis" 14 | "github.com/aws/amazon-kinesis-streams-for-fluent-bit/util" 15 | "github.com/aws/aws-sdk-go/aws" 16 | "github.com/aws/aws-sdk-go/service/kinesis" 17 | fluentbit "github.com/fluent/fluent-bit-go/output" 18 | "github.com/golang/mock/gomock" 19 | "github.com/sirupsen/logrus" 20 | "github.com/stretchr/testify/assert" 21 | ) 22 | 23 | const concurrencyRetryLimit = 4 24 | 25 | // newMockOutputPlugin creates an mock OutputPlugin object 26 | func newMockOutputPlugin(client *mock_kinesis.MockPutRecordsClient, isAggregate bool) (*OutputPlugin, error) { 27 | 28 | timer, _ := plugins.NewTimeout(func(d time.Duration) { 29 | logrus.Errorf("[kinesis] timeout threshold reached: Failed to send logs for %v", d) 30 | logrus.Errorf("[kinesis] Quitting Fluent Bit") 31 | os.Exit(1) 32 | }) 33 | 34 | stringGen := util.NewRandomStringGenerator(8) 35 | 36 | var aggregator *aggregate.Aggregator 37 | if isAggregate { 38 | aggregator = aggregate.NewAggregator(stringGen) 39 | } 40 | 41 | return &OutputPlugin{ 42 | stream: "stream", 43 | client: client, 44 | dataKeys: "", 45 | partitionKey: "", 46 | timer: timer, 47 | PluginID: 0, 48 | stringGen: stringGen, 49 | concurrencyRetryLimit: concurrencyRetryLimit, 50 | isAggregate: isAggregate, 51 | aggregator: aggregator, 52 | replaceDots: "-", 53 | }, nil 54 | } 55 | 56 | // Test cases for TestStringOrByteArray 57 | var testCases = []struct { 58 | input interface{} 59 | output string 60 | }{ 61 | {"testString", "testString"}, 62 | {35344, ""}, 63 | {[]byte{'b', 'y', 't', 'e'}, "byte"}, 64 | {nil, ""}, 65 | } 66 | 67 | func TestStringOrByteArray(t *testing.T) { 68 | for _, testCase := range testCases { 69 | result := stringOrByteArray(testCase.input) 70 | if result != testCase.output { 71 | t.Errorf("[Test Failed] Expeced: %s, Returned: %s", testCase.output, result) 72 | } 73 | } 74 | } 75 | 76 | func TestAddRecord(t *testing.T) { 77 | records := make([]*kinesis.PutRecordsRequestEntry, 0, 500) 78 | 79 | record := map[interface{}]interface{}{ 80 | "testkey": []byte("test value"), 81 | } 82 | 83 | outputPlugin, _ := newMockOutputPlugin(nil, false) 84 | 85 | timeStamp := time.Now() 86 | retCode := outputPlugin.AddRecord(&records, record, &timeStamp) 87 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected return code to be FLB_OK") 88 | assert.Len(t, records, 1, "Expected output to contain 1 record") 89 | } 90 | 91 | func TestTruncateLargeLogEvent(t *testing.T) { 92 | records := make([]*kinesis.PutRecordsRequestEntry, 0, 500) 93 | 94 | record := map[interface{}]interface{}{ 95 | "somekey": make([]byte, 1024*1024), 96 | } 97 | 98 | outputPlugin, _ := newMockOutputPlugin(nil, false) 99 | 100 | timeStamp := time.Now() 101 | retCode := outputPlugin.AddRecord(&records, record, &timeStamp) 102 | actualData, err := outputPlugin.processRecord(record, len("testKey")) 103 | if err != nil { 104 | logrus.Errorf("[kinesis %d] %v\n", 
outputPlugin.PluginID, err) 105 | } 106 | 107 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected return code to be FLB_OK") 108 | assert.Len(t, records, 1, "Expected output to contain 1 record") 109 | assert.Len(t, actualData, 1024*1024-len("testKey"), "Expected length is less than 1MB") 110 | } 111 | 112 | func TestAddRecordAndFlush(t *testing.T) { 113 | records := make([]*kinesis.PutRecordsRequestEntry, 0, 500) 114 | 115 | record := map[interface{}]interface{}{ 116 | "testkey": []byte("test value"), 117 | } 118 | 119 | ctrl := gomock.NewController(t) 120 | defer ctrl.Finish() 121 | mockKinesis := mock_kinesis.NewMockPutRecordsClient(ctrl) 122 | 123 | mockKinesis.EXPECT().PutRecords(gomock.Any()).Return(&kinesis.PutRecordsOutput{ 124 | FailedRecordCount: aws.Int64(0), 125 | }, nil) 126 | 127 | outputPlugin, _ := newMockOutputPlugin(mockKinesis, false) 128 | 129 | timeStamp := time.Now() 130 | retCode := outputPlugin.AddRecord(&records, record, &timeStamp) 131 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected return code to be FLB_OK") 132 | 133 | retCode = outputPlugin.Flush(&records) 134 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected return code to be FLB_OK") 135 | } 136 | 137 | func TestAddRecordAndFlushAggregate(t *testing.T) { 138 | records := make([]*kinesis.PutRecordsRequestEntry, 0, 500) 139 | 140 | record := map[interface{}]interface{}{ 141 | "testkey": []byte("test value"), 142 | } 143 | 144 | ctrl := gomock.NewController(t) 145 | defer ctrl.Finish() 146 | mockKinesis := mock_kinesis.NewMockPutRecordsClient(ctrl) 147 | 148 | mockKinesis.EXPECT().PutRecords(gomock.Any()).Return(&kinesis.PutRecordsOutput{ 149 | FailedRecordCount: aws.Int64(0), 150 | }, nil) 151 | 152 | outputPlugin, _ := newMockOutputPlugin(mockKinesis, true) 153 | 154 | checkIsAggregate := outputPlugin.IsAggregate() 155 | assert.Equal(t, checkIsAggregate, true, "Expected IsAggregate() to return true") 156 | 157 | timeStamp := time.Now() 158 | retCode := outputPlugin.AddRecord(&records, record, &timeStamp) 159 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected AddRecord return code to be FLB_OK") 160 | 161 | retCode = outputPlugin.FlushAggregatedRecords(&records) 162 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected FlushAggregatedRecords return code to be FLB_OK") 163 | 164 | retCode = outputPlugin.Flush(&records) 165 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected Flush return code to be FLB_OK") 166 | } 167 | 168 | func TestAddRecordWithConcurrency(t *testing.T) { 169 | records := make([]*kinesis.PutRecordsRequestEntry, 0, 500) 170 | 171 | record := map[interface{}]interface{}{ 172 | "testkey": []byte("test value"), 173 | } 174 | 175 | ctrl := gomock.NewController(t) 176 | defer ctrl.Finish() 177 | mockKinesis := mock_kinesis.NewMockPutRecordsClient(ctrl) 178 | // Need to use synchronization to ensure goroutine completes before test method exits 179 | var wg sync.WaitGroup 180 | wg.Add(1) 181 | defer wg.Wait() 182 | 183 | mockKinesis.EXPECT().PutRecords(gomock.Any()).DoAndReturn( 184 | func(arg0 *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { 185 | wg.Done() 186 | return &kinesis.PutRecordsOutput{ 187 | FailedRecordCount: aws.Int64(0), 188 | }, nil 189 | }) 190 | 191 | outputPlugin, _ := newMockOutputPlugin(mockKinesis, false) 192 | // Enable concurrency 193 | outputPlugin.Concurrency = 2 194 | 195 | timeStamp := time.Now() 196 | retCode := outputPlugin.AddRecord(&records, record, &timeStamp) 197 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected AddRecord 
return code to be FLB_OK") 198 | 199 | retCode = outputPlugin.FlushConcurrent(len(records), records) 200 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected FlushConcurrent return code to be FLB_OK") 201 | } 202 | 203 | func TestAddRecordWithConcurrencyNoRetries(t *testing.T) { 204 | records := make([]*kinesis.PutRecordsRequestEntry, 0, 500) 205 | 206 | record := map[interface{}]interface{}{ 207 | "testkey": []byte("test value"), 208 | } 209 | 210 | ctrl := gomock.NewController(t) 211 | defer ctrl.Finish() 212 | mockKinesis := mock_kinesis.NewMockPutRecordsClient(ctrl) 213 | // Need to use synchronization to ensure goroutine completes before test method exits 214 | var wg sync.WaitGroup 215 | wg.Add(1) 216 | defer wg.Wait() 217 | 218 | mockKinesis.EXPECT().PutRecords(gomock.Any()).DoAndReturn( 219 | func(arg0 *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { 220 | wg.Done() 221 | return &kinesis.PutRecordsOutput{ 222 | FailedRecordCount: aws.Int64(0), 223 | }, nil 224 | }) 225 | 226 | outputPlugin, _ := newMockOutputPlugin(mockKinesis, false) 227 | // Enable concurrency but no retries 228 | outputPlugin.Concurrency = 2 229 | outputPlugin.concurrencyRetryLimit = 0 230 | 231 | timeStamp := time.Now() 232 | retCode := outputPlugin.AddRecord(&records, record, &timeStamp) 233 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected AddRecord return code to be FLB_OK") 234 | 235 | retCode = outputPlugin.FlushConcurrent(len(records), records) 236 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected FlushConcurrent return code to be FLB_OK") 237 | } 238 | 239 | var compressors = map[string]func([]byte) ([]byte, error){ 240 | "zlib": zlibCompress, 241 | "gzip": gzipCompress, 242 | } 243 | 244 | func TestCompression(t *testing.T) { 245 | 246 | testData := []byte("Test Data: This is test data for compression. 
This data is needs to have with some repetitive values, so compression is effective.") 247 | 248 | for z, f := range compressors { 249 | compressedBuf, err := f(testData) 250 | assert.Equalf(t, err, nil, "Expected successful %s compression of data", z) 251 | assert.Lessf(t, len(compressedBuf), len(testData), "%s compressed data buffer should contain fewer bytes", z) 252 | } 253 | } 254 | 255 | func TestCompressionEmpty(t *testing.T) { 256 | 257 | for z, f := range compressors { 258 | _, err := f(nil) 259 | assert.NotEqualf(t, err, nil, "%s compressing 'nil' data should return an error", z) 260 | } 261 | } 262 | 263 | var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") 264 | 265 | func RandStringRunes(n int) string { 266 | b := make([]rune, n) 267 | for i := range b { 268 | b[i] = letterRunes[rand.Intn(len(letterRunes))] 269 | } 270 | return string(b) 271 | } 272 | 273 | func TestCompressionTruncation(t *testing.T) { 274 | deftlvl := logrus.GetLevel(); 275 | logrus.SetLevel(0); 276 | 277 | rand.Seed(0) 278 | testData := []byte(RandStringRunes(4000)) 279 | testSuffix := "[truncate]" 280 | outputPlugin := OutputPlugin{ 281 | PluginID: 10, 282 | stream: "MyStream", 283 | } 284 | var compressedOutput, err = compressThenTruncate(gzipCompress, testData, 200, []byte(testSuffix), outputPlugin) 285 | assert.Nil(t, err) 286 | assert.GreaterOrEqual(t, len(compressedOutput), 150) 287 | assert.LessOrEqual(t, len(compressedOutput), 200) 288 | 289 | logrus.SetLevel(deftlvl) 290 | } 291 | 292 | func TestCompressionTruncationFailureA(t *testing.T) { 293 | deftlvl := logrus.GetLevel(); 294 | logrus.SetLevel(0); 295 | 296 | rand.Seed(0) 297 | testData := []byte(RandStringRunes(4000)) 298 | testSuffix := "[truncate]" 299 | outputPlugin := OutputPlugin{ 300 | PluginID: 10, 301 | stream: "MyStream", 302 | } 303 | var _, err = compressThenTruncate(gzipCompress, testData, 20, []byte(testSuffix), outputPlugin) 304 | assert.Contains(t, err.Error(), "no room for suffix") 305 | 306 | logrus.SetLevel(deftlvl) 307 | } 308 | 309 | func TestCompressionTruncationFailureB(t *testing.T) { 310 | deftlvl := logrus.GetLevel(); 311 | logrus.SetLevel(0); 312 | 313 | rand.Seed(0) 314 | testData := []byte{} 315 | testSuffix := "[truncate]" 316 | outputPlugin := OutputPlugin{ 317 | PluginID: 10, 318 | stream: "MyStream", 319 | } 320 | var _, err = compressThenTruncate(gzipCompress, testData, 5, []byte(testSuffix), outputPlugin) 321 | assert.Contains(t, err.Error(), "compressed empty to large") 322 | 323 | logrus.SetLevel(deftlvl) 324 | } 325 | 326 | func TestDotReplace(t *testing.T) { 327 | records := make([]*kinesis.PutRecordsRequestEntry, 0, 500) 328 | record := map[interface{}]interface{}{ 329 | "message.key": map[interface{}]interface{}{ 330 | "messagevalue": []byte("some.message"), 331 | "message.value/one": []byte("some message"), 332 | "message.value/two": []byte("some message"), 333 | }, 334 | "kubernetes": map[interface{}]interface{}{ 335 | "app": []byte("test app label"), 336 | "app.kubernetes.io/name": []byte("test key with dots"), 337 | }, 338 | } 339 | 340 | outputPlugin, _ := newMockOutputPlugin(nil, false) 341 | 342 | timeStamp := time.Now() 343 | retCode := outputPlugin.AddRecord(&records, record, &timeStamp) 344 | assert.Equal(t, retCode, fluentbit.FLB_OK, "Expected return code to be FLB_OK") 345 | assert.Len(t, records, 1, "Expected output to contain 1 record") 346 | 347 | data := records[0].Data 348 | 349 | var log map[string]map[string]interface{} 350 | json.Unmarshal(data, &log) 351 
| 352 | assert.Equal(t, "test app label", log["kubernetes"]["app"]) 353 | assert.Equal(t, "test key with dots", log["kubernetes"]["app-kubernetes-io/name"]) 354 | assert.Equal(t, "some.message", log["message-key"]["messagevalue"]) 355 | assert.Equal(t, "some message", log["message-key"]["message-value/one"]) 356 | assert.Equal(t, "some message", log["message-key"]["message-value/two"]) 357 | } 358 | 359 | func TestGetPartitionKey(t *testing.T) { 360 | record := map[interface{}]interface{}{ 361 | "testKey": []byte("test value with no nested keys"), 362 | "testKeyWithOneNestedKey": map[interface{}]interface{}{ 363 | "nestedKey": []byte("test value with one nested key"), 364 | }, 365 | "testKeyWithNestedKeys": map[interface{}]interface{}{ 366 | "outerKey": map[interface{}]interface{}{ 367 | "innerKey": []byte("test value with inner key"), 368 | }, 369 | }, 370 | } 371 | 372 | //test getPartitionKey() with single partition key 373 | outputPlugin, _ := newMockOutputPlugin(nil, false) 374 | outputPlugin.partitionKey = "testKey" 375 | value, hasValue := outputPlugin.getPartitionKey(record) 376 | assert.Equal(t, true, hasValue, "Should find value") 377 | assert.Equal(t, value, "test value with no nested keys") 378 | 379 | //test getPartitionKey() with nested partition key 380 | outputPlugin.partitionKey = "testKeyWithOneNestedKey->nestedKey" 381 | value, hasValue = outputPlugin.getPartitionKey(record) 382 | assert.Equal(t, true, hasValue, "Should find value") 383 | assert.Equal(t, value, "test value with one nested key") 384 | 385 | outputPlugin.partitionKey = "testKeyWithNestedKeys->outerKey->innerKey" 386 | value, hasValue = outputPlugin.getPartitionKey(record) 387 | assert.Equal(t, true, hasValue, "Should find value") 388 | assert.Equal(t, value, "test value with inner key") 389 | 390 | //test getPartitionKey() with partition key not found 391 | outputPlugin.partitionKey = "some key" 392 | value, hasValue = outputPlugin.getPartitionKey(record) 393 | assert.Equal(t, false, hasValue, "Should not find value") 394 | assert.Len(t, value, 0, "This should be an empty string") 395 | 396 | outputPlugin.partitionKey = "testKeyWithOneNestedKey" 397 | value, hasValue = outputPlugin.getPartitionKey(record) 398 | assert.Equal(t, false, hasValue, "Should not find value") 399 | assert.Len(t, value, 0, "This should be an empty string") 400 | 401 | outputPlugin.partitionKey = "testKeyWithOneNestedKey->someKey" 402 | value, hasValue = outputPlugin.getPartitionKey(record) 403 | assert.Equal(t, false, hasValue, "Should not find value") 404 | assert.Len(t, value, 0, "This should be an empty string") 405 | } 406 | -------------------------------------------------------------------------------- /kinesis/mock_kinesis/mock_kinesis.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | // not use this file except in compliance with the License. A copy of the 5 | // License is located at 6 | // 7 | // http://aws.amazon.com/apache2.0/ 8 | // 9 | // or in the "license" file accompanying this file. This file is distributed 10 | // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 | // express or implied. See the License for the specific language governing 12 | // permissions and limitations under the License. 13 | 14 | // Code generated by MockGen. DO NOT EDIT. 
15 | // Source: github.com/aws/amazon-kinesis-streams-for-fluent-bit/kinesis (interfaces: PutRecordsClient) 16 | 17 | // Package mock_kinesis is a generated GoMock package. 18 | package mock_kinesis 19 | 20 | import ( 21 | kinesis "github.com/aws/aws-sdk-go/service/kinesis" 22 | gomock "github.com/golang/mock/gomock" 23 | reflect "reflect" 24 | ) 25 | 26 | // MockPutRecordsClient is a mock of PutRecordsClient interface 27 | type MockPutRecordsClient struct { 28 | ctrl *gomock.Controller 29 | recorder *MockPutRecordsClientMockRecorder 30 | } 31 | 32 | // MockPutRecordsClientMockRecorder is the mock recorder for MockPutRecordsClient 33 | type MockPutRecordsClientMockRecorder struct { 34 | mock *MockPutRecordsClient 35 | } 36 | 37 | // NewMockPutRecordsClient creates a new mock instance 38 | func NewMockPutRecordsClient(ctrl *gomock.Controller) *MockPutRecordsClient { 39 | mock := &MockPutRecordsClient{ctrl: ctrl} 40 | mock.recorder = &MockPutRecordsClientMockRecorder{mock} 41 | return mock 42 | } 43 | 44 | // EXPECT returns an object that allows the caller to indicate expected use 45 | func (m *MockPutRecordsClient) EXPECT() *MockPutRecordsClientMockRecorder { 46 | return m.recorder 47 | } 48 | 49 | // PutRecords mocks base method 50 | func (m *MockPutRecordsClient) PutRecords(arg0 *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { 51 | m.ctrl.T.Helper() 52 | ret := m.ctrl.Call(m, "PutRecords", arg0) 53 | ret0, _ := ret[0].(*kinesis.PutRecordsOutput) 54 | ret1, _ := ret[1].(error) 55 | return ret0, ret1 56 | } 57 | 58 | // PutRecords indicates an expected call of PutRecords 59 | func (mr *MockPutRecordsClientMockRecorder) PutRecords(arg0 interface{}) *gomock.Call { 60 | mr.mock.ctrl.T.Helper() 61 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutRecords", reflect.TypeOf((*MockPutRecordsClient)(nil).PutRecords), arg0) 62 | } 63 | -------------------------------------------------------------------------------- /util/random.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "math/rand" 5 | "time" 6 | ) 7 | 8 | const ( 9 | partitionKeyCharset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 10 | ) 11 | 12 | type RandomStringGenerator struct { 13 | seededRandom *rand.Rand 14 | buffer []byte 15 | Size int 16 | } 17 | 18 | // Provides a generator of random strings of provided length 19 | // it uses the math/rand library 20 | func NewRandomStringGenerator(stringSize int) *RandomStringGenerator { 21 | 22 | return &RandomStringGenerator{ 23 | seededRandom: rand.New(rand.NewSource(time.Now().UnixNano())), 24 | buffer: make([]byte, stringSize), 25 | Size: stringSize, 26 | } 27 | } 28 | 29 | func (gen *RandomStringGenerator) RandomString() string { 30 | for i := range gen.buffer { 31 | gen.buffer[i] = partitionKeyCharset[gen.seededRandom.Intn(len(partitionKeyCharset))] 32 | } 33 | return string(gen.buffer) 34 | } 35 | --------------------------------------------------------------------------------