├── .circleci
│   └── config.yml
├── .github
│   ├── CODEOWNERS
│   ├── release.yml
│   └── workflows
│       ├── add-to-project-v2.yml
│       ├── apply-labels.yml
│       ├── stale.yml
│       └── validate-pr-title.yml
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── CONTRIBUTORS
├── Dockerfile
├── LICENSE
├── NOTICE
├── OSSMETADATA
├── README.md
├── RELEASING.md
├── SECURITY.md
├── SUPPORT.md
├── build-docker.sh
├── build-pkg.sh
├── cli
│   ├── cli.go
│   └── cli_test.go
├── go.mod
├── go.sum
├── kubernetes
│   └── rdslogs.yml
├── main.go
├── preinstall
├── publisher
│   └── publisher.go
├── rdslogs.conf
├── rdslogs.service
├── rdslogs.upstart
└── terraform
    └── rdslogs.tf

/.circleci/config.yml:
--------------------------------------------------------------------------------
 1 | version: 2.1
 2 | 
 3 | orbs:
 4 |   aws-cli: circleci/aws-cli@2.0.3
 5 |   docker: circleci/docker@1.3.0
 6 | 
 7 | executors:
 8 |   pkg:
 9 |     ## executor with fpm and rpmbuild
10 |     docker:
11 |       - image: alanfranz/fpm-within-docker:centos-8
12 | 
13 | platform_matrix: &platform_matrix
14 |   matrix:
15 |     parameters:
16 |       os: &oses ["linux", "darwin"]
17 |       arch: &arches ["amd64", "arm64", "arm", "386"]
18 |     exclude:
19 |       - os: darwin
20 |         arch: arm
21 |       - os: darwin
22 |         arch: "386"
23 | 
24 | jobs:
25 |   test:
26 |     docker:
27 |       - image: cimg/go:1.18
28 |     steps:
29 |       - checkout
30 |       - run: go test --timeout 10s -v ./...
31 | 
32 |   build_bins:
33 |     docker:
34 |       - image: cimg/go:1.18
35 |     parameters:
36 |       os:
37 |         description: Target operating system
38 |         type: enum
39 |         enum: *oses
40 |         default: "linux"
41 |       arch:
42 |         description: Target architecture
43 |         type: enum
44 |         enum: *arches
45 |         default: "amd64"
46 |     steps:
47 |       - checkout
48 |       - run: |
49 |           GOOS=<< parameters.os >> \
50 |           GOARCH=<< parameters.arch >> \
51 |           CGO_ENABLED=0 \
52 |           go build -ldflags "-X main.BuildID=${CIRCLE_TAG}" \
53 |           -o ~/binaries/rdslogs-<< parameters.os >>-<< parameters.arch >> \
54 |           .
55 |       - persist_to_workspace:
56 |           root: ~/
57 |           paths:
58 |             - binaries/rdslogs-<< parameters.os >>-<< parameters.arch >>
59 |       - store_artifacts:
60 |           path: binaries/rdslogs-<< parameters.os >>-<< parameters.arch >>
61 | 
62 |   ## We only have to build packages for linux, so we iterate architectures and build rpm and deb for each.
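  ## The fpm executor defined above supplies the deb/rpm packaging toolchain;
  ## each build_packages run below attaches the workspace and consumes the
  ## binaries that build_bins persisted.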
63 | build_packages: 64 | executor: pkg 65 | parameters: 66 | arch: 67 | description: Target architecture 68 | type: enum 69 | enum: *arches 70 | default: "amd64" 71 | steps: 72 | - attach_workspace: 73 | at: ~/ 74 | - checkout 75 | - run: ./build-pkg.sh -m << parameters.arch >> -v "${CIRCLE_TAG}" -t deb 76 | - run: ./build-pkg.sh -m << parameters.arch >> -v "${CIRCLE_TAG}" -t rpm 77 | - run: echo "finished building packages" && find ~/packages -ls 78 | - persist_to_workspace: 79 | root: ~/ 80 | paths: 81 | - packages/<< parameters.arch >>/* 82 | - store_artifacts: 83 | path: ~/packages/<< parameters.arch >> 84 | 85 | consolidate_artifacts: 86 | docker: 87 | - image: cimg/go:1.18 88 | steps: 89 | - attach_workspace: 90 | at: ~/ 91 | - run: cp -R ~/binaries ~/artifacts 92 | - run: find ~/packages -type f -print0 |xargs -0 -I {} cp {} ~/artifacts 93 | - persist_to_workspace: 94 | root: ~/ 95 | paths: 96 | - artifacts 97 | 98 | publish_github: 99 | docker: 100 | - image: cibuilds/github:0.13.0 101 | steps: 102 | - attach_workspace: 103 | at: ~/ 104 | - run: 105 | name: "Publish Release on GitHub" 106 | command: | 107 | echo "about to publish to tag ${CIRCLE_TAG}" 108 | ls -l ~/artifacts/* 109 | ghr -draft -n ${CIRCLE_TAG} -t ${GITHUB_TOKEN} -u ${CIRCLE_PROJECT_USERNAME} -r ${CIRCLE_PROJECT_REPONAME} -c ${CIRCLE_SHA1} ${CIRCLE_TAG} ~/artifacts 110 | 111 | publish_s3: 112 | executor: aws-cli/default 113 | steps: 114 | - attach_workspace: 115 | at: ~/ 116 | - aws-cli/install 117 | - aws-cli/setup: 118 | aws-access-key-id: AWS_ACCESS_KEY_ID 119 | aws-secret-access-key: AWS_SECRET_ACCESS_KEY 120 | aws-region: AWS_REGION 121 | - run: 122 | name: sync_s3_artifacts 123 | command: aws s3 sync ~/artifacts s3://honeycomb-builds/honeycombio/rdslogs/${CIRCLE_TAG}/ 124 | 125 | build_docker: 126 | docker: 127 | - image: cimg/go:1.18 128 | steps: 129 | - run: go install github.com/google/ko@latest 130 | - checkout 131 | - setup_remote_docker 132 | - run: 133 | name: build docker images and publish locally 134 | command: ./build-docker.sh 135 | 136 | publish_docker: 137 | docker: 138 | - image: cimg/go:1.18 139 | steps: 140 | - run: go install github.com/google/ko@latest 141 | - checkout 142 | - setup_remote_docker 143 | - run: 144 | name: build docker images and publish to Docker Hub 145 | environment: 146 | KO_DOCKER_REPO: honeycombio 147 | command: | 148 | echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin; 149 | ./build-docker.sh 150 | 151 | workflows: 152 | version: 2 153 | build: 154 | jobs: 155 | - test: 156 | filters: 157 | tags: 158 | only: /.*/ 159 | - build_bins: 160 | <<: *platform_matrix 161 | requires: 162 | - test 163 | filters: 164 | tags: 165 | only: /.*/ 166 | - build_packages: 167 | matrix: 168 | parameters: 169 | arch: *arches 170 | context: Honeycomb Secrets for Public Repos 171 | requires: 172 | - build_bins 173 | filters: 174 | tags: 175 | # allow tags that start with t so we can test builds without publishing 176 | only: /^[vt].*/ 177 | branches: 178 | ignore: /.*/ 179 | - build_docker: 180 | requires: 181 | - test 182 | filters: 183 | tags: 184 | only: /.*/ 185 | - consolidate_artifacts: 186 | requires: 187 | - build_packages 188 | filters: 189 | tags: 190 | only: /.*/ 191 | - publish_github: 192 | context: Honeycomb Secrets for Public Repos 193 | requires: 194 | - consolidate_artifacts 195 | filters: 196 | tags: 197 | only: /^v.*/ 198 | branches: 199 | ignore: /.*/ 200 | - publish_s3: 201 | context: Honeycomb Secrets for Public Repos 202 | requires: 203 
| - consolidate_artifacts 204 | filters: 205 | tags: 206 | only: /^v.*/ 207 | branches: 208 | ignore: /.*/ 209 | - publish_docker: 210 | context: Honeycomb Secrets for Public Repos 211 | requires: 212 | - build_docker 213 | filters: 214 | tags: 215 | only: /^v.*/ 216 | branches: 217 | ignore: /.*/ 218 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Code owners file. 2 | # This file controls who is tagged for review for any given pull request. 3 | 4 | # For anything not explicitly taken by someone else: 5 | * @honeycombio/telemetry-team 6 | -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1 | # .github/release.yml 2 | 3 | changelog: 4 | exclude: 5 | labels: 6 | - no-changelog 7 | categories: 8 | - title: 💥 Breaking Changes 💥 9 | labels: 10 | - "version: bump major" 11 | - breaking-change 12 | - title: 💡 Enhancements 13 | labels: 14 | - "type: enhancement" 15 | - title: 🐛 Fixes 16 | labels: 17 | - "type: bug" 18 | - title: 🛠 Maintenance 19 | labels: 20 | - "type: maintenance" 21 | - title: 🤷 Other Changes 22 | labels: 23 | - "*" -------------------------------------------------------------------------------- /.github/workflows/add-to-project-v2.yml: -------------------------------------------------------------------------------- 1 | name: Add to project 2 | on: 3 | issues: 4 | types: [opened] 5 | pull_request_target: 6 | types: [opened] 7 | jobs: 8 | add-to-project: 9 | runs-on: ubuntu-latest 10 | name: Add issues and PRs to project 11 | steps: 12 | - uses: actions/add-to-project@main 13 | with: 14 | project-url: https://github.com/orgs/honeycombio/projects/11 15 | github-token: ${{ secrets.GHPROJECTS_TOKEN }} 16 | -------------------------------------------------------------------------------- /.github/workflows/apply-labels.yml: -------------------------------------------------------------------------------- 1 | name: Apply project labels 2 | on: [issues, pull_request_target, label] 3 | jobs: 4 | apply-labels: 5 | runs-on: ubuntu-latest 6 | name: Apply common project labels 7 | steps: 8 | - uses: honeycombio/oss-management-actions/labels@v1 9 | with: 10 | github-token: ${{ secrets.GITHUB_TOKEN }} 11 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: 'Close stale issues and PRs' 2 | on: 3 | schedule: 4 | - cron: '30 1 * * *' 5 | 6 | jobs: 7 | stale: 8 | name: 'Close stale issues and PRs' 9 | runs-on: ubuntu-latest 10 | permissions: 11 | issues: write 12 | pull-requests: write 13 | 14 | steps: 15 | - uses: actions/stale@v4 16 | with: 17 | start-date: '2021-09-01T00:00:00Z' 18 | stale-issue-message: 'Marking this issue as stale because it has been open 14 days with no activity. Please add a comment if this is still an ongoing issue; otherwise this issue will be automatically closed in 7 days.' 19 | stale-pr-message: 'Marking this PR as stale because it has been open 30 days with no activity. Please add a comment if this PR is still relevant; otherwise this PR will be automatically closed in 7 days.' 20 | close-issue-message: 'Closing this issue due to inactivity. 
Please see our [Honeycomb OSS Lifecycle and Practices](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).'
21 |           close-pr-message: 'Closing this PR due to inactivity. Please see our [Honeycomb OSS Lifecycle and Practices](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).'
22 |           days-before-issue-stale: 14
23 |           days-before-pr-stale: 30
24 |           days-before-issue-close: 7
25 |           days-before-pr-close: 7
26 |           any-of-labels: 'status: info needed,status: revision needed'
27 | 
--------------------------------------------------------------------------------
/.github/workflows/validate-pr-title.yml:
--------------------------------------------------------------------------------
 1 | name: "Validate PR Title"
 2 | 
 3 | on:
 4 |   pull_request:
 5 |     types:
 6 |       - opened
 7 |       - edited
 8 |       - synchronize
 9 | 
10 | jobs:
11 |   main:
12 |     name: Validate PR title
13 |     runs-on: ubuntu-latest
14 |     steps:
15 |       - uses: amannn/action-semantic-pull-request@v5
16 |         id: lint_pr_title
17 |         name: "🤖 Check PR title follows conventional commit spec"
18 |         env:
19 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
20 |         with:
21 |           # Have to specify all types because `maint` and `rel` aren't defaults
22 |           types: |
23 |             maint
24 |             rel
25 |             fix
26 |             feat
27 |             chore
28 |             ci
29 |             docs
30 |             style
31 |             refactor
32 |             perf
33 |             test
34 |           ignoreLabels: |
35 |             "type: dependencies"
36 |       # When the previous step fails, the workflow stops. This condition lets
37 |       # execution continue so the populated error message can be posted.
38 |       - if: always() && (steps.lint_pr_title.outputs.error_message != null)
39 |         name: "📝 Add PR comment about using conventional commit spec"
40 |         uses: marocchino/sticky-pull-request-comment@v2
41 |         with:
42 |           header: pr-title-lint-error
43 |           message: |
44 |             Thank you for contributing to the project! 🎉
45 | 
46 |             We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted.
47 | 
48 |             Make sure to prepend with `feat:`, `fix:`, or another option in the list below.
49 | 
50 |             Once you update the title, this workflow will re-run automatically and validate the updated title.
51 | 52 | Details: 53 | 54 | ``` 55 | ${{ steps.lint_pr_title.outputs.error_message }} 56 | ``` 57 | 58 | # Delete a previous comment when the issue has been resolved 59 | - if: ${{ steps.lint_pr_title.outputs.error_message == null }} 60 | name: "❌ Delete PR comment after title has been updated" 61 | uses: marocchino/sticky-pull-request-comment@v2 62 | with: 63 | header: pr-title-lint-error 64 | delete: true 65 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | rdslogs 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # rdslogs changelog 2 | 3 | ## [1.135.1] - 2022-07-20 4 | 5 | ### Maintenance 6 | 7 | - Re-release to fix an OpenSSL CVE | [@kentquirk](https://github.com/kentquirk) 8 | 9 | ## [1.135.0] - 2022-05-17 10 | 11 | ### Fixes 12 | 13 | - Use session.NewSession when creating AWS CLI instance (#50) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) 14 | 15 | ### Maintenance 16 | 17 | - Modernize the CI pipeline (#37 - #49) | Many participants 18 | 19 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | This project has adopted the Honeycomb User Community Code of Conduct to clarify expected behavior in our community. 4 | 5 | https://www.honeycomb.io/honeycomb-user-community-code-of-conduct/ -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guide 2 | 3 | Please see our [general guide for OSS lifecycle and practices.](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) 4 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | rdslogs contributors: 2 | 3 | Ben Hartshorne 4 | Charity Majors 5 | Chris Toshok 6 | Christine Yen 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.13-alpine 2 | 3 | COPY . /go/src/github.com/honeycombio/rdslogs 4 | WORKDIR /go/src/github.com/honeycombio/rdslogs 5 | RUN apk update && apk add git 6 | RUN go get ./... 7 | RUN go install ./... 8 | 9 | FROM golang:1.9-alpine 10 | COPY --from=0 /go/bin/rdslogs /rdslogs 11 | ENTRYPOINT ["/rdslogs"] 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016-Present Honeycomb, Hound Technology, Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /OSSMETADATA: -------------------------------------------------------------------------------- 1 | osslifecycle=archived 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RDSLogs 2 | 3 | [![OSS Lifecycle](https://img.shields.io/osslifecycle/honeycombio/rdslogs)](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) 4 | 5 | **STATUS: this project is [archived](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).** 6 | 7 | Honeycomb recommends using [AWS Agentless Integrations](https://github.com/honeycombio/agentless-integrations-for-aws/#mysql--postgresql-rds-integration-for-cloudwatch-logs) to extract log data from RDS. 8 | 9 | 10 | `rdslogs` is a tool to download or stream log files from RDS. When streaming, you 11 | can choose to stream them to STDOUT or directly to Honeycomb. 12 | 13 | To learn more about using Honeycomb, see our [docs](https://honeycomb.io/docs) (and [RDS-specific docs](https://honeycomb.io/docs/connect/mysql/rds/)). 14 | 15 | The default action of `rdslogs` is to stream the current log file. Use the 16 | `--download` flag to download log files instead. 17 | 18 | The default output is STDOUT to see what's happening: 19 | 20 | ```sh 21 | rdslogs --region us-east-1 --identifier my-rds-database 22 | ``` 23 | 24 | To output the results directly to Honeycomb, use the `--output honeycomb` flag 25 | and include the `--writekey` and `--dataset` flags. Optionally, the 26 | `--sample_rate` flag will only send a portion of your traffic to Honeycomb. 27 | 28 | ```sh 29 | rdslogs --region us-east-1 --identifier my-rds-database --output honeycomb --writekey abcabc123123 --dataset "rds logs" 30 | ``` 31 | 32 | ## Deprecation Notice for MySQL, MariaDB, and Aurora 33 | 34 | `rdslogs` is deprecated for MySQL, MariaDB, and Aurora: please use Cloudwatch Logs combined with our [Agentless Integrations for AWS](https://github.com/honeycombio/agentless-integrations-for-aws#mysql-rds-integration-for-cloudwatch-logs). 
 35 | 
 36 | `rdslogs` relies on the RDS API to tail MySQL logs in real time. Due to a bug in the API, slow query logs can randomly "disappear" for long periods of time, leaving large gaps in your MySQL Dataset. Amazon has acknowledged the bug, but has no ETA, so we have deprecated this tool in favor of Cloudwatch Logs, which are more reliable.
 37 | 
 38 | ## Installation
 39 | 
 40 | `rdslogs` is available as a `.deb` or `.rpm` package from [`honeycombio`][hq];
 41 | see the [MySQL RDS][mysql-rds-download] or [PostgreSQL RDS][pg-rds-download]
 42 | integration documentation for links and command line instructions.
 43 | 
 44 | [hq]: https://honeycomb.io
 45 | [mysql-rds-download]: https://honeycomb.io/docs/getting-data-in/integrations/databases/mysql/rds/#download-the-rds-connector-rdslogs
 46 | [pg-rds-download]: https://honeycomb.io/docs/getting-data-in/integrations/databases/postgresql/rds/#download-the-rds-connector-rdslogs
 47 | 
 48 | When installed from a package, there is a config file at
 49 | `/etc/rdslogs/rdslogs.conf`. Instead of using the command line flags as
 50 | indicated in the previous section, edit the config file with the intended
 51 | values. After doing so, start the service with the standard `sudo initctl start
 52 | rdslogs` (upstart) or `sudo systemctl start rdslogs` (systemd) commands.
 53 | 
 54 | To build and install directly from source:
 55 | 
 56 | ```sh
 57 | go get github.com/honeycombio/rdslogs
 58 | ```
 59 | 
 60 | ## Usage
 61 | 
 62 | ```nil
 63 | $ rdslogs --help
 64 | Usage:
 65 |   rdslogs rdslogs --identifier my-rds-instance
 66 | 
 67 | rdslogs streams a log file from Amazon RDS and prints it to STDOUT or sends it
 68 | up to Honeycomb.io.
 69 | ```
 70 | 
 71 | ### AWS Requirements
 72 | 
 73 | AWS credentials are required and can be provided via IAM roles, AWS shared
 74 | config (`~/.aws/config`), AWS shared credentials (`~/.aws/credentials`), or
 75 | the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
 76 | Below is the minimal IAM policy needed by RDSLogs.
 77 | 
 78 | ```json
 79 | {
 80 |   "Version": "2012-10-17",
 81 |   "Statement": [
 82 |     {
 83 |       "Effect": "Allow",
 84 |       "Action": [
 85 |         "rds:DescribeDBInstances",
 86 |         "rds:DescribeDBLogFiles",
 87 |         "rds:DownloadDBLogFilePortion"
 88 |       ],
 89 |       "Resource": "*"
 90 |     }
 91 |   ]
 92 | }
 93 | ```
 94 | 
 95 | Passing `--download` triggers Download Mode, in which `rdslogs` will download the
 96 | specified logs to the directory specified by `--download_dir`. Logs are specified
 97 | via the `--log_file` flag, which names an active log file as well as the past 24
 98 | hours of rotated logs. (For example, specifying `--log_file=foo.log` will download
 99 | `foo.log` as well as `foo.log.0`, `foo.log.1`, ... `foo.log.23`.)
100 | 
101 | When `--output` is set to `honeycomb`, the `--writekey` and `--dataset` flags are
102 | required. Instead of being printed to STDOUT, database events from the log will
103 | be transmitted to Honeycomb. `--scrub_query` and `--sample_rate` also only apply to
104 | Honeycomb output.
105 | 
106 | ```nil
107 | Application Options:
108 |       --region=               AWS region to use (default: us-east-1)
109 |   -i, --identifier=           RDS instance identifier
110 |       --dbtype=               RDS database type. Accepted values are mysql and postgresql.
111 |                               (default: mysql)
112 |       --log_type=             Log file type. Accepted values are query and audit. Audit is
113 |                               currently only supported for mysql. (default: query)
114 |   -f, --log_file=             RDS log file to retrieve
115 |   -d, --download              Download old logs instead of tailing the current log
116 |       --download_dir=         directory into which log files are downloaded (default: ./)
117 |       --num_lines=            number of lines to request at a time from AWS. Larger number will
118 |                               be more efficient, smaller number will allow for longer lines
119 |                               (default: 10000)
120 |       --backoff_timer=        how many seconds to pause when rate limited by AWS. (default: 5)
121 |   -o, --output=               output for the logs: stdout or honeycomb (default: stdout)
122 |       --writekey=             Team write key, when output is honeycomb
123 |       --dataset=              Name of the dataset, when output is honeycomb
124 |       --api_host=             Hostname for the Honeycomb API server (default:
125 |                               https://api.honeycomb.io/)
126 |       --scrub_query           Replaces the query field with a one-way hash of the contents
127 |       --sample_rate=          Only send 1 / N log lines (default: 1)
128 |   -a, --add_field=            Extra fields to send in request, in the style of "field:value"
129 |   -v, --version               Output the current version and exit
130 |   -c, --config=               config file
131 |       --write_default_config  Write a default config file to STDOUT
132 |       --debug                 turn on debugging output
133 | 
134 | Help Options:
135 |   -h, --help                  Show this help message
136 | ```
--------------------------------------------------------------------------------
/RELEASING.md:
--------------------------------------------------------------------------------
1 | # Releasing Process
2 | 
3 | 1. Add a release entry to the [changelog](./CHANGELOG.md)
4 | 2. Open a PR with the above, and merge that into main
5 | 3. Create a new tag on the merged commit with the new version (e.g. `v0.2.1`)
6 | 4. Push the tag upstream (this will kick off the release pipeline in CI)
7 | 5. Copy the changelog entry for the newest version into the draft GitHub release created as part of the CI publish steps
8 | 
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Reporting Security Issues
2 | 
3 | If you discover a security vulnerability, please open an issue with the label `type: security`.
4 | 
--------------------------------------------------------------------------------
/SUPPORT.md:
--------------------------------------------------------------------------------
1 | # How to Get Help
2 | 
3 | This project uses GitHub issues to track bugs, feature requests, and questions about using the project. Please search for existing issues before filing a new one.
4 | 
--------------------------------------------------------------------------------
/build-docker.sh:
--------------------------------------------------------------------------------
 1 | set -o nounset
 2 | set -o pipefail
 3 | set -o xtrace
 4 | 
 5 | TAGS="latest"
 6 | VERSION="dev"
 7 | if [[ -n ${CIRCLE_TAG:-} ]]; then
 8 |   # trim 'v' prefix if present
 9 |   VERSION=${CIRCLE_TAG#"v"}
10 |   # append version to image tags
11 |   TAGS+=",$VERSION"
12 | fi
13 | 
14 | unset GOOS
15 | unset GOARCH
16 | export KO_DOCKER_REPO=${KO_DOCKER_REPO:-ko.local}
17 | export GOFLAGS="-ldflags=-X=main.BuildID=$VERSION"
18 | export SOURCE_DATE_EPOCH=$(date +%s)
19 | # shellcheck disable=SC2086
20 | ko publish \
21 |   --tags "${TAGS}" \
22 |   --base-import-paths \
23 |   --platform "linux/amd64,linux/arm64" \
24 |   .
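# When CIRCLE_TAG is unset (e.g. local runs), KO_DOCKER_REPO falls back to
# ko.local above, so ko loads the images into the local Docker daemon instead
# of pushing them to a remote registry.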
25 | 
--------------------------------------------------------------------------------
/build-pkg.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # Build deb or rpm packages for rdslogs.
 4 | set -e
 5 | 
 6 | function usage() {
 7 |     echo "Usage: build-pkg.sh -v <version> -t <pkg_type> -m <arch>"
 8 |     exit 2
 9 | }
10 | 
11 | while getopts "v:t:m:" opt; do
12 |     case "$opt" in
13 |     v)
14 |         version=$OPTARG
15 |         ;;
16 |     t)
17 |         pkg_type=$OPTARG
18 |         ;;
19 |     m)
20 |         arch=$OPTARG
21 |         ;;
22 |     esac
23 | done
24 | 
25 | if [ -z "$version" ] || [ -z "$pkg_type" ] || [ -z "$arch" ]; then
26 |     usage
27 | fi
28 | 
29 | PACKAGE_DIR=~/packages/${arch}
30 | mkdir -p ${PACKAGE_DIR}
31 | fpm -s dir -n rdslogs \
32 |     -m "Honeycomb " \
33 |     -p ${PACKAGE_DIR} \
34 |     -v $version \
35 |     -t $pkg_type \
36 |     -a $arch \
37 |     --pre-install=./preinstall \
38 |     ~/binaries/rdslogs-linux-${arch}=/usr/bin/rdslogs \
39 |     ./rdslogs.upstart=/etc/init/rdslogs.conf \
40 |     ./rdslogs.service=/lib/systemd/system/rdslogs.service \
41 |     ./rdslogs.conf=/etc/rdslogs/rdslogs.conf
42 | 
--------------------------------------------------------------------------------
/cli/cli.go:
--------------------------------------------------------------------------------
  1 | package cli
  2 | 
  3 | import (
  4 |     "errors"
  5 |     "fmt"
  6 |     "io"
  7 |     "os"
  8 |     "path"
  9 |     "sort"
 10 |     "strconv"
 11 |     "strings"
 12 |     "time"
 13 | 
 14 |     "github.com/aws/aws-sdk-go/aws"
 15 |     "github.com/aws/aws-sdk-go/service/rds"
 16 |     "github.com/honeycombio/honeytail/parsers"
 17 |     "github.com/honeycombio/honeytail/parsers/csv"
 18 |     "github.com/honeycombio/honeytail/parsers/mysql"
 19 |     "github.com/honeycombio/honeytail/parsers/postgresql"
 20 |     "github.com/honeycombio/rdslogs/publisher"
 21 |     "github.com/sirupsen/logrus"
 22 | )
 23 | 
 24 | // Fortunately for us, the RDS team has diligently ignored requests to make
 25 | // RDS Postgres's `log_line_prefix` customizable for years
 26 | // (https://forums.aws.amazon.com/thread.jspa?threadID=143460).
 27 | // So we can hard-code this prefix format for Postgres log lines.
 28 | const rdsPostgresLinePrefix = "%t:%r:%u@%d:[%p]:"
 29 | 
 30 | const DBTypePostgreSQL = "postgresql"
 31 | const DBTypeMySQL = "mysql"
 32 | 
 33 | const LogTypeQuery = "query"
 34 | const LogTypeAudit = "audit"
 35 | 
 36 | // Options contains all the CLI flags
 37 | type Options struct {
 38 |     Region             string            `long:"region" description:"AWS region to use" default:"us-east-1"`
 39 |     InstanceIdentifier string            `short:"i" long:"identifier" description:"RDS instance identifier"`
 40 |     DBType             string            `long:"dbtype" description:"RDS database type. Accepted values are mysql and postgresql." default:"mysql"`
 41 |     LogType            string            `long:"log_type" description:"Log file type. Accepted values are query and audit. Audit is currently only supported for mysql." default:"query"`
 42 |     LogFile            string            `short:"f" long:"log_file" description:"RDS log file to retrieve"`
 43 |     Download           bool              `short:"d" long:"download" description:"Download old logs instead of tailing the current log"`
 44 |     DownloadDir        string            `long:"download_dir" description:"directory into which log files are downloaded" default:"./"`
 45 |     NumLines           int64             `long:"num_lines" description:"number of lines to request at a time from AWS. Larger number will be more efficient, smaller number will allow for longer lines" default:"10000"`
 46 |     BackoffTimer       int64             `long:"backoff_timer" description:"how many seconds to pause when rate limited by AWS." default:"5"`
 47 |     Output             string            `short:"o" long:"output" description:"output for the logs: stdout or honeycomb" default:"stdout"`
 48 |     WriteKey           string            `long:"writekey" description:"Team write key, when output is honeycomb"`
 49 |     Dataset            string            `long:"dataset" description:"Name of the dataset, when output is honeycomb"`
 50 |     APIHost            string            `long:"api_host" description:"Hostname for the Honeycomb API server" default:"https://api.honeycomb.io/"`
 51 |     ScrubQuery         bool              `long:"scrub_query" description:"Replaces the query field with a one-way hash of the contents"`
 52 |     SampleRate         int               `long:"sample_rate" description:"Only send 1 / N log lines" default:"1"`
 53 |     AddFields          map[string]string `short:"a" long:"add_field" description:"Extra fields to send in request, in the style of \"field:value\""`
 54 |     NumParsers         int               `long:"num_parsers" default:"4" description:"Number of parsers to spin up. Currently only supported for the mysql parser."`
 55 | 
 56 |     Version            bool   `short:"v" long:"version" description:"Output the current version and exit"`
 57 |     ConfigFile         string `short:"c" long:"config" description:"config file" no-ini:"true"`
 58 |     WriteDefaultConfig bool   `long:"write_default_config" description:"Write a default config file to STDOUT" no-ini:"true"`
 59 |     Debug              bool   `long:"debug" description:"turn on debugging output"`
 60 | }
 61 | 
 62 | // Usage info for --help
 63 | var Usage = `rdslogs --identifier my-rds-instance
 64 | 
 65 | rdslogs streams a log file from Amazon RDS and prints it to STDOUT or sends it
 66 | up to Honeycomb.io.
 67 | 
 68 | AWS credentials are required and can be provided via IAM roles, AWS shared
 69 | config (~/.aws/config), AWS shared credentials (~/.aws/credentials), or
 70 | the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
 71 | 
 72 | Passing --download triggers Download Mode, in which rdslogs will download the
 73 | specified logs to the directory specified by --download_dir. Logs are specified
 74 | via the --log_file flag, which names an active log file as well as the past 24
 75 | hours of rotated logs. (For example, specifying --log_file=foo.log will download
 76 | foo.log as well as foo.log.0, foo.log.1, ... foo.log.23.)
 77 | 
 78 | When --output is set to "honeycomb", the --writekey and --dataset flags are
 79 | required. Instead of being printed to STDOUT, database events from the log will
 80 | be transmitted to Honeycomb. --scrub_query and --sample_rate also only apply to
 81 | honeycomb output.
 82 | `
 83 | 
 84 | // CLI contains handles to the provided Options + aws.RDS struct
 85 | type CLI struct {
 86 |     // Options is for command line options
 87 |     Options *Options
 88 |     // RDS is an initialized session connected to RDS
 89 |     RDS *rds.RDS
 90 |     // Abort carries a true message when we catch CTRL-C so we can clean up
 91 |     Abort chan bool
 92 | 
 93 |     // target to which to send output
 94 |     output publisher.Publisher
 95 |     // allow changing the time for tests
 96 |     fakeNower Nower
 97 | }
 98 | 
 99 | // Stream polls the RDS log endpoint forever to effectively tail the logs and
100 | // spits them out to either stdout or to Honeycomb.
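// Rate limiting from AWS is handled by sleeping Options.BackoffTimer seconds
// before retrying; binary data in the log is skipped past, and files missing
// mid-rotation are retried after a short wait.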
101 | func (c *CLI) Stream() error { 102 | // make sure we have a valid log file from which to stream 103 | latestFile, err := c.GetLatestLogFile() 104 | if err != nil { 105 | return err 106 | } 107 | // create the chosen output publisher target 108 | if c.Options.Output == "stdout" { 109 | c.output = &publisher.STDOUTPublisher{} 110 | } else { 111 | var parser parsers.Parser 112 | if c.Options.DBType == DBTypeMySQL && c.Options.LogType == LogTypeQuery { 113 | parser = &mysql.Parser{} 114 | parser.Init(&mysql.Options{NumParsers: c.Options.NumParsers}) 115 | } else if c.Options.DBType == DBTypeMySQL && c.Options.LogType == LogTypeAudit { 116 | parser = &csv.Parser{} 117 | parser.Init(&csv.Options{ 118 | Fields: "time,hostname,user,source_addr,connection_id,query_id,event_type,database,query,error_code", 119 | NumParsers: c.Options.NumParsers, 120 | TimeFieldName: "time", 121 | TimeFieldFormat: "20060102 15:04:05", 122 | }) 123 | } else if c.Options.DBType == DBTypePostgreSQL { 124 | parser = &postgresql.Parser{} 125 | parser.Init(&postgresql.Options{LogLinePrefix: rdsPostgresLinePrefix}) 126 | } else { 127 | return fmt.Errorf( 128 | "Unsupported (dbtype, log_type) pair (`%s`,`%s`)", 129 | c.Options.DBType, c.Options.LogType) 130 | } 131 | 132 | pub := &publisher.HoneycombPublisher{ 133 | Writekey: c.Options.WriteKey, 134 | Dataset: c.Options.Dataset, 135 | APIHost: c.Options.APIHost, 136 | ScrubQuery: c.Options.ScrubQuery, 137 | SampleRate: c.Options.SampleRate, 138 | AddFields: c.Options.AddFields, 139 | Parser: parser, 140 | } 141 | defer pub.Close() 142 | c.output = pub 143 | } 144 | 145 | // forever, download the most recent entries 146 | sPos := StreamPos{ 147 | logFile: LogFile{LogFileName: latestFile.LogFileName}, 148 | } 149 | // for mysql audit logs, we always want the first logfile, which may not 150 | // show up in GetLatestLogFiles if rdslogs started mid-rotation 151 | if c.Options.DBType == DBTypeMySQL && c.Options.LogType == LogTypeAudit { 152 | sPos.logFile.LogFileName = c.Options.LogFile 153 | } 154 | for { 155 | // check for signal triggered exit 156 | select { 157 | case <-c.Abort: 158 | return fmt.Errorf("signal triggered exit") 159 | default: 160 | } 161 | 162 | // get recent log entries 163 | resp, err := c.getRecentEntries(sPos) 164 | if err != nil { 165 | if strings.HasPrefix(err.Error(), "Throttling: Rate exceeded") { 166 | logrus.Infof("AWS Rate limit hit; sleeping for %d seconds.\n", c.Options.BackoffTimer) 167 | c.waitFor(time.Duration(c.Options.BackoffTimer) * time.Second) 168 | continue 169 | } 170 | if strings.HasPrefix(err.Error(), "InvalidParameterValue: This file contains binary data") { 171 | logrus.Infof("binary data at marker %s, skipping 1000 in marker position\n", sPos.marker) 172 | // skip over inaccessible data 173 | newMarker, err := sPos.Add(1000) 174 | if err != nil { 175 | return err 176 | } 177 | sPos.marker = newMarker 178 | continue 179 | } 180 | if strings.HasPrefix(err.Error(), "DBLogFileNotFoundFault") { 181 | logrus.WithError(err). 182 | Warn("log does not appear to exist (rotation ongoing?) - waiting and retrying") 183 | c.waitFor(time.Second * 5) 184 | continue 185 | } 186 | return err 187 | } 188 | if resp.LogFileData != nil { 189 | c.output.Write(*resp.LogFileData) 190 | } 191 | if c.Options.DBType == DBTypeMySQL && c.Options.LogType == LogTypeAudit { 192 | // The MariaDB audit plugin rotates based on size, not time. 
If no data 193 | // is being returned, it may have been rotated, or maybe the db is just 194 | // very quiet and nothing is being logged. We'll have to inspect 195 | // the log sizes to be sure 196 | 197 | // If we reset our marker, asked for logs, and got an empty marker back, 198 | // we don't have anything to do but wait 199 | if sPos.marker == "0" && (resp.Marker != nil && *resp.Marker == "") { 200 | c.waitFor(time.Second * 5) 201 | continue 202 | } 203 | 204 | // Two scenarios can occur during log rotation, depending on timing 205 | // - the file doesn't exist because rotation is ongoing, in which case 206 | // AdditionalDataPending will be false, marker will be "", and LogFileData will be nil 207 | // - the file exists but it's been rotated, meaning our marker is wrong and doesn't point 208 | // at a valid position - in this case, RDS will return the marker back to us with "" 209 | // for logfile data 210 | // In either scenario, we need to check for a new file. When we're sure there's a new file, 211 | // reset the marker 212 | if (resp.Marker != nil && resp.LogFileData != nil && sPos.marker == *resp.Marker) || 213 | !*resp.AdditionalDataPending && resp.LogFileData == nil { 214 | newestFile, err := c.GetLatestLogFile() 215 | if err != nil { 216 | return err 217 | } 218 | 219 | // If the latest log file doesn't match the first log file (i.e 220 | // server_audit.log.1 exists but not server_audit.log) we're in the 221 | // middle of a rotation, so let's wait 222 | if newestFile.LogFileName != sPos.logFile.LogFileName { 223 | logrus.WithFields(logrus.Fields{ 224 | "expectedFile": sPos.logFile.LogFileName, 225 | "newestFile": newestFile.LogFileName, 226 | }).Info("newest file is a rotated file, we appear to be mid-rotation") 227 | c.waitFor(time.Second * 5) 228 | continue 229 | } 230 | 231 | // ok there's a server_audit.log file out there 232 | // check the current position of the last read (this appears to be in bytes) 233 | splitMarker := strings.Split(sPos.marker, ":") 234 | if len(splitMarker) != 2 { 235 | // something's wrong. marker should have been #:# 236 | logrus.WithField("marker", sPos.marker). 237 | Warn("marker didn't split into two pieces across a colon") 238 | continue 239 | } 240 | offset, _ := strconv.Atoi(splitMarker[1]) 241 | 242 | // if our last position is greater in size than the current file 243 | // a rotation has probably occurred and we can reset the marker 244 | if int64(offset) > newestFile.Size { 245 | logrus.WithFields(logrus.Fields{ 246 | "currentOffset": offset, 247 | "newFileSize": newestFile.Size, 248 | }).Info("last marker offset exceeds newest file size, resetting marker to 0") 249 | sPos.marker = "0" 250 | continue 251 | } 252 | } 253 | } 254 | 255 | if !*resp.AdditionalDataPending || (resp.Marker != nil && *resp.Marker == "0") { 256 | if c.Options.DBType == DBTypePostgreSQL { 257 | // If that's all we've got for now, see if there's a newer file to 258 | // start tailing. This logic is only relevant for postgres: the 259 | // newest postgres log file will be named 260 | // error/postgresql.log.YYYY-MM-DD, 261 | // but the newest mysql log 262 | // will always be named 263 | // slowquery/mysql-slowquery.log. 
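// (For mysql there is no newer file to look for, so when we run out of data
// we simply wait below and poll the same file again.)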
264 | newestFile, err := c.GetLatestLogFile() 265 | if err != nil { 266 | return err 267 | } 268 | if newestFile.LogFileName != sPos.logFile.LogFileName { 269 | logrus.WithFields(logrus.Fields{ 270 | "oldFile": sPos.logFile.LogFileName, 271 | "newFile": newestFile.LogFileName}).Info("Found newer file") 272 | sPos = StreamPos{logFile: LogFile{LogFileName: newestFile.LogFileName}} 273 | continue 274 | } 275 | } 276 | // Wait for a few seconds and try again. 277 | c.waitFor(5 * time.Second) 278 | } 279 | newMarker := c.getNextMarker(sPos, resp) 280 | logrus.WithFields(logrus.Fields{ 281 | "prevMarker": sPos.marker, 282 | "newMarker": newMarker, 283 | "file": sPos.logFile.LogFileName}).Info("Got new marker") 284 | sPos.marker = newMarker 285 | } 286 | } 287 | 288 | // getNextMarker takes in to account the current and next reported markers and 289 | // decides whether to believe the resp.Marker or calculate its own next marker. 290 | func (c *CLI) getNextMarker(sPos StreamPos, resp *rds.DownloadDBLogFilePortionOutput) string { 291 | // if resp is nil, we're up a creek and should return sPos' marker, but at 292 | // least we shouldn't try and dereference it and panic. 293 | if resp == nil { 294 | logrus.Warn("resp was nil, returning previous marker") 295 | return sPos.marker 296 | } 297 | if resp.Marker == nil { 298 | logrus.Warn("resp marker is nil, returning previous marker") 299 | return sPos.marker 300 | } 301 | 302 | // when we get to the end of a log segment, the marker in resp is "0". 303 | // if it's not "0", we should trust it's correct and use it. 304 | if *resp.Marker != "0" { 305 | return *resp.Marker 306 | } 307 | // ok, we've hit the end of a segment, but did we get any data? If we got 308 | // data, then it's not really the end of the segment and we should calculate a 309 | // new marker and use that. 310 | if resp.LogFileData != nil && len(*resp.LogFileData) != 0 { 311 | newMarkerStr, err := sPos.Add(len(*resp.LogFileData)) 312 | if err != nil { 313 | logrus.WithError(err). 314 | Warn("failed to get next marker. Reverting to no marker.") 315 | return "0" 316 | } 317 | return newMarkerStr 318 | } 319 | // we hit the end of a segment but we didn't get any data. we should try again 320 | // during the 00-05 minutes past the hour time, and roll over once we get to 6 321 | // minutes past the hour 322 | var now time.Time 323 | if c.fakeNower != nil { 324 | now = c.fakeNower.Now().UTC() 325 | } else { 326 | now = time.Now().UTC() 327 | } 328 | curMin, _ := strconv.Atoi(now.Format("04")) 329 | if curMin > 5 { 330 | logrus.WithField("newMarker", *resp.Marker). 331 | Infof("no log data received but it's %d minutes (> 5) past "+ 332 | "the hour, returning resp marker", curMin) 333 | return *resp.Marker 334 | } 335 | logrus.WithField("prevMarker", sPos.marker). 336 | Infof("no log data received but it's %d minutes (< 5) past "+ 337 | "the hour, returning previous marker", curMin) 338 | // let's try again from where we did the last time. 339 | return sPos.marker 340 | } 341 | 342 | // StreamPos represents a log file and marker combination 343 | type StreamPos struct { 344 | logFile LogFile 345 | marker string 346 | } 347 | 348 | // Add returns a new marker string that is the current marker + dataLen offset 349 | func (s *StreamPos) Add(dataLen int) (string, error) { 350 | splitMarker := strings.Split(s.marker, ":") 351 | if len(splitMarker) != 2 { 352 | // something's wrong. 
marker should have been #:# 353 | // TODO provide a better value 354 | return "", fmt.Errorf("marker didn't split into two pieces across a colon") 355 | } 356 | mHour, _ := strconv.Atoi(splitMarker[0]) 357 | mOffset, _ := strconv.Atoi(splitMarker[1]) 358 | mOffset += dataLen 359 | return fmt.Sprintf("%d:%d", mHour, mOffset), nil 360 | } 361 | 362 | // getRecentEntries fetches the most recent lines from the log file, starting 363 | // from marker or the end of the file if marker is nil 364 | // returns the downloaded data 365 | func (c *CLI) getRecentEntries(sPos StreamPos) (*rds.DownloadDBLogFilePortionOutput, error) { 366 | params := &rds.DownloadDBLogFilePortionInput{ 367 | DBInstanceIdentifier: aws.String(c.Options.InstanceIdentifier), 368 | LogFileName: aws.String(sPos.logFile.LogFileName), 369 | NumberOfLines: aws.Int64(c.Options.NumLines), 370 | } 371 | // if we have a marker, download from there. otherwise get the most recent line 372 | if sPos.marker != "" { 373 | params.Marker = &sPos.marker 374 | } else { 375 | params.NumberOfLines = aws.Int64(1) 376 | } 377 | return c.RDS.DownloadDBLogFilePortion(params) 378 | } 379 | 380 | // Download downloads RDS logs and reads them all in 381 | func (c *CLI) Download() error { 382 | // get a list of RDS instances, return the one to use. 383 | // if one's user supplied, verify it exists. 384 | // if not user supplied and there's only one, use that 385 | // else ask 386 | logFiles, err := c.GetLogFiles() 387 | if err != nil { 388 | return err 389 | } 390 | 391 | logFiles, err = c.DownloadLogFiles(logFiles) 392 | if err != nil { 393 | fmt.Println("Error downloading log files:") 394 | return err 395 | } 396 | 397 | return nil 398 | } 399 | 400 | // LogFile wraps the returned structure from AWS 401 | // "Size": 2196, 402 | // "LogFileName": "slowquery/mysql-slowquery.log.7", 403 | // "LastWritten": 1474959300000 404 | type LogFile struct { 405 | Size int64 // in bytes? 406 | LogFileName string 407 | LastWritten int64 // arrives as msec since epoch 408 | LastWrittenTime time.Time 409 | Path string 410 | } 411 | 412 | func (l *LogFile) String() string { 413 | return fmt.Sprintf("%-35s (date: %s, size: %d)", l.LogFileName, l.LastWrittenTime, l.Size) 414 | } 415 | 416 | // DownloadLogFiles returns a new copy of the logFile list because it mutates the contents. 417 | func (c *CLI) DownloadLogFiles(logFiles []LogFile) ([]LogFile, error) { 418 | logrus.Infof("Downloading log files to %s\n", c.Options.DownloadDir) 419 | downloadedLogFiles := make([]LogFile, 0, len(logFiles)) 420 | for i := range logFiles { 421 | // returned logFile has a modified Path 422 | logFile, err := c.downloadFile(logFiles[i]) 423 | if err != nil { 424 | return nil, err 425 | } 426 | downloadedLogFiles = append(downloadedLogFiles, logFile) 427 | } 428 | return downloadedLogFiles, nil 429 | } 430 | 431 | // downloadFile fetches an individual log file. Note that AWS's RDS 432 | // DownloadDBLogFilePortion only returns 1MB at a time, and we have to manually 433 | // paginate it ourselves. 434 | func (c *CLI) downloadFile(logFile LogFile) (LogFile, error) { 435 | // open the out file for writing 436 | logFile.Path = path.Join(c.Options.DownloadDir, path.Base(logFile.LogFileName)) 437 | fmt.Printf("Downloading %s to %s ... 
", logFile.LogFileName, logFile.Path) 438 | defer fmt.Printf("done\n") 439 | if err := os.MkdirAll(path.Dir(logFile.Path), os.ModePerm); err != nil { 440 | return logFile, err 441 | } 442 | outfile, err := os.Create(logFile.Path) 443 | if err != nil { 444 | return logFile, err 445 | } 446 | defer outfile.Close() 447 | 448 | resp := &rds.DownloadDBLogFilePortionOutput{ 449 | AdditionalDataPending: aws.Bool(true), 450 | Marker: aws.String("0"), 451 | } 452 | params := &rds.DownloadDBLogFilePortionInput{ 453 | DBInstanceIdentifier: aws.String(c.Options.InstanceIdentifier), 454 | LogFileName: aws.String(logFile.LogFileName), 455 | } 456 | for aws.BoolValue(resp.AdditionalDataPending) { 457 | // check for signal triggered exit 458 | select { 459 | case <-c.Abort: 460 | return logFile, fmt.Errorf("signal triggered exit") 461 | default: 462 | } 463 | params.Marker = resp.Marker // support pagination 464 | resp, err = c.RDS.DownloadDBLogFilePortion(params) 465 | if err != nil { 466 | return logFile, err 467 | } 468 | if _, err := io.WriteString(outfile, aws.StringValue(resp.LogFileData)); err != nil { 469 | return logFile, err 470 | } 471 | } 472 | return logFile, nil 473 | } 474 | 475 | // GetLogFiles returns a list of all log files based on the Options.LogFile pattern 476 | func (c *CLI) GetLogFiles() ([]LogFile, error) { 477 | // get a list of all log files. 478 | // prune the list so that the log file option is the prefix for all remaining files 479 | // return the list of as-yet unread files 480 | logFiles, err := c.getListRDSLogFiles() 481 | if err != nil { 482 | return nil, err 483 | } 484 | 485 | var matchingLogFiles []LogFile 486 | for _, lf := range logFiles { 487 | if strings.HasPrefix(lf.LogFileName, c.Options.LogFile) { 488 | matchingLogFiles = append(matchingLogFiles, lf) 489 | } 490 | } 491 | // matchingLogFiles now contains a list of eligible log files, 492 | // eg slow.log, slow.log.1, slow.log.2, etc. 493 | 494 | if len(matchingLogFiles) == 0 { 495 | errParts := []string{"No log file with the given prefix found. Available log files:"} 496 | 497 | for _, lf := range logFiles { 498 | errParts = append(errParts, fmt.Sprint("\t", lf.String())) 499 | } 500 | return nil, fmt.Errorf(strings.Join(errParts, "\n")) 501 | 502 | } 503 | 504 | return matchingLogFiles, nil 505 | } 506 | 507 | func (c *CLI) GetLatestLogFile() (LogFile, error) { 508 | logFiles, err := c.GetLogFiles() 509 | if err != nil { 510 | return LogFile{}, err 511 | } 512 | 513 | if len(logFiles) == 0 { 514 | return LogFile{}, errors.New("No log files found") 515 | } 516 | 517 | sort.SliceStable(logFiles, func(i, j int) bool { return logFiles[i].LastWritten < logFiles[j].LastWritten }) 518 | return logFiles[len(logFiles)-1], nil 519 | } 520 | 521 | // Gets a list of all available RDS log files for an instance. 
522 | func (c *CLI) getListRDSLogFiles() ([]LogFile, error) { 523 | var output *rds.DescribeDBLogFilesOutput 524 | var err error 525 | var logFiles []LogFile 526 | 527 | for { 528 | if output == nil { 529 | output, err = c.RDS.DescribeDBLogFiles(&rds.DescribeDBLogFilesInput{ 530 | DBInstanceIdentifier: &c.Options.InstanceIdentifier, 531 | }) 532 | // (appending to the nil logFiles slice is fine; preallocating here would dereference output before the error check below) 533 | } else { 534 | output, err = c.RDS.DescribeDBLogFiles(&rds.DescribeDBLogFilesInput{ 535 | DBInstanceIdentifier: &c.Options.InstanceIdentifier, 536 | Marker: output.Marker, 537 | }) 538 | } 539 | if err != nil { 540 | return nil, err 541 | } 542 | 543 | // assign go timestamp from msec epoch time, rebuild as a list 544 | for _, lf := range output.DescribeDBLogFiles { 545 | logFiles = append(logFiles, LogFile{ 546 | LastWritten: *lf.LastWritten, 547 | LastWrittenTime: time.Unix(*lf.LastWritten/1000, 0), 548 | LogFileName: *lf.LogFileName, 549 | Size: *lf.Size, 550 | }) 551 | } 552 | if output.Marker == nil { 553 | break 554 | } 555 | } 556 | 557 | return logFiles, nil 558 | } 559 | 560 | // ValidateRDSInstance validates that you have a valid RDS instance to talk to. 561 | // If an instance isn't specified and your credentials contain more than one RDS 562 | // instance, asks you to specify which instance you'd like to use. 563 | func (c *CLI) ValidateRDSInstance() error { 564 | rdsInstances, err := c.getListRDSInstances() 565 | if err != nil { 566 | return err 567 | } 568 | 569 | if len(rdsInstances) == 0 { 570 | // we didn't get any instances back from RDS; bail with a hint 571 | return fmt.Errorf("The list of instances we got back from RDS is empty. Check the region and authentication?") 572 | } 573 | 574 | if c.Options.InstanceIdentifier != "" { 575 | for _, instance := range rdsInstances { 576 | if c.Options.InstanceIdentifier == instance { 577 | // the user asked for an instance and we found it in the list. \o/ 578 | return nil 579 | } 580 | } 581 | // the user asked for an instance but we didn't find it. 582 | return fmt.Errorf("Instance identifier %s not found in list of instances:\n\t%s", 583 | c.Options.InstanceIdentifier, 584 | strings.Join(rdsInstances, "\n\t")) 585 | } 586 | 587 | // user didn't ask for an instance. 588 | // complain with a list of available instances and exit. 589 | errStr := fmt.Sprintf(`No instance identifier specified. 
Available RDS instances: 590 | %s 591 | Please specify an instance identifier using the --identifier flag 592 | `, strings.Join(rdsInstances, "\n\t")) 593 | return errors.New(errStr) 594 | } 595 | 596 | // gets a list of all available RDS instances 597 | func (c *CLI) getListRDSInstances() ([]string, error) { 598 | out, err := c.RDS.DescribeDBInstances(nil) 599 | if err != nil { 600 | return nil, err 601 | } 602 | instances := make([]string, len(out.DBInstances)) 603 | for i, instance := range out.DBInstances { 604 | instances[i] = *instance.DBInstanceIdentifier 605 | } 606 | return instances, nil 607 | } 608 | 609 | func (c *CLI) waitFor(d time.Duration) { 610 | select { 611 | case <-c.Abort: 612 | return 613 | case <-time.After(d): 614 | return 615 | } 616 | } 617 | 618 | // Nower interface abstracts time for testing 619 | type Nower interface { 620 | Now() time.Time 621 | } 622 | -------------------------------------------------------------------------------- /cli/cli_test.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/aws/aws-sdk-go/service/rds" 8 | ) 9 | 10 | type FakeNower struct { 11 | t time.Time 12 | } 13 | 14 | func (f *FakeNower) Now() time.Time { 15 | return f.t 16 | } 17 | 18 | func TestGetNextMarker(t *testing.T) { 19 | // next position is legit 20 | c := CLI{} 21 | c.fakeNower = &FakeNower{} 22 | c.fakeNower.(*FakeNower).t, _ = time.Parse(time.RFC3339, "2010-06-21T15:12:05Z") 23 | startingPos := "12:1234" 24 | streamPos := StreamPos{ 25 | logFile: LogFile{}, 26 | marker: startingPos, 27 | } 28 | resp := rds.DownloadDBLogFilePortionOutput{} 29 | respPos := "12:2345" 30 | resp.Marker = &respPos 31 | nextMarker := c.getNextMarker(streamPos, &resp) 32 | if resp.Marker == nil { 33 | t.Error("unexpected resp marker nil") 34 | } 35 | if nextMarker != respPos { 36 | t.Errorf("response marker %s expected, got %s", respPos, nextMarker) 37 | } 38 | // next position unchanged but legit 39 | respPos = "12:1234" 40 | resp.Marker = &respPos 41 | nextMarker = c.getNextMarker(streamPos, &resp) 42 | if resp.Marker == nil { 43 | t.Error("unexpected resp marker nil") 44 | } 45 | if nextMarker != respPos { 46 | t.Errorf("response marker %s expected, got %s", respPos, nextMarker) 47 | } 48 | // next position 0 and no data, not in :00-:05 time range, expect resp 49 | respPos = "0" 50 | resp.Marker = &respPos 51 | nextMarker = c.getNextMarker(streamPos, &resp) 52 | if resp.Marker == nil { 53 | t.Error("unexpected resp marker nil") 54 | } 55 | if nextMarker != respPos { 56 | t.Errorf("response marker %s expected, got %s", respPos, nextMarker) 57 | } 58 | // next position 0 and no data, in :00-:05 time range, expect startingPos 59 | c.fakeNower.(*FakeNower).t, _ = time.Parse(time.RFC3339, "2010-06-21T15:03:05Z") 60 | respPos = "0" 61 | resp.Marker = &respPos 62 | nextMarker = c.getNextMarker(streamPos, &resp) 63 | if resp.Marker == nil { 64 | t.Error("unexpected resp marker nil") 65 | } 66 | if nextMarker != startingPos { 67 | t.Errorf("response marker %s expected, got %s", startingPos, nextMarker) 68 | } 69 | // next position 0 and have data, not in :00-:05 time range, expect start+len 70 | respContent := "this is a slow query log entry, really." 
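// Markers take the form "hour:byteOffset", so when the API hands back a
// bogus "0" marker alongside real data, getNextMarker falls back to the
// starting position advanced by the payload length (see StreamPos.Add):
// here 12:1234 plus the 39-byte payload above should yield 12:1273.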
71 | expectedMarker := "12:1273" // 1234 + 39 (aka len(respContent)) 72 | resp.LogFileData = &respContent 73 | respPos = "0" 74 | resp.Marker = &respPos 75 | nextMarker = c.getNextMarker(streamPos, &resp) 76 | if resp.Marker == nil { 77 | t.Error("unexpected resp marker nil") 78 | } 79 | if nextMarker != expectedMarker { 80 | t.Errorf("response marker %s expected, got %s", expectedMarker, nextMarker) 81 | } 82 | // next position 0 and have data, in :00-:05 time range, expect start+len 83 | c.fakeNower.(*FakeNower).t, _ = time.Parse(time.RFC3339, "2010-06-21T15:03:05Z") 84 | respPos = "0" 85 | resp.Marker = &respPos 86 | nextMarker = c.getNextMarker(streamPos, &resp) 87 | if resp.Marker == nil { 88 | t.Error("unexpected resp marker nil") 89 | } 90 | if nextMarker != expectedMarker { 91 | t.Errorf("response marker %s expected, got %s", expectedMarker, nextMarker) 92 | } 93 | } 94 | 95 | func TestStreamAdd(t *testing.T) { 96 | startingPos := "12:1234" 97 | streamPos := StreamPos{ 98 | marker: startingPos, 99 | } 100 | lenToAdd := 60 101 | expectedPos := "12:1294" 102 | sumPos, err := streamPos.Add(lenToAdd) 103 | if err != nil { 104 | t.Errorf("unexpected error returned %s", err) 105 | } 106 | if sumPos != expectedPos { 107 | t.Errorf("position %s added to length %d got %s, expected %s", startingPos, 108 | lenToAdd, sumPos, expectedPos) 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/honeycombio/rdslogs 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go v1.44.9 7 | github.com/honeycombio/honeytail v1.6.2 8 | github.com/honeycombio/libhoney-go v1.15.8 9 | github.com/jessevdk/go-flags v1.5.0 10 | github.com/sirupsen/logrus v1.8.1 11 | ) 12 | 13 | require ( 14 | github.com/DataDog/zstd v1.5.2 // indirect 15 | github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect 16 | github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 // indirect 17 | github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 // indirect 18 | github.com/go-sql-driver/mysql v1.6.0 // indirect 19 | github.com/honeycombio/mysqltools v0.0.1 // indirect 20 | github.com/honeycombio/sqlparser v0.0.0-20210924214121-0662550abc08 // indirect 21 | github.com/jmespath/go-jmespath v0.4.0 // indirect 22 | github.com/klauspost/compress v1.15.3 // indirect 23 | github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect 24 | github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 25 | golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect 26 | gopkg.in/alexcesaro/statsd.v2 v2.0.0 // indirect 27 | ) 28 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= 2 | github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= 3 | github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= 4 | github.com/aws/aws-sdk-go v1.44.9 h1:s3lsEFbc8i7ghQmcEpcdyvoO/WMwyCVa9pUq3Lq//Ok= 5 | github.com/aws/aws-sdk-go v1.44.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= 6 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 8 | github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= 10 | github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= 11 | github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= 12 | github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= 13 | github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= 14 | github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01/go.mod h1:ypD5nozFk9vcGw1ATYefw6jHe/jZP++Z15/+VTMcWhc= 15 | github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= 16 | github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW87NGRw1FU5p5lEkpnt/QxoH5uPAOUlOVkAUuMg= 17 | github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= 18 | github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= 19 | github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= 20 | github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= 21 | github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= 22 | github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= 23 | github.com/honeycombio/honeytail v1.6.2 h1:FO/6O3XmHkYiok+41yjyL6Xxhr0hJ7tcDrfkTnrfxsI= 24 | github.com/honeycombio/honeytail v1.6.2/go.mod h1:yEu76+Y17VPXPyXnqHjmNlCk5QuB53pWi8FcVadXApc= 25 | github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= 26 | github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= 27 | github.com/honeycombio/mysqltools v0.0.1 h1:VyC2Z3npDpgXuwlBXwNcE/IH72STTZ1Tjt3pZlLPsDM= 28 | github.com/honeycombio/mysqltools v0.0.1/go.mod h1:r/WQhfDgxowZatJvhdy2VL801FOv5u8K6NzYqCGBJkQ= 29 | github.com/honeycombio/sqlparser v0.0.0-20210924214121-0662550abc08 h1:NyhUSa8Y20gPIm2U4DR3qC8YCNgfgVCVRhDuymneAGg= 30 | github.com/honeycombio/sqlparser v0.0.0-20210924214121-0662550abc08/go.mod h1:sq+YMepz8Z3OK2R1PkNlaJy4JlNpptYnl/pas5xA6sM= 31 | github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= 32 | github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= 33 | github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 34 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 35 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 36 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 37 | github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 38 | github.com/klauspost/compress v1.15.3 h1:wmfu2iqj9q22SyMINp1uQ8C2/V4M1phJdmH9fG4nba0= 39 | github.com/klauspost/compress v1.15.3/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= 40 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 41 | github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 42 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 43 | github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= 44 | github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 45 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 46 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 47 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 48 | github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= 49 | github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= 50 | github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= 51 | github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= 52 | github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= 53 | github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ= 54 | golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 55 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 56 | golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 57 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 58 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 59 | golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= 60 | golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 61 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 62 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 63 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 64 | gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= 65 | gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= 66 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 67 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 68 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 69 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 70 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 71 | -------------------------------------------------------------------------------- /kubernetes/rdslogs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 3 | kind: Deployment 4 | metadata: 5 | name: honeycomb-rdslogs 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: rdslogs 10 | # RDSLogs should run as a singleton. 
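# (It keeps its place in the log stream in process memory, so running more
# than one replica would most likely ship duplicate events to Honeycomb.)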
11 | replicas: 1 12 | template: 13 | metadata: 14 | labels: 15 | app: rdslogs 16 | spec: 17 | containers: 18 | - name: honeycomb-rdslogs 19 | image: honeycombio/rdslogs:latest 20 | command: ["/rdslogs"] 21 | args: 22 | - --region=us-east-1 23 | # set this to your RDS instance name 24 | - --identifier=CHANGEME 25 | - --writekey=$(WRITE_KEY) 26 | - --dataset=rds 27 | - --output=honeycomb 28 | resources: 29 | requests: 30 | # Depending on your sample rate and your RDS workload, you may 31 | # need to adjust this up or down 32 | cpu: 250m 33 | memory: 100Mi 34 | env: 35 | - name: WRITE_KEY 36 | valueFrom: 37 | secretKeyRef: 38 | name: honeycomb-write-key 39 | key: write_key 40 | - name: AWS_DEFAULT_REGION 41 | value: us-east-1 42 | - name: AWS_ACCESS_KEY_ID 43 | valueFrom: 44 | secretKeyRef: 45 | name: honeycomb-rdslogs-svc-user 46 | key: aws_access_key_id 47 | - name: AWS_SECRET_ACCESS_KEY 48 | valueFrom: 49 | secretKeyRef: 50 | name: honeycomb-rdslogs-svc-user 51 | key: aws_secret_access_key 52 | # The secret specs below are optional if you already have AWS and Honeycomb 53 | # secrets configured in k8s 54 | --- 55 | apiVersion: v1 56 | kind: Secret 57 | metadata: 58 | name: honeycomb-rdslogs-svc-user 59 | type: Opaque 60 | data: 61 | # kubernetes expects base64-encoded secrets 62 | aws_access_key_id: CHANGEME 63 | aws_secret_access_key: CHANGEME 64 | --- 65 | apiVersion: v1 66 | kind: Secret 67 | metadata: 68 | name: honeycomb-write-key 69 | type: Opaque 70 | data: 71 | write_key: CHANGEME 72 | ... 73 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "os/exec" 8 | "os/signal" 9 | "strings" 10 | "syscall" 11 | "time" 12 | 13 | "github.com/aws/aws-sdk-go/aws" 14 | "github.com/aws/aws-sdk-go/aws/credentials" 15 | "github.com/aws/aws-sdk-go/aws/session" 16 | "github.com/aws/aws-sdk-go/service/rds" 17 | flag "github.com/jessevdk/go-flags" 18 | "github.com/sirupsen/logrus" 19 | 20 | "github.com/honeycombio/libhoney-go" 21 | "github.com/honeycombio/rdslogs/cli" 22 | ) 23 | 24 | // BuildID is set by CircleCI at build time (see .circleci/config.yml) 25 | var BuildID string 26 | 27 | func main() { 28 | options, err := parseFlags() 29 | if err != nil { 30 | log.Fatal(err) 31 | } 32 | 33 | sigs := make(chan os.Signal, 1) 34 | abort := make(chan bool) 35 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 36 | go func() { 37 | sig := <-sigs 38 | fmt.Fprintf(os.Stderr, "Aborting! Caught Signal \"%s\"\n", sig) 39 | fmt.Fprintf(os.Stderr, "Cleaning up...\n") 40 | select { 41 | case abort <- true: 42 | close(abort) 43 | case <-time.After(10 * time.Second): 44 | fmt.Fprintf(os.Stderr, "Taking too long... 
Aborting.\n") 45 | os.Exit(1) 46 | } 47 | }() 48 | 49 | session, err := session.NewSession() 50 | if err != nil { 51 | log.Fatal(err) 52 | } 53 | c := &cli.CLI{ 54 | Options: options, 55 | RDS: rds.New(session, &aws.Config{ 56 | Region: aws.String(options.Region), 57 | }), 58 | Abort: abort, 59 | } 60 | 61 | if options.Debug { 62 | logrus.SetLevel(logrus.DebugLevel) 63 | } 64 | 65 | // if sending output to Honeycomb, make sure we have a write key and dataset 66 | if options.Output == "honeycomb" { 67 | if options.WriteKey == "" || options.Dataset == "" { 68 | log.Fatal("writekey and dataset flags required when output is 'honeycomb'.\nuse --help for usage info.") 69 | } 70 | if options.SampleRate < 1 { 71 | log.Fatal("Sample rate must be a positive integer.\nuse --help for usage info.") 72 | } 73 | libhoney.UserAgentAddition = fmt.Sprintf("rdslogs/%s", BuildID) 74 | fmt.Fprintln(os.Stderr, "Sending output to Honeycomb") 75 | } else if options.Output == "stdout" { 76 | fmt.Fprintln(os.Stderr, "Sending output to STDOUT") 77 | } else { 78 | // output flag is neither stdout nor honeycomb. error and bail 79 | log.Fatal("output target not recognized. use --help for usage info") 80 | } 81 | 82 | // make sure we can talk to an RDS instance. 83 | err = c.ValidateRDSInstance() 84 | if err == credentials.ErrNoValidProvidersFoundInChain { 85 | log.Fatal(awsCredsFailureMsg()) 86 | } 87 | if err != nil { 88 | log.Fatal(err) 89 | } 90 | 91 | if options.Download { 92 | fmt.Fprintln(os.Stderr, "Running in download mode - downloading old logs") 93 | err = c.Download() 94 | } else { 95 | fmt.Fprintln(os.Stderr, "Running in tail mode - streaming logs from RDS") 96 | err = c.Stream() 97 | } 98 | if err != nil { 99 | log.Fatal(err) 100 | } 101 | fmt.Fprintln(os.Stderr, "OK") 102 | } 103 | 104 | // getVersion returns the internal version ID 105 | func getVersion() string { 106 | if BuildID == "" { 107 | return "dev" 108 | } 109 | return fmt.Sprintf("%s", BuildID) 110 | } 111 | 112 | // parse all the flags, exit if anything's amiss 113 | func parseFlags() (*cli.Options, error) { 114 | var options cli.Options 115 | flagParser := flag.NewParser(&options, flag.Default) 116 | flagParser.Usage = cli.Usage 117 | 118 | // parse flags and check for extra command line args 119 | if extraArgs, err := flagParser.Parse(); err != nil || len(extraArgs) != 0 { 120 | if err != nil { 121 | if err.(*flag.Error).Type == flag.ErrHelp { 122 | // user specified --help 123 | os.Exit(0) 124 | } 125 | fmt.Fprintln(os.Stderr, "Failed to parse the command line. 
Run with --help for more info") 126 | return nil, err 127 | } 128 | return nil, fmt.Errorf("Unexpected extra arguments: %s\n", strings.Join(extraArgs, " ")) 129 | } 130 | 131 | // if all we want is the config file, just write it in and exit 132 | if options.WriteDefaultConfig { 133 | ip := flag.NewIniParser(flagParser) 134 | ip.Write(os.Stdout, flag.IniIncludeDefaults|flag.IniCommentDefaults|flag.IniIncludeComments) 135 | os.Exit(0) 136 | } 137 | 138 | // spit out the version if asked 139 | if options.Version { 140 | fmt.Println("Version:", getVersion()) 141 | os.Exit(0) 142 | } 143 | // read the config file if specified 144 | if options.ConfigFile != "" { 145 | ini := flag.NewIniParser(flagParser) 146 | ini.ParseAsDefaults = true 147 | if err := ini.ParseFile(options.ConfigFile); err != nil { 148 | if os.IsNotExist(err) { 149 | return nil, fmt.Errorf("config file %s doesn't exist", options.ConfigFile) 150 | } 151 | return nil, err 152 | } 153 | } 154 | 155 | if options.DBType == cli.DBTypeMySQL && options.LogType == cli.LogTypeQuery { 156 | if options.LogFile == "" { 157 | options.LogFile = "slowquery/mysql-slowquery.log" 158 | } 159 | } else if options.DBType == cli.DBTypeMySQL && options.LogType == cli.LogTypeAudit { 160 | if options.LogFile == "" { 161 | options.LogFile = "audit/server_audit.log" 162 | } 163 | } else if options.DBType == cli.DBTypePostgreSQL && options.LogType == cli.LogTypeQuery { 164 | if options.LogFile == "" { 165 | options.LogFile = "error/postgresql.log" 166 | } 167 | } else { 168 | return nil, fmt.Errorf( 169 | "Unsupported (dbtype, log_type) pair (`%s`,`%s`)", 170 | options.DBType, options.LogType) 171 | } 172 | return &options, nil 173 | } 174 | 175 | func awsCredsFailureMsg() string { 176 | // check for AWS binary 177 | _, err := exec.LookPath("aws") 178 | if err == nil { 179 | return `Unable to locate credentials. You can configure credentials by running "aws configure".` 180 | } 181 | return `Unable to locate AWS credentials. You have a few options: 182 | - Create an IAM role for the host machine with the permissions to access RDS 183 | - Use an AWS shared config file (~/.aws/config) 184 | - Configure credentials on a development machine (via ~/.aws/credentials) 185 | - Or set the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables 186 | 187 | You can read more at this security blog post: 188 | http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs 189 | 190 | Or read more about IAM roles and RDS at: 191 | http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAM.AccessControl.IdentityBased.html` 192 | } 193 | -------------------------------------------------------------------------------- /preinstall: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | USER="honeycomb" 5 | GROUP="honeycomb" 6 | 7 | # Create service group and user if they doesn't already exist 8 | if ! getent group $GROUP >/dev/null 9 | then 10 | if type "groupadd" > /dev/null 2>&1; then 11 | groupadd --system $GROUP >/dev/null 12 | else 13 | addgroup --system $GROUP >/dev/null 14 | fi 15 | fi 16 | 17 | if ! 
getent passwd $USER >/dev/null 18 | then 19 | useradd \ 20 | --system \ 21 | -g $GROUP \ 22 | --home /nonexistent \ 23 | --shell /bin/false \ 24 | $USER >/dev/null 25 | fi 26 | -------------------------------------------------------------------------------- /publisher/publisher.go: -------------------------------------------------------------------------------- 1 | package publisher 2 | 3 | import ( 4 | "crypto/sha256" 5 | "fmt" 6 | "io" 7 | "os" 8 | "strings" 9 | "time" 10 | 11 | "github.com/honeycombio/honeytail/event" 12 | "github.com/honeycombio/honeytail/parsers" 13 | "github.com/honeycombio/libhoney-go" 14 | "github.com/sirupsen/logrus" 15 | ) 16 | 17 | // We fetch up to 10k lines at a time - buffering several fetches at once 18 | // allows us to hand them off and fetch more while the line processor is doing work. 19 | const lineChanSize = 100000 20 | 21 | // Publisher is an interface to write rdslogs entries to a target. Current 22 | // implementations are STDOUT and Honeycomb 23 | type Publisher interface { 24 | // Write accepts a long blob of text and writes it to the target 25 | Write(blob string) 26 | } 27 | 28 | // HoneycombPublisher implements Publisher and sends the entries provided to 29 | // Honeycomb 30 | type HoneycombPublisher struct { 31 | Writekey string 32 | Dataset string 33 | APIHost string 34 | ScrubQuery bool 35 | SampleRate int 36 | Parser parsers.Parser 37 | AddFields map[string]string 38 | initialized bool 39 | lines chan string 40 | eventsToSend chan event.Event 41 | eventsSent uint 42 | lastUpdateTime time.Time 43 | } 44 | 45 | func (h *HoneycombPublisher) Write(chunk string) { 46 | if !h.initialized { 47 | fmt.Fprintln(os.Stderr, "initializing honeycomb") 48 | h.initialized = true 49 | libhoney.Init(libhoney.Config{ 50 | WriteKey: h.Writekey, 51 | Dataset: h.Dataset, 52 | APIHost: h.APIHost, 53 | SampleRate: uint(h.SampleRate), 54 | }) 55 | h.lines = make(chan string, lineChanSize) 56 | h.eventsToSend = make(chan event.Event) 57 | go func() { 58 | h.Parser.ProcessLines(h.lines, h.eventsToSend, nil) 59 | close(h.eventsToSend) 60 | }() 61 | go func() { 62 | fmt.Fprintln(os.Stderr, "spinning up goroutine to send events") 63 | for ev := range h.eventsToSend { 64 | if h.ScrubQuery { 65 | if val, ok := ev.Data["query"]; ok { 66 | // generate a sha256 hash 67 | newVal := sha256.Sum256([]byte(fmt.Sprintf("%v", val))) 68 | // and use the base16 string version of it 69 | ev.Data["query"] = fmt.Sprintf("%x", newVal) 70 | } 71 | } 72 | libhEv := libhoney.NewEvent() 73 | libhEv.Timestamp = ev.Timestamp 74 | 75 | // add extra fields first so they don't override anything parsed 76 | // in the log file 77 | if err := libhEv.Add(h.AddFields); err != nil { 78 | logrus.WithFields(logrus.Fields{ 79 | "add_fields": h.AddFields, 80 | "error": err, 81 | }).Error("Unexpected error adding extra fields data to libhoney event") 82 | } 83 | 84 | if err := libhEv.Add(ev.Data); err != nil { 85 | logrus.WithFields(logrus.Fields{ 86 | "event": ev, 87 | "error": err, 88 | }).Error("Unexpected error adding data to libhoney event") 89 | } 90 | 91 | // periodically provide updates to indicate work is actually being done 92 | if time.Since(h.lastUpdateTime) >= time.Minute { 93 | logrus.WithFields(logrus.Fields{ 94 | "most_recent_event": ev, 95 | "events_since_last_update": h.eventsSent, 96 | "last_update_time": h.lastUpdateTime, 97 | }).Info("status update") 98 | h.eventsSent = 0 99 | h.lastUpdateTime = time.Now() 100 | } 101 | 102 | // sampling is handled by the mysql parser 103 | // TODO make 
this work for postgres too 104 | if err := libhEv.SendPresampled(); err != nil { 105 | logrus.WithFields(logrus.Fields{ 106 | "event": ev, 107 | "error": err, 108 | }).Error("Unexpected error sending event to libhoney") 109 | } 110 | 111 | h.eventsSent++ 112 | } 113 | }() 114 | } 115 | lines := strings.Split(chunk, "\n") 116 | for _, line := range lines { 117 | if line == "" { 118 | continue 119 | } 120 | h.lines <- line 121 | } 122 | } 123 | 124 | // Close flushes outstanding sends 125 | func (h *HoneycombPublisher) Close() { 126 | libhoney.Close() 127 | } 128 | 129 | // STDOUTPublisher implements Publisher and writes the entries provided 130 | // to STDOUT 131 | type STDOUTPublisher struct { 132 | } 133 | 134 | func (s *STDOUTPublisher) Write(line string) { 135 | io.WriteString(os.Stdout, line) 136 | } 137 | -------------------------------------------------------------------------------- /rdslogs.conf: -------------------------------------------------------------------------------- 1 | [Application Options] 2 | ; AWS region to use 3 | ; Region = us-east-1 4 | 5 | ; RDS instance identifier 6 | ; InstanceIdentifier = 7 | 8 | ; RDS database type. Accepted values are mysql and postgresql. 9 | ; DBType = mysql 10 | 11 | ; Log file type. Accepted values are query and audit. Audit is currently only supported for mysql. 12 | ; LogType = query 13 | 14 | ; RDS log file to retrieve 15 | ; LogFile = 16 | 17 | ; Download old logs instead of tailing the current log 18 | ; Download = false 19 | 20 | ; directory into which log files are downloaded 21 | ; DownloadDir = ./ 22 | 23 | ; number of lines to request at a time from AWS. A larger number is more efficient; a smaller number allows for longer lines 24 | ; NumLines = 10000 25 | 26 | ; how many seconds to pause when rate limited by AWS. 27 | ; BackoffTimer = 5 28 | 29 | ; output for the logs: stdout or honeycomb 30 | ; Output = stdout 31 | 32 | ; Team write key, when output is honeycomb 33 | ; WriteKey = 34 | 35 | ; Name of the dataset, when output is honeycomb 36 | ; Dataset = 37 | 38 | ; Hostname for the Honeycomb API server 39 | ; APIHost = https://api.honeycomb.io/ 40 | 41 | ; Replaces the query field with a one-way hash of the contents 42 | ; ScrubQuery = false 43 | 44 | ; Only send 1 / N log lines 45 | ; SampleRate = 1 46 | 47 | ; Extra fields to send in request, in the style of "field:value" 48 | ; AddFields = 49 | 50 | ; Number of parsers to spin up. Currently only supported for the mysql parser. 
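; (A rough guide, not a hard rule: parsing is the CPU-bound step, and this
; value fans that work out across multiple goroutines in the MySQL parser,
; so consider raising it if parsing can't keep up with the 10k-line fetches.)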
51 | ; NumParsers = 4 52 | 53 | ; Output the current version and exit 54 | ; Version = false 55 | 56 | ; turn on debugging output 57 | ; Debug = false 58 | -------------------------------------------------------------------------------- /rdslogs.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Honeycomb agent to consume RDS slow query log 3 | After=network.target 4 | 5 | [Service] 6 | ExecStart=/usr/bin/rdslogs -c /etc/rdslogs/rdslogs.conf 7 | KillMode=process 8 | Restart=on-failure 9 | User=honeycomb 10 | Group=honeycomb 11 | 12 | [Install] 13 | Alias=rdslogs.service 14 | -------------------------------------------------------------------------------- /rdslogs.upstart: -------------------------------------------------------------------------------- 1 | # Upstart job for rdslogs, a tool for streaming the RDS slow query log 2 | # https://honeycomb.io/ 3 | 4 | description "rdslogs Daemon" 5 | author "Honeycomb Team " 6 | 7 | start on runlevel [2345] 8 | stop on runlevel [!2345] 9 | 10 | respawn 11 | 12 | exec su -s /bin/sh -c 'exec "$0" "$@"' honeycomb -- /usr/bin/rdslogs -c /etc/rdslogs/rdslogs.conf 13 | -------------------------------------------------------------------------------- /terraform/rdslogs.tf: -------------------------------------------------------------------------------- 1 | # This terraform configuration describes the minimum AWS requirements 2 | # for RDSLogs 3 | resource "aws_iam_user" "honeycomb-rdslogs-svc-user" { 4 | name = "honeycomb-rdslogs-svc-user" 5 | } 6 | 7 | resource "aws_iam_policy" "honeycomb-rdslogs-policy" { 8 | name = "honeycomb-rdslogs-policy" 9 | description = "minimal policy for honeycomb RDSLogs" 10 | policy = <