├── .github └── workflows │ ├── cla.yml │ ├── slack-notify-issues.yml │ └── slack-notify-pr.yml ├── .gitignore ├── .gitmodules ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── docker-build ├── Dockerenvfile_splunk.txt └── Dockerfile ├── fluentd-installer ├── Dockerfile.fluentd.sidecar ├── fluentd-1.11.0-linux-x86_64.tar.gz └── scripts │ └── linux │ ├── .gitignore │ ├── Dockerfile.fluentd │ ├── README.md │ ├── fluentd-agent-installer.sh │ ├── log-vendors │ ├── fluentd-datadog-installer.sh │ └── fluentd-splunk-installer.sh │ ├── other │ ├── dd_ascii_logo.txt │ ├── jfrog_ascii_logo.txt │ └── spl_ascii_logo.txt │ └── utils │ └── common.sh ├── fluentd ├── fluent.conf.distribution ├── fluent.conf.missioncontrol ├── fluent.conf.pipelines ├── fluent.conf.rt ├── fluent.conf.rt6 └── fluent.conf.xray └── log-vendors ├── Dockerfile.fluentd ├── Dockerfile.redhat-ubi-rt7-fluentd ├── Dockerfile.redhat-ubi-xray-analysis-fluentd ├── Dockerfile.redhat-ubi-xray-indexer-fluentd ├── Dockerfile.redhat-ubi-xray-persist-fluentd ├── Dockerfile.redhat-ubi-xray-router-fluentd ├── Dockerfile.redhat-ubi-xray-server-fluentd └── README.md /.github/workflows/cla.yml: -------------------------------------------------------------------------------- 1 | name: "CLA Assistant" 2 | on: 3 | # issue_comment triggers this action on each comment on issues and pull requests 4 | issue_comment: 5 | types: [created] 6 | pull_request_target: 7 | types: [opened,synchronize] 8 | 9 | jobs: 10 | CLAssistant: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions-ecosystem/action-regex-match@v2 14 | id: sign-or-recheck 15 | with: 16 | text: ${{ github.event.comment.body }} 17 | regex: '\s*(I have read the CLA Document and I hereby sign the CLA)|(recheckcla)\s*' 18 | 19 | - name: "CLA Assistant" 20 | if: ${{ steps.sign-or-recheck.outputs.match != '' || github.event_name == 'pull_request_target' }} 21 | # Alpha Release 22 | uses: cla-assistant/github-action@v2.1.1-beta 23 | env: 24 | # 
Generated and maintained by github 25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 26 | # JFrog organization secret 27 | PERSONAL_ACCESS_TOKEN : ${{ secrets.CLA_SIGN_TOKEN }} 28 | with: 29 | path-to-signatures: 'signed_clas.json' 30 | path-to-document: 'https://jfrog.com/cla/' 31 | remote-organization-name: 'jfrog' 32 | remote-repository-name: 'jfrog-signed-clas' 33 | # branch should not be protected 34 | branch: 'master' 35 | allowlist: bot* 36 | -------------------------------------------------------------------------------- /.github/workflows/slack-notify-issues.yml: -------------------------------------------------------------------------------- 1 | on: 2 | issues: 3 | types: [opened, reopened, deleted, closed] 4 | name: Slack Issue Notification 5 | jobs: 6 | slackNotification: 7 | name: Slack Notification Issue 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Slack Notification Issue 12 | uses: rtCamp/action-slack-notify@master 13 | env: 14 | SLACK_CHANNEL: partnereng-issues 15 | SLACK_COLOR: '#00A86B' 16 | SLACK_ICON: https://pbs.twimg.com/profile_images/978188446178082817/86ulJdF0.jpg 17 | SLACK_TITLE: "[${{ github.event.issue.state}}] ${{ github.event.issue.title }} on ${{ github.repository }} :rocket:" 18 | SLACK_MESSAGE: 'Link: ${{ github.event.issue.url }}' 19 | SLACK_USERNAME: PartnerEngineers 20 | SLACK_WEBHOOK: ${{ secrets.SLACK_ISSUE_WEBHOOK }} -------------------------------------------------------------------------------- /.github/workflows/slack-notify-pr.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | branches: 4 | - master 5 | types: [opened, reopened, closed] 6 | name: Slack Pull Request Notification 7 | jobs: 8 | slackNotification: 9 | name: Slack Notification PR 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Slack Notification PR 14 | uses: rtCamp/action-slack-notify@master 15 | env: 16 | SLACK_CHANNEL: 
partnereng-pullrequest 17 | SLACK_COLOR: '#00A86B' 18 | SLACK_ICON: https://pbs.twimg.com/profile_images/978188446178082817/86ulJdF0.jpg 19 | SLACK_TITLE: "[${{ github.event.pull_request.state}}] ${{ github.event.pull_request.title }} on ${{ github.repository }} :rocket:" 20 | SLACK_MESSAGE: 'Merging from ${{ github.head_ref }} to ${{ github.base_ref }} by ${{ github.actor }}. Link: ${{ github.event.pull_request._links.html.href }}' 21 | SLACK_USERNAME: PartnerEngineers 22 | SLACK_WEBHOOK: ${{ secrets.SLACK_PR_WEBHOOK }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | *.swp 3 | .tox 4 | ./idea 5 | .idea/ 6 | .DS_Store -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "splunk"] 2 | path = log-vendors/splunk 3 | url = https://github.com/jfrog/log-analytics-splunk.git 4 | branch = master 5 | [submodule "elastic"] 6 | path = log-vendors/elastic-fluentd-kibana 7 | url = https://github.com/jfrog/log-analytics-elastic.git 8 | branch = master 9 | [submodule "datadog"] 10 | path = log-vendors/datadog 11 | url = https://github.com/jfrog/log-analytics-datadog.git 12 | branch = master 13 | [submodule "prometheus"] 14 | path = log-vendors/prometheus-fluentd-grafana 15 | url = https://github.com/jfrog/log-analytics-prometheus.git 16 | branch = master 17 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # JFrog Log Analytics Changelog 2 | All changes to the log analytics integration will be documented in this file. 3 | 4 | ## [0.7.0] - Oct 20, 2020 5 | * Fixing issue with ip_address in access logs having space and . 
at the end 6 | 7 | ## [0.6.0] - Sept 25, 2020 8 | * [BREAKING] Fluentd configs updated to use JF_PRODUCT_DATA_INTERNAL env. 9 | 10 | ## [0.5.1] - Sept 9, 2020 11 | * All log vendors Splunk, Datadog, Elastic, and Prometheus are separate git repos as submodules now. Versioning is per individual repo now. 12 | 13 | ## [0.5.0] - Sept 8, 2020 14 | * Adding JFrog Pipelines fluent configuration files to capture logs 15 | 16 | ## [0.4.0] - Sept 4, 2020 17 | * Adding JFrog Mission Control fluent configuration files to capture logs 18 | 19 | ## [0.3.0] - Aug 26, 2020 20 | * Adding JFrog Distribution fluent configuration files to capture logs 21 | 22 | ## [0.2.0] - Aug 24, 2020 23 | * Splunk updates to launch new version of Splunkbase app v1.1.0 24 | 25 | ## [0.1.1] - June 1, 2020 26 | * Removing the need for user to specify splunk host , user, and token twice 27 | * Fixing issue with regex on the audit security log 28 | * Fixed issue with the repo and image when not docker api url 29 | 30 | ## [0.1.0] - May 12, 2020 31 | * Initial release of Jfrog Logs Analytic integration 32 | 33 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # JFrog welcomes community contribution! 2 | 3 | Before we can accept your contribution, process your GitHub pull requests, and thank you full-heartedly, we request that you will fill out and submit JFrog's Contributor License Agreement (CLA). 4 | 5 | [Click here](https://gist.github.com/jfrog-ecosystem/7d4fbeaac18edbd3cfc38831125acbb3) to view the JFrog CLA. 6 | 7 | Please comment in your pull request to mark your acceptance for now until CLA assistant is fixed. 8 | 9 | "I have read the CLA Document and I hereby sign the CLA" 10 | 11 | This should only take a minute to complete and is a one-time process. 
12 | 13 | *Thanks for Your Contribution to the Community!* :-) 14 | 15 | ## Pull Request Process ## 16 | 17 | - Fork this repository. 18 | - Clone the forked repository to your local machine and perform the proposed changes. 19 | - Test the changes in your own K8s environment and confirm everything works end to end. 20 | - Update the CHANGELOG.md 21 | - Submit a PR with the relevant information and check the applicable boxes and fill out the questions. 22 | 23 | ## Acceptance Criteria ## 24 | 25 | - Pull requests must pass all automated checks 26 | - CHANGELOG.md has relevant changes 27 | - README.md has been updated if required 28 | - One approval from JFrog reviewers 29 | 30 | Upon the success of the above the pull request will be mergable into master branch. Upon merge the source branch will be removed. 31 | 32 | Increase the version numbers in any examples files and the README.md to the new version that this Pull Request would represent. The versioning scheme we use is SemVer. 33 | You may merge the Pull Request in once you have the sign-off of one other developer. 34 | 35 | ## Code of Conduct 36 | ### Our Pledge 37 | 38 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
39 | 40 | ### Our Standards 41 | 42 | Examples of behavior that contributes to creating a positive environment include: 43 | ```` 44 | Using welcoming and inclusive language 45 | Being respectful of differing viewpoints and experiences 46 | Gracefully accepting constructive criticism 47 | Focusing on what is best for the company 48 | Showing empathy towards other colleagues 49 | ```` 50 | 51 | Examples of unacceptable behavior by participants include: 52 | 53 | ```` 54 | The use of sexualized language or imagery and unwelcome sexual attention or advances 55 | Trolling, insulting/derogatory comments, and personal or political attacks 56 | Public or private harassment 57 | Publishing others' private information, such as a physical or electronic address, without explicit permission 58 | Other conduct which could reasonably be considered inappropriate in a professional setting 59 | ```` 60 | ### Our Responsibilities 61 | 62 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 63 | 64 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 65 | 66 | ## Scope 67 | 68 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project. Examples of representing a project include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
69 | 70 | ## Enforcement 71 | 72 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at Slack #xray_splunk . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 73 | 74 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 75 | 76 | ## Attribution 77 | 78 | This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at http://contributor-covenant.org/version/1/4 79 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. 
(Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2021 JFrog Inc. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Note: This repository is no longer relevant. 
All the JFrog log-analytic solutions have moved to separate repos 2 | 3 | * Splunk - https://github.com/jfrog/log-analytics-splunk 4 | * Datadog - https://github.com/jfrog/log-analytics-datadog 5 | * Elasticsearch Kibana - https://github.com/jfrog/log-analytics-elastic 6 | * Prometheus Loki Grafana - https://github.com/jfrog/log-analytics-prometheus 7 | * NewRelic - https://github.com/jfrog/log-analytics-newrelic 8 | 9 | -------------------------------------------------------------------------------- /docker-build/Dockerenvfile_splunk.txt: -------------------------------------------------------------------------------- 1 | JFROG_PRODUCT=rt 2 | JF_PRODUCT_DATA_INTERNAL=path_to_jfrog_logs 3 | HEC_HOST=splunk.change_me.com 4 | HEC_PORT=8088 5 | HEC_TOKEN=change_me 6 | METRICS_HEC_TOKEN=change_me 7 | COM_PROTOCOL=https 8 | INSECURE_SSL=false 9 | JPD_URL=http:\/\/change_me.jfrog.io 10 | JPD_USER_NAME=change_me 11 | JPD_API_KEY=change_me -------------------------------------------------------------------------------- /docker-build/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for bitnami/fluentd sidecar image with all the necessary plugins for our log analytic providers 2 | FROM bitnami/fluentd:latest 3 | LABEL maintainer="Partner Engineering " 4 | 5 | ## Build time Arguments, short circuit them to ENV Variables so they are available at run time also 6 | ARG SOURCE=JFRT 7 | ARG TARGET=SPLUNK 8 | 9 | ## Environment Variables set by this docker file, there will be seperate env params set by a env file while running the containers 10 | ## For better maintainability always depend dockerfile code on the environment variables declared in this file to add more platforms 11 | ENV SRC_PLATFORM=$SOURCE 12 | ENV TGT_PLATFORM=$TARGET 13 | 14 | USER root 15 | 16 | ## Install JFrog Plugins 17 | RUN fluent-gem install fluent-plugin-jfrog-siem 18 | RUN fluent-gem install fluent-plugin-jfrog-metrics 19 | 20 | ## Install 
custom Fluentd plugins 21 | RUN if [ "$TGT_PLATFORM" = "SPLUNK" ] ; then echo "Downloading the fluentd plugin for $TGT_PLATFORM "; fluent-gem install fluent-plugin-splunk-hec; else echo "Not Downloading"; fi 22 | RUN if [ "$TGT_PLATFORM" = "DATADOG" ] ; then echo "Downloading the fluentd plugin for $TGT_PLATFORM "; fluent-gem install fluent-plugin-datadog; else echo "Not Downloading"; fi 23 | RUN if [ "$TGT_PLATFORM" = "ELASTIC" ] ; then echo "Downloading the fluentd plugin for $TGT_PLATFORM "; fluent-gem install fluent-plugin-elasticsearch; else echo "Not Downloading"; fi 24 | 25 | ## Download Config Files 26 | RUN if [ "$SRC_PLATFORM" = "JFRT" ] ; then echo "Downloading the fluentd config file for $SRC_PLATFORM and $TGT_PLATFORM "; curl https://raw.githubusercontent.com/jfrog/log-analytics-splunk/Metrics_splunk/fluent.conf.rt -o /opt/bitnami/fluentd/conf/fluentd.conf; else echo "Not Downloading"; fi 27 | RUN if [ "$SRC_PLATFORM" = "JFXRAY" ] ; then echo "Downloading the fluentd config file for $SRC_PLATFORM and $TGT_PLATFORM "; curl https://raw.githubusercontent.com/jfrog/log-analytics-splunk/Metrics_splunk/fluent.conf.xray -o /opt/bitnami/fluentd/conf/fluentd.conf; else echo "Not Downloading"; fi 28 | 29 | ENTRYPOINT if [ "$TGT_PLATFORM" = "SPLUNK" ] ; then sed -i -e "s/HEC_HOST/$HEC_HOST/g" \ 30 | -e "s/HEC_PORT/$HEC_PORT/g" \ 31 | -e "s/METRICS_HEC_TOKEN/$METRICS_HEC_TOKEN/" \ 32 | -e "s/HEC_TOKEN/$HEC_TOKEN/" \ 33 | -e "s/COM_PROTOCOL/$COM_PROTOCOL/g" \ 34 | -e "s/INSECURE_SSL/$INSECURE_SSL/g" \ 35 | -e "s/JPD_URL/$JPD_URL/" \ 36 | -e "s/ADMIN_USERNAME/$JPD_USER_NAME/" \ 37 | -e "s/API_KEY/$JPD_API_KEY/" /opt/bitnami/fluentd/conf/fluentd.conf && fluentd -v -c /opt/bitnami/fluentd/conf/fluentd.conf; fi 38 | USER 1001 39 | 40 | STOPSIGNAL SIGTERM -------------------------------------------------------------------------------- /fluentd-installer/Dockerfile.fluentd.sidecar: -------------------------------------------------------------------------------- 1 | # 
Dockerfile for bitnami/fluentd sidecar image with all the necessary plugins for our log analytic providers 2 | FROM bitnami/fluentd:latest 3 | LABEL maintainer "Partner Engineering " 4 | 5 | USER root 6 | 7 | ##Uninstall elastic plugin which is preinstalled in bitnami fluentd 8 | ##Pin elastic gem version to 7.14 9 | RUN fluent-gem uninstall elasticsearch -a --ignore-dependencies 10 | RUN fluent-gem install elasticsearch -v 7.14 --no-document 11 | ## Install custom Fluentd plugins 12 | RUN fluent-gem install fluent-plugin-splunk-hec --no-document 13 | RUN fluent-gem install fluent-plugin-datadog --no-document 14 | RUN fluent-gem install fluent-plugin-elasticsearch --no-document 15 | RUN fluent-gem install fluent-plugin-record-modifier --no-document 16 | RUN fluent-gem install fluent-plugin-jfrog-siem --no-document 17 | RUN fluent-gem install fluent-plugin-jfrog-metrics --no-document 18 | 19 | USER 1001 -------------------------------------------------------------------------------- /fluentd-installer/fluentd-1.11.0-linux-x86_64.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics/fbb6b4a7bcc8b0ad047b4c71a0491450aa0368bb/fluentd-installer/fluentd-1.11.0-linux-x86_64.tar.gz -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/.gitignore: -------------------------------------------------------------------------------- 1 | Dockerfile 2 | run_container 3 | fluentd.conf 4 | fluent.conf.* 5 | fluentd-splunk-installer.sh 6 | install-redhat-td-agent4.sh 7 | common.sh 8 | fluentd-datadog-installer.sh 9 | *.txt 10 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/Dockerfile.fluentd: -------------------------------------------------------------------------------- 1 | FROM bitnami/fluentd:latest 2 | LABEL maintainer="JFrog " 3 | 4 | ## Fix the file permission of 
root on fluentd directory 5 | USER root 6 | ## Env variables 7 | ENV JF_PRODUCT_DATA_INTERNAL=JF_PRODUCT_DATA_INTERNAL_VALUE 8 | 9 | ## Copy fluentd.conf file 10 | COPY FLUENT_CONF_FILE_NAME /opt/bitnami/fluentd/conf/fluentd.conf 11 | ## Create JF product folder (used to mount the host JF logs folder) 12 | RUN mkdir -p "$JF_PRODUCT_DATA_INTERNAL" 13 | RUN chown -R 1001:1001 /opt/bitnami/fluentd/ 14 | ## Reset back to user 15 | USER 1001 16 | 17 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/README.md: -------------------------------------------------------------------------------- 1 | Interactive script that helps to install and configures fluentd and the plugins (Datadog, Splunk, etc). 2 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/fluentd-agent-installer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # vars 4 | # branch name (usually master) 5 | SCRIPT_BRANCH="master" 6 | # dockerfile name 7 | DOCKERFILE_PATH="./Dockerfile" 8 | # docker image prefix tag 9 | DOCKER_IMAGE_TAG="jfrog/fluentd" 10 | # log vendors scrips url 11 | SCRIPTS_URL_PATH="https://github.com/jfrog/log-analytics/raw/${SCRIPT_BRANCH}/fluentd-installer/scripts/linux" 12 | # dev mode 13 | DEV_MODE=false 14 | # load scripts from the local file system 15 | LOCAL_MODE=false 16 | # temp folder path 17 | TEMP_FOLDER="/tmp" 18 | 19 | # Terminate the script and print a message. 20 | terminate() { 21 | declare termination_reason=$1 22 | echo 23 | print_error 'Installation was unsuccessful!' 24 | echo 25 | print_error "Reason(s): $termination_reason" 26 | echo 27 | print_error 'Installation aborted!' 
28 | echo 29 | exit 1 30 | } 31 | 32 | # Loads the remote script based on the provided vars 33 | load_remote_script() { 34 | declare script_url=$1 35 | declare script_path=$2 36 | 37 | echo "Loading script '$script_url', please wait..." 38 | 39 | # check url 40 | wget -nv -O "$script_path" "$script_url" || terminate "ERROR: Error while downloading ${script_url}. Exiting..." 41 | # load script 42 | source $script_path 43 | } 44 | 45 | # load the common script 46 | if [ "$LOCAL_MODE" == true ]; then 47 | source ./utils/common.sh || exit 1 48 | else 49 | load_remote_script "$SCRIPTS_URL_PATH/utils/common.sh" "common.sh" 50 | fi 51 | 52 | # Intro message 53 | intro() { 54 | help_link=https://github.com/jfrog/log-analytics 55 | load_and_print_logo "$SCRIPTS_URL_PATH/other/jfrog_ascii_logo.txt" "jfrog_ascii_logo.txt" 56 | echo 'JFrog fluentd installation script (Splunk, Datadog).' 57 | echo "More information: $help_link" 58 | echo 59 | echo 'The script installs fluentd and performs the following tasks:' 60 | echo '- Checks if the Fluentd requirements are met and updates the OS if needed.' 61 | echo '- Installs/Updates Fluentd as a service or in the user space depending on Linux distro.' 62 | echo '- Creates (builds) Fluentd docker image.' 63 | echo '- Updates the log files/folders permissions.' 64 | echo '- Installs Fluentd plugins (Splunk, Datadog).' 65 | echo '- Installs Fluentd SIEM plugin (Xray only).' 66 | echo '- Starts and enables the Fluentd service.' 67 | echo '- Provides additional info related to the installed plugins and configurations.' 68 | echo 69 | print_error "This script in might require superuser access. You might be prompted for your password by sudo." 70 | 71 | if [ "$DEV_MODE" == true ]; then 72 | echo 73 | print_error ">>>> THE SCRIPT RUNS IN THE DEV/DEBUGGING MODE (DEV_MODE==true)! 
<<<<" 74 | echo 75 | fi 76 | if [ "$LOCAL_MODE" == true ]; then 77 | echo 78 | print_error ">>>> THE SCRIPT RUNS IN THE LOCAL MODE, The scripts are loaded from the local file system instead of the github repository (LOCAL_MODE==true)! <<<<" 79 | echo 80 | fi 81 | 82 | # Experimental warning 83 | declare experiments_warning=$(question "The installer is still in the EXPERIMENTAL phase. Would you like to continue? [y/n]: ") 84 | if [ "$experiments_warning" == false ]; then 85 | echo Have a nice day! Good Bye! 86 | exit 0 87 | fi 88 | } 89 | 90 | # Modify the conf file 91 | modify_conf_file() { 92 | declare now_date=$(date +"%m_%d_%Y_%H_%M_%S") 93 | declare backup_postfix="_la_backup_$now_date" 94 | # backup modifying file first 95 | declare file_path=$1 96 | declare file_path_backup="${file_path}${backup_postfix}" 97 | declare conf_content=$2 98 | declare run_as_sudo=$3 99 | run_command "$run_as_sudo" "cp ${file_path} ${file_path_backup}" || terminate "Error while creating backup copy, original file: ${file_path}, backup file: ${file_path_backup}." 100 | echo "Modifying ${file_path}..." 101 | echo "$conf_content" | run_command "$run_as_sudo" "tee -a ${file_path}" 102 | echo "File ${file_path} modified and the original content backed up to ${file_path_backup}" 103 | } 104 | 105 | # Installs fluentd based on the type of installation and (service/user/etc) and based on Linux distro. 
106 | install_fluentd() { 107 | declare install_as_service=$1 108 | # supported linux distros 109 | declare supported_distros=("centos" "amazon" "red hat") 110 | 111 | # Check distro 112 | declare detected_distro=$(cat /etc/*-release | tr [:upper:] [:lower:] | grep -Poi '(centos|ubuntu|red hat|amazon|debian)' | uniq) 113 | declare is_supported_distro=false 114 | for supported_distro in "${supported_distros[@]}"; do 115 | if [[ $detected_distro == $supported_distro ]]; then 116 | is_supported_distro=true 117 | break 118 | fi 119 | done 120 | 121 | if [ "$is_supported_distro" == false ]; then 122 | echo "Linux distro '${detected_distro}' is not supported. Fluentd was NOT installed. Exiting..." 123 | exit 0 124 | fi 125 | 126 | # Check the Fluentd requirements (file descriptors, etc) 127 | declare ulimit_output=$(ulimit -n) 128 | if [ $ulimit_output -lt 65536 ]; then 129 | # Update the file descriptors limit per process and 'high load environments' if needed 130 | echo 131 | declare update_limit=$(question "Fluentd requires a higher limit of the file descriptors per process and the network kernel parameters adjustment (more info: https://docs.fluentd.org/installation/before-install). Would you like to update the mentioned configuration (optional and sudo rights required)? 
[y/n]: ") 132 | if [ "$update_limit" == true ]; then 133 | limit_conf_file_path=/etc/security/limits.conf 134 | limit_config=" 135 | # Added by JFrog log-analytics install script 136 | root soft nofile 65536 137 | root hard nofile 65536 138 | * soft nofile 65536 139 | * hard nofile 65536" 140 | modify_conf_file $limit_conf_file_path "$limit_config" true 141 | nkp_path_file=/etc/sysctl.conf 142 | nkp_config=" 143 | # Added by JFrog log-analytics install script 144 | net.core.somaxconn = 1024 145 | net.core.netdev_max_backlog = 5000 146 | net.core.rmem_max = 16777216 147 | net.core.wmem_max = 16777216 148 | net.ipv4.tcp_wmem = 4096 12582912 16777216 149 | net.ipv4.tcp_rmem = 4096 12582912 16777216 150 | net.ipv4.tcp_max_syn_backlog = 8096 151 | net.ipv4.tcp_slow_start_after_idle = 0 152 | net.ipv4.tcp_tw_reuse = 1 153 | net.ipv4.ip_local_port_range = 10240 65535" 154 | modify_conf_file $nkp_path_file "$nkp_config" true 155 | fi 156 | fi 157 | 158 | # We support two ways of installing td-agent4 and user/zip. 159 | # Install as service or in the user space 160 | if [ "$install_as_service" == true ]; then 161 | # Fetches and installs td-agent4 (for now only Centos and Amazon distros supported) 162 | if [ "$detected_distro" == "centos" ]; then 163 | error_message="ERROR: td-agent 4 installation failed. Fluentd was NOT installed. Exiting..." 164 | echo "Centos detected. Installing td-agent 4..." 165 | { 166 | load_remote_script "https://toolbelt.treasuredata.com/sh/install-redhat-td-agent4.sh" "install-redhat-td-agent4.sh" 167 | } || { 168 | terminate "$error_message" 169 | } 170 | elif [ "$detected_distro" == "amazon" ]; then 171 | echo "Amazon Linux detected. Installing td-agent 4..." 
172 | { 173 | load_remote_script "https://toolbelt.treasuredata.com/sh/install-amazon2-td-agent4.sh" "install-amazon2-td-agent4.sh" 174 | } || { 175 | terminate "$error_message" 176 | } 177 | else 178 | terminate "Unsupported linux distro: $detected_distro" 179 | fi 180 | else 181 | current_path=$(pwd) 182 | declare fluentd_file_name="fluentd-1.11.0-linux-x86_64.tar.gz" 183 | declare fluentd_zip_install_default_path="$HOME/fluentd" 184 | echo 185 | 186 | read -p "Please provide a path where Fluentd will be installed, (default: $fluentd_zip_install_default_path): " user_fluentd_install_path 187 | # check if the path is empty, if empty then use fluentd_zip_install_default_path 188 | if [ -z "$user_fluentd_install_path" ]; then 189 | user_fluentd_install_path="$fluentd_zip_install_default_path" 190 | fi 191 | # create folder if not present 192 | echo "Creating $user_fluentd_install_path..." 193 | mkdir -p "$user_fluentd_install_path" || terminate "Error while creating $user_fluentd_install_path" 194 | # check if user has write permissions in the specified path 195 | if ! [ -w "$user_fluentd_install_path" ]; then 196 | terminate "ERROR: Write permission denied in ${user_fluentd_install_path}. Please make sure that you have read/write permissions in ${user_fluentd_install_path}. Fluentd was NOT installed. Exiting..." 
197 | fi 198 | # download and extract 199 | declare zip_file="$user_fluentd_install_path/$fluentd_file_name" 200 | wget -nv -O "$zip_file" https://github.com/jfrog/log-analytics/raw/${SCRIPT_BRANCH}/fluentd-installer/${fluentd_file_name} 201 | echo "Please wait, unpacking $fluentd_file_name to $user_fluentd_install_path" 202 | tar -xf "$zip_file" -C "$user_fluentd_install_path" --strip-components 1 || terminate "Error while unpacking $zip_file" 203 | # clean up 204 | rm "$zip_file" 205 | echo "Fluentd files extracted to: $user_fluentd_install_path" 206 | echo 207 | fi 208 | } 209 | 210 | # Installs log vendors (Splunk, Datadog, etc) 211 | install_log_vendor() { 212 | declare install_as_service=$1 213 | # Install log vendors (splunk, datadog etc) 214 | declare config_link=$help_link 215 | declare install_log_vendors=$(question "Would you like to install Fluentd log vendors (optional)? [y/n]: ") 216 | 217 | # check if gem/td-agent-gem is installed 218 | if [ "$install_as_docker" == false ]; then 219 | if [ -x "$(command -v td-agent-gem)" ] && [ $install_as_service == true ]; then 220 | gem_command="sudo td-agent-gem" 221 | elif [ -x "$(command -v ${user_fluentd_install_path}/lib/ruby/bin/gem -v)" ]; then 222 | gem_command="$user_fluentd_install_path/lib/ruby/bin/gem" 223 | else 224 | terminate "WARNING: Ruby 'gem' or 'td-agent-gem' is required and was not found, please make sure that at least one of the mentioned frameworks is installed. Fluentd log vendors installation aborted." 225 | fi 226 | fi 227 | 228 | if [ "$install_log_vendors" == true ]; then 229 | while true; do 230 | echo 231 | read -p "What log vendor would you like to install? 
[Splunk or Datadog]: " log_vendor_name 232 | log_vendor_name=${log_vendor_name,,} 233 | 234 | case $log_vendor_name in 235 | [splunk]*) 236 | log_vendor_name=splunk 237 | 238 | # load the script 239 | if [ "$LOCAL_MODE" == true ]; then 240 | source ./log-vendors/fluentd-splunk-installer.sh 241 | else 242 | load_remote_script "$SCRIPTS_URL_PATH/log-vendors/fluentd-splunk-installer.sh" "fluentd-splunk-installer.sh" 243 | fi 244 | 245 | install_plugin $install_as_service $install_as_docker "$user_fluentd_install_path" "$gem_command" || terminate "Error while installing Splunk plugin." 246 | break 247 | ;; 248 | [datadog]*) 249 | log_vendor_name=datadog 250 | 251 | # load the script 252 | if [ "$LOCAL_MODE" == true ]; then 253 | source ./log-vendors/fluentd-datadog-installer.sh 254 | else 255 | load_remote_script "$SCRIPTS_URL_PATH/log-vendors/fluentd-datadog-installer.sh" "fluentd-datadog-installer.sh" 256 | fi 257 | 258 | install_plugin $install_as_service $install_as_docker "$user_fluentd_install_path" "$gem_command" || terminate "Error while installing Datadog plugin." 259 | break 260 | ;; 261 | #[elastic]*) 262 | # log_vendor_name=elastic 263 | # echo Installing fluent-plugin-elasticsearch... 264 | # $gem_command install fluent-plugin-elasticsearch 265 | # help_link=https://github.com/jfrog/log-analytics-elastic 266 | # break 267 | # ;; 268 | #[prometheus]*) 269 | # log_vendor_name=prometheus 270 | # echo Installing fluent-plugin-prometheus... 271 | # $gem_command install fluent-plugin-prometheus 272 | # help_link=https://github.com/jfrog/log-analytics-prometheus 273 | # break 274 | # ;; 275 | *) 276 | echo "Please answer: Splunk or Datadog" ; 277 | esac 278 | done 279 | else 280 | echo 281 | echo "Skipping the log vendor installation!" 282 | fi 283 | } 284 | 285 | # Based on the type of installation starts and enables fluentd/td-agent. 
286 | start_enable_fluentd() { 287 | declare install_as_service=$1 288 | 289 | # Start/enable/status td-agent service 290 | if [ "$install_as_service" == true ]; then 291 | # enable and start fluentd service, this part is only available if Fluentd was installed as service in the previous steps 292 | echo 293 | declare start_enable_service=$(question "Would you like to start and enable Fluentd service (td-agent4, optional)? [y/n]: ") 294 | declare fluentd_service_name="td-agent" 295 | if [ "$start_enable_service" == true ]; then 296 | echo Starting and enabling td-agent service... 297 | if [[ $(systemctl) =~ -\.mount ]]; then 298 | sudo systemctl daemon-reload 299 | sudo systemctl enable ${fluentd_service_name}.service 300 | sudo systemctl restart ${fluentd_service_name}.service 301 | sudo systemctl status ${fluentd_service_name}.service 302 | else 303 | sudo chkconfig ${fluentd_service_name} on 304 | sudo /etc/init.d/${fluentd_service_name} restart 305 | sudo /etc/init.d/${fluentd_service_name} status 306 | fi 307 | fi 308 | else 309 | echo 310 | # FIX(review): this prompt was wrapped in 'if [ "$start_enable_service" == true ]', but start_enable_service is only set in the service branch above and is always unset here, so the question never ran and the service files were never created; ask unconditionally. 311 | declare start_enable_tar_install=$(question "Would you like to start and enable Fluentd as service (systemctl required, optional)? [y/n]: ") 312 | 313 | if ! [[ $(systemctl) =~ -\.mount ]]; then 314 | echo "WARNING: The 'systemctl' command not found, the files needed to start Fluentd as service won't be created." 315 | elif [ "$start_enable_tar_install" == true ]; then 316 | echo Creating files needed for the Fluentd service...
317 | mkdir -p "$HOME"/.config/systemd/user/ 318 | fluentd_service_name='jfrogfluentd' 319 | declare user_install_fluentd_service_conf_file="$HOME"/.config/systemd/user/${fluentd_service_name}.service 320 | touch "$user_install_fluentd_service_conf_file" 321 | echo "# Added by JFrog log-analytics install script 322 | [Unit] 323 | Description=JFrog_Fluentd 324 | 325 | [Service] 326 | ExecStart=${user_fluentd_install_path}/fluentd ${user_fluentd_install_path}/test.conf 327 | Restart=always 328 | 329 | [Install] 330 | WantedBy=graphical.target" >"$user_install_fluentd_service_conf_file" 331 | echo Starting and enabling td-agent service... 332 | { 333 | systemctl --user enable ${user_install_fluentd_service_conf_file} 334 | systemctl --user restart ${fluentd_service_name} 335 | } || { 336 | echo 337 | print_error "ALERT: Enabling the fluentd service wasn't successful, for additional info please check the errors above." 338 | print_error "You can still start Fluentd manually with the following command: '$user_fluentd_install_path/fluentd $fluentd_conf_file_path'" 339 | } 340 | fi 341 | fi 342 | 343 | if [[ -z $(ps aux | grep fluentd | grep -v "grep") ]]; then 344 | fluentd_summary_msg="ALERT: Service ${fluentd_service_name} not found. Fluentd is not available as service." 345 | fi 346 | if [ "$install_as_service" == true ]; then 347 | service_based_message="- To manage the Fluentd as service (td-agent) please use 'service' or 'systemctl' command." 348 | fluentd_conf_file_path="/etc/td-agent/td-agent.conf" 349 | else 350 | service_based_message="- To manually start Fluentd use the following command: $user_fluentd_install_path/fluentd $fluentd_conf_file_path" 351 | fi 352 | 353 | if !
[ -z $fluentd_conf_file_path ]; then 354 | fluentd_summary_msg="- To change the Fluentd configuration please update: $fluentd_conf_file_path 355 | $service_based_message" 356 | else 357 | fluentd_summary_msg="$service_based_message" 358 | fi 359 | } 360 | 361 | # Based on the gather user's input builds the fluentd docker image. 362 | build_docker_image() { 363 | echo 364 | if ! [ -z $log_vendor_name ]; then 365 | declare docker_default_image_tag="$DOCKER_IMAGE_TAG/$log_vendor_name" 366 | read -p "Please provide docker image tag (default: $docker_default_image_tag): " docker_image_tag 367 | if [ -z "$docker_image_tag" ]; then 368 | docker_image_tag=$docker_default_image_tag 369 | fi 370 | echo 371 | echo "Building docker image based on the provided information..." 372 | echo 373 | docker build -t $docker_image_tag ./ || terminate "Docker image creation failed." 374 | echo 375 | declare docker_image_info="Docker image: $(docker image ls | grep $docker_image_tag)" 376 | print_green "$docker_image_info" 377 | echo 378 | fluentd_summary_msg="- $docker_image_info" 379 | else 380 | echo 'Fluentd installation summary:' 381 | print_error "- ALERT! - You didn't request any Jfrog product related customization therefore no docker image was created or built, please use the Fluentd docker image instead: https://hub.docker.com/r/fluent/fluentd/ " 382 | echo "- Additional information related to the JFrog log analytics: https://github.com/jfrog/log-analytics" 383 | print_green 'Fluentd installation completed!' 384 | echo 385 | exit 0 386 | fi 387 | } 388 | 389 | # Runs the dockers image created by the script 390 | run_docker_image() { 391 | jf_log_mounting_path=$1 392 | echo 393 | while true; do 394 | read -p 'Please provide docker container name: ' docker_container_name 395 | echo "Provided name: $docker_container_name" 396 | if [ -z "$docker_container_name" ]; then 397 | echo "Incorrect docker container name, please try it again." 
398 | else 399 | break 400 | fi 401 | done 402 | declare docker_start_command="docker run -d -it --name $docker_container_name --mount type=bind,source=$jf_log_mounting_path,target=$jf_log_mounting_path $docker_image_tag:latest" 403 | echo 404 | echo "Starting docker container..." 405 | { 406 | eval "$docker_start_command" 407 | docker ps -all | grep $docker_container_name 408 | } || print_error "Starting docker image '$DOCKER_IMAGE_TAG' failed, please resolve the problem and run it manually using the following command: '$docker_start_command'" 409 | fluentd_summary_msg="$fluentd_summary_msg 410 | - Docker container info: $(docker ps -all | grep $docker_container_name) 411 | - Docker run command: $docker_start_command" 412 | } 413 | 414 | # intro message 415 | intro 416 | 417 | # installation type selection 418 | while true; do 419 | echo 420 | read -p "Would you like to install Fluentd as SERVICE, in the USER space or build DOCKER image? [service/user/docker] 421 | [service] - Fluentd will be installed as service on this machine (sudo rights required). 422 | [user] - Fluentd will be installed in a folder specified in the next step. 423 | [docker] - Custom Docker image will built based on the latest fluentd image and user input. 424 | [service/user/docker]: " install_type 425 | install_type=${install_type,,} 426 | 427 | case $install_type in 428 | [service]*) 429 | declare install_as_service=true 430 | declare install_as_docker=false 431 | echo "Installation type: SERVICE, Fluentd will be installed as service." 432 | break 433 | ;; 434 | [user]*) 435 | declare install_as_service=false 436 | declare install_as_docker=false 437 | echo "Installation type: USER, Fluentd will be installed in a folder specified in the next step." 
438 | break 439 | ;; 440 | [docker]*) 441 | declare install_as_service=false 442 | declare install_as_docker=true 443 | echo "Installation type: DOCKER, Custom Docker image will be built based on the latest fluentd image and user input" 444 | break 445 | ;; 446 | *) 447 | echo "Please answer: service, user or docker." 448 | esac 449 | done 450 | echo 451 | 452 | if [ "$install_as_docker" == false ]; then 453 | # install fluentd 454 | install_fluentd $install_as_service 455 | else 456 | # check if docker is running/present 457 | { 458 | echo "Checking if docker is installed..." 459 | echo 460 | docker ps -q 461 | } || { 462 | terminate "Docker is not running or not installed, please fix the problem before running the script again." 463 | } 464 | echo 465 | echo "Docker is present and running!" 466 | echo 467 | fi 468 | 469 | install_log_vendor $install_as_service 470 | 471 | # Enable/start Fluentd, only for non docker options. 472 | if [ "$install_as_docker" == false ]; then 473 | start_enable_fluentd $install_as_service 474 | else 475 | # build docker image 476 | build_docker_image 477 | declare run_docker_image=$(question "Would you like to create and run a container for $docker_image_tag:latest? [y/n]: ") 478 | if [ "$run_docker_image" == true ]; then 479 | run_docker_image $user_product_path 480 | fi 481 | fi 482 | 483 | # summary message 484 | echo 485 | print_green 'Fluentd installation summary:' 486 | echo "$fluentd_summary_msg" 487 | if [ "$install_as_docker" == true ]; then 488 | print_error "- ALERT! Please make sure the docker container has read/write access to the JPD logs folder (artifactory, xray, etc)." 489 | else 490 | print_error "- ALERT! Please make sure Fluentd has read/write access to the JPD logs folder (artifactory, xray, etc)." 491 | print_error "- ALERT! Before starting Fluentd please reload the environment (e.g. logout/login the current user: $USER)." 
492 | fi 493 | echo "- Additional information related to the JFrog log analytics: https://github.com/jfrog/log-analytics" 494 | print_green 'Fluentd installation completed!' 495 | echo 496 | # Fin! 497 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/log-vendors/fluentd-datadog-installer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Fluents datadog script for JFrog products 4 | 5 | # vars 6 | FLUENTD_DATADOG_CONF_BASE_URL='https://raw.githubusercontent.com/jfrog/log-analytics-datadog/master' 7 | ERROR_MESSAGE='Error while installing/configuring Datadog.' 8 | 9 | # load the common script 10 | if [ "$LOCAL_MODE" == true ]; then 11 | source ./utils/common.sh 12 | else 13 | load_remote_script "$SCRIPTS_URL_PATH/utils/common.sh" "common.sh" 14 | fi 15 | 16 | # intro message 17 | intro() { 18 | ## Datadog - Fluentd Install Script 19 | load_and_print_logo "$SCRIPTS_URL_PATH/other/dd_ascii_logo.txt" "dd_ascii_logo.txt" 20 | echo 21 | echo 'The installation script for the Datadog plugin performs the following tasks:' 22 | echo '- Configure Datadog for JFrog artifactory, xray, etc' 23 | echo 'More info: https://github.com/jfrog/log-analytics-datadog' 24 | echo 25 | } 26 | 27 | # Configure fluentd datadog plugin based on the JFrog product 28 | configure_fluentd() { 29 | declare fluentd_as_service=$1 30 | declare install_as_docker=$2 31 | declare user_install_fluentd_install_path=$3 32 | declare gem_command=$4 33 | 34 | # Downloading the fluentd config for Datadog based on the user input 35 | config_download_path_base="https://raw.githubusercontent.com/jfrog/log-analytics-datadog/master/" 36 | while true; do 37 | echo 38 | read -p 'Type of Datadog configuration: [Artifactory or Xray]: ' product_name 39 | case $product_name in 40 | [artifactory]*) 41 | jfrog_env_variables '/var/opt/jfrog/artifactory/' 'artifactory' $fluentd_as_service 'artifactory' 
$install_as_docker 42 | declare fluentd_datadog_conf_name='fluent.conf.rt' 43 | download_fluentd_conf_file $FLUENTD_DATADOG_CONF_BASE_URL $fluentd_datadog_conf_name $TEMP_FOLDER 44 | # Update API key datadog 45 | update_fluentd_config_file "$TEMP_FOLDER/$fluentd_datadog_conf_name" 'Please provide Datadog API KEY (more info: https://docs.datadoghq.com/account_management/api-app-keys): ' 'API_KEY' true $fluentd_as_service 46 | break 47 | ;; 48 | [xray]*) 49 | jfrog_env_variables '/var/opt/jfrog/xray/' 'xray' $fluentd_as_service 'xray' $install_as_docker 50 | declare fluentd_datadog_conf_name='fluent.conf.xray' 51 | download_fluentd_conf_file $FLUENTD_DATADOG_CONF_BASE_URL $fluentd_datadog_conf_name $TEMP_FOLDER 52 | # Xray related config questions 53 | xray_shared_questions "$TEMP_FOLDER" "$fluentd_datadog_conf_name" "$gem_command" $fluentd_as_service $install_as_docker 54 | # Update API key datadog 55 | update_fluentd_config_file "$TEMP_FOLDER/$fluentd_datadog_conf_name" 'Please provide Datadog API KEY (more info: https://docs.datadoghq.com/account_management/api-app-keys): ' 'DATADOG_API_KEY' true $fluentd_as_service 56 | break 57 | ;; 58 | #[nginx]*) 59 | # jfrog_env_variables '/var/opt/jfrog/artifactory/' 'artifactory' 60 | # declare fluentd_datadog_conf_name='fluent.conf.nginx' 61 | # download_fluentd_conf_file $fluentd_datadog_conf_name 62 | # break 63 | # ;; 64 | #[missioncontrol]*) 65 | # jfrog_env_variables '/var/opt/jfrog/xray/' 'mission Control' $fluentd_as_service $install_as_docker 66 | # declare fluentd_datadog_conf_name='fluent.conf.missioncontrol' 67 | # download_fluentd_conf_file $FLUENTD_DATADOG_CONF_BASE_URL $fluentd_datadog_conf_name $TEMP_FOLDER 68 | # break 69 | # ;; 70 | #[distribution]*) 71 | # jfrog_env_variables '/var/opt/jfrog/distribution/' 'distribution' $fluentd_as_service $install_as_docker 72 | # declare fluentd_datadog_conf_name='fluent.conf.distribution' 73 | # download_fluentd_conf_file $FLUENTD_DATADOG_CONF_BASE_URL 
$fluentd_datadog_conf_name $TEMP_FOLDER 74 | # break 75 | # ;; 76 | #[pipelines]*) 77 | # jfrog_env_variables '/opt/jfrog/pipelines/var/' 'pipelines' $fluentd_as_service 78 | # declare fluentd_datadog_conf_name='fluent.conf.pipelines' 79 | # download_fluentd_conf_file $FLUENTD_DATADOG_CONF_BASE_URL $fluentd_datadog_conf_name $TEMP_FOLDER 80 | # break 81 | # ;; 82 | *) echo 'Incorrect value, please try again. ' ; 83 | esac 84 | done 85 | 86 | # update Dockerfile if needed - fluentd conf file name 87 | if [ "$install_as_docker" == true ]; then 88 | run_command false "sed -i -e "s,FLUENT_CONF_FILE_NAME,$fluentd_datadog_conf_name,g" $DOCKERFILE_PATH" 89 | fi 90 | 91 | # finalizing configuration 92 | finalizing_configuration $install_as_docker $fluentd_as_service $fluentd_datadog_conf_name "$user_install_fluentd_install_path" 93 | } 94 | 95 | # init method (run it first) 96 | install_plugin() { 97 | declare fluentd_as_service=$1 98 | declare install_as_docker=$2 99 | declare user_install_fluentd_install_path=$3 100 | declare gem_command=$4 101 | 102 | #init script 103 | intro 104 | 105 | # install datadog plugin (VM or docker) 106 | declare fluentd_plugin_name=fluent-plugin-datadog 107 | install_fluentd_plugin $fluentd_as_service $install_as_docker $fluentd_plugin_name "$gem_command" || terminate $ERROR_MESSAGE 108 | 109 | # configure fluentd 110 | configure_fluentd $fluentd_as_service $install_as_docker "$user_install_fluentd_install_path" "$gem_command" || terminate $ERROR_MESSAGE 111 | 112 | # summary message 113 | echo 114 | echo 'Datadog plugin installation summary:' 115 | if [ "$install_as_docker" == true ]; then 116 | echo "- The fluentd configuration will be added to the docker image." 
117 | fi 118 | print_error '- ALERT: To use predefined Datadog Jfrog dashboards please do the following:' 119 | print_error ' 1) Install Datadog JFrog integration integration: https://app.datadoghq.com/account/settings#integrations/jfrog-platform' 120 | print_error ' 2) To add Datadog JFrog dashboards (Datadog portal) go to Dashboard -> Dashboard List, find JFrog Artifactory Dashboard, Artifactory Metrics, Xray Metrics, Xray Logs, Xray Violations and explore it.' 121 | echo '- More information: https://github.com/jfrog/log-analytics-datadog' 122 | print_green "Fluentd Datadog plugin configured!" 123 | } 124 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/log-vendors/fluentd-splunk-installer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # const 4 | FLUENTD_SPLUNK_CONF_BASE_URL='https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master' 5 | ERROR_MESSAGE='Error while installing/configuring Splunk.' 6 | 7 | # load the common script 8 | if [ "$LOCAL_MODE" == true ]; then 9 | source ./utils/common.sh 10 | else 11 | load_remote_script "$SCRIPTS_URL_PATH/utils/common.sh" "common.sh" 12 | fi 13 | 14 | intro() { 15 | ## Splunk - Fluentd Install Script 16 | load_and_print_logo "$SCRIPTS_URL_PATH/other/spl_ascii_logo.txt" "spl_ascii_logo.txt" 17 | echo 18 | echo 'The installation script for the Splunk plugin performs the following tasks:' 19 | echo '- Configure Splunk for JFrog artifactory, xray, etc' 20 | echo 'More info: https://github.com/jfrog/log-analytics-splunk' 21 | echo 22 | print_error "ALERT: Before continuing please complete the following steps:" 23 | echo 24 | echo "1) Splunkbase App 25 | - Install the JFrog Log Analytics Platform app from Splunkbase - https://splunkbase.splunk.com/app/5023. 26 | - Restart Splunk post installation of App. 27 | - Login to Splunk after the restart completes. 
28 | - Confirm the version is the latest version available in Splunkbase. 29 | more info: https://github.com/jfrog/log-analytics-splunk/blob/master/README.md#splunkbase-app 30 | 31 | 2) Configure Splunk 32 | - Create new index 'jfrog_splunk' 33 | more info: https://github.com/jfrog/log-analytics-splunk/blob/master/README.md#create-index-jfrog_splunk 34 | - Configure new HEC (HTTP Event Collector) token to receive Logs (use 'jfrog_splunk' index to store the JFrog platform log data into). 35 | more info: https://github.com/jfrog/log-analytics-splunk/blob/master/README.md#configure-new-hec-token-to-receive-logs" 36 | echo 37 | declare continue_with_steps=$(question "Are you ready to continue? [y/n]: ") 38 | if [ "$continue_with_steps" == false ]; then 39 | echo 'Please complete the Splunk pre installation steps before continue.' 40 | echo 'Have a nice day! Good Bye!' 41 | exit 1 42 | fi 43 | echo 44 | } 45 | 46 | shared_config_questions() { 47 | declare fluentd_splunk_conf_name=$1 48 | declare fluentd_as_service=$2 # FIX(review): was 'declare $fluentd_as_service=$2' — the stray '$' declares a variable named by the *value* of fluentd_as_service instead of assigning the parameter 49 | 50 | download_fluentd_conf_file $FLUENTD_SPLUNK_CONF_BASE_URL $fluentd_splunk_conf_name $TEMP_FOLDER 51 | # configure HEC url 52 | update_fluentd_config_file "$TEMP_FOLDER/$fluentd_splunk_conf_name" 'Provide IP or DNS of Splunk HEC: ' 'HEC_HOST' false $fluentd_as_service 53 | # configure HEC port 54 | update_fluentd_config_file "$TEMP_FOLDER/$fluentd_splunk_conf_name" 'Provide Splunk HEC port: ' 'HEC_PORT' false $fluentd_as_service 55 | # configure HEC token 56 | update_fluentd_config_file "$TEMP_FOLDER/$fluentd_splunk_conf_name" 'Provide Splunk HEC token value: ' 'HEC_TOKEN' true $fluentd_as_service 57 | # configure SSL token 58 | echo 59 | echo 60 | declare enable_ssl=$(question "Would you like to enable SSL?
[y/n]: ") 61 | if [ "$enable_ssl" == true ]; then 62 | update_fluentd_config_file_headless "$TEMP_FOLDER/$fluentd_splunk_conf_name" "#use_ssl" "use_ssl" $fluentd_as_service 63 | fi 64 | # configure CA file 65 | echo 66 | declare add_ca_file=$(question "Would you like to add 'root certificate authority' file (CA)? [y/n]: ") 67 | if [ "$add_ca_file" == true ]; then 68 | update_fluentd_config_file_headless "$TEMP_FOLDER/$fluentd_splunk_conf_name" "#ca_file" "ca_file" $fluentd_as_service 69 | # ask for the CA file path 70 | while [ true ]; do 71 | update_fluentd_config_file "$TEMP_FOLDER/$fluentd_splunk_conf_name" 'Provide CA file path: ' "/path/to/ca.pem" false $fluentd_as_service 72 | ca_file_path=$last_fluentd_conf_value 73 | if [ -f "$ca_file_path" ]; then 74 | break 75 | else 76 | echo "The provided CA file path '$ca_file_path' is incorrect, please try again." 77 | fi 78 | done 79 | 80 | # splunk dockerfile CA configuration 81 | if [ "$install_as_docker" == true ]; then 82 | echo '## Required for CA file' >> "$DOCKERFILE_PATH" 83 | echo '## Root might be needed to create copy the CA file' >> "$DOCKERFILE_PATH" 84 | echo "USER root" >> "$DOCKERFILE_PATH" 85 | echo "RUN mkdir -p $ca_file_path" >> "$DOCKERFILE_PATH" 86 | echo "COPY ./$(basename $ca_file_path) $ca_file_path" >> "$DOCKERFILE_PATH" 87 | echo '## Fix the CA file permissions' >> "$DOCKERFILE_PATH" 88 | echo "RUN chown -R 1001:1001 $ca_file_path" >> "$DOCKERFILE_PATH" 89 | echo '## Reset back to user' >> "$DOCKERFILE_PATH" 90 | echo "USER 1001" >> "$DOCKERFILE_PATH" 91 | # copy the CA file to the dockerfile folder 92 | cp $ca_file_path ./ 93 | fi 94 | fi 95 | } 96 | 97 | configure_fluentd() { 98 | declare fluentd_as_service=$1 99 | declare install_as_docker=$2 100 | declare user_install_fluentd_install_path=$3 101 | declare gem_command=$4 102 | 103 | # Downloading the fluentd config for Splunk based on the user input 104 | 
config_download_path_base="https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/" 105 | while true; do 106 | echo 107 | read -p 'Type of Splunk configuration: [Artifactory or Xray]: ' product_name 108 | case $product_name in 109 | [artifactory]*) 110 | jfrog_env_variables '/var/opt/jfrog/artifactory/' 'artifactory' $fluentd_as_service 'artifactory' $install_as_docker 111 | declare fluentd_splunk_conf_name='fluent.conf.rt' 112 | # shared splunk configuration questions 113 | shared_config_questions $fluentd_splunk_conf_name $fluentd_as_service 114 | break 115 | ;; 116 | [xray]*) 117 | jfrog_env_variables '/var/opt/jfrog/xray/' 'xray' $fluentd_as_service 'xray' $install_as_docker 118 | declare fluentd_splunk_conf_name='fluent.conf.xray' 119 | # shared splunk configuration questions 120 | shared_config_questions $fluentd_splunk_conf_name $fluentd_as_service 121 | # Xray related config questions 122 | xray_shared_questions "$TEMP_FOLDER" "$fluentd_splunk_conf_name" "$gem_command" "$fluentd_as_service" $install_as_docker 123 | break 124 | ;; 125 | # [nginx]*) 126 | # jfrog_env_variables '/var/opt/jfrog/artifactory/' 'artifactory' 127 | # declare fluentd_splunk_conf_name='fluent.conf.nginx' 128 | # download_fluentd_conf_file $fluentd_splunk_conf_name 129 | # break 130 | # ;; 131 | # [missioncontrol]*) 132 | # jfrog_env_variables '/var/opt/jfrog/xray/' 'mission Control' $fluentd_as_service 133 | # declare fluentd_splunk_conf_name='fluent.conf.missioncontrol' 134 | # # shared splunk configuration questions 135 | # shared_config_questions $fluentd_splunk_conf_name $fluentd_as_service 136 | # break 137 | # ;; 138 | # [distribution]*) 139 | # jfrog_env_variables '/var/opt/jfrog/distribution/' 'distribution' $fluentd_as_service 140 | # declare fluentd_splunk_conf_name='fluent.conf.distribution' 141 | # # shared splunk configuration questions 142 | # shared_config_questions $fluentd_splunk_conf_name $fluentd_as_service 143 | # break 144 | # ;; 145 | # 
[pipelines]*) 146 | # jfrog_env_variables '/opt/jfrog/pipelines/var/' 'pipelines' $fluentd_as_service 147 | # declare fluentd_splunk_conf_name='fluent.conf.pipelines' 148 | # # shared splunk configuration questions 149 | # shared_config_questions $fluentd_splunk_conf_name $fluentd_as_service 150 | # break 151 | # ;; 152 | *) echo 'Incorrect value, please try again. ' ; 153 | esac 154 | done 155 | 156 | # update Dockerfile if needed - fluentd conf file name 157 | if [ "$install_as_docker" == true ]; then 158 | run_command false "sed -i -e "s,FLUENT_CONF_FILE_NAME,$fluentd_splunk_conf_name,g" $DOCKERFILE_PATH" 159 | fi 160 | 161 | # finalizing configuration 162 | finalizing_configuration $install_as_docker $fluentd_as_service $fluentd_splunk_conf_name "$user_install_fluentd_install_path" || terminate $ERROR_MESSAGE 163 | } 164 | 165 | install_plugin() { 166 | declare fluentd_as_service=$1 167 | declare install_as_docker=$2 168 | declare user_install_fluentd_install_path=$3 169 | declare gem_command=$4 170 | 171 | #init script 172 | intro 173 | 174 | # install splunk plugin (VM or docker) 175 | declare fluentd_plugin_name=fluent-plugin-splunk-enterprise 176 | install_fluentd_plugin $fluentd_as_service $install_as_docker $fluentd_plugin_name "$gem_command" || terminate $ERROR_MESSAGE 177 | 178 | # configure fluentd 179 | configure_fluentd $fluentd_as_service $install_as_docker "$user_install_fluentd_install_path" "$gem_command" $install_as_docker || terminate $ERROR_MESSAGE 180 | 181 | # summary message 182 | echo 183 | echo 'Splunk plugin installation summary:' 184 | echo '- More information: https://github.com/jfrog/log-analytics-splunk' 185 | print_green "Fluentd Splunk plugin configured!" 
186 | } 187 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/other/dd_ascii_logo.txt: -------------------------------------------------------------------------------- 1 | %%%%%%%% %%% %%%%%%%% %%% %%%%%%%, %%%%% %%%%%%% 2 | %% %% %% %% %% %% %% %% %% ,%% %% %%, 3 | %% %% %% %% %% %% %% %% %% %% (%# %%. #### 4 | %% %% %% (%%%%% %% %% (%%%%% %% %% #% %%* #%% %% 5 | %%%%%%%%% %% %% %% %% %% %%%%%%%%% %%%%%%%% %%%%%%%% 6 | 7 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/other/jfrog_ascii_logo.txt: -------------------------------------------------------------------------------- 1 | //// ///////// 2 | //// //// 3 | //// //// /////// ////////// ////////////// 4 | //// //////// //// //// //// //// (/// 5 | //// //// //// //// //// //// ///// 6 | //// //// //// //// //// ////// 7 | //// //// //// //// //// ////////(/ 8 | //// //// //// //////// ,**/////// 9 | //// //// //// 10 | //// //////////// 11 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/other/spl_ascii_logo.txt: -------------------------------------------------------------------------------- 1 | @@@ @@@ 2 | @@@ @@@ 3 | @@@@@@ @@@ @@@* @@@ @@@ @@@ @@@ @@@* @@@ @@ 4 | @@@ @@@@ @@ @@@ @@@ @@@ @@@ @@@ @@@*@@@ 5 | @@@ @@@ @@ @@@ @@@ @@@ @@@ @@@ @@@ @@@ 6 | @ @@@ @@@@ @@@ @@@ @@@ @@@ @@@ @@@ @@@ @@@ 7 | @@@@@ @@& @@@ @@@ @@@@@@@@ @@@ @@@ @@@ @@@ 8 | @@& 9 | @@& 10 | -------------------------------------------------------------------------------- /fluentd-installer/scripts/linux/utils/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #const colors 4 | RED=`tput setaf 1` 5 | GREEN=`tput setaf 2` 6 | DEBUG_COLOR=`tput setaf 3` 7 | RESET=`tput sgr0` 8 | 9 | # Simple Yes/No Input util function. 
10 | question() {
11 | question_text=$1
12 | answer=null
13 | while true; do
14 | read -p "$question_text" yesno
15 | case $yesno in
16 | [Yy]*)
17 | answer=true
18 | break
19 | ;;
20 | [Nn]*)
21 | answer=false
22 | break
23 | ;;
24 | *) echo "Please answer yes or no." ;;
25 | esac
26 | done
27 | echo $answer
28 | }
29 | 
30 | # Checks if fluentd is installed (td-agent service or user-level install); terminates the script otherwise.
31 | fluentd_check() {
32 | declare fluentd_as_service=$1
33 | declare user_install_fluentd_file_test_path=$2
34 | declare no_service_detected_message="No fluentd detected. Please install fluentd before continue (more info: https://github.com/jfrog/log-analytics)"
35 | 
36 | # td-agent check
37 | if [ $fluentd_as_service == true ]; then
38 | TD_AGENT_SERVICE_NAME="td-agent.service"
39 | td_agent_present=$(systemctl list-units --full -all | grep "$TD_AGENT_SERVICE_NAME")
40 | if [ -z "$td_agent_present" ]; then # fixed: the old '-a "$td_agent_present" != " "' clause was always true whenever -z was, i.e. dead code
41 | terminate "$no_service_detected_message"
42 | fi
43 | # user installed fluentd check - check if fluentd file is in the bin folder
44 | elif [ ! -f "$user_install_fluentd_file_test_path/fluentd" ]; then
45 | echo $user_install_fluentd_file_test_path
46 | terminate "$no_service_detected_message"
47 | fi
48 | }
49 | 
50 | # Executes commands based on the provided command string.
51 | run_command() {
52 | declare run_as_sudo=$1
53 | declare command_string=$2
54 | 
55 | print_in_dev_mode_only "Method 'run_command', values:
56 | run_as_sudo=$run_as_sudo
57 | command_string=$command_string
58 | "
59 | 
60 | # check if run the command as sudo
61 | if [ $run_as_sudo == true ]; then
62 | print_in_dev_mode_only "Run '$command_string' as SUDO..."
63 | declare sudo_cmd="sudo"
64 | else
65 | print_in_dev_mode_only "Run '$command_string' as user (non SUDO)..."
66 | declare sudo_cmd="" 67 | fi 68 | 69 | # run the command 70 | { 71 | print_in_dev_mode_only "Run command: '${sudo_cmd} ${command_string}'" 72 | ${sudo_cmd} ${command_string} 73 | } || { 74 | print_error "Error, command:'$command_string'. Please check the logs for more information. " 75 | } 76 | } 77 | 78 | # Updates the JPD product log folder access based on the provided product path. 79 | update_permissions() { 80 | declare product_path=$1 81 | declare user_name=$2 82 | declare run_as_sudo=$3 83 | 84 | print_in_dev_mode_only "Method: 'update_permissions', values: 85 | product_path=$1 86 | user_name=$2 87 | run_as_sudo=$3" 88 | 89 | echo 90 | declare update_perm=$(question "Would you like to add user '$user_name' to the product group and update the log folder permissions? (sudo required)? [y/n]: ") 91 | if [ "$update_perm" == true ]; then 92 | { 93 | echo 94 | read -p "Please provide the product group name (e.g artifactory, xray, etc): " group 95 | run_command $run_as_sudo "usermod -a -G $group $user_name" 96 | echo "User $user_name added to $group." 97 | run_command $run_as_sudo "chmod 0770 $product_path/log -R" 98 | sudo find $product_path/log/ -name "*.log" -exec chmod 0640 {} \; # TODO this should be rewritten so can be executed with "run_command", 99 | } || { 100 | print_error "The permissions update for $group was unsuccessful. Please try to update the log folder permissions manually. The log folder path: $product_path/log." 101 | } 102 | else 103 | print_error "ALERT! You chose not to update the logs folder permissions. Please make sure fluentd has read/write permissions to $product_path folder before continue." 104 | fi 105 | } 106 | 107 | # Prints text in red. 108 | print_error() { 109 | declare error_message=$1 110 | echo "$RED$error_message$RESET" 111 | } 112 | 113 | # Prints text in green. 
114 | print_green() { 115 | declare message=$1 116 | echo "$GREEN$message$RESET" 117 | } 118 | 119 | # Helps to setup the fluentd environment based on the provided data. 120 | jfrog_env_variables() { 121 | declare jf_default_path_value=$1 122 | declare jf_product_data_default_name=$2 123 | declare fluentd_as_service=$3 124 | declare group=$4 125 | declare install_as_docker=$5 126 | 127 | echo 128 | read -p "Please provide $jf_product_data_default_name location (path where the log folder is located). (default: $jf_default_path_value): " user_product_path 129 | 130 | # check if the path is empty, if empty then use default 131 | echo "Provided location: $user_product_path" 132 | if [ -z "$user_product_path" ]; then 133 | echo "Using the default value $jf_default_path_value" 134 | user_product_path=$jf_default_path_value 135 | fi 136 | 137 | if [ ! -d "$user_product_path" ] && [ "$install_as_docker" == false ]; then 138 | echo "Incorrect product path $user_product_path" 139 | echo "Please try again." 140 | jfrog_env_variables $jf_default_path_value $jf_product_data_default_name $fluentd_as_service $group $install_as_docker 141 | fi 142 | 143 | # update the product path if needed (remove / if needed) 144 | if [ "${user_product_path: -1}" == "/" ]; then 145 | user_product_path=${user_product_path::-1} 146 | fi 147 | 148 | if [ "$install_as_docker" == false ]; then 149 | declare jf_product_var_path_string="JF_PRODUCT_DATA_INTERNAL=$user_product_path" 150 | echo "Setting the product path for JF_PRODUCT_DATA_INTERNAL=$user_product_path" 151 | if [ $fluentd_as_service == true ]; then # fluentd as service 152 | # update the service with the envs 153 | declare env_conf_file='/usr/lib/systemd/system/td-agent.service' 154 | jf_product_path_string="Environment=$jf_product_var_path_string" 155 | if grep -q "$jf_product_path_string" $env_conf_file; then 156 | echo "File $env_conf_file already contains the variables: $jf_product_var_path_string." 
157 | else
158 | sudo sed -i "/^\[Service\]/a $jf_product_path_string" $env_conf_file
159 | fi
160 | update_permissions $user_product_path "td-agent" true
161 | else
162 | # update the user profile with the envs (fluentd as user install)
163 | declare env_conf_file="$HOME/.bashrc"
164 | jf_product_path_string="export $jf_product_var_path_string"
165 | if grep -q "$jf_product_path_string" $env_conf_file; then # fixed: literal single quotes in the grep pattern never matched the appended line, so the export was duplicated on every run
166 | echo "File $env_conf_file already contains the variables: $jf_product_var_path_string."
167 | else
168 | echo "$jf_product_path_string # Added by the fluentd JFrog install script" >> $env_conf_file
169 | fi
170 | update_permissions $user_product_path $USER true
171 | fi
172 | else
173 | # update dockerfile
174 | run_command false "sed -i -e "s,JF_PRODUCT_DATA_INTERNAL_VALUE,$user_product_path,g" $DOCKERFILE_PATH"
175 | fi
176 | echo
177 | }
178 | 
179 | # Downloads predefined fluentd template based on the provided vars.
180 | download_fluentd_conf_file() {
181 | declare fluentd_conf_base_url=$1
182 | declare fluentd_conf_name=$2
183 | declare temp_folder=$3
184 | declare fluentd_conf_file_path="$temp_folder/$fluentd_conf_name"
185 | 
186 | print_in_dev_mode_only "Method: download_fluentd_conf_file, values:
187 | fluentd_conf_base_url=$1
188 | fluentd_conf_name=$2
189 | temp_folder=$3
190 | fluentd_conf_file_path=$temp_folder/$fluentd_conf_name"
191 | 
192 | wget -nv -O $fluentd_conf_file_path "$fluentd_conf_base_url/$fluentd_conf_name"
193 | }
194 | 
195 | # Utility function that asks the user for input and, based on the answer, updates the indicated fluentd conf file.
196 | update_fluentd_config_file() {
197 | declare fluentd_conf_file_path=$1
198 | declare conf_question=$2
199 | declare conf_property=$3
200 | declare value_is_secret=$4
201 | declare run_as_sudo=$5
202 | 
203 | print_in_dev_mode_only "Method: 'update_fluentd_config_file', values:
204 | fluentd_conf_file_path=$1
205 | conf_question=$2
206 | conf_property=$3
207 | value_is_secret=$4
208
| run_as_sudo=$5"
209 | 
210 | # check if we hide the user input
211 | echo
212 | while true; do
213 | if [ "$value_is_secret" == true ]; then
214 | echo -n $conf_question
215 | read -s fluentd_conf_value # hide user input
216 | else
217 | read -p "$conf_question" fluentd_conf_value # don't hide user input
218 | fi
219 | print_in_dev_mode_only "fluentd_conf_value=$fluentd_conf_value"
220 | # check if the value is empty, if empty then ask again
221 | if [ -z "$fluentd_conf_value" ]; then # fixed: '[ -z "$x" -a "$x" ]' can never be true, so empty input was silently accepted instead of re-prompted
222 | echo "Incorrect value, please try again."
223 | else
224 | break
225 | fi
226 | done
227 | # update the config file
228 | update_fluentd_config_file_headless "$fluentd_conf_file_path" "$conf_property" "$fluentd_conf_value" $run_as_sudo
229 | last_fluentd_conf_value=$fluentd_conf_value
230 | }
231 | 
232 | # Replaces a placeholder property with the given value in the fluentd conf file (non-interactive variant).
233 | update_fluentd_config_file_headless() {
234 | declare fluentd_conf_file_path="$1"
235 | declare conf_property="$2"
236 | declare fluentd_conf_value="$3"
237 | declare run_as_sudo="$4"
238 | declare value_is_secret=$5
239 | 
240 | print_in_dev_mode_only "Method: 'update_fluentd_config_file_headless', values:
241 | fluentd_conf_file_path=$fluentd_conf_file_path
242 | conf_property=$conf_property
243 | fluentd_conf_value=$fluentd_conf_value
244 | run_as_sudo=$run_as_sudo"
245 | 
246 | # update the config file
247 | {
248 | run_command $run_as_sudo "sed -i -e "s,$conf_property,$fluentd_conf_value,g" $fluentd_conf_file_path"
249 | } || {
250 | print_in_dev_mode_only "Method: 'update_fluentd_config_file_headless' FAILED, values:
251 | fluentd_conf_file_path=$fluentd_conf_file_path
252 | conf_property=$conf_property
253 | fluentd_conf_value=$fluentd_conf_value
254 | run_as_sudo=$run_as_sudo"
255 | print_error "The value was not added to fluentd conf file $fluentd_conf_file_path. Please check the logs for more info."
256 | }
257 | }
258 | # Copy fluentd conf file based on the type of installation and provided vars.
259 | copy_fluentd_conf() { 260 | declare fluentd_conf_path_base=$1 261 | declare fluentd_conf_file_name=$2 262 | declare fluentd_as_service=$3 263 | declare install_as_docker=$4 264 | declare temp_folder=$5 265 | 266 | # copy and save the changes 267 | # if fluentd is installed as service 268 | if [ "$install_as_docker" == false ]; then 269 | if [ $fluentd_as_service == true ]; then 270 | fluentd_conf_file_path="$fluentd_conf_path_base/td-agent.conf" 271 | declare backup_timestamp=$(date +%s) 272 | # if config exists than back-up the old fluentd conf file 273 | if [ -f "$fluentd_conf_file_path" ]; then 274 | sudo mv $fluentd_conf_file_path "${fluentd_conf_file_path}_backup_${backup_timestamp}" 275 | fi 276 | else # if fluentd is installed as "user installation" 277 | while true; do 278 | echo 279 | read -p "Please provide location where fluentd conf file will be stored (default: $fluentd_conf_path_base):" user_fluentd_conf_path 280 | # TODO "Trim" the string to make sure that no empty spaces string is passed 281 | if [ -z "$user_fluentd_conf_path" ]; then # empty string use the default value 282 | fluentd_conf_file_path="$fluentd_conf_path_base/$fluentd_conf_file_name" 283 | break 284 | elif [ -h "$user_fluentd_conf_path" ]; then # user typed the conf path 285 | fluentd_conf_file_path="$user_fluentd_conf_path/$fluentd_conf_file_name" 286 | break 287 | fi 288 | done 289 | fi 290 | else 291 | # in case of docker, copy the fluentd file to the current folder where Dockerfile is 292 | fluentd_conf_file_path="./" 293 | fi 294 | 295 | # copy the conf file to the td-agent folder/conf 296 | { 297 | run_command $fluentd_as_service "cp $temp_folder/$fluentd_conf_file_name $fluentd_conf_file_path" 298 | echo "Fluentd conf file path: $fluentd_conf_file_path/$fluentd_conf_file_name" 299 | # clean up 300 | rm -rf $temp_folder/$fluentd_conf_file_name 301 | } || { 302 | terminate 'Please review the errors.' 
303 | }
304 | }
305 | 
306 | # Util method to install fluentd (gem) plugins (only SIEM at this point)
307 | install_custom_plugin() {
308 | declare plugin_name=$1
309 | declare gem_command=$2
310 | declare run_as_sudo=$3
311 | 
312 | # Install additional plugins (splunk, datadog, elastic)
313 | echo
314 | declare user_install_plugin=$(question "Would you like to install $plugin_name plugin [y/n]: ")
315 | if [ "$user_install_plugin" == true ]; then
316 | declare lower_case_plugin_name="${plugin_name,,}" # fixed: 'declare x=echo "${plugin_name,,}"' assigned the literal string 'echo' instead of the lower-cased name
317 | case $lower_case_plugin_name in
318 | siem) # fixed: '[siem]*' was a bracket expression matching any string starting with s/i/e/m, not the word 'siem'
319 | echo Installing fluent-plugin-jfrog-siem...
320 | # NOTE(review): $install_as_docker is read from the caller's scope, not passed as a parameter — confirm all callers define it
321 | if [ "$install_as_docker" == false ]; then
322 | run_command $run_as_sudo "$gem_command install fluent-plugin-jfrog-siem" || terminate 'Please review the errors.'
323 | else
324 | echo '## Required JFrog fluentd plugins' >> "$DOCKERFILE_PATH"
325 | echo "RUN fluent-gem install fluent-plugin-jfrog-siem" >> "$DOCKERFILE_PATH"
326 | echo "RUN fluent-gem install fluent-plugin-record-modifier" >> "$DOCKERFILE_PATH"
327 | fi
328 | declare help_link=https://github.com/jfrog/fluent-plugin-jfrog-siem
329 | ;;
330 | *) print_error "Plugin $plugin_name not found" ;;
331 | esac
332 | fi
333 | }
334 | # Util method to share the xray installation questions.
335 | xray_shared_questions() { 336 | temp_folder=$1 337 | fluentd_datadog_conf_name=$2 338 | gem_command=$3 339 | fluentd_as_service=$4 340 | install_as_docker=$5 341 | 342 | # required: JPD_URL is the JPD URL of the format http:// with is used to pull Xray Violations 343 | update_fluentd_config_file "$temp_folder/$fluentd_datadog_conf_name" "Provide JFrog URL (more info: https://www.jfrog.com/confluence/display/JFROG/General+System+Settings): " 'JPD_URL' false $fluentd_as_service 344 | # required: USER is the JPD username for authentication 345 | update_fluentd_config_file "$temp_folder/$fluentd_datadog_conf_name" 'Provide the JPD username for authentication (more info: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups): ' 'USER' false $fluentd_as_service 346 | # required: JFROG_API_KEY is the JPD API Key for authentication 347 | update_fluentd_config_file "$temp_folder/$fluentd_datadog_conf_name" 'Provide the JPD API Key for authentication (more info: https://www.jfrog.com/confluence/display/JFROG/User+Profile): ' 'JFROG_API_KEY' true $fluentd_as_service 348 | # install SIEM plugin 349 | echo 350 | install_custom_plugin 'SIEM' "$gem_command" $fluentd_as_service 351 | } 352 | 353 | # Downloads Dockerfile template to the current dir 354 | download_dockerfile_template() { 355 | wget -nv -O "$DOCKERFILE_PATH" https://github.com/jfrog/log-analytics/raw/${SCRIPT_BRANCH}/fluentd-installer/scripts/linux/Dockerfile.fluentd 356 | } 357 | 358 | # Util method to copy the fluentd conf file based on the installation type 359 | finalizing_configuration() { 360 | declare install_as_docker=$1 361 | declare fluentd_as_service=$2 362 | declare fluentd_conf_name=$3 363 | declare user_install_fluentd_install_path=$4 364 | 365 | if [ "$install_as_docker" == false ]; then 366 | if [ $fluentd_as_service == true ]; then 367 | copy_fluentd_conf '/etc/td-agent' "$fluentd_conf_name" $fluentd_as_service $install_as_docker "$TEMP_FOLDER" 368 | else 369 | copy_fluentd_conf 
"$user_install_fluentd_install_path" "$fluentd_conf_name" $fluentd_as_service $install_as_docker "$TEMP_FOLDER" 370 | fi 371 | else 372 | copy_fluentd_conf "$user_install_fluentd_install_path" "$fluentd_conf_name" $fluentd_as_service $install_as_docker "$TEMP_FOLDER" 373 | fi 374 | } 375 | 376 | # Util method to install fluentd plugins 377 | install_fluentd_plugin() { 378 | declare fluentd_as_service=$1 379 | declare install_as_docker=$2 380 | declare plugin_name=$3 381 | declare gem_command=$4 382 | 383 | # install slunk fluentd plugin or modify Dockerfile 384 | if [ "$install_as_docker" == false ]; then 385 | declare install_plugin_command="$gem_command install $plugin_name" 386 | # fluentd check 387 | fluentd_check $fluentd_as_service $user_install_fluentd_install_path 388 | # install fluentd datadog plugin 389 | run_command $fluentd_as_service "$install_plugin_command" || terminate "Error while installing $plugin_name plugin." 390 | else 391 | # download dockerfile template 392 | download_dockerfile_template 393 | # add plugin install command to the dockerfile 394 | echo '## Required JFrog fluentd plugins' >> "$DOCKERFILE_PATH" 395 | echo "RUN fluent-gem install $plugin_name" >> "$DOCKERFILE_PATH" 396 | fi 397 | } 398 | 399 | # Print messages only when DEV_MODE=true 400 | print_in_dev_mode_only() { 401 | declare debug_message=$1 402 | if [ "$DEV_MODE" == true ]; then 403 | echo 404 | echo "${DEBUG_COLOR}DEBUG: $debug_message$RESET" 405 | echo 406 | fi 407 | } 408 | 409 | # Loads the remote script based on the provided vars 410 | load_and_print_logo() { 411 | declare logo_url=$1 412 | declare logo_path=$2 413 | 414 | # download script 415 | wget -nv -O "$logo_path" "$logo_url" || terminate "ERROR: Error while downloading ${logo_url}. Exiting..." 
416 | # show logo 417 | declare logo=`cat $logo_path` 418 | echo 419 | print_green "$logo" 420 | echo 421 | } 422 | -------------------------------------------------------------------------------- /fluentd/fluent.conf.distribution: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | @id distributor_service_tail 4 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distributor-service.log" 5 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distributor-service.log.pos" 6 | tag jfrog.distribution.distributor.service 7 | 8 | @type none 9 | 10 | 11 | 12 | @type tail 13 | @id distribution_service_tail 14 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distribution-service.log" 15 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distribution-service.log.pos" 16 | tag jfrog.distribution.distribution.service 17 | 18 | @type none 19 | 20 | 21 | 22 | @type tail 23 | @id distribution_router_tail 24 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log" 25 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log.pos" 26 | tag jfrog.distribution.router.service 27 | 28 | @type none 29 | 30 | 31 | 32 | @type tail 33 | @id distribution_router_traefik_tail 34 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log" 35 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log.pos" 36 | tag jfrog.distribution.router.traefik 37 | 38 | @type regexp 39 | expression ^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] -(?.+)$ 40 | time_key timestamp 41 | time_format %Y-%m-%dT%H:%M:%S.%LZ 42 | 43 | 44 | 45 | @type tail 46 | @id distribution_router_request_tail 47 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log" 48 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log.pos" 49 | tag jfrog.distribution.router.request 50 | 51 | @type json 52 | time_key time 53 | time_format %Y-%m-%dT%H:%M:%S%:z 54 | 55 | 56 | 57 | @type tail 58 | @id 
distribution_request_tail 59 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distribution-request.log" 60 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distribution-request.log.pos" 61 | tag jfrog.distribution.distribution.request 62 | 63 | @type regexp 64 | expression ^(?[^ ]*)\|(?[^ ]*)\|(?.+)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.*)$ 65 | time_key timestamp 66 | time_format %Y-%m-%dT%H:%M:%S.%L%:z 67 | 68 | 69 | 70 | @type tail 71 | @id distribution_access_tail 72 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distribution-access.log" 73 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distribution-access.log.pos" 74 | tag jfrog.distribution.distribution.access 75 | 76 | @type regexp 77 | expression /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] (?.*) for client : (?.+)/(?.+)$/ 78 | time_key timestamp 79 | time_format %Y-%m-%dT%H:%M:%S.%LZ 80 | 81 | 82 | 83 | 84 | @type tail 85 | @id distribution_foreman_tail 86 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distributor-foreman.log" 87 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/distributor-foreman.log.pos" 88 | tag jfrog.distribution.distributor.foreman 89 | 90 | @type regexp 91 | expression ^(?[^.]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \((?[^\)]*)\)(?.*)$ 92 | time_key timestamp 93 | time_format %Y-%m-%d %H:%M:%S,%L 94 | 95 | 96 | # STRIP COLOR CODES FROM SERVICE LOGS 97 | 98 | @type record_transformer 99 | enable_ruby true 100 | 101 | message ${record["message"].gsub(/\e\[([;\d]+)?m/, '')} 102 | 103 | 104 | # FIELD EXTRACT SERVICE LOG 105 | 106 | @type parser 107 | key_name message 108 | 109 | @type multiline 110 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 111 | format1 /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] (?.*)$/ 112 | time_key timestamp 113 | time_format %Y-%m-%dT%H:%M:%S.%LZ 114 | 115 | 116 | 117 | @type record_transformer 118 | 119 | hostname "#{Socket.gethostname}" 120 | log_source ${tag} 121 | 122 | 123 | 124 | 125 | @type copy 126 | 127 | @type 
"elasticsearch" 128 | @id elasticsearch 129 | host "elasticsearch" 130 | port 9200 131 | user "elastic" 132 | password xxxxxx 133 | index_name "unified-artifactory" 134 | include_tag_key true 135 | type_name "fluentd" 136 | logstash_format false 137 | 138 | 139 | @type "splunk_hec" 140 | host "splunk" 141 | port 8088 142 | token xxxxxx 143 | format json 144 | sourcetype_key "log_source" 145 | use_fluentd_time false 146 | flush_interval 10s 147 | 148 | flush_mode interval 149 | retry_type exponential_backoff 150 | flush_interval 10s 151 | 152 | 153 | @type json 154 | 155 | 156 | 157 | @type "datadog" 158 | @id datadog_agent_artifactory 159 | api_key xxxxxx 160 | include_tag_key true 161 | dd_source fluentd 162 | 163 | -------------------------------------------------------------------------------- /fluentd/fluent.conf.missioncontrol: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | @id insight_scheduler_service_tail 4 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-scheduler-service.log" 5 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-scheduler-service.log.pos" 6 | tag jfrog.missioncontrol.insight.scheduler.service 7 | 8 | @type none 9 | 10 | 11 | 12 | @type tail 13 | @id insight_server_service_tail 14 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-server-service.log" 15 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-server-service.log.pos" 16 | tag jfrog.missioncontrol.insight.server.service 17 | 18 | @type none 19 | 20 | 21 | 22 | @type tail 23 | @id router_service_tail 24 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log" 25 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log.pos" 26 | tag jfrog.missioncontrol.router.service 27 | 28 | @type none 29 | 30 | 31 | 32 | @type tail 33 | @id mc_error_tail 34 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/mc-error.log" 35 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/mc-error.log.pos" 36 | 
tag jfrog.missioncontrol.error.service 37 | 38 | @type none 39 | 40 | 41 | 42 | @type tail 43 | @id mc_service_tail 44 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/mc-service.log" 45 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/mc-service.log.pos" 46 | tag jfrog.missioncontrol.service 47 | 48 | @type none 49 | 50 | 51 | 52 | @type tail 53 | @id mc_router_traefik_tail 54 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log" 55 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log.pos" 56 | tag jfrog.missioncontrol.router.traefik 57 | 58 | @type regexp 59 | expression ^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] -(?.+)$ 60 | time_key timestamp 61 | time_format %Y-%m-%dT%H:%M:%S.%LZ 62 | 63 | 64 | 65 | @type tail 66 | @id mc_router_request_tail 67 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log" 68 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log.pos" 69 | tag jfrog.missioncontrol.router.request 70 | 71 | @type json 72 | time_key time 73 | time_format %Y-%m-%dT%H:%M:%S%:z 74 | 75 | 76 | 77 | @type tail 78 | @id insight_server_request_tail 79 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-server-request.log" 80 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-server-request.log.pos" 81 | tag jfrog.missioncontrol.insight.server.request 82 | 83 | @type regexp 84 | expression ^(?[^ ]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 85 | time_key timestamp 86 | time_format %Y-%m-%dT%H:%M:%S.%L%:z 87 | 88 | 89 | 90 | @type tail 91 | @id insight_scheduler_request_tail 92 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-scheduler-request.log" 93 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/insight-scheduler-request.log.pos" 94 | tag jfrog.missioncontrol.insight.scheduler.request 95 | 96 | @type regexp 97 | expression ^(?[^ 
]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 98 | time_key timestamp 99 | time_format %Y-%m-%dT%H:%M:%S.%L%:z 100 | 101 | 102 | 103 | @type tail 104 | @id mc_request_tail 105 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/mc-request.log" 106 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/mc-request.log.pos" 107 | tag jfrog.missioncontrol.service.request 108 | 109 | @type regexp 110 | expression ^(?[^ ]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 111 | time_key timestamp 112 | time_format %Y-%m-%dT%H:%M:%S.%L 113 | 114 | 115 | 116 | @type record_transformer 117 | enable_ruby true 118 | 119 | message ${record["message"].gsub(/\e\[([;\d]+)?m/, '')} 120 | 121 | 122 | 123 | @type parser 124 | key_name message 125 | 126 | @type multiline 127 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 128 | format1 /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] -(?.*)$/ 129 | time_key timestamp 130 | time_format %Y-%m-%dT%H:%M:%S.%LZ 131 | 132 | 133 | 134 | @type record_transformer 135 | 136 | hostname "#{Socket.gethostname}" 137 | log_source ${tag} 138 | 139 | 140 | 141 | 142 | @type copy 143 | 144 | @type "elasticsearch" 145 | @id elasticsearch 146 | host "elasticsearch" 147 | port 9200 148 | user "elastic" 149 | password xxxxxx 150 | index_name "unified-artifactory" 151 | include_tag_key true 152 | type_name "fluentd" 153 | logstash_format false 154 | 155 | 156 | @type "splunk_hec" 157 | host "splunk" 158 | port 8088 159 | token xxxxxx 160 | format json 161 | sourcetype_key "log_source" 162 | use_fluentd_time false 163 | flush_interval 10s 164 | 165 | flush_mode interval 166 | retry_type exponential_backoff 167 | flush_interval 10s 168 | 169 | 170 | @type json 171 | 172 | 173 | 174 | @type "datadog" 175 | @id datadog_agent_artifactory 176 | api_key xxxxxx 177 | include_tag_key true 178 | dd_source fluentd 179 | 180 | 
-------------------------------------------------------------------------------- /fluentd/fluent.conf.pipelines: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | @id api_production_out 4 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/api_production_out.log" 5 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/api_production_out.log.pos" 6 | tag jfrog.pipelines.api.production 7 | 8 | @type none 9 | 10 | 11 | 12 | @type tail 13 | @id api_undefined_out 14 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/api_undefined_out.log" 15 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/api_undefined_out.log.pos" 16 | tag jfrog.pipelines.api.undefined 17 | 18 | @type none 19 | 20 | 21 | 22 | @type tail 23 | @id api_production_out 24 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/www_production_out.log" 25 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/www_production_out.log.pos" 26 | tag jfrog.pipelines.www.production 27 | 28 | @type none 29 | 30 | 31 | 32 | @type tail 33 | @id www_undefined_out 34 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/www_undefined_out.log" 35 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/www_undefined_out.log.pos" 36 | tag jfrog.pipelines.www.undefined.out 37 | 38 | @type none 39 | 40 | 41 | 42 | @type tail 43 | @id cron_log 44 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/cron.log" 45 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/cron.log.pos" 46 | tag jfrog.pipelines.cron 47 | 48 | @type none 49 | 50 | 51 | 52 | @type tail 53 | @id extension_sync_log 54 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/extensionSync.log" 55 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/extensionSync.log.pos" 56 | tag jfrog.pipelines.extension.sync 57 | 58 | @type none 59 | 60 | 61 | 62 | @type tail 63 | @id hook_handler_log 64 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/hookHandler.log" 65 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/hookHandler.log.pos" 66 | tag 
jfrog.pipelines.hook.handler 67 | 68 | @type none 69 | 70 | 71 | 72 | @type tail 73 | @id logup_log 74 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/logup.log" 75 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/logup.log.pos" 76 | tag jfrog.pipelines.logup 77 | 78 | @type none 79 | 80 | 81 | 82 | @type tail 83 | @id marshaller_log 84 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/marshaller.log" 85 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/marshaller.log.pos" 86 | tag jfrog.pipelines.marshaller 87 | 88 | @type none 89 | 90 | 91 | 92 | @type tail 93 | @id nexec_log 94 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/nexec.log" 95 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/nexec.log.pos" 96 | tag jfrog.pipelines.nexec 97 | 98 | @type none 99 | 100 | 101 | 102 | @type tail 103 | @id pipeline_sync_log 104 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/pipelineSync.log" 105 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/pipelineSync.log.pos" 106 | tag jfrog.pipelines.pipeline.sync 107 | 108 | @type none 109 | 110 | 111 | 112 | @type tail 113 | @id run_trigger_log 114 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/runTrigger.log" 115 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/runTrigger.log.pos" 116 | tag jfrog.pipelines.run.trigger 117 | 118 | @type none 119 | 120 | 121 | 122 | @type tail 123 | @id step_trigger_log 124 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/stepTrigger.log" 125 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/stepTrigger.log.pos" 126 | tag jfrog.pipelines.step.trigger 127 | 128 | @type none 129 | 130 | 131 | 132 | @type parser 133 | key_name message 134 | 135 | @type json 136 | time_key timestamp 137 | time_format %Y-%m-%dT%H:%M:%S.%LZ 138 | 139 | 140 | 141 | @type record_transformer 142 | 143 | hostname "#{Socket.gethostname}" 144 | log_source ${tag} 145 | 146 | 147 | 148 | 149 | @type copy 150 | 151 | @type "elasticsearch" 152 | @id elasticsearch 153 | host "elasticsearch" 154 | port 9200 155 | user "elastic" 
156 | password xxxxxx 157 | index_name "unified-artifactory" 158 | include_tag_key true 159 | type_name "fluentd" 160 | logstash_format false 161 | 162 | 163 | @type "splunk_hec" 164 | host "splunk" 165 | port 8088 166 | token xxxxxx 167 | format json 168 | sourcetype_key "log_source" 169 | use_fluentd_time false 170 | flush_interval 10s 171 | 172 | flush_mode interval 173 | retry_type exponential_backoff 174 | flush_interval 10s 175 | 176 | 177 | @type json 178 | 179 | 180 | 181 | @type "datadog" 182 | @id datadog_agent_artifactory 183 | api_key xxxxxx 184 | include_tag_key true 185 | dd_source fluentd 186 | 187 | -------------------------------------------------------------------------------- /fluentd/fluent.conf.rt: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | @id access_service_tail 4 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-service.log" 5 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-service.log.pos" 6 | tag jfrog.rt.access.service 7 | 8 | @type none 9 | 10 | 11 | 12 | @type tail 13 | @id artifactory_service_tail 14 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-service.log" 15 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-service.log.pos" 16 | tag jfrog.rt.artifactory.service 17 | 18 | @type none 19 | 20 | 21 | 22 | @type tail 23 | @id frontend_service_tail 24 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-service.log" 25 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-service.log.pos" 26 | tag jfrog.rt.frontend.service 27 | 28 | @type none 29 | 30 | 31 | 32 | @type tail 33 | @id metadata_service_tail 34 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-service.log" 35 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-service.log.pos" 36 | tag jfrog.rt.metadata.service 37 | 38 | @type none 39 | 40 | 41 | 42 | @type tail 43 | @id router_service_tail 44 | path 
"#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log" 45 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log.pos" 46 | tag jfrog.rt.router.service 47 | 48 | @type none 49 | 50 | 51 | # Strip out color codes then field extract the service fields 52 | 53 | @type record_transformer 54 | enable_ruby true 55 | 56 | message ${record["message"].gsub(/\e\[([;\d]+)?m/, '')} 57 | 58 | 59 | 60 | @type parser 61 | key_name message 62 | 63 | @type multiline 64 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 65 | format1 /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] -(?.*)$/ 66 | time_key timestamp 67 | time_format %Y-%m-%dT%H:%M:%S.%LZ 68 | 69 | 70 | # End Service Fields Extraction 71 | 72 | 73 | 74 | @type tail 75 | @id router_traefik_tail 76 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log" 77 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log.pos" 78 | tag jfrog.rt.router.traefik 79 | 80 | @type multiline 81 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 82 | format1 /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] -(?.*)$/ 83 | time_key timestamp 84 | time_format %Y-%m-%dT%H:%M:%S.%LZ 85 | 86 | 87 | 88 | 89 | @type tail 90 | @id access_request_tail 91 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-request.log" 92 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-request.log.pos" 93 | tag jfrog.rt.access.request 94 | 95 | @type regexp 96 | expression ^(?[^ ]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 97 | time_key timestamp 98 | time_format %Y-%m-%dT%H:%M:%S.%LZ 99 | 100 | 101 | 102 | @type tail 103 | @id artifactory_request_tail 104 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-request.log" 105 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-request.log.pos" 106 | tag jfrog.rt.artifactory.request 107 | 108 | @type regexp 109 | expression ^(?[^ 
]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 110 | time_key timestamp 111 | time_format %Y-%m-%dT%H:%M:%S.%LZ 112 | 113 | 114 | 115 | @type record_transformer 116 | enable_ruby true 117 | 118 | repo ${record["request_url"].include?("/api/docker") && !record["request_url"].include?("/api/docker/null") && !record["request_url"].include?("/api/docker/v2") ? (record["request_url"].split('/')[3]) : ("")} 119 | image ${record["request_url"].include?("/api/docker") && !record["request_url"].include?("/api/docker/null") && !record["request_url"].include?("/api/docker/v2") ? (record["request_url"].split('/')[5]) : ("")} 120 | 121 | 122 | 123 | @type tail 124 | @id frontend_request_tail 125 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-request.log" 126 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-request.log.pos" 127 | tag jfrog.rt.frontend.request 128 | 129 | @type regexp 130 | expression ^(?[^ ]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 131 | time_key timestamp 132 | time_format %Y-%m-%dT%H:%M:%S.%LZ 133 | 134 | 135 | 136 | @type tail 137 | @id metadata_request_tail 138 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-request.log" 139 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-request.log.pos" 140 | tag jfrog.rt.metadata.request 141 | 142 | @type regexp 143 | expression ^(?[^ ]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 144 | time_key timestamp 145 | time_format %Y-%m-%dT%H:%M:%S.%LZ 146 | 147 | 148 | 149 | @type tail 150 | @id router_request_tail 151 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log" 152 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log.pos" 153 | tag jfrog.rt.router.request 154 | 155 | @type json 156 | time_key time 157 | time_format %Y-%m-%dT%H:%M:%S%:z 158 | 159 | 160 | 161 
| @type tail 162 | @id artifactory_access_tail 163 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-access.log" 164 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-access.log.pos" 165 | tag jfrog.rt.artifactory.access 166 | 167 | @type regexp 168 | expression /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] (?.*) for client : (?.+)/(?.+)$/ 169 | time_key timestamp 170 | time_format %Y-%m-%dT%H:%M:%S.%LZ 171 | 172 | 173 | 174 | @type tail 175 | @id access_security_audit_tail 176 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-security-audit.log" 177 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-security-audit.log.pos" 178 | tag jfrog.rt.access.audit 179 | 180 | @type regexp 181 | expression /^(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?.*)/ 182 | time_key timestamp 183 | time_format %Y-%m-%dT%H:%M:%S.%LZ 184 | 185 | 186 | 187 | @type record_transformer 188 | 189 | hostname "#{Socket.gethostname}" 190 | log_source ${tag} 191 | 192 | 193 | 194 | @type exec 195 | tag jfrog.callhome 196 | command "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/fluentd-1.11.0-linux-x86_64/lib/ruby/bin/gem list --local | grep fluent | sed 's/ (/:/g' | sed 's/)//g' | sed ':a;N;$!ba;s/\n/\t/g'" 197 | run_interval 1d 198 | 199 | @type ltsv 200 | 201 | 202 | 203 | @type exec 204 | tag jfrog.callhome 205 | command /opt/td-agent/embedded/bin/gem list --local | grep fluent | sed 's/ (/:/g' | sed 's/)//g' | sed ':a;N;$!ba;s/\n/\t/g' 206 | run_interval 1d 207 | 208 | @type ltsv 209 | 210 | 211 | 212 | @type record_transformer 213 | renew_record true 214 | keep_keys 'productId,features' 215 | enable_ruby true 216 | 217 | productId 'jfrogLogAnalytics/v0.1.0' 218 | features ${return(record.map { |k,v| { "featureId" => (k + ':' + v).to_sym} })} 219 | 220 | 221 | 222 | @type http 223 | endpoint http://localhost:8081/artifactory/api/system/usage 224 | open_timeout 5 225 | content_type application/json 226 | 227 | @type json 228 | 229 | 230 | 
flush_interval 5s 231 | 232 | 233 | 234 | 235 | 236 | @type copy 237 | 238 | @type "elasticsearch" 239 | @id elasticsearch 240 | host "elasticsearch" 241 | port 9200 242 | user "elastic" 243 | password xxxxxx 244 | index_name "unified-artifactory" 245 | include_tag_key true 246 | type_name "fluentd" 247 | logstash_format false 248 | 249 | 250 | @type "splunk_hec" 251 | host "splunk" 252 | port 8088 253 | token xxxxxx 254 | format json 255 | sourcetype_key "log_source" 256 | use_fluentd_time false 257 | flush_interval 10s 258 | 259 | flush_mode interval 260 | retry_type exponential_backoff 261 | flush_interval 10s 262 | 263 | 264 | @type json 265 | 266 | 267 | 268 | @type "datadog" 269 | @id datadog_agent_artifactory 270 | api_key xxxxxx 271 | include_tag_key true 272 | dd_source fluentd 273 | 274 | -------------------------------------------------------------------------------- /fluentd/fluent.conf.rt6: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | @id artifactory_service_tail 4 | path "#{ENV['ARTIFACTORY_HOME']}/logs/artifactory.log" 5 | pos_file "#{ENV['ARTIFACTORY_HOME']}/logs/artifactory.log.pos" 6 | tag jfrog.rt.artifactory.service 7 | 8 | @type multiline 9 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 10 | format1 /^(?[^.*]*) \[(?[^\]]*)\] \[(?[^\]]*)\] (?.*) -(?.*)$/ 11 | time_key timestamp 12 | time_format %Y-%m-%d %H:%M:%S,%L 13 | 14 | 15 | 16 | 17 | @type tail 18 | @id artifactory_request_tail 19 | path "#{ENV['ARTIFACTORY_HOME']}/logs/request.log" 20 | pos_file "#{ENV['ARTIFACTORY_HOME']}/logs/request.log.pos" 21 | tag jfrog.rt.artifactory.request 22 | 23 | @type regexp 24 | expression ^(?[^ ]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 25 | time_key timestamp 26 | time_format %Y%m%d%H%M%S 27 | 28 | 29 | 30 | 31 | @type tail 32 | @id artifactory_access_tail 33 | path "#{ENV['ARTIFACTORY_HOME']}/logs/access.log" 34 | pos_file 
"#{ENV['ARTIFACTORY_HOME']}/logs/access.log.pos" 35 | tag jfrog.rt.artifactory.access 36 | 37 | @type regexp 38 | expression ^(?[^.*]*) \[(?[^\]]*)\] (?.*) for client : (?.+) / (?.+)$ 39 | time_key timestamp 40 | time_format %Y-%m-%d %H:%M:%S,%L 41 | 42 | 43 | 44 | 45 | 46 | @type record_transformer 47 | 48 | hostname "#{Socket.gethostname}" 49 | log_source ${tag} 50 | 51 | 52 | 53 | 54 | @type record_transformer 55 | enable_ruby true 56 | 57 | repo ${record["request_url"].include?("/api/docker") && !record["request_url"].include?("/api/docker/null") && !record["request_url"].include?("/api/docker/v2") ? (record["request_url"].split('/')[3]) : ("")} 58 | image ${record["request_url"].include?("/api/docker") && !record["request_url"].include?("/api/docker/null") && !record["request_url"].include?("/api/docker/v2") ? (record["request_url"].split('/')[5]) : ("")} 59 | 60 | 61 | 62 | @type exec 63 | tag jfrog.callhome 64 | command "#{ENV['ARTIFACTORY_HOME']}/fluentd-1.11.0-linux-x86_64/lib/ruby/bin/gem list --local | grep fluent | sed 's/ (/:/g' | sed 's/)//g' | sed ':a;N;$!ba;s/\n/\t/g'" 65 | run_interval 1d 66 | 67 | @type ltsv 68 | 69 | 70 | 71 | @type exec 72 | tag jfrog.callhome 73 | command /opt/td-agent/embedded/bin/gem list --local | grep fluent | sed 's/ (/:/g' | sed 's/)//g' | sed ':a;N;$!ba;s/\n/\t/g' 74 | run_interval 1d 75 | 76 | @type ltsv 77 | 78 | 79 | 80 | @type record_transformer 81 | renew_record true 82 | keep_keys 'productId,features' 83 | enable_ruby true 84 | 85 | productId 'jfrogLogAnalytics/v0.1.0' 86 | features ${return(record.map { |k,v| { "featureId" => (k + ':' + v).to_sym} })} 87 | 88 | 89 | 90 | @type http 91 | endpoint http://localhost:8081/artifactory/api/system/usage 92 | open_timeout 5 93 | content_type application/json 94 | 95 | @type json 96 | 97 | 98 | flush_interval 5s 99 | 100 | 101 | 102 | 103 | @type copy 104 | 105 | @type "elasticsearch" 106 | @id elasticsearch 107 | host "elasticsearch" 108 | port 9200 109 | user "elastic" 
110 | password xxxxxx 111 | index_name "unified-artifactory" 112 | include_tag_key true 113 | type_name "fluentd" 114 | logstash_format false 115 | 116 | 117 | @type "splunk_hec" 118 | host "splunk" 119 | port 8088 120 | token xxxxxx 121 | format json 122 | sourcetype_key "log_source" 123 | use_fluentd_time false 124 | flush_interval 10s 125 | 126 | flush_mode interval 127 | retry_type exponential_backoff 128 | flush_interval 10s 129 | 130 | 131 | @type json 132 | 133 | 134 | 135 | @type "datadog" 136 | @id datadog_agent_artifactory 137 | api_key xxxxxx 138 | include_tag_key true 139 | dd_source fluentd 140 | 141 | -------------------------------------------------------------------------------- /fluentd/fluent.conf.xray: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | @id xray_server_tail 4 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-server-service.log" 5 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-server-service.log.pos" 6 | tag jfrog.xray.server.service 7 | 8 | @type none 9 | 10 | 11 | 12 | @type tail 13 | @id xray_persist_tail 14 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-persist-service.log" 15 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-persist-service.log.pos" 16 | tag jfrog.xray.persist.service 17 | 18 | @type none 19 | 20 | 21 | 22 | @type tail 23 | @id xray_indexer_tail 24 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-indexer-service.log" 25 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-indexer-service.log.pos" 26 | tag jfrog.xray.indexer.service 27 | 28 | @type none 29 | 30 | 31 | 32 | @type tail 33 | @id xray_analysis_tail 34 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-analysis-service.log" 35 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-analysis-service.log.pos" 36 | tag jfrog.xray.analysis.service 37 | 38 | @type none 39 | 40 | 41 | 42 | @type tail 43 | @id xray_router_tail 44 | path 
"#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log" 45 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log.pos" 46 | tag jfrog.xray.router.service 47 | 48 | @type none 49 | 50 | 51 | 52 | @type tail 53 | @id xray_router_traefik_tail 54 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log" 55 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log.pos" 56 | tag jfrog.xray.router.traefik 57 | 58 | @type regexp 59 | expression ^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] -(?.+)$ 60 | time_key timestamp 61 | time_format %Y-%m-%dT%H:%M:%S.%LZ 62 | 63 | 64 | 65 | @type tail 66 | @id xray_router_request_tail 67 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log" 68 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log.pos" 69 | tag jfrog.xray.router.request 70 | 71 | @type json 72 | time_key time 73 | time_format %Y-%m-%dT%H:%M:%S%:z 74 | 75 | 76 | 77 | @type tail 78 | @id xray_request_tail 79 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-request.log" 80 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-request.log.pos" 81 | tag jfrog.xray.xray.request 82 | 83 | @type regexp 84 | expression ^(?[^ ]*)\|(?[^ ]*)\|(?.+)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.*)$ 85 | time_key timestamp 86 | time_format %Y-%m-%dT%H:%M:%S.%LZ 87 | 88 | 89 | # STRIP COLOR CODES FROM SERVICE LOGS 90 | 91 | @type record_transformer 92 | enable_ruby true 93 | 94 | message ${record["message"].gsub(/\e\[([;\d]+)?m/, '')} 95 | 96 | 97 | # FIELD EXTRACT SERVICE LOG 98 | 99 | @type parser 100 | key_name message 101 | 102 | @type multiline 103 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 104 | format1 /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] (?.*)$/ 105 | time_key timestamp 106 | time_format %Y-%m-%dT%H:%M:%S.%LZ 107 | 108 | 109 | 110 | @type record_transformer 111 | 112 | hostname "#{Socket.gethostname}" 113 | log_source ${tag} 114 
| 115 | 116 | 117 | 118 | @type copy 119 | 120 | @type "elasticsearch" 121 | @id elasticsearch 122 | host "elasticsearch" 123 | port 9200 124 | user "elastic" 125 | password xxxxxx 126 | index_name "unified-artifactory" 127 | include_tag_key true 128 | type_name "fluentd" 129 | logstash_format false 130 | 131 | 132 | @type "splunk_hec" 133 | host "splunk" 134 | port 8088 135 | token xxxxxx 136 | format json 137 | sourcetype_key "log_source" 138 | use_fluentd_time false 139 | flush_interval 10s 140 | 141 | flush_mode interval 142 | retry_type exponential_backoff 143 | flush_interval 10s 144 | 145 | 146 | @type json 147 | 148 | 149 | 150 | @type "datadog" 151 | @id datadog_agent_artifactory 152 | api_key xxxxxx 153 | include_tag_key true 154 | dd_source fluentd 155 | 156 | -------------------------------------------------------------------------------- /log-vendors/Dockerfile.fluentd: -------------------------------------------------------------------------------- 1 | FROM bitnami/fluentd 2 | LABEL maintainer "Partner Engineering " 3 | USER root 4 | ## Install custom Fluentd plugins 5 | RUN fluent-gem install 'fluent-plugin-datadog' 6 | RUN fluent-gem install 'fluent-plugin-splunk-enterprise' 7 | RUN fluent-gem install 'fluent-plugin-elasticsearch' 8 | -------------------------------------------------------------------------------- /log-vendors/Dockerfile.redhat-ubi-rt7-fluentd: -------------------------------------------------------------------------------- 1 | # An example of customising Artifactory using RedHat Univeral Base Image (UBI). 2 | # Using Docker multi stage build. 
3 | # Taking the Artifactory file system 4 | ARG ARTIFACTORY_BASE_VERSION 5 | # The Artifactory official Docker image 6 | FROM docker.bintray.io/jfrog/artifactory-pro:${ARTIFACTORY_BASE_VERSION} AS base 7 | 8 | # The new image based on registry.access.redhat.com/ubi 9 | FROM registry.access.redhat.com/ubi8 10 | 11 | ARG FLUENT_CONF 12 | 13 | LABEL name="JFrog Artifactory Pro" \ 14 | description="JFrog Artifactory Pro image based on the Red Hat Universal Base Image." \ 15 | vendor="JFrog" \ 16 | summary="JFrog Artifactory Pro (Red Hat UBI)" \ 17 | com.jfrog.license_terms="https://jfrog.com/artifactory/eula/" 18 | 19 | # Environment needed for Artifactory 20 | ENV JF_ARTIFACTORY_USER=artifactory \ 21 | ARTIFACTORY_USER_ID=1030 \ 22 | ARTIFACTORY_VERSION=${ARTIFACTORY_BASE_VERSION} \ 23 | JF_PRODUCT_HOME=/opt/jfrog/artifactory \ 24 | JF_PRODUCT_DATA_INTERNAL=/var/opt/jfrog/artifactory \ 25 | RECOMMENDED_MAX_OPEN_FILES=32000 \ 26 | MIN_MAX_OPEN_FILES=10000 \ 27 | RECOMMENDED_MAX_OPEN_PROCESSES=1024 \ 28 | POSTGRESQL_VERSION=9.4.1212 \ 29 | FLUENT_CONF=${FLUENT_CONF} 30 | 31 | # Copy needed file system from base (Artifactory image) 32 | COPY --from=base /opt/jfrog /opt/jfrog 33 | COPY --from=base /var/opt/jfrog/artifactory /var/opt/jfrog/artifactory 34 | COPY --from=base /entrypoint-artifactory.sh /entrypoint-artifactory.sh 35 | 36 | # Add license information to meet the Red Hat container image certification requirements 37 | COPY --from=base /opt/jfrog/artifactory/app/doc/* /licenses/ 38 | 39 | # Metadata to let Artifactory know its installation source 40 | RUN mkdir -p /artifactory_bootstrap/info/ 41 | RUN echo "{\"productId\":\"UBI8_artifactory/1.0.0\",\"features\":[{\"featureId\":\"Partner/ACC-006983\"}]}" > /artifactory_bootstrap/info/installer-info.json 42 | 43 | # Create the user, fix file system ownership and install needed tools with Yum 44 | # NOTE - wget must be installed for Artifactory HA 45 | # procps must be installed to run Artifactory 46 | # hostname is 
needed to generate nodeID 47 | RUN useradd -s /usr/sbin/nologin --uid ${ARTIFACTORY_USER_ID} --user-group ${JF_ARTIFACTORY_USER} && \ 48 | chown -R ${JF_ARTIFACTORY_USER}:${JF_ARTIFACTORY_USER} ${JF_PRODUCT_HOME} ${JF_PRODUCT_DATA_INTERNAL} && \ 49 | yum install -y --disableplugin=subscription-manager wget && \ 50 | yum install -y --disableplugin=subscription-manager procps && \ 51 | yum install -y --disableplugin=subscription-manager net-tools && \ 52 | yum install -y --disableplugin=subscription-manager hostname && \ 53 | yum install -y --disableplugin=subscription-manager sudo gem ruby-devel gcc gcc-c++ make 54 | 55 | RUN curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh 56 | 57 | RUN mkdir -p /var/log/td-agent/buffer && \ 58 | chown -R ${JF_ARTIFACTORY_USER}:${JF_ARTIFACTORY_USER} /var/log/td-agent && \ 59 | chown -R ${JF_ARTIFACTORY_USER}:${JF_ARTIFACTORY_USER} /etc/td-agent 60 | 61 | RUN mkdir -p /usr/lib/rpm/redhat/ && \ 62 | touch /usr/lib/rpm/redhat/redhat-hardened-cc1 && \ 63 | touch /usr/lib/rpm/redhat/redhat-hardened-ld && \ 64 | touch /usr/lib/rpm/redhat/redhat-annobin-cc1 && \ 65 | td-agent-gem install fluent-plugin-splunk-enterprise && \ 66 | td-agent-gem install fluent-plugin-datadog 67 | 68 | 69 | # REMOVE THE DEV TOOLS NEEDED FOR GEM INSTALL NOW.. 
70 | RUN yum remove -y --disableplugin=subscription-manager gcc gcc-c++ make 71 | 72 | COPY ${FLUENT_CONF} /etc/td-agent/td-agent.conf 73 | 74 | RUN chown ${JF_ARTIFACTORY_USER}:${JF_ARTIFACTORY_USER} /etc/td-agent/td-agent.conf 75 | 76 | USER $JF_ARTIFACTORY_USER 77 | 78 | VOLUME ${JF_PRODUCT_DATA_INTERNAL} 79 | 80 | ENTRYPOINT ["/entrypoint-artifactory.sh"] -------------------------------------------------------------------------------- /log-vendors/Dockerfile.redhat-ubi-xray-analysis-fluentd: -------------------------------------------------------------------------------- 1 | ARG XRAY_BASE_VERSION 2 | 3 | FROM docker.bintray.io/jfrog/xray-analysis:${XRAY_BASE_VERSION} AS base 4 | 5 | # The new image based on registry.access.redhat.com/ubi 6 | FROM registry.access.redhat.com/ubi8 7 | 8 | ARG FLUENT_CONF 9 | 10 | LABEL name="JFrog Xray Analysis" \ 11 | description="JFrog Xray Analysis image based on the Red Hat Universal Base Image." \ 12 | vendor="JFrog" \ 13 | summary="JFrog Xray Analysis (Red Hat UBI)" \ 14 | com.jfrog.license_terms="https://jfrog.com/xray/eula/" 15 | 16 | # Environment needed for Xray 17 | ENV JF_XRAY_USER=xray \ 18 | XRAY_USER_ID=1035 \ 19 | XRAY_VERSION=${XRAY_BASE_VERSION} \ 20 | JF_PRODUCT_HOME=/opt/jfrog/xray \ 21 | JF_PRODUCT_DATA_INTERNAL=/var/opt/jfrog/xray \ 22 | SERVICE_NAME=analysis \ 23 | FLUENT_CONF=${FLUENT_CONF} 24 | 25 | 26 | COPY --from=base /opt/jfrog/xray /opt/jfrog/xray 27 | COPY --from=base /postgresql-client /postgresql-client 28 | 29 | # Add license information to meet the Red Hat container image certification requirements 30 | COPY --from=base /opt/jfrog/xray/app/doc/* /licenses/ 31 | 32 | RUN mkdir -p /var/opt/jfrog && chmod 0777 /var/opt/jfrog 33 | 34 | RUN useradd -s /usr/sbin/nologin --uid 1035 --user-group xray && \ 35 | chown -R 1035:1035 /opt/jfrog/xray /var/opt/jfrog /postgresql-client && \ 36 | yum install -y --disableplugin=subscription-manager wget && \ 37 | yum install -y --disableplugin=subscription-manager 
procps && \ 38 | yum install -y --disableplugin=subscription-manager net-tools && \ 39 | yum install -y --disableplugin=subscription-manager hostname && \ 40 | yum install -y --disableplugin=subscription-manager sudo gem ruby-devel gcc gcc-c++ make 41 | RUN curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh 42 | 43 | RUN mkdir -p /var/log/td-agent/buffer && \ 44 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /var/log/td-agent && \ 45 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /etc/td-agent 46 | 47 | RUN mkdir -p /usr/lib/rpm/redhat/ && \ 48 | touch /usr/lib/rpm/redhat/redhat-hardened-cc1 && \ 49 | touch /usr/lib/rpm/redhat/redhat-hardened-ld && \ 50 | touch /usr/lib/rpm/redhat/redhat-annobin-cc1 && \ 51 | td-agent-gem install fluent-plugin-splunk-enterprise && \ 52 | td-agent-gem install fluent-plugin-datadog 53 | 54 | 55 | # REMOVE THE DEV TOOLS NEEDED FOR GEM INSTALL NOW.. 56 | RUN yum remove -y --disableplugin=subscription-manager gcc gcc-c++ make 57 | 58 | USER $JF_XRAY_USER 59 | 60 | COPY ${FLUENT_CONF} /etc/td-agent/td-agent.conf 61 | 62 | VOLUME /var/opt/jfrog/xray 63 | 64 | ENTRYPOINT ["/opt/jfrog/xray/app/bin/wrapper.sh"] 65 | -------------------------------------------------------------------------------- /log-vendors/Dockerfile.redhat-ubi-xray-indexer-fluentd: -------------------------------------------------------------------------------- 1 | ARG XRAY_BASE_VERSION 2 | ARG FLUENT_CONF 3 | 4 | FROM docker.bintray.io/jfrog/xray-indexer:${XRAY_BASE_VERSION} AS base 5 | 6 | # The new image based on registry.access.redhat.com/ubi 7 | FROM registry.access.redhat.com/ubi8 8 | 9 | ARG FLUENT_CONF 10 | 11 | LABEL name="JFrog Xray Indexer" \ 12 | description="JFrog Xray Indexer image based on the Red Hat Universal Base Image." 
\ 13 | vendor="JFrog" \ 14 | summary="JFrog Xray Indexer (Red Hat UBI)" \ 15 | com.jfrog.license_terms="https://jfrog.com/xray/eula/" 16 | 17 | # Environment needed for Xray 18 | ENV JF_XRAY_USER=xray \ 19 | XRAY_USER_ID=1035 \ 20 | XRAY_VERSION=${XRAY_BASE_VERSION} \ 21 | JF_PRODUCT_HOME=/opt/jfrog/xray \ 22 | JF_PRODUCT_DATA_INTERNAL=/var/opt/jfrog/xray \ 23 | SERVICE_NAME=indexer \ 24 | FLUENT_CONF=${FLUENT_CONF} 25 | 26 | COPY --from=base /opt/jfrog/xray /opt/jfrog/xray 27 | COPY --from=base /postgresql-client /postgresql-client 28 | 29 | # Add license information to meet the Red Hat container image certification requirements 30 | COPY --from=base /opt/jfrog/xray/app/doc/* /licenses/ 31 | 32 | RUN mkdir -p /var/opt/jfrog && chmod 0777 /var/opt/jfrog 33 | 34 | RUN useradd -s /usr/sbin/nologin --uid 1035 --user-group xray && \ 35 | chown -R 1035:1035 /opt/jfrog/xray /var/opt/jfrog /postgresql-client && \ 36 | yum install -y --disableplugin=subscription-manager wget && \ 37 | yum install -y --disableplugin=subscription-manager procps && \ 38 | yum install -y --disableplugin=subscription-manager net-tools && \ 39 | yum install -y --disableplugin=subscription-manager hostname && \ 40 | yum install -y --disableplugin=subscription-manager sudo gem ruby-devel gcc gcc-c++ make 41 | RUN curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh 42 | 43 | RUN mkdir -p /var/log/td-agent/buffer && \ 44 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /var/log/td-agent && \ 45 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /etc/td-agent 46 | 47 | RUN mkdir -p /usr/lib/rpm/redhat/ && \ 48 | touch /usr/lib/rpm/redhat/redhat-hardened-cc1 && \ 49 | touch /usr/lib/rpm/redhat/redhat-hardened-ld && \ 50 | touch /usr/lib/rpm/redhat/redhat-annobin-cc1 && \ 51 | td-agent-gem install fluent-plugin-splunk-enterprise && \ 52 | td-agent-gem install fluent-plugin-datadog 53 | 54 | 55 | # REMOVE THE DEV TOOLS NEEDED FOR GEM INSTALL NOW.. 
56 | RUN yum remove -y --disableplugin=subscription-manager gcc gcc-c++ make 57 | 58 | USER $JF_XRAY_USER 59 | 60 | COPY ${FLUENT_CONF} /etc/td-agent/td-agent.conf 61 | 62 | VOLUME /var/opt/jfrog/xray 63 | 64 | ENTRYPOINT ["/opt/jfrog/xray/app/bin/wrapper.sh"] 65 | -------------------------------------------------------------------------------- /log-vendors/Dockerfile.redhat-ubi-xray-persist-fluentd: -------------------------------------------------------------------------------- 1 | ARG XRAY_BASE_VERSION 2 | ARG FLUENT_CONF 3 | 4 | FROM docker.bintray.io/jfrog/xray-persist:${XRAY_BASE_VERSION} AS base 5 | 6 | # The new image based on registry.access.redhat.com/ubi 7 | FROM registry.access.redhat.com/ubi8 8 | 9 | ARG FLUENT_CONF 10 | 11 | LABEL name="JFrog Xray Persist" \ 12 | description="JFrog Xray Persist image based on the Red Hat Universal Base Image." \ 13 | vendor="JFrog" \ 14 | summary="JFrog Xray Persist (Red Hat UBI)" \ 15 | com.jfrog.license_terms="https://jfrog.com/xray/eula/" 16 | 17 | # Environment needed for Xray 18 | ENV JF_XRAY_USER=xray \ 19 | XRAY_USER_ID=1035 \ 20 | XRAY_VERSION=${XRAY_BASE_VERSION} \ 21 | JF_PRODUCT_HOME=/opt/jfrog/xray \ 22 | JF_PRODUCT_DATA_INTERNAL=/var/opt/jfrog/xray \ 23 | SERVICE_NAME=persist \ 24 | FLUENT_CONF=${FLUENT_CONF} 25 | 26 | COPY --from=base /opt/jfrog/xray /opt/jfrog/xray 27 | COPY --from=base /postgresql-client /postgresql-client 28 | 29 | # Add license information to meet the Red Hat container image certification requirements 30 | COPY --from=base /opt/jfrog/xray/app/doc/* /licenses/ 31 | 32 | RUN mkdir -p /var/opt/jfrog && chmod 0777 /var/opt/jfrog 33 | 34 | RUN useradd -s /usr/sbin/nologin --uid 1035 --user-group xray && \ 35 | chown -R 1035:1035 /opt/jfrog/xray /var/opt/jfrog /postgresql-client && \ 36 | yum install -y --disableplugin=subscription-manager wget && \ 37 | yum install -y --disableplugin=subscription-manager procps && \ 38 | yum install -y --disableplugin=subscription-manager net-tools && \ 
39 | yum install -y --disableplugin=subscription-manager hostname && \ 40 | yum install -y --disableplugin=subscription-manager sudo gem ruby-devel gcc gcc-c++ make 41 | RUN curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh 42 | 43 | RUN mkdir -p /var/log/td-agent/buffer && \ 44 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /var/log/td-agent && \ 45 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /etc/td-agent 46 | 47 | RUN mkdir -p /usr/lib/rpm/redhat/ && \ 48 | touch /usr/lib/rpm/redhat/redhat-hardened-cc1 && \ 49 | touch /usr/lib/rpm/redhat/redhat-hardened-ld && \ 50 | touch /usr/lib/rpm/redhat/redhat-annobin-cc1 && \ 51 | td-agent-gem install fluent-plugin-splunk-enterprise && \ 52 | td-agent-gem install fluent-plugin-datadog 53 | 54 | 55 | # REMOVE THE DEV TOOLS NEEDED FOR GEM INSTALL NOW.. 56 | RUN yum remove -y --disableplugin=subscription-manager gcc gcc-c++ make 57 | 58 | USER $JF_XRAY_USER 59 | 60 | COPY ${FLUENT_CONF} /etc/td-agent/td-agent.conf 61 | 62 | VOLUME /var/opt/jfrog/xray 63 | 64 | ENTRYPOINT ["/opt/jfrog/xray/app/bin/wrapper.sh"] 65 | -------------------------------------------------------------------------------- /log-vendors/Dockerfile.redhat-ubi-xray-router-fluentd: -------------------------------------------------------------------------------- 1 | ARG ROUTER_BASE_VERSION 2 | ARG FLUENT_CONF 3 | 4 | FROM docker.bintray.io/jfrog/router:${ROUTER_BASE_VERSION} AS base 5 | 6 | # The new image based on registry.access.redhat.com/ubi 7 | FROM registry.access.redhat.com/ubi8 8 | 9 | ARG FLUENT_CONF 10 | 11 | LABEL name="JFrog Router" \ 12 | description="JFrog Router image based on the Red Hat Universal Base Image." 
\ 13 | vendor="JFrog" \ 14 | summary="JFrog Router (Red Hat UBI)" \ 15 | com.jfrog.license_terms="https://jfrog.com/xray/eula/" 16 | 17 | # Environment needed for Router 18 | ENV JF_ROUTER_USER=router \ 19 | ROUTER_USER_ID=1117 \ 20 | ROUTER_VERSION=${ROUTER_BASE_VERSION} \ 21 | JF_PRODUCT_HOME=/opt/jfrog/router \ 22 | JF_PRODUCT_DATA_INTERNAL=/var/opt/jfrog/router \ 23 | SERVICE_NAME=router \ 24 | FLUENT_CONF=${FLUENT_CONF} 25 | 26 | COPY --from=base /opt/jfrog/router /opt/jfrog/router 27 | 28 | # Add license information to meet the Red Hat container image certification requirements 29 | COPY --from=base /opt/jfrog/router/app/doc/* /licenses/ 30 | 31 | RUN mkdir -p /var/opt/jfrog && chmod 0777 /var/opt/jfrog 32 | 33 | RUN useradd -s /usr/sbin/nologin --uid 1117 --user-group router && \ 34 | chown -R 1117:1117 /opt/jfrog/router /var/opt/jfrog && \ 35 | yum install -y --disableplugin=subscription-manager wget && \ 36 | yum install -y --disableplugin=subscription-manager procps && \ 37 | yum install -y --disableplugin=subscription-manager net-tools && \ 38 | yum install -y --disableplugin=subscription-manager hostname && \ 39 | yum install -y --disableplugin=subscription-manager sudo gem ruby ruby-devel gcc gcc-c++ make 40 | RUN curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh 41 | 42 | RUN mkdir -p /var/log/td-agent/buffer && \ 43 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /var/log/td-agent && \ 44 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /etc/td-agent 45 | 46 | RUN mkdir -p /usr/lib/rpm/redhat/ && \ 47 | touch /usr/lib/rpm/redhat/redhat-hardened-cc1 && \ 48 | touch /usr/lib/rpm/redhat/redhat-hardened-ld && \ 49 | touch /usr/lib/rpm/redhat/redhat-annobin-cc1 && \ 50 | td-agent-gem install fluent-plugin-splunk-enterprise && \ 51 | td-agent-gem install fluent-plugin-datadog 52 | 53 | 54 | # REMOVE THE DEV TOOLS NEEDED FOR GEM INSTALL NOW.. 
55 | RUN yum remove -y --disableplugin=subscription-manager gcc gcc-c++ make 56 | 57 | USER $JF_ROUTER_USER 58 | 59 | VOLUME /var/opt/jfrog/router 60 | 61 | COPY ${FLUENT_CONF} /etc/td-agent/td-agent.conf 62 | 63 | ENTRYPOINT ["/opt/jfrog/router/app/bin/entrypoint-router.sh"] 64 | -------------------------------------------------------------------------------- /log-vendors/Dockerfile.redhat-ubi-xray-server-fluentd: -------------------------------------------------------------------------------- 1 | ARG XRAY_BASE_VERSION 2 | ARG FLUENT_CONF 3 | 4 | FROM docker.bintray.io/jfrog/xray-server:${XRAY_BASE_VERSION} AS base 5 | 6 | # The new image based on registry.access.redhat.com/ubi 7 | FROM registry.access.redhat.com/ubi8 8 | 9 | ARG FLUENT_CONF 10 | 11 | LABEL name="JFrog Xray Server" \ 12 | description="JFrog Xray Server image based on the Red Hat Universal Base Image." \ 13 | vendor="JFrog" \ 14 | summary="JFrog Xray Server (Red Hat UBI)" \ 15 | com.jfrog.license_terms="https://jfrog.com/xray/eula/" 16 | 17 | # Environment needed for Xray 18 | ENV JF_XRAY_USER=xray \ 19 | XRAY_USER_ID=1035 \ 20 | XRAY_VERSION=${XRAY_BASE_VERSION} \ 21 | JF_PRODUCT_HOME=/opt/jfrog/xray \ 22 | JF_PRODUCT_DATA_INTERNAL=/var/opt/jfrog/xray \ 23 | SERVICE_NAME=server \ 24 | FLUENT_CONF=${FLUENT_CONF} 25 | 26 | COPY --from=base /opt/jfrog/xray /opt/jfrog/xray 27 | COPY --from=base /postgresql-client /postgresql-client 28 | 29 | # Add license information to meet the Red Hat container image certification requirements 30 | COPY --from=base /opt/jfrog/xray/app/doc/* /licenses/ 31 | 32 | RUN mkdir -p /var/opt/jfrog && chmod 0777 /var/opt/jfrog 33 | 34 | RUN useradd -s /usr/sbin/nologin --uid 1035 --user-group xray && \ 35 | chown -R 1035:1035 /opt/jfrog/xray /var/opt/jfrog /postgresql-client && \ 36 | yum install -y --disableplugin=subscription-manager wget && \ 37 | yum install -y --disableplugin=subscription-manager procps && \ 38 | yum install -y --disableplugin=subscription-manager 
net-tools && \ 39 | yum install -y --disableplugin=subscription-manager hostname && \ 40 | yum install -y --disableplugin=subscription-manager sudo gem ruby ruby-devel gcc gcc-c++ make 41 | RUN curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh 42 | 43 | RUN mkdir -p /var/log/td-agent/buffer && \ 44 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /var/log/td-agent && \ 45 | chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /etc/td-agent 46 | 47 | RUN mkdir -p /usr/lib/rpm/redhat/ && \ 48 | touch /usr/lib/rpm/redhat/redhat-hardened-cc1 && \ 49 | touch /usr/lib/rpm/redhat/redhat-hardened-ld && \ 50 | touch /usr/lib/rpm/redhat/redhat-annobin-cc1 && \ 51 | td-agent-gem install fluent-plugin-splunk-enterprise && \ 52 | td-agent-gem install fluent-plugin-datadog 53 | 54 | # REMOVE THE DEV TOOLS NEEDED FOR GEM INSTALL NOW.. 55 | RUN yum remove -y --disableplugin=subscription-manager gcc gcc-c++ make 56 | 57 | COPY ${FLUENT_CONF} /etc/td-agent/td-agent.conf 58 | 59 | RUN chown -R ${JF_XRAY_USER}:${JF_XRAY_USER} /etc/td-agent/td-agent.conf 60 | 61 | USER xray 62 | 63 | VOLUME /var/opt/jfrog/xray 64 | 65 | ENTRYPOINT ["/opt/jfrog/xray/app/bin/wrapper.sh"] 66 | -------------------------------------------------------------------------------- /log-vendors/README.md: -------------------------------------------------------------------------------- 1 | # JFrog Log Vendors 2 | 3 | This project has information about various log-vendors like splunk, datadog, elastic and prometheus. 
 4 | 
 5 | To build Artifactory and Xray images with Fluentd installed and the log-vendor configuration set up, use build arguments with the correct version of Artifactory and the path to the respective Fluentd configuration:
 6 | 
 7 | ```--build-arg ARTIFACTORY_BASE_VERSION=${LATEST_VERSION} --build-arg FLUENT_CONF=${PATH_TO_FLUENT_CONF}```
 8 | 
 9 | Example:
10 | 
11 | ```docker build -f Dockerfile.redhat-ubi-rt7-fluentd --build-arg ARTIFACTORY_BASE_VERSION={LATEST_VERSION} --build-arg FLUENT_CONF=splunk/fluent.conf.rt -t {IMAGE_NAME} .```
12 | 
13 | Versions of Artifactory:
14 | https://bintray.com/jfrog/reg2/jfrog%3Aartifactory-pro
15 | 
16 | 
--------------------------------------------------------------------------------