├── .github └── workflows │ ├── slack-notify-issues.yml │ └── slack-notify-pr.yml ├── .gitignore ├── CHANGELOG-splunkbase.md ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── app ├── .tar ├── README.md ├── jfrog-logs-04-2025.tar.gz ├── jfrog-logs.tar.gz └── jfrog-logs │ ├── CONTRIBUTING.md │ ├── LICENSE │ ├── README.md │ ├── app.manifest │ ├── appserver │ └── static │ │ ├── jfrog-logs.css │ │ └── jfrog-logs.js │ ├── bin │ └── README │ ├── default │ ├── app.conf │ ├── data │ │ └── ui │ │ │ ├── nav │ │ │ └── default.xml │ │ │ └── views │ │ │ ├── README │ │ │ ├── artifactory.xml │ │ │ └── xray.xml │ ├── eventtypes.conf │ ├── inputs.conf │ ├── inputs.conf.spec │ ├── macros.conf │ ├── props.conf │ ├── savedsearches.conf │ └── tags.conf │ ├── metadata │ └── default.meta │ └── static │ ├── appIcon.png │ ├── appIconAlt.png │ ├── appIconAlt_2x.png │ ├── appIcon_2x.png │ ├── appLogo.png │ └── appLogo_2x.png ├── docker-build ├── Dockerfile └── docker.env ├── fluent.conf.rt ├── fluent.conf.xray ├── fluentd-demo.conf ├── fluentd-installer └── Dockerfile.fluentd.sidecar ├── helm ├── artifactory-ha-values.yaml ├── artifactory-values.yaml ├── jfrog_helm.env └── xray-values.yaml ├── jfrog.env └── k8s └── splunk.yaml /.github/workflows/slack-notify-issues.yml: -------------------------------------------------------------------------------- 1 | on: 2 | issues: 3 | types: [opened, reopened, deleted, closed] 4 | name: Slack Issue Notification 5 | jobs: 6 | slackNotification: 7 | name: Slack Notification Issue 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Slack Notification Issue 12 | uses: rtCamp/action-slack-notify@master 13 | env: 14 | SLACK_CHANNEL: partnereng-issues 15 | SLACK_COLOR: '#00A86B' 16 | SLACK_ICON: https://pbs.twimg.com/profile_images/978188446178082817/86ulJdF0.jpg 17 | SLACK_TITLE: "[${{ github.event.issue.state}}] ${{ github.event.issue.title }} on ${{ github.repository }} :rocket:" 18 | SLACK_MESSAGE: 'Link: ${{ github.event.issue.html_url }}' 19 | SLACK_USERNAME: PartnerEngineers 20 | SLACK_WEBHOOK: ${{ secrets.SLACK_ISSUE_WEBHOOK }} -------------------------------------------------------------------------------- /.github/workflows/slack-notify-pr.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | branches: 4 | - master 5 | types: [opened, reopened, closed] 6 | name: Slack Pull Request Notification 7 | jobs: 8 | slackNotification: 9 | name: Slack Notification PR 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Slack Notification PR 14 | uses: rtCamp/action-slack-notify@master 15 | env: 16 | SLACK_CHANNEL: partnereng-pullrequest 17 | SLACK_COLOR: '#00A86B' 18 | SLACK_ICON: https://pbs.twimg.com/profile_images/978188446178082817/86ulJdF0.jpg 19 | SLACK_TITLE: "[${{ github.event.pull_request.state}}] ${{ github.event.pull_request.title }} on ${{ github.repository }} :rocket:" 20 | SLACK_MESSAGE: 'Merging from ${{ github.head_ref }} to ${{ github.base_ref }} by ${{ github.actor }}. 
Link: ${{ github.event.pull_request._links.html.href }}' 21 | SLACK_USERNAME: PartnerEngineers 22 | SLACK_WEBHOOK: ${{ secrets.SLACK_PR_WEBHOOK }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | .DS_Store 3 | *.iml 4 | *idea* -------------------------------------------------------------------------------- /CHANGELOG-splunkbase.md: -------------------------------------------------------------------------------- 1 | ## [1.2.9] - April 22nd, 2025 2 | * Update search queries to fix Xray Log Volume, CPU Usage, System Memory, Disk Usage, Heap Memory graphs 3 | 4 | ## [1.2.8] - December 10th, 2024 5 | * Update docker panels to mach new log lines structure 6 | * Add `check_meta_default_write_access` to the app configuration 7 | * Update some Artifactory metrics in dashboards to point to the observability Open Metrics source (jfob) 8 | 9 | ## [1.2.7] - Jun 6th, 2023 10 | * Verified dashboard widget queries for Artifactory and Xray dashboards 11 | * Updated documentation to setup JFrog Splunk Integration 12 | 13 | ## [1.2.4] - Jan 17, 2022 14 | * Added Support for Openmetrics Dashboards for Jfrog Artifactory and Jfrog Xray 15 | * To enable and setup Openmentrics follow - https://github.com/jfrog/log-analytics-splunk/tree/Metrics_splunk#readme 16 | 17 | ## [1.2.3] - Aug 26, 2021 18 | * Updating jQuery version to 3.5.0 or higher 19 | 20 | ## [1.2.2] - Aug 3, 2021 21 | * Fixing violation data with correlation widget fields 22 | 23 | ## [1.2.1] - Jun 22, 2021 24 | * App restarts after installation 25 | 26 | ## [1.2.0] - May 26, 2021 27 | ### Breaking Changes 28 | * Using unified fluentd configuration for Xray Logs and Violations dashboards 29 | * Using APIKey to authenticate Xray Violations (SIEM fluentd input plugin) 30 | * Sending Xray logs and violation data to the same index 31 | * Using log_source to filter in Xray Violations dashboard queries 32 | 33 | ## [1.1.8] - Apr 12th, 2021 34 | * Fixing violation data correlation with user, ip information 35 | 36 | ## [1.1.7] - Apr 5th, 2021 37 | * Fixing bugs in Dockerhub widgets, Xray Violations Dashboard 38 | 39 | ## [1.1.6] - Mar 30th, 2021 40 | * Renaming widgets, fixing search queries in Xray Violations Dashboard 41 | 42 | ## [1.1.5] - Mar 2nd, 2021 43 | * Adding Violations widgets to Xray dashboard 44 | 45 | ## [1.1.4] - Feb 19th, 2021 46 | * Normalizing access and request logs to Splunk Web and Change CIM 47 | * Added eventtypes, tags, fields, macros to the app 48 | 49 | ## [1.1.3] - Feb 2nd, 2021 50 | * Added Xray Vulnerability Widgets 51 | 52 | ## [1.1.2] - Nov 3rd, 2020 53 | * Dockerhub Rate Impacts Javascript Bug Fix 54 | 55 | ## [1.1.1] - Oct 29, 2020 56 | * Dockerhub Rate Impacts Log Analytic Solution 57 | 58 | ## [1.1.0] - Aug 25, 2020 59 | • Grouped Artifactory chart widgets under Request, Application and Audit tabs 60 | • Added drilldowns in charts 61 | • Added hostname and sourcetype fields in Splunk events 62 | • Optimized chart SPL queries 63 | • Fixed bugs 64 | 65 | ## [1.0.1] May 29, 2020 66 | • JFrog Platform Log Analytics v1.0.1 - fixed issue with security audit log widget 67 | 68 | ## [1.0.0] May 14, 2020 69 | • JFrog Platform Log Analytics v0.1.0 -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # JFrog Log Analytics Changelog 2 | 3 | All changes to the log analytics 
integration will be documented in this file. 4 | 5 | ## [1.0.13] - April 22, 2025 6 | 7 | * Upgrade Splunk App to version 1.2.9. Changes are available [here](./CHANGELOG-splunkbase.md) 8 | 9 | ## [1.0.12] - March 18, 2025 10 | 11 | * Update artifactory-ha helm values file 12 | * Readme minor updates 13 | 14 | ## [1.0.11] - January 2, 2025 15 | 16 | * FluentD sidecar image version bumped to 4.15, to upgrade base image to bitnami/fluentd 1.18.0 17 | 18 | ## [1.0.10] - December 10, 2024 19 | 20 | * Upgrade Splunk App to version 1.2.8. Changes are available [here](./CHANGELOG-splunkbase.md) 21 | 22 | ## [1.0.9] - November 25, 2024 23 | 24 | * FluentD sidecar image version bumped to 4.14, to reflect logging improvements in `jfrog_metrics` FluentD plugin 25 | 26 | ## [1.0.8] - November 7, 2024 27 | 28 | * FluentD sidecar image version bumped to 4.13, to reflect changes in `jfrog_siem` and `jfrog_send_metrics` FluentD plugins 29 | 30 | ## [1.0.7] - October 25, 2024 31 | 32 | * Add support for metrics and logs outbound payload compression, with `gzip_compression` FluentD param as part of `fluent-plugin-jfrog-sent-metrics` and `fluent-plugin-splunk-hec` plugins 33 | * Add support for a configurable http request timeout, with `request_timeout` FluentD param as part of `fluent-plugin-jfrog-metrics` and `fluent-plugin-jfrog-sent-metrics` plugins 34 | * Add support for a configurable `verify_ssl` FluentD param as part of `fluent-plugin-jfrog-metrics` 35 | * FluentD sidecar version bumped to 4.9, to incorporate the above changes 36 | 37 | ## [1.0.6] - August 8, 2024 38 | 39 | * Fix metrics configuration due to deprecation of `artifactory.openMetrics` as part of Artifactory 7.87.x charts and renaming it to `artifactory.metrics` 40 | 41 | ## [1.0.5] - July 22, 2024 42 | 43 | * FluentD sidecar version bumped to 4.5, to upgrade base image to bitnami/fluentd 1.17.0 44 | * Fixing metrics documentation and general readme fixes 45 | * Remove elastic search fluentd plugins from docker images 46 | 47 | ## [1.0.4] - June6, 2024 48 | 49 | * [BREAKING] Adding deprecation notice for partnership-pts-observability.jfrog.io docker registry 50 | * FluentD sidecar version bumped to 4.3, to upgrade base image to bitnami/fluentd 1.16.5 51 | * Update FluentD sidecar helm charts to match recent changes in JFrog's official charts 52 | 53 | ## [1.0.3] - April 22, 2024 54 | 55 | * Fix order of request and response content length to match spec 56 | 57 | ## [1.0.2] - April 11th, 2024 58 | 59 | * Fix Artifactory access's regex to match log input changes 60 | 61 | ## [1.0.1] - March 22nd, 2024 62 | 63 | * Updated docker images to use fluetnd:1.16.3 to resolve existing CVEs. 
Please see [security section](https://github.com/jfrog/log-analytics-splunk/security) for more info 64 | * Added CI to generate [fluentd side car docker](https://github.com/jfrog/log-analytics-splunk/blob/master/fluentd-installer/Dockerfile.fluentd.sidecar) image from source 65 | 66 | ## [1.0.0] - May 26th, 2023 67 | 68 | * Supporting only OS/VM, Docker and k8s installation types 69 | * Adding .env files instead of setting/filling variables in fluentd config 70 | * Adding jfrog and heap callhome in fluentd config 71 | * Supporting only Artifactory and Xray Fluentd config 72 | 73 | ## [0.13.0] - Feb 14, 2022 74 | 75 | * Added call home implementation to the artifactory fluent configuration 76 | 77 | ## [0.12.0] - May 26, 2021 78 | 79 | ### Breaking Changes 80 | 81 | * Using unified fluentd configuration for Xray Logs and Violations dashboards 82 | * Using APIKey to authenticate Xray Violations (SIEM fluentd input plugin) 83 | * Sending Xray logs and violation data to the same index 84 | * Using log_source to filter in Xray Violations dashboard queries 85 | 86 | ## [0.11.2] - Apr 12, 2021 87 | 88 | * Fixing violation data correlation with user, ip information 89 | 90 | ## [0.11.2] - Apr 5, 2021 91 | 92 | * Fixing bugs in Dockerhub widgets, Xray Violations Dashboard 93 | 94 | ## [0.11.1] - Mar 30, 2021 95 | 96 | * Renaming widgets, fixing search queries in Xray Violations Dashboard 97 | 98 | ## [0.11.0] - Mar 3, 2021 99 | 100 | * Adding Violations widgets to Xray dashboard 101 | 102 | ## [0.10.0] - Feb 17, 2021 103 | 104 | * Normalizing access, request logs 105 | * Added eventtypes, tags, fields, macros to the app 106 | 107 | ## [0.9.2] - Feb 3, 2021 108 | 109 | * New Widgets to Xray tab to show vulnerability information 110 | 111 | ## [0.9.1] - Jan 28, 2021 112 | 113 | * Helm support for Splunk to deploy Artifactory or Xray via helm with logs sent to Splunk 114 | 115 | ## [0.9.0] - Dec 1, 2020 116 | 117 | * Log Volume charts to show only artifactory and xray logs respectively 118 | * Adding macro to avoid index dependency 119 | 120 | ## [0.8.0] - Oct 20, 2020 121 | 122 | * README updates for new Dockerhub / Docker widgets in Splunkbase app 123 | * Added CHANGELOG-splunkbase.md to mirror release notes in Splunkbase 124 | 125 | ## [0.7.0] - Oct 20, 2020 126 | 127 | * Fixing issue with ip_address in access logs having space and . at the end 128 | 129 | ## [0.6.0] - Sept 25, 2020 130 | 131 | * [BREAKING] Splunk fluentd configs updated to use JF_PRODUCT_DATA_INTERNAL env. 132 | 133 | ## [0.5.1] - Sept 9, 2020 134 | 135 | * Splunk repo now submodule of parent log-analytics. 
136 | 137 | ## [0.5.0] - Sept 8, 2020 138 | 139 | * Adding JFrog Pipelines fluent configuration files to capture logs 140 | 141 | ## [0.4.0] - Sept 4, 2020 142 | 143 | * Adding JFrog Mission Control fluent configuration files to capture logs 144 | 145 | ## [0.3.0] - Aug 26, 2020 146 | 147 | * Adding JFrog Distribution fluent configuration files to capture logs 148 | 149 | ## [0.2.0] - Aug 24, 2020 150 | 151 | * Splunk updates to launch new version of Splunkbase app v1.1.0 152 | 153 | ## [0.1.1] - June 1, 2020 154 | 155 | * Removing the need for user to specify splunk host , user, and token twice 156 | * Fixing issue with regex on the audit security log 157 | * Fixed issue with the repo and image when not docker api url 158 | 159 | ## [0.1.0] - May 12, 2020 160 | 161 | * Initial release of Jfrog Logs Analytic integration 162 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # JFrog welcomes community contribution! 2 | 3 | Before we can accept your contribution, process your GitHub pull requests, and thank you full-heartedly, we request that you will fill out and submit JFrog's Contributor License Agreement (CLA). 4 | 5 | [Click here](https://gist.github.com/jfrog-ecosystem/7d4fbeaac18edbd3cfc38831125acbb3) to view the JFrog CLA. 6 | 7 | Please comment in your pull request to mark your acceptance for now until CLA assistant is fixed. 8 | 9 | "I have read the CLA Document and I hereby sign the CLA" 10 | 11 | This should only take a minute to complete and is a one-time process. 12 | 13 | *Thanks for Your Contribution to the Community!* :-) 14 | 15 | ## Pull Request Process ## 16 | 17 | - Fork this repository. 18 | - Clone the forked repository to your local machine and perform the proposed changes. 19 | - Test the changes in your own K8s environment and confirm everything works end to end. 20 | - Update the CHANGELOG.md 21 | - Submit a PR with the relevant information and check the applicable boxes and fill out the questions. 22 | 23 | ## Acceptance Criteria ## 24 | 25 | - Pull requests must pass all automated checks 26 | - CHANGELOG.md has relevant changes 27 | - README.md has been updated if required 28 | - One approval from JFrog reviewers 29 | 30 | Upon the success of the above the pull request will be mergable into master branch. Upon merge the source branch will be removed. 31 | 32 | Increase the version numbers in any examples files and the README.md to the new version that this Pull Request would represent. The versioning scheme we use is SemVer. 33 | You may merge the Pull Request in once you have the sign-off of one other developer. 34 | 35 | ## Code of Conduct 36 | ### Our Pledge 37 | 38 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
39 | 40 | ### Our Standards 41 | 42 | Examples of behavior that contributes to creating a positive environment include: 43 | ```` 44 | Using welcoming and inclusive language 45 | Being respectful of differing viewpoints and experiences 46 | Gracefully accepting constructive criticism 47 | Focusing on what is best for the company 48 | Showing empathy towards other colleagues 49 | ```` 50 | 51 | Examples of unacceptable behavior by participants include: 52 | 53 | ```` 54 | The use of sexualized language or imagery and unwelcome sexual attention or advances 55 | Trolling, insulting/derogatory comments, and personal or political attacks 56 | Public or private harassment 57 | Publishing others' private information, such as a physical or electronic address, without explicit permission 58 | Other conduct which could reasonably be considered inappropriate in a professional setting 59 | ```` 60 | ### Our Responsibilities 61 | 62 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 63 | 64 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 65 | 66 | ## Scope 67 | 68 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project. Examples of representing a project include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 69 | 70 | ## Enforcement 71 | 72 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at Slack #xray_splunk . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 73 | 74 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 75 | 76 | ## Attribution 77 | 78 | This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at http://contributor-covenant.org/version/1/4 79 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SPLUNK 2 | 3 | ## Versions Supported 4 | 5 | This integration is last tested with Artifactory 7.104.7 and Xray 3.88.12 versions. 6 | 7 | ## Table of Contents 8 | 9 | `Note! You must follow the order of the steps throughout Splunk Configuration` 10 | 11 | 1. [Splunk Setup](#splunk-setup) 12 | 2. [JFrog Metrics Setup](#jfrog-metrics-setup) 13 | 3. [FluentD Installation](#fluentd-installation) 14 | * [OS / Virtual Machine](#os--virtual-machine) 15 | * [Docker](#docker) 16 | * [Kubernetes Deployment with Helm](#kubernetes-deployment-with-helm) 17 | 4. [Dashboards](#dashboards) 18 | 5. [Splunk Demo](#splunk-demo) 19 | 6. [References](#references) 20 | 21 | ## Splunk Setup 22 | 23 | ### Splunkbase App 24 | 25 | Install the `JFrog Log Analytics Platform` app from Splunkbase [here!](https://splunkbase.splunk.com/app/5023/) 26 | 27 | ```text 28 | 1. Download file from Splunkbase 29 | 2. Open Splunk web console as administrator 30 | 3. From homepage click on the Manage button with a wheel icon (left side of the screen, in the top right corner of Apps section) 31 | 4. Click on "Install app from file" 32 | 5. Select download file from Splunkbase on your computer 33 | 6. Click upgrade 34 | 7. Click upload 35 | ``` 36 | 37 | Splunk will ask the user to restart to complete the installation. If the app is not restarted automatically, do the following steps: 38 | 39 | ```text 40 | 1. Open Splunk web console as administrator 41 | 2. Click on Settings then Server Controls 42 | 3. Click on Restart Splunk 43 | ``` 44 | 45 | Login to Splunk after the restart completes. 46 | 47 | Confirm the version is the latest version available in Splunkbase. 48 | 49 | ### Configure Splunk 50 | 51 | Our integration uses the [Splunk HEC](https://dev.splunk.com/enterprise/docs/dataapps/httpeventcollector/) to send data to Splunk. 52 | 53 | Users will need to configure the HEC to accept data (enabled) and also create a new token. Steps are below. 54 | 55 | #### Create index for logs (default: jfrog_splunk) 56 | 57 | ```text 58 | 1. Open Splunk web console as administrator 59 | 2. Click on "Settings" in dropdown select "Indexes" 60 | 3. Click on "New Index" 61 | 4. Enter Index name as jfrog_splunk (or your custom name) 62 | 5. Click "Save" 63 | ``` 64 | 65 | #### Create index for metrics (default: jfrog_splunk_metrics) 66 | 67 | ```text 68 | 1. Open Splunk web console as administrator 69 | 2. Click on "Settings" in dropdown select "Indexes" 70 | 3. Click on "New Index" 71 | 4. Enter Index name as jfrog_splunk_metrics (or your custom name) 72 | 5. Select Index Data Type as Metrics 73 | 6. Click "Save" 74 | ``` 75 | 76 | **Note:** You can customize the index names by setting the `SPLUNK_LOGS_INDEX` and `SPLUNK_METRICS_INDEX` environment variables in your docker.env file. 77 | 78 | #### Configure new HEC token to receive Logs 79 | 80 | ```text 81 | 1. Open Splunk web console as administrator 82 | 2. 
Click on "Settings" in dropdown select "Data inputs" 83 | 3. Click on "HTTP Event Collector" 84 | 4. Click on "New Token" 85 | 5. Enter a "Name" in the textbox 86 | 6. (Optional) Enter a "Description" in the textbox 87 | 7. Click on the green "Next" button 88 | 8. Add "jfrog_splunk" (or your custom logs index name) to store the JFrog platform log data into. 89 | 9. Click on the green "Review" button 90 | 10. If good, Click on the green "Done" button 91 | 11. Save the generated token value 92 | ``` 93 | 94 | #### Configure new HEC token to receive Metrics 95 | 96 | ```text 97 | 1. Open Splunk web console as administrator 98 | 2. Click on "Settings" in dropdown select "Data inputs" 99 | 3. Click on "HTTP Event Collector" 100 | 4. Click on "New Token" 101 | 5. Enter a "Name" in the textbox 102 | 6. (Optional) Enter a "Description" in the textbox 103 | 7. Click on the green "Next" button 104 | 8. Add "jfrog_splunk_metrics" (or your custom metrics index name) to store the JFrog platform metrics data into. 105 | 9. Click on the green "Review" button 106 | 10. If good, Click on the green "Done" button 107 | 11. Save the generated token value 108 | ``` 109 | 110 | ## JFrog Metrics Setup 111 | 112 | For non Kubernetes-based installations, enable metrics in Artifactory, make the following configuration changes to the [Artifactory System YAML](https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML) 113 | 114 | ```yaml 115 | shared: 116 | metrics: 117 | enabled: true 118 | 119 | artifactory: 120 | metrics: 121 | enabled: true 122 | ``` 123 | 124 | Once this configuration is done and the application is restarted, metrics will be available in Open Metrics Format 125 | 126 | Metrics are enabled by default in Xray. 127 | For Kubernetes-based installations, openMetrics is enabled in the helm install commands listed below 128 | 129 | ## Fluentd Installation 130 | 131 | ### OS / Virtual Machine 132 | 133 | Ensure you have access to the Internet from VM. Recommended install is through fluentd's native OS based package installs: 134 | 135 | 136 | | OS | Package Manager | Link | 137 | | ------------- | ------------------- | ---------------------------------------------------- | 138 | | CentOS/RHEL | Linux - RPM (YUM) | https://docs.fluentd.org/installation/install-by-rpm | 139 | | Debian/Ubuntu | Linux - APT | https://docs.fluentd.org/installation/install-by-deb | 140 | | MacOS/Darwin | MacOS - DMG | https://docs.fluentd.org/installation/install-by-dmg | 141 | | Windows | Windows - MSI | https://docs.fluentd.org/installation/install-by-msi | 142 | | Gem Install** | MacOS & Linux - Gem | https://docs.fluentd.org/installation/install-by-gem | 143 | 144 | ```text 145 | ** For Gem based install, Ruby Interpreter has to be setup first, following is the recommended process to install Ruby 146 | 147 | 1. Install Ruby Version Manager (RVM) as described in https://rvm.io/rvm/install#installation-explained, ensure to follow all the onscreen instructions provided to complete the rvm installation 148 | * For installation across users a SUDO based install is recommended, the installation is as described in https://rvm.io/support/troubleshooting#sudo 149 | 150 | 2. Once rvm installation is complete, verify the RVM installation executing the command 'rvm -v' 151 | 152 | 3. Now install ruby v2.7.0 or above executing the command 'rvm install ', ex: 'rvm install 2.7.5' 153 | 154 | 4. 
Verify the ruby installation, execute 'ruby -v', gem installation 'gem -v' and 'bundler -v' to ensure all the components are intact 155 | 156 | 5. Post completion of Ruby, Gems installation, the environment is ready to further install new gems, execute the following gem install commands one after other to setup the needed ecosystem 157 | 158 | 'gem install fluentd' 159 | 160 | ``` 161 | 162 | After FluentD is successfully installed, the below plugins are required to be installed 163 | 164 | ```bash 165 | gem install fluent-plugin-concat 166 | gem install fluent-plugin-splunk-hec 167 | gem install fluent-plugin-jfrog-siem 168 | gem install fluent-plugin-jfrog-metrics 169 | ``` 170 | 171 | #### Configure FluentD 172 | 173 | We rely heavily on environment variables so that the correct log files are streamed to your observability dashboards. Ensure that you fill in the .env file with correct values. Download the .env file from [here](https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/jfrog.env) 174 | 175 | * **JF_PRODUCT_DATA_INTERNAL**: This environment variable must be set to the folder that contains the `log` folder. For each JFrog service, you can find its active log files in the `$JFROG_HOME//var/log`. This environment variable should point to the folder that contains the `log` directory. For example, for `artifactory` set this variable to `$JFROG_HOME/artifactory/var` 176 | * **SPLUNK_COM_PROTOCOL**: HTTP Scheme, http or https 177 | * **SPLUNK_HEC_HOST**: Splunk Instance URL 178 | * **SPLUNK_HEC_PORT**: Splunk HEC configured port 179 | * **SPLUNK_HEC_TOKEN**: Splunk HEC Token for sending logs to Splunk 180 | * **SPLUNK_METRICS_HEC_TOKEN**: Splunk HEC Token for sending metrics to Splunk 181 | * **SPLUNK_LOGS_INDEX**: Splunk index name for storing logs (default: jfrog_splunk) 182 | * **SPLUNK_METRICS_INDEX**: Splunk index name for storing metrics (default: jfrog_splunk_metrics) 183 | * **SPLUNK_INSECURE_SSL**: false for test environments only or if http scheme 184 | * **SPLUNK_VERIFY_SSL**: false for disabling ssl validation (useful for proxy forwarding or bypassing ssl certificate validation) 185 | * **SPLUNK_COMPRESS_DATA**: true for compressing logs and metrics json payloads on outbound to Splunk 186 | * **JPD_URL**: Artifactory JPD URL of the format `http://` 187 | * **JPD_ADMIN_USERNAME**: Artifactory username for authentication 188 | * **JFROG_ADMIN_TOKEN**: Artifactory [Access Token](https://jfrog.com/help/r/how-to-generate-an-access-token-video/artifactory-creating-access-tokens-in-artifactory) for authentication 189 | * **COMMON_JPD**: This flag should be set as true only for non-kubernetes installations or installations where JPD base URL is same to access both Artifactory and Xray (ex: https://sample_base_url/artifactory or https://sample_base_url/xray) 190 | * **LOG_ENV**: Optional environment tag for categorizing logs and metrics (e.g., `staging`, `production`, `dev`). This tag will be added to all logs and metrics sent to Splunk as `env:` 191 | 192 | Apply the .env files and then run the fluentd wrapper with one argument pointed to the `fluent.conf.*` file configured. 193 | 194 | ```bash 195 | source jfrog.env 196 | ./fluentd $JF_PRODUCT_DATA_INTERNAL/fluent.conf. 197 | ``` 198 | 199 | ### Docker 200 | 201 | `Note! These steps were not tested to work out of the box on MAC`
202 | In order to run FluentD as a Docker image to send the logs, violations and metrics data to Splunk, the following commands need to be executed on the host that runs Docker. 203 | 204 | 1. Check that the Docker installation is functional by executing the commands 'docker version' and 'docker ps'. 205 | 2. Once the version and processes are listed successfully, build the intended Docker image for Splunk using the Dockerfile, 206 | 207 | * Download the Dockerfile from [here](https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/docker-build/Dockerfile) to any directory that has write permissions. 208 | 3. Download the docker.env file needed to run the JFrog/FluentD Docker images for Splunk, 209 | 210 | * Download docker.env from [here](https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/docker-build/docker.env) to the directory where the Dockerfile was downloaded. 211 | 212 | For Splunk as the observability platform, execute these commands to set up the Docker container running the FluentD installation 213 | 214 | 1. Execute 215 | 216 | ```bash 217 | docker build --build-arg SOURCE="JFRT" --build-arg TARGET="SPLUNK" -t <image_name> . 218 | ``` 219 | 220 | Command example 221 | 222 | ```bash 223 | docker build --build-arg SOURCE="JFRT" --build-arg TARGET="SPLUNK" -t jfrog/fluentd-splunk-rt . 224 | ``` 225 | 226 | The above command will build the docker image. 227 | 2. Fill in the necessary information in the docker.env file 228 | 229 | **JF_PRODUCT_DATA_INTERNAL**: This environment variable must be set to the folder that contains the `log` folder. For each JFrog service, you can find its active log files in `$JFROG_HOME/<product>/var/log`. This environment variable should point to the folder that contains the `log` directory. For example, for `artifactory` set this variable to `$JFROG_HOME/artifactory/var` 230 | **SPLUNK_COM_PROTOCOL**: HTTP Scheme, http or https 231 | **SPLUNK_HEC_HOST**: Splunk Instance URL 232 | **SPLUNK_HEC_PORT**: Splunk HEC configured port 233 | **SPLUNK_HEC_TOKEN**: Splunk HEC Token for sending logs to Splunk 234 | **SPLUNK_METRICS_HEC_TOKEN**: Splunk HEC Token for sending metrics to Splunk 235 | **SPLUNK_INSECURE_SSL**: false for test environments only or if http scheme 236 | **SPLUNK_VERIFY_SSL**: false for disabling ssl validation (useful for proxy forwarding or bypassing ssl certificate validation) 237 | **SPLUNK_COMPRESS_DATA**: true for compressing logs and metrics payloads that are sent to Splunk 238 | **JPD_URL**: Artifactory JPD URL of the format `http://<ip_or_hostname>` 239 | **JPD_ADMIN_USERNAME**: Artifactory username for authentication 240 | **JFROG_ADMIN_TOKEN**: Artifactory [Access Token](https://jfrog.com/help/r/how-to-generate-an-access-token-video/artifactory-creating-access-tokens-in-artifactory) for authentication 241 | **COMMON_JPD**: This flag should be set as true only for non-kubernetes installations or installations where the JPD base URL is the same for accessing both Artifactory and Xray (ex: https://sample_base_url/artifactory or https://sample_base_url/xray) 242 | **LOG_ENV**: Optional environment tag for categorizing logs and metrics (e.g., `staging`, `production`, `dev`). This tag will be added to all logs and metrics sent to Splunk as `env:` 243 | 3. 
Execute 244 | 245 | ```bash 246 | docker run -it --name jfrog-fluentd-splunk-rt -v <path_to_logs_directory>:/var/opt/jfrog/artifactory --env-file docker.env <image_name> 247 | ``` 248 | 249 | The <path_to_logs_directory> should be an absolute path to where the JFrog Artifactory logs folder resides, e.g. for a Docker-based Artifactory installation: /var/opt/jfrog/artifactory/var/logs on the Docker host. 250 | 251 | Command example 252 | 253 | ```bash 254 | docker run -it --name jfrog-fluentd-splunk-rt -v $JFROG_HOME/artifactory/var/:/var/opt/jfrog/artifactory --env-file docker.env jfrog/fluentd-splunk-rt 255 | ``` 256 | 257 | ### Kubernetes Deployment with Helm 258 | 259 | The recommended installation for Kubernetes is to utilize the helm chart with the associated values.yaml in this repo. 260 | 261 | 262 | | Product | Example Values File | 263 | | -------------- | ------------------------------- | 264 | | Artifactory | helm/artifactory-values.yaml | 265 | | Artifactory HA | helm/artifactory-ha-values.yaml | 266 | | Xray | helm/xray-values.yaml | 267 | 268 | > [!WARNING] 269 | > 270 | > The old docker registry `partnership-pts-observability.jfrog.io`, which contains older versions of this integration, is now deprecated. We'll keep the existing docker images on this old registry until August 1st, 2024. After that date, this registry will no longer be available. Please `helm upgrade` your JFrog kubernetes deployment in order to pull images as specified in the above helm values files, from the new `releases-pts-observability-fluentd.jfrog.io` registry. Please do so in order to avoid `ImagePullBackOff` errors in your deployment once this registry is gone. 271 | 272 | Add the JFrog Helm repository: 273 | 274 | ```bash 275 | helm repo add jfrog https://charts.jfrog.io 276 | helm repo update 277 | ``` 278 | 279 | Throughout the example helm installations we'll use `jfrog-splunk` as an example namespace. That said, you can use a different or existing namespace instead by setting the following environment variable 280 | 281 | ```bash 282 | export INST_NAMESPACE=jfrog-splunk 283 | ``` 284 | 285 | If you don't have an existing namespace for the deployment, create it and set the kubectl context to use this namespace 286 | 287 | ```bash 288 | kubectl create namespace $INST_NAMESPACE 289 | kubectl config set-context --current --namespace=$INST_NAMESPACE 290 | ``` 291 | 292 | Generate ``masterKey`` and ``joinKey`` for the installation 293 | 294 | ```bash 295 | export JOIN_KEY=$(openssl rand -hex 32) 296 | export MASTER_KEY=$(openssl rand -hex 32) 297 | ``` 298 | 299 | #### Artifactory ⎈: 300 | 301 | 1. Skip this step if you already have Artifactory installed. Else, install Artifactory using the command below 302 | 303 | ```bash 304 | helm upgrade --install artifactory jfrog/artifactory \ 305 | --set artifactory.masterKey=$MASTER_KEY \ 306 | --set artifactory.joinKey=$JOIN_KEY \ 307 | --set artifactory.metrics.enabled=true \ 308 | -n $INST_NAMESPACE --create-namespace 309 | ``` 310 | 311 | :bulb: Metrics collection is disabled by default in Artifactory. Please make sure that you are following the above `helm upgrade` command to enable them in Artifactory by setting `artifactory.metrics.enabled=true`. For Artifactory versions <=7.86.x, please enable metrics by setting the flag `artifactory.openMetrics.enabled=true`
312 | 2. Create a secret for JFrog's admin token - [Access Token](https://jfrog.com/help/r/how-to-generate-an-access-token-video/artifactory-creating-access-tokens-in-artifactory) using any of the following methods 313 | 314 | ```bash 315 | kubectl create secret generic jfrog-admin-token --from-file=token= 316 | 317 | OR 318 | 319 | kubectl create secret generic jfrog-admin-token --from-literal=token= 320 | ``` 321 | 3. For Artifactory installation, download the .env file from [here](https://github.com/jfrog/log-analytics-splunk/raw/master/helm/jfrog_helm.env). Fill in the jfrog_helm.env file with correct values. 322 | 323 | * **SPLUNK_COM_PROTOCOL**: HTTP Scheme, http or https 324 | * **SPLUNK_HEC_HOST**: Splunk Instance URL 325 | * **SPLUNK_HEC_PORT**: Splunk HEC configured port 326 | * **SPLUNK_HEC_TOKEN**: Splunk HEC Token for sending logs to Splunk 327 | * **SPLUNK_METRICS_HEC_TOKEN**: Splunk HEC Token for sending metrics to Splunk 328 | * **SPLUNK_INSECURE_SSL**: false for test environments only or if http scheme 329 | * **SPLUNK_VERIFY_SSL**: false for disabling ssl validation (useful for proxy forwarding or bypassing ssl certificate validation) 330 | * **SPLUNK_COMPRESS_DATA**: true for compressing logs and metrics json payloads on outbound to Splunk 331 | * **JPD_URL**: Artifactory JPD URL of the format `http://` 332 | * **JPD_ADMIN_USERNAME**: Artifactory username for authentication 333 | * **COMMON_JPD**: This flag should be set as true only for non-kubernetes installations or installations where JPD base URL is same to access both Artifactory and Xray (ex: https://sample_base_url/artifactory or https://sample_base_url/xray) 334 | * **LOG_ENV**: Optional environment tag for categorizing logs and metrics (e.g., `staging`, `production`, `dev`). This tag will be added to all logs and metrics sent to Splunk as `env:` 335 | 336 | Apply the .env files using the helm command below 337 | 338 | ```bash 339 | source jfrog_helm.env 340 | ``` 341 | 4. Postgres password is required to upgrade Artifactory. Run the following command to get the current password 342 | 343 | ```bash 344 | POSTGRES_PASSWORD=$(kubectl get secret artifactory-postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) 345 | ``` 346 | 5. Upgrade Artifactory installation using the command below 347 | 348 | ```bash 349 | helm upgrade --install artifactory jfrog/artifactory \ 350 | --set artifactory.joinKey=$JOIN_KEY \ 351 | --set databaseUpgradeReady=true --set postgresql.auth.password=$POSTGRES_PASSWORD --set nginx.service.ssloffload=true \ 352 | --set splunk.host=$SPLUNK_HEC_HOST \ 353 | --set splunk.port=$SPLUNK_HEC_PORT \ 354 | --set splunk.logs_token=$SPLUNK_HEC_TOKEN \ 355 | --set splunk.metrics_token=$SPLUNK_METRICS_HEC_TOKEN \ 356 | --set splunk.logs_index=$SPLUNK_LOGS_INDEX \ 357 | --set splunk.metrics_index=$SPLUNK_METRICS_INDEX \ 358 | --set splunk.compress_data=$SPLUNK_COMPRESS_DATA \ 359 | --set splunk.com_protocol=$SPLUNK_COM_PROTOCOL \ 360 | --set splunk.insecure_ssl=$SPLUNK_INSECURE_SSL \ 361 | --set splunk.verify_ssl=$SPLUNK_VERIFY_SSL \ 362 | --set jfrog.observability.jpd_url=$JPD_URL \ 363 | --set jfrog.observability.username=$JPD_ADMIN_USERNAME \ 364 | --set jfrog.observability.common_jpd=$COMMON_JPD \ 365 | --set jfrog.observability.log_env=$LOG_ENV \ 366 | -f helm/artifactory-values.yaml \ 367 | -n $INST_NAMESPACE --create-namespace 368 | ``` 369 | 370 | #### Artifactory-HA ⎈: 371 | 372 | 1. For HA installation, please create a license secret on your cluster prior to installation. 
373 | 374 | ```bash 375 | kubectl create secret generic artifactory-license --from-file= 376 | ``` 377 | 2. Skip this step if you already have Artifactory installed. Else, install Artifactory using the command below 378 | 379 | ```bash 380 | helm upgrade --install artifactory-ha jfrog/artifactory-ha \ 381 | --set artifactory.masterKey=$MASTER_KEY \ 382 | --set artifactory.joinKey=$JOIN_KEY \ 383 | --set artifactory.license.secret=artifactory-license \ 384 | --set artifactory.license.dataKey=artifactory.cluster.license \ 385 | --set artifactory.metrics.enabled=true \ 386 | -n $INST_NAMESPACE --create-namespace 387 | ``` 388 | 389 | :bulb: Metrics collection is disabled by default in Artifactory-HA. Please make sure that you are following the above `helm upgrade` command to enable them in Artifactory by setting `artifactory.metrics.enabled=true`. For Artifactory versions <=7.86.x, please enable metrics by setting the flag `artifactory.openMetrics.enabled=true`
390 | 3. Create a secret for JFrog's admin token - [Access Token](https://jfrog.com/help/r/how-to-generate-an-access-token-video/artifactory-creating-access-tokens-in-artifactory) using any of the following methods 391 | 392 | ```bash 393 | kubectl create secret generic jfrog-admin-token --from-file=token= 394 | 395 | OR 396 | 397 | kubectl create secret generic jfrog-admin-token --from-literal=token= 398 | ``` 399 | 4. Download the .env file from [here](https://github.com/jfrog/log-analytics-splunk/raw/master/helm/jfrog_helm.env). Fill in the jfrog_helm.env file with correct values. 400 | 401 | * **SPLUNK_COM_PROTOCOL**: HTTP Scheme, http or https 402 | * **SPLUNK_HEC_HOST**: Splunk Instance URL 403 | * **SPLUNK_HEC_PORT**: Splunk HEC configured port 404 | * **SPLUNK_HEC_TOKEN**: Splunk HEC Token for sending logs to Splunk 405 | * **SPLUNK_METRICS_HEC_TOKEN**: Splunk HEC Token for sending metrics to Splunk 406 | * **SPLUNK_INSECURE_SSL**: false for test environments only or if http scheme 407 | * **SPLUNK_VERIFY_SSL**: false for disabling ssl validation (useful for proxy forwarding or bypassing ssl certificate validation) 408 | * **SPLUNK_COMPRESS_DATA**: true for compressing logs and metrics json payloads on outbound to Splunk 409 | * **JPD_URL**: Artifactory JPD URL of the format `http://` 410 | * **JPD_ADMIN_USERNAME**: Artifactory username for authentication 411 | * **COMMON_JPD**: This flag should be set as true only for non-kubernetes installations or installations where JPD base URL is same to access both Artifactory and Xray (ex: https://sample_base_url/artifactory or https://sample_base_url/xray) 412 | * **LOG_ENV**: Optional environment tag for categorizing logs and metrics (e.g., `staging`, `production`, `dev`). This tag will be added to all logs and metrics sent to Splunk as `env:` 413 | 414 | Apply the .env files and then run the helm command below 415 | 416 | ```bash 417 | source jfrog_helm.env 418 | ``` 419 | 5. Postgres password is required to upgrade Artifactory. Run the following command to get the current password 420 | 421 | ```bash 422 | POSTGRES_PASSWORD=$(kubectl get secret artifactory-ha-postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) 423 | ``` 424 | 6. 
Upgrade Artifactory HA installation using the command below 425 | 426 | ```bash 427 | helm upgrade --install artifactory-ha jfrog/artifactory-ha \ 428 | --set artifactory.joinKey=$JOIN_KEY \ 429 | --set databaseUpgradeReady=true --set postgresql.auth.password=$POSTGRES_PASSWORD --set nginx.service.ssloffload=true \ 430 | --set splunk.host=$SPLUNK_HEC_HOST \ 431 | --set splunk.port=$SPLUNK_HEC_PORT \ 432 | --set splunk.logs_token=$SPLUNK_HEC_TOKEN \ 433 | --set splunk.metrics_token=$SPLUNK_METRICS_HEC_TOKEN \ 434 | --set splunk.logs_index=$SPLUNK_LOGS_INDEX \ 435 | --set splunk.metrics_index=$SPLUNK_METRICS_INDEX \ 436 | --set splunk.com_protocol=$SPLUNK_COM_PROTOCOL \ 437 | --set splunk.insecure_ssl=$SPLUNK_INSECURE_SSL \ 438 | --set splunk.verify_ssl=$SPLUNK_VERIFY_SSL \ 439 | --set splunk.compress_data=$SPLUNK_COMPRESS_DATA \ 440 | --set jfrog.observability.jpd_url=$JPD_URL \ 441 | --set jfrog.observability.username=$JPD_ADMIN_USERNAME \ 442 | --set jfrog.observability.common_jpd=$COMMON_JPD \ 443 | --set jfrog.observability.log_env=$LOG_ENV \ 444 | -f helm/artifactory-ha-values.yaml \ 445 | -n $INST_NAMESPACE --create-namespace 446 | ``` 447 | 448 | #### Xray ⎈: 449 | 450 | Create a secret for JFrog's admin token - [Access Token](https://jfrog.com/help/r/how-to-generate-an-access-token-video/artifactory-creating-access-tokens-in-artifactory) using any of the following methods if it doesn't exist 451 | 452 | ```bash 453 | kubectl create secret generic jfrog-admin-token --from-file=token= 454 | 455 | OR 456 | 457 | kubectl create secret generic jfrog-admin-token --from-literal=token= 458 | ``` 459 | 460 | For Xray installation, download the .env file from [here](https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/jfrog_helm.env). Fill in the jfrog_helm.env file with correct values. 461 | 462 | * **SPLUNK_COM_PROTOCOL**: HTTP Scheme, http or https 463 | * **SPLUNK_HEC_HOST**: Splunk Instance URL 464 | * **SPLUNK_HEC_PORT**: Splunk HEC configured port 465 | * **SPLUNK_HEC_TOKEN**: Splunk HEC Token for sending logs to Splunk 466 | * **SPLUNK_METRICS_HEC_TOKEN**: Splunk HEC Token for sending metrics to Splunk 467 | * **SPLUNK_LOGS_INDEX**: Splunk index name for storing logs (default: jfrog_splunk) 468 | * **SPLUNK_METRICS_INDEX**: Splunk index name for storing metrics (default: jfrog_splunk_metrics) 469 | * **SPLUNK_INSECURE_SSL**: false for test environments only or if http scheme 470 | * **SPLUNK_VERIFY_SSL**: false for disabling ssl validation (useful for proxy forwarding or bypassing ssl certificate validation) 471 | * **SPLUNK_COMPRESS_DATA**: true for compressing logs and metrics json payloads on outbound to Splunk 472 | * **JPD_URL**: Artifactory JPD URL of the format `http://` 473 | * **JPD_ADMIN_USERNAME**: Artifactory username for authentication 474 | * **JFROG_ADMIN_TOKEN**: For security reasons, this value will be pulled from the secret jfrog-admin-token created in the step above 475 | * **COMMON_JPD**: This flag should be set as true only for non-kubernetes installations or installations where JPD base URL is same to access both Artifactory and Xray (ex: https://sample_base_url/artifactory or https://sample_base_url/xray) 476 | * **LOG_ENV**: Optional environment tag for categorizing logs and metrics (e.g., `staging`, `production`, `dev`). 
This tag will be added to all logs and metrics sent to Splunk as `env:` 477 | 478 | Apply the .env files and then run the helm command below 479 | 480 | ```bash 481 | source jfrog_helm.env 482 | ``` 483 | 484 | Generate a master key for xray 485 | 486 | ```bash 487 | export XRAY_MASTER_KEY=$(openssl rand -hex 32) 488 | ``` 489 | 490 | Use the same `joinKey` as you used in Artifactory installation ($JOIN_KEY) to allow Xray node to successfully connect to Artifactory. 491 | 492 | ```bash 493 | helm upgrade --install xray jfrog/xray --set xray.jfrogUrl=$JPD_URL \ 494 | --set xray.masterKey=$XRAY_MASTER_KEY \ 495 | --set xray.joinKey=$JOIN_KEY \ 496 | --set splunk.host=$SPLUNK_HEC_HOST \ 497 | --set splunk.port=$SPLUNK_HEC_PORT \ 498 | --set splunk.logs_token=$SPLUNK_HEC_TOKEN \ 499 | --set splunk.metrics_token=$SPLUNK_METRICS_HEC_TOKEN \ 500 | --set splunk.logs_index=$SPLUNK_LOGS_INDEX \ 501 | --set splunk.metrics_index=$SPLUNK_METRICS_INDEX \ 502 | --set splunk.com_protocol=$SPLUNK_COM_PROTOCOL \ 503 | --set splunk.insecure_ssl=$SPLUNK_INSECURE_SSL \ 504 | --set splunk.verify_ssl=$SPLUNK_VERIFY_SSL \ 505 | --set splunk.compress_data=$SPLUNK_COMPRESS_DATA \ 506 | --set jfrog.observability.jpd_url=$JPD_URL \ 507 | --set jfrog.observability.username=$JPD_ADMIN_USERNAME \ 508 | --set jfrog.observability.common_jpd=$COMMON_JPD \ 509 | --set jfrog.observability.log_env=$LOG_ENV \ 510 | -f helm/xray-values.yaml \ 511 | -n $INST_NAMESPACE --create-namespace 512 | ``` 513 | 514 | ## Dashboards 515 | 516 | ### Artifactory dashboard 517 | 518 | JFrog Artifactory Dashboard is divided into multiple sections Application, Audit, Requests, Docker, System Metrics, Heap Metrics and Connection Metrics 519 | 520 | * **Application** - This section tracks Log Volume(information about different log sources) and Artifactory Errors over time(bursts of application errors that may otherwise go undetected) 521 | * **Audit** - This section tracks audit logs help you determine who is accessing your Artifactory instance and from where. These can help you track potentially malicious requests or processes (such as CI jobs) using expired credentials. 522 | * **Requests** - This section tracks HTTP response codes, Top 10 IP addresses for uploads and downloads 523 | * **Docker** - To monitor Dockerhub pull requests users should have a Dockerhub account either paid or free. Free accounts allow up to 200 pull requests per 6 hour window. Various widgets have been added in the new Docker tab under Artifactory to help monitor your Dockerhub pull requests. An alert is also available to enable if desired that will allow you to send emails or add outbound webhooks through configuration to be notified when you exceed the configurable threshold. 524 | * **System Metrics** - This section tracks CPU Usage, System Memory and Disk Usage metrics 525 | * **Heap Metrics** - This section tracks Heap Memory and Garbage Collection 526 | * **Connection Metrics** - This section tracks Database connections and HTTP Connections 527 | 528 | ### Xray dashboard 529 | 530 | JFrog Xray Dashboard is divided into three sections Logs, Violations and Metrics 531 | 532 | * **Logs** - This section provides a summary of access, service and traffic log volumes associated with Xray. Additionally, customers are also able to track various HTTP response codes, HTTP 500 errors, and log errors for greater operational insight 533 | * **Violations** - This section provides an aggregated summary of all the license violations and security vulnerabilities found by Xray. 
535 | 536 | ### CIM Compatibility 537 | 538 | Log data from the JFrog Platform is translated to pre-defined Common Information Models (CIM) compatible with Splunk. This compatibility enables advanced features where users can search and access JFrog log data that is compatible with data models. For example: 539 | 540 | ```text 541 | | datamodel Web Web search 542 | | datamodel Change_Analysis All_Changes search 543 | | datamodel Vulnerabilities Vulnerabilities search 544 | ``` 545 | 546 | ## Splunk Demo 547 | 548 | To try this integration, users can create a Splunk instance in Kubernetes with the correct ports open by applying the yaml file: 549 | 550 | ```bash 551 | kubectl apply -f k8s/splunk.yaml 552 | ``` 553 | 554 | This will create a new Splunk instance that can be used as a demo target for JFrog logs, violations and metrics. Follow the setup steps listed above to see data in the Dashboards. 555 | 556 | ## References 557 | 558 | * [Fluentd](https://www.fluentd.org) - Fluentd Logging Aggregator/Agent 559 | * [Splunk](https://www.splunk.com/) - Splunk Logging Platform 560 | * [Splunk HEC](https://dev.splunk.com/enterprise/docs/dataapps/httpeventcollector/) - Splunk HEC used to upload data into Splunk 561 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | 6 | 7 | | Version | Supported | 8 | | -------- | --------- | 9 | | 1.0.x | ✅ | 10 | | < 1.0.x | ❌ | 11 | 12 | ## Reporting a Vulnerability 13 | 14 | Please post your vulnerability report from the following page: 15 | https://github.com/jfrog/log-analytics-splunk/security/advisories 16 | 17 | ## Fixed issues 18 | 19 | #### March 22, 2024 20 | 21 | Upgraded docker images to use fluentd:1.16.3 22 | This resolves the following CVEs: 23 | 24 | * [CVE-2023-4911](https://github.com/advisories/GHSA-m77w-6vjw-wh2f "CVE-2023-4911") 25 | * [CVE-2019-8457](https://github.com/advisories/GHSA-p4jx-5p2x-4pq7) 26 | 27 | Note: When scanning our fluentd docker image `releases-pts-observability-fluentd.jfrog.io/fluentd:4.1` for vulnerabilities, [CVE-2023-45853](https://www.cve.org/CVERecord?id=CVE-2023-45853) still comes up (from fluentd:1.16.3). However, fluentd claims it's a [non-issue](https://github.com/fluent/fluentd-kubernetes-daemonset/issues/1464#:~:text=zlib%3A%20CVE,not%20built%2Din.) since the affected code (MiniZip) is not built in 28 | -------------------------------------------------------------------------------- /app/.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/.tar -------------------------------------------------------------------------------- /app/README.md: -------------------------------------------------------------------------------- 1 | # JFrog Platform Log Analytics Splunk App Development 2 | 3 | ## App Directory Structure 4 | A Splunk App has a specific directory structure for packaging.
Most of your development will be in [jfrog-logs/default/data/ui](jfrog-logs/default/data/ui) using [Simple XML](https://docs.splunk.com/Documentation/Splunk/8.0.5/Viz/PanelreferenceforSimplifiedXML) to lay out your dashboard UI. CSS and JavaScript in [jfrog-logs/appserver/static](jfrog-logs/appserver/static) can provide additional styling and functionality. 5 | ![App Directory Structure](https://dev.splunk.com/enterprise/static/app-overview-directorystructure-b397da01a76c1122a7c3c389f0c8ebeb.png) 6 | 7 | ### Local Development 8 | #### Installing Splunk Enterprise Locally 9 | It may be easier to install Splunk Enterprise locally for quicker development. You can install Splunk Enterprise from [here](https://www.splunk.com/en_us/download/splunk-enterprise.html). 10 | 11 | With Splunk Enterprise running, you can develop from the apps directory at _$SPLUNK_HOME/etc/apps/jfrog-logs_. Edit the UI XML files here and restart the server to see the changes. Use the Splunk CLI (_$SPLUNK_HOME/bin/splunk_) to restart the server. 12 | 13 | ``` 14 | $ splunk [start | stop | restart] 15 | ``` 16 | 17 | Occasionally, you may need to clear your index to start from scratch: 18 | 19 | ``` 20 | $ splunk stop 21 | $ splunk clean eventdata -index 22 | $ splunk start 23 | ``` 24 | 25 | #### Installing Fluentd Locally 26 | You can install FluentD locally and send demo data. This works particularly well for dashboard and chart enhancements. You can send demo data without having a full Artifactory and Xray deployment. You can modify your demo data quickly to achieve the type of visualizations needed. 27 | 28 | ##### Install FluentD (OSX) 29 | ``` 30 | $ gem install fluentd --no-doc 31 | ``` 32 | 33 | ##### Install Splunk FluentD Plugin (OSX) 34 | ``` 35 | $ gem install fluent-plugin-splunk-hec 36 | ``` 37 | ### Demo Data 38 | Demo data that populates all dashboards and charts can be used to verify your development. You can use the config file [fluentd-demo.conf](../fluentd-demo.conf) as an example. Add and modify _dummy_ data as needed. 39 | 40 | ``` 41 | $ fluentd -c fluentd-demo.conf 42 | ``` 43 | 44 | ### Static File Changes (CSS, JS) 45 | CSS and JS file changes require cache updates to be seen. Clear your browser cache. You also need to clear the Splunk webserver cache. Do this from http:///_bump. 46 | 47 | ### Update app.conf 48 | Before packaging the app, update the [app.conf](jfrog-logs/default/app.conf). Make sure to update the version. 49 | 50 | ### Package the App 51 | To package the app, use the Splunk CLI. 52 | 53 | ``` 54 | $ cd jfrog-logs 55 | $ splunk package app jfrog-logs 56 | ``` 57 | 58 | ### Install the App 59 | Install the app in your Splunk instance through _Apps > Manage Apps > Install app from file_. 60 | 61 | ### Create the HEC Data Input to Receive Data 62 | You may need to create a new HTTP Event Collector data input. You can do this at _Settings > Data Inputs > HTTP Event Collector_. Use the JFrog app as the context.
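Before wiring the new token into Fluentd, it can help to confirm that the data input actually accepts events. This is just a sketch: `HEC_HOST`, `HEC_PORT` and `HEC_TOKEN` mirror the placeholders used in the Fluentd snippet that follows, the `https`/`-k` combination assumes HEC has SSL enabled with a self-signed certificate, and `jfrog.test` is a made-up sourcetype used only as a marker.

```
# Placeholders matching the Fluentd config below -- replace with your local values.
$ HEC_HOST=localhost
$ HEC_PORT=8088
$ HEC_TOKEN=REPLACE_WITH_YOUR_HEC_TOKEN

# Send one test event through the new data input.
$ curl -k "https://$HEC_HOST:$HEC_PORT/services/collector/event" \
    -H "Authorization: Splunk $HEC_TOKEN" \
    -d '{"event": "jfrog-logs app HEC smoke test", "sourcetype": "jfrog.test"}'
```

A `{"text":"Success","code":0}` response means the token and its index are wired up correctly.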
Then use the token in the FluentD configs: 63 | 64 | ``` 65 | 66 | @type splunk_hec 67 | hec_host HEC_HOST 68 | hec_port HEC_PORT 69 | hec_token HEC_TOKEN <-- replace HEC_TOKEN 70 | format json 71 | sourcetype_key log_source 72 | use_fluentd_time false 73 | # buffered output parameter 74 | # flush_interval 10s 75 | # ssl parameter 76 | #use_ssl true 77 | #ca_file /path/to/ca.pem 78 | 79 | #END SPLUNK OUTPUT 80 | ``` 81 | 82 | ### Removing the App 83 | To completely remove the app use the Splunk CLI: 84 | 85 | ``` 86 | $ splunk remove app jfrog-logs 87 | ``` 88 | Note: This will also remove the HEC Event Collector. 89 | 90 | ### Use Splunk AppInspect to Pre-Validate the App 91 | Before submitting the app to Splunkbase for validation, use the [Splunk AppInspect API](https://dev.splunk.com/enterprise/docs/developapps/testvalidate/appinspect/splunkappinspectapi/runappinspectrequestsapi) to pre-validate and resolve issues. 92 | 93 | ### Submitting to Splunkbase 94 | When testing and validation is complete, follow these [instructions](https://dev.splunk.com/enterprise/docs/releaseapps/splunkbase/submitcontentui) for submitting the app to Splunkbase. 95 | ## References 96 | * [Develop Splunk Apps](https://dev.splunk.com/enterprise/docs/developapps) 97 | * [Simple XML Reference for Apps](https://docs.splunk.com/Documentation/Splunk/8.0.5/Viz/PanelreferenceforSimplifiedXML) -------------------------------------------------------------------------------- /app/jfrog-logs-04-2025.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs-04-2025.tar.gz -------------------------------------------------------------------------------- /app/jfrog-logs.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs.tar.gz -------------------------------------------------------------------------------- /app/jfrog-logs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make via slack, issue, email, or any other method with the owners of this repository before making a change. 4 | 5 | Note we have a code of conduct, please follow it in all your interactions with the project. 6 | 7 | ## Pull Request Process 8 | 9 | Ensure any install or build dependencies are removed before the end of the layer when doing a build. 10 | Update the README.md with details of changes to the interface, this includes new environment variables, exposed ports, useful file locations and container parameters. 11 | Increase the version numbers in any examples files and the README.md to the new version that this Pull Request would represent. The versioning scheme we use is SemVer. 12 | You may merge the Pull Request in once you have the sign-off of one other developer. 13 | 14 | ## Code of Conduct 15 | ### Our Pledge 16 | 17 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
18 | 19 | ### Our Standards 20 | 21 | Examples of behavior that contributes to creating a positive environment include: 22 | 23 | Using welcoming and inclusive language 24 | Being respectful of differing viewpoints and experiences 25 | Gracefully accepting constructive criticism 26 | Focusing on what is best for the company 27 | Showing empathy towards other colleagues 28 | 29 | Examples of unacceptable behavior by participants include: 30 | 31 | The use of sexualized language or imagery and unwelcome sexual attention or advances 32 | Trolling, insulting/derogatory comments, and personal or political attacks 33 | Public or private harassment 34 | Publishing others' private information, such as a physical or electronic address, without explicit permission 35 | Other conduct which could reasonably be considered inappropriate in a professional setting 36 | 37 | ### Our Responsibilities 38 | 39 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 40 | 41 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 42 | 43 | ## Scope 44 | 45 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project. Examples of representing a project include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 46 | 47 | ## Enforcement 48 | 49 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at Slack #xray_splunk . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 50 | 51 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 52 | 53 | Attribution 54 | 55 | This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at http://contributor-covenant.org/version/1/4 56 | -------------------------------------------------------------------------------- /app/jfrog-logs/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /app/jfrog-logs/README.md: -------------------------------------------------------------------------------- 1 | # JFrog Platform Log Analytics Splunk App 2 | 3 | ## Getting Started 4 | Install the app in your Splunk instance. Then restart your Splunk instance by going to _Server Controls > Restart_. 5 | 6 | ## Splunk Setup 7 | 1. Create new Events index `jfrog_splunk` (or your custom name) at _Settings > Indexes > New Index > Save_ 8 | 2. Create new Metrics index `jfrog_splunk_metrics` (or your custom name) at _Settings > Indexes > New Index > Metrics > Save_ 9 | 3. Create a new HTTP Event Collector data input for logs at _Settings > Data Inputs > HTTP Event Collector > New Token > jfrog_splunk index > Save_ 10 | 4. Create a new HTTP Event Collector data input for metrics at _Settings > Data Inputs > HTTP Event Collector > New Token > jfrog_splunk_metrics index > Save_ 11 | 12 | **Note:** You can customize the index names by setting the `SPLUNK_LOGS_INDEX` and `SPLUNK_METRICS_INDEX` environment variables in your configuration. 13 | 14 | ## Setup Fluentd 15 | FluentD is used to send log events to Splunk. This [repo](https://github.com/jfrog/log-analytics-splunk) contains instructions on various installations options for Fluentd as a logging agent. 16 | 17 | Download the .env file from [here](https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/jfrog.env) and fill in the jfrog.env file with Splunk and JPD information 18 | 19 | ``` 20 | export JF_PRODUCT_DATA_INTERNAL=JF_PRODUCT_DATA_INTERNAL 21 | export SPLUNK_COM_PROTOCOL=http 22 | export SPLUNK_HEC_HOST=splunk.example.com 23 | export SPLUNK_HEC_PORT=8088 24 | export SPLUNK_HEC_TOKEN=SPLUNK_HEC_TOKEN 25 | export SPLUNK_METRICS_HEC_TOKEN=SPLUNK_METRICS_HEC_TOKEN 26 | export SPLUNK_LOGS_INDEX=jfrog_splunk 27 | export SPLUNK_METRICS_INDEX=jfrog_splunk_metrics 28 | export SPLUNK_INSECURE_SSL=false 29 | export SPLUNK_VERIFY_SSL=true 30 | export SPLUNK_COMPRESS_DATA=true 31 | export JPD_URL=http://abc.jfrog.io 32 | export JPD_ADMIN_USERNAME=admin 33 | export JFROG_ADMIN_TOKEN=JFROG_ADMIN_TOKEN 34 | export COMMON_JPD=false 35 | ``` 36 | 37 | * **JF_PRODUCT_DATA_INTERNAL**: The environment variable JF_PRODUCT_DATA_INTERNAL must be defined to the correct location. 
For each JFrog service you will find its active log files in the `$JFROG_HOME//var/log` directory 38 | * **SPLUNK_COM_PROTOCOL**: HTTP Scheme, http or https 39 | * **SPLUNK_HEC_HOST**: Splunk Instance URL 40 | * **SPLUNK_HEC_PORT**: Splunk HEC configured port 41 | * **SPLUNK_HEC_TOKEN**: Splunk HEC Token for sending logs to Splunk 42 | * **SPLUNK_METRICS_HEC_TOKEN**: Splunk HEC Token for sending metrics to Splunk 43 | * **SPLUNK_LOGS_INDEX**: Splunk index name for storing logs (default: jfrog_splunk) 44 | * **SPLUNK_METRICS_INDEX**: Splunk index name for storing metrics (default: jfrog_splunk_metrics) 45 | * **SPLUNK_INSECURE_SSL**: false for test environments only or if http scheme 46 | * **SPLUNK_VERIFY_SSL**: false for disabling ssl validation (useful for proxy forwarding or bypassing ssl certificate validation) 47 | * **SPLUNK_COMPRESS_DATA**: true for compressing logs and metrics json payloads on outbound to Splunk 48 | * **JPD_URL**: Artifactory JPD URL of the format `http://` 49 | * **JPD_ADMIN_USERNAME**: Artifactory username for authentication 50 | * **JFROG_ADMIN_TOKEN**: Artifactory [Access Token](https://jfrog.com/help/r/how-to-generate-an-access-token-video/artifactory-creating-access-tokens-in-artifactory) for authentication 51 | * **COMMON_JPD**: This flag should be set as true only for non-kubernetes installations or installations where JPD base URL is same to access both Artifactory and Xray (ex: https://sample_base_url/artifactory or https://sample_base_url/xray) 52 | 53 | ## Dashboards 54 | 55 | ### Artifactory dashboard 56 | JFrog Artifactory Dashboard is divided into multiple sections Application, Audit, Requests, Docker, System Metrics, Heap Metrics and Connection Metrics 57 | 58 | * **Application** - This section tracks Log Volume(information about different log sources) and Artifactory Errors over time(bursts of application errors that may otherwise go undetected) 59 | * **Audit** - This section tracks audit logs help you determine who is accessing your Artifactory instance and from where. These can help you track potentially malicious requests or processes (such as CI jobs) using expired credentials. 60 | * **Requests** - This section tracks HTTP response codes, Top 10 IP addresses for uploads and downloads 61 | * **Docker** - To monitor Dockerhub pull requests users should have a Dockerhub account either paid or free. Free accounts allow up to 200 pull requests per 6 hour window. Various widgets have been added in the new Docker tab under Artifactory to help monitor your Dockerhub pull requests. An alert is also available to enable if desired that will allow you to send emails or add outbound webhooks through configuration to be notified when you exceed the configurable threshold. 62 | * **System Metrics** - This section tracks CPU Usage, System Memory and Disk Usage metrics 63 | * **Heap Metrics** - This section tracks Heap Memory and Garbage Collection 64 | * **Connection Metrics** - This section tracks Database connections and HTTP Connections 65 | 66 | ### Xray dashboard 67 | JFrog Xray Dashboard is divided into three sections Logs, Violations and Metrics 68 | 69 | * **Logs** - This section provides a summary of access, service and traffic log volumes associated with Xray. Additionally, customers are also able to track various HTTP response codes, HTTP 500 errors, and log errors for greater operational insight 70 | * **Violations** - This section provides an aggregated summary of all the license violations and security vulnerabilities found by Xray. 
Information is segment by watch policies and rules. Trending information is provided on the type and severity of violations over time, as well as, insights on most frequently occurring CVEs, top impacted artifacts and components. 71 | * **Metrics** - This section tracks CPU usage, System Memory, Disk Usage, Heap Memory and Database Connections 72 | 73 | ## CIM Compatibility 74 | Log data from JFrog platform logs is translated to pre-defined Common Information Models (CIM) compatible with Splunk. This compatibility enables new advanced features where users can search and access JFrog log data that is compatible with data models. For example 75 | 76 | ```text 77 | | datamodel Web Web search 78 | | datamodel Change_Analysis All_Changes search 79 | | datamodel Vulnerabilities Vulnerabilities search 80 | ``` 81 | 82 | ## Additional Setup 83 | For complete instructions on setup of the integration between JFrog Artifactory & Xray to Splunk visit our Github [repo](https://github.com/jfrog/log-analytics-splunk) 84 | -------------------------------------------------------------------------------- /app/jfrog-logs/app.manifest: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": "2.0.0", 3 | "info": { 4 | "title": "JFrog Platform Log Analytics, Violations and Metrics", 5 | "id": { 6 | "group": null, 7 | "name": "jfrog-logs", 8 | "version": "1.2.9" 9 | }, 10 | "author": [ 11 | { 12 | "name": "JFrog", 13 | "email": null, 14 | "company": null 15 | } 16 | ], 17 | "releaseDate": null, 18 | "description": "This app helps process extracted logs for the JFrog Platform, the universal, hybrid end-to-end DevOps platform. The app includes a diagnostic dashboard view for JFrog Artifactory. For more information see our blog: https://jfrog.com/blog/unified-jfrog-log-analytics-with-splunk/", 19 | "classification": { 20 | "intendedAudience": null, 21 | "categories": [], 22 | "developmentStatus": null 23 | }, 24 | "commonInformationModels": null, 25 | "license": { 26 | "name": null, 27 | "text": null, 28 | "uri": null 29 | }, 30 | "privacyPolicy": { 31 | "name": null, 32 | "text": null, 33 | "uri": null 34 | }, 35 | "releaseNotes": { 36 | "name": null, 37 | "text": "./README.md", 38 | "uri": null 39 | } 40 | }, 41 | "dependencies": null, 42 | "tasks": null, 43 | "inputGroups": null, 44 | "incompatibleApps": null, 45 | "platformRequirements": null, 46 | "supportedDeployments": [ 47 | "_standalone", 48 | "_distributed" 49 | ], 50 | "targetWorkloads": null 51 | } 52 | -------------------------------------------------------------------------------- /app/jfrog-logs/appserver/static/jfrog-logs.css: -------------------------------------------------------------------------------- 1 | #tabs { 2 | border-bottom: 0px; 3 | margin-bottom: 5px; 4 | background-color: #3D444D; 5 | } 6 | 7 | #tabs > .dashboard-cell > .dashboard-panel{ 8 | margin-bottom: 0px; 9 | border-bottom: 0px; 10 | background-color: #3D444D; 11 | } 12 | 13 | .nav-tabs > li > a:focus { 14 | box-shadow: none !important; 15 | } 16 | 17 | .nav-tabs>li.active>a:before { 18 | background-color: #5BC05C; 19 | } 20 | 21 | .nav-tabs>li>a { 22 | color: white; 23 | } -------------------------------------------------------------------------------- /app/jfrog-logs/appserver/static/jfrog-logs.js: -------------------------------------------------------------------------------- 1 | require(['jquery','underscore','splunkjs/mvc','bootstrap.tab','splunkjs/mvc/simplexml/ready!'], 2 | function($, _, mvc){ 3 | 4 | var 
tabsInitialzed = []; 5 | var metricsKey = 'metrics'; 6 | var spanKey = 'showspan'; 7 | 8 | /** 9 | * The below defines the tab handling logic. 10 | */ 11 | 12 | /** 13 | * This hides the content associated with the tabs. 14 | * 15 | * The normal, auto-magical Bootstrap tab processing doesn't work for us since it requires a particular 16 | * layout of HTML that we cannot use without converting the view entirely to simpleXML. So, we are 17 | * going to handle it ourselves. 18 | * @param {string} tabSetClass the 19 | */ 20 | var hideTabTargets = function(tabSetClass) { 21 | 22 | var tabs = $('a[data-elements]'); 23 | 24 | // If we are only applying this to a particular set of tabs, then limit the selector accordingly 25 | if (typeof tabSetClass !== 'undefined' && tabSetClass) { 26 | tabs = $('a.' + tabSetClass + '[data-elements]'); 27 | } 28 | 29 | // Go through each toggle tab 30 | for (var c = 0; c < tabs.length; c++) { 31 | 32 | // Hide the targets associated with the tab 33 | var targets = $(tabs[c]).data("elements").split(","); 34 | 35 | for (var d = 0; d < targets.length; d++) { 36 | $('#' + targets[d], this.$el).hide(); 37 | } 38 | } 39 | }; 40 | 41 | /** 42 | * Force a re-render of the panels with the given row ID. 43 | * 44 | * @param {string} row_id The ID of the row to force a rerender on 45 | * @param {bool} force Force the tab to re-render even if it was already rendered once (defaults to true) 46 | */ 47 | var rerenderPanels = function(row_id, force){ 48 | 49 | // Set a default argument for dont_rerender_until_needed 50 | if( typeof force === 'undefined'){ 51 | force = true; 52 | } 53 | 54 | // Don't do both if the panel was already rendered 55 | if( !force && _.contains(tabsInitialzed, row_id) ){ 56 | return; 57 | } 58 | 59 | // Get the elements so that we can find the components to re-render 60 | var elements = $('#' + row_id + ' .dashboard-element'); 61 | 62 | // Iterate the list and re-render the components so that they fill the screen 63 | for(var d = 0; d < elements.length; d++){ 64 | 65 | // Determine if this is re-sizable 66 | if( $('#' + row_id + ' .ui-resizable').length > 0){ 67 | 68 | var component = mvc.Components.get(elements[d].id); 69 | 70 | if(component){ 71 | component.render(); 72 | } 73 | } 74 | } 75 | 76 | // Remember that we initialized this tab 77 | tabsInitialzed.push(row_id); 78 | }; 79 | 80 | /** 81 | * Handles the selection of a partiular tab. 
82 | * 83 | * @param {*} e 84 | */ 85 | var selectTab = function (e) { 86 | // Update which tab is considered active 87 | $('#tabs > li.active').removeClass("active"); 88 | $(e.target).closest("li").addClass("active"); 89 | 90 | // clearTabControlTokens(); 91 | setActiveTabToken(); 92 | setSpanActiveForMetrics(); 93 | 94 | // Stop if the tabs have no elements 95 | if( $(e.target).data("elements") === undefined ){ 96 | console.warn("Yikes, the clicked tab has no elements to hide!"); 97 | return; 98 | } 99 | 100 | // Determine if the set of tabs has a restriction on the classes to manipulate 101 | var tabSet = null; 102 | 103 | if ($(e.target).data("tab-set") !== undefined) { 104 | tabSet = $(e.target).data("tab-set"); 105 | } 106 | 107 | // Get the IDs that we should enable for this tab 108 | var toToggle = $(e.target).data("elements").split(","); 109 | 110 | // Hide the tab content by default 111 | hideTabTargets(tabSet); 112 | 113 | // Now show this tabs toggle elements 114 | for(var c = 0; c < toToggle.length; c++){ 115 | 116 | // Show the items 117 | $('#' + toToggle[c], this.$el).show(); 118 | 119 | // Re-render the panels under the item if necessary 120 | rerenderPanels(toToggle[c]); 121 | } 122 | 123 | }; 124 | 125 | /** 126 | * The code below handles the tokens that trigger when searches are kicked off for a tab. 127 | */ 128 | 129 | /** 130 | * Get the tab token for a given tab name 131 | * @param {string} tab_name The name of the tab 132 | */ 133 | var getTabTokenForTabName = function(tab_name){ 134 | return tab_name; 135 | }; 136 | 137 | // Get all of the possible tab control tokens 138 | var getTabTokens = function(){ 139 | var tabTokens = []; 140 | 141 | var tabLinks = $('#tabs > li > a'); 142 | 143 | for(var c = 0; c < tabLinks.length; c++){ 144 | tabTokens.push( getTabTokenForTabName( $(tabLinks[c]).data('token') ) ); 145 | } 146 | 147 | return tabTokens; 148 | }; 149 | 150 | /** 151 | * Clear all but the active tab control tokens 152 | */ 153 | var clearTabControlTokens = function(){ 154 | console.info("Clearing tab control tokens"); 155 | 156 | //tabsInitialzed = []; 157 | var tabTokens = getTabTokens(); 158 | var activeTabToken = getActiveTabToken(); 159 | var tokens = mvc.Components.getInstance("submitted"); 160 | 161 | // Clear the tokens for all tabs except for the active one 162 | for(var c = 0; c < tabTokens.length; c++){ 163 | 164 | if( activeTabToken !== tabTokens[c] ){ 165 | tokens.set(tabTokens[c], undefined); 166 | } 167 | } 168 | }; 169 | 170 | /** 171 | * Get the tab control token for the active tab 172 | */ 173 | var getActiveTabToken = function(){ 174 | return $('#tabs > li.active > a').data('token'); 175 | }; 176 | 177 | /** 178 | * Set the token for the active tab 179 | */ 180 | var setActiveTabToken = function(){ 181 | var activeTabToken = getActiveTabToken(); 182 | var tokens = mvc.Components.getInstance("submitted"); 183 | 184 | if(activeTabToken){ 185 | // Set each token if necessary 186 | activeTabToken.split(",").forEach(function(token){ 187 | 188 | // If the token wasn't set, set it so that the searches can run 189 | if(!tokens.toJSON()[token] || tokens.toJSON()[token] == undefined){ 190 | tokens.set(token, ""); 191 | } 192 | }); 193 | } 194 | }; 195 | 196 | var setSpanActiveForMetrics = function(){ 197 | var activeTabToken = getActiveTabToken(); 198 | var tokens = mvc.Components.getInstance("submitted"); 199 | if(activeTabToken.indexOf(metricsKey) != -1){ 200 | tokens.set(spanKey, ""); 201 | } else { 202 | tokens.unset(spanKey); 203 | } 204 | }; 205 
| 206 | /** 207 | * Handle the setting of the token for the clicked tab. 208 | * @param {*} e 209 | */ 210 | var setTokenForTab = function(e){ 211 | 212 | // Get the token for the tab 213 | var tabToken = getTabTokenForTabName($(e.target).data('token')); 214 | 215 | // Set the token 216 | var tokens = mvc.Components.getInstance("submitted"); 217 | tokens.set(tabToken, ''); 218 | 219 | console.info("Set the token for the active tab (" + tabToken + ")"); 220 | }; 221 | 222 | /** 223 | * Perform the initial setup for making the tabs work. 224 | */ 225 | var firstTimeTabSetup = function() { 226 | $('a.toggle-tab').on('shown', setTokenForTab); 227 | 228 | // Wire up the function to show the appropriate tab 229 | $('a.toggle-tab').on('click shown', selectTab); 230 | 231 | // Show the first tab in each tab set 232 | $.each($('.nav-tabs'), function(index, value) { 233 | $('.toggle-tab', value).first().trigger('shown'); 234 | }); 235 | 236 | // Make the tabs into tabs 237 | $('#tabs', this.$el).tab(); 238 | 239 | // Wire up the tab control tokenization 240 | var submit = mvc.Components.get("submit"); 241 | 242 | if(submit){ 243 | submit.on("submit", function() { 244 | clearTabControlTokens(); 245 | }); 246 | } 247 | 248 | // Set the token for the selected tab 249 | setActiveTabToken(); 250 | setSpanActiveForMetrics(); 251 | }; 252 | 253 | firstTimeTabSetup(); 254 | }); -------------------------------------------------------------------------------- /app/jfrog-logs/bin/README: -------------------------------------------------------------------------------- 1 | This is where you put any scripts you want to add to this app. 2 | -------------------------------------------------------------------------------- /app/jfrog-logs/default/app.conf: -------------------------------------------------------------------------------- 1 | [default] 2 | 3 | [install] 4 | is_configured = 0 5 | state_change_requires_restart = true 6 | 7 | [launcher] 8 | author = JFrog 9 | version = 1.2.9 10 | description = This app helps process extracted logs and metrics for the JFrog Platform, the universal, hybrid end-to-end DevOps platform. The app includes a diagnostic dashboard view for JFrog Artifactory. For more information see our blog: https://jfrog.com/blog/unified-jfrog-log-analytics-with-splunk/ 11 | 12 | [ui] 13 | label = JFrog Platform Log Analytics, Violations and Metrics 14 | is_visible = 1 15 | 16 | [package] 17 | id = jfrog-logs 18 | check_for_updates = 1 19 | 20 | -------------------------------------------------------------------------------- /app/jfrog-logs/default/data/ui/nav/default.xml: -------------------------------------------------------------------------------- 1 | 6 | -------------------------------------------------------------------------------- /app/jfrog-logs/default/data/ui/views/README: -------------------------------------------------------------------------------- 1 | Add all the views that your app needs in this directory 2 | -------------------------------------------------------------------------------- /app/jfrog-logs/default/data/ui/views/artifactory.xml: -------------------------------------------------------------------------------- 1 |
2 | 3 | Artifactory Log Analytics and Platform Metrics 4 |
5 | 6 | 7 | 8 | -24h@h 9 | now 10 | 11 | 12 | 13 | 14 | auto 15 | 5s 16 | 10s 17 | 30s 18 | 1m 19 | 5m 20 | 10m 21 | 30m 22 | 1h 23 | auto 24 | 25 | if($value$ == "auto", "", "span=".$value$) 26 | "span=".$value$ 27 | 28 | 29 |
30 | 31 | 32 | 33 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | Log Volume 63 | 64 | `default_index` $application_control$ (sourcetype!="NULL" OR log_source!="NULL") (sourcetype!="jfrog.xray.*" OR log_source!="jfrog.xray.*") | timechart count by sourcetype 65 | $field1.earliest$ 66 | $field1.latest$ 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | Log Errors 80 | 81 | `default_index` $application_control$ (sourcetype="jfrog.rt.artifactory.service" OR log_source="jfrog.rt.artifactory.service") log_level="ERROR" | timechart count by log_level 82 | $field1.earliest$ 83 | $field1.latest$ 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | Audit Actions By Users 96 | 97 | `default_index` $audit_control$ (sourcetype="jfrog.rt.access.audit" OR log_source="jfrog.rt.access.audit") user!="UNKNOWN" | stats count by user 98 | $field1.earliest$ 99 | $field1.latest$ 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | Denied Logins By IP 109 | 110 | `default_index` $audit_control$ (sourcetype="jfrog.rt.artifactory.access" OR log_source="jfrog.rt.artifactory.access") action_response="DENIED LOGIN" | stats count by ip 111 | $field1.earliest$ 112 | $field1.latest$ 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | Denied Actions By Username 124 | 125 | `default_index` $audit_control$ (sourcetype="jfrog.rt.artifactory.access" OR log_source="jfrog.rt.artifactory.access") action_response="DENIED*" username!="NA " | stats count by username 126 | $field1.earliest$ 127 | $field1.latest$ 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | Denied Actions By IP 137 | 138 | `default_index` $audit_control$ (sourcetype="jfrog.rt.artifactory.access" OR log_source="jfrog.rt.artifactory.access") action_response="DENIED*" | stats count by ip 139 | $field1.earliest$ 140 | $field1.latest$ 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | Denied Logins By Username and IP 152 | 153 | `default_index` $audit_control$ (sourcetype="jfrog.rt.artifactory.access" OR log_source="jfrog.rt.artifactory.access") action_response="DENIED LOGIN" username!="NA " | stats count by ip,username 154 | $field1.earliest$ 155 | $field1.latest$ 156 | 157 | 158 | 159 |
160 |
161 | 162 | 163 | Accepted Deploys By Username 164 | 165 | `default_index` $audit_control$ (sourcetype="jfrog.rt.artifactory.access" OR log_source="jfrog.rt.artifactory.access") action_response="ACCEPTED DEPLOY" | stats count by username 166 | $field1.earliest$ 167 | $field1.latest$ 168 | 169 | 170 | 171 | 172 | 173 | 174 |
175 | 176 | 177 | 178 | Dockerhub Pull Requests Trends Per 6 Hours 179 | 180 | `default_index` $docker_control$ "downloading" (log_source="jfrog.rt.artifactory.service" OR sourcetype="jfrog.rt.artifactory.service") "manifests/" "docker.io" | spath message | search message !="*/manifests/sha256:*" | timechart count(message) span=6h as DockerPullRequests 181 | $field1.earliest$ 182 | $field1.latest$ 183 | 184 | 185 | 186 | 187 | 188 | 189 | Docker Repositories Cache Hit Ratio 190 | 191 | `default_index` $docker_control$ (log_source="jfrog.rt.artifactory.access" OR sourcetype="jfrog.rt.artifactory.access") action_response="ACCEPTED DOWNLOAD" "manifest.json" 192 | | stats count as aCount | appendcols [search `default_index` $docker_control$ (log_source="jfrog.rt.artifactory.access" OR sourcetype="jfrog.rt.artifactory.access") action_response="ACCEPTED DEPLOY" "list\.manifest" "*-cache" | stats count as bCount ] | eval pct=bCount/aCount | eval inversePct=1-pct | fields - aCount,bCount,inversePct 193 | $field1.earliest$ 194 | $field1.latest$ 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | Dockerhub Pull Requests in rolling 6 Hr window 208 | 209 | `default_index` $docker_control$ "downloading" (log_source="jfrog.rt.artifactory.service" OR sourcetype="jfrog.rt.artifactory.service") "manifests/" "docker.io" | spath message | search message !="*/manifests/sha256:*" | timechart span=1h count(message) as Count | streamstats sum(Count) as Count window=6 210 | $field1.earliest$ 211 | $field1.latest$ 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | Dockerhub Pull Requests Total 226 | 227 | `default_index` $docker_control$ "downloading" (log_source="jfrog.rt.artifactory.service" OR sourcetype="jfrog.rt.artifactory.service") "manifests/" "docker.io" | spath message | search message !="*/manifests/sha256:*" | timechart count(message) as Count 228 | $field1.earliest$ 229 | $field1.latest$ 230 | 231 | 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | Top 10 Users By Docker Pulls 244 | 245 | `default_index` $docker_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") request_url="/api/docker/*/manifests/*" request_method=HEAD | top limit=10 username 246 | $field1.earliest$ 247 | $field1.latest$ 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | Top 10 IPs By Docker Pulls 260 | 261 | `default_index` $docker_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") request_url="/api/docker/*/manifests/*" request_method=HEAD | top limit=10 remote_address 262 | $field1.earliest$ 263 | $field1.latest$ 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | 276 | Accessed Images 277 | 278 | `default_index` $docker_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") request_url="/api/docker/*/manifests/*" (request_method=HEAD OR request_method=PUT OR request_method=DELETE) repo!="NULL" image!="NULL" repo!="" image!="" repo!="latest" | timechart count by image 279 | $field1.earliest$ 280 | $field1.latest$ 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | Accessed Repos 293 | 294 | `default_index` $docker_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") request_url="/api/docker/*/manifests/*" (request_method=HEAD OR request_method=PUT OR request_method=DELETE) repo!="NULL" image!="NULL" repo!="" 
image!="" repo!="latest" | timechart count by repo 295 | $field1.earliest$ 296 | $field1.latest$ 297 | 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | 307 | 308 | 309 | 310 | Data Transfers (GBs) Uploads By Repo 311 | 312 | `default_index` $docker_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") request_url="/api/docker/*/uploads/*" repo!="NULL" image!="NULL" repo!="" image!="" repo!="latest" request_method=PATCH | eval gb=request_content_length/1073741824 | stats sum(gb) by repo 313 | $field1.earliest$ 314 | $field1.latest$ 315 | 316 | 317 | 318 | 319 | 320 | 321 | 322 | 323 | 324 | 325 | 326 | Data Transfers (GBs) Downloads By Repo 327 | 328 | `default_index` $docker_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") (request_url="/api/docker/*/blobs/*" OR request_url="/api/docker/*/manifests/*") repo!="NULL" image!="NULL" repo!="" image!="" repo!="latest" (request_method=HEAD OR equest_method=GET) | eval gb=response_content_length/1073741824 | stats sum(gb) by repo 329 | $field1.earliest$ 330 | $field1.latest$ 331 | 332 | 333 | 334 | 335 | 336 | 337 | 338 | 339 | 340 | 341 | 342 | 343 | 344 | 345 | HTTP 500 Errors 346 | 347 | `default_index` $requests_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") return_status="5*" | timechart count by return_status 348 | $field1.earliest$ 349 | $field1.latest$ 350 | 351 | 352 | 353 | 354 | 355 | 356 | 357 | 358 | 359 | 360 | 361 | HTTP Response Codes 362 | 363 | `default_index` $requests_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") | timechart count by return_status 364 | $field1.earliest$ 365 | $field1.latest$ 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | 377 | 378 | 379 | Top 10 IPs By Uploads 380 | 381 | `default_index` $requests_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") response_content_length!="-1" | eval gb=response_content_length/1073741824 | stats sum(gb) as upload_size by remote_address | top limit=10 remote_address,upload_size | fields - count,percent 382 | $field1.earliest$ 383 | $field1.latest$ 384 | 385 | 386 | 387 | 388 | 389 | 390 | 391 | 392 | 393 | 394 | 395 | Top 10 IPs Downloads 396 | 397 | `default_index` $requests_control$ (sourcetype="jfrog.rt.artifactory.request" OR log_source="jfrog.rt.artifactory.request") request_content_length!="-1" | eval gb=request_content_length/1073741824 | stats sum(gb) as download_size by remote_address | top limit=10 remote_address,download_size | fields - count,percent 398 | $field1.earliest$ 399 | $field1.latest$ 400 | 401 | 402 | 403 | 404 | 405 | 406 | 407 | 408 | 409 | 410 | 411 | 412 | CPU Usage 413 | 414 | | mstats avg("jfrog.artifactory.jfob_sys_cpu_ratio") AS cpu_ratio prestats=false WHERE `default_metrics_index` $mstats_span$ 415 | | eval cpu_pct = (cpu_ratio * 100) 416 | | timechart avg(cpu_pct) AS "CPU %" $timechart_span$ 417 | | fields - _span* 418 | $field1.earliest$ 419 | $field1.latest$ 420 | 421 | 422 | 423 | 424 | 425 | 426 | 427 | 428 | 429 | 430 | 431 | 432 | 433 | 434 | 435 | 436 | 437 | 438 | System Memory 439 | 440 | | mstats avg("jfrog.artifactory.jfob_sys_memory_used_bytes") AS avg_used, avg("jfrog.artifactory.jfob_sys_memory_free_bytes") AS avg_free prestats=false WHERE `default_metrics_index` $mstats_span$ 441 | | eval mem_used_gb = (avg_used / 1024 / 1024 / 1024), mem_free_gb = (avg_free / 1024 / 
1024 / 1024) 442 | | timechart avg(mem_used_gb) AS "Sys Used(GB)", avg(mem_free_gb) AS "Sys Free(GB)" $timechart_span$ 443 | | fields - _span* 444 | $field1.earliest$ 445 | $field1.latest$ 446 | 447 | 448 | 449 | 450 | 451 | 452 | 453 | 454 | 455 | 456 | 457 | 458 | 459 | 460 | 461 | 462 | 463 | 464 | Disk Usage 465 | 466 | | mstats avg("jfrog.artifactory.jfob_app_disk_free_bytes") AS avg_disk_free, avg("jfrog.artifactory.jfob_app_disk_used_bytes") AS avg_disk_used, avg("jfrog.artifactory.jfrt_storage_current_total_size_bytes") AS avg_disk_tot prestats=false WHERE `default_metrics_index` $mstats_span$ 467 | | eval disk_free_gb = (avg_disk_free / 1024 / 1024 / 1024), disk_used_gb = (avg_disk_used / 1024 / 1024 / 1024), disk_tot_gb = (avg_disk_tot/ 1024 / 1024 / 1024) 468 | | timechart avg(disk_free_gb) AS "Disk Free(GB)", avg(disk_used_gb) AS "Disk Used(GB)", avg(disk_tot_gb) AS "Disk Total(GB)" $timechart_span$ 469 | | fields - _span* 470 | $field1.earliest$ 471 | $field1.latest$ 472 | 473 | 474 | 475 | 476 | 477 | 478 | 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | 487 | 488 | 489 | 490 | Heap Memory 491 | 492 | | mstats avg("jfrog.artifactory.jfrt_runtime_heap_processors_total") AS heap_processors , avg("jfrog.artifactory.jfrt_runtime_heap_freememory_bytes") AS avg_heap_free_bytes, avg("jfrog.artifactory.jfrt_runtime_heap_maxmemory_bytes") AS avg_heap_max_bytes, avg("jfrog.artifactory.jfrt_runtime_heap_totalmemory_bytes") AS avg_heap_used_bytes prestats=false WHERE `default_metrics_index` $mstats_span$ 493 | | eval heap_pcrs = (heap_processors), avg_heap_free_gb = (avg_heap_free_bytes / 1024 / 1024 / 1024), avg_heap_max_gb = (avg_heap_max_bytes / 1024 / 1024 / 1024), avg_heap_used_gb = (avg_heap_used_bytes / 1024 / 1024 / 1024) 494 | | timechart avg(heap_pcrs) AS "No. 
of Processors", avg(avg_heap_free_gb) AS "Heap Free(GB)", avg(avg_heap_max_gb) AS "Heap Max(GB)", avg(avg_heap_used_gb) AS "Heap Used(GB)" $timechart_span$ 495 | | fields - _span* 496 | 497 | $field1.earliest$ 498 | $field1.latest$ 499 | 500 | 501 | 502 | 503 | 504 | 505 | 506 | 507 | 508 | 509 | 510 | 511 | 512 | 513 | 514 | 515 | 516 | Garbage Collection 517 | 518 | | mstats avg("jfrog.artifactory.jfrt_artifacts_gc_duration_seconds") AS gc_dur_secs, avg("jfrog.artifactory.jfrt_artifacts_gc_binaries_total") AS gc_binary_tot, avg("jfrog.artifactory.jfrt_artifacts_gc_size_cleaned_bytes") AS avg_gc_cleaned_bytes prestats=false WHERE `default_metrics_index` $mstats_span$ 519 | | eval gc_dur_min = (gc_dur_secs / 60), gc_bin_tot = (gc_binary_tot), avg_gc_cleaned_gb = (avg_gc_cleaned_bytes / 1024 / 1024 / 1024) 520 | | timechart avg(gc_dur_min) AS "GC Duration (Minutes)", avg(gc_bin_tot) AS "GC Cleaned(Binaries)", avg(avg_gc_cleaned_gb) AS "GC Cleaned(GB)" $timechart_span$ 521 | | fields - _span* 522 | 523 | $field1.earliest$ 524 | $field1.latest$ 525 | 526 | 527 | 528 | 529 | 530 | 531 | 532 | 533 | 534 | 535 | 536 | 537 | 538 | 539 | 540 | 541 | 542 | DB Connections 543 | 544 | | mstats avg("jfrog.artifactory.jfrt_db_connections_max_active_total") AS db_conn_max_act, avg("jfrog.artifactory.jfrt_db_connections_min_idle_total") AS db_conn_min_idle, avg("jfrog.artifactory.jfrt_db_connections_idle_total") AS db_conn_tot_idle, avg("jfrog.artifactory.jfrt_db_connections_active_total") AS db_conn_tot_act, prestats=false WHERE `default_metrics_index` $mstats_span$ 545 | | timechart avg(db_conn_min_idle) AS "Idle(Min)", avg(db_conn_tot_act) AS "Active", avg(db_conn_tot_idle) AS "Idle" $timechart_span$ 546 | | fields - _span* 547 | 548 | $field1.earliest$ 549 | $field1.latest$ 550 | 551 | 552 | 553 | 554 | 555 | 556 | 557 | 558 | 559 | 560 | 561 | 562 | 563 | 564 | 565 | 566 | 567 | HTTP Connections 568 | 569 | | mstats avg("jfrog.artifactory.jfrt_http_connections_max_total") AS http_conn_max_tot, avg("jfrog.artifactory.jfrt_http_connections_leased_total") AS http_conn_lease_tot, avg("jfrog.artifactory.jfrt_http_connections_pending_total") AS http_conn_pend_tot, avg("jfrog.artifactory.jfrt_http_connections_available_total") AS http_conn_ava_tot, prestats=false WHERE `default_metrics_index` $mstats_span$ 570 | | timechart avg(http_conn_lease_tot) AS "Leased", avg(http_conn_pend_tot) AS "Pending", avg(http_conn_ava_tot) AS "Available" $timechart_span$ 571 | | fields - _span* 572 | 573 | $field1.earliest$ 574 | $field1.latest$ 575 | 576 | 577 | 578 | 579 | 580 | 581 | 582 | 583 | 584 | 585 | 586 | 587 | 588 | 589 |
-------------------------------------------------------------------------------- /app/jfrog-logs/default/data/ui/views/xray.xml: -------------------------------------------------------------------------------- 1 |
2 | 3 | Xray Log Analytics and Platform Metrics 4 |
5 | 6 | 7 | 8 | -24h@h 9 | now 10 | 11 | 12 | 13 | 14 | auto 15 | 5s 16 | 10s 17 | 30s 18 | 1m 19 | 5m 20 | 10m 21 | 30m 22 | 1h 23 | auto 24 | 25 | if($value$ == "auto", "", "span=".$value$) 26 | "span=".$value$ 27 | 28 | 29 |
30 | 31 | 32 | 33 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | Log Volume 51 | 52 | `default_index` $logs_control$ (sourcetype!="NULL" OR log_source!="NULL") (sourcetype="jfrog.xray.*" OR log_source="jfrog.xray.*") | timechart count by sourcetype 53 | $field1.earliest$ 54 | $field1.latest$ 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | Log Errors 68 | 69 | `default_index` $logs_control$ (sourcetype="jfrog.xray.*.service" OR log_source="jfrog.xray.*.service") log_level="ERROR" | timechart count by log_level 70 | $field1.earliest$ 71 | $field1.latest$ 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | HTTP 500 Errors 84 | 85 | `default_index` $logs_control$ (sourcetype="jfrog.xray.xray.request" OR log_source="jfrog.xray.xray.request") return_status="5*" | timechart count by return_status 86 | $field1.earliest$ 87 | $field1.latest$ 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | HTTP Response Codes 98 | 99 | `default_index` $logs_control$ (sourcetype="jfrog.xray.xray.request" OR log_source="jfrog.xray.xray.request") | timechart count by return_status 100 | $field1.earliest$ 101 | $field1.latest$ 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | Watches 114 | 115 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats dc(signature) as watches 116 | $field1.earliest$ 117 | $field1.latest$ 118 | 1m 119 | delay 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | Vulnerabilities 129 | 130 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") category="Security" | stats count as Vulnerabilities 131 | $field1.earliest$ 132 | $field1.latest$ 133 | 1m 134 | delay 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | License Issues 143 | 144 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") category="License" | stats count 145 | $field1.earliest$ 146 | $field1.latest$ 147 | 1m 148 | delay 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | Violations 157 | 158 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats count 159 | $field1.earliest$ 160 | $field1.latest$ 161 | 1m 162 | delay 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | Infected Components 171 | 172 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats dc(infected_components{}) 173 | $field1.earliest$ 174 | $field1.latest$ 175 | 1m 176 | delay 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | Impacted Artifacts 185 | 186 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats dc(impacted_artifacts{}) 187 | $field1.earliest$ 188 | $field1.latest$ 189 | 1m 190 | delay 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | Violations per Watch 201 | 202 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats count by signature 203 | $field1.earliest$ 204 | $field1.latest$ 205 | 1m 206 | delay 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | Violations Severity 216 | 217 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR 
log_source="jfrog.xray.siem.vulnerabilities") severity!=Unknown $vulnerability_control$ | stats count by severity 218 | $field1.earliest$ 219 | $field1.latest$ 220 | 1m 221 | delay 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | 232 | Violations by Policy 233 | 234 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats count by matched_policies{}.policy 235 | $field1.earliest$ 236 | $field1.latest$ 237 | 1m 238 | delay 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | Violations by Rule 248 | 249 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats count by matched_policies{}.rule 250 | $field1.earliest$ 251 | $field1.latest$ 252 | 1m 253 | delay 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | Violation Types over Time (Stats) 265 | 266 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") category!=NULL $vulnerability_control$ | timechart count by category 267 | $field1.earliest$ 268 | $field1.latest$ 269 | 1m 270 | delay 271 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | 279 | 280 | Violations over Time(By Severity) 281 | 282 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") severity!="unknown" | timechart count by severity 283 | $field1.earliest$ 284 | $field1.latest$ 285 | 1m 286 | delay 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | Top Infected Components 300 | 301 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats count by infected_components{} | sort -count 302 | $field1.earliest$ 303 | $field1.latest$ 304 | 1m 305 | delay 306 | 307 | 308 | 309 | 310 | 311 | 312 | 313 | 314 | 315 | Top Impacted Artifacts 316 | 317 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") | stats count by impacted_artifacts{} | sort -count 318 | $field1.earliest$ 319 | $field1.latest$ 320 | 1m 321 | delay 322 | 323 | 324 | 325 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 333 | 334 | 335 | Top Impacted Artifacts by Count of User Downloads 336 | 337 | `default_index` $vulnerability_control$ log_source = "jfrog.rt.artifactory.access" action_response = "ACCEPTED DOWNLOAD" [search log_source="jfrog.xray.siem.vulnerabilities" impacted_artifacts{}=* | stats count by impacted_artifacts{} | rex field=impacted_artifacts{} "(?<impacted_artifacts>.*)" | return 500000 impacted_artifacts ] | stats count(username) by repo_path | rename repo_path as impacted_artifact 338 | $field1.earliest$ 339 | $field1.latest$ 340 | 1m 341 | delay 342 | 343 | 344 | 345 | 346 | 347 | 348 | 349 | 350 | 351 | Top Impacted Artifacts by Count of IP Downloads 352 | 353 | `default_index` $vulnerability_control$ log_source = "jfrog.rt.artifactory.access" action_response = "ACCEPTED DOWNLOAD" [search log_source="jfrog.xray.siem.vulnerabilities" impacted_artifacts{}=* | stats count by impacted_artifacts{} | rex field=impacted_artifacts{} "(?<impacted_artifacts>.*)" | return 500000 impacted_artifacts ] | stats count(ip) by repo_path | rename repo_path as impacted_artifact 354 | $field1.earliest$ 355 | $field1.latest$ 356 | 1m 357 | delay 358 | 359 | 360 | 361 | 362 
| 363 | 364 | 365 | 366 | 367 | 368 | 369 | Top Vulnerabilities 370 | 371 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") cve!="null" | stats count by cve | sort -count 372 | $field1.earliest$ 373 | $field1.latest$ 374 | 1m 375 | delay 376 | 377 | 378 | 379 | 380 | 381 | 382 | 383 | 384 | 385 | 386 | 387 | 388 | Violation Details 389 | 390 | `default_index` $vulnerability_control$ (sourcetype="jfrog.xray.siem.vulnerabilities" OR log_source="jfrog.xray.siem.vulnerabilities") impacted_artifacts{}=* | rex field=impacted_artifacts{} "(?<impacted_artifacts>.*)" | table impacted_artifacts{}, created, severity, cve, impacted_artifacts, signature, description, url, category | join type=outer impacted_artifacts [search `default_index` log_source = "jfrog.rt.artifactory.access" action_response = "ACCEPTED DOWNLOAD" | stats count(username) as user_count, count(ip) as ip_count by impacted_artifacts] | eval user_count=(if(user_count>0,user_count,0)) | eval ip_count=(if(ip_count>0,ip_count,0)) | fields - impacted_artifacts | sort -user_count 391 | $field1.earliest$ 392 | $field1.latest$ 393 | 1m 394 | delay 395 | 396 | 397 | 398 | 399 | 400 | search?q=%60default_index%60%20log_source%3D%22jfrog.rt.artifactory.access%22%20action_response%3D%22ACCEPTED%20DOWNLOAD%22%20%5Bsearch%20log_source%3D%22jfrog.xray.siem.vulnerabilities%22%20impacted_artifacts{}="$row.impacted_artifacts{}$"%20%7C%20stats%20count%20by%20impacted_artifacts%7B%7D%20%20%7C%20rex%20field%3Dimpacted_artifacts%7B%7D%20%22(%3F%3Crex_repo_path%3E.*)%22%20%7C%20return%20500000%20%24rex_repo_path%5D%20%7C%20table%20ip%2C%20_time%2C%20impacted_artifacts%2C%20username&earliest=$field1.earliest$&latest=$field1.latest$ 401 | 402 |
403 |
404 |
405 | 406 | 407 | 408 | 409 | CPU Usage 410 | 411 | | mstats avg("jfrog.xray.jfob_sys_cpu_ratio") AS cpu_ratio prestats=false WHERE `default_metrics_index` $mstats_span$ 412 | | eval cpu_pct = (cpu_ratio * 100) 413 | | timechart avg(cpu_pct) AS "CPU %" $timechart_span$ 414 | | fields - _span* 415 | 416 | $field1.earliest$ 417 | $field1.latest$ 418 | 419 | 420 | 421 | 422 | 423 | 424 | 425 | 426 | 427 | 428 | 429 | 430 | 431 | 432 | 433 | 434 | 435 | 436 | System Memory 437 | 438 | | mstats avg("jfrog.xray.jfob_sys_memory_used_bytes") AS avg_used, avg("jfrog.xray.sys_memory_free_bytes") AS avg_free prestats=false WHERE `default_metrics_index` $mstats_span$ 439 | | eval mem_used_gb = (avg_used / 1024 / 1024 / 1024), mem_free_gb = (avg_free / 1024 / 1024 / 1024) 440 | | timechart avg(mem_used_gb) AS "Sys Used(GB)", avg(mem_free_gb) AS "Sys Free(GB)" $timechart_span$ 441 | | fields - _span* 442 | 443 | $field1.earliest$ 444 | $field1.latest$ 445 | 446 | 447 | 448 | 449 | 450 | 451 | 452 | 453 | 454 | 455 | 456 | 457 | 458 | 459 | 460 | 461 | 462 | 463 | Disk Usage 464 | 465 | | mstats avg("jfrog.xray.jfob_app_disk_free_bytes") AS avg_disk_free, avg("jfrog.xray.app_disk_used_bytes") AS avg_disk_used prestats=false WHERE `default_metrics_index` $mstats_span$ 466 | | eval disk_free_gb = (avg_disk_free / 1024 / 1024 / 1024), disk_used_gb = (avg_disk_used / 1024 / 1024 / 1024) 467 | | timechart avg(disk_free_gb) AS "Disk Free(GB)", avg(disk_used_gb) AS "Disk Used(GB)" $timechart_span$ 468 | | fields - _span* 469 | 470 | $field1.earliest$ 471 | $field1.latest$ 472 | 473 | 474 | 475 | 476 | 477 | 478 | 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | 487 | 488 | 489 | 490 | Heap Memory 491 | 492 | | mstats avg("jfrog.xray.jfob_go_memstats_heap_objects_total") AS heap_objects , avg("jfrog.xray.go_memstats_heap_allocated_bytes") AS avg_heap_max_bytes, avg("jfrog.xray.go_memstats_heap_in_use_bytes") AS avg_heap_used_bytes prestats=false WHERE `default_metrics_index` $mstats_span$ 493 | | eval heap_objs = (heap_objects), avg_heap_max_gb = (avg_heap_max_bytes / 1024 / 1024 / 1024), avg_heap_used_gb = (avg_heap_used_bytes / 1024 / 1024 / 1024) 494 | | timechart avg(heap_objs) AS "No. of Objects", avg(avg_heap_max_gb) AS "Heap Max(GB)", avg(avg_heap_used_gb) AS "Heap Used(GB)" $timechart_span$ 495 | | fields - _span* 496 | 497 | $field1.earliest$ 498 | $field1.latest$ 499 | 500 | 501 | 502 | 503 | 504 | 505 | 506 | 507 | 508 | 509 | 510 | 511 | 512 | 513 | 514 | 540 | 541 | 542 | 543 | 544 | DB Connections 545 | 546 | | mstats avg("jfrog.xray.db_connection_pool_in_use_total") AS db_conn_max_act, avg("jfrog.xray.jfrt_db_connections_min_idle_total") AS db_conn_min_idle, avg("jfrog.xray.db_connection_pool_idle_total") AS db_conn_tot_idle, avg("jfrog.xray.db_connection_pool_max_open_total") AS db_conn_tot_act, prestats=false WHERE `default_metrics_index` $mstats_span$ 547 | | timechart avg(db_conn_min_idle) AS "Idle(Min)", avg(db_conn_tot_act) AS "Active", avg(db_conn_tot_idle) AS "Idle" $timechart_span$ 548 | | fields - _span* 549 | 550 | $field1.earliest$ 551 | $field1.latest$ 552 | 553 | 554 | 555 | 556 | 557 | 558 | 559 | 560 | 561 | 562 | 563 | 564 | 565 | 566 | 567 | 592 | 593 |
-------------------------------------------------------------------------------- /app/jfrog-logs/default/eventtypes.conf: -------------------------------------------------------------------------------- 1 | [access_audit_eventtype] 2 | search = `default_index` log_source="jfrog.rt.access.audit" 3 | 4 | [artifactory_request_eventtype] 5 | search = `default_index` log_source="jfrog.rt.artifactory.request" 6 | 7 | [router_request_eventtype] 8 | search = `default_index` log_source="jfrog.rt.router.request" 9 | 10 | [vulnerability_eventtype] 11 | search = `default_index` 12 | -------------------------------------------------------------------------------- /app/jfrog-logs/default/inputs.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs/default/inputs.conf -------------------------------------------------------------------------------- /app/jfrog-logs/default/inputs.conf.spec: -------------------------------------------------------------------------------- 1 | python.version = python3 2 | -------------------------------------------------------------------------------- /app/jfrog-logs/default/macros.conf: -------------------------------------------------------------------------------- 1 | [default_index] 2 | definition = index="$SPLUNK_LOGS_INDEX$" 3 | iseval = 0 4 | 5 | [default_metrics_index] 6 | definition = "index"="$SPLUNK_METRICS_INDEX$" 7 | iseval = 0 -------------------------------------------------------------------------------- /app/jfrog-logs/default/props.conf: -------------------------------------------------------------------------------- 1 | [jfrog.rt.router.request] 2 | FIELDALIAS-web_cim_to_router_request_logs = BackendAddr ASNEW dest ClientAddr ASNEW src DownstreamStatus ASNEW status RequestMethod ASNEW http_method "request_User-Agent" ASNEW http_user_agent 3 | EVAL-action = "allowed" 4 | 5 | [jfrog.rt.artifactory.request] 6 | FIELDALIAS-web_cim_to_artifactory_request_logs = hostname ASNEW dest remote_address ASNEW src request_method ASNEW http_method request_user_agent ASNEW http_user_agent return_status ASNEW status username ASNEW user 7 | EVAL-action = "allowed" 8 | 9 | [jfrog.rt.access.audit] 10 | FIELDALIAS-change_cim_to_access_audit_logs = data_changed ASNEW object_attrs entity_name ASNEW object_id event ASNEW object user_ip ASNEW src 11 | EVAL-change_type = "AAA" 12 | EVAL-command = "API" 13 | EVAL-dvc = "JFrog Access" 14 | EVAL-status = "success" 15 | EVAL-action = case(event_type == "C","created",event_type == "U","modified",event_type == "D","deleted") 16 | EVAL-object_category = case(event == "USR","user",event == "GRP","group",event == "TKN","user",event == "PRM","group") -------------------------------------------------------------------------------- /app/jfrog-logs/default/savedsearches.conf: -------------------------------------------------------------------------------- 1 | [default] 2 | [Dockerhub Rolling 6 Hr Window Alert] 3 | disabled = 1 4 | display.page.search.mode = verbose 5 | request.ui_dispatch_app = jfrog-logs 6 | enableSched = 1 7 | alert.severity = 4 8 | dispatch.latest_time = rt-0h 9 | description = Counts the total number of requests made to Dockerhub in a rolling 6 hour window and alerts if they exceed the new rate limits set by Docker. 
10 | search = * "downloading" log_source="jfrog.rt.artifactory.service" "manifests/" "docker.io" | spath message | search message !="downloading */manifests/sha256:*" 11 | relation = greater than 12 | alert.digest_mode = 0 13 | counttype = number of events 14 | alert.suppress.period = 30m 15 | cron_schedule = * * * * * 16 | dispatch.earliest_time = rt-6h 17 | alert.suppress = 1 18 | quantity = 200 19 | request.ui_dispatch_view = search 20 | alert.track = 1 21 | alert.suppress.fields = * "downloading" log_source="jfrog.rt.artifactory.service" "manifests/" "docker.io" | spath message | search message !=" docker-remote downloading */manifests/sha256:*" -------------------------------------------------------------------------------- /app/jfrog-logs/default/tags.conf: -------------------------------------------------------------------------------- 1 | [eventtype=access_audit_eventtype] 2 | change = enabled 3 | 4 | [eventtype=artifactory_request_eventtype] 5 | web = enabled 6 | 7 | [eventtype=router_request_eventtype] 8 | web = enabled 9 | 10 | [eventtype=vulnerability_eventtype] 11 | report = enabled 12 | vulnerability = enabled 13 | -------------------------------------------------------------------------------- /app/jfrog-logs/metadata/default.meta: -------------------------------------------------------------------------------- 1 | [] 2 | access = read : [ * ], write : [ admin, power ] 3 | export = system 4 | 5 | [app/ui] 6 | version = 8.0.3 7 | modtime = 1588963848.079049000 8 | 9 | [app/launcher] 10 | version = 8.0.3 11 | modtime = 1588963848.080299000 12 | 13 | [app/package/check_for_updates] 14 | version = 8.0.3 15 | modtime = 1588963848.080816000 16 | 17 | [views/jfrog] 18 | owner = admin 19 | version = 8.0.3 20 | modtime = 1588708421.478459000 21 | 22 | [app/install/install_source_checksum] 23 | version = 8.0.3 24 | modtime = 1588970236.816055000 25 | 26 | [eventtypes/access_audit_eventtype] 27 | export = system 28 | version = 8.1.2 29 | 30 | [tags/eventtype%3Daccess_audit_eventtype] 31 | export = system 32 | version = 8.1.2 33 | modtime = 1613598849.148678000 34 | 35 | [eventtypes/artifactory_request_eventtype] 36 | export = system 37 | version = 8.1.2 38 | modtime = 1613598854.140799000 39 | 40 | [tags/eventtype%3Dartifactory_request_eventtype] 41 | export = system 42 | version = 8.1.2 43 | modtime = 1613598854.143679000 44 | 45 | [eventtypes/router_request_eventtype] 46 | export = system 47 | version = 8.1.2 48 | modtime = 1613598861.092277000 49 | 50 | [tags/eventtype%3Drouter_request_eventtype] 51 | export = system 52 | version = 8.1.2 53 | modtime = 1613598861.094997000 54 | 55 | [props/jfrog.rt.access.audit/FIELDALIAS-change_cim_to_access_audit_logs] 56 | access = read : [ * ], write : [ * ] 57 | export = system 58 | version = 8.1.2 59 | modtime = 1613599097.780904000 60 | 61 | [props/jfrog.rt.artifactory.request/FIELDALIAS-web_cim_to_artifactory_request_logs] 62 | access = read : [ * ], write : [ * ] 63 | export = system 64 | version = 8.1.2 65 | modtime = 1613599104.926349000 66 | 67 | [props/jfrog.rt.router.request/FIELDALIAS-web_cim_to_router_request_logs] 68 | access = read : [ * ], write : [ * ] 69 | export = system 70 | version = 8.1.2 71 | modtime = 1613599110.071746000 72 | 73 | [props/jfrog.rt.access.audit/EVAL-action] 74 | access = read : [ * ], write : [ * ] 75 | export = system 76 | version = 8.1.2 77 | modtime = 1613599307.893122000 78 | 79 | [props/jfrog.rt.access.audit/EVAL-change_type] 80 | access = read : [ * ], write : [ * ] 81 | export = system 82 | version = 
8.1.2 83 | modtime = 1613599314.202966000 84 | 85 | [props/jfrog.rt.access.audit/EVAL-command] 86 | access = read : [ * ], write : [ * ] 87 | export = system 88 | version = 8.1.2 89 | modtime = 1613599320.421809000 90 | 91 | [props/jfrog.rt.access.audit/EVAL-dvc] 92 | access = read : [ * ], write : [ * ] 93 | export = system 94 | version = 8.1.2 95 | modtime = 1613599325.826373000 96 | 97 | [props/jfrog.rt.access.audit/EVAL-object_category] 98 | access = read : [ * ], write : [ * ] 99 | export = system 100 | version = 8.1.2 101 | modtime = 1613599331.126107000 102 | 103 | [props/jfrog.rt.access.audit/EVAL-status] 104 | access = read : [ * ], write : [ * ] 105 | export = system 106 | version = 8.1.2 107 | modtime = 1613599336.505381000 108 | 109 | [props/jfrog.rt.artifactory.request/EVAL-action] 110 | access = read : [ * ], write : [ * ] 111 | export = system 112 | version = 8.1.2 113 | modtime = 1613599342.916029000 114 | 115 | [props/jfrog.rt.router.request/EVAL-action] 116 | access = read : [ * ], write : [ * ] 117 | export = system 118 | version = 8.1.2 119 | modtime = 1613599349.267343000 120 | 121 | [eventtypes/vulnerability_eventtype] 122 | export = system 123 | version = 8.1.2 124 | modtime = 1613599932.281743000 125 | 126 | [tags/eventtype%3Dvulnerability_eventtype] 127 | export = system 128 | version = 8.1.2 129 | modtime = 1613599932.285464000 130 | 131 | [macros/default_index] 132 | export = none 133 | version = 8.1.2 134 | modtime = 1613600110.142305000 -------------------------------------------------------------------------------- /app/jfrog-logs/static/appIcon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs/static/appIcon.png -------------------------------------------------------------------------------- /app/jfrog-logs/static/appIconAlt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs/static/appIconAlt.png -------------------------------------------------------------------------------- /app/jfrog-logs/static/appIconAlt_2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs/static/appIconAlt_2x.png -------------------------------------------------------------------------------- /app/jfrog-logs/static/appIcon_2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs/static/appIcon_2x.png -------------------------------------------------------------------------------- /app/jfrog-logs/static/appLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs/static/appLogo.png -------------------------------------------------------------------------------- /app/jfrog-logs/static/appLogo_2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/log-analytics-splunk/4155e7bec1d6e3938c18a7fc6033252c19e11f58/app/jfrog-logs/static/appLogo_2x.png 
-------------------------------------------------------------------------------- /docker-build/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for bitnami/fluentd sidecar image with all the necessary plugins for our log analytic providers 2 | FROM bitnami/fluentd:1.18.0 3 | LABEL maintainer="Partner Engineering " 4 | 5 | ## Build time Arguments, short circuit them to ENV Variables so they are available at run time also 6 | ARG SOURCE=JFRT 7 | ARG TARGET=SPLUNK 8 | 9 | ## Environment Variables set by this docker file, there will be seperate env params set by a env file while running the containers 10 | ## For better maintainability always depend dockerfile code on the environment variables declared in this file to add more platforms 11 | ENV SRC_PLATFORM=$SOURCE 12 | ENV TGT_PLATFORM=$TARGET 13 | 14 | USER root 15 | 16 | ## Install JFrog Plugins 17 | RUN fluent-gem install fluent-plugin-concat 18 | RUN fluent-gem install fluent-plugin-splunk-hec 19 | RUN fluent-gem install fluent-plugin-jfrog-siem 20 | RUN fluent-gem install fluent-plugin-jfrog-metrics 21 | RUN fluent-gem install fluent-plugin-jfrog-send-metrics 22 | 23 | # Install prerequisites 24 | RUN apt-get update && apt-get install -y curl 25 | CMD /bin/bash 26 | 27 | ## Download Config Files 28 | RUN if [ "$SRC_PLATFORM" = "JFRT" ] ; then echo "Downloading the fluentd config file for $SRC_PLATFORM and $TGT_PLATFORM "; curl https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/fluent.conf.rt -o /opt/bitnami/fluentd/conf/fluentd.conf; else echo "Not Downloading"; fi 29 | RUN if [ "$SRC_PLATFORM" = "JFXRAY" ] ; then echo "Downloading the fluentd config file for $SRC_PLATFORM and $TGT_PLATFORM "; curl https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/fluent.conf.xray -o /opt/bitnami/fluentd/conf/fluentd.conf; else echo "Not Downloading"; fi 30 | 31 | ENTRYPOINT if [ "$TGT_PLATFORM" = "SPLUNK" ] ; then cat /opt/bitnami/fluentd/conf/fluentd.conf; fluentd -v -c /opt/bitnami/fluentd/conf/fluentd.conf; fi 32 | 33 | USER 1001 34 | 35 | STOPSIGNAL SIGTERM -------------------------------------------------------------------------------- /docker-build/docker.env: -------------------------------------------------------------------------------- 1 | JF_PRODUCT_DATA_INTERNAL=path_to_jfrog_logs 2 | SPLUNK_COM_PROTOCOL=change_me 3 | SPLUNK_HEC_HOST=splunk.change_me.com 4 | SPLUNK_HEC_PORT=8088 5 | SPLUNK_HEC_TOKEN=change_me 6 | SPLUNK_METRICS_HEC_TOKEN=change_me 7 | SPLUNK_LOGS_INDEX=jfrog_splunk 8 | SPLUNK_METRICS_INDEX=jfrog_splunk_metrics 9 | SPLUNK_INSECURE_SSL=change_me 10 | SPLUNK_VERIFY_SSL=change_me 11 | SPLUNK_COMPRESS_DATA=change_me 12 | JPD_URL=https://change_me.jfrog.io 13 | JPD_ADMIN_USERNAME=admin 14 | JFROG_ADMIN_TOKEN=change_me 15 | COMMON_JPD=false 16 | LOG_ENV=production -------------------------------------------------------------------------------- /fluent.conf.rt: -------------------------------------------------------------------------------- 1 | # JFROG ARTIFACTORY METRICS SOURCE 2 | 3 | @type jfrog_metrics 4 | @id metrics_http_jfrt 5 | tag jfrog.metrics.artifactory 6 | execution_interval 60s 7 | metric_prefix 'jfrog.artifactory' 8 | jpd_url "#{ENV['JPD_URL']}" 9 | username "#{ENV['JPD_ADMIN_USERNAME']}" 10 | token "#{ENV['JFROG_ADMIN_TOKEN']}" 11 | common_jpd "#{ENV['COMMON_JPD']}" 12 | # @log_level debug 13 | # request_timeout 30s 14 | # verify_ssl "#{ENV['SPLUNK_VERIFY_SSL']}" 15 | 16 | # SPLUNK ARTIFACTORY METRICS OUTPUT 17 | 18 | 
@type splunk_hec 19 | protocol "#{ENV['SPLUNK_COM_PROTOCOL']}" 20 | data_type metric 21 | hec_host "#{ENV['SPLUNK_HEC_HOST']}" 22 | hec_port "#{ENV['SPLUNK_HEC_PORT']}" 23 | hec_token "#{ENV['SPLUNK_METRICS_HEC_TOKEN']}" 24 | flush_interval 5s 25 | source ${tag} 26 | index "#{ENV['SPLUNK_METRICS_INDEX']}" 27 | metric_name_key metric_name 28 | metric_value_key value 29 | insecure_ssl "#{ENV['SPLUNK_INSECURE_SSL']}" 30 | gzip_compression "#{ENV['SPLUNK_COMPRESS_DATA']}" 31 | 32 | 33 | # CALLHOME 34 | 35 | @type exec 36 | tag callhome 37 | command "curl --request GET '#{ENV['JPD_URL']}/artifactory/api/system/version' -H 'Authorization: Bearer #{ENV['JFROG_ADMIN_TOKEN']}'" 38 | run_interval 24h 39 | 40 | @type json 41 | 42 | 43 | 44 | @type copy 45 | 46 | @type relabel 47 | @label @jfrogcalhome 48 | 49 | 50 | @type relabel 51 | @label @heapcallhome 52 | 53 | 54 | 80 | 109 | 110 | # LOG SOURCE DIRECTIVES 111 | ## SERVICE LOGS 112 | 113 | @type tail 114 | @id access_service_tail 115 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-service.log" 116 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-service.log.pos" 117 | tag jfrog.rt.access.service 118 | 119 | @type none 120 | 121 | 122 | 123 | @type tail 124 | @id artifactory_service_tail 125 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-service.log" 126 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-service.log.pos" 127 | tag jfrog.rt.artifactory.service 128 | 129 | @type none 130 | 131 | 132 | 133 | @type tail 134 | @id frontend_service_tail 135 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-service.log" 136 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-service.log.pos" 137 | tag jfrog.rt.frontend.service 138 | 139 | @type none 140 | 141 | 142 | 143 | @type tail 144 | @id metadata_service_tail 145 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-service.log" 146 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-service.log.pos" 147 | tag jfrog.rt.metadata.service 148 | 149 | @type none 150 | 151 | 152 | 153 | @type tail 154 | @id router_service_tail 155 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log" 156 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log.pos" 157 | tag jfrog.rt.router.service 158 | 159 | @type none 160 | 161 | 162 | ## TRAEFIK LOGS 163 | 164 | @type tail 165 | @id router_traefik_tail 166 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log" 167 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log.pos" 168 | tag jfrog.rt.router.traefik 169 | 170 | @type multiline 171 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 172 | format1 /^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] \[\] -(?.*)$/ 173 | time_key log_timestamp 174 | time_format %Y-%m-%dT%H:%M:%S.%LZ 175 | 176 | 177 | ## REQUEST LOGS 178 | 179 | @type tail 180 | @id access_request_tail 181 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-request.log" 182 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-request.log.pos" 183 | tag jfrog.rt.access.request 184 | 185 | @type regexp 186 | expression ^(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.+)$ 187 | time_key log_timestamp 188 | time_format %Y-%m-%dT%H:%M:%S.%LZ 189 | types response_content_length:integer, request_content_length:integer, return_status:integer 190 | 191 | 192 | 193 | @type tail 194 | @id artifactory_request_tail 195 | path 
"#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-request.log" 196 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-request.log.pos" 197 | tag jfrog.rt.artifactory.request 198 | 199 | @type none 200 | 201 | 202 | 203 | @type tail 204 | @id frontend_request_tail 205 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-request.log" 206 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-request.log.pos" 207 | tag jfrog.rt.frontend.request 208 | 209 | @type none 210 | 211 | 212 | 213 | @type tail 214 | @id metadata_request_tail 215 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-request.log" 216 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-request.log.pos" 217 | tag jfrog.rt.metadata.request 218 | 219 | @type none 220 | 221 | 222 | 223 | @type tail 224 | @id router_request_tail 225 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log" 226 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log.pos" 227 | tag jfrog.rt.router.request 228 | 229 | @type json 230 | time_key time 231 | time_format %Y-%m-%dT%H:%M:%S%:z 232 | 233 | 234 | ## ACCESS LOG 235 | 236 | @type tail 237 | @id artifactory_access_tail 238 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-access.log" 239 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-access.log.pos" 240 | tag jfrog.rt.artifactory.access 241 | 242 | @type regexp 243 | expression /^(?[^\s]*) \[(?[^\s\]]*)\s*\] \[(?[^\]\r\n]*)\] (?[^\s]*)? *for client : *(?[^\s]*) *\/ *(?[^\s]*)? ?(\[(?[^\s]*)\])?$/ 244 | time_key log_timestamp 245 | time_format %Y-%m-%dT%H:%M:%S.%LZ 246 | 247 | 248 | ## AUDIT LOG 249 | 250 | @type tail 251 | @id access_security_audit_tail 252 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-security-audit.log" 253 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-security-audit.log.pos" 254 | tag jfrog.rt.access.audit 255 | 256 | @type regexp 257 | expression /^(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?[^ ]*)\|(?.*)/ 258 | time_key log_timestamp 259 | time_format %Y-%m-%dT%H:%M:%S.%LZ 260 | 261 | 262 | 263 | # FILTER DIRECTIVE 264 | ## ALL LOGS 265 | 266 | @type concat 267 | key message 268 | multiline_start_regexp /\d{4}-\d{1,2}-\d{1,2}/ 269 | timeout_label @NORMAL 270 | flush_interval 5 271 | 272 | 273 | @type relabel 274 | @label @NORMAL 275 | 276 | 418 | 419 | 420 | -------------------------------------------------------------------------------- /fluent.conf.xray: -------------------------------------------------------------------------------- 1 | # JFROG XRAY METRICS SOURCE 2 | 3 | @type jfrog_metrics 4 | @id metrics_http_jfrt 5 | tag jfrog.metrics.xray 6 | execution_interval 5s 7 | metric_prefix 'jfrog.xray' 8 | jpd_url "#{ENV['JPD_URL']}" 9 | username "#{ENV['JPD_ADMIN_USERNAME']}" 10 | token "#{ENV['JFROG_ADMIN_TOKEN']}" 11 | common_jpd "#{ENV['COMMON_JPD']}" 12 | # @log_level debug 13 | # request_timeout 30s 14 | # verify_ssl "#{ENV['SPLUNK_VERIFY_SSL']}" 15 | 16 | # SPLUNK XRAY METRICS OUTPUT 17 | 18 | @type splunk_hec 19 | protocol "#{ENV['SPLUNK_COM_PROTOCOL']}" 20 | data_type metric 21 | hec_host "#{ENV['SPLUNK_HEC_HOST']}" 22 | hec_port "#{ENV['SPLUNK_HEC_PORT']}" 23 | hec_token "#{ENV['SPLUNK_METRICS_HEC_TOKEN']}" 24 | flush_interval 5s 25 | source ${tag} 26 | index "#{ENV['SPLUNK_METRICS_INDEX']}" 27 | metric_name_key metric_name 28 | metric_value_key value 29 | insecure_ssl "#{ENV['SPLUNK_INSECURE_SSL']}" 30 | gzip_compression "#{ENV['SPLUNK_COMPRESS_DATA']}" 31 | 32 | 33 | # ALL CALLHOME 
34 | 35 | @type exec 36 | tag callhome 37 | command "curl --request GET '#{ENV['JPD_URL']}/xray/api/v1/system/version' -H 'Authorization: Bearer #{ENV['JFROG_ADMIN_TOKEN']}'" 38 | run_interval 24h 39 | 40 | @type json 41 | 42 | 43 | 44 | @type copy 45 | 46 | @type relabel 47 | @label @jfrogcalhome 48 | 49 | 50 | @type relabel 51 | @label @heapcallhome 52 | 53 | 54 | 80 | 109 | 110 | # XRAY VIOLATIONS CONFIG 111 | 112 | @type jfrog_siem 113 | tag jfrog.xray.siem.vulnerabilities 114 | jpd_url "#{ENV['JPD_URL']}" 115 | username "#{ENV['JPD_ADMIN_USERNAME']}" 116 | token "#{ENV['JFROG_ADMIN_TOKEN']}" 117 | pos_file_path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/jfrog_siem.log.pos" 118 | 119 | 120 | @type record_transformer 121 | enable_ruby true 122 | 123 | log_source "jfrog.xray.siem.vulnerabilities" 124 | category ${record["type"]} 125 | url ${record["violation_details_url"]} 126 | signature ${record["watch_name"]} 127 | cvss ${record["cvss_score"]} 128 | cve ${record["cve"]} 129 | vendor_product "Xray" 130 | 131 | 132 | 133 | @type record_modifier 134 | 135 | _impacted_artifacts_url_ ${if record.has_key?('impacted_artifacts_url'); record.delete('impacted_artifacts_url') ; end; nil} 136 | _type_ ${if record.has_key?('type'); record.delete('type') ; end; nil} 137 | _watch_name_ ${if record.has_key?('watch_name'); record.delete('watch_name') ; end; nil} 138 | _violation_details_url_ ${if record.has_key?('violation_details_url'); record.delete('violation_details_url') ; end; nil} 139 | _cvss_score_ ${if record.has_key?('cvss_score'); record.delete('cvss_score') ; end; nil} 140 | _cvss_version_ ${if record.has_key?('cvss_version'); record.delete('cvss_version') ; end; nil} 141 | 142 | remove_keys _impacted_artifacts_url_, _type_, _watch_name_, _violation_details_url_, _cvss_score_, _cvss_version_ 143 | 144 | 145 | @type splunk_hec 146 | protocol "#{ENV['SPLUNK_COM_PROTOCOL']}" 147 | hec_host "#{ENV['SPLUNK_HEC_HOST']}" 148 | hec_port "#{ENV['SPLUNK_HEC_PORT']}" 149 | hec_token "#{ENV['SPLUNK_HEC_TOKEN']}" 150 | index "#{ENV['SPLUNK_LOGS_INDEX']}" 151 | format json 152 | sourcetype_key log_source 153 | use_fluentd_time false 154 | insecure_ssl "#{ENV['SPLUNK_INSECURE_SSL']}" 155 | # ssl parameter 156 | # use_ssl true 157 | # ca_file /path/to/ca.pem 158 | 159 | flush_interval 1s 160 | # frequency of the buffer flush 161 | flush_thread_count 5 162 | # The number of threads to flush/write chunks in parallel 163 | chunk_limit_records 10 164 | # The max number of events that each chunks can store in it 165 | 166 | 167 | 168 | # SOURCE DIRECTIVES 169 | # SERVICE LOGS 170 | 171 | @type tail 172 | @id xray_server_tail 173 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-server-service.log" 174 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-server-service.log.pos" 175 | tag jfrog.xray.server.service 176 | 177 | @type none 178 | 179 | 180 | 181 | @type tail 182 | @id xray_persist_tail 183 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-persist-service.log" 184 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-persist-service.log.pos" 185 | tag jfrog.xray.persist.service 186 | 187 | @type none 188 | 189 | 190 | 191 | @type tail 192 | @id xray_indexer_tail 193 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-indexer-service.log" 194 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-indexer-service.log.pos" 195 | tag jfrog.xray.indexer.service 196 | 197 | @type none 198 | 199 | 200 | 201 | @type tail 202 | @id xray_analysis_tail 203 | path 
"#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-analysis-service.log" 204 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-analysis-service.log.pos" 205 | tag jfrog.xray.analysis.service 206 | 207 | @type none 208 | 209 | 210 | 211 | @type tail 212 | @id xray_router_tail 213 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log" 214 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log.pos" 215 | tag jfrog.xray.router.service 216 | 217 | @type none 218 | 219 | 220 | # TRAEFIK LOGS 221 | 222 | @type tail 223 | @id xray_router_traefik_tail 224 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log" 225 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log.pos" 226 | tag jfrog.xray.router.traefik 227 | 228 | @type regexp 229 | expression ^(?[^ ]*) \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?[^\]]*)\] \[(?.*)\] \[(?.*)\] -(?.+)$ 230 | time_key log_timestamp 231 | time_format %Y-%m-%dT%H:%M:%S.%LZ 232 | 233 | 234 | # REQUEST LOGS 235 | 236 | @type tail 237 | @id xray_router_request_tail 238 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log" 239 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log.pos" 240 | tag jfrog.xray.router.request 241 | 242 | @type json 243 | time_key time 244 | time_format %Y-%m-%dT%H:%M:%S%:z 245 | 246 | 247 | 248 | @type tail 249 | @id xray_request_tail 250 | path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-request.log" 251 | pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/xray-request.log.pos" 252 | tag jfrog.xray.xray.request 253 | 254 | @type regexp 255 | expression ^(?[^ ]*)\|(?[^ ]*)\|(?[^|]++)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?[^\|]*)\|(?.*)$ 256 | time_key log_timestamp 257 | time_format %Y-%m-%dT%H:%M:%S.%LZ 258 | 259 | 260 | 261 | 262 | # FILTER DIRECTIVE 263 | ## ALL LOGS 264 | 265 | @type concat 266 | key message 267 | multiline_start_regexp /\d{4}-\d{1,2}-\d{1,2}/ 268 | timeout_label @NORMAL 269 | flush_interval 5 270 | 271 | 272 | @type relabel 273 | @label @NORMAL 274 | 275 | 386 | -------------------------------------------------------------------------------- /fluentd-demo.conf: -------------------------------------------------------------------------------- 1 | # LOG LEVEL ERROR 2 | 3 | @type dummy 4 | @id log_level_error 5 | tag jfrog.rt.artifactory.service 6 | dummy '{"log_level":"ERROR"}' 7 | 8 | 9 | # Data Transfers (GBs) Over Time. 
10 | 11 | @type dummy 12 | @id data_transfer_over_time1 13 | tag jfrog.rt.artifactory.request 14 | rate 10 15 | dummy '{"request_url":"/api/docker/johnp-docker/test/centos","repo":"johnp-docker","image":"centos","response_content_length":"1235343","request_content_length":"34323","return_status":"200","remote_address":"64.55.33.22"}' 16 | 17 | 18 | @type dummy 19 | @id data_transfer_over_time2 20 | tag jfrog.rt.artifactory.request 21 | rate 2 22 | dummy '{"request_url":"/api/docker/docker-local/test/ubuntu","repo":"docker-local","image":"ubuntu","response_content_length":"1235543","request_content_length":"123","return_status":"401","remote_address":"33.44.11.22"}' 23 | 24 | 25 | @type dummy 26 | @id data_transfer_over_time3 27 | tag jfrog.rt.artifactory.request 28 | rate 5 29 | dummy '{"request_url":"/api/docker/docker-local2/test/oraclelinux","repo":"docker-local2","image":"oraclelinux","response_content_length":"5535343","request_content_length":"33334323","return_status":"203","remote_address":"10.0.3.15"}' 30 | 31 | 32 | @type dummy 33 | @id data_transfer_over_time4 34 | tag jfrog.rt.artifactory.request 35 | rate 8 36 | dummy '{"request_url":"/api/docker/docker-local3/test/redhat-ubi8","repo":"docker-local3","image":"redhat-ubi8","response_content_length":"5235343","request_content_length":"3499323","return_status":"201","remote_address":"107.1.3.34"}' 37 | 38 | 39 | @type dummy 40 | @id data_transfer_over_time5 41 | tag jfrog.rt.artifactory.request 42 | dummy '{"request_url":"/api/docker/docker-local4/test/debian","repo":"docker-local4","image":"debian","response_content_length":"1","request_content_length":"1","return_status":"201","remote_address":"1.1.3.2"}' 43 | 44 | #send empty string 45 | 46 | @type dummy 47 | @id data_transfer_over_time6 48 | tag jfrog.rt.artifactory.request 49 | dummy '{"request_url":"/api/docker/docker-local4/test/debian","repo":"","image":"","response_content_length":"1","request_content_length":"1","return_status":"201","remote_address":"1.1.3.6"}' 50 | 51 | 52 | # Audit Actions 53 | 54 | @type dummy 55 | @id audit_actions1 56 | tag jfrog.rt.access.audit 57 | rate 1 58 | dummy '{"user":"johnp"}' 59 | 60 | 61 | @type dummy 62 | @id audit_actions2 63 | tag jfrog.rt.access.audit 64 | rate 2 65 | dummy '{"user":"vinaya"}' 66 | 67 | 68 | @type dummy 69 | @id audit_actions3 70 | tag jfrog.rt.access.audit 71 | rate 3 72 | dummy '{"user":"mahithab"}' 73 | 74 | 75 | @type dummy 76 | @id audit_actions4 77 | tag jfrog.rt.access.audit 78 | rate 4 79 | dummy '{"user":"jefff"}' 80 | 81 | 82 | # 500 errors 83 | 84 | @type dummy 85 | @id five_hundrend_errors 86 | tag jfrog.rt.artifactory.request 87 | dummy '{"return_status":"500"}' 88 | 89 | 90 | # Xray Log Level Errors 91 | 92 | @type dummy 93 | @id xray_log_level_error 94 | tag jfrog.xray.server.service 95 | dummy '{"log_level":"ERROR"}' 96 | 97 | 98 | # Xray 500 errors 99 | 100 | @type dummy 101 | @id xray_five_hundrend_errors 102 | tag jfrog.xray.xray.request 103 | dummy '{"return_status":"500"}' 104 | 105 | 106 | # DENIED LOGINS 107 | 108 | @type dummy 109 | @id denied_logins1 110 | tag jfrog.rt.artifactory.access 111 | rate 5 112 | dummy '{"action_response":"DENIED LOGIN","ip":"10.15.1.2","username":"vasuki"}' 113 | 114 | 115 | @type dummy 116 | @id denied_logins2 117 | tag jfrog.rt.artifactory.access 118 | rate 6 119 | dummy '{"action_response":"DENIED LOGIN","ip":"51.10.13.22","username":"karol"}' 120 | 121 | 122 | @type dummy 123 | @id denied_logins3 124 | tag jfrog.rt.artifactory.access 125 | rate 4 126 | dummy 
'{"action_response":"DENIED LOGIN","ip":"64.5.12.23","username":"mahithab"}' 127 | 128 | 129 | @type dummy 130 | @id denied_logins4 131 | tag jfrog.rt.artifactory.access 132 | rate 3 133 | dummy '{"action_response":"DENIED LOGIN","ip":"107.10.12.27","username":"idog"}' 134 | 135 | 136 | @type dummy 137 | @id denied_logins5 138 | tag jfrog.rt.artifactory.access 139 | rate 5 140 | dummy '{"action_response":"DENIED LOGIN","ip":"11.11.14.24","username":"jefff"}' 141 | 142 | 143 | @type dummy 144 | @id denied_logins6 145 | tag jfrog.rt.artifactory.access 146 | rate 60 147 | dummy '{"action_response":"DENIED LOGIN","ip":"10.0.1.2","username":"badguy"}' 148 | 149 | 150 | # ACCEPTED DEPLOY 151 | 152 | @type dummy 153 | @id accepted_deploy1 154 | tag jfrog.rt.artifactory.access 155 | rate 1 156 | dummy '{"action_response":"ACCEPTED DEPLOY","ip":"64.5.12.23","username":"mahithab"}' 157 | 158 | 159 | @type dummy 160 | @id accepted_deploy2 161 | tag jfrog.rt.artifactory.access 162 | rate 2 163 | dummy '{"action_response":"ACCEPTED DEPLOY","ip":"107.10.12.27","username":"idog"}' 164 | 165 | 166 | @type dummy 167 | @id accepted_deploy3 168 | tag jfrog.rt.artifactory.access 169 | rate 3 170 | dummy '{"action_response":"ACCEPTED DEPLOY","ip":"11.11.14.24","username":"jefff"}' 171 | 172 | 173 | # WHAT LOG IT WAS INTO THE JSON 174 | 175 | @type record_transformer 176 | 177 | hostname "#{Socket.gethostname}" 178 | log_source ${tag} 179 | 180 | 181 | 182 | 183 | @type splunk_hec 184 | protocol https 185 | hec_host HEC_HOST 186 | hec_port HEC_PORT 187 | hec_token HEC_TOKEN 188 | index "#{ENV['SPLUNK_LOGS_INDEX']}" 189 | format json 190 | sourcetype_key log_source 191 | use_fluentd_time false 192 | # buffered output parameter 193 | flush_interval 10s 194 | insecure_ssl false 195 | # ssl parameter 196 | #use_ssl true 197 | #ca_file /path/to/ca.pem 198 | 199 | #END SPLUNK OUTPUT 200 | -------------------------------------------------------------------------------- /fluentd-installer/Dockerfile.fluentd.sidecar: -------------------------------------------------------------------------------- 1 | # Dockerfile for bitnami/fluentd sidecar image with all the necessary plugins for our log analytic providers 2 | FROM bitnami/fluentd:1.17.0 3 | LABEL maintainer "Partner Engineering " 4 | 5 | USER root 6 | 7 | ## Install custom Fluentd plugins 8 | RUN fluent-gem install fluent-plugin-jfrog-siem --no-document \ 9 | && fluent-gem install fluent-plugin-splunk-hec --no-document \ 10 | && fluent-gem install fluent-plugin-datadog --no-document \ 11 | && fluent-gem install fluent-plugin-record-modifier --no-document \ 12 | && fluent-gem install fluent-plugin-jfrog-metrics --no-document \ 13 | && fluent-gem install fluent-plugin-jfrog-send-metrics --no-document \ 14 | && fluent-gem install fluent-plugin-newrelic --no-document \ 15 | && fluent-gem install fluent-plugin-concat --no-document \ 16 | && fluent-gem uninstall concurrent-ruby -v '>1.1.9' 17 | 18 | USER 1001 19 | -------------------------------------------------------------------------------- /helm/artifactory-ha-values.yaml: -------------------------------------------------------------------------------- 1 | installerInfo: '{ "productId": "OnPremObservability-Splunk/1.0.1", "features": [ { "featureId": "ArtifactoryVersion/{{ default .Chart.AppVersion .Values.artifactory.image.version }}" }, { "featureId": "{{ if .Values.postgresql.enabled }}postgresql{{ else }}{{ .Values.database.type }}{{ end }}/0.0.0" }, { "featureId": "Platform/{{ default "kubernetes" 
.Values.installer.platform }}" }, { "featureId": "Channel/OnPremObservability-Splunk-Helm" } ] }' 2 | artifactory: 3 | # For Artifactory versions <= 7.86.x please use artifactory.openMetrics.enabled instead of artifactory.metrics.enabled 4 | metrics: 5 | enabled: true 6 | customInitContainersBegin: | 7 | - name: "prepare-fluentd-conf-on-persistent-volume" 8 | image: {{ include "artifactory-ha.getImageInfoByValue" (list . "initContainers") }} 9 | imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" 10 | command: 11 | - 'sh' 12 | - '-c' 13 | - > 14 | mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/fluentd/; 15 | curl https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/fluent.conf.rt -o {{ .Values.artifactory.persistence.mountPath }}/etc/fluentd/fluentd.conf 16 | volumeMounts: 17 | - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" 18 | name: volume 19 | customSidecarContainers: | 20 | - name: "artifactory-fluentd-sidecar" 21 | image: "releases-pts-observability-fluentd.jfrog.io/fluentd:4.15" 22 | imagePullPolicy: "IfNotPresent" 23 | volumeMounts: 24 | - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" 25 | name: volume 26 | env: 27 | - name: JF_PRODUCT_DATA_INTERNAL 28 | value: {{ .Values.artifactory.persistence.mountPath }} 29 | - name: JPD_URL 30 | value: {{ .Values.jfrog.observability.jpd_url }} 31 | - name: JPD_ADMIN_USERNAME 32 | value: {{ .Values.jfrog.observability.username }} 33 | - name: JFROG_ADMIN_TOKEN 34 | valueFrom: 35 | secretKeyRef: 36 | name: jfrog-admin-token 37 | key: token 38 | - name: COMMON_JPD 39 | value: {{ .Values.jfrog.observability.common_jpd | quote }} 40 | - name: SPLUNK_COM_PROTOCOL 41 | value: {{ .Values.splunk.com_protocol }} 42 | - name: SPLUNK_HEC_HOST 43 | value: {{ .Values.splunk.host }} 44 | - name: SPLUNK_HEC_PORT 45 | value: {{ .Values.splunk.port | quote}} 46 | - name: SPLUNK_METRICS_HEC_TOKEN 47 | value: {{ .Values.splunk.metrics_token }} 48 | - name: SPLUNK_HEC_TOKEN 49 | value: {{ .Values.splunk.logs_token }} 50 | - name: SPLUNK_LOGS_INDEX 51 | value: {{ .Values.splunk.logs_index | default "jfrog_splunk" }} 52 | - name: SPLUNK_METRICS_INDEX 53 | value: {{ .Values.splunk.metrics_index | default "jfrog_splunk_metrics" }} 54 | - name: SPLUNK_INSECURE_SSL 55 | value: {{ .Values.splunk.insecure_ssl | quote}} 56 | - name: SPLUNK_VERIFY_SSL 57 | value: {{ .Values.splunk.verify_ssl | quote}} 58 | - name: SPLUNK_COMPRESS_DATA 59 | value: {{ .Values.splunk.compress_data | quote}} 60 | - name: LOG_ENV 61 | value: {{ .Values.jfrog.observability.log_env | default "production" }} 62 | - name: FLUENTD_CONF 63 | value: ../../../..{{ .Values.artifactory.persistence.mountPath }}/etc/fluentd/fluentd.conf 64 | splunk: 65 | host: SPLUNK_HEC_HOST 66 | port: SPLUNK_HEC_PORT 67 | logs_token: SPLUNK_HEC_TOKEN 68 | metrics_token: SPLUNK_METRICS_HEC_TOKEN 69 | logs_index: SPLUNK_LOGS_INDEX 70 | metrics_index: SPLUNK_METRICS_INDEX 71 | com_protocol: SPLUNK_COM_PROTOCOL 72 | insecure_ssl: SPLUNK_INSECURE_SSL 73 | verify_ssl: SPLUNK_VERIFY_SSL 74 | compress_data: SPLUNK_COMPRESS_DATA 75 | jfrog: 76 | observability: 77 | jpd_url: JPD_URL 78 | username: JPD_ADMIN_USERNAME 79 | common_jpd: COMMON_JPD 80 | log_env: LOG_ENV -------------------------------------------------------------------------------- /helm/artifactory-values.yaml: -------------------------------------------------------------------------------- 1 | installerInfo: '{ "productId": "OnPremObservability-Splunk/1.0.1", "features": [ { "featureId": 
"ArtifactoryVersion/{{ default .Chart.AppVersion .Values.artifactory.image.version }}" }, { "featureId": "{{ if .Values.postgresql.enabled }}postgresql{{ else }}{{ .Values.database.type }}{{ end }}/0.0.0" }, { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}" }, { "featureId": "Channel/OnPremObservability-Splunk-Helm" } ] }' 2 | artifactory: 3 | # For Artifactory versions <= 7.86.x please use artifactory.openMetrics.enabled instead of artifactory.metrics.enabled 4 | metrics: 5 | enabled: true 6 | customInitContainersBegin: | 7 | - name: "prepare-fluentd-conf-on-persistent-volume" 8 | image: {{ include "artifactory.getImageInfoByValue" (list . "initContainers") }} 9 | imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" 10 | command: 11 | - 'sh' 12 | - '-c' 13 | - > 14 | mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/fluentd/; 15 | curl https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/fluent.conf.rt -o {{ .Values.artifactory.persistence.mountPath }}/etc/fluentd/fluentd.conf; 16 | volumeMounts: 17 | - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" 18 | name: artifactory-volume 19 | customSidecarContainers: | 20 | - name: "artifactory-fluentd-sidecar" 21 | image: "releases-pts-observability-fluentd.jfrog.io/fluentd:4.15" 22 | imagePullPolicy: "IfNotPresent" 23 | volumeMounts: 24 | - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" 25 | name: artifactory-volume 26 | env: 27 | - name: JF_PRODUCT_DATA_INTERNAL 28 | value: {{ .Values.artifactory.persistence.mountPath }} 29 | - name: JPD_URL 30 | value: {{ .Values.jfrog.observability.jpd_url }} 31 | - name: JPD_ADMIN_USERNAME 32 | value: {{ .Values.jfrog.observability.username }} 33 | - name: JFROG_ADMIN_TOKEN 34 | valueFrom: 35 | secretKeyRef: 36 | name: jfrog-admin-token 37 | key: token 38 | - name: COMMON_JPD 39 | value: {{ .Values.jfrog.observability.common_jpd | quote }} 40 | - name: SPLUNK_COM_PROTOCOL 41 | value: {{ .Values.splunk.com_protocol }} 42 | - name: SPLUNK_HEC_HOST 43 | value: {{ .Values.splunk.host }} 44 | - name: SPLUNK_HEC_PORT 45 | value: {{ .Values.splunk.port | quote}} 46 | - name: SPLUNK_METRICS_HEC_TOKEN 47 | value: {{ .Values.splunk.metrics_token }} 48 | - name: SPLUNK_HEC_TOKEN 49 | value: {{ .Values.splunk.logs_token }} 50 | - name: SPLUNK_LOGS_INDEX 51 | value: {{ .Values.splunk.logs_index | default "jfrog_splunk" }} 52 | - name: SPLUNK_METRICS_INDEX 53 | value: {{ .Values.splunk.metrics_index | default "jfrog_splunk_metrics" }} 54 | - name: SPLUNK_INSECURE_SSL 55 | value: {{ .Values.splunk.insecure_ssl | quote}} 56 | - name: SPLUNK_VERIFY_SSL 57 | value: {{ .Values.splunk.verify_ssl | quote}} 58 | - name: SPLUNK_COMPRESS_DATA 59 | value: {{ .Values.splunk.compress_data | quote }} 60 | - name: LOG_ENV 61 | value: {{ .Values.jfrog.observability.log_env | default "production" }} 62 | - name: FLUENTD_CONF 63 | value: ../../../..{{ .Values.artifactory.persistence.mountPath }}/etc/fluentd/fluentd.conf 64 | splunk: 65 | host: SPLUNK_HEC_HOST 66 | port: SPLUNK_HEC_PORT 67 | logs_token: SPLUNK_HEC_TOKEN 68 | metrics_token: SPLUNK_METRICS_HEC_TOKEN 69 | logs_index: SPLUNK_LOGS_INDEX 70 | metrics_index: SPLUNK_METRICS_INDEX 71 | com_protocol: SPLUNK_COM_PROTOCOL 72 | insecure_ssl: SPLUNK_INSECURE_SSL 73 | verify_ssl: SPLUNK_VERIFY_SSL 74 | compress_data: SPLUNK_COMPRESS_DATA 75 | jfrog: 76 | observability: 77 | jpd_url: JPD_URL 78 | username: JPD_ADMIN_USERNAME 79 | common_jpd: COMMON_JPD 80 | log_env: LOG_ENV 
-------------------------------------------------------------------------------- /helm/jfrog_helm.env: -------------------------------------------------------------------------------- 1 | export SPLUNK_COM_PROTOCOL=http 2 | export SPLUNK_HEC_HOST=splunk.example.com 3 | export SPLUNK_HEC_PORT=8088 4 | export SPLUNK_HEC_TOKEN=SPLUNK_HEC_TOKEN 5 | export SPLUNK_METRICS_HEC_TOKEN=SPLUNK_METRICS_HEC_TOKEN 6 | export SPLUNK_LOGS_INDEX=jfrog_splunk 7 | export SPLUNK_METRICS_INDEX=jfrog_splunk_metrics 8 | export SPLUNK_INSECURE_SSL=false 9 | export SPLUNK_VERIFY_SSL=true 10 | export SPLUNK_COMPRESS_DATA=true 11 | export JPD_URL=http://abc.jfrog.io 12 | export JPD_ADMIN_USERNAME=admin 13 | export COMMON_JPD=false 14 | export LOG_ENV=staging -------------------------------------------------------------------------------- /helm/xray-values.yaml: -------------------------------------------------------------------------------- 1 | unifiedUpgradeAllowed: true 2 | databaseUpgradeReady: true 3 | xray: 4 | jfrogUrl: http://xray-xray.rt:8082 5 | replicaCount: 1 6 | common: 7 | customInitContainersBegin: | 8 | - name: "prepare-fluentd-conf-on-persistent-volume" 9 | image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} 10 | imagePullPolicy: "{{ .Values.imagePullPolicy }}" 11 | command: 12 | - 'sh' 13 | - '-c' 14 | - > 15 | mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/fluentd/; 16 | curl https://raw.githubusercontent.com/jfrog/log-analytics-splunk/master/fluent.conf.xray -o {{ .Values.xray.persistence.mountPath }}/etc/fluentd/fluentd.conf; 17 | volumeMounts: 18 | - mountPath: "{{ .Values.xray.persistence.mountPath }}" 19 | name: data-volume 20 | customSidecarContainers: | 21 | - name: "xray-platform-fluentd-sidecar" 22 | image: "releases-pts-observability-fluentd.jfrog.io/fluentd:4.15" 23 | imagePullPolicy: "IfNotPresent" 24 | volumeMounts: 25 | - mountPath: "{{ .Values.xray.persistence.mountPath }}" 26 | name: data-volume 27 | env: 28 | - name: JF_PRODUCT_DATA_INTERNAL 29 | value: {{ .Values.xray.persistence.mountPath }} 30 | - name: FLUENTD_CONF 31 | value: ../../../..{{ .Values.xray.persistence.mountPath }}/etc/fluentd/fluentd.conf 32 | - name: JPD_URL 33 | value: {{ .Values.jfrog.observability.jpd_url }} 34 | - name: JPD_ADMIN_USERNAME 35 | value: {{ .Values.jfrog.observability.username }} 36 | - name: JFROG_ADMIN_TOKEN 37 | valueFrom: 38 | secretKeyRef: 39 | name: jfrog-admin-token 40 | key: token 41 | - name: COMMON_JPD 42 | value: {{ .Values.jfrog.observability.common_jpd | quote }} 43 | - name: SPLUNK_COM_PROTOCOL 44 | value: {{ .Values.splunk.com_protocol }} 45 | - name: SPLUNK_HEC_HOST 46 | value: {{ .Values.splunk.host }} 47 | - name: SPLUNK_HEC_PORT 48 | value: {{ .Values.splunk.port | quote}} 49 | - name: SPLUNK_METRICS_HEC_TOKEN 50 | value: {{ .Values.splunk.metrics_token }} 51 | - name: SPLUNK_HEC_TOKEN 52 | value: {{ .Values.splunk.logs_token }} 53 | - name: SPLUNK_LOGS_INDEX 54 | value: {{ .Values.splunk.logs_index | default "jfrog_splunk" }} 55 | - name: SPLUNK_METRICS_INDEX 56 | value: {{ .Values.splunk.metrics_index | default "jfrog_splunk_metrics" }} 57 | - name: SPLUNK_INSECURE_SSL 58 | value: {{ .Values.splunk.insecure_ssl | quote}} 59 | - name: SPLUNK_VERIFY_SSL 60 | value: {{ .Values.splunk.verify_ssl | quote}} 61 | - name: SPLUNK_COMPRESS_DATA 62 | value: {{ .Values.splunk.compress_data | quote }} 63 | - name: LOG_ENV 64 | value: {{ .Values.jfrog.observability.log_env | default "production" }} 65 | splunk: 66 | host: SPLUNK_HEC_HOST 67 | port: 
SPLUNK_HEC_PORT 68 | logs_token: SPLUNK_HEC_TOKEN 69 | metrics_token: SPLUNK_METRICS_HEC_TOKEN 70 | logs_index: SPLUNK_LOGS_INDEX 71 | metrics_index: SPLUNK_METRICS_INDEX 72 | com_protocol: SPLUNK_COM_PROTOCOL 73 | insecure_ssl: SPLUNK_INSECURE_SSL 74 | verify_ssl: SPLUNK_VERIFY_SSL 75 | compress_data: SPLUNK_COMPRESS_DATA 76 | jfrog: 77 | observability: 78 | jpd_url: JPD_URL 79 | username: JPD_ADMIN_USERNAME 80 | common_jpd: COMMON_JPD 81 | log_env: LOG_ENV -------------------------------------------------------------------------------- /jfrog.env: -------------------------------------------------------------------------------- 1 | export JF_PRODUCT_DATA_INTERNAL=JF_PRODUCT_DATA_INTERNAL 2 | export SPLUNK_COM_PROTOCOL=http 3 | export SPLUNK_HEC_HOST=splunk.example.com 4 | export SPLUNK_HEC_PORT=8088 5 | export SPLUNK_HEC_TOKEN=SPLUNK_HEC_TOKEN 6 | export SPLUNK_METRICS_HEC_TOKEN=SPLUNK_METRICS_HEC_TOKEN 7 | export SPLUNK_INSECURE_SSL=false 8 | export SPLUNK_VERIFY_SSL=true 9 | export SPLUNK_COMPRESS_DATA=true 10 | export JPD_URL=http://abc.jfrog.io 11 | export JPD_ADMIN_USERNAME=admin 12 | export JFROG_ADMIN_TOKEN=JFROG_ADMIN_TOKEN 13 | export COMMON_JPD=false 14 | export LOG_ENV=production -------------------------------------------------------------------------------- /k8s/splunk.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: splunk 6 | name: splunk 7 | namespace: default 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: splunk 13 | template: 14 | metadata: 15 | labels: 16 | app: splunk 17 | spec: 18 | containers: 19 | - image: splunk/splunk:latest 20 | imagePullPolicy: "Always" 21 | name: splunk 22 | env: 23 | - name: SPLUNK_START_ARGS 24 | value: "--accept-license" 25 | - name: SPLUNK_PASSWORD 26 | value: "password1" 27 | ports: 28 | - containerPort: 8000 29 | - containerPort: 8088 30 | - containerPort: 8089 31 | --- 32 | kind: Service 33 | apiVersion: v1 34 | metadata: 35 | name: splunk 36 | labels: 37 | app: splunk 38 | spec: 39 | selector: 40 | app: splunk 41 | ports: 42 | - name: web 43 | protocol: TCP 44 | port: 8000 45 | targetPort: 8000 46 | - name: hec 47 | protocol: TCP 48 | port: 8088 49 | targetPort: 8088 50 | - name: api 51 | protocol: TCP 52 | port: 8089 53 | targetPort: 8089 54 | type: ClusterIP 55 | --- 56 | kind: Service 57 | apiVersion: v1 58 | metadata: 59 | name: splunk-lb 60 | labels: 61 | app: splunk 62 | spec: 63 | selector: 64 | app: splunk 65 | ports: 66 | - name: http 67 | protocol: TCP 68 | port: 80 69 | targetPort: 8000 70 | - name: hec 71 | protocol: TCP 72 | port: 8088 73 | targetPort: 8088 74 | - name: api 75 | protocol: TCP 76 | port: 8089 77 | targetPort: 8089 78 | type: LoadBalancer 79 | --------------------------------------------------------------------------------
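Editor's note: the demo deployment above exposes Splunk Web on 8000 and HEC on 8088 through both a ClusterIP and a LoadBalancer service. Before pointing the fluentd outputs at it, it is worth confirming that HEC is reachable and the token works. A small sketch, assuming you reach port 8088 via the `splunk-lb` address or `kubectl port-forward svc/splunk 8088:8088`, and that an HEC token was already created in Splunk; the host and token below are placeholders.

```python
# Hypothetical connectivity check for the demo HEC endpoint.
import requests

HEC_BASE = "https://localhost:8088"   # placeholder; e.g. the splunk-lb address
HEC_TOKEN = "change_me"               # placeholder HEC token

# 1) Health endpoint: responds even without a valid token.
health = requests.get(f"{HEC_BASE}/services/collector/health", verify=False)
print("health:", health.status_code, health.text)

# 2) Minimal event: verifies the token and the target index.
resp = requests.post(
    f"{HEC_BASE}/services/collector/event",
    headers={"Authorization": f"Splunk {HEC_TOKEN}"},
    json={"event": "hec smoke test", "index": "jfrog_splunk"},
    verify=False,  # the demo container uses a self-signed certificate
)
print("event:", resp.status_code, resp.text)
```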