├── criblvision-for-splunk
├── criblvision
│ ├── bin
│ │ └── README
│ ├── static
│ │ ├── appIcon.png
│ │ ├── appLogo.png
│ │ ├── appIconAlt.png
│ │ ├── appIcon_2x.png
│ │ ├── appLogo_2x.png
│ │ └── appIconAlt_2x.png
│ ├── default
│ │ ├── props.conf
│ │ ├── data
│ │ │ └── ui
│ │ │ │ ├── views
│ │ │ │ ├── criblvision_setup_page.xml
│ │ │ │ ├── audit.xml
│ │ │ │ ├── auditlogs.xml
│ │ │ │ ├── job_inspector.xml
│ │ │ │ ├── log_viewer.xml
│ │ │ │ ├── commit_and_deploy_audit_logs.xml
│ │ │ │ ├── stats.xml
│ │ │ │ ├── home.xml
│ │ │ │ ├── cribl_stream_license_metrics.xml
│ │ │ │ ├── leader_performance_introspection.xml
│ │ │ │ └── sources_and_destinations_overview.xml
│ │ │ │ └── nav
│ │ │ │ └── default.xml
│ │ ├── transforms.conf
│ │ ├── app.conf
│ │ └── macros.conf
│ ├── lookups
│ │ └── bytes_units.csv
│ ├── readme
│ │ ├── criblvision_for_splunk_pack.md
│ │ ├── upgrading_criblvision.md
│ │ ├── troubleshooting.md
│ │ ├── criblvision_alerts.md
│ │ ├── README.md
│ │ ├── cribl_stream_assets_lookup.md
│ │ ├── configuring_criblvision.md
│ │ └── getting_data_in.md
│ ├── metadata
│ │ └── default.meta
│ └── appserver
│ │ └── static
│ │ └── javascript
│ │ ├── setup_criblvision_page.js
│ │ ├── views
│ │ ├── util.js
│ │ ├── setup_configuration.js
│ │ ├── store_criblvision_properties.js
│ │ ├── app.js
│ │ └── splunk_helpers.js
│ │ └── vendor
│ │ └── react.production.min.js
├── criblvision.spl
└── README.md
├── criblvision-for-criblsearch
├── alerts
│ ├── CriblVision Alert - RSS Memory Usage.md
│ ├── CriblVision Alert - Unhealthy Sources.md
│ ├── CriblVision Alert - Worker Process Restarted.md
│ ├── CriblVision Alert - Unhealthy Destinations.md
│ ├── CriblVision Alert - Cluster Communication Errors.md
│ ├── CriblVision Alert - Destination Persistent Queue.md
│ └── CriblVision Alert - CPU Usage Over Threshold.md
├── README.md
└── dashboards
│ ├── CriblVision - Log Statistics.json
│ └── CriblVision - Thruput Introspection.json
├── README.md
└── criblvision-for-grafana
└── README.md
/criblvision-for-splunk/criblvision/bin/README:
--------------------------------------------------------------------------------
1 | This is where you put any scripts you want to add to this app.
2 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision.spl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/criblio/criblvision/HEAD/criblvision-for-splunk/criblvision.spl
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/static/appIcon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/criblio/criblvision/HEAD/criblvision-for-splunk/criblvision/static/appIcon.png
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/static/appLogo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/criblio/criblvision/HEAD/criblvision-for-splunk/criblvision/static/appLogo.png
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/static/appIconAlt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/criblio/criblvision/HEAD/criblvision-for-splunk/criblvision/static/appIconAlt.png
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/static/appIcon_2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/criblio/criblvision/HEAD/criblvision-for-splunk/criblvision/static/appIcon_2x.png
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/static/appLogo_2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/criblio/criblvision/HEAD/criblvision-for-splunk/criblvision/static/appLogo_2x.png
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/static/appIconAlt_2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/criblio/criblvision/HEAD/criblvision-for-splunk/criblvision/static/appIconAlt_2x.png
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/props.conf:
--------------------------------------------------------------------------------
1 | [cribl]
2 | LOOKUP-cribl_stream_workers = cribl_stream_assets host AS host OUTPUTNEW instance_type AS instance_type worker_group AS worker_group
3 |
4 | [source::cribl]
5 | AUTO_KV_JSON = false
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/lookups/bytes_units.csv:
--------------------------------------------------------------------------------
1 | abbreviated_unit,unit
2 | B,Bytes
3 | KB,Kilobytes
4 | MB,Megabytes
5 | GB,Gigabytes
6 | TB,Terabytes
7 | PB,Petabytes
8 | EB,Exabytes
9 | ZB,Zettabytes
10 | YB,Yottabytes
11 | RB,Ronnabytes
12 | QB,Quettabytes
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/data/ui/views/criblvision_setup_page.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/transforms.conf:
--------------------------------------------------------------------------------
1 | [cribl_stream_assets]
2 | batch_index_query = 0
3 | case_sensitive_match = 1
4 | filename = cribl_stream_assets.csv
5 |
6 | [cribl_stream_workers]
7 | batch_index_query = 0
8 | case_sensitive_match = 1
9 | filename = cribl_stream_assets.csv
10 |
11 | [bytes_units]
12 | batch_index_query = 0
13 | case_sensitive_match = 0
14 | filename = bytes_units.csv
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/criblvision_for_splunk_pack.md:
--------------------------------------------------------------------------------
1 | # The CriblVision for Splunk Pack
2 |
3 | The CriblVision for Splunk Pack is a companion to this Splunk app. It **is not** a requirement to use the Pack to receive value from this app. The Pack is only required if you would like to take advantage of the Collector Jobs that the Pack provides templates for.
4 |
5 | More information on this Pack can be found in the [Cribl Packs Dispensary](https://packs.cribl.io/packs/cc-criblvision-for-splunk).
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/metadata/default.meta:
--------------------------------------------------------------------------------
1 |
2 | # Application-level permissions
3 |
4 | []
5 | access = read : [ * ], write : [ admin, sc_admin, power ]
6 |
7 | ### EVENT TYPES
8 |
9 | [eventtypes]
10 | export = system
11 |
12 |
13 | ### PROPS
14 |
15 | [props]
16 | export = system
17 |
18 |
19 | ### TRANSFORMS
20 |
21 | [transforms]
22 | export = system
23 |
24 |
25 | ### LOOKUPS
26 |
27 | [lookups]
28 | export = system
29 |
30 |
31 | ### VIEWSTATES: even normal users should be able to create shared viewstates
32 |
33 | [viewstates]
34 | access = read : [ * ], write : [ * ]
35 | export = system
36 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/appserver/static/javascript/setup_criblvision_page.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const app_name = './criblvision';
4 |
5 | require.config({
6 | paths: {
7 | myApp: `../app/${app_name}/javascript/views/app`,
8 | react: `../app/${app_name}/javascript/vendor/react.production.min`,
9 | ReactDOM: `../app/${app_name}/javascript/vendor/react-dom.production.min`,
10 | },
11 | scriptType: 'module',
12 | });
13 |
14 | require(
15 | [
16 | 'react',
17 | 'ReactDOM',
18 | 'myApp',
19 | ],
20 | function(react, ReactDOM, myApp){
21 | ReactDOM.render(myApp, document.getElementById('criblvision_setup_container'));
22 | }
23 | );
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/app.conf:
--------------------------------------------------------------------------------
1 | #
2 | # Splunk app configuration file
3 | #
4 |
5 | [id]
6 | name = criblvision
7 | version = 4.6.1
8 |
9 | [install]
10 | is_configured = false
11 |
12 | [triggers]
13 | reload.lookups = simple
14 |
15 | [ui]
16 | is_visible = true
17 | label = CriblVision
18 | setup_view = criblvision_setup_page
19 |
20 | [launcher]
21 | author = Johan Woger
22 | description = The CriblVision app provides insight into your Cribl Stream Environment based on Stream's internal logs and metrics. The dashboards included in this app will help you quickly identify and troubleshoot issues with your deployment.
23 | version = 4.6.1
24 |
25 | [package]
26 | id = criblvision
27 | check_for_updates = false
28 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/appserver/static/javascript/views/util.js:
--------------------------------------------------------------------------------
1 | function promisify(fn) {
2 | console.log("promisify: Don't use this in production! Use a proper promisify library instead.")
3 |
4 | // return a new promisified function
5 | return (...args) => {
6 | return new Promise((resolve, reject) => {
7 | // create a callback that resolves and rejects
8 | function callback(err, result) {
9 | if (err) {
10 | reject(err);
11 | } else {
12 | resolve(result);
13 | }
14 | }
15 |
16 | args.push(callback)
17 |
18 | // pass the callback into the function
19 | fn.call(this, ...args);
20 | })
21 | }
22 | }
23 |
24 | export {
25 | promisify,
26 | }
27 |
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/alerts/CriblVision Alert - RSS Memory Usage.md:
--------------------------------------------------------------------------------
1 | CriblVision Alert - RSS Memory Usage
2 |
3 | Query
4 |
5 | dataset="default_logs" | where message == "_raw stats"
6 | | summarize mem_rss = max(mem.rss) by bin(_time, 1m), host, group
7 | | summarize
8 | unhealthy_memory_rss_usage_events = countif(mem_rss > 1700)
9 | by host, group
10 |
11 |
12 |
13 | Alerting instructions
14 | By default, this query is configured to alert if mem usage for a node goes over 1.7GB. You can adjust this yourself by editing the part of the query countif(mem_rss > 1700)
15 |
16 | Scheduling
17 | Click on schedule and configure it to run every hour
18 |
19 | Notifications
20 | Enable send notifications and configure the when clause to be as follows
21 |
22 | Count of results - Greater than - 0
23 |
24 | Configure Target for notification
25 |
26 | Under Send notification to
27 | Select your desired target or create a new target out of our supported Targets. (webhook, Pagerduty, Slack, AWS SNS, Email)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CriblVision
2 |
3 | CriblVision is a troubleshooting tool and monitoring aid for Cribl administrators. It was created by Cribl Support Engineers to help customers troubleshoot their own Cribl deployments. There are several troubleshooting dashboards tailored to certain product areas in which support has seen the highest number of recurring issues. And while our intent is to help you troubleshoot your own Cribl deployment, this app will always be a continual "work in progress" and should always be used in conjunction with the Cribl Monitoring Console and associated views.
4 |
5 | Currently, there are three instances of CriblVision:
6 |
7 | * [CriblVision for CriblSearch](https://github.com/criblio/criblvision/tree/main/criblvision-for-criblsearch)
8 | * [CriblVision for Grafana](https://github.com/criblio/criblvision/tree/main/criblvision-for-grafana)
9 | * [CriblVision for Splunk](https://github.com/criblio/criblvision/tree/main/criblvision-for-splunk)
10 |
11 | For more information on each, please see the README in their respective directories.
12 |
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/alerts/CriblVision Alert - Unhealthy Sources.md:
--------------------------------------------------------------------------------
1 | CriblVision Alert - Unhealthy Sources
2 |
3 | Query
4 |
5 | ${set_cribl_metrics_dataset} group="*" host="*" _metric in ("cribl.logstream.health.inputs") | project _time, _metric, _value, host, group
6 | | where _value > 0 | summarize inputs=values(inputs) by host | mv-expand inputs | render table
7 |
8 |
9 | Alerting instructions
10 | By default, this query is configured to alert if a source is in an unhealthy state.
11 |
12 | You can adjust how often for this check to occur by adjusting the timerange of the search and the scheduling frequency.
13 |
14 | Scheduling
15 | Click on schedule and configure it to run every 5 minutes (or to your liking)
16 |
17 | Notifications
18 | Enable send notifications and configure the when clause to be as follows
19 |
20 | Count of results - Greater than - 0
21 |
22 | Configure Target for notification
23 |
24 | Under Send notification to:
25 | Select your desired target or create a new target out of our supported Targets. (webhook, Pagerduty, Slack, AWS SNS, Email)
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/alerts/CriblVision Alert - Worker Process Restarted.md:
--------------------------------------------------------------------------------
1 | CriblVision Alert - Worker Process Restarted
2 |
3 | Query
4 |
5 | dataset="default_logs"
6 | | where message == "restarting worker process"
7 | | summarize worker_process_restarts = count() by host, instance_type, worker_group
8 | | where worker_process_restarts > 10
9 | | project host, instance_type, worker_group, worker_process_restarts
10 |
11 | Alerting instructions
12 | By default, this query is configured to alert if restarts for a node goes over 10 for the selected timerange. You can adjust this yourself by editing the part of the query worker_process_restarts > 10
13 |
14 | Scheduling
15 | Click on schedule and configure it to run every hour
16 |
17 | Notifications
18 | Enable send notifications and configure the when clause to be as follows
19 |
20 | Count of results - Greater than - 0
21 |
22 | Configure Target for notification
23 |
24 | Under Send notification to
25 | Select your desired target or create a new target out of our supported Targets. (webhook, Pagerduty, Slack, AWS SNS, Email)
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/alerts/CriblVision Alert - Unhealthy Destinations.md:
--------------------------------------------------------------------------------
1 | CriblVision Alert - Unhealthy Destinations
2 |
3 | Query
4 |
5 | ${set_cribl_metrics_dataset} group="*" host="*" _metric in ("cribl.logstream.health.outputs") | project _time, _metric, _value, host, group
6 | | where _value > 0 | summarize outputs=values(output) by host | mv-expand outputs | render table
7 |
8 |
9 |
10 | Alerting instructions
11 | By default, this query is configured to alert if a destination is in an unhealthy state.
12 |
13 | You can adjust how often for this check to occur by adjusting the timerange of the search and the scheduling frequency.
14 |
15 | Scheduling
16 | Click on schedule and configure it to run every 5 minutes (or to your liking)
17 |
18 | Notifications
19 | Enable send notifications and configure the when clause to be as follows
20 |
21 | Count of results - Greater than - 0
22 |
23 | Configure Target for notification
24 |
25 | Under Send notification to:
26 | Select your desired target or create a new target out of our supported Targets. (webhook, Pagerduty, Slack, AWS SNS, Email)
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/alerts/CriblVision Alert - Cluster Communication Errors.md:
--------------------------------------------------------------------------------
1 | CriblVision Alert - Cluster Communication Errors
2 |
3 | dataset="default_logs"
4 | | where channel == "clustercomm" and (level == "warn" or level == "error") and not(message startswith "metric")
5 | | summarize cluster_communication_errors = count() by host, group
6 | | where cluster_communication_errors > 10
7 | | project host, group, cluster_communication_errors
8 |
9 | Alerting instructions
10 | By default, this query is configured to alert if a worker node is having communication issues with its leader.
11 |
12 | You can adjust how often for this check to occur by adjusting the timerange of the search and the scheduling frequency.
13 |
14 | Scheduling
15 | Click on schedule and configure it to run every 1 hour (or to your liking)
16 |
17 | Notifications
18 | Enable send notifications and configure the when clause to be as follows
19 |
20 | Count of results - Greater than - 0
21 |
22 | Configure Target for notification
23 |
24 | Under Send notification to:
25 | Select your desired target or create a new target out of our supported Targets. (webhook, Pagerduty, Slack, AWS SNS, Email)
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/alerts/CriblVision Alert - Destination Persistent Queue.md:
--------------------------------------------------------------------------------
1 | CriblVision Alert - Destination Persistent Queue Initialized
2 |
3 | Query
4 |
5 | dataset="default_logs"
6 | | where channel startswith "DestPQ:" and message == "initializing persistent queue"
7 | | extend output = extract("DestPQ:(.*)", 1, channel)
8 | | summarize pq_initialized_count = count() by group, output
9 | | where pq_initialized_count > 1
10 | | project group, output
11 |
12 |
13 | Alerting instructions
14 | By default, this query is configured to alert if destination PQ has been initiated during the selected timerange.
15 |
16 | You can adjust how often for this check to occur by adjusting the timerange of the search and the scheduling frequency.
17 |
18 | Scheduling
19 | Click on schedule and configure it to run every 5 minutes
20 |
21 | Notifications
22 | Enable send notifications and configure the when clause to be as follows
23 |
24 | Count of results - Greater than - 0
25 |
26 | Configure Target for notification
27 |
28 | Under Send notification to:
29 | Select your desired target or create a new target out of our supported Targets. (webhook, Pagerduty, Slack, AWS SNS, Email)
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/alerts/CriblVision Alert - CPU Usage Over Threshold.md:
--------------------------------------------------------------------------------
1 | CriblVision Alert - CPU Usage Over Threshold
2 |
3 | Query
4 |
5 | dataset="default_logs" | summarize cpu_pct = max(cpuPerc) by bin(_time, 1m), host, group
6 | | summarize
7 | unhealthy_cpu_usage = countif(cpu_pct > 80),
8 | total = count()
9 | by host, group
10 | | extend unhealthy_cpu_usage_pct = round((unhealthy_cpu_usage * 100.0) / total, 2)
11 | | where unhealthy_cpu_usage_pct > 80
12 | | project host, group, unhealthy_cpu_usage_pct
13 |
14 |
15 | Alerting instructions
16 | By default, this query is configured to alert if cpu usage for a node goes over 80%. You can adjust this yourself by editing the part of the query unhealthy_cpu_usage = countif(cpu_pct > 80)
17 |
18 | Scheduling
19 | Click on schedule and configure it to run every hour
20 |
21 | Notifications
22 | Enable send notifications and configure the when clause to be as follows
23 |
24 | Count of results - Greater than - 0
25 |
26 | Configure Target for notification
27 |
28 | Under Send notification to
29 | Select your desired target or create a new target out of our supported Targets. (webhook, Pagerduty, Slack, AWS SNS, Email)
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/upgrading_criblvision.md:
--------------------------------------------------------------------------------
1 | # Upgrading the CriblVision for Splunk App
2 |
3 | ## Upgrading to Version 3.x
4 |
5 | Version 3.x of the CriblVision app introduces a new Cribl Stream asset lookup to replace the previous Worker Group lookup. This will cause dashboards to behave unexpectedly until the cutover to the new lookup is made. If you are upgrading from version 2.x of CriblVision then the following steps will be required to make this cutover:
6 |
7 | 1. Install version 3.x of CriblVision
8 | 2. Run the CriblVision setup page again:
9 | 1. From the **Apps** dropdown, select **Manage Apps**
10 |     2. Select the **Set up** action for CriblVision
11 | 3. Follow the instructions on the setup page
12 | 3. Run the **Populate Cribl Stream Assets Lookup** report by either:
13 | * Clicking the button on the landing page
14 | * Clicking the link in the navigation bar
15 | 4. Double check that any alerts that were enabled are still enabled
16 |
17 | If there are issues after completing these steps, you may need to clear your browser cache to clear cached scripts in Splunk. Clear the browser cache and follow the instructions from step 2 onwards.
18 |
19 | If installing the app through other methods, refer to the documentation below on how to configure the app and update your configuration accordingly.
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/appserver/static/javascript/views/setup_configuration.js:
--------------------------------------------------------------------------------
1 | import { promisify } from './util.js'
2 | import * as SplunkHelpers from './splunk_helpers.js'
3 |
4 | async function complete_setup(splunk_js_sdk_service) {
5 | var configuration_file_name = "app";
6 | var stanza_name = "install";
7 | var properties_to_update = {
8 | is_configured: "true",
9 | };
10 |
11 | await SplunkHelpers.update_configuration_file(
12 | splunk_js_sdk_service,
13 | configuration_file_name,
14 | stanza_name,
15 | properties_to_update,
16 | );
17 | };
18 |
19 | async function reload_splunk_app(
20 | splunk_js_sdk_service,
21 | app_name,
22 | ) {
23 | var splunk_js_sdk_apps = splunk_js_sdk_service.apps();
24 | await promisify(splunk_js_sdk_apps.fetch)();
25 |
26 | var current_app = splunk_js_sdk_apps.item(app_name);
27 | await promisify(current_app.reload)();
28 | };
29 |
30 | function redirect_to_splunk_app_homepage(
31 | app_name,
32 | ) {
33 | var redirect_url = "/app/" + app_name;
34 |
35 | window.location.href = redirect_url;
36 | };
37 |
38 |
39 | function create_splunk_js_sdk_service(
40 | splunk_js_sdk,
41 | application_name_space,
42 | ) {
43 | var http = new splunk_js_sdk.SplunkWebHttp();
44 |
45 | var splunk_js_sdk_service = new splunk_js_sdk.Service(
46 | http,
47 | application_name_space,
48 | );
49 |
50 | return splunk_js_sdk_service;
51 | };
52 |
53 | export {
54 | complete_setup,
55 | reload_splunk_app,
56 | redirect_to_splunk_app_homepage,
57 | create_splunk_js_sdk_service,
58 | }
59 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/troubleshooting.md:
--------------------------------------------------------------------------------
1 | # Troubleshooting the CriblVision for Splunk App
2 |
3 | CriblVision for Splunk is a community-supported Splunk Add-on. If you have any questions or queries, please reach out in the #criblvision [Community Slack](https://cribl-community.slack.com/) channel.
4 |
5 | ## Missing Events
6 |
7 | This app can function with just the logs/metrics from the Cribl Internal log/metric Sources, but it does use data collected from other sources to aid in accurate reporting. See the `Getting Data In` page for all the data sources used and how they are collected.
8 |
9 | ## Potential for Duplicate Fields
10 |
11 | The way the logs from the Internal Cribl Stream Log Source are formatted can result in fields getting extracted twice: once as index-time fields and again as search-time fields. This results in some fields at search-time being multi-value fields with duplicate data in them. This can be diagnosed by selecting a common field like channel from the Interesting Fields list and checking if the total percentage adds up to over 100%.
12 |
13 | The following props have been included to work around this:
14 | ```conf
15 | [source::cribl]
16 | AUTO_KV_JSON = false
17 | ```
18 | This configuration will stop Splunk from parsing the fields at search-time for any event with the cribl source (the default source from the Internal Cribl Stream Logs Source). This will still allow internal logs collected via other means (e.g., an Edge Node on a Leader Node, the CriblStream for Splunk Pack) to extract fields at search-time, as they will not have the same source value.
19 |
20 | ## Accurate Representation with Metrics
21 |
22 | When comparing the results in CriblVision with the Monitoring Console in Cribl Stream, there can be some discrepancies in the accuracy between the two. By default, the Internal Cribl Stream Metrics Source rolls up metrics with the cribl_metrics_rollup Pipeline. To have the results in CriblVision match more closely with the Monitoring Console, update the Pre-Processing Pipeline on the Cribl Metrics Source to be the passthru Pipeline.
--------------------------------------------------------------------------------
/criblvision-for-grafana/README.md:
--------------------------------------------------------------------------------
1 | # CriblVision for Grafana
2 |
3 | ## Getting Started
4 |
5 | This Grafana dashboard was created by Cribl Support Engineers to help customers troubleshoot their own Cribl deployments. It is designed to provide a comprehensive overview of the health and performance of a Cribl deployment. This was designed to be used in conjunction with the Cribl Monitoring Console.
6 |
7 | #### Logs and Metrics
8 |
9 | Cribl internal logs and metrics must be enabled and forwarded to Prometheus(metrics) and Loki(logs) destinations in order for all of the panels to populate with events. Refer to the documentation [here](https://docs.cribl.io/stream/sources-cribl-internal/#configuring-cribl-internal-logsmetrics-as-a-datasource) for instructions on configuring this Source.
10 |
11 | For this dashboard, you will need a Loki instance as well as a Prometheus instance with Remote Write enabled with the following flag: `--enable-feature=remote-write-receiver`
12 |
13 | #### Leader Logs
14 |
15 | Some of the views in this app will require Leader logs to be forwarded to Grafana. In distributed Cribl Stream environments, Leader logs are currently *NOT* sent via our internal Source. You will have to install a Cribl Edge Node on your Leader Node and configure local log collection via a file monitor input. Configure the file monitor input to collect logs by configuring the filename modal to `/opt/cribl/log/*`. For more information on how to deploy a Cribl Edge Node, please refer to our documentation [here](https://docs.cribl.io/edge/deploy-planning).
16 |
17 | #### Source and Destination configuration in Cribl
18 |
19 | For your internal logs source, you will need to configure the following:
20 |
21 | * Source
22 | * `group` field should be set to the name of the Worker Group
23 | * Destination
24 | * `prometheus_metrics` pipeline should be enabled for the `Prometheus` destination
25 | * `service` set to `Cribl` for Loki destination under `Advanced Settings`
26 |
27 | ### Using This Dashboard
28 |
29 | In addition to this overview page, this app provides several views intended to aid a Cribl admin in troubleshooting and assessing the health of a Cribl deployment. We recommend starting with the Health Check view and selecting a single Worker Group or Worker Node from the provided filters.
30 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/data/ui/views/audit.xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/criblvision_alerts.md:
--------------------------------------------------------------------------------
1 | # CriblVision for Splunk Alerts
2 |
3 | This app ships with a number of disabled alerts that can be used to alert when issues in your Cribl Stream environment arise. These alerts take advantage of macros that can be utilized to tweak the alert triggers to better reflect your Cribl Stream environment. The alerts all come with Splunk's default scheduling configured and no alert actions. The scheduling should be configured for each alert you plan to enable. For more on scheduling alerts, please reference Splunk's documentation [here](https://docs.splunk.com/Documentation/Splunk/latest/Alert/Definescheduledalerts). For more on configuring alert actions, please reference Splunk's documentation [here](https://docs.splunk.com/Documentation/Splunk/9.2.0/Alert/Setupalertactions).
4 |
5 | |Macro Name|Related Alert(s)|Description|
6 | |----------|----------------|-----------|
7 | |set_alert_ignored_destinations|No Output From Destinations, Unhealthy Destinations|A list of Destinations that should not trigger alerts|
8 | |set_alert_ignored_sources|No Input From Sources, Unhealthy Sources|A list of Sources that should not trigger alerts|
9 | |set_alert_lower_limit_unhealthy_memory_usage_mb|RSS Memory Usage Within Threshold|The lower limit of the threshold when alerts for memory usage (in MB) should trigger|
10 | |set_alert_threshold_backpressure|Destinations Experiencing Backpressure|The threshold of backpressure messages received (for a Worker Group + Worker) before the alert should trigger|
11 | |set_alert_threshold_blocked_destinations|Blocked Destinations|The threshold of blocked Destination messages received (for a Worker Group + Destination) before the alert should trigger|
12 | |set_alert_threshold_cluster_communication_errors|Cluster Communication Errors|The threshold of cluster communication error messages received (for a Worker Group + Worker) before the alert should trigger|
13 | |set_alert_threshold_no_destination_thruput_pct|No Output From Destinations|The threshold of times a Destination has not sent any events (for a Worker Group + Destination) before the alert should trigger|
14 | |set_alert_threshold_no_source_thruput_pct|No Input From Sources|The threshold of times a Source has not received any events (for a Worker Group + Source) before the alert should trigger|
15 | |set_alert_threshold_unhealthy_cpu_usage_pct|CPU Usage Over Threshold|The threshold of times a host has reported above the unhealthy CPU percentage threshold (for a Worker Group + Worker) before the alert should trigger|
16 | |set_alert_threshold_unhealthy_destinations_pct|Unhealthy Destinations|The threshold of times a Destination has reported as being unhealthy (for a Worker Group + Destination) before the alert should trigger|
17 | |set_alert_threshold_unhealthy_memory_usage_mb_pct|RSS Memory Usage Within Threshold|The threshold of times a host has reported memory usage within the unhealthy threshold (for a Worker Group + host) before the alert should trigger|
18 | |set_alert_threshold_unhealthy_sources_pct|Unhealthy Sources|The threshold of times a Source has reported as being unhealthy (for a Worker Group + Source) before the alert should trigger|
19 | |set_alert_threshold_worker_process_restarts|Worker Process Restarted|The threshold of times a host has reported Worker Process restarts (for a Worker Group + host) before the alert should trigger|
20 | |set_alert_unhealthy_cpu_usage_pct|CPU Usage Over Threshold|The threshold at which a host's CPU usage is deemed to be unhealthy|
21 | |set_alert_upper_limit_unhealthy_memory_usage_mb|RSS Memory Usage Within Threshold|The upper limit of the threshold when alerts for memory usage (in MB) should trigger|
22 |
23 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/README.md:
--------------------------------------------------------------------------------
1 | # CriblVision for Splunk
2 |
3 | ## Getting Started
4 |
5 | This CriblVision for Splunk app was designed as a troubleshooting tool and monitoring aid for Cribl administrators. It was created by Cribl Support Engineers to help customers troubleshoot their own Cribl deployments. There are several troubleshooting dashboards tailored to certain product areas in which support has seen the highest number of recurring issues. And while our intent is to help you troubleshoot your own Cribl deployment, this app will always be a continual "work in progress" and should always be used in conjunction with the Cribl Monitoring Console and associated views.
6 |
7 | Several Dashboards are provided to aid a Cribl admin in troubleshooting and assessing the health of a Cribl deployment. Every view is equipped with a "How to Use" toggle that reveals a description and instructions for that view. We recommend starting with the Health Check view and selecting a single Worker Group or Worker Node from the provided filters.
8 |
9 | ### Context is Crucial
10 |
11 | In the ancient art of troubleshooting, context is key - whether the problem is of a technical nature, or merely one related to existence itself.
12 |
13 | Without a clear understanding of the circumstances surrounding an issue, it becomes challenging to identify the root cause and provide an effective solution. Context provides valuable information about a specific environment, and every environment is unique; when using this app, it is wise to ask yourself the following questions:
14 |
15 | * *What is the current issue you are attempting to troubleshoot?*
16 | * *Are there any recent configuration changes that were made before the issue started?*
17 | * *Were there any specific user interactions that may have contributed to the start of the issue? **Example:** Increase in data throughput, new Sources or Destinations, change in architecture, etc.*
18 |
19 | Answers to the above questions, and many others, will help narrow down the scope of the investigation, enabling you and your team to focus your efforts on the relevant areas. Additionally, context aids in replicating the problem, as it enables Support Engineers to understand the exact conditions under which the issue occurs. Knowledge of the environment, along with the context of use-cases and integrations, ensures that the troubleshooting process is efficient and accurate.
20 |
21 | ### Configuration
22 |
23 | * [Configuring CriblVision for Splunk App](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/configuring_criblvision.md)
24 | * [Cribl Stream Assets Lookup](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/cribl_stream_assets_lookup.md)
25 | * [CriblVision for Splunk Alerts](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/criblvision_alerts.md)
26 | * [CriblVision for Splunk Pack](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/criblvision_for_splunk_pack.md)
27 | * [Getting Data In](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/getting_data_in.md)
28 | * [Troubleshooting](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/troubleshooting.md)
29 | * [Upgrading CriblVision for Splunk App](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/upgrading_criblvision.md)
30 |
31 | ## About
32 |
33 | * **Author:** Johan Woger
34 | * **Co-Authors:** Jeremy Prescott, Martin Prado, David Sheridan, Christopher Owen
35 | * **Honorable Mentions:**
36 | * George (Trey) Haraksin - For his initial ideas on thruput introspection (check out his other projects at [https://github.com/arcsector](https://github.com/arcsector))
37 | * Ben Marcus - General Testing.
38 | * Brendan Dalpe - Guru of many things.
39 | * Brandon McCombs - General Testing.
40 | * Chris Owens - General testing and contributor.
41 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/README.md:
--------------------------------------------------------------------------------
1 | # CriblVision for Splunk
2 |
3 | ## Getting Started
4 |
5 | This CriblVision for Splunk app was designed as a troubleshooting tool and monitoring aid for Cribl administrators. It was created by Cribl Support Engineers to help customers troubleshoot their own Cribl deployments. There are several troubleshooting dashboards tailored to certain product areas in which support has seen the highest number of recurring issues. And while our intent is to help you troubleshoot your own Cribl deployment, this app will always be a continual "work in progress" and should always be used in conjunction with the Cribl Monitoring Console and associated views.
6 |
7 | Several Dashboards are provided to aid a Cribl admin in troubleshooting and assessing the health of a Cribl deployment. Every view is equipped with a "How to Use" toggle that reveals a description and instructions for that view. We recommend starting with the Health Check view and selecting a single Worker Group or Worker Node from the provided filters.
8 |
9 | ### Context is Crucial
10 |
11 | In the ancient art of troubleshooting, context is key - whether the problem is of a technical nature, or merely one related to existence itself.
12 |
13 | Without a clear understanding of the circumstances surrounding an issue, it becomes challenging to identify the root cause and provide an effective solution. Context provides valuable information about a specific environment, and every environment is unique; when using this app, it is wise to ask yourself the following questions:
14 |
15 | * *What is the current issue you are attempting to troubleshoot?*
16 | * *Are there any recent configuration changes that were made before the issue started?*
17 | * *Were there any specific user interactions that may have contributed to the start of the issue? **Example:** Increase in data throughput, new Sources or Destinations, change in architecture, etc.*
18 |
19 | Answers to the above questions, and many others, will help narrow down the scope of the investigation, enabling you and your team to focus your efforts on the relevant areas. Additionally, context aids in replicating the problem, as it enables Support Engineers to understand the exact conditions under which the issue occurs. Knowledge of the environment, along with the context of use-cases and integrations, ensures that the troubleshooting process is efficient and accurate.
20 |
21 | ### Configuration
22 |
23 | * [Configuring CriblVision for Splunk App](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/configuring_criblvision.md)
24 | * [Cribl Stream Assets Lookup](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/cribl_stream_assets_lookup.md)
25 | * [CriblVision for Splunk Alerts](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/criblvision_alerts.md)
26 | * [CriblVision for Splunk Pack](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/criblvision_for_splunk_pack.md)
27 | * [Getting Data In](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/getting_data_in.md)
28 | * [Troubleshooting](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/troubleshooting.md)
29 | * [Upgrading CriblVision for Splunk App](https://github.com/criblio/criblvision/blob/main/criblvision-for-splunk/criblvision/readme/upgrading_criblvision.md)
30 |
31 | ## About
32 |
33 | * **Author:** Johan Woger
34 | * **Co-Authors:** Jeremy Prescott, Martin Prado, David Sheridan, Christopher Owen
35 | * **Honorable Mentions:**
36 | * George (Trey) Haraksin - For his initial ideas on thruput introspection (check out his other projects at [https://github.com/arcsector](https://github.com/arcsector))
37 | * Ben Marcus - General Testing.
38 | * Brendan Dalpe - Guru of many things.
39 | * Brandon McCombs - General Testing.
40 | * Chris Owens - General testing and contributor.
41 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/cribl_stream_assets_lookup.md:
--------------------------------------------------------------------------------
1 | # Cribl Stream Assets Lookup
2 |
3 | The `cribl_stream_assets` lookup is used by the CriblVision for Splunk app to apply additional context to events:
4 |
5 | * `environment`: An optional field used to distinguish the environment that instances belong to.
6 | * `instance_type`: The type of Cribl Stream/Edge instance operating on that host.
7 | * `worker_group`: The Worker Group or Fleet an instance belongs to.
8 |
9 | These are used by Alerts and Dashboards to filter content and apply additional context to events.
10 |
11 | ## Running the Populate Cribl Stream Asset Lookup Report
12 |
13 | After installing or upgrading the CriblVision application, run the `Populate Cribl Stream Asset Lookup` Report to repopulate the `cribl_stream_assets` lookup. This can be done by selecting "Populate Cribl Stream Worker Lookup" from the navigation menu. This report is scheduled to run every hour, but can be updated to meet your requirements.
14 |
15 | **Note:** When the report is initially run, you may see an error stating that the `cribl_stream_assets.csv` lookup file does not exist. This will not impact the search. Once the search is completed for the first time, the lookup file will be initiated and the results of the search will be written to it.
16 |
17 | ## Data Sources Used in the Report
18 |
19 | The `Populate Cribl Stream Asset Lookup` Report can be run with only the events/metrics from the internal Cribl log/metric Sources; however, this can be inaccurate at times. The REST Collectors defined in the CriblVision for Splunk Pack are used to create events that contain accurate information about each Cribl Stream instance. The Pack defines the following templates that are used in this Report:
20 |
21 | * `cribl_instance_details`: Collects info on Leader/Single Cribl Stream instances (`channel=CriblInstanceDetails`)
22 | * `cribl_worker_details`: Collects info on Worker/Edge Nodes (`channel=CriblWorkerDetails`)
23 |
24 | The report handles multiple ways of collecting information about Cribl Stream/Edge instances by using the following precedence:
25 |
26 | 1. **Collected Values**: The values collected from the `cribl_instance/worker_details` REST Collectors in the CriblVision for Splunk Pack.
27 | 2. **Lookup Values**: The values from the last time the `cribl_stream_assets` lookup was updated.
28 | 3. **Calculated Values**: The values calculated from the internal Cribl log/metric Sources.
29 |
30 | ## Manually Updating the Cribl Stream Asset Lookup
31 |
32 | The values in the `cribl_stream_assets` lookup take precedence over calculated values in the `Populate Cribl Stream Asset Lookup` Report; therefore, these can be manually updated to override incorrectly calculated instance information.
33 |
34 | It is recommended to use the [Splunk App for Lookup File Editing](https://splunkbase.splunk.com/app/1724) to manually update the `cribl_stream_assets` lookup file.
35 |
36 | The `instance_type` and `status` fields expect specific values for filtering in Alerts and Dashboards. If updating either, ensure they are set to one of the following:
37 |
38 | * `instance_type`:
39 | * `edge`
40 | * `leader`
41 | * `managed-edge`
42 | * `single`
43 | * `worker`
44 | * `status`:
45 | * `active`
46 | * `inactive`
47 | * `missing`
48 | * `shutdown`
49 |
50 | ## Setting Instances as Inactive
51 |
52 | Instances that are in either a `missing` or `shutdown` state will trigger Missing/Shutdown Asset Alerts if they are enabled. These will continue to trigger while in either state. However, if these entries are removed from the `cribl_stream_assets` lookup then Dashboards that require the `instance_type` field to be set will no longer display data for that instance.
53 |
54 | The `inactive` status is used when an instance is known to be missing or shutdown for valid reasons. This status will hide these instances from the `Cribl Stream Assets` Dashboard, but they will still be visible in other Dashboards when running historical searches.
55 |
56 | There are two methods for updating an instance to be in the `inactive` state:
57 |
58 | 1. Manually update the `cribl_stream_assets` lookup (see the section above).
59 | 2. Use the `set to inactive` action via the `Cribl Stream Assets` Dashboard.
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/appserver/static/javascript/views/store_criblvision_properties.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | import * as Splunk from './splunk_helpers.js'
4 | import * as Config from './setup_configuration.js';
5 |
// Stanza names of the macros managed by the setup page.
const CRIBL_ENVIRONMENT_FIELD_NAME = 'set_cribl_environment_field_name';
const CRIBL_INTERNAL_LOG_INDEX_MACRO = 'set_cribl_internal_log_index';
const CRIBL_LOG_SOURCETYPE_MACRO = 'set_cribl_log_sourcetype';
const CRIBL_METRICS_INDEX_MACRO = 'set_cribl_metrics_index';
const CRIBL_METRICS_PREFIX = 'set_cribl_metrics_prefix';
// NOTE(review): currently unreferenced in this file — confirm intended usage.
const POPULATE_CRIBL_STREAM_WORKER_LOOKUP_SAVEDSEARCH = 'Populate Cribl Stream Worker Lookup';

/**
 * Builds the macro property map from the values entered on the setup page.
 *
 * Each entry is keyed by the macro stanza name and describes:
 *   - defaultValue: fallback used when an optional field is left blank
 *   - value:        the trimmed user-supplied value
 *   - definition:   function mapping the value to the macro definition string
 *   - args:         macro argument names, when the macro takes arguments
 *   - isRequired:   whether the user must supply a value
 *
 * @param {Object} setup_options - raw field values from the setup form
 * @returns {Object} macro stanza name -> macro property descriptor
 */
function extract_macro_properties(setup_options){
    const properties = {};

    properties[CRIBL_ENVIRONMENT_FIELD_NAME] = {
        defaultValue: 'env',
        value: setup_options.set_cribl_environment_field_name.trim(),
        definition: value => value,
        isRequired: false
    };

    properties[CRIBL_INTERNAL_LOG_INDEX_MACRO] = {
        defaultValue: 'cribl_logs',
        value: setup_options.set_cribl_internal_log_index.trim(),
        definition: index => `index=${index}`,
        isRequired: true
    };

    properties[CRIBL_LOG_SOURCETYPE_MACRO] = {
        value: setup_options.set_cribl_log_sourcetype.trim(),
        definition: sourcetypes => `sourcetype IN (${sourcetypes})`,
        isRequired: true
    };

    properties[CRIBL_METRICS_INDEX_MACRO] = {
        defaultValue: 'cribl_metrics',
        value: setup_options.set_cribl_metrics_index.trim(),
        definition: index => `index=${index}`,
        isRequired: true
    };

    properties[CRIBL_METRICS_PREFIX] = {
        defaultValue: 'cribl.logstream.',
        value: setup_options.set_cribl_metrics_prefix.trim(),
        definition: prefix => `${prefix}$metric_name$`,
        args: ['metric_name'],
        isRequired: true
    };

    return properties;
}
55 |
/**
 * Persists the setup-page configuration for the CriblVision app.
 *
 * Validates the supplied macro values (falling back to defaults for optional
 * blanks), writes each macro to macros.conf, creates the automatic lookup on
 * every configured Cribl log sourcetype, marks the app as configured, and
 * reloads/redirects to the app homepage. Errors are reported to the user via
 * alert() and logged to the console.
 *
 * @param {Object} splunk_js_sdk  - the Splunk JS SDK module
 * @param {Object} setup_options  - raw field values from the setup form
 */
export async function perform(splunk_js_sdk, setup_options){
    const app_name = 'criblvision';

    // All objects are written at app scope so the configuration is shared.
    const application_name_space = {
        owner: 'nobody',
        app: app_name,
        sharing: 'app',
    };

    try{
        const service = Config.create_splunk_js_sdk_service(splunk_js_sdk, application_name_space);
        const macros = extract_macro_properties(setup_options);

        // Check that values have been provided for each macro: required macros
        // must be non-empty; optional ones fall back to their default.
        Object.keys(macros).forEach(key => {
            if(macros[key].value === undefined || macros[key].value.length === 0){
                if(macros[key].isRequired){
                    throw new Error(`Please enter a value for the "${key}" macro.`);
                }
                macros[key].value = macros[key].defaultValue;
            }
        });

        // Configure the macros.
        for(let key in macros){
            let stanza = key;
            let properties = {
                definition: macros[key].definition(macros[key].value),
                iseval: 0
            };

            // Macros with arguments use the "name(argcount)" stanza form and a
            // comma-separated "args" setting.
            if(macros[key].args !== undefined){
                stanza = `${stanza}(${macros[key].args.length})`;
                properties.args = macros[key].args.join(',');
            }

            await Splunk.update_configuration_file(service, 'macros', stanza, properties);
        }

        // Configure the automatic lookup(s) so events from each Cribl log
        // sourcetype are enriched with instance_type and worker_group.
        const lookup_property = { 'LOOKUP-cribl_stream_workers': 'cribl_stream_assets host AS host OUTPUTNEW instance_type AS instance_type worker_group AS worker_group' };
        for(let sourcetype of macros[CRIBL_LOG_SOURCETYPE_MACRO].value.split(',')){
            await Splunk.update_configuration_file(service, 'props', sourcetype.trim(), lookup_property);
        }

        await Config.complete_setup(service);
        await Config.reload_splunk_app(service, app_name);
        Config.redirect_to_splunk_app_homepage(app_name);
    }catch(error){
        // Surface the failure both on the console and to the user.
        console.error(error);
        alert(error);
    }
}
112 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/configuring_criblvision.md:
--------------------------------------------------------------------------------
1 | # Configuration
2 |
3 | This app and its associated dashboards rely on a few components which you must configure.
4 |
5 | *Note:* On your initial entry into the application after it has been installed, you should be prompted to navigate to the setup page which will guide you through configuring the required macros. Once the `Save Configuration` button is pressed, the macro definitions will be saved, and automatic lookups will be created off the back of the provided sourcetype(s) to automatically enrich events with the Worker Group of the Worker Node from which that event came. If you are wanting to manually configure these (or you run into errors executing the setup page), the following instructions can be followed. You may experience errors when deploying this app within a Search Head Cluster or when changing the name of the app from `criblvision` to something else. For information on deploying to a Search Head Cluster, refer to Splunk's documentation [here](https://docs.splunk.com/Documentation/Splunk/latest/DistSearch/PropagateSHCconfigurationchanges#Deploy_a_configuration_bundle).
6 |
7 | ## Macros
8 |
9 | This app ships with 4 macros which must be edited in accordance with your Splunk index naming schema. For more info on configuring macros, reference Splunk's documentation [here](https://docs.splunk.com/Documentation/SplunkCloud/latest/Knowledge/Definesearchmacros).
10 |
11 | |Macro Name|Macro Description|
12 | |----------|-----------------|
13 | |`set_cribl_internal_log_index`|Set this macro definition to the index you configured for your Cribl logs.|
14 | |`set_cribl_log_sourcetype`|Set this macro definition to the sourcetype you configured for Cribl logs.|
15 | |`set_cribl_metrics_index`|Set this macro definition to the index you configured for Cribl metrics.|
16 | |`set_cribl_metrics_prefix(1)`|Change the `cribl.logstream` value before `.$metric_name$` if you have changed the default namespace used for your Cribl metrics internal Source.|
17 | |`set_cribl_environment_field_name`|(Optional) Set this macro definition to the name of the field that specifies the environment a Cribl Stream instance belongs to.|
18 |
19 | For manual configuration of the macro definitions through the CLI, append and update the following to `$SPLUNK_HOME/etc/apps/criblvision/local/macros.conf` on standalone Search Heads or `$SPLUNK_HOME/etc/shcluster/apps/criblvision/local/macros.conf` on Search Head Deployers for a Search Head Cluster:
20 |
21 | ```conf
22 | [set_cribl_internal_log_index]
23 | definition = index=cribl_logs
24 |
25 | [set_cribl_log_sourcetype]
26 | definition = sourcetype IN (cribl)
27 |
28 | [set_cribl_metrics_index]
29 | definition = index=cribl_metrics
30 |
31 | [set_cribl_metrics_prefix(1)]
32 | definition = cribl.logstream.$metric_name$
33 |
34 | [set_cribl_environment_field_name]
35 | definition = env
36 | ```
37 |
38 | ## Automatic Lookups
39 |
40 | This app uses an automatic lookup to enrich events with the Worker Group (the `worker_group` field) on events from a Worker Node in a distributed environment. For more on automatic lookups, see Splunk's documentation [here](https://docs.splunk.com/Documentation/Splunk/latest/Knowledge/DefineanautomaticlookupinSplunkWeb).
41 |
42 | When configuring an automatic lookup from the Splunk UI, ensure that the following values are set before clicking the `Save` button:
43 |
44 | * **Lookup table**: `cribl_stream_workers`
45 | * **Apply to:** `sourcetype`
46 | * **named:** Your Cribl log sourcetype. *Note:* You cannot use wildcards in this definition
47 | * **Lookup input fields:** `worker` = `host`
48 | * **Lookup output fields:** `worker_group` = `worker_group`
49 |
50 | For manual configuration of the automatic lookup definiton(s) through the CLI, append and update the following stanza to `$SPLUNK_HOME/etc/apps/criblvision/local/props.conf` on standalone Search Heads or `$SPLUNK_HOME/etc/shcluster/apps/criblvision/local/props.conf` on Search Head Deployers for a Search Head Cluster:
51 |
52 | ```conf
53 | [your_sourcetype]
54 | LOOKUP-cribl_stream_workers = cribl_stream_workers worker AS host OUTPUTNEW worker_group AS worker_group
55 | ```
56 |
57 | If you are using multiple sourcetypes for your internal Cribl logs and would like to use a wildcarded props definition over configuring multiple automated lookups, you can use the following stanza template:
58 |
59 | ```conf
60 | [(?::){0}your_wildcarded_sourcetype]
61 | ```
62 |
63 | If configuring manually, update the `$SPLUNK_HOME/etc/apps/criblvision/local/app.conf` configuration file to tell Splunk that this app has been configured:
64 |
65 | ```conf
66 | [install]
67 | is_configured = true
68 | ```
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/data/ui/nav/default.xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/readme/getting_data_in.md:
--------------------------------------------------------------------------------
1 | # Getting Data In
2 |
3 | The majority of the CriblVision for Splunk app can operate with just the Internal Cribl Stream/Edge logs and metrics; however, this can be extended through the use of the CriblVision for Splunk Pack that can be downloaded from the Cribl Packs Dispensary.
4 |
5 | It is recommended to use the HTTP Event Collector (HEC) protocol to forward events to Splunk from Cribl Stream. If using the Splunk 2 Splunk (S2S) protocol Destinations, you can view the Splunk S2S Troubleshooting dashboard to troubleshoot issues with potential dropped events or events experiencing subsecond timestamp issues.
6 |
7 | ## Internal Cribl Stream / Edge Logs and Metrics
8 |
9 | Cribl internal logs and metrics must be enabled and forwarded to Splunk in order for the majority of the panels to populate with events. There are a couple of options for collecting them.
10 |
11 | ### Internal Cribl Logs/Metrics Sources
12 |
13 | Refer to the documentation [here](https://docs.cribl.io/stream/sources-cribl-internal/#configuring-cribl-internal-logsmetrics-as-a-datasource) for instructions on configuring these Sources. Be sure to configure a corresponding index and sourcetype field for logs and a corresponding index for metrics.
14 |
15 | ### Cribl Stream API (Metrics Only)
16 |
17 | The Cribl Stream Leader API can return Worker metrics from the Leader Node via the `/api/v1/system/metrics/query` endpoint. For instructions on configuring this, see the documentation in the CriblVision for Splunk Pack.
18 |
19 | ## Cribl Stream Leader Logs
20 |
21 | Some of the views in this app will require Leader logs to be forwarded to Splunk. In distributed Cribl Stream environments, Leader logs are currently *NOT* sent via our internal Source. There are a couple of options for collecting them.
22 |
23 | ### Cribl Edge Node
24 |
25 | You can install a Cribl Edge Node on your Leader Node and configure local log collection via file monitor inputs.
26 |
27 | #### Leader Logs
28 |
29 | Configure the file monitor input to collect logs by configuring the search path to `$CRIBL_HOME/log` (where `$CRIBL_HOME` is the Cribl install directory on your Leader Node), depth to `0`, and setting the following filenames in the allowlist:
30 | * `access.log`
31 | * `audit.log`
32 | * `cribl.log`
33 | * `cribl_stderr.log`
34 | * `metrics.log`
35 | * `notifications.log`
36 | * `search_metrics.log`
37 | * `ui-access.log`
38 |
39 | The following template can be used as a base configuration:
40 |
41 | ```json
42 | {
43 | "id": "leader_logs",
44 | "disabled": false,
45 | "mode": "manual",
46 | "interval": 10,
47 | "filenames": [
48 | "access.log",
49 | "audit.log",
50 | "cribl.log",
51 | "cribl_stderr.log",
52 | "metrics.log",
53 | "notifications.log",
54 | "search_metrics.log",
55 | "ui-access.log"
56 | ],
57 | "type": "file",
58 | "path": "/log",
59 | "metadata": [
60 | {
61 | "name": "index",
62 | "value": "'cribl_logs'"
63 | },
64 | {
65 | "name": "sourcetype",
66 | "value": "'Cribl:InternalLogs'"
67 | }
68 | ],
69 | "depth": 0
70 | }
71 | ```
72 |
73 | #### Collector Job Logs
74 |
75 | Configure the file monitor input to collect Collector Job logs by configuring the search path to `$CRIBL_HOME/state/jobs` (where `$CRIBL_HOME` is the Cribl install directory on your Leader Node), and setting the following filenames in the allowlist:
76 | * `*.log`
77 |
78 | The following template can be used as a base configuration:
79 |
80 | ```json
81 | {
82 | "id": "collector_job_logs",
83 | "disabled": false,
84 | "mode": "manual",
85 | "interval": 10,
86 | "filenames": [
87 |         "*.log"
88 | ],
89 | "type": "file",
90 | "path": "/state/jobs",
91 | "metadata": [
92 | {
93 | "name": "index",
94 | "value": "'cribl_logs'"
95 | },
96 | {
97 | "name": "sourcetype",
98 | "value": "'Cribl:InternalLogs'"
99 | }
100 | ]
101 | }
102 | ```
103 |
104 | For more information on how to deploy a Cribl Edge Node, please refer to our documentation [here](https://docs.cribl.io/edge/deploy-planning). If sending directly to Splunk from the Edge Node, it is recommended to use the HTTP Event Collector (HEC) protocol to forward events to Splunk.
105 |
106 | ### Cribl Stream Leader API
107 |
108 | The Cribl Stream Leader API can return logs from the Leader Node via the `/api/v1/system/logs` endpoint. For instructions on configuring this, see the documentation in the CriblVision for Splunk Pack.
109 |
110 | ## CriblVision for Splunk Pack Sources
111 |
112 | The following data sources are collected via REST Collectors and processed by the CriblVision for Splunk Pack:
113 | * `channel=CriblInstanceDetails`
114 | * `channel=CriblWorkerDetails`
115 | * `channel=git_commit_logs`
116 |
117 | See the documentation in the CriblVision for Splunk Pack for configuring these REST Collectors.
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/data/ui/views/auditlogs.xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/macros.conf:
--------------------------------------------------------------------------------
### GENERAL MACROS ###

# Name of the field that identifies the environment a Cribl Stream instance
# belongs to (optional; see readme/configuring_criblvision.md).
[set_cribl_environment_field_name]
definition = env
iseval = 0

# Index that internal Cribl logs are written to.
[set_cribl_internal_log_index]
definition = index=cribl_logs
iseval = 0

# Sourcetype(s) configured for internal Cribl logs.
[set_cribl_log_sourcetype]
definition = sourcetype IN (cribl)
iseval = 0

# Index that internal Cribl metrics are written to.
[set_cribl_metrics_index]
definition = index=cribl_metrics
iseval = 0

# Namespace prefix applied to Cribl metric names.
[set_cribl_metrics_prefix(1)]
args = metric_name
definition = cribl.logstream.$metric_name$
iseval = 0

# Enriches events from the cribl_stream_assets lookup: first resolves a GUID
# host value to its real host name, then outputs the requested $fields$.
[cribl_stream_assets_lookup(1)]
args = fields
definition = lookup cribl_stream_assets guid AS host OUTPUTNEW host AS lookup_host\
| eval host = if(lookup_host != "" OR isnotnull(lookup_host), lookup_host, host)\
| lookup cribl_stream_assets host OUTPUTNEW $fields$\
| fields - lookup_host
iseval = 0

# Builds the environment filter options (quoted host list per environment)
# for dashboard inputs.
[dashboard_cribl_environment_filter]
definition = inputlookup cribl_stream_assets\
| stats values(host) AS hosts BY environment\
| eval hosts = "\"".mvjoin(hosts, "\", \"")."\""
iseval = 0

# Builds the host filter options for dashboards, labelled by instance type.
[dashboard_host_filter]
definition = tstats count WHERE `set_cribl_internal_log_index` `set_cribl_log_sourcetype` BY host\
| lookup cribl_stream_assets host AS host\
| eval label = case(instance_type == "managed-edge", "Edge", instance_type == "worker", "Worker", instance_type == "leader", "Leader", instance_type == "single", "Single", true(), "").": ".host\
| sort - label
iseval = 0

# Same as dashboard_host_filter, narrowed by an extra search filter.
[dashboard_host_filter(1)]
args = host_filter
definition = `dashboard_host_filter`\
| search $host_filter$
iseval = 0

# Builds the Worker Group / Fleet filter options for dashboards.
[dashboard_worker_group_filter]
definition = inputlookup cribl_stream_assets\
| search instance_type IN ("managed-edge", "worker")\
| stats count BY instance_type worker_group\
| eval label = if(instance_type == "managed-edge", "Fleet", "Worker Group").": ".worker_group
iseval = 0

# Same as dashboard_worker_group_filter, narrowed by an extra search filter.
[dashboard_worker_group_filter(1)]
args = worker_group_filter
definition = inputlookup cribl_stream_assets\
| search instance_type IN ("managed-edge", "worker") $worker_group_filter$\
| stats count BY instance_type worker_group\
| eval label = if(instance_type == "managed-edge", "Fleet", "Worker Group").": ".worker_group
iseval = 0

# Returns a quoted, comma-separated list of hosts for the given environment.
[get_environment_hosts(1)]
args = environment
definition = lookup cribl_stream_assets environment AS $environment$ OUTPUTNEW host AS hosts\
| eval hosts = "\"".mvjoin(hosts, "\", \"")."\""
iseval = 0

# Converts a byte-count field in place to the unit named by $bytes_unit$
# (KB..QB, case-insensitive); an unrecognised unit leaves the value in bytes.
[process_bytes(2)]
args = bytes_field,bytes_unit
definition = $bytes_field$ = $bytes_field$ / pow(1024, case(lower("$bytes_unit$") == "kb", 1, lower("$bytes_unit$") == "mb", 2, lower("$bytes_unit$") == "gb", 3, lower("$bytes_unit$") == "tb", 4, lower("$bytes_unit$") == "pb", 5, lower("$bytes_unit$") == "eb", 6, lower("$bytes_unit$") == "zb", 7, lower("$bytes_unit$") == "yb", 8, lower("$bytes_unit$") == "rb", 9, lower("$bytes_unit$") == "qb", 10, true(), 0))
iseval = 0
errormsg = The bytes unit "$bytes_unit$" is not valid
validation = match($bytes_unit$, "(?i)[KMGTPEZYRQ]?B")

# Default byte unit used with process_bytes — presumably by dashboards; verify usage.
[set_bytes_unit]
definition = "GB"
iseval = 0
82 |
### ALERT MACROS ###

# general macros used across alerts

# Destinations that should never trigger Destination alerts.
[set_alert_ignored_destinations]
definition = NOT output IN ("devnull:devnull")
iseval = 0

# Sources that should never trigger Source alerts.
[set_alert_ignored_sources]
definition = NOT input IN ("cribl:CriblLogs", "cribl:CriblMetrics", "datagen:*")
iseval = 0

# criblvision alert - blocked destinations

# Blocked-Destination message count (per Worker Group + Destination) above
# which the alert should trigger.
[set_alert_threshold_blocked_destinations]
definition = 0
iseval = 0

# criblvision alert - cpu usage over threshold

# CPU usage percentage at which a host is deemed unhealthy.
[set_alert_unhealthy_cpu_usage_pct]
definition = 75
iseval = 0

# Threshold of times a host has reported above the unhealthy CPU percentage
# (per Worker Group + Worker) before the alert should trigger.
[set_alert_threshold_unhealthy_cpu_usage_pct]
definition = 75
iseval = 0

# criblvision alert - cluster communication errors

# Cluster communication error message count (per Worker Group + Worker) above
# which the alert should trigger.
[set_alert_threshold_cluster_communication_errors]
definition = 0
iseval = 0

# criblvision alert - destinations experiencing backpressure

# Backpressure message count (per Worker Group + Worker) above which the alert
# should trigger.
[set_alert_threshold_backpressure]
definition = 0
iseval = 0

# criblvision alert - no input from sources

# Threshold of times a Source has not received any events (per Worker Group +
# Source) before the alert should trigger.
[set_alert_threshold_no_source_thruput_pct]
definition = 75
iseval = 0

# criblvision alert - no output from destinations

# Threshold of times a Destination has not sent any events (per Worker Group +
# Destination) before the alert should trigger.
[set_alert_threshold_no_destination_thruput_pct]
definition = 75
iseval = 0

# criblvision alert - opened connections over threshold

# Open connection count at which a host is deemed unhealthy — TODO confirm
# against the alert's search.
[set_alert_unhealthy_open_connections_count]
definition = 200
iseval = 0

# Threshold of times a host has reported above the unhealthy open-connection
# count before the alert should trigger.
[set_alert_threshold_unhealthy_open_connections_pct]
definition = 75
iseval = 0

# criblvision alert - persistent queue initialized

# Number of persistent-queue initialization events before the alert should
# trigger — presumably per Destination; verify against the alert's search.
[set_alert_threshold_pq_initialized_count]
definition = 1
iseval = 0

# criblvision alert - rss memory usage within threshold

# Lower limit (MB) of the memory usage range that should trigger the alert.
[set_alert_lower_limit_unhealthy_memory_usage_mb]
definition = 8192
iseval = 0

# Threshold of times a host has reported memory usage within the unhealthy
# range (per Worker Group + host) before the alert should trigger.
[set_alert_threshold_unhealthy_memory_usage_mb_pct]
definition = 75
iseval = 0

# Upper limit (MB) of the memory usage range that should trigger the alert.
[set_alert_upper_limit_unhealthy_memory_usage_mb]
definition = 9216
iseval = 0

# criblvision alert - unhealthy destinations

# Threshold of times a Destination has reported as unhealthy (per Worker
# Group + Destination) before the alert should trigger.
[set_alert_threshold_unhealthy_destinations_pct]
definition = 75
iseval = 0
170 |
171 | # criblvision alert - unhealthy sources
172 |
173 | [set_alert_threshold_unhealthy_sources_pct]
174 | definition = 75
175 | iseval = 0
176 |
177 | # criblvision alert - worker process restarted
178 |
179 | [set_alert_threshold_worker_process_restarts]
180 | definition = 0
181 | iseval = 0
182 |
183 | ### REPORT MACROS ###
184 |
185 | # populate cribl stream asset lookup
186 |
187 | [set_missing_asset_relative_time]
188 | definition = "-15m@m"
189 | iseval = 0
190 |
191 | [set_shutdown_activity_difference_secs]
192 | definition = 300
193 | iseval = 0
194 |
195 | [set_unknown_worker_group_value]
196 | definition = "n/a"
197 | iseval = 0
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/appserver/static/javascript/views/app.js:
--------------------------------------------------------------------------------
import * as Setup from './store_criblvision_properties.js';

define(['react', 'splunkjs/splunk'], function(react, splunk_js_sdk){
  const e = react.createElement;

  // Setup page for the CriblVision app. Collects the index, sourcetype,
  // metrics prefix, and environment field name values and persists them via
  // Setup.perform() when the user clicks "Save Configuration".
  class CriblVisionSetupPage extends react.Component {
    constructor(props){
      super(props);

      // State keys double as the input "name" attributes below.
      this.state = {
        set_cribl_internal_log_index: 'cribl_logs',
        set_cribl_log_sourcetype: '',
        set_cribl_metrics_index: 'cribl_metrics',
        set_cribl_metrics_prefix: 'cribl.logstream.',
        set_cribl_environment_field_name: 'env'
      };

      this.handleChange = this.handleChange.bind(this);
      this.handleSubmit = this.handleSubmit.bind(this);
    }

    // Shared controlled-input handler; setState merges shallowly, so only
    // the changed key needs to be supplied.
    handleChange(event){
      this.setState({ [event.target.name]: event.target.value });
    }

    // Persists the form state through the Splunk JS SDK service.
    async handleSubmit(event){
      event.preventDefault();

      await Setup.perform(splunk_js_sdk, this.state);
    }

    render(){
      // Shared styles keep every form row aligned. (This also fixes the
      // metrics-index input, which previously lacked the table-cell style
      // all of its siblings carried.)
      const labelStyle = { display: 'table-cell', textAlign: 'left', marginBottom: '1em', paddingRight: '1em' };
      const inputStyle = { display: 'table-cell', marginBottom: '1em' };
      const formRow = (labelText, fieldName) => e('p', { style: { display: 'table-row' } }, [
        e('label', { style: labelStyle }, labelText),
        e('input', { style: inputStyle, type: 'text', name: fieldName, value: this.state[fieldName], onChange: this.handleChange })
      ]);

      // NOTE(review): children are passed as keyless arrays; React logs key
      // warnings but renders this static form correctly.
      return e('div', { style: { textAlign: 'center' } }, [
        e('h1', null, 'CriblVision Setup'),
        e('hr', { style: { borderTop: '3px solid #bbb' } }),
        e('div', null, [
          e('h2', { style: { fontWeight: 'bold' } }, 'Logs and Metrics'),
          e('p', null, [
            'Cribl internal logs and metrics must be enabled and forwarded to Splunk in order for all of the panels to populate with events. Refer to the documentation ',
            e('a', { href: 'https://docs.cribl.io/stream/sources-cribl-internal/#configuring-cribl-internal-logsmetrics-as-a-datasource', target: '_blank' }, 'here'),
            ' for instructions on configuring this source. Be sure to configure a corresponding index and sourcetype field for logs and a corresponding index for metrics.'
          ])
        ]),
        e('div', null, [
          e('h2', { style: { fontWeight: 'bold' } }, 'Macros'),
          e('p', null, 'This app ships with four macros which must be configured in accordance with your Splunk index naming schema:'),
          e('div', { style: { display: 'block' } }, e('form', { style: { display: 'table', marginLeft: 'auto', marginRight: 'auto' } }, [
            formRow('Cribl Internal Log Splunk Index:', 'set_cribl_internal_log_index'),
            formRow('Cribl Internal Log Sourcetype(s):', 'set_cribl_log_sourcetype'),
            formRow('Cribl Metrics Splunk Index:', 'set_cribl_metrics_index'),
            formRow('Cribl Metrics Prefix:', 'set_cribl_metrics_prefix'),
            formRow('Cribl Environment Field Name:', 'set_cribl_environment_field_name')
          ]))
        ]),
        e('div', null, [
          e('h2', { style: { fontWeight: 'bold' } }, 'Leader Logs'),
          e('p', null, [
            'Some of the views in this app will require Leader logs to be forwarded onto Splunk. In distributed Cribl Stream environments, Leader logs are currently ',
            e('b', null, 'NOT'),
            ' sent via our internal source. You will need to install an Edge node on your Leader node and configure local log collection via a file monitoring input. Configure the file monitor input to collect logs by configuring the filename allow-list modal to ',
            e('code', null, '/opt/cribl/log/*.log'),
            '. For more information on how to deploy an Edge node, please refer to our documentation ',
            e('a', { href: 'https://docs.cribl.io/edge/deploy-planning', target: '_blank' }, 'here'),
            '. ',
            e('b', null, 'NOTE:'),
            ' When deploying the Edge Node to your Leader Node, we recommend having a separate Fleet just for this Node. Be sure to disable all other inputs on that Edge Node except for file monitor inputs.'
          ])
        ]),
        e('div', null, [
          e('h2', { style: { fontWeight: 'bold' } }, 'Cribl Stream Assets Lookup'),
          e('p', null, 'The CriblVision app utilizes a lookup to add context to its searches. This needs to be created to have the dashboards, searches, and reports work as expected. To create this lookup, either follow the instructions on the Welcome page after saving this configuration, or click the "Populate Cribl Stream Asset Lookup" item in the navigation bar.')
        ]),
        e('hr', { style: { borderTop: '3px solid #bbb' } }),
        e('button', { className: 'btn btn-primary', onClick: this.handleSubmit }, 'Save Configuration')
      ]);
    }
  }

  return e(CriblVisionSetupPage);
});
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/appserver/static/javascript/views/splunk_helpers.js:
--------------------------------------------------------------------------------
1 | import { promisify } from './util.js'
2 |
3 | // ----------------------------------
4 | // Splunk JS SDK Helpers
5 | // ----------------------------------
6 | // ---------------------
7 | // Process Helpers
8 | // ---------------------
/**
 * Ensures a configuration file and stanza exist in the criblvision app
 * namespace, then writes the given properties onto that stanza.
 */
async function update_configuration_file(
    splunk_js_sdk_service,
    configuration_file_name,
    stanza_name,
    properties,
) {
    // Accessor for the app-scoped configuration collection.
    let configurations = splunk_js_sdk_service.configurations({
        owner: 'nobody',
        app: 'criblvision',
        sharing: 'app',
    });
    configurations = await promisify(configurations.fetch)();

    // Create the configuration file when it does not exist yet.
    if (!does_configuration_file_exist(configurations, configuration_file_name)) {
        await create_configuration_file(configurations, configuration_file_name);

        // BUG WORKAROUND: the SDK client does not refresh itself after create.
        configurations = await promisify(configurations.fetch)();
    }

    // Fetch the accessor for the (now guaranteed) configuration file.
    let file_accessor = get_configuration_file(configurations, configuration_file_name);
    file_accessor = await promisify(file_accessor.fetch)();

    // Create the target stanza when it is missing.
    if (!does_stanza_exist(file_accessor, stanza_name)) {
        await create_stanza(file_accessor, stanza_name);
    }
    // Refresh so a freshly created stanza becomes visible.
    file_accessor = await promisify(file_accessor.fetch)();

    // Fetch the stanza accessor itself.
    let stanza_accessor = get_configuration_file_stanza(file_accessor, stanza_name);
    stanza_accessor = await promisify(stanza_accessor.fetch)();

    // No per-property existence check is needed: update() both creates
    // missing properties and changes existing ones.
    await update_stanza_properties(stanza_accessor, properties);
};
79 |
// Creates the named configuration file within the fetched collection and
// returns a promise for the SDK's create call.
function create_configuration_file(
    configurations_accessor,
    configuration_file_name,
) {
    const do_create = promisify(configurations_accessor.create);
    return do_create(configuration_file_name);
};
86 |
87 | // ---------------------
88 | // Existence Functions
89 | // ---------------------
// Returns true when the fetched configurations collection already contains
// a configuration file with the given name.
function does_configuration_file_exist(
    configurations_accessor,
    configuration_file_name,
) {
    const entries = configurations_accessor.list();
    return entries.some((entry) => entry.name === configuration_file_name);
};
108 |
// Returns true when the fetched configuration file already contains a
// stanza with the given name.
function does_stanza_exist(
    configuration_file_accessor,
    stanza_name,
) {
    const stanzas = configuration_file_accessor.list();
    return stanzas.some((stanza) => stanza.name === stanza_name);
};
126 |
// Returns true when the stanza's property map contains a property with the
// given name (own enumerable keys only, matching the original iteration).
function does_stanza_property_exist(
    configuration_stanza_accessor,
    property_name,
) {
    const property_names = Object.keys(configuration_stanza_accessor.properties());
    return property_names.includes(property_name);
};
144 |
145 | // ---------------------
146 | // Retrieval Functions
147 | // ---------------------
// Returns the accessor for the named configuration file. The namespace
// object is intentionally empty: the collection was fetched with the app
// namespace already applied.
function get_configuration_file(
    configurations_accessor,
    configuration_file_name,
) {
    return configurations_accessor.item(configuration_file_name, {});
};
161 |
// Returns the accessor for the named stanza within a configuration file.
// The namespace object is intentionally empty (namespace not provided).
function get_configuration_file_stanza(
    configuration_file_accessor,
    configuration_stanza_name,
) {
    return configuration_file_accessor.item(configuration_stanza_name, {});
};
175 |
// Unimplemented placeholder: ignores both arguments and always returns null.
// Never called by update_configuration_file; kept for interface symmetry
// with the other retrieval helpers.
function get_configuration_file_stanza_property(
    configuration_file_accessor,
    configuration_file_name,
) {
    return null;
};
182 |
// Creates a new stanza in the configuration file and returns a promise for
// the SDK's create call.
function create_stanza(
    configuration_file_accessor,
    new_stanza_name,
) {
    const do_create = promisify(configuration_file_accessor.create);
    return do_create(new_stanza_name);
};
189 |
// Writes the given property map onto the stanza; update() creates missing
// properties and overwrites existing ones.
function update_stanza_properties(
    configuration_stanza_accessor,
    new_stanza_properties,
) {
    const do_update = promisify(configuration_stanza_accessor.update);
    return do_update(new_stanza_properties);
};
196 |
197 | export {
198 | update_configuration_file,
199 | }
200 |
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/README.md:
--------------------------------------------------------------------------------
1 | # Caution: These dashboards are not current. The latest version of these dashboards can be found in the CriblVision Search pack here: https://packs.cribl.io/packs/cribl-criblvision-for-stream There is no need to follow the instructions in this readme in order to use the search pack.
2 |
3 | Please use the search pack instead of these dashboards.
4 |
5 |
6 | # Welcome to CriblVision for CriblSearch.
7 |
Born from the same minds that brought you the CriblVision app for Splunk. You asked and we delivered!
9 |
10 | This Collection of Cribl Search dashboards and alerts was designed as a troubleshooting tool and monitoring aid for Cribl administrators. It was created by Cribl Professional Services to help customers troubleshoot and monitor their own Cribl Cloud Hybrid deployments. There are several troubleshooting dashboards tailored to certain product areas in which we have seen the highest number of recurring issues. While we intend to help you troubleshoot your own Cribl deployment, this collection will always be a continual "work in progress". It should always be used with the Cribl Monitoring Console and associated views.
11 |
12 | Included in this collection are the following:
13 |
14 | ## **Dashboards**
15 |
16 | **Health Check Dashboard** - An overview of the health of your deployment with the most common items you should be monitoring.
17 |
18 |
19 |
20 | **Log statistics** - A dashboard designed to help you easily drill down into log messages generated by your worker nodes and surface any trends that will aid in troubleshooting issues.
21 |
22 |
23 |
24 | **Volume Metrics** - The most requested dashboard for Cribl Search. Now you can easily see how much data is flowing through your deployment by route and pipeline along with reduction amounts and percentages for each.
25 |
26 |
27 |
28 | **Thruput Introspection** - Easily drill down into throughput metrics for your sources and destinations and correlate any spikes or dips with CPU usage.
29 |
30 |
31 |
32 | ## **Alerts**
33 |
34 |
35 |
36 | Another highly requested feature by the community was to have pre-configured alerts available to alert and notify you automatically about the health of your deployment. Included alerts are below:
37 |
38 |
39 |
40 | CriblVision Alert - CPU Usage Over Threshold
41 |
42 | CriblVision Alert - RSS Memory Usage
43 |
44 | CriblVision Alert - Worker Process Restarted
45 |
46 | CriblVision Alert - Destination Persistent Queue Initialized
47 |
48 | CriblVision Alert - Unhealthy Destinations
49 |
50 | CriblVision Alert - Unhealthy Sources
51 |
52 | CriblVision Alert - Cluster Communication Errors
53 |
54 |
55 |
56 | # Getting started
57 |
58 |
59 |
60 | ## **Installation instructions**
61 |
62 | ## Requirements
63 |
This is intended to be used with Cribl Cloud's product suite. You will need Owner/Admin permissions in your Cribl Cloud deployment and access to Cribl Search, Stream, and Lake. This is currently only intended for use with hybrid worker groups. Support for other types will be coming soon.
65 |
66 | ### Configure Data Lake destinations
67 |
68 |
69 |
70 | Navigate to your hybrid worker group's destinations and click on Data Lakes -> Cribl Lake
71 |
72 | You will create 2 Cribl-lake destinations with the following names
73 |
74 | -cribl_lake_logs -> sending to the "default_logs" data set
75 |
76 | -cribl_lake_metrics -> sending to the "default_metrics" data set
77 |
78 |
79 |
80 | ### Configure internal metrics and sources
81 |
82 | -Navigate to your hybrid worker group's sources and enable both Cribl internal logs and metrics
83 | -Navigate to your Cribl Internal logs source, click on fields, and add a new field with a name of group and a value that exactly matches your worker group's name.
84 |
85 | ### Configure routes
86 |
-Configure 2 routes. One will send data from the Cribl internal logs source, via a pass-thru pipeline, to the cribl_lake_logs destination. The other will send data from the Cribl internal metrics source, via a pass-thru pipeline, to the cribl_lake_metrics destination.
88 |
89 |
90 |
91 |
92 |
93 | ### Configure search Macros
94 |
95 |
96 |
97 | In Cribl Search, navigate to settings on the bottom left.
98 |
99 | Click on Macros -> Add two macros with the following names and definitions
100 |
101 |
102 | | Name |Definition |
103 | |--|--|
104 | | set_cribl_metrics_dataset | dataset="default_metrics" |
105 | |set_cribl_logs_dataset|dataset="default_logs"|
106 |
107 |
108 |
109 | ### Populate Environment Assets lookup
110 |
111 |
112 |
The dropdowns in these dashboards rely on a lookup called environment_assets, which you will have to create. Follow the instructions below.
114 |
115 |
116 |
117 | Navigate to search home and run the following search for the last hour:
118 |
119 |
120 |
121 | dataset="default_metrics" | distinct group, host
122 |
123 |
124 |
Export the search results as a CSV. Once the CSV has been downloaded, rename it to environment_assets.csv
126 |
127 |
128 |
129 | Navigate to Data -> lookups -> add lookup file -> upload file and select the CSV that you just renamed.
130 |
131 |
132 |
133 |
134 |
135 | ### Installing Dashboards and Alerts
136 | **Dashboards**
137 |
138 | Inside of this repo, you will see two folders: dashboards and alerts.
139 |
140 | The dashboard folder contains JSON files for each of the dashboards. For each of these JSON files you will have to do the following:
141 |
142 |
143 |
144 | Navigate to Dashboards -> Click on Add Dashboard -> Name the dashboard with the same name as the JSON file -> click save -> Click on the ellipses on the upper right of the created dashboard and click "edit as JSON" -> click import and navigate to the JSON file -> click save and exit -> reload the dashboard once more for all changes to take effect.
145 |
146 |
147 |
148 | Do this for every single JSON file in the dashboards directory.
149 |
150 |
151 |
152 | **Alerts**
153 |
154 |
155 |
156 | The Alerts folder of this repo contains .md files with the actual search query. For each of these files you will have to manually create each saved search.
157 |
158 |
159 |
160 | Navigate to Search Home -> click Saved -> Add Search -> In the new Search Modal paste the search query from the text file into the search string box. Copy the name of the .md file in the name of the new search. Click Save.
161 |
162 |
163 |
164 | Inside each alert file, you will find further instructions for setting up the search to run a schedule.
165 |
166 |
167 | # Acknowledgements
168 |
169 | **Author**: Johan Woger - Senior Professional Services Engineer - jwoger@cribl.io
170 |
171 | **Contributors**
172 | Jon Rust (Cribl)
173 | David Sheridan (GKC)
174 | Jeremy Prescott (Cribl)
175 | Andrew Duca (Cribl)
176 | Eugene Katz (Cribl)
177 | Christopher Owens (Cribl)
178 | David Maislin (Cribl)
179 |
180 | ## License
181 |
182 | This project is licensed under the GNU General Public License (GPL). You are free to use, modify, and distribute this software under the terms of the license.
183 |
184 |
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/data/ui/views/job_inspector.xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/dashboards/CriblVision - Log Statistics.json:
--------------------------------------------------------------------------------
1 | {
2 | "elements": [
3 | {
4 | "config": {
5 | "defaultValue": "*",
6 | "field": "group"
7 | },
8 | "search": {
9 | "type": "inline",
10 | "query": "dataset=\"$vt_lookups\" lookupFile=\"environment_assets\" | summarize count() by group | sort by group desc",
11 | "earliest": 0,
12 | "latest": "now"
13 | },
14 | "id": "rkljaygth",
15 | "inputId": "wg",
16 | "type": "input.dropdown",
17 | "layout": {
18 | "x": 3,
19 | "y": 0,
20 | "w": 3,
21 | "h": 2
22 | },
23 | "title": "Worker Group"
24 | },
25 | {
26 | "config": {
27 | "field": "host",
28 | "defaultValue": "*"
29 | },
30 | "search": {
31 | "type": "inline",
32 | "query": "dataset=\"$vt_lookups\" lookupFile=\"environment_assets\" | where group == \"$wg\" | summarize count() by host | sort by host desc",
33 | "earliest": 0,
34 | "latest": "now"
35 | },
36 | "id": "cpcyglxxn",
37 | "inputId": "wn",
38 | "type": "input.dropdown",
39 | "layout": {
40 | "x": 6,
41 | "y": 0,
42 | "w": 3,
43 | "h": 2
44 | },
45 | "title": "Worker Node"
46 | },
47 | {
48 | "config": {
49 | "defaultValue": {
50 | "earliest": "-1h",
51 | "latest": "now",
52 | "timezone": "local"
53 | }
54 | },
55 | "id": "ypqobwwf6",
56 | "inputId": "timepicker",
57 | "type": "input.timerange",
58 | "layout": {
59 | "x": 0,
60 | "y": 0,
61 | "w": 3,
62 | "h": 2
63 | },
64 | "title": "Timerange"
65 | },
66 | {
67 | "config": {
68 | "onClickAction": {
69 | "type": "Run a new search",
70 | "search": "dataset=\"diag_cribl_log\" case=\"$case\" diagfile=\"$input_2\" level=\"error\" channel=\"$value$\""
71 | },
72 | "showRowNumbers": false,
73 | "colorPalette": 0,
74 | "colorPaletteReversed": false,
75 | "data": {
76 | "connectNulls": "Leave gaps",
77 | "stack": false
78 | },
79 | "xAxis": {
80 | "labelOrientation": 0,
81 | "position": "Bottom"
82 | },
83 | "yAxis": {
84 | "position": "Left",
85 | "scale": "Linear",
86 | "splitLine": true
87 | },
88 | "legend": {
89 | "position": "Right",
90 | "truncate": true
91 | },
92 | "horizontalChart": false,
93 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"error\" | summarize cnt=count() by channel | sort by cnt desc | render table",
94 | "axis": {}
95 | },
96 | "search": {
97 | "type": "inline",
98 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"error\" | summarize cnt=count() by channel | sort by cnt desc | render table",
99 | "earliest": "$timepicker.earliest",
100 | "latest": "$timepicker.latest",
101 | "timezone": "$timepicker.timezone"
102 | },
103 | "horizontalChart": false,
104 | "id": "9y6hylgv0",
105 | "type": "list.table",
106 | "layout": {
107 | "x": 0,
108 | "y": 0,
109 | "w": 4,
110 | "h": 3
111 | },
112 | "title": "Error counts by channel"
113 | },
114 | {
115 | "config": {
116 | "onClickAction": {
117 | "type": "Run a new search",
118 | "search": "dataset=\"diag_cribl_log\" case=\"$case\" diagfile=\"$input_2\" level=\"error\" channel=\"$value$\""
119 | },
120 | "showRowNumbers": false,
121 | "colorPalette": 0,
122 | "colorPaletteReversed": false,
123 | "data": {
124 | "connectNulls": "Leave gaps",
125 | "stack": false
126 | },
127 | "xAxis": {
128 | "labelOrientation": 0,
129 | "position": "Bottom"
130 | },
131 | "yAxis": {
132 | "position": "Left",
133 | "scale": "Linear",
134 | "splitLine": true
135 | },
136 | "legend": {
137 | "position": "Right",
138 | "truncate": true
139 | },
140 | "horizontalChart": false,
141 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"warn\" | summarize cnt=count() by channel |sort by cnt desc | render table",
142 | "axis": {}
143 | },
144 | "search": {
145 | "type": "inline",
146 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"warn\" | summarize cnt=count() by channel |sort by cnt desc | render table",
147 | "earliest": "$timepicker.earliest",
148 | "latest": "$timepicker.latest",
149 | "timezone": "$timepicker.timezone"
150 | },
151 | "horizontalChart": false,
152 | "id": "632i8leyf",
153 | "type": "list.table",
154 | "layout": {
155 | "x": 4,
156 | "y": 0,
157 | "w": 4,
158 | "h": 3
159 | },
160 | "title": "Warn counts by Channel"
161 | },
162 | {
163 | "config": {
164 | "onClickAction": {
165 | "type": "Run a new search",
166 | "search": "dataset=\"diag_cribl_log\" case=\"$case\" diagfile=\"$input_2\" level=\"error\" channel=\"$value$\""
167 | },
168 | "showRowNumbers": false,
169 | "horizontalChart": false,
170 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"info\" | summarize cnt=count() by channel |sort by cnt desc | render table",
171 | "axis": {}
172 | },
173 | "search": {
174 | "type": "inline",
175 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"info\" | summarize cnt=count() by channel |sort by cnt desc | render table",
176 | "earliest": "$timepicker.earliest",
177 | "latest": "$timepicker.latest",
178 | "timezone": "$timepicker.timezone"
179 | },
180 | "horizontalChart": false,
181 | "id": "caqjp1wim",
182 | "type": "list.table",
183 | "layout": {
184 | "x": 8,
185 | "y": 0,
186 | "w": 4,
187 | "h": 3
188 | },
189 | "title": "Info counts by channel"
190 | },
191 | {
192 | "config": {
193 | "colorPalette": 0,
194 | "colorPaletteReversed": false,
195 | "customData": {
196 | "trellis": false,
197 | "connectNulls": "Leave gaps",
198 | "stack": false,
199 | "dataFields": [],
200 | "seriesCount": 11
201 | },
202 | "xAxis": {
203 | "labelOrientation": 0,
204 | "position": "Bottom"
205 | },
206 | "yAxis": {
207 | "position": "Left",
208 | "scale": "Linear",
209 | "splitLine": true
210 | },
211 | "legend": {
212 | "position": "Right",
213 | "truncate": false
214 | },
215 | "onClickAction": {
216 | "type": "Run a new search",
217 | "search": "dataset=\"diag_cribl_log\" case=\"$case\" diagfile=\"$input_2\" level=\"error\" channel=\"$value$\""
218 | },
219 | "data": {
220 | "connectNulls": "Leave gaps",
221 | "stack": false
222 | },
223 | "timestats": true,
224 | "horizontalChart": false,
225 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"error\" | timestats span=10m count() by channel",
226 | "axis": {}
227 | },
228 | "search": {
229 | "type": "inline",
230 | "query": "${set_cribl_logs_dataset} host=\"$wn\" level=\"error\" | timestats span=10m count() by channel",
231 | "earliest": "$timepicker.earliest",
232 | "latest": "$timepicker.latest",
233 | "timezone": "$timepicker.timezone"
234 | },
235 | "horizontalChart": false,
236 | "id": "t65f3ize8",
237 | "type": "chart.line",
238 | "layout": {
239 | "x": 0,
240 | "y": 3,
241 | "w": 12,
242 | "h": 4
243 | },
244 | "title": "Error count by channel"
245 | }
246 | ]
247 | }
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/default/data/ui/views/log_viewer.xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/criblvision-for-criblsearch/dashboards/CriblVision - Thruput Introspection.json:
--------------------------------------------------------------------------------
1 | {
2 | "elements": [
3 | {
4 | "config": {
5 | "defaultValue": "*",
6 | "field": "group"
7 | },
8 | "search": {
9 | "type": "inline",
10 | "query": "dataset=\"$vt_lookups\" lookupFile=\"environment_assets\" | summarize count() by group | sort by group desc",
11 | "earliest": 0,
12 | "latest": "now"
13 | },
14 | "id": "rkljaygth",
15 | "inputId": "wg",
16 | "type": "input.dropdown",
17 | "layout": {
18 | "x": 3,
19 | "y": 0,
20 | "w": 3,
21 | "h": 2
22 | },
23 | "title": "Worker Group"
24 | },
25 | {
26 | "config": {
27 | "field": "host",
28 | "defaultValue": "*"
29 | },
30 | "search": {
31 | "type": "inline",
32 | "query": "dataset=\"$vt_lookups\" lookupFile=\"environment_assets\" | where group == \"$wg\" | summarize count() by host | sort by host desc",
33 | "earliest": 0,
34 | "latest": "now"
35 | },
36 | "id": "cpcyglxxn",
37 | "inputId": "wn",
38 | "type": "input.dropdown",
39 | "layout": {
40 | "x": 6,
41 | "y": 0,
42 | "w": 3,
43 | "h": 2
44 | },
45 | "title": "Worker Node"
46 | },
47 | {
48 | "config": {
49 | "defaultValue": {
50 | "earliest": "-1h",
51 | "latest": "now",
52 | "timezone": "local"
53 | }
54 | },
55 | "id": "ypqobwwf6",
56 | "inputId": "timepicker",
57 | "type": "input.timerange",
58 | "layout": {
59 | "x": 0,
60 | "y": 0,
61 | "w": 3,
62 | "h": 2
63 | },
64 | "title": "Timerange"
65 | },
66 | {
67 | "config": {
68 | "values": [
69 | "1m",
70 | "5m",
71 | "15m",
72 | "1h",
73 | "4h",
74 | "1d"
75 | ],
76 | "axis": {},
77 | "defaultValue": "5m"
78 | },
79 | "search": {
80 | "type": "values",
81 | "values": [
82 | "1m",
83 | "5m",
84 | "15m",
85 | "1h",
86 | "4h",
87 | "1d"
88 | ]
89 | },
90 | "id": "bxqzd532j",
91 | "inputId": "span",
92 | "type": "input.dropdown",
93 | "layout": {
94 | "x": 0,
95 | "y": 2,
96 | "w": 3,
97 | "h": 2
98 | },
99 | "title": "TimeStats Bucket Span"
100 | },
101 | {
102 | "config": {
103 | "shouldApplyUserChartSettings": false,
104 | "series": [
105 | {
106 | "yAxisField": "cribl_http:Cloud_default_group",
107 | "name": "cribl_http:Cloud_default_group",
108 | "color": {
109 | "paletteIndex": 0,
110 | "colorIndex": 0
111 | }
112 | }
113 | ],
114 | "legend": {
115 | "truncate": true
116 | },
117 | "timestats": true,
118 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.out_events\" | timestats span=$span sum(_value) by output",
119 | "axis": {}
120 | },
121 | "search": {
122 | "type": "inline",
123 | "earliest": "$timepicker.earliest",
124 | "latest": "$timepicker.latest",
125 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.out_events\" | timestats span=$span sum(_value) by output",
126 | "timezone": "$timepicker.timezone"
127 | },
128 | "id": "7tff87y67",
129 | "type": "chart.line",
130 | "layout": {
131 | "x": 0,
132 | "y": 0,
133 | "w": 12,
134 | "h": 4
135 | },
136 | "title": "Events out"
137 | },
138 | {
139 | "config": {
140 | "shouldApplyUserChartSettings": false,
141 | "series": [
142 | {
143 | "yAxisField": "cribl_http:Cloud_default_group",
144 | "name": "cribl_http:Cloud_default_group",
145 | "color": {
146 | "paletteIndex": 0,
147 | "colorIndex": 0
148 | }
149 | }
150 | ],
151 | "legend": {
152 | "truncate": true
153 | },
154 | "timestats": true,
155 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.out_bytes\" | timestats span=$span sum(_value) by output",
156 | "axis": {}
157 | },
158 | "search": {
159 | "type": "inline",
160 | "earliest": "$timepicker.earliest",
161 | "latest": "$timepicker.latest",
162 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.out_bytes\" | timestats span=$span sum(_value) by output",
163 | "timezone": "$timepicker.timezone"
164 | },
165 | "id": "ddk86zjcl",
166 | "type": "chart.line",
167 | "layout": {
168 | "x": 0,
169 | "y": 4,
170 | "w": 12,
171 | "h": 4
172 | },
173 | "title": "Bytes out"
174 | },
175 | {
176 | "config": {
177 | "customData": {
178 | "stack": true,
179 | "seriesCount": 10
180 | },
181 | "series": [
182 | {
183 | "yAxisField": "w1",
184 | "name": "w1",
185 | "color": {
186 | "paletteIndex": 0,
187 | "colorIndex": 0
188 | }
189 | },
190 | {
191 | "yAxisField": "w7",
192 | "name": "w7",
193 | "color": {
194 | "paletteIndex": 0,
195 | "colorIndex": 1
196 | }
197 | },
198 | {
199 | "yAxisField": "w8",
200 | "name": "w8",
201 | "color": {
202 | "paletteIndex": 0,
203 | "colorIndex": 2
204 | }
205 | },
206 | {
207 | "yAxisField": "w0",
208 | "name": "w0",
209 | "color": {
210 | "paletteIndex": 0,
211 | "colorIndex": 3
212 | }
213 | },
214 | {
215 | "yAxisField": "w9",
216 | "name": "w9",
217 | "color": {
218 | "paletteIndex": 0,
219 | "colorIndex": 4
220 | }
221 | },
222 | {
223 | "yAxisField": "w3",
224 | "name": "w3",
225 | "color": {
226 | "paletteIndex": 0,
227 | "colorIndex": 5
228 | }
229 | },
230 | {
231 | "yAxisField": "w5",
232 | "name": "w5",
233 | "color": {
234 | "paletteIndex": 0,
235 | "colorIndex": 6
236 | }
237 | },
238 | {
239 | "yAxisField": "w2",
240 | "name": "w2",
241 | "color": {
242 | "paletteIndex": 0,
243 | "colorIndex": 7
244 | }
245 | },
246 | {
247 | "yAxisField": "w6",
248 | "name": "w6",
249 | "color": {
250 | "paletteIndex": 0,
251 | "colorIndex": 8
252 | }
253 | },
254 | {
255 | "yAxisField": "w4",
256 | "name": "w4",
257 | "color": {
258 | "paletteIndex": 0,
259 | "colorIndex": 9
260 | }
261 | }
262 | ],
263 | "legend": {
264 | "truncate": true
265 | },
266 | "timestats": true,
267 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.system.cpu_perc\" | timestats span=$span max(_value) by cribl_wp",
268 | "axis": {}
269 | },
270 | "search": {
271 | "type": "inline",
272 | "earliest": "$timepicker.earliest",
273 | "latest": "$timepicker.latest",
274 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.system.cpu_perc\" | timestats span=$span max(_value) by cribl_wp",
275 | "timezone": "$timepicker.timezone"
276 | },
277 | "id": "u5zy9c3em",
278 | "type": "chart.column",
279 | "layout": {
280 | "x": 0,
281 | "y": 8,
282 | "w": 12,
283 | "h": 4
284 | },
285 | "title": "CPU usage by worker process"
286 | },
287 | {
288 | "config": {
289 | "series": [
290 | {
291 | "yAxisField": "cribl:CriblLogs",
292 | "name": "cribl:CriblLogs",
293 | "color": {
294 | "paletteIndex": 0,
295 | "colorIndex": 0
296 | }
297 | },
298 | {
299 | "yAxisField": "cribl:CriblMetrics",
300 | "name": "cribl:CriblMetrics",
301 | "color": {
302 | "paletteIndex": 0,
303 | "colorIndex": 1
304 | }
305 | }
306 | ],
307 | "legend": {
308 | "truncate": true
309 | },
310 | "timestats": true,
311 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.in_events\" | timestats span=$span sum(_value) by input",
312 | "axis": {}
313 | },
314 | "search": {
315 | "type": "inline",
316 | "earliest": "$timepicker.earliest",
317 | "latest": "$timepicker.latest",
318 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.in_events\" | timestats span=$span sum(_value) by input",
319 | "timezone": "$timepicker.timezone"
320 | },
321 | "id": "0ymfi2lit",
322 | "type": "chart.line",
323 | "layout": {
324 | "x": 0,
325 | "y": 12,
326 | "w": 12,
327 | "h": 4
328 | },
329 | "title": "Total events in"
330 | },
331 | {
332 | "config": {
333 | "shouldApplyUserChartSettings": false,
334 | "series": [],
335 | "timestats": true,
336 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.in_bytes\" | timestats span=$span sum(_value) by input",
337 | "axis": {}
338 | },
339 | "search": {
340 | "type": "inline",
341 | "earliest": "$timepicker.earliest",
342 | "latest": "$timepicker.latest",
343 | "query": "dataset=\"default_metrics\" group=\"$wg\" host=\"$wn\" _metric=\"cribl.logstream.total.in_bytes\" | timestats span=$span sum(_value) by input",
344 | "timezone": "$timepicker.timezone"
345 | },
346 | "id": "2a7r2mnwi",
347 | "type": "chart.column",
348 | "layout": {
349 | "x": 0,
350 | "y": 16,
351 | "w": 12,
352 | "h": 4
353 | },
354 | "title": "Bytes in"
355 | }
356 | ]
357 | }
--------------------------------------------------------------------------------
/criblvision-for-splunk/criblvision/appserver/static/javascript/vendor/react.production.min.js:
--------------------------------------------------------------------------------
1 | /** @license React v16.13.1
2 | * react.production.min.js
3 | *
4 | * Copyright (c) Facebook, Inc. and its affiliates.
5 | *
6 | * This source code is licensed under the MIT license found in the
7 | * LICENSE file in the root directory of this source tree.
8 | */
9 | 'use strict';(function(d,r){"object"===typeof exports&&"undefined"!==typeof module?r(exports):"function"===typeof define&&define.amd?define(["exports"],r):(d=d||self,r(d.React={}))})(this,function(d){function r(a){for(var b="https://reactjs.org/docs/error-decoder.html?invariant="+a,c=1;cC.length&&C.push(a)}function O(a,b,c,g){var e=typeof a;if("undefined"===e||"boolean"===e)a=null;var d=!1;if(null===a)d=!0;else switch(e){case "string":case "number":d=!0;break;case "object":switch(a.$$typeof){case x:case xa:d=!0}}if(d)return c(g,a,""===b?"."+P(a,0):b),1;d=0;b=""===b?".":b+":";if(Array.isArray(a))for(var f=0;f>>1,e=a[g];if(void 0!==
15 | e&&0D(f,c))void 0!==k&&0>D(k,f)?(a[g]=k,a[h]=c,g=h):(a[g]=f,a[d]=c,g=d);else if(void 0!==k&&0>D(k,c))a[g]=k,a[h]=c,g=h;else break a}}return b}return null}function D(a,b){var c=a.sortIndex-b.sortIndex;return 0!==c?c:a.id-b.id}function F(a){for(var b=n(u);null!==
16 | b;){if(null===b.callback)E(u);else if(b.startTime<=a)E(u),b.sortIndex=b.expirationTime,S(p,b);else break;b=n(u)}}function T(a){y=!1;F(a);if(!v)if(null!==n(p))v=!0,z(U);else{var b=n(u);null!==b&&G(T,b.startTime-a)}}function U(a,b){v=!1;y&&(y=!1,V());H=!0;var c=m;try{F(b);for(l=n(p);null!==l&&(!(l.expirationTime>b)||a&&!W());){var g=l.callback;if(null!==g){l.callback=null;m=l.priorityLevel;var e=g(l.expirationTime<=b);b=q();"function"===typeof e?l.callback=e:l===n(p)&&E(p);F(b)}else E(p);l=n(p)}if(null!==
17 | l)var d=!0;else{var f=n(u);null!==f&&G(T,f.startTime-b);d=!1}return d}finally{l=null,m=c,H=!1}}function oa(a){switch(a){case 1:return-1;case 2:return 250;case 5:return 1073741823;case 4:return 1E4;default:return 5E3}}var f="function"===typeof Symbol&&Symbol.for,x=f?Symbol.for("react.element"):60103,xa=f?Symbol.for("react.portal"):60106,Aa=f?Symbol.for("react.fragment"):60107,Ba=f?Symbol.for("react.strict_mode"):60108,Ca=f?Symbol.for("react.profiler"):60114,Da=f?Symbol.for("react.provider"):60109,
18 | Ea=f?Symbol.for("react.context"):60110,Fa=f?Symbol.for("react.forward_ref"):60112,Ga=f?Symbol.for("react.suspense"):60113,Ha=f?Symbol.for("react.memo"):60115,Ia=f?Symbol.for("react.lazy"):60116,la="function"===typeof Symbol&&Symbol.iterator,pa=Object.getOwnPropertySymbols,Ja=Object.prototype.hasOwnProperty,Ka=Object.prototype.propertyIsEnumerable,I=function(){try{if(!Object.assign)return!1;var a=new String("abc");a[5]="de";if("5"===Object.getOwnPropertyNames(a)[0])return!1;var b={};for(a=0;10>a;a++)b["_"+
19 | String.fromCharCode(a)]=a;if("0123456789"!==Object.getOwnPropertyNames(b).map(function(a){return b[a]}).join(""))return!1;var c={};"abcdefghijklmnopqrst".split("").forEach(function(a){c[a]=a});return"abcdefghijklmnopqrst"!==Object.keys(Object.assign({},c)).join("")?!1:!0}catch(g){return!1}}()?Object.assign:function(a,b){if(null===a||void 0===a)throw new TypeError("Object.assign cannot be called with null or undefined");var c=Object(a);for(var g,e=1;e=ua};f=function(){};X=function(a){0>a||125d?(a.sortIndex=e,S(u,a),null===n(p)&&a===n(u)&&(y?V():y=!0,G(T,e-d))):(a.sortIndex=c,S(p,a),v||H||(v=!0,z(U)));return a},unstable_cancelCallback:function(a){a.callback=null},unstable_wrapCallback:function(a){var b=m;return function(){var c=m;m=b;try{return a.apply(this,arguments)}finally{m=c}}},unstable_getCurrentPriorityLevel:function(){return m},
27 | unstable_shouldYield:function(){var a=q();F(a);var b=n(p);return b!==l&&null!==l&&null!==b&&null!==b.callback&&b.startTime<=a&&b.expirationTime
2 |
3 |
4 | `set_cribl_internal_log_index` `set_cribl_log_sourcetype` ((source IN ("*/audit.log", "*/audit1.log", "*/audit2.log", "*/audit3.log", "*/audit4.log") ((action=commit type=git id!="") OR (action=deploy type=groups id!=""))) OR channel=GitCommitLogs)
5 | | rex field=message "(?<username>[^:]+):\s(?<message>.*)"
6 | | eval worker_group_deploy = if(action == "deploy", id, null()),
7 | worker_group_commit = if(channel == "GitCommitLogs", worker_group, null()),
8 | instance_type_deploy = if(action == "deploy", id, null()),
9 | instance_type_commit = if(channel == "GitCommitLogs", worker_group, null()),
10 | leader_host = if(channel == "GitCommitLogs", null(), host),
11 | commit_version = case(action == "deploy", version, action == "commit", id, isnotnull(hash), substr(hash, 0, 7), true(), null()),
12 | commit_time = if(action == "commit" OR channel == "GitCommitLogs", strftime(_time, "%Y-%m-%d %H:%M:%S"), null()),
13 | deploy_time = if(action == "deploy", strftime(_time, "%Y-%m-%d %H:%M:%S"), null()),
14 | is_deployed = if(isnotnull(deploy_time), "true", null()),
15 | message = if(username == "revert to", username.": ".message, message),
16 | username = if(username != "revert to", username, null())
17 | | fields worker_group* instance_type leader_host user* commit_* deploy_time is_deployed message files.*
18 | | stats earliest(_time) AS _time values(*) AS * BY commit_version
19 | | rename leader_host AS host
20 | | search $host_filter$ $worker_group_event_filter$
21 |
22 | $time.earliest$
23 | $time.latest$
24 |
25 |
129 |
130 |
131 |
132 |