├── .dockerignore
├── .fossa.yml
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancement_request.md
│   │   └── failing_test.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows
│       └── ci_build_test.yaml
├── .gitignore
├── .rubocop.yml
├── .ruby-version
├── CLA.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── CONTRIBUTORS.md
├── Gemfile
├── Gemfile.lock
├── LICENSE
├── Makefile
├── README.md
├── Rakefile
├── VERSION
├── ci_scripts
│   ├── deploy_connector.sh
│   ├── install_dep.sh
│   └── k8s-splunk.yml
├── docker
│   ├── Dockerfile
│   ├── Gemfile
│   ├── Gemfile.lock
│   └── build.sh
├── fluent-plugin-splunk-hec.gemspec
├── lib
│   └── fluent
│       └── plugin
│           ├── out_splunk.rb
│           ├── out_splunk
│           │   ├── match_formatter.rb
│           │   └── version.rb
│           ├── out_splunk_hec.rb
│           ├── out_splunk_hec
│           │   └── version.rb
│           └── out_splunk_ingest_api.rb
└── test
    ├── fluent
    │   └── plugin
    │       ├── out_splunk_hec_test.rb
    │       └── out_splunk_ingest_api_test.rb
    ├── lib
    │   └── webmock
    │       ├── README.md
    │       └── http_lib_adapters
    │           ├── curb_adapter.rb
    │           ├── em_http_request_adapter.rb
    │           ├── excon_adapter.rb
    │           ├── http_rb_adapter.rb
    │           ├── manticore_adapter.rb
    │           ├── patron_adapter.rb
    │           └── typhoeus_hydra_adapter.rb
    └── test_helper.rb
/.dockerignore:
--------------------------------------------------------------------------------
1 | /.bundle/
2 | /.yardoc
3 | /_yardoc/
4 | /coverage/
5 | /doc/
6 | /pkg/
7 | /spec/reports/
8 | /tmp/
9 | *.gem
10 | *.aes
11 | coverage
12 | /ci/
13 | codeship*
14 | Dockerfile*
15 |
--------------------------------------------------------------------------------
/.fossa.yml:
--------------------------------------------------------------------------------
1 | version: 3
2 | server: https://app.fossa.com
3 | project:
4 |   id: "fluent-plugin-splunk-hec"
5 |   team: "TA-Automation"
6 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Report a bug encountered while operating fluent-plugin-splunk-hec
4 | labels: ''
5 | assignees: ''
6 |
7 | ---
8 |
9 |
13 |
14 |
15 | **What happened**:
16 |
17 | **What you expected to happen**:
18 |
19 | **How to reproduce it (as minimally and precisely as possible)**:
20 |
21 | **Anything else we need to know?**:
22 |
23 | **Environment**:
24 | - Kubernetes version (use `kubectl version`):
25 | - Ruby version (use `ruby --version`):
26 | - OS (e.g: `cat /etc/os-release`):
27 | - Splunk version:
28 | - Others:
29 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/enhancement_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Enhancement Request
3 | about: Suggest an enhancement to the fluent-plugin-splunk-hec project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
12 | **What would you like to be added**:
13 |
14 | **Why is this needed**:
15 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/failing_test.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Failing Test
3 | about: Report test failures in fluent-plugin-splunk-hec
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
12 | **Which test(s) are failing**:
13 |
14 | **Since when has it been failing**:
15 |
16 | **Reason for failure**:
17 |
18 | **Anything else we need to know**:
19 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Proposed changes
2 |
3 | Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue.
4 |
5 | ## Types of changes
6 |
7 | What types of changes does your code introduce?
8 | _Put an `x` in the boxes that apply_
9 |
10 | - [ ] Bugfix (non-breaking change which fixes an issue)
11 | - [ ] New feature (non-breaking change which adds functionality)
12 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
13 |
14 | ## Checklist
15 |
16 | _Put an `x` in the boxes that apply._
17 |
18 | - [ ] I have read the [CONTRIBUTING](https://github.com/splunk/fluent-plugin-splunk-hec/blob/develop/CONTRIBUTING.md) doc
19 | - [ ] I have read the [CLA](https://github.com/splunk/fluent-plugin-splunk-hec/blob/develop/CLA.md)
20 | - [ ] I have added tests that prove my fix is effective or that my feature works
21 | - [ ] I have added necessary documentation (if appropriate)
22 | - [ ] Any dependent changes have been merged and published in downstream modules
23 |
24 |
--------------------------------------------------------------------------------
/.github/workflows/ci_build_test.yaml:
--------------------------------------------------------------------------------
1 | name: CI Build Test
2 |
3 | on:
4 | pull_request:
5 | branches-ignore:
6 | - /^release\/.*/
7 | - main
8 |
9 | jobs:
10 | build:
11 | runs-on: ubuntu-20.04
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v2
15 |
16 | - name: Setup Ruby and install gems
17 | uses: ruby/setup-ruby@v1
18 | with:
19 | bundler-cache: true
20 | ruby-version: 3.1
21 |
22 | - name: Install dependencies
23 | run: |
24 | sudo ci_scripts/install_dep.sh
25 |
26 | - name: Builder
27 | run: |
28 | bundle exec rake build -t -v
29 | cp -R pkg /tmp
30 |
31 | - name: Cache pkg
32 | uses: actions/cache@v1
33 | with:
34 | path: /tmp
35 | key: ${{ runner.os }}-build
36 |
37 | unit-test:
38 | runs-on: ubuntu-20.04
39 | needs:
40 | - build
41 | steps:
42 | - name: Checkout
43 | uses: actions/checkout@v2
44 |
45 | - name: Install dependencies
46 | run: |
47 | sudo ci_scripts/install_dep.sh
48 |
49 | - uses: actions/cache@v2
50 | with:
51 | path: /tmp
52 | key: ${{ runner.os }}-build
53 |
54 | - name: Run unit tests
55 | run: |
56 | bundle exec rake test -t -v
57 |
58 | func-test:
59 | needs:
60 | - unit-test
61 | runs-on: ubuntu-20.04
62 | env:
63 | CI_SPLUNK_PORT: 8089
64 | CI_SPLUNK_USERNAME: admin
65 | CI_SPLUNK_HEC_TOKEN: a6b5e77f-d5f6-415a-bd43-930cecb12959
66 | CI_SPLUNK_PASSWORD: changeme2
67 | CI_INDEX_EVENTS: ci_events
68 | CI_INDEX_OBJECTS: ci_objects
69 | CI_INDEX_METRICS: ci_metrics
70 | KUBERNETES_VERSION: v1.23.2
71 | MINIKUBE_VERSION: latest
72 | MINIKUBE_NODE_COUNTS: 2
73 | GITHUB_ACTIONS: true
74 |
75 | steps:
76 | - name: Checkout
77 | uses: actions/checkout@v2
78 |
79 | - name: Prepare container build
80 | id: prep
81 | run: |
82 | VERSION=`cat VERSION`
83 | TAGS=splunk/fluentd-hec:recent
84 | echo ::set-output name=tags::${TAGS}
85 | echo ::set-output name=version::${VERSION}
86 |
87 | - name: Set up QEMU
88 | uses: docker/setup-qemu-action@master
89 | with:
90 | platforms: all
91 |
92 | - name: Set up Docker Buildx
93 | id: buildx
94 | uses: docker/setup-buildx-action@master
95 |
96 | - name: Build multi-arch kubernetes-metrics image
97 | uses: docker/build-push-action@v2
98 | with:
99 | builder: ${{ steps.buildx.outputs.name }}
100 | context: .
101 | file: ./docker/Dockerfile
102 | platforms: linux/amd64
103 | push: false
104 | load: true
105 | tags: ${{ steps.prep.outputs.tags }}
106 | build-args: VERSION=${{ steps.prep.outputs.version }}
107 |
108 | - name: Check kubernetes-metrics image
109 | run: |
110 | docker image ls
111 |
112 | - name: Setup Minikube
113 | run: |
114 | # Install Kubectl
115 | curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl
116 | chmod +x kubectl
117 | sudo mv kubectl /usr/local/bin/
118 | mkdir -p ${HOME}/.kube
119 | touch ${HOME}/.kube/config
120 | # Install Minikube
121 | curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
122 | chmod +x minikube
123 | sudo mv minikube /usr/local/bin/
124 | # Start Minikube and Wait
125 | minikube start --driver=docker --container-runtime=docker --cpus 2 --memory 4096 --kubernetes-version=${KUBERNETES_VERSION} --no-vtx-check -n=${MINIKUBE_NODE_COUNTS}
126 | export JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'
127 | until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
128 | sleep 1;
129 | done
130 |
131 | - name: Install Splunk
132 | run: |
133 | # Wait until minikube is ready
134 | export JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'
135 | until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
136 | echo "wait for minikube ready ..."
137 | sleep 1;
138 | done
139 | kubectl get nodes
140 | until kubectl get sa | grep -q 'default'; do
141 | sleep 1;
142 | done
143 | # Install Splunk on minikube
144 | kubectl apply -f ci_scripts/k8s-splunk.yml
145 | # Wait until splunk is ready
146 | until kubectl logs splunk --tail=2 | grep -q 'Ansible playbook complete'; do
147 | sleep 1;
148 | done
149 | export CI_SPLUNK_HOST=$(kubectl get pod splunk --template={{.status.podIP}})
150 | # Setup Indexes
151 | curl -k -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/services/data/indexes -d name=$CI_INDEX_EVENTS -d datatype=event
152 | curl -k -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/services/data/indexes -d name=$CI_INDEX_OBJECTS -d datatype=event
153 | curl -k -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/services/data/indexes -d name=$CI_INDEX_METRICS -d datatype=metric
154 | curl -k -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/services/data/indexes -d name=default-events -d datatype=event
155 | curl -k -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/services/data/indexes -d name=ns-anno -d datatype=event
156 | curl -k -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/services/data/indexes -d name=pod-anno -d datatype=event
157 | # Enable HEC services
158 | curl -X POST -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD -k https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/servicesNS/nobody/splunk_httpinput/data/inputs/http/http/enable
159 | # Create new HEC token
160 | curl -X POST -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD -k -d "name=splunk_hec_token&token=a6b5e77f-d5f6-415a-bd43-930cecb12959&disabled=0&index=default-events&indexes=default-events,$CI_INDEX_METRICS,$CI_INDEX_OBJECTS,$CI_INDEX_EVENTS,ns-anno,pod-anno" https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/servicesNS/nobody/splunk_httpinput/data/inputs/http
161 | # lower the limit to 50MiB. Higher limits throws error 'Search not executed XXXX'
162 | kubectl exec -it splunk -- bash -c 'echo -e "\n[diskUsage]\nminFreeSpace = 50" >> /opt/splunk/etc/system/local/server.conf'
163 | # Restart Splunk
164 | curl -k -u $CI_SPLUNK_USERNAME:$CI_SPLUNK_PASSWORD https://$CI_SPLUNK_HOST:$CI_SPLUNK_PORT/services/server/control/restart -X POST
165 |
166 | - name: Deploy k8s connector
167 | run: |
168 | export CI_SPLUNK_HOST=$(kubectl get pod splunk --template={{.status.podIP}})
169 | ci_scripts/deploy_connector.sh
170 |
171 | - name: Deploy log generator
172 | run: |
173 | cd /opt/splunk-connect-for-kubernetes
174 | kubectl apply -f test/test_setup.yaml
175 | sleep 60
176 |
177 | - uses: actions/setup-python@v2
178 | with:
179 | python-version: 3.7
180 |
181 | - name: Run functional tests
182 | run: |
183 | echo "check the pods"
184 | kubectl get pods -A
185 | cd /opt/splunk-connect-for-kubernetes
186 | kubectl get nodes
187 | export PYTHONWARNINGS="ignore:Unverified HTTPS request"
188 | export CI_SPLUNK_HOST=$(kubectl get pod splunk --template={{.status.podIP}})
189 | cd test
190 | pip install --upgrade pip
191 | pip install -r requirements.txt
192 | echo "Running functional tests....."
193 | python -m pytest \
194 | --splunkd-url https://$CI_SPLUNK_HOST:8089 \
195 | --splunk-user admin \
196 | --splunk-password $CI_SPLUNK_PASSWORD \
197 | --nodes-count $MINIKUBE_NODE_COUNTS\
198 | -p no:warnings -s -n auto
199 | fossa-scan:
200 | continue-on-error: true
201 | runs-on: ubuntu-latest
202 | steps:
203 | - uses: actions/checkout@v3
204 | - name: run fossa analyze and create report
205 | run: |
206 | curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash
207 | fossa analyze --include-unused-deps --debug
208 | fossa report attribution --format text > /tmp/THIRDPARTY
209 | env:
210 | FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }}
211 | - name: upload THIRDPARTY file
212 | uses: actions/upload-artifact@v2
213 | with:
214 | name: THIRDPARTY
215 | path: /tmp/THIRDPARTY
216 | - name: run fossa test
217 | run: |
218 | fossa test --debug
219 | env:
220 | FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }}
221 | semgrep:
222 | runs-on: ubuntu-latest
223 | name: security-sast-semgrep
224 | if: github.actor != 'dependabot[bot]'
225 | steps:
226 | - uses: actions/checkout@v3
227 | - name: Semgrep
228 | id: semgrep
229 | uses: returntocorp/semgrep-action@v1
230 | with:
231 | publishToken: ${{ secrets.SEMGREP_PUBLISH_TOKEN }}
232 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.bundle/
2 | /.yardoc
3 | /_yardoc/
4 | /coverage/
5 | /doc/
6 | /pkg/
7 | /spec/reports/
8 | /tmp/
9 | *.gem
10 | *.aes
11 | coverage
12 | docker/licenses/
13 | docker/gem/
14 | docker/gems/
15 | docker/.bundle/
16 | .idea/
--------------------------------------------------------------------------------
/.rubocop.yml:
--------------------------------------------------------------------------------
1 | Metrics/LineLength:
2 |   Enabled: false
3 |
4 | Style:
5 |   Enabled: true
6 |
7 | Metrics/MethodLength:
8 |   Enabled: false
9 |
10 | Metrics/BlockLength:
11 |   Enabled: false
12 |
--------------------------------------------------------------------------------
/.ruby-version:
--------------------------------------------------------------------------------
1 | 3.1.3
2 |
--------------------------------------------------------------------------------
/CLA.md:
--------------------------------------------------------------------------------
1 | By submitting a Contribution to this Work, You agree that Your Contribution is made subject to the primary LICENSE
2 | file applicable to this Work. In addition, You represent that: (i) You are the copyright owner of the Contribution
3 | or (ii) You have the requisite rights to make the Contribution.
4 |
5 | Definitions:
6 |
7 | “You” shall mean: (i) yourself if you are making a Contribution on your own behalf; or (ii) your company,
8 | if you are making a Contribution on behalf of your company. If you are making a Contribution on behalf of your
9 | company, you represent that you have the requisite authority to do so.
10 |
11 | "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing
12 | work, that is intentionally submitted by You for inclusion in, or documentation of, this project/repository. For the
13 | purposes of this definition, "submitted" means any form of electronic, verbal, or written communication submitted for
14 | inclusion in this project/repository, including but not limited to communication on electronic mailing lists, source
15 | code control systems, and issue tracking systems that are managed by, or on behalf of, the maintainers of
16 | the project/repository.
17 |
18 | “Work” shall mean the collective software, content, and documentation in this project/repository.
19 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, gender identity and expression, level of experience,
9 | nationality, personal appearance, race, religion, or sexual identity and
10 | orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at zliang@splunk.com. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at [http://contributor-covenant.org/version/1/4][version]
72 |
73 | [homepage]: http://contributor-covenant.org
74 | [version]: http://contributor-covenant.org/version/1/4/
75 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | CONTRIBUTING
2 |
3 | By submitting a Contribution to this Work, You agree that Your Contribution is made subject to the primary LICENSE
4 | file applicable to this Work. In addition, You represent that: (i) You are the copyright owner of the Contribution
5 | or (ii) You have the requisite rights to make the Contribution.
6 |
7 | Definitions:
8 |
9 | “You” shall mean: (i) yourself if you are making a Contribution on your own behalf; or (ii) your company,
10 | if you are making a Contribution on behalf of your company. If you are making a Contribution on behalf of your
11 | company, you represent that you have the requisite authority to do so.
12 |
13 | "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing
14 | work, that is intentionally submitted by You for inclusion in, or documentation of, this project/repository. For the
15 | purposes of this definition, "submitted" means any form of electronic, verbal, or written communication submitted for
16 | inclusion in this project/repository, including but not limited to communication on electronic mailing lists, source
17 | code control systems, and issue tracking systems that are managed by, or on behalf of, the maintainers of
18 | the project/repository.
19 |
20 | “Work” shall mean the collective software, content, and documentation in this project/repository.
21 |
--------------------------------------------------------------------------------
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | # Contributors
2 |
3 | Fluentd Plugin for Splunk HEC is developed by Splunkers and the open-source community.
4 |
5 | We thank all of our [contributors](https://github.com/splunk/fluent-plugin-splunk-hec/graphs/contributors)!
6 |
7 | **For the detailed history of contributions** of a given file, try
8 |
9 | git blame file
10 |
11 | to see line-by-line credits and
12 |
13 | git log --follow file
14 |
15 | to see the change log even across renames and rewrites.
16 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | source 'https://rubygems.org'
4 |
5 | group :test do
6 |   gem 'simplecov', require: false
7 |   gem 'net-smtp', require: false
8 | end
9 |
10 | git_source(:github) { |repo_name| "https://github.com/#{repo_name}" }
11 |
12 | # Specify your gem's dependencies in fluent-plugin-splunk_hec_output.gemspec
13 | gemspec
14 |
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | PATH
2 | remote: .
3 | specs:
4 | fluent-plugin-splunk-hec (1.3.3)
5 | fluentd (>= 1.5)
6 | json-jwt (~> 1.15.0)
7 | multi_json (~> 1.13)
8 | net-http-persistent (~> 4.0)
9 | openid_connect (~> 1.1.8)
10 | prometheus-client (>= 2.1.0)
11 | rack-oauth2 (~> 1.19)
12 |
13 | GEM
14 | remote: https://rubygems.org/
15 | specs:
16 | activemodel (7.0.7.2)
17 | activesupport (= 7.0.7.2)
18 | activesupport (7.0.7.2)
19 | concurrent-ruby (~> 1.0, >= 1.0.2)
20 | i18n (>= 1.6, < 2)
21 | minitest (>= 5.1)
22 | tzinfo (~> 2.0)
23 | addressable (2.8.0)
24 | public_suffix (>= 2.0.2, < 5.0)
25 | aes_key_wrap (1.1.0)
26 | attr_required (1.0.1)
27 | bindata (2.4.14)
28 | concurrent-ruby (1.2.2)
29 | connection_pool (2.3.0)
30 | cool.io (1.7.1)
31 | crack (0.4.5)
32 | rexml
33 | digest (3.1.0)
34 | docile (1.4.0)
35 | faraday (2.7.1)
36 | faraday-net_http (>= 2.0, < 3.1)
37 | ruby2_keywords (>= 0.0.4)
38 | faraday-follow_redirects (0.3.0)
39 | faraday (>= 1, < 3)
40 | faraday-net_http (3.0.2)
41 | fluentd (1.15.3)
42 | bundler
43 | cool.io (>= 1.4.5, < 2.0.0)
44 | http_parser.rb (>= 0.5.1, < 0.9.0)
45 | msgpack (>= 1.3.1, < 2.0.0)
46 | serverengine (>= 2.3.0, < 3.0.0)
47 | sigdump (~> 0.2.2)
48 | strptime (>= 0.2.4, < 1.0.0)
49 | tzinfo (>= 1.0, < 3.0)
50 | tzinfo-data (~> 1.0)
51 | webrick (>= 1.4.2, < 1.8.0)
52 | yajl-ruby (~> 1.0)
53 | hashdiff (1.0.1)
54 | http_parser.rb (0.8.0)
55 | httpclient (2.8.3)
56 | i18n (1.12.0)
57 | concurrent-ruby (~> 1.0)
58 | io-wait (0.2.1)
59 | json-jwt (1.15.3)
60 | activesupport (>= 4.2)
61 | aes_key_wrap
62 | bindata
63 | httpclient
64 | mail (2.7.1)
65 | mini_mime (>= 0.1.1)
66 | mini_mime (1.1.2)
67 | minitest (5.15.0)
68 | msgpack (1.6.0)
69 | multi_json (1.15.0)
70 | net-http-persistent (4.0.1)
71 | connection_pool (~> 2.2)
72 | net-protocol (0.1.2)
73 | io-wait
74 | timeout
75 | net-smtp (0.3.1)
76 | digest
77 | net-protocol
78 | timeout
79 | openid_connect (1.1.8)
80 | activemodel
81 | attr_required (>= 1.0.0)
82 | json-jwt (>= 1.5.0)
83 | rack-oauth2 (>= 1.6.1)
84 | swd (>= 1.0.0)
85 | tzinfo
86 | validate_email
87 | validate_url
88 | webfinger (>= 1.0.1)
89 | power_assert (2.0.1)
90 | prometheus-client (4.0.0)
91 | public_suffix (4.0.6)
92 | rack (3.0.8)
93 | rack-oauth2 (1.21.2)
94 | activesupport
95 | attr_required
96 | httpclient
97 | json-jwt (>= 1.11.0)
98 | rack (>= 2.1.0)
99 | rake (13.0.6)
100 | rexml (3.2.6)
101 | ruby2_keywords (0.0.5)
102 | serverengine (2.3.0)
103 | sigdump (~> 0.2.2)
104 | sigdump (0.2.4)
105 | simplecov (0.21.2)
106 | docile (~> 1.1)
107 | simplecov-html (~> 0.11)
108 | simplecov_json_formatter (~> 0.1)
109 | simplecov-html (0.12.3)
110 | simplecov_json_formatter (0.1.3)
111 | strptime (0.2.5)
112 | swd (2.0.2)
113 | activesupport (>= 3)
114 | attr_required (>= 0.0.5)
115 | faraday (~> 2.0)
116 | faraday-follow_redirects
117 | test-unit (3.5.3)
118 | power_assert
119 | timeout (0.2.0)
120 | tzinfo (2.0.6)
121 | concurrent-ruby (~> 1.0)
122 | tzinfo-data (1.2022.6)
123 | tzinfo (>= 1.0.0)
124 | validate_email (0.1.6)
125 | activemodel (>= 3.0)
126 | mail (>= 2.2.5)
127 | validate_url (1.0.15)
128 | activemodel (>= 3.0.0)
129 | public_suffix
130 | webfinger (2.1.2)
131 | activesupport
132 | faraday (~> 2.0)
133 | faraday-follow_redirects
134 | webmock (3.5.1)
135 | addressable (>= 2.3.6)
136 | crack (>= 0.3.2)
137 | hashdiff
138 | webrick (1.7.0)
139 | yajl-ruby (1.4.3)
140 |
141 | PLATFORMS
142 | ruby
143 |
144 | DEPENDENCIES
145 | bundler (~> 2.0)
146 | fluent-plugin-splunk-hec!
147 | minitest (~> 5.0)
148 | net-smtp
149 | rake (>= 12.0)
150 | simplecov
151 | test-unit (~> 3.0)
152 | webmock (~> 3.5.0)
153 |
154 | BUNDLED WITH
155 | 2.2.32
156 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2018 Splunk Inc.
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 | =======================================================================
204 | Fluentd Plugin for Splunk HEC:
205 |
206 | The Fluentd Plugin for Splunk HTTP Event Collector project contains
207 | subcomponents with separate copyright notices and license terms.
208 | Your use of the source code for the these subcomponents is subject
209 | to the terms and conditions of the following licenses.
210 |
211 | ========================================================================
212 | Apache License 2.0
213 | ========================================================================
214 | The following components are provided under the Apache License 2.0. See project link for details.
215 |
216 | (Apache License 2.0) fluentd (https://github.com/fluent/fluentd/blob/master/LICENSE)
217 | (Apache License 2.0) ffi-compiler (https://github.com/ffi/ffi-compiler/blob/master/LICENSE)
218 | (Apache License 2.0) msgpack (https://github.com/msgpack/msgpack-ruby/blob/master/LICENSE)
219 | (Apache License 2.0) prometheus-client (https://github.com/prometheus/client_ruby/blob/master/LICENSE)
220 | (Apache License 2.0) quantile (https://github.com/matttproud/ruby_quantile_estimation/blob/master/LICENSE)
221 | (Apache License 2.0) serverengine (https://github.com/treasure-data/serverengine/blob/master/LICENSE)
222 | (Apache License 2.0) addressable (https://github.com/sporkmonger/addressable/blob/master/LICENSE.txt)
223 | (Apache License 2.0) fluent-plugin-kubernetes_metadata_filter (https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter/blob/master/LICENSE.txt)
224 | (Apache License 2.0) thread_safe (https://github.com/ruby-concurrency/thread_safe/blob/master/LICENSE)
225 |
226 | ========================================================================
227 | MIT licenses
228 | ========================================================================
229 | The following components are provided under the MIT License. See project link for details.
230 |
231 | (MIT License) activemodel (https://github.com/rails/rails/blob/v6.0.2.1/activemodel/MIT-LICENSE)
232 | (MIT License) activesupport (https://github.com/rails/rails/blob/v6.0.2.1/activesupport/MIT-LICENSE)
233 | (MIT License) aes_key_wrap (https://github.com/tomdalling/aes_key_wrap/blob/master/LICENSE.txt)
234 | (MIT License) ast (https://github.com/whitequark/ast/blob/master/LICENSE.MIT)
235 | (MIT License) attr_required (https://github.com/nov/attr_required/blob/master/LICENSE)
236 | (MIT License) bundler (https://github.com/bundler/bundler/blob/master/LICENSE.md)
237 | (MIT License) concurrent-ruby (https://github.com/ruby-concurrency/concurrent-ruby/blob/master/LICENSE.md)
238 | (MIT License) connection_pool (https://github.com/mperham/connection_pool/blob/master/LICENSE)
239 | (MIT License) cool.io (https://github.com/tarcieri/cool.io/blob/master/LICENSE)
240 | (MIT License) crack (https://github.com/jnunemaker/crack/blob/master/LICENSE)
241 | (MIT License) docile (https://github.com/ms-ati/docile/blob/master/LICENSE)
242 | (MIT License) hashdiff (https://github.com/liufengyun/hashdiff/blob/master/LICENSE)
243 | (MIT License) http (https://github.com/httprb/http/blob/master/LICENSE.txt)
244 | (MIT License) http_parser.rb (https://github.com/tmm1/http_parser.rb/blob/master/LICENSE-MIT)
245 | (MIT License) http-accept (https://github.com/socketry/http-accept#license)
246 | (MIT License) http-cookie (https://github.com/sparklemotion/http-cookie/blob/master/LICENSE.txt)
247 | (MIT License) http-form_data (https://github.com/httprb/form_data/blob/master/LICENSE.txt)
248 | (MIT License) http-parser (https://github.com/cotag/http-parser/blob/master/LICENSE)
249 | (MIT License) i18n (https://github.com/ruby-i18n/i18n/blob/master/MIT-LICENSE)
250 | (MIT License) jaro_winkler (https://github.com/tonytonyjan/jaro_winkler/blob/master/LICENSE.txt)
251 | (MIT License) json-jwt (https://github.com/nov/json-jwt/blob/master/LICENSE)
252 | (MIT License) kubeclient (https://github.com/abonas/kubeclient/blob/master/LICENSE.txt)
253 | (MIT License) lru_redux (https://github.com/SamSaffron/lru_redux/blob/master/LICENSE.txt)
254 | (MIT License) mail (https://github.com/mikel/mail/blob/master/MIT-LICENSE)
255 | (MIT License) mime-types (https://github.com/mime-types/ruby-mime-types/blob/master/Licence.md)
256 | (MIT License) mime-types-data (https://github.com/mime-types/mime-types-data/blob/master/Licence.md)
257 | (MIT License) mini_mime (https://github.com/discourse/mini_mime/blob/master/LICENSE.txt)
258 | (MIT License) minitest (https://github.com/seattlerb/minitest)
259 | (MIT License) multi_json (https://github.com/intridea/multi_json/blob/master/LICENSE.md)
260 | (MIT License) net-http-persistent (https://github.com/drbrain/net-http-persistent)
261 | (MIT License) netrc (https://github.com/heroku/netrc/blob/master/LICENSE.md)
262 | (MIT License) openid_connect (https://github.com/nov/openid_connect/blob/master/LICENSE)
263 | (MIT License) parallel (https://github.com/grosser/parallel/blob/master/MIT-LICENSE.txt)
264 | (MIT License) parser (https://github.com/whitequark/parser/blob/master/LICENSE.txt)
265 | (MIT License) powerpack (https://github.com/bbatsov/powerpack/blob/master/LICENSE.txt)
266 | (MIT License) public_suffix (https://github.com/weppos/publicsuffix-ruby/blob/master/LICENSE.txt)
267 | (MIT License) rack (https://github.com/rack/rack/blob/master/MIT-LICENSE)
268 | (MIT License) rack-oauth2 (https://github.com/nov/rack-oauth2/blob/master/LICENSE)
269 | (MIT License) rainbow (https://github.com/sickill/rainbow/blob/master/LICENSE)
270 | (MIT License) rake (https://github.com/ruby/rake/blob/master/MIT-LICENSE)
271 | (MIT License) recursive-open-struct (https://github.com/aetherknight/recursive-open-struct/blob/master/LICENSE.txt)
272 | (MIT License) rest-client (https://github.com/rest-client/rest-client/blob/master/LICENSE)
273 | (MIT License) ruby-progressbar (https://github.com/jfelchner/ruby-progressbar/blob/master/LICENSE.txt)
274 | (MIT License) safe_yaml (https://github.com/dtao/safe_yaml/blob/master/LICENSE.txt)
275 | (MIT License) sigdump (https://github.com/frsyuki/sigdump/blob/master/LICENSE)
276 | (MIT License) simplecov (https://github.com/colszowka/simplecov/blob/master/LICENSE)
277 | (MIT License) simplecov-html (https://github.com/colszowka/simplecov-html/blob/master/LICENSE)
278 | (MIT License) swd (https://github.com/nov/SWD/blob/master/LICENSE)
279 | (MIT License) tzinfo (https://github.com/tzinfo/tzinfo/blob/master/LICENSE)
280 | (MIT License) tzinfo-data (https://github.com/tzinfo/tzinfo-data/blob/master/LICENSE)
281 | (MIT License) unf_ext (https://github.com/knu/ruby-unf_ext/blob/master/LICENSE.txt)
282 | (MIT License) unicode-display_width (https://github.com/janlelis/unicode-display_width/blob/master/MIT-LICENSE.txt)
283 | (MIT License) validate_email (https://github.com/perfectline/validates_email/blob/master/MIT-LICENSE)
284 | (MIT License) validate_url (https://github.com/perfectline/validates_url/blob/master/LICENSE.md)
285 | (MIT License) webfinger (https://github.com/nov/webfinger/blob/master/LICENSE.txt)
286 | (MIT License) webmock (https://github.com/bblimke/webmock/blob/master/LICENSE)
287 | (MIT License) yajl-ruby (https://github.com/brianmario/yajl-ruby/blob/master/LICENSE)
288 |
289 | ========================================================================
290 | For the rest:
291 | ========================================================================
292 |
293 | bindata (https://github.com/dmendel/bindata/blob/master/COPYING)
294 | httpclient (https://github.com/nahi/httpclient/#license)
295 | json (https://www.ruby-lang.org/en/about/license.txt)
296 | test-unit (https://github.com/test-unit/test-unit)
297 | unf (https://github.com/knu/ruby-unf/blob/master/LICENSE)
298 | power_assert (https://github.com/k-tsj/power_assert/blob/master/BSDL)
299 | strptime (https://github.com/nurse/strptime/blob/master/LICENSE.txt)
300 | domain_name (https://github.com/knu/ruby-domain_name/blob/master/LICENSE.txt)
301 | ffi (https://github.com/ffi/ffi/blob/master/LICENSE)
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | VERSION := $(shell sh -c 'cat VERSION')
2 |
3 | clean_pkg:
4 | 	@rm -rf pkg/* docker/*.gem
5 |
6 | clean_gems:
7 | 	@rm -rf docker/gem/ docker/gems/
8 |
9 | clean: clean_pkg clean_gems
10 | 	@rm -rf docker/licenses
11 |
12 | build: clean_pkg
13 | 	@bundle exec rake build
14 |
15 | .PHONY: docker
16 | docker:
17 | 	@docker buildx build --no-cache --pull --platform linux/amd64 -o type=image,name=splunk/fluentd-hec:$(VERSION),push=false --build-arg VERSION=$(VERSION) . -f docker/Dockerfile
18 |
19 | docker-rebuild:
20 | 	@docker buildx build --platform linux/amd64 -o type=image,name=splunk/fluentd-hec:$(VERSION),push=false --build-arg VERSION=$(VERSION) . -f docker/Dockerfile
21 |
22 | unit-test:
23 | 	@bundle exec rake test
24 |
25 | install-deps:
26 | 	@gem install bundler
27 | 	@bundle update --bundler
28 | 	@bundle install
29 |
30 | unpack: build
31 | 	@cp pkg/fluent-plugin-*.gem docker
32 | 	@mkdir -p docker/gem
33 | 	@rm -rf docker/gem/*
34 | 	@gem unpack docker/fluent-plugin-*.gem --target docker/gem
35 | 	@cd docker && bundle install
36 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # End of Support
2 |
3 | **Important:** The fluent-plugin-splunk-hec will reach End of Support on January 1, 2024. After that date, this repository will no longer receive updates from Splunk and will no longer be supported by Splunk. Until then, only critical security fixes and bug fixes will be provided.
4 |
5 | # fluent-plugin-splunk-hec
6 |
7 | [Fluentd](https://fluentd.org/) output plugin to send events and metrics to [Splunk](https://www.splunk.com) in 2 modes:
8 | 1) Via Splunk's [HEC (HTTP Event Collector) API](http://dev.splunk.com/view/event-collector/SP-CAAAE7F)
9 | 2) Via the Splunk Cloud Services (SCS) [Ingest API](https://sdc.splunkbeta.com/reference/api/ingest/v1beta2)
10 |
11 | ## Installation
12 |
13 | ### RubyGems
14 | ```
15 | $ gem install fluent-plugin-splunk-hec
16 | ```
17 | ### Bundler
18 |
19 | Add the following line to your Gemfile:
20 |
21 | ```ruby
22 | gem "fluent-plugin-splunk-hec"
23 | ```
24 |
25 | And then execute:
26 |
27 | ```
28 | $ bundle
29 | ```
30 |
31 | ## Configuration
32 |
33 | * See also: [Output Plugin Overview](https://docs.fluentd.org/v1.0/articles/output-plugin-overview)
34 |
35 | #### Example 1: Minimum HEC Configuration
36 |
37 | ```
38 | <match **>
39 |   @type splunk_hec
40 |   hec_host 12.34.56.78
41 |   hec_port 8088
42 |   hec_token 00000000-0000-0000-0000-000000000000
43 | </match>
44 | ```
45 |
46 | This example is very basic: it tells the plugin to send events to Splunk HEC at `https://12.34.56.78:8088` (https is the default protocol), using the HEC token `00000000-0000-0000-0000-000000000000`. It will use whatever index, source, and sourcetype are configured in HEC, and the `host` of each event is the hostname of the machine running fluentd.
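For reference, the single-event HTTP call implied by this configuration looks roughly like the Ruby sketch below. The endpoint, header, and payload layout follow the HEC API; the event body and the use of plain `Net::HTTP` are illustrative only (the plugin batches events and keeps a persistent connection).

```ruby
# Rough sketch of the HEC request behind Example 1 -- not the plugin's actual code.
require 'net/http'
require 'json'
require 'socket'
require 'uri'

uri = URI('https://12.34.56.78:8088/services/collector') # protocol, hec_host, hec_port, default hec_endpoint
payload = {
  event: { message: 'hello world' },   # hypothetical event body
  host:  Socket.gethostname,           # default host: the machine running fluentd
  time:  Time.now.to_f
}

http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true
request = Net::HTTP::Post.new(uri.path, 'Content-Type' => 'application/json')
request['Authorization'] = 'Splunk 00000000-0000-0000-0000-000000000000'
request.body = payload.to_json
response = http.request(request)
puts response.code # 200 when HEC accepts the event
```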
47 |
48 |
49 | #### Example 2: SCS Ingest Configuration example
50 |
51 | ```
52 | <match **>
53 |   @type splunk_ingest_api
54 |   service_client_identifier xxxxxxxx
55 |   service_client_secret_key xxxx-xxxxx
56 |   token_endpoint /token
57 |   ingest_auth_host auth.scp.splunk.com
58 |   ingest_api_host api.scp.splunk.com
59 |   ingest_api_tenant <tenant_name>
60 |   ingest_api_events_endpoint /<tenant_name>/ingest/v1beta2/events
61 |   debug_http false
62 | </match>
63 | ```
64 |
65 | This example shows the configuration used for sending events to the ingest API. The plugin uses `service_client_identifier` and `service_client_secret_key` to obtain a token from `token_endpoint`, and sends events to `ingest_api_host` for the tenant `ingest_api_tenant` at the endpoint `ingest_api_events_endpoint`. The `debug_http` flag controls whether debug logs are printed to stdout.
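Behind the scenes, the ingest API plugin first exchanges the client credentials for a bearer token at the auth host. A minimal sketch of that exchange is below; the grant type, field names, and response shape are assumptions based on the configuration above, not code taken from the plugin.

```ruby
# Illustrative client-credentials token request (assumed flow, not the plugin's code).
require 'net/http'
require 'json'
require 'uri'

token_url = URI('https://auth.scp.splunk.com/token') # ingest_auth_host + token_endpoint
response = Net::HTTP.post_form(token_url,
                               'grant_type'    => 'client_credentials',
                               'client_id'     => 'xxxxxxxx',    # service_client_identifier
                               'client_secret' => 'xxxx-xxxxx')  # service_client_secret_key

access_token = JSON.parse(response.body)['access_token'] if response.is_a?(Net::HTTPSuccess)
# The token is then sent as "Authorization: Bearer <token>" on requests to ingest_api_host.
```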
66 |
67 | #### Example 3: Overwrite HEC defaults
68 |
69 | ```
70 | <match **>
71 |   @type splunk_hec
72 |   hec_host 12.34.56.78
73 |   hec_port 8088
74 |   hec_token 00000000-0000-0000-0000-000000000000
75 |
76 |   index awesome
77 |   source ${tag}
78 |   sourcetype _json
79 | </match>
80 | ```
81 |
82 | This configuration will
83 | * send all events to the `awesome` index, and
84 | * set their source to the event tags. `${tag}` is a special value which will be replaced by the event tags, and
85 | * set their sourcetype to `_json`.
86 |
87 | Sometimes you want to use values from the input event for these parameters; this is where the `*_key` parameters help.
88 |
89 | ```
90 | <match **>
91 |   ...omitting other parameters...
92 |
93 |   source_key file_path
94 | </match>
95 | ```
96 |
97 | In this example (to keep it concise, we omitted the repeated parameters, and we will keep doing so in the following examples), the `source_key` config sets the source of the event to the value of the event's `file_path` field. Given an input event like
98 | ```javascript
99 | {"file_path": "/var/log/splunk.log", "message": "This is an example.", "level": "info"}
100 | ```
101 | Then the source for this event will be "/var/log/splunk.log", and the "file\_path" field will be removed from the input event, so what eventually gets ingested into Splunk is:
102 | ```javascript
103 | {"message": "This is an example.", "level": "info"}
104 | ```
105 | If you want to keep "file\_path" in the event, you can use `keep_keys`.
106 |
107 | Besides `source_key` there are also other `*_key` parameters; check the parameter details below.
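Conceptually, each `*_key` option just lifts a field out of the record before the event is sent. A minimal sketch of that behavior (simplified, not the plugin's actual implementation):

```ruby
# Simplified illustration of *_key handling; the real plugin supports several keys at once.
def resolve_source(record, source_key: 'file_path', keep_keys: false)
  keep_keys ? record[source_key] : record.delete(source_key)
end

record = { 'file_path' => '/var/log/splunk.log', 'message' => 'This is an example.', 'level' => 'info' }
source = resolve_source(record)
# source => "/var/log/splunk.log"
# record => {"message"=>"This is an example.", "level"=>"info"}  (field dropped unless keep_keys is true)
```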
108 |
109 | #### Example 4: Sending metrics
110 |
111 | [Metrics](https://docs.splunk.com/Documentation/Splunk/latest/Metrics/Overview) has been available since Splunk 7.0.0. You can use this output plugin to send events as metrics to a Splunk metric index by setting `data_type` to "metric".
112 |
113 | ```
114 | <match **>
115 |   @type splunk_hec
116 |   data_type metric
117 |   hec_host 12.34.56.78
118 |   hec_port 8088
119 |   hec_token 00000000-0000-0000-0000-000000000000
120 | </match>
121 | ```
122 |
123 | With this configuration, the plugin will treat each input event as a collection of metrics, i.e. each key-value pair in the event is a metric name-value pair. For example, given an input event like
124 |
125 | ```javascript
126 | {"cpu/usage": 0.5, "cpu/rate": 10, "memory/usage": 100, "memory/rss": 90}
127 | ```
128 |
129 | then 4 metrics will be sent to Splunk.
130 |
131 | If the input events are not like this, and instead have the metric name and metric value as properties of the event, you can use `metric_name_key` and `metric_value_key`. Given an input event like
132 |
133 | ```javascript
134 | {"metric": "cpu/usage", "value": 0.5, "app": "web_ui"}
135 | ```
136 |
137 | You should change the configuration to
138 |
139 | ```
140 | <match **>
141 |   @type splunk_hec
142 |   data_type metric
143 |   hec_host 12.34.56.78
144 |   hec_port 8088
145 |   hec_token 00000000-0000-0000-0000-000000000000
146 |
147 |   metric_name_key metric
148 |   metric_value_key value
149 | </match>
150 | ```
151 |
152 | All other properties of the input (in this example, "app") will be sent as dimensions of the metric. You can use the `<fields>` section to customize the dimensions.
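To make the two metric modes concrete, the sketch below maps a record to HEC metric fields using the `metric_name`/`_value` layout of Splunk's metrics format. The code is illustrative only and is not the plugin's implementation.

```ruby
# Illustrative record-to-metric mapping (assumed shape, not the plugin's code).
def to_metric_fields(record, metric_name_key: nil, metric_value_key: nil)
  if metric_name_key && metric_value_key
    name  = record.delete(metric_name_key)
    value = record.delete(metric_value_key)
    # remaining keys ("app" in the example) become dimensions
    [record.merge('metric_name' => name, '_value' => value)]
  else
    # metrics_from_event behavior: every key/value pair is its own metric
    record.map { |name, value| { 'metric_name' => name, '_value' => value } }
  end
end

to_metric_fields({ 'metric' => 'cpu/usage', 'value' => 0.5, 'app' => 'web_ui' },
                 metric_name_key: 'metric', metric_value_key: 'value')
# => [{"app"=>"web_ui", "metric_name"=>"cpu/usage", "_value"=>0.5}]
```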
153 |
154 | ### Type of plugin
155 |
156 | #### @type
157 |
158 | This value must be set to `splunk_hec` when using the HEC API and to `splunk_ingest_api` when using the ingest API. Only one type, either `splunk_hec` or `splunk_ingest_api`, is expected to be used when configuring this plugin.
159 |
160 | ### Parameters for `splunk_hec`
161 |
162 | ### protocol (enum) (optional)
163 |
164 | This is the protocol to use for calling the HEC API. Available values are: http, https. This parameter is
165 | set to `https` by default.
166 |
167 | ### hec_host (string) (required)
168 |
169 | The hostname/IP address for the HEC endpoint or the HEC load balancer.
170 |
171 | ### hec_port (integer) (optional)
172 |
173 | The port number for the HEC endpoint or the HEC load balancer. The default value is `8088`.
174 |
175 | ### hec_token (string) (required)
176 |
177 | Identifier for the HEC token.
178 |
179 | ### hec_endpoint (string) (optional)
180 |
181 | The HEC REST API endpoint to use. The default value is `services/collector`.
182 |
183 | ### metrics_from_event (bool) (optional)
184 |
185 | When `data_type` is set to "metric", the plugin will treat every key-value pair in the input event as a metric name-value pair. Set `metrics_from_event` to `false` to disable this behavior and use `metric_name_key` and `metric_value_key` to define metrics. The default value is `true`.
186 |
187 | ### metric_name_key (string) (optional)
188 |
189 | Field name that contains the metric name. This parameter only works in conjunction with the `metrics_from_event` parameter. When this parameter is set, the `metrics_from_event` parameter is automatically set to `false`.
190 |
191 | ### metric_value_key (string) (optional)
192 |
193 | Field name that contains the metric value. This parameter is required when `metric_name_key` is configured.
194 |
195 | ### coerce_to_utf8 (bool) (optional)
196 |
197 | Indicates whether to allow non-UTF-8 characters in user logs. If set to `true`, any non-UTF-8 character is replaced by the string specified in `non_utf8_replacement_string`. If set to `false`, the Ingest API errors out any non-UTF-8 characters. This parameter is set to `true` by default.
198 |
199 | ### non_utf8_replacement_string (string) (optional)
200 |
201 | If `coerce_to_utf8` is set to `true`, any non-UTF-8 character is replaced by the string you specify in this parameter. The parameter is set to `' '` by default.
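The coercion described by these two options can be expressed with Ruby's `String#scrub`, which replaces invalid byte sequences; the snippet below is a sketch of the idea rather than the plugin's exact code.

```ruby
# Sketch of coerce_to_utf8 / non_utf8_replacement_string semantics (illustrative only).
def coerce_to_utf8(value, replacement: ' ')
  value.scrub(replacement) # replaces invalid UTF-8 byte sequences with the configured string
end

coerce_to_utf8("caf\xE9 latte")
# => "caf  latte"   (the invalid byte is replaced by the default replacement, a space)
```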
202 |
203 | ### Parameters for `splunk_ingest_api`
204 |
205 | ### service_client_identifier: (optional) (string)
206 |
207 | Splunk uses the client identifier to make authorized requests to the ingest API.
208 |
209 | ### service_client_secret_key: (string)
210 |
211 | The client identifier uses this authorization to make requests to the ingest API.
212 |
213 | ### token_endpoint: (string)
214 |
215 | This value indicates which endpoint Splunk should look to for the authorization token necessary for requests to the ingest API.
216 |
217 | ### ingest_api_host: (string)
218 |
219 | Indicates which url/hostname to use for requests to the ingest API.
220 |
221 | ### ingest_api_tenant: (string)
222 |
223 | Indicates which tenant Splunk should use for requests to the ingest API.
224 |
225 | ### ingest_api_events_endpoint: (string)
226 |
227 | Indicates which endpoint to use for requests to the ingest API.
228 |
229 | ### debug_http: (bool)
230 | Set to `true` if you want to debug requests and responses to the ingest API. The default is `false`.
231 |
232 | ### Parameters for both `splunk_hec` and `splunk_ingest_api`
233 |
234 | ### index (string) (optional)
235 |
236 | Identifier for the Splunk index to be used for indexing events. If this parameter is not set,
237 | the index is decided by HEC. Cannot set both `index` and `index_key` parameters at the same time.
238 |
239 | ### index_key (string) (optional)
240 |
241 | The field name that contains the Splunk index name. Cannot set both `index` and `index_key` parameters at the same time.
242 |
243 | ### host (string) (optional)
244 |
245 | The host location for events. Cannot set both `host` and `host_key` parameters at the same time.
246 | If this parameter is not set, the default value is the hostname of the machine running fluentd.
247 |
248 | ### host_key (string) (optional)
249 |
250 | Key for the host location. Cannot set both `host` and `host_key` parameters at the same time.
251 |
252 | ### source (string) (optional)
253 |
254 | The source field for events. If this parameter is not set, the source will be decided by HEC.
255 | Cannot set both `source` and `source_key` parameters at the same time.
256 |
257 | ### source_key (string) (optional)
258 |
259 | Field name to contain source. Cannot set both `source` and `source_key` parameters at the same time.
260 |
261 | ### sourcetype (string) (optional)
262 |
263 | The sourcetype field for events. When not set, the sourcetype is decided by HEC.
264 | Cannot set both `sourcetype` and `sourcetype_key` parameters at the same time.
265 |
266 | ### sourcetype_key (string) (optional)
267 |
268 | Field name that contains the sourcetype. Cannot set both `sourcetype` and `sourcetype_key` parameters at the same time.
269 |
270 | ### time_key (string) (optional)
271 |
272 | Field name that contains the Splunk event time. By default, fluentd's event time is used.
273 |
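As an illustration of the `*_key` variants, the sketch below reads metadata from each record; the field names `idx`, `st`, and `timestamp` are hypothetical, not plugin defaults:

```
<match **>
  @type splunk_hec
  ...omitting connection parameters...
  index_key idx
  sourcetype_key st
  time_key timestamp
</match>
```

With an input event such as `{"idx": "main", "st": "access_log", "timestamp": "1660000000", "message": "hello"}`, the event is indexed into `main` with sourcetype `access_log` and the given time, and (unless `keep_keys` is `true`) the `idx`, `st`, and `timestamp` fields are removed from the event body.
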
274 | ### fields (init) (optional)
275 |
276 | Lets you specify the index-time fields for the event data type, or metric dimensions for the metric data type. Null value fields are removed.
277 |
278 | ### keep_keys (boolean) (Optional)
279 |
280 | By default, all fields used by the `*_key` parameters are removed from the original input events. To change this behavior, set this parameter to `true`; it is set to `false` by default.
281 | When set to `true`, all fields defined in `index_key`, `host_key`, `source_key`, `sourcetype_key`, `metric_name_key`, and `metric_value_key` are kept in the original event.
282 |
283 | ### <fields> section (optional) (single)
284 |
285 | Depending on the value of the `data_type` parameter, the parameters inside the `<fields>` section have different meanings. Regardless of the meaning, the syntax for the parameters is the same.
286 |
287 | ### app_name (string) (Optional)
288 |
289 | The name of the Splunk app using this plugin (defaults to `hec_plugin_gem`).
290 |
291 | ### app_version (string) (Optional)
292 |
293 | The version of the Splunk app using this plugin (defaults to the plugin version).
294 |
295 | ### custom_headers (Hash) (Optional)
296 |
297 | Hash of custom headers to be added to the HTTP request. It is used to populate the [`override_headers`](https://docs.seattlerb.org/net-http-persistent/Net/HTTP/Persistent.html#attribute-i-override_headers) attribute of the underlying `Net::HTTP::Persistent` connection.
298 |
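For example, a hypothetical header can be supplied as a JSON-style hash (the header name and value here are placeholders):

```
<match **>
  @type splunk_hec
  ...omitting other parameters...
  custom_headers {"X-My-Header":"my-value"}
</match>
```
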
299 | #### When `data_type` is `event`
300 |
301 | In this case, parameters inside `<fields>` are used as indexed fields and removed from the original input events. Please see "Add a "fields" property at the top JSON level" [here](http://dev.splunk.com/view/event-collector/SP-CAAAFB6) for details. Given a configuration like
302 |
303 | ```
304 | <match **>
305 |   @type splunk_hec
306 |   ...omitting other parameters...
307 |
308 |   <fields>
309 |     file
310 |     level
311 |     app application
312 |   </fields>
313 | </match>
314 | ```
315 |
316 | and an input event like
317 |
318 | ```javascript
319 | {"application": "webServer", "file": "server.rb", "lineNo": 100, "level": "info", "message": "Request finished in 30ms."}
320 | ```
321 |
322 | Then the HEC request JSON payload will be:
323 |
324 | ```javascript
325 | {
326 | // omitting other fields
327 | // ...
328 | "event": "{\"lineNo\": 100, \"message\": \"Request finished in 30ms.\"}",
329 | "fields": {
330 | "file": "server.rb",
331 | "level": "info",
332 | "app": "webServer"
333 | }
334 | }
335 | ```
336 |
337 | As you can see, parameters inside the `<fields>` section can be a key-value pair or just a key (a name).
338 | If a parameter is a key-value pair, the key becomes the name of the field inside the `"fields"` JSON object,
339 | whereas the value is the name of the field in the input event. So a key-value pair is a rename.
340 |
341 | If a parameter has just a key, it means its value is exactly the same as the key.
342 |
343 | #### When `data_type` is `metric`
344 |
345 | For metrics, parameters inside `<fields>` are used as dimensions. If `<fields>` is not present, the original input event is used as dimensions. If an empty `<fields>` is present, no dimensions are sent. For example, given the following configuration:
346 |
347 | ```
348 | <match **>
349 |   @type splunk_hec
350 |   data_type metric
351 |   ...omitting other parameters...
352 |
353 |   metric_name_key name
354 |   metric_value_key value
355 |   <fields>
356 |     file
357 |     level
358 |     app application
359 |   </fields>
360 | </match>
361 | ```
362 |
363 | and the following input event:
364 |
365 | ```javascript
366 | {"application": "webServer", "file": "server.rb", "value": 100, "status": "OK", "message": "Normal", "name": "CPU Usage"}
367 | ```
368 |
369 | Then a metric named "CPU Usage" with value=100, along with three dimensions file="server.rb", status="OK", and app="webServer", is sent to Splunk.
370 |
371 | ### <format> section (optional) (multiple)
372 |
373 | The `<format>` section lets you define which formatter to use to format events.
374 | By default, it uses [the `json` formatter](https://docs.fluentd.org/v1.0/articles/formatter_json).
375 |
376 | Besides the `@type` parameter, you should define the other parameters for the formatter inside this section.
377 |
378 | Multiple `<format>` sections can be defined to use different formatters for different tags. Each `<format>` section accepts an argument just like the `<match>` section does to define tag matching. By default, every event is formatted with `json`. For example:
379 |
380 | ```
381 | <match **>
382 |   @type splunk_hec
383 |   ...
384 |
385 |   <format sometag.**>
386 |     @type single_value
387 |     message_key log
388 |   </format>
389 |   <format some.othertag>
390 |     @type csv
391 |     fields ["some", "fields"]
392 |   </format>
393 | </match>
394 | ```
395 |
396 | This example:
397 | - Formats events with tags that start with `sometag.` with the `single_value` formatter
398 | - Formats events with tags `some.othertag` with the `csv` formatter
399 | - Formats all other events with the `json` formatter (the default formatter)
400 |
401 | If you want to use a different default formatter, you can add a `<format **>` (or `<format>`) section.
402 |
403 | #### @type (string) (required)
404 |
405 | Specifies which formatter to use.
406 |
407 | ### Net::HTTP::Persistent parameters (optional)
408 |
409 | The following parameters can be used for tuning HTTP connections:
410 |
411 | #### gzip_compression (boolean)
412 | Whether to use gzip compression on outbound posts. This parameter is set to `false` by default for backwards compatibility.
413 |
414 | #### idle_timeout (integer)
415 |
416 | The default is five seconds. If a connection has not been used for five seconds, it is automatically reset at next use, in order to avoid attempting to send to a closed connection. Specify `nil` to disable the idle timeout.
417 |
418 | #### read_timeout (integer)
419 |
420 | The amount of time allowed between reading two chunks from the socket. The default value is `nil`, which means no timeout.
421 |
422 | #### open_timeout (integer)
423 |
424 | The amount of time to wait for a connection to be opened. The default is `nil`, which means no timeout.
425 |
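A sketch combining these tuning knobs; the values are illustrative, not recommendations:

```
<match **>
  @type splunk_hec
  ...omitting other parameters...
  gzip_compression true
  idle_timeout 10
  read_timeout 60
  open_timeout 10
</match>
```
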
426 | ### SSL parameters
427 |
428 | The following optional parameters let you configure SSL for HTTPS protocol.
429 |
430 | #### client_cert (string)
431 |
432 | The path to a file containing a PEM-format certificate for this client.
433 |
434 | #### client_key (string)
435 |
436 | The private key for this client.
437 |
438 | #### ca_file (string)
439 |
440 | The path to a file containing CA certificates in PEM format. The plugin will verify the TLS server certificate presented by Splunk against the certificates in this file, unless verification is disabled by the `insecure_ssl` option.
441 |
442 | #### ca_path (string)
443 |
444 | The path to a directory containing CA certificates in PEM format. The plugin will verify the TLS server certificate presented by Splunk against the certificates in this directory, unless verification is disabled by the `insecure_ssl` option.
445 |
446 | #### ssl_ciphers (array)
447 |
448 | List of SSL ciphers allowed.
449 |
450 | #### insecure_ssl (bool)
451 |
452 | Specifies whether an insecure SSL connection is allowed. If set to `false` (the default), the plugin verifies the TLS server certificate presented by Splunk against the CA certificates provided by the `ca_file`/`ca_path` options, and rejects the certificate if verification fails.
453 |
454 | #### require_ssl_min_version (bool)
455 |
456 | When set to `true` (the default), the plugin will require TLSv1.1 or later for its connection to Splunk.
457 |
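A hedged SSL sketch; all file paths below are placeholders:

```
<match **>
  @type splunk_hec
  protocol https
  ...omitting other parameters...
  ca_file /path/to/ca.pem
  client_cert /path/to/client.pem
  client_key /path/to/client.key
  insecure_ssl false
  require_ssl_min_version true
</match>
```
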
458 | #### consume_chunk_on_4xx_errors (bool)
459 |
460 | Specifies whether 4xx HTTP response status codes consume the buffer chunk. If set to `false`, the plugin raises an error on a 4xx response so that fluentd retries the chunk instead of discarding it. This parameter is set to `true` by default for backwards compatibility.
461 |
462 | ## About Buffer
463 |
464 | This plugin sends events to HEC using [batch mode](https://docs.splunk.com/Documentation/Splunk/7.1.0/Data/FormateventsforHTTPEventCollector#Event_data).
465 | It batches all events in a chunk into one request, so you need to configure the `<buffer>` section carefully to get the best performance.
466 | Here are some hints:
467 |
468 | * Read through the [fluentd buffer document](https://docs.fluentd.org/v1.0/articles/buffer-section) to understand the buffer configurations.
469 | * Use `chunk_limit_size` and/or `chunk_limit_records` to define how big a chunk can be. And remember that all events in a chunk will be sent in one request.
470 | * Splunk limits how big the payload of a HEC request can be. The limit is defined by `max_content_length` in [the `[http_input]` section of `limits.conf`](https://docs.splunk.com/Documentation/Splunk/latest/Admin/Limitsconf#.5Bhttp_input.5D). In Splunk 6.5.0 and later, the default value is 800MiB, while in earlier versions it is just 1MB. Make sure your chunk size does not exceed this limit, or raise the limit on your Splunk deployment.
471 | * Sending requests to HEC takes time, so if you flush the fluentd buffer too fast (for example, with a very small `flush_interval`), the plugin may not be able to keep up with the buffer flushing. There are two ways to handle this: increase the `flush_interval`, or use multiple flush threads by setting `flush_thread_count` to a number greater than 1 (see the sketch after this list).
472 |
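As a starting point, here is a buffer sketch; the numbers are placeholders to tune for your own event volume, not recommendations:

```
<match **>
  @type splunk_hec
  ...omitting other parameters...
  <buffer>
    @type memory
    chunk_limit_size 8m
    chunk_limit_records 10000
    flush_interval 5s
    flush_thread_count 2
  </buffer>
</match>
```
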
473 | ## License
474 |
475 | Please see [LICENSE](LICENSE).
476 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'bundler/gem_tasks'
4 | require 'rake/testtask'
5 |
6 | Rake::TestTask.new(:test) do |t|
7 | t.libs << 'test'
8 | t.libs << 'lib'
9 | t.test_files = FileList['test/**/*_test.rb']
10 | t.verbose = false
11 | t.warning = false
12 | end
13 |
14 | task default: :test
15 |
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 1.3.3
--------------------------------------------------------------------------------
/ci_scripts/deploy_connector.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | #Make sure to check and clean previously failed deployment
5 | echo "Checking if a previous deployment exists..."
6 | if [ "`helm ls --short`" == "" ]; then
7 | echo "Nothing to clean, ready for deployment"
8 | else
9 | helm delete $(helm ls --short)
10 | fi
11 |
12 | # Clone splunk-connect-for-kubernetes repo
13 | cd /opt
14 | git clone https://github.com/splunk/splunk-connect-for-kubernetes.git
15 | cd splunk-connect-for-kubernetes
16 |
17 | minikube image load splunk/fluentd-hec:recent
18 |
19 | echo "Deploying k8s-connect with latest changes"
20 | helm install ci-sck --set global.splunk.hec.token=$CI_SPLUNK_HEC_TOKEN \
21 | --set global.splunk.hec.host=$CI_SPLUNK_HOST \
22 | --set kubelet.serviceMonitor.https=true \
23 | --set splunk-kubernetes-logging.image.tag=recent \
24 | --set splunk-kubernetes-logging.image.pullPolicy=IfNotPresent \
25 | -f ci_scripts/sck_values.yml helm-chart/splunk-connect-for-kubernetes
26 | # kubectl get pod | grep "ci-sck-splunk-kubernetes-logging" | awk 'NR==1{print $1}
27 | kubectl get pod
28 | # wait for deployment to finish
29 | # metric and logging deamon set for each node + aggr + object + splunk
30 | PODS=$((MINIKUBE_NODE_COUNTS*2+2+1))
31 | until kubectl get pod | grep Running | [[ $(wc -l) == $PODS ]]; do
32 | kubectl get pod
33 | sleep 2;
34 | done
35 |
--------------------------------------------------------------------------------
/ci_scripts/install_dep.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | sudo gem update --system
3 | gem install bundler
4 | bundle update --bundler
5 | bundle install
--------------------------------------------------------------------------------
/ci_scripts/k8s-splunk.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: splunk
5 | spec:
6 | hostNetwork: true
7 | securityContext:
8 | runAsUser: 0
9 | runAsGroup: 0
10 | containers:
11 | - name: splunk
12 | image: splunk/splunk:latest
13 | ports:
14 | - containerPort: 8000
15 | hostPort: 8000
16 | protocol: TCP
17 | - containerPort: 8088
18 | hostPort: 8088
19 | protocol: TCP
20 | - containerPort: 8089
21 | hostPort: 8089
22 | protocol: TCP
23 | env:
24 | - name: SPLUNK_START_ARGS
25 | value: --accept-license
26 | - name: SPLUNK_USER
27 | value: root
28 | - name: SPLUNK_PASSWORD
29 | value: changeme2
30 | - name: SPLUNK_LAUNCH_CONF
31 | value: OPTIMISTIC_ABOUT_FILE_LOCKING=1
32 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ruby:3.1.4-buster as builder
2 |
3 | ADD ./ /app/
4 | WORKDIR /app
5 | RUN gem install bundler
6 | RUN bundle update --bundler
7 | RUN bundle install
8 |
9 | RUN bundle exec rake build -t -v
10 |
11 | FROM registry.access.redhat.com/ubi9/ruby-31
12 |
13 | ARG VERSION
14 |
15 | LABEL name="Splunk Connect for Kubernetes Logging container" \
16 | maintainer="DataEdge@splunk.com" \
17 | vendor="Splunk Inc." \
18 | version=${VERSION} \
19 | release=${VERSION} \
20 | summary="Splunk Connect for Kubernetes Logging container" \
21 | description="Splunk Connect for Kubernetes Logging container"
22 |
23 | ENV VERSION=${VERSION}
24 | ENV FLUENT_USER fluent
25 |
26 | USER root
27 |
28 | COPY --from=builder /app/pkg/fluent-plugin-*.gem /tmp/
29 |
30 | RUN mkdir /licenses
31 | COPY --from=builder /app/LICENSE /licenses/LICENSE
32 |
33 | RUN dnf install -y jq
34 |
35 | COPY --from=builder /app/docker/Gemfile* ./
36 | RUN yum update -y \
37 | && yum remove -y nodejs npm \
38 | && gem install bundler \
39 | && gem uninstall -i /usr/share/gems bundler \
40 | && gem unpack /tmp/*.gem --target gem \
41 | && bundle install \
42 | && yum groupremove -y "Development Tools" \
43 | && rpm -e --nodeps python3-urllib3-* python3-requests-* python3-libxml2-* python3-dmidecode-* subscription-manager-* libwebp-* libwebp-devel-* libjpeg-turbo-devel-* libjpeg-turbo-* mariadb-connector-c-config-* mariadb-connector-c-* mariadb-connector-c-devel-* rsync-* libX11-* libX11-common-* libX11-devel-* libX11-xcb-* dbus-daemon-* tar-* qt5-srpm-macros-* perl-parent-* git-* bsdtar-* openssh-clients-* binutils-* libtiff-devel-* libtiff-* python3-pip-wheel || true
44 |
45 | RUN groupadd -r $FLUENT_USER && \
46 | useradd -r -g $FLUENT_USER $FLUENT_USER && \
47 | mkdir -p /fluentd/log /fluentd/etc /fluentd/plugins &&\
48 | chown -R $FLUENT_USER /fluentd && chgrp -R $FLUENT_USER /fluentd && \
49 | chmod +t /tmp
50 |
51 | USER $FLUENT_USER
52 | CMD bundle exec fluentd -c /fluentd/etc/fluent.conf
--------------------------------------------------------------------------------
/docker/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 |
3 | # This is separate gemfile for building docker image that has all plugins
4 | # for kubernetes log collection agent
5 | # List all required gems here and install via bundler to resolve dependencies
6 | gem "fluentd", "=1.15.3"
7 | gem "fluent-plugin-systemd", "=1.0.2"
8 | gem "fluent-plugin-concat", "=2.4.0"
9 | gem "fluent-plugin-prometheus", "=2.0.2"
10 | gem "fluent-plugin-jq", "=0.5.1"
11 | gem 'kubeclient', git: 'https://github.com/splunk/kubeclient.git', ref: '955ec5b'
12 | gem 'fluent-plugin-kubernetes_metadata_filter', '~> 3.1'
13 | gem "oj", ">=3.11.2"
14 | gem 'multi_json', '~> 1.13'
15 | gem 'net-http-persistent', '~> 4.0'
16 | gem 'openid_connect', '~> 1.1.8'
17 | gem 'prometheus-client', '=2.1.0'
18 | gem 'http_parser.rb', '=0.8.0'
19 | gem "rack", ">=3.0.0"
20 | gem "fluent-plugin-record-modifier", ">=2.1"
21 | gem 'json-jwt', '~> 1.15.0'
22 | gem 'rack-oauth2', '~> 1.19'
23 | gem 'cgi', '~> 0.3.6'
24 | gem 'date', '~> 3.3.3'
25 |
26 |
27 | gem 'fluent-plugin-splunk-hec', path: 'gem/'
28 |
--------------------------------------------------------------------------------
/docker/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GIT
2 | remote: https://github.com/splunk/kubeclient.git
3 | revision: 955ec5b62f64b6065b25a7c0c76467e8b6badf17
4 | ref: 955ec5b
5 | specs:
6 | kubeclient (4.9.3)
7 | http (>= 3.0, < 5.0)
8 | jsonpath (~> 1.0)
9 | recursive-open-struct (~> 1.1, >= 1.1.1)
10 | rest-client (~> 2.0)
11 |
12 | PATH
13 | remote: gem
14 | specs:
15 | fluent-plugin-splunk-hec (1.3.3)
16 | fluentd (>= 1.5)
17 | json-jwt (~> 1.15.0)
18 | multi_json (~> 1.13)
19 | net-http-persistent (~> 4.0)
20 | openid_connect (~> 1.1.8)
21 | prometheus-client (>= 2.1.0)
22 | rack-oauth2 (~> 1.19)
23 |
24 | GEM
25 | remote: https://rubygems.org/
26 | specs:
27 | activemodel (7.0.7.2)
28 | activesupport (= 7.0.7.2)
29 | activesupport (7.0.7.2)
30 | concurrent-ruby (~> 1.0, >= 1.0.2)
31 | i18n (>= 1.6, < 2)
32 | minitest (>= 5.1)
33 | tzinfo (~> 2.0)
34 | addressable (2.8.5)
35 | public_suffix (>= 2.0.2, < 6.0)
36 | aes_key_wrap (1.1.0)
37 | attr_required (1.0.1)
38 | bindata (2.4.15)
39 | cgi (0.3.6)
40 | concurrent-ruby (1.2.2)
41 | connection_pool (2.4.1)
42 | cool.io (1.8.0)
43 | date (3.3.3)
44 | domain_name (0.5.20190701)
45 | unf (>= 0.0.5, < 1.0.0)
46 | ffi (1.15.5)
47 | ffi-compiler (1.0.1)
48 | ffi (>= 1.0.0)
49 | rake
50 | fluent-plugin-concat (2.4.0)
51 | fluentd (>= 0.14.0, < 2)
52 | fluent-plugin-jq (0.5.1)
53 | fluentd (>= 0.14.10, < 2)
54 | multi_json (~> 1.13)
55 | fluent-plugin-kubernetes_metadata_filter (3.1.0)
56 | fluentd (>= 0.14.0, < 1.16)
57 | kubeclient (>= 4.0.0, < 5.0.0)
58 | lru_redux
59 | fluent-plugin-prometheus (2.0.2)
60 | fluentd (>= 1.9.1, < 2)
61 | prometheus-client (>= 2.1.0)
62 | fluent-plugin-record-modifier (2.1.0)
63 | fluentd (>= 1.0, < 2)
64 | fluent-plugin-systemd (1.0.2)
65 | fluentd (>= 0.14.11, < 2)
66 | systemd-journal (~> 1.3.2)
67 | fluentd (1.15.3)
68 | bundler
69 | cool.io (>= 1.4.5, < 2.0.0)
70 | http_parser.rb (>= 0.5.1, < 0.9.0)
71 | msgpack (>= 1.3.1, < 2.0.0)
72 | serverengine (>= 2.3.0, < 3.0.0)
73 | sigdump (~> 0.2.2)
74 | strptime (>= 0.2.4, < 1.0.0)
75 | tzinfo (>= 1.0, < 3.0)
76 | tzinfo-data (~> 1.0)
77 | webrick (>= 1.4.2, < 1.8.0)
78 | yajl-ruby (~> 1.0)
79 | http (4.4.1)
80 | addressable (~> 2.3)
81 | http-cookie (~> 1.0)
82 | http-form_data (~> 2.2)
83 | http-parser (~> 1.2.0)
84 | http-accept (1.7.0)
85 | http-cookie (1.0.5)
86 | domain_name (~> 0.5)
87 | http-form_data (2.3.0)
88 | http-parser (1.2.3)
89 | ffi-compiler (>= 1.0, < 2.0)
90 | http_parser.rb (0.8.0)
91 | httpclient (2.8.3)
92 | i18n (1.14.1)
93 | concurrent-ruby (~> 1.0)
94 | json-jwt (1.15.3)
95 | activesupport (>= 4.2)
96 | aes_key_wrap
97 | bindata
98 | httpclient
99 | jsonpath (1.1.3)
100 | multi_json
101 | lru_redux (1.1.0)
102 | mail (2.7.1)
103 | mini_mime (>= 0.1.1)
104 | mime-types (3.4.1)
105 | mime-types-data (~> 3.2015)
106 | mime-types-data (3.2023.0808)
107 | mini_mime (1.1.5)
108 | minitest (5.19.0)
109 | msgpack (1.7.2)
110 | multi_json (1.15.0)
111 | net-http-persistent (4.0.2)
112 | connection_pool (~> 2.2)
113 | netrc (0.11.0)
114 | oj (3.16.0)
115 | openid_connect (1.1.8)
116 | activemodel
117 | attr_required (>= 1.0.0)
118 | json-jwt (>= 1.5.0)
119 | rack-oauth2 (>= 1.6.1)
120 | swd (>= 1.0.0)
121 | tzinfo
122 | validate_email
123 | validate_url
124 | webfinger (>= 1.0.1)
125 | prometheus-client (2.1.0)
126 | public_suffix (5.0.3)
127 | rack (3.0.8)
128 | rack-oauth2 (1.21.3)
129 | activesupport
130 | attr_required
131 | httpclient
132 | json-jwt (>= 1.11.0)
133 | rack (>= 2.1.0)
134 | rake (13.0.6)
135 | recursive-open-struct (1.1.3)
136 | rest-client (2.1.0)
137 | http-accept (>= 1.7.0, < 2.0)
138 | http-cookie (>= 1.0.2, < 2.0)
139 | mime-types (>= 1.16, < 4.0)
140 | netrc (~> 0.8)
141 | serverengine (2.3.0)
142 | sigdump (~> 0.2.2)
143 | sigdump (0.2.4)
144 | strptime (0.2.5)
145 | swd (1.3.0)
146 | activesupport (>= 3)
147 | attr_required (>= 0.0.5)
148 | httpclient (>= 2.4)
149 | systemd-journal (1.3.3)
150 | ffi (~> 1.9)
151 | timeout (0.4.0)
152 | tzinfo (2.0.6)
153 | concurrent-ruby (~> 1.0)
154 | tzinfo-data (1.2023.3)
155 | tzinfo (>= 1.0.0)
156 | unf (0.1.4)
157 | unf_ext
158 | unf_ext (0.0.8.2)
159 | validate_email (0.1.6)
160 | activemodel (>= 3.0)
161 | mail (>= 2.2.5)
162 | validate_url (1.0.15)
163 | activemodel (>= 3.0.0)
164 | public_suffix
165 | webfinger (1.2.0)
166 | activesupport
167 | httpclient (>= 2.4)
168 | webrick (1.7.0)
169 | yajl-ruby (1.4.3)
170 |
171 | PLATFORMS
172 | ruby
173 |
174 | DEPENDENCIES
175 | cgi (~> 0.3.6)
176 | date (~> 3.3.3)
177 | fluent-plugin-concat (= 2.4.0)
178 | fluent-plugin-jq (= 0.5.1)
179 | fluent-plugin-kubernetes_metadata_filter (~> 3.1)
180 | fluent-plugin-prometheus (= 2.0.2)
181 | fluent-plugin-record-modifier (>= 2.1)
182 | fluent-plugin-splunk-hec!
183 | fluent-plugin-systemd (= 1.0.2)
184 | fluentd (= 1.15.3)
185 | http_parser.rb (= 0.8.0)
186 | json-jwt (~> 1.15.0)
187 | kubeclient!
188 | multi_json (~> 1.13)
189 | net-http-persistent (~> 4.0)
190 | oj (>= 3.11.2)
191 | openid_connect (~> 1.1.8)
192 | prometheus-client (= 2.1.0)
193 | rack (>= 3.0.0)
194 | rack-oauth2 (~> 1.19)
195 |
196 | BUNDLED WITH
197 | 2.3.11
198 |
--------------------------------------------------------------------------------
/docker/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 | TAG=$1
4 |
5 | # Install dependencies
6 | gem install bundler
7 | bundle update --bundler
8 | bundle install
9 |
10 | # Build Gem
11 | bundle exec rake build -t -v
12 | cp pkg/fluent-plugin-*.gem docker
13 |
14 | # Build Docker Image
15 | VERSION=`cat VERSION`
16 | echo "Copying licenses to be included in the docker image..."
17 | mkdir -p docker/licenses
18 | cp -rp LICENSE docker/licenses/
19 | docker build --no-cache --pull --build-arg VERSION=$VERSION -t splunk/fluentd-hec:$TAG ./docker
20 |
--------------------------------------------------------------------------------
/fluent-plugin-splunk-hec.gemspec:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | Gem::Specification.new do |spec|
4 | spec.name = 'fluent-plugin-splunk-hec'
5 | spec.version = File.read('VERSION')
6 | spec.authors = ['Splunk Inc.']
7 | spec.email = ['DataEdge@splunk.com']
8 |
9 | spec.summary = 'Fluentd plugin for Splunk HEC.'
10 | spec.description = 'A fluentd output plugin created by Splunk
11 | that writes events to splunk indexers over HTTP Event Collector API.'
12 | spec.homepage = 'https://github.com/splunk/fluent-plugin-splunk-hec'
13 | spec.license = 'Apache-2.0'
14 |
15 | # Prevent pushing this gem to RubyGems.org.
16 | # To allow pushes either set the 'allowed_push_host' to allow
17 | # pushing to a single host or delete this section to allow pushing to any host.
18 | # if spec.respond_to?(:metadata)
19 | # spec.metadata["allowed_push_host"] = "TODO: Set to 'http://mygemserver.com'"
20 | # else
21 | # raise "RubyGems 2.0 or newer is required to protect against " \
22 | # "public gem pushes."
23 | # end
24 |
25 | spec.require_paths = ['lib']
26 | spec.test_files = Dir.glob('test/**/**.rb')
27 | spec.files = %w[
28 | CODE_OF_CONDUCT.md README.md LICENSE
29 | fluent-plugin-splunk-hec.gemspec
30 | Gemfile Gemfile.lock
31 | Rakefile VERSION
32 | ] + Dir.glob('lib/**/**').reject(&File.method(:directory?))
33 |
34 | spec.required_ruby_version = '>= 2.3.0'
35 |
36 | spec.add_runtime_dependency 'fluentd', '>= 1.5'
37 | spec.add_runtime_dependency 'multi_json', '~> 1.13'
38 | spec.add_runtime_dependency 'net-http-persistent', '~> 4.0'
39 | spec.add_runtime_dependency 'openid_connect', '~> 1.1.8'
40 | spec.add_runtime_dependency 'prometheus-client', '>= 2.1.0'
41 | spec.add_runtime_dependency 'json-jwt', '~> 1.15.0'
42 | spec.add_runtime_dependency 'rack-oauth2', '~> 1.19'
43 |
44 |
45 | spec.add_development_dependency 'bundler', '~> 2.0'
46 | spec.add_development_dependency 'rake', '>= 12.0'
47 | # required by fluent/test.rb
48 | spec.add_development_dependency 'minitest', '~> 5.0'
49 | spec.add_development_dependency 'simplecov', '~> 0.16.1'
50 | spec.add_development_dependency 'test-unit', '~> 3.0'
51 | spec.add_development_dependency 'webmock', '~> 3.5.0'
52 | end
53 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/out_splunk.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'fluent/plugin/output'
4 | require 'fluent/plugin/formatter'
5 | require 'prometheus/client'
6 | require 'benchmark'
7 |
8 | module Fluent::Plugin
9 | class SplunkOutput < Fluent::Plugin::Output
10 | helpers :formatter
11 |
12 | autoload :VERSION, 'fluent/plugin/out_splunk/version'
13 | autoload :MatchFormatter, 'fluent/plugin/out_splunk/match_formatter'
14 |
15 | KEY_FIELDS = %w[index host source sourcetype metric_name metric_value time].freeze
16 | TAG_PLACEHOLDER = '${tag}'
17 |
18 | desc 'The host field for events, by default it uses the hostname of the machine that is running fluentd. This is exclusive with `host_key`.'
19 | config_param :host, :string, default: nil
20 |
21 | desc 'Field name to contain host. This is exclusive with `host`.'
22 | config_param :host_key, :string, default: nil
23 |
24 | desc 'The source field for events, when not set, will be decided by HEC. This is exclusive with `source_key`.'
25 | config_param :source, :string, default: nil
26 |
27 | desc 'Field name to contain source. This is exclusive with `source`.'
28 | config_param :source_key, :string, default: nil
29 |
30 | desc 'The sourcetype field for events, when not set, will be decided by HEC. This is exclusive with `sourcetype_key`.'
31 | config_param :sourcetype, :string, default: nil
32 |
33 | desc 'Field name to contain sourcetype. This is exclusive with `sourcetype`.'
34 | config_param :sourcetype_key, :string, default: nil
35 |
36 | desc 'Field name to contain Splunk event time. By default will use fluentd\'s time'
37 | config_param :time_key, :string, default: nil
38 |
39 | desc 'The Splunk index to index events. When not set, will be decided by HEC. This is exclusive with `index_key`'
40 | config_param :index, :string, default: nil
41 |
42 | desc 'Field name to contain Splunk index name. This is exclusive with `index`.'
43 | config_param :index_key, :string, default: nil
44 |
45 | desc 'When set to true, all fields defined in `index_key`, `host_key`, `source_key`, `sourcetype_key`, `metric_name_key`, `metric_value_key` will not be removed from the original event.'
46 | config_param :keep_keys, :bool, default: false
47 |
48 | desc 'Define index-time fields for event data type, or metric dimensions for metric data type. Null value fields will be removed.'
49 | config_section :fields, init: false, multi: false, required: false do
50 | # this is blank on purpose
51 | end
52 |
53 | desc 'Indicates if 4xx errors should consume chunk'
54 | config_param :consume_chunk_on_4xx_errors, :bool, :default => true
55 |
56 | config_section :format do
57 | config_set_default :usage, '**'
58 | config_set_default :@type, 'json'
59 | config_set_default :add_newline, false
60 | end
61 |
62 | desc <<~DESC
63 | Whether to allow non-UTF-8 characters in user logs. If set to true, any
64 | non-UTF-8 character would be replaced by the string specified by
65 | `non_utf8_replacement_string`. If set to false, any non-UTF-8 character
66 | would trigger the plugin to error out.
67 | DESC
68 | config_param :coerce_to_utf8, :bool, default: true
69 |
70 | desc <<~DESC
71 | If `coerce_to_utf8` is set to true, any non-UTF-8 character would be
72 | replaced by the string specified here.
73 | DESC
74 | config_param :non_utf8_replacement_string, :string, default: ' '
75 |
76 | def initialize
77 | super
78 | @registry = ::Prometheus::Client.registry
79 | end
80 |
81 | def configure(conf)
82 | super
83 | check_conflict
84 | @api = construct_api
85 | prepare_key_fields
86 | configure_fields(conf)
87 | configure_metrics(conf)
88 |
89 | # @formatter_configs is from formatter helper
90 | @formatters = @formatter_configs.map do |section|
91 | MatchFormatter.new section.usage, formatter_create(usage: section.usage)
92 | end
93 | end
94 |
95 | def shutdown
96 | super
97 | end
98 |
99 | def write(chunk)
100 | log.trace { "#{self.class}: Received new chunk, size=#{chunk.read.bytesize}" }
101 |
102 | t = Benchmark.realtime do
103 | write_to_splunk(chunk)
104 | end
105 |
106 | @metrics[:record_counter].increment(labels: metric_labels, by: chunk.size)
107 | @metrics[:bytes_counter].increment(labels: metric_labels, by: chunk.bytesize)
108 | @metrics[:write_records_histogram].observe(chunk.size, labels: metric_labels)
109 | @metrics[:write_bytes_histogram].observe(chunk.bytesize, labels: metric_labels, )
110 | @metrics[:write_latency_histogram].observe(t, labels: metric_labels, )
111 | end
112 |
113 | def write_to_splunk(_chunk)
114 | raise NotImplementedError("Child class should implement 'write_to_splunk'")
115 | end
116 |
117 | def construct_api
118 | raise NotImplementedError("Child class should implement 'construct_api'")
119 | end
120 |
121 | protected
122 |
123 | def prepare_event_payload(tag, time, record)
124 | {
125 | host: @host ? @host.call(tag, record) : @default_host,
126 | # From the API reference
127 | # http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTinput#services.2Fcollector
128 | # `time` should be a string or unsigned integer.
129 | # That's why we use `to_s` here.
130 | time: time.to_f.to_s
131 | }.tap do |payload|
132 | payload[:index] = @index.call(tag, record) if @index
133 | payload[:source] = @source.call(tag, record) if @source
134 | payload[:sourcetype] = @sourcetype.call(tag, record) if @sourcetype
135 |
136 | # delete nil fields otherwise will get format error from HEC
137 | %i[host index source sourcetype].each { |f| payload.delete f if payload[f].nil? }
138 |
139 | if @extra_fields
140 | payload[:fields] = @extra_fields.map { |name, field| [name, record[field]] }.to_h
141 | payload[:fields].compact!
142 | # if a field is already in indexed fields, then remove it from the original event
143 | @extra_fields.values.each { |field| record.delete field }
144 | end
145 | if formatter = @formatters.find { |f| f.match? tag }
146 | record = formatter.format(tag, time, record)
147 | end
148 | payload[:event] = convert_to_utf8 record
149 | end
150 | end
151 |
152 | def format_event(tag, time, record)
153 | MultiJson.dump(prepare_event_payload(tag, time, record))
154 | end
155 |
156 | def process_response(response, request_body)
157 | log.trace { "[Response] POST #{@api}: #{response.inspect}" }
158 |
159 | @metrics[:status_counter].increment(labels: metric_labels(status: response.code.to_s))
160 |
161 | raise_err = response.code.to_s.start_with?('5') || (!@consume_chunk_on_4xx_errors && response.code.to_s.start_with?('4'))
162 |
163 | # raise Exception to utilize Fluentd output plugin retry mechanism
164 | raise "Server error (#{response.code}) for POST #{@api}, response: #{response.body}" if raise_err
165 |
166 | # For both success response (2xx) we will consume the chunk.
167 | unless response.code.to_s.start_with?('2')
168 | log.error "#{self.class}: Failed POST to #{@api}, response: #{response.body}"
169 | log.error { "#{self.class}: Failed request body: #{request_body}" }
170 | end
171 | end
172 |
173 | private
174 |
175 | def check_conflict
176 | KEY_FIELDS.each do |f|
177 | kf = "#{f}_key"
178 | raise Fluent::ConfigError, "Can not set #{f} and #{kf} at the same time." \
179 | if %W[@#{f} @#{kf}].all? &method(:instance_variable_get)
180 | end
181 | end
182 |
183 | def prepare_key_fields
184 | KEY_FIELDS.each do |f|
185 | v = instance_variable_get "@#{f}_key"
186 | if v
187 | attrs = v.split('.').freeze
188 | if @keep_keys
189 | instance_variable_set "@#{f}", ->(_, record) { attrs.inject(record) { |o, k| o[k] } }
190 | else
191 | instance_variable_set "@#{f}", lambda { |_, record|
192 | attrs[0...-1].inject(record) { |o, k| o[k] }.delete(attrs[-1])
193 | }
194 | end
195 | else
196 | v = instance_variable_get "@#{f}"
197 | next unless v
198 |
199 | if v.include? TAG_PLACEHOLDER
200 | instance_variable_set "@#{f}", ->(tag, _) { v.gsub(TAG_PLACEHOLDER, tag) }
201 | else
202 | instance_variable_set "@#{f}", ->(_, _) { v }
203 | end
204 | end
205 | end
206 | end
207 |
208 | # directive, which defines:
209 | # * when data_type is event, index-time fields
210 | # * when data_type is metric, metric dimensions
211 | def configure_fields(conf)
212 | # This loop looks dumb, but it is used to suppress the unused parameter configuration warning
213 | # Learned from `filter_record_transformer`.
214 | conf.elements.select { |element| element.name == 'fields' }.each do |element|
215 | element.each_pair { |k, _v| element.has_key?(k) }
216 | end
217 |
218 | return unless @fields
219 |
220 | @extra_fields = @fields.corresponding_config_element.map do |k, v|
221 | [k, v.empty? ? k : v]
222 | end.to_h
223 | end
224 |
225 | def pick_custom_format_method
226 | if @data_type == :event
227 | define_singleton_method :format, method(:format_event)
228 | else
229 | define_singleton_method :format, method(:format_metric)
230 | end
231 | end
232 |
233 | def configure_metrics(conf)
234 | @metric_labels = {
235 | type: conf['@type'],
236 | plugin_id: plugin_id
237 | }
238 |
239 | @metrics = {
240 | record_counter: register_metric(::Prometheus::Client::Counter.new(
241 | :splunk_output_write_records_count, docstring:
242 | 'The number of log records being sent',
243 | labels: metric_label_keys
244 | )),
245 | bytes_counter: register_metric(::Prometheus::Client::Counter.new(
246 | :splunk_output_write_bytes_count, docstring:
247 | 'The number of log bytes being sent',
248 | labels: metric_label_keys
249 | )),
250 | status_counter: register_metric(::Prometheus::Client::Counter.new(
251 | :splunk_output_write_status_count, docstring:
252 | 'The count of sends by response_code',
253 | labels: metric_label_keys(status: "")
254 | )),
255 | write_bytes_histogram: register_metric(::Prometheus::Client::Histogram.new(
256 | :splunk_output_write_payload_bytes, docstring:
257 | 'The size of the write payload in bytes', buckets: [1024, 23_937, 47_875, 95_750, 191_500, 383_000, 766_000, 1_149_000],
258 | labels: metric_label_keys
259 | )),
260 | write_records_histogram: register_metric(::Prometheus::Client::Histogram.new(
261 | :splunk_output_write_payload_records, docstring:
262 | 'The number of records written per write', buckets: [1, 10, 25, 100, 200, 300, 500, 750, 1000, 1500],
263 | labels: metric_label_keys
264 | )),
265 | write_latency_histogram: register_metric(::Prometheus::Client::Histogram.new(
266 | :splunk_output_write_latency_seconds, docstring:
267 | 'The latency of writes',
268 | labels: metric_label_keys
269 | ))
270 | }
271 | end
272 |
273 | # Tag metrics with the type string that was used to register the plugin
274 | def metric_labels(other_labels = {})
275 | @metric_labels.merge other_labels
276 | end
277 |
278 | def metric_label_keys(other_labels = {})
279 | (@metric_labels.merge other_labels).keys
280 | end
281 |
282 | # Encode as UTF-8. If 'coerce_to_utf8' is set to true in the config, any
283 | # non-UTF-8 character would be replaced by the string specified by
284 | # 'non_utf8_replacement_string'. If 'coerce_to_utf8' is set to false, any
285 | # non-UTF-8 character would trigger the plugin to error out.
286 | # Thanks to
287 | # https://github.com/GoogleCloudPlatform/fluent-plugin-google-cloud/blob/dbc28575/lib/fluent/plugin/out_google_cloud.rb#L1284
288 | def convert_to_utf8(input)
289 | if input.is_a?(Hash)
290 | record = {}
291 | input.each do |key, value|
292 | record[convert_to_utf8(key)] = convert_to_utf8(value)
293 | end
294 |
295 | return record
296 | end
297 | return input.map { |value| convert_to_utf8(value) } if input.is_a?(Array)
298 | return input unless input.respond_to?(:encode)
299 |
300 | if @coerce_to_utf8
301 | input.encode(
302 | 'utf-8',
303 | invalid: :replace,
304 | undef: :replace,
305 | replace: @non_utf8_replacement_string
306 | )
307 | else
308 | begin
309 | input.encode('utf-8')
310 | rescue EncodingError
311 | log.error do
312 | 'Encountered encoding issues potentially due to non ' \
313 | 'UTF-8 characters. To allow non-UTF-8 characters and ' \
314 | 'replace them with spaces, please set "coerce_to_utf8" ' \
315 | 'to true.'
316 | end
317 | raise
318 | end
319 | end
320 | end
321 |
322 | def register_metric(metric)
323 | if !@registry.exist?(metric.name)
324 | @registry.register(metric)
325 | else
326 | @registry.get(metric.name)
327 | end
328 | end
329 | end
330 | end
331 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/out_splunk/match_formatter.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'fluent/match'
4 |
5 | class Fluent::Plugin::SplunkOutput::MatchFormatter
6 | def initialize(pattern, formatter)
7 | # based on fluentd/lib/fluent/event_router.rb
8 | patterns = pattern.split(/\s+/).map do |str|
9 | Fluent::MatchPattern.create(str)
10 | end
11 | @pattern =
12 | if patterns.length == 1
13 | patterns[0]
14 | else
15 | Fluent::OrMatchPattern.new(patterns)
16 | end
17 | @formatter = formatter
18 | end
19 |
20 | def match?(tag)
21 | @pattern.match tag
22 | end
23 |
24 | def format(tag, time, record)
25 | @formatter.format tag, time, record
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/out_splunk/version.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | Fluent::Plugin::SplunkOutput::VERSION = File.read(File.expand_path('../../../../VERSION', File.dirname(__FILE__))).chomp.strip
4 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/out_splunk_hec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 | $LOAD_PATH.unshift(File.expand_path('..', __dir__))
3 | require 'fluent/env'
4 | require 'fluent/output'
5 | require 'fluent/plugin/output'
6 | require 'fluent/plugin/formatter'
7 | require 'fluent/plugin/out_splunk'
8 |
9 | require 'openssl'
10 | require 'multi_json'
11 | require 'net/http/persistent'
12 | require 'zlib'
13 |
14 | module Fluent::Plugin
15 | class SplunkHecOutput < SplunkOutput
16 | Fluent::Plugin.register_output('splunk_hec', self)
17 |
18 | helpers :formatter
19 |
20 | autoload :VERSION, "fluent/plugin/out_splunk_hec/version"
21 | autoload :MatchFormatter, "fluent/plugin/out_splunk_hec/match_formatter"
22 |
23 | KEY_FIELDS = %w[index time host source sourcetype metric_name metric_value].freeze
24 | TAG_PLACEHOLDER = '${tag}'.freeze
25 |
26 | MISSING_FIELD = Hash.new do |_h, k|
27 | $log.warn "expected field #{k} but it's missing" if defined?($log)
28 | MISSING_FIELD
29 | end.freeze
30 |
31 | desc 'Protocol to use to call HEC API.'
32 | config_param :protocol, :enum, list: %i[http https], default: :https
33 |
34 | desc 'The hostname/IP to HEC, or HEC load balancer.'
35 | config_param :hec_host, :string, default: ''
36 |
37 | desc 'The port number to HEC, or HEC load balancer.'
38 | config_param :hec_port, :integer, default: 8088
39 |
40 | desc 'HEC REST API endpoint to use'
41 | config_param :hec_endpoint, :string, default: 'services/collector'
42 |
43 | desc 'Full url to connect to Splunk. Example: https://mydomain.com:8088/apps/splunk'
44 | config_param :full_url, :string, default: ''
45 |
46 | desc 'The HEC token.'
47 | config_param :hec_token, :string, secret: true
48 |
49 | desc 'If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout.'
50 | config_param :idle_timeout, :integer, default: 5
51 |
52 | desc 'The amount of time allowed between reading two chunks from the socket.'
53 | config_param :read_timeout, :integer, default: nil
54 |
55 | desc 'The amount of time to wait for a connection to be opened.'
56 | config_param :open_timeout, :integer, default: nil
57 |
58 | desc 'The path to a file containing a PEM-format CA certificate for this client.'
59 | config_param :client_cert, :string, default: nil
60 |
61 | desc 'The private key for this client.'
62 | config_param :client_key, :string, default: nil
63 |
64 | desc 'The path to a file containing a PEM-format CA certificate.'
65 | config_param :ca_file, :string, default: nil
66 |
67 | desc 'The path to a directory containing CA certificates in PEM format.'
68 | config_param :ca_path, :string, default: nil
69 |
70 | desc 'List of SSL ciphers allowed.'
71 | config_param :ssl_ciphers, :array, default: nil
72 |
73 | desc 'When set to true, TLS version 1.1 and above is required.'
74 | config_param :require_ssl_min_version, :bool, default: true
75 |
76 | desc 'Indicates if insecure SSL connection is allowed.'
77 | config_param :insecure_ssl, :bool, default: false
78 |
79 | desc 'Type of data sending to Splunk, `event` or `metric`. `metric` type is supported since Splunk 7.0. To use `metric` type, make sure the index is a metric index.'
80 | config_param :data_type, :enum, list: %i[event metric], default: :event
81 |
82 | desc 'The Splunk index to index events. When not set, will be decided by HEC. This is exclusive with `index_key`'
83 | config_param :index, :string, default: nil
84 |
85 | desc 'Field name to contain Splunk index name. This is exclusive with `index`.'
86 | config_param :index_key, :string, default: nil
87 |
88 | desc 'When `data_type` is set to "metric", by default it will treat every key-value pair in the incoming event as a metric name-metric value pair. Set `metrics_from_event` to `false` to disable this behavior and use `metric_name_key` and `metric_value_key` to define metrics.'
89 | config_param :metrics_from_event, :bool, default: true
90 |
91 | desc 'Field name to contain metric name. This is exclusive with `metrics_from_event`, when this is set, `metrics_from_event` will be set to `false`.'
92 | config_param :metric_name_key, :string, default: nil
93 |
94 | desc 'Field name to contain metric value, this is required when `metric_name_key` is set.'
95 | config_param :metric_value_key, :string, default: nil
96 |
97 | desc 'When set to true, all fields defined in `index_key`, `host_key`, `source_key`, `sourcetype_key`, `metric_name_key`, `metric_value_key` will not be removed from the original event.'
98 | config_param :keep_keys, :bool, default: false
99 |
100 | desc 'Indicates if GZIP Compression is enabled.'
101 | config_param :gzip_compression, :bool, default: false
102 |
103 | desc 'App name'
104 | config_param :app_name, :string, default: "hec_plugin_gem"
105 |
106 | desc 'App version'
107 | config_param :app_version, :string, default: "#{VERSION}"
108 |
109 | desc 'Define index-time fields for event data type, or metric dimensions for metric data type. Null value fields will be removed.'
110 | config_section :fields, init: false, multi: false, required: false do
111 | # this is blank on purpose
112 | end
113 |
114 | desc 'Indicates if 4xx errors should consume chunk'
115 | config_param :consume_chunk_on_4xx_errors, :bool, :default => true
116 |
117 | config_section :format do
118 | config_set_default :usage, '**'
119 | config_set_default :@type, 'json'
120 | config_set_default :add_newline, false
121 | end
122 |
123 | desc <<~DESC
124 | Whether to allow non-UTF-8 characters in user logs. If set to true, any
125 | non-UTF-8 character would be replaced by the string specified by
126 | `non_utf8_replacement_string`. If set to false, any non-UTF-8 character
127 | would trigger the plugin to error out.
128 | DESC
129 | config_param :coerce_to_utf8, :bool, :default => true
130 |
131 | desc <<~DESC
132 | If `coerce_to_utf8` is set to true, any non-UTF-8 character would be
133 | replaced by the string specified here.
134 | DESC
135 | config_param :non_utf8_replacement_string, :string, :default => ' '
136 |
137 | desc 'Any custom headers to include alongside requests made to Splunk'
138 | config_param :custom_headers, :hash, :default => {}
139 |
140 | def initialize
141 | super
142 | @default_host = Socket.gethostname
143 | @extra_fields = nil
144 | end
145 |
146 | def configure(conf)
147 | super
148 | raise Fluent::ConfigError, 'One of `hec_host` or `full_url` is required.' if @hec_host.empty? && @full_url.empty?
149 | check_metric_configs
150 | pick_custom_format_method
151 | end
152 |
153 | def write(chunk)
154 | super
155 | end
156 |
157 | def start
158 | super
159 | @conn = Net::HTTP::Persistent.new.tap do |c|
160 | c.verify_mode = @insecure_ssl ? OpenSSL::SSL::VERIFY_NONE : OpenSSL::SSL::VERIFY_PEER
161 | c.cert = OpenSSL::X509::Certificate.new File.read(@client_cert) if @client_cert
162 | c.key = OpenSSL::PKey::RSA.new File.read(@client_key) if @client_key
163 | c.ca_file = @ca_file
164 | c.ca_path = @ca_path
165 | c.ciphers = @ssl_ciphers
166 | c.proxy = :ENV
167 | c.min_version = OpenSSL::SSL::TLS1_1_VERSION if @require_ssl_min_version
168 |
169 | c.override_headers['Content-Type'] = 'application/json'
170 | c.override_headers['User-Agent'] = "fluent-plugin-splunk_hec_out/#{VERSION}"
171 | c.override_headers['Authorization'] = "Splunk #{@hec_token}"
172 | c.override_headers['__splunk_app_name'] = "#{@app_name}"
173 | c.override_headers['__splunk_app_version'] = "#{@app_version}"
174 | @custom_headers.each do |header, value|
175 | c.override_headers[header] = value
176 | end
177 | end
178 | end
179 |
180 | def shutdown
181 | @conn.shutdown if not @conn.nil?
182 | super
183 | end
184 |
185 | def format(tag, time, record)
186 | # this method will be replaced in `configure`
187 | end
188 |
189 | def multi_workers_ready?
190 | true
191 | end
192 |
193 | protected
194 |
195 | private
196 |
197 | def check_metric_configs
198 | return unless @data_type == :metric
199 |
200 | @metrics_from_event = false if @metric_name_key
201 |
202 | return if @metrics_from_event
203 |
204 | raise Fluent::ConfigError, '`metric_name_key` is required when `metrics_from_event` is `false`.' unless @metric_name_key
205 | raise Fluent::ConfigError, '`metric_value_key` is required when `metric_name_key` is set.' unless @metric_value_key
206 | end
207 |
208 | def format_event(tag, time, record)
209 | d = {
210 | host: @host ? @host.(tag, record) : @default_host,
211 | # From the API reference
212 | # http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTinput#services.2Fcollector
213 | # `time` should be a string or unsigned integer.
214 | # That's why we use the to_string function here.
215 | time: time.to_f.to_s
216 | }.tap { |payload|
217 | if @time
218 | time_value = @time.(tag, record)
219 | # if no value is found don't override and use fluentd's time
220 | if !time_value.nil?
221 | payload[:time] = time_value
222 | end
223 | end
224 |
225 | payload[:index] = @index.(tag, record) if @index
226 | payload[:source] = @source.(tag, record) if @source
227 | payload[:sourcetype] = @sourcetype.(tag, record) if @sourcetype
228 |
229 | # delete nil fields otherwise will get format error from HEC
230 | %i[host index source sourcetype].each { |f| payload.delete f if payload[f].nil? }
231 |
232 | if @extra_fields
233 | payload[:fields] = @extra_fields.map { |name, field| [name, record[field]] }.to_h
234 | payload[:fields].delete_if { |_k,v| v.nil? }
235 | # if a field is already in indexed fields, then remove it from the original event
236 | @extra_fields.values.each { |field| record.delete field }
237 | end
238 | if formatter = @formatters.find { |f| f.match? tag }
239 | record = formatter.format(tag, time, record)
240 | end
241 | payload[:event] = convert_to_utf8 record
242 | }
243 | if d[:event] == "{}"
244 | log.warn { "Event after formatting was blank, not sending" }
245 | return ""
246 | end
247 | MultiJson.dump(d)
248 | end
249 |
250 | def format_metric(tag, time, record)
251 | payload = {
252 | host: @host ? @host.call(tag, record) : @default_host,
253 | # From the API reference
254 | # http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTinput#services.2Fcollector
255 | # `time` should be a string or unsigned integer.
256 | # That's why we use `to_s` here.
257 | time: time.to_f.to_s,
258 | event: 'metric'
259 | }.tap do |payload|
260 | if @time
261 | time_value = @time.(tag, record)
262 | # if no value is found don't override and use fluentd's time
263 | if !time_value.nil?
264 | payload[:time] = time_value
265 | end
266 | end
267 | end
268 | payload[:index] = @index.call(tag, record) if @index
269 | payload[:source] = @source.call(tag, record) if @source
270 | payload[:sourcetype] = @sourcetype.call(tag, record) if @sourcetype
271 |
272 | unless @metrics_from_event
273 | fields = {
274 | metric_name: @metric_name.call(tag, record),
275 | _value: @metric_value.call(tag, record)
276 | }
277 |
278 | if @extra_fields
279 | fields.update @extra_fields.map { |name, field| [name, record[field]] }.to_h
280 | fields.delete_if { |_k,v| v.nil? }
281 | else
282 | fields.update record
283 | end
284 |
285 | fields.delete_if { |_k,v| v.nil? }
286 |
287 | payload[:fields] = convert_to_utf8 fields
288 |
289 | return MultiJson.dump(payload)
290 | end
291 |
292 | # when metrics_from_event is true, generate one metric event for each key-value in record
293 | payloads = record.map do |key, value|
294 | { fields: { metric_name: key, _value: value } }.merge! payload
295 | end
296 |
297 | payloads.map!(&MultiJson.method(:dump)).join
298 | end
299 |
300 | def construct_api
301 | if @full_url.empty?
302 | URI("#{@protocol}://#{@hec_host}:#{@hec_port}/#{@hec_endpoint.delete_prefix("/")}")
303 | else
304 | URI("#{@full_url.delete_suffix("/")}/#{@hec_endpoint.delete_prefix("/")}")
305 | end
306 | rescue StandardError
307 | if @full_url.empty?
308 | raise Fluent::ConfigError, "hec_host (#{@hec_host}) and/or hec_port (#{@hec_port}) are invalid."
309 | else
310 | raise Fluent::ConfigError, "full_url (#{@full_url}) is invalid."
311 | end
312 | end
313 |
314 | def new_connection
315 | Net::HTTP::Persistent.new.tap do |c|
316 | c.verify_mode = @insecure_ssl ? OpenSSL::SSL::VERIFY_NONE : OpenSSL::SSL::VERIFY_PEER
317 | c.cert = OpenSSL::X509::Certificate.new File.read(@client_cert) if @client_cert
318 | c.key = OpenSSL::PKey::RSA.new File.read(@client_key) if @client_key
319 | c.ca_file = @ca_file
320 | c.ca_path = @ca_path
321 | c.ciphers = @ssl_ciphers
322 | c.proxy = :ENV
323 | c.idle_timeout = @idle_timeout
324 | c.read_timeout = @read_timeout
325 | c.open_timeout = @open_timeout
326 | c.min_version = OpenSSL::SSL::TLS1_1_VERSION if @require_ssl_min_version
327 |
328 | c.override_headers['Content-Type'] = 'application/json'
329 | c.override_headers['User-Agent'] = "fluent-plugin-splunk_hec_out/#{VERSION}"
330 | c.override_headers['Authorization'] = "Splunk #{@hec_token}"
331 | c.override_headers['__splunk_app_name'] = "#{@app_name}"
332 | c.override_headers['__splunk_app_version'] = "#{@app_version}"
333 | end
334 | end
335 |
336 | def write_to_splunk(chunk)
337 | post = Net::HTTP::Post.new @api.request_uri
338 | if @gzip_compression
339 | post.add_field("Content-Encoding", "gzip")
340 | gzip_stream = Zlib::GzipWriter.new StringIO.new
341 | gzip_stream << chunk.read
342 | post.body = gzip_stream.close.string
343 | else
344 | post.body = chunk.read
345 | end
346 |
347 | log.debug { "[Sending] Chunk: #{dump_unique_id_hex(chunk.unique_id)}(#{post.body.bytesize}B)." }
348 | log.trace { "POST #{@api} body=#{post.body}" }
349 | begin
350 | t1 = Time.now
351 | response = @conn.request @api, post
352 | t2 = Time.now
353 | rescue Net::HTTP::Persistent::Error => e
354 | raise e.cause
355 | end
356 |
357 | raise_err = response.code.to_s.start_with?('5') || (!@consume_chunk_on_4xx_errors && response.code.to_s.start_with?('4'))
358 |
359 | # raise Exception to utilize Fluentd output plugin retry mechanism
360 | raise "Server error (#{response.code}) for POST #{@api}, response: #{response.body}" if raise_err
361 |
362 | # For both success response (2xx) we will consume the chunk.
363 | if not response.code.start_with?('2')
364 | log.error "Failed POST to #{@api}, response: #{response.body}"
365 | log.debug { "Failed request body: #{post.body}" }
366 | end
367 |
368 | log.debug { "[Response] Chunk: #{dump_unique_id_hex(chunk.unique_id)} Size: #{post.body.bytesize} Response: #{response.inspect} Duration: #{t2 - t1}" }
369 | process_response(response, post.body)
370 | end
371 |
372 | # Encode as UTF-8. If 'coerce_to_utf8' is set to true in the config, any
373 | # non-UTF-8 character would be replaced by the string specified by
374 | # 'non_utf8_replacement_string'. If 'coerce_to_utf8' is set to false, any
375 | # non-UTF-8 character would trigger the plugin to error out.
376 | # Thanks to
377 | # https://github.com/GoogleCloudPlatform/fluent-plugin-google-cloud/blob/dbc28575/lib/fluent/plugin/out_google_cloud.rb#L1284
378 | def convert_to_utf8(input)
379 | if input.is_a?(Hash)
380 | record = {}
381 | input.each do |key, value|
382 | record[convert_to_utf8(key)] = convert_to_utf8(value)
383 | end
384 |
385 | return record
386 | end
387 | return input.map { |value| convert_to_utf8(value) } if input.is_a?(Array)
388 | return input unless input.respond_to?(:encode)
389 |
390 | if @coerce_to_utf8
391 | input.encode(
392 | 'utf-8',
393 | invalid: :replace,
394 | undef: :replace,
395 | replace: @non_utf8_replacement_string)
396 | else
397 | begin
398 | input.encode('utf-8')
399 | rescue EncodingError
400 | log.error { 'Encountered encoding issues potentially due to non ' \
401 | 'UTF-8 characters. To allow non-UTF-8 characters and ' \
402 | 'replace them with spaces, please set "coerce_to_utf8" ' \
403 | 'to true.' }
404 | raise
405 | end
406 | end
407 | end
408 | end
409 | end
410 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/out_splunk_hec/version.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | Fluent::Plugin::SplunkHecOutput::VERSION = File.read(
4 | File.expand_path('../../../../VERSION', File.dirname(__FILE__))
5 | ).chomp.strip
6 |
--------------------------------------------------------------------------------
/lib/fluent/plugin/out_splunk_ingest_api.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 | $LOAD_PATH.unshift(File.expand_path('..', __dir__))
3 | require 'fluent/plugin/out_splunk'
4 | require 'openid_connect'
5 | require 'rack/oauth2'
6 | require 'multi_json'
7 |
8 | module Fluent::Plugin
9 | class SplunkIngestApiOutput < SplunkOutput
10 | Fluent::Plugin.register_output('splunk_ingest_api', self)
11 |
12 | desc 'Service Client Identifier'
13 | config_param :service_client_identifier, :string, default: nil
14 |
15 | desc 'Service Client Secret Key'
16 | config_param :service_client_secret_key, :string, default: nil
17 |
18 | desc 'Token Endpoint'
19 | config_param :token_endpoint, :string, default: '/token'
20 |
21 | desc 'Token Auth Hostname'
22 | config_param :ingest_auth_host, :string, default: 'auth.scp.splunk.com'
23 |
24 | desc 'Ingest Api Hostname'
25 | config_param :ingest_api_host, :string, default: 'api.scp.splunk.com'
26 |
27 | desc 'Ingest API Tenant Name'
28 | config_param :ingest_api_tenant, :string
29 |
30 | desc 'Ingest API Events Endpoint'
31 | config_param :ingest_api_events_endpoint, :string, default: '/ingest/v1beta2/events'
32 |
33 | desc 'Debug the HTTP transport'
34 | config_param :debug_http, :bool, default: false
35 |
36 | def prefer_buffer_processing
37 | true
38 | end
39 |
40 | def configure(conf)
41 | super
42 | end
43 |
44 | def write(chunk)
45 | super
46 | end
47 |
48 | def construct_api
49 | uri = "https://#{@ingest_api_host}/#{@ingest_api_tenant}#{@ingest_api_events_endpoint}"
50 | URI(uri)
51 | rescue StandardError
52 | raise Fluent::ConfigError, "URI #{uri} is invalid"
53 | end
54 |
55 | def format(tag, time, record)
56 | format_event(tag, time, record)
57 | end
58 |
59 | def format_event(tag, time, record)
60 | event = prepare_event_payload(tag, time, record)
61 | # There is no clean way to drop a record here, so return an empty string instead
62 | if event[:body].nil? || event[:body].strip.empty?
63 | ''
64 | else
65 | MultiJson.dump(event) + ','
66 | end
67 | end
68 |
69 | def prepare_event_payload(tag, time, record)
70 | payload = super(tag, time, record)
71 | payload[:attributes] = payload.delete(:fields) || {}
72 | payload[:attributes][:index] = payload.delete(:index) if payload[:index]
73 | payload[:body] = payload.delete(:event)
74 | payload.delete(:time)
75 | payload[:timestamp] = (time.to_f * 1000).to_i
76 | payload[:nanos] = time.nsec / 100_000
77 |
78 | payload
79 | end
80 |
81 | def process_response(response, request_body)
82 | super
83 | if response.code.to_s == '401'
84 | @conn = new_connection
85 | raise 'Auth Error received. New token has been fetched.'
86 | elsif response.code.to_s == '429'
87 | raise "Throttle error from server. #{response.body}"
88 | elsif /INVALID_DATA/.match?(response.body)
89 | log.error "#{self.class}: POST Body #{request_body}"
90 | end
91 | end
92 |
93 | def new_connection
94 | Rack::OAuth2.debugging = true if @debug_http
95 | client = OpenIDConnect::Client.new(
96 | token_endpoint: @token_endpoint,
97 | identifier: @service_client_identifier,
98 | secret: @service_client_secret_key,
99 | redirect_uri: 'http://localhost:8080/', # Not used
100 | host: @ingest_auth_host,
101 | scheme: 'https'
102 | )
103 |
104 | client.access_token!(client_auth_method: 'other')
105 | end
106 |
107 | def write_to_splunk(chunk)
108 | log.trace "#{self.class}: In write() with #{chunk.size} records and #{chunk.bytesize} bytes "
109 | # ingest API is an array of json objects
110 | body = "[#{chunk.read.chomp(',')}]"
111 | @conn ||= new_connection
112 | response = @conn.post("https://#{@ingest_api_host}/#{@ingest_api_tenant}#{@ingest_api_events_endpoint}", body: body)
113 | process_response(response, body)
114 | end
115 | end
116 | end
117 |
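A standalone sketch of the reshaping that prepare_event_payload performs on the HEC-style payload built by the shared SplunkOutput base class; the values are made up, and Ruby's Time stands in for Fluent::EventTime (both provide to_f and nsec):

```ruby
require 'time'

time = Time.parse('2021-01-02T03:04:05.123456789Z')

# HEC-style payload (as produced by the shared SplunkOutput logic) ...
hec_style = { event: 'hello world', fields: { 'level' => 'info' }, time: time.to_f }

# ... reshaped for the ingest API: fields -> attributes, event -> body, and the
# event time split into whole milliseconds plus a remainder in 100-microsecond units.
ingest_style = {
  attributes: hec_style[:fields],
  body:       hec_style[:event],
  timestamp:  (time.to_f * 1000).to_i, # => 1609556645123
  nanos:      time.nsec / 100_000      # => 1234
}
```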
--------------------------------------------------------------------------------
/test/fluent/plugin/out_splunk_hec_test.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'test_helper'
4 |
5 | describe Fluent::Plugin::SplunkHecOutput do
6 | include Fluent::Test::Helpers
7 | include PluginTestHelper
8 |
9 | before { Fluent::Test.setup } # setup router and others
10 |
11 | it { expect(::Fluent::Plugin::SplunkHecOutput::VERSION).wont_be_nil }
12 |
13 | describe 'config param tests' do
14 | it 'should require https protocol' do
15 | expect(create_hec_output_driver('hec_host protocol').instance.protocol).must_equal :https
16 | end
17 | it 'should require hec_host' do
18 | expect(create_hec_output_driver('hec_host hec_host').instance.hec_host).must_equal 'hec_host'
19 | end
20 | it 'should require hec_port' do
21 | expect(create_hec_output_driver('hec_host hec_port').instance.hec_port).must_equal 8088
22 | end
23 | it 'should require hec_token' do
24 | expect(create_hec_output_driver('hec_host hec_token').instance.hec_token).must_equal 'some-token'
25 | end
26 | it 'should define client_cert as nil initially' do
27 | assert_nil(create_hec_output_driver('hec_host hec_token').instance.client_cert)
28 | end
29 | it 'should define client_key as nil (string) initially' do
30 | assert_nil(create_hec_output_driver('hec_host hec_token').instance.client_key)
31 | expect(create_hec_output_driver('hec_host hec_token').instance.client_key).is_a? String
32 | end
33 | it 'should define ca_file as nil (string) initially' do
34 | assert_nil(create_hec_output_driver('hec_host hec_token').instance.ca_file)
35 | expect(create_hec_output_driver('hec_host hec_token').instance.ca_file).is_a? String
36 | end
37 | it 'should define ca_path as nil (string) initially' do
38 | assert_nil(create_hec_output_driver('hec_host hec_token').instance.ca_path)
39 | expect(create_hec_output_driver('hec_host hec_token').instance.ca_path).is_a? String
40 | end
41 | it 'should define ssl_ciphers as nil (array) initially' do
42 | assert_nil(create_hec_output_driver('hec_host hec_token').instance.ssl_ciphers)
43 | expect(create_hec_output_driver('hec_host hec_token').instance.ssl_ciphers).is_a? Array
44 | end
45 | it 'should not allow an insecure ssl connection' do
46 | expect(create_hec_output_driver('hec_host hec_token').instance.insecure_ssl).must_equal false
47 | end
48 | it 'should allow both event (default) and metric to be sent to splunk' do
49 | expect(create_hec_output_driver('hec_host hec_token').instance.data_type).must_equal :event
50 | expect(create_hec_output_driver('hec_host hec_token').instance.data_type = :metric).must_equal :metric
51 | end
52 | it 'should define Splunk index as nil (string) initially' do
53 | assert_nil(create_hec_output_driver('hec_host hec_token').instance.index)
54 | expect(create_hec_output_driver('hec_host hec_token').instance.index).is_a? String
55 | end
56 | it 'should define index_key as nil (string) initially' do
57 | assert_nil(create_hec_output_driver('hec_host hec_token').instance.index_key)
58 | expect(create_hec_output_driver('hec_host hec_token').instance.index_key).is_a? String
59 | end
60 | it 'should consume chunks on 4xx errors' do
61 | expect(create_hec_output_driver('hec_host hec_token').instance.consume_chunk_on_4xx_errors).must_equal true
62 | end
63 | it 'should default gzip off' do
64 | expect(create_hec_output_driver('hec_host hec_token').instance.gzip_compression).must_equal false
65 | end
66 | it 'should support enabling gzip' do
67 | expect(create_hec_output_driver('hec_host hec_token', 'gzip_compression true').instance.gzip_compression).must_equal true
68 | end
69 | it 'should define custom_headers as {} (hash) initially' do
70 | assert_empty(create_hec_output_driver('hec_host hec_token').instance.custom_headers)
71 | expect(create_hec_output_driver('hec_host hec_token').instance.custom_headers).is_a? Hash
72 | end
73 | it 'should allow setting custom_headers' do
74 | assert_equal(create_hec_output_driver('hec_host hec_token', 'custom_headers {"custom":"header"}').instance.custom_headers, {"custom" => "header"})
75 | end
76 | end
77 |
78 | describe 'hec_host validation' do
79 | describe 'invalid host' do
80 | it 'should require hec_host or full_url' do
81 | expect { create_hec_output_driver }.must_raise Fluent::ConfigError
82 | end
83 |
84 | it { expect { create_hec_output_driver('hec_host %bad-host%') }.must_raise Fluent::ConfigError }
85 | end
86 |
87 | describe 'good host' do
88 | it {
89 | expect(create_hec_output_driver('hec_host splunk.com').instance.hec_host).must_equal 'splunk.com'
90 | }
91 | end
92 | end
93 |
94 | describe 'full_url validation' do
95 | describe 'invalid full_url' do
96 | it { expect { create_hec_output_driver(full_url: '%bad-host%.com') }.must_raise Fluent::ConfigError }
97 | end
98 | describe 'good full_url' do
99 | it {
100 | expect(create_hec_output_driver('full_url https://splunk.com').instance.full_url).must_equal 'https://splunk.com'
101 | }
102 | end
103 | end
104 |
105 | it 'should send request to Splunk' do
106 | req = verify_sent_events do |batch|
107 | expect(batch.size).must_equal 2
108 | end
109 | expect(req).must_be_requested times: 1
110 | end
111 |
112 | it 'should use string for event time, and the value of the string should be a float' do
113 | verify_sent_events do |batch|
114 | batch.each do |item|
115 | expect(item['time']).must_be_instance_of String
116 | expect(item['time']).must_match /^\d+\.\d+$/
117 | end
118 | end
119 | end
120 |
121 | # it "should contain splunk event time field via fluentd, as nil" do
122 | # expect(create_hec_output_driver('hec_host splunk.com').instance.time_key).must_equal nil
123 | # end
124 | #
125 | it 'should contain splunk event time field via fluentd, as nil' do
126 | test_driver = create_hec_output_driver('hec_host splunk.com')
127 | assert_nil(test_driver.instance.time_key)
128 | end
129 |
130 | it "should use host machine's hostname for event host by default" do
131 | verify_sent_events do |batch|
132 | batch.each do |item|
133 | expect(item['host']).must_equal Socket.gethostname
134 | end
135 | end
136 | end
137 |
138 | %w[index source sourcetype].each do |field|
139 | it "should not set #{field} by default" do
140 | verify_sent_events do |batch|
141 | batch.each do |item|
142 | expect(item).wont_include field
143 | end
144 | end
145 | end
146 | end
147 |
148 | it 'should support ${tag}' do
149 | verify_sent_events(<<~CONF) do |batch|
150 | index ${tag}
151 | host ${tag}
152 | source ${tag}
153 | sourcetype ${tag}
154 | CONF
155 | batch.each do |item|
156 | %w[index host source sourcetype].each do |field|
157 | expect(%w[tag.event1 tag.event2]).must_include item[field]
158 | end
159 | end
160 | end
161 | end
162 |
163 | it 'should support *_key' do
164 | verify_sent_events(<<~CONF) do |batch|
165 | index_key level
166 | host_key from
167 | source_key file
168 | sourcetype_key agent.name
169 | time_key timestamp
170 | CONF
171 | batch.each do |item|
172 | expect(item['index']).must_equal 'info'
173 | expect(item['host']).must_equal 'my_machine'
174 | expect(item['source']).must_equal 'cool.log'
175 | expect(item['sourcetype']).must_equal 'test'
176 |
177 | JSON.load(item['event']).tap do |event|
178 | %w[level from file timestamp].each { |field| expect(event).wont_include field }
179 | expect(event['agent']).wont_include 'name'
180 | end
181 | end
182 | end
183 | end
184 |
185 | it 'should remove nil fields' do
186 | verify_sent_events(<<~CONF) do |batch|
187 | index_key nonexist
188 | host_key nonexist
189 | source_key nonexist
190 | sourcetype_key nonexist
191 | CONF
192 | batch.each do |item|
193 | expect(item).wont_be :has_key?, 'index'
194 | expect(item).wont_be :has_key?, 'host'
195 | expect(item).wont_be :has_key?, 'source'
196 | expect(item).wont_be :has_key?, 'sourcetype'
197 | end
198 | end
199 | end
200 |
201 | describe 'formatter' do
202 | it 'should support replacing the default json formatter' do
203 | verify_sent_events(<<~CONF) do |batch|
204 | <format>
205 | @type single_value
206 | message_key log
207 | add_newline false
208 | </format>
209 | CONF
210 | batch.map { |item| item['event'] }
211 | .each { |event| expect(event).must_equal 'everything is good' }
212 | end
213 | end
214 |
215 | it 'should support multiple formatters' do
216 | verify_sent_events(<<~CONF) do |batch|
217 | source ${tag}
218 | <format tag.event1>
219 | @type single_value
220 | message_key log
221 | add_newline false
222 | </format>
223 | CONF
224 | expect(batch.find { |item| item['source'] == 'tag.event1' }['event']).must_equal 'everything is good'
225 | expect(batch.find { |item| item['source'] == 'tag.event2' }['event']).must_be_instance_of Hash
226 | end
227 | end
228 | end
229 |
230 | it 'should support fields for indexed field extraction' do
231 | verify_sent_events(<<~CONF) do |batch|
232 | <fields>
233 | from
234 | logLevel level
235 | nonexist
236 | </fields>
237 | CONF
238 | batch.each do |item|
239 | JSON.load(item['event']).tap do |event|
240 | expect(event).wont_include 'from'
241 | expect(event).wont_include 'level'
242 | end
243 |
244 | expect(item['fields']['from']).must_equal 'my_machine'
245 | expect(item['fields']['logLevel']).must_equal 'info'
246 | expect(item['fields']).wont_be :has_key?, 'nonexist'
247 | end
248 | end
249 | end
250 |
251 | it 'should not send blank events' do
252 | verify_sent_events(<<~CONF) do |batch|
253 | <fields>
254 | from
255 | logLevel level
256 | nonexist
257 | log
258 | file
259 | value
260 | id
261 | agent
262 | timestamp
263 | </fields>
264 | CONF
265 | expect(batch.length).must_equal 0
266 | end
267 | end
268 |
269 | describe 'metric' do
270 | it 'should check related configs' do
271 | expect(
272 | create_hec_output_driver('hec_host somehost', 'data_type metric')
273 | ).wont_be_nil
274 |
275 | expect do
276 | create_hec_output_driver('hec_host somehost', 'data_type metric', 'metrics_from_event false')
277 | end.must_raise Fluent::ConfigError
278 |
279 | expect do
280 | create_hec_output_driver('hec_host somehost', 'data_type metric', 'metric_name_key x')
281 | end.must_raise Fluent::ConfigError
282 |
283 | expect(
284 | create_hec_output_driver('hec_host somehost', 'data_type metric', 'metric_name_key x', 'metric_value_key y')
285 | ).wont_be_nil
286 | end
287 |
288 | it 'should have "metric" as event, and have proper fields' do
289 | verify_sent_events(<<~CONF) do |batch|
290 | data_type metric
291 | metric_name_key from
292 | metric_value_key value
293 | CONF
294 | batch.each do |item|
295 | expect(item['event']).must_equal 'metric'
296 | expect(item['fields']['metric_name']).must_equal 'my_machine'
297 | expect(item['fields']['_value']).must_equal 100
298 | expect(item['fields']['log']).must_equal 'everything is good'
299 | expect(item['fields']['level']).must_equal 'info'
300 | expect(item['fields']['file']).must_equal 'cool.log'
301 | end
302 | end
303 | end
304 |
305 | it 'should handle empty fields' do
306 | verify_sent_events(<<~CONF) do |batch|
307 | data_type metric
308 | metric_name_key from
309 | metric_value_key value
310 | <fields>
311 | </fields>
312 | CONF
313 | batch.each do |item|
314 | # only "metric_name" and "_value"
315 | expect(item['fields'].keys.size).must_equal 2
316 | end
317 | end
318 | end
319 |
320 | it 'should handle custom fields' do
321 | verify_sent_events(<<~CONF) do |batch|
322 | data_type metric
323 | metric_name_key from
324 | metric_value_key value
325 | <fields>
326 | level
327 | filePath file
328 | username
329 | </fields>
330 | CONF
331 | batch.each do |item|
332 | expect(item['fields'].keys.size).must_equal 4
333 | expect(item['fields']['level']).must_equal 'info'
334 | expect(item['fields']['filePath']).must_equal 'cool.log'
335 | # null fields should be removed
336 | expect(item['fields']).wont_be :has_key?, 'username'
337 | end
338 | end
339 | end
340 |
341 | it 'should treat each key-value in event as a metric' do
342 | metrics = [
343 | ['tag', event_time, { 'cup': 0.5, 'memory': 100 }],
344 | ['tag', event_time, { 'cup': 0.6, 'memory': 200 }]
345 | ]
346 | with_stub_hec(events: metrics, conf: 'data_type metric') do |batch|
347 | expect(batch.size).must_equal 4
348 | end
349 | end
350 | end
351 |
352 | describe 'timeout params' do
353 | it 'should reset unused connection after 5 seconds' do
354 | expect(create_hec_output_driver('hec_host splunk.com', 'idle_timeout 5').instance.idle_timeout).must_equal 5
355 | end
356 |
357 | it 'should allow a custom timeout between reading chunks from the socket' do
358 | expect(create_hec_output_driver('hec_host splunk.com', 'read_timeout 5').instance.read_timeout).must_equal 5
359 | end
360 |
361 | it 'should allow a custom timeout for opening a connection' do
362 | expect(create_hec_output_driver('hec_host splunk.com', 'open_timeout 5').instance.open_timeout).must_equal 5
363 | end
364 |
365 | it 'should check default values are created correctly for timeout params' do
366 | test_driver = create_hec_output_driver('hec_host splunk.com')
367 | expect(test_driver.instance.idle_timeout).must_equal 5
368 | assert_nil(test_driver.instance.read_timeout)
369 | assert_nil(test_driver.instance.open_timeout)
370 | end
371 | end
372 |
373 | describe 'gzip encoding' do
374 | it 'should include gzip header when enabled' do
375 | metrics = [
376 | ['tag', event_time, { 'cup': 0.5, 'memory': 100 }]
377 | ]
378 | with_stub_hec_gzip(events: metrics, conf: 'data_type metric')
379 | end
380 | end
381 |
382 | def with_stub_hec(events:, conf: '')
383 | host = 'hec.splunk.com'
384 | @driver = create_hec_output_driver("hec_host #{host}", conf)
385 |
386 | hec_req = stub_hec_request("https://#{host}:8088").with do |r|
387 | yield r.body.split(/(?={)\s*(?<=})/).map { |item| JSON.load item }
388 | end
389 |
390 | @driver.run do
391 | events.each { |evt| @driver.feed *evt }
392 | end
393 |
394 | hec_req
395 | end
396 |
397 | def with_stub_hec_gzip(events:, conf: '')
398 | host = 'hec.splunk.com'
399 | @driver = create_hec_output_driver("hec_host #{host}", 'gzip_compression true', conf)
400 |
401 | hec_req = stub_hec_gzip_request("https://#{host}:8088").with do |r|
402 | yield r.body.split(/(?={)\s*(?<=})/).map { |item| JSON.load item }
403 | end
404 |
405 | @driver.run do
406 | events.each { |evt| @driver.feed *evt }
407 | end
408 |
409 | hec_req
410 | end
411 |
412 | def verify_sent_events(conf = '', &blk)
413 | event = {
414 | 'log' => 'everything is good',
415 | 'level' => 'info',
416 | 'from' => 'my_machine',
417 | 'file' => 'cool.log',
418 | 'value' => 100,
419 | 'agent' => {
420 | 'name' => 'test',
421 | 'version' => '1.0.0'
422 | },
423 | 'timestamp' => 'time'
424 | }
425 | events = [
426 | ['tag.event1', event_time, { 'id' => '1st' }.merge(Marshal.load(Marshal.dump(event)))],
427 | ['tag.event2', event_time, { 'id' => '2nd' }.merge(Marshal.load(Marshal.dump(event)))]
428 | ]
429 |
430 | with_stub_hec conf: conf, events: events, &blk
431 | end
432 | end
433 |
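The body-splitting in with_stub_hec and with_stub_hec_gzip above relies on a HEC batch being plain JSON objects concatenated back to back; a standalone sketch with a made-up payload:

```ruby
require 'json'

# A HEC batch carries one JSON object per event with no delimiter in between.
body = '{"event":"first"}{"event":"second"}'

# Split at the zero-width position that follows a '}' and precedes a '{',
# then parse each piece individually.
items = body.split(/(?={)\s*(?<=})/).map { |item| JSON.parse(item) }

p items # => [{"event"=>"first"}, {"event"=>"second"}]
```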
--------------------------------------------------------------------------------
/test/fluent/plugin/out_splunk_ingest_api_test.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'test_helper'
4 |
5 | describe Fluent::Plugin::SplunkIngestApiOutput do
6 | include Fluent::Test::Helpers
7 | include PluginTestHelper
8 |
9 | INGEST_API_ENDPOINT = 'https://api.scp.splunk.com/tenant_name/ingest/v1beta2/events'
10 | AUTH_TOKEN_ENDPOINT = 'https://auth.scp.splunk.com/token'
11 |
12 | before { Fluent::Test.setup } # setup router and others
13 |
14 | it { expect(::Fluent::Plugin::SplunkIngestApiOutput::VERSION).wont_be_nil }
15 |
16 | describe 'Required configs validation' do
17 | it 'should have required fields' do
18 | expect { create_api_output_driver }.must_raise Fluent::ConfigError
19 | end
20 |
21 | describe 'good_config' do
22 | it {
23 | instance = create_api_output_driver('service_client_identifier service_client_id',
24 | 'service_client_secret_key secret_key',
25 | 'ingest_api_tenant tenant_name').instance
26 | expect(instance.service_client_identifier).must_equal 'service_client_id'
27 | expect(instance.service_client_secret_key).must_equal 'secret_key'
28 | expect(instance.ingest_api_tenant).must_equal 'tenant_name'
29 | }
30 | end
31 | describe 'invalid host' do
32 | it {
33 | expect do
34 | create_api_output_driver('ingest_api_host %bad-host%',
35 | 'service_client_identifier service_client_id',
36 | 'service_client_secret_key secret_key',
37 | 'ingest_api_tenant tenant_name')
38 | end.must_raise Fluent::ConfigError
39 | }
40 | end
41 | describe 'missing tenant name' do
42 | it {
43 | expect do
44 | create_api_output_driver('ingest_api_host %bad-host%',
45 | 'service_client_identifier service_client_id',
46 | 'service_client_secret_key secret_key',
47 | 'ingest_api_tenant tenant_name')
48 | end.must_raise Fluent::ConfigError
49 | }
50 | end
51 | describe 'missing client identifier' do
52 | it {
53 | expect do
54 | create_api_output_driver('ingest_api_host %bad-host%',
55 | 'service_client_secret_key secret_key',
56 | 'ingest_api_tenant tenant_name')
57 | end.must_raise Fluent::ConfigError
58 | }
59 | end
60 |
61 | describe 'missing secret key' do
62 | it {
63 | expect do
64 | create_api_output_driver('ingest_api_host %bad-host%',
65 | 'service_client_identifier service_client_id',
66 | 'ingest_api_tenant tenant_name')
67 | end.must_raise Fluent::ConfigError
68 | }
69 | end
70 | end
71 |
72 | it 'should not fail to start when provided bad auth' do
73 | stub_failed_auth
74 | driver = create_api_output_driver('service_client_identifier service_client_id',
75 | 'service_client_secret_key secret_key',
76 | 'ingest_api_tenant tenant_name')
77 | driver.run
78 | end
79 |
80 | it 'should send request to Splunk' do
81 | req = verify_sent_events do |batch|
82 | expect(batch.size).must_equal 2
83 | end
84 | expect(req).must_be_requested times: 1
85 | end
86 |
87 | it 'should have an index in the attributes slot' do
88 | verify_sent_events(conf: %(
89 | index my_index
90 | )) do |batch|
91 | batch.each do |item|
92 | expect(item['attributes']['index']).must_equal 'my_index'
93 | end
94 | end
95 | end
96 |
97 | it 'should have attributes not fields' do
98 | verify_sent_events do |batch|
99 | batch.each do |item|
100 | expect(item).wont_include :fields
101 | expect(item).includes :attributes
102 | end
103 | end
104 | end
105 |
106 | it 'should have body not event' do
107 | verify_sent_events do |batch|
108 | batch.each do |item|
109 | expect(item).wont_include :event
110 | expect(item).includes :body
111 | end
112 | end
113 | end
114 |
115 | it 'should have a timestamp and nanos' do
116 | verify_sent_events do |batch|
117 | batch.each do |item|
118 | expect(item).wont_include :time
119 | expect(item).includes :timestamp
120 | expect(item).includes :nanos
121 | end
122 | end
123 | end
124 |
125 | it 'should raise error on 401/429 to force retry' do
126 | # Try to quiet this down some.
127 | report_on_exception = Thread.report_on_exception
128 | Thread.report_on_exception = false
129 | begin
130 | expect do
131 | verify_sent_events status: 429
132 | end.must_raise RuntimeError
133 |
134 | expect do
135 | verify_sent_events status: 401
136 | end.must_raise RuntimeError
137 | ensure
138 | Thread.report_on_exception = report_on_exception
139 | end
140 | end
141 |
142 | it 'should not send an empty log message' do
143 | verify_sent_events conf: %(
144 | <format>
145 | @type single_value
146 | message_key log
147 | add_newline false
148 | </format>), event: { 'log' => "\n" } do |batch|
149 | batch.each do |_item|
150 | raise 'No message should be sent'
151 | end
152 | end
153 | end
154 |
155 | # it 'should send index from filters' do
156 | # verify_sent_events conf: %[
157 | # <filter>
158 | # @type record_transformer
159 | # enable_ruby
160 | # <record>
161 | # index ${ENV['SPLUNK_INDEX']}
162 | # </record>
163 | # </filter>
164 | # ], event: {"log" => "This is the log", "index" => "indexname"} do |batch|
165 | # batch.each do |item|
166 | # item[:attrbutes][:index].
167 | # fail "No message should be sent"
168 | # end
169 | # end
170 | # end
171 |
172 | def create_api_output_driver(*configs)
173 | Fluent::Test::Driver::Output.new(Fluent::Plugin::SplunkIngestApiOutput).tap do |d|
174 | d.configure(configs.join("\n"))
175 | end
176 | end
177 |
178 | DEFAULT_EVENT = {
179 | log: 'everything is good',
180 | level: 'info',
181 | from: 'my_machine',
182 | file: 'cool.log',
183 | value: 100,
184 | agent: {
185 | name: 'test',
186 | version: '1.0.0'
187 | }
188 | }.freeze
189 |
190 | def verify_sent_events(args = {})
191 | conf = args[:conf] || ''
192 | event = args[:event] || DEFAULT_EVENT
193 | status = args[:status] || 200
194 |
195 | events = [
196 | ['tag.event1', event_time, { id: '1st' }.merge(Marshal.load(Marshal.dump(event)))],
197 | ['tag.event2', event_time, { id: '2nd' }.merge(Marshal.load(Marshal.dump(event)))]
198 | ]
199 |
200 | @driver = create_api_output_driver('service_client_identifier service_client_id',
201 | 'service_client_secret_key secret_key',
202 | 'ingest_api_tenant tenant_name',
203 | conf)
204 |
205 | api_req = if status == 200
206 | stub_successful_api_request.with do |r|
207 | yield r.body.split(/(?={)\s*(?<=})/).map { |item| JSON.load item }.first
208 | end
209 | else
210 | stub_failed_api_request status
211 | end
212 |
213 | @driver.run do
214 | events.each { |evt| @driver.feed *evt }
215 | end
216 |
217 | api_req
218 | end
219 |
220 | def stub_successful_auth
221 | stub_request(:post, AUTH_TOKEN_ENDPOINT)
222 | .to_return(body: '{"access_token":"bearer token","token_type":"Bearer","expires_in":432000,"scope":"client_credentials"}')
223 | end
224 |
225 | def stub_failed_auth
226 | stub_request(:post, AUTH_TOKEN_ENDPOINT)
227 | .to_return(status: 401,
228 | body: '{"error":"invalid_client","error_description":"The client secret supplied for a confidential client is invalid."}')
229 | end
230 |
231 | def stub_successful_api_request
232 | stub_successful_auth
233 |
234 | stub_request(:post, INGEST_API_ENDPOINT)
235 | .to_return(body: '{"message":"Success","code":"SUCCESS"}')
236 | end
237 |
238 | def stub_failed_api_request(status)
239 | stub_successful_auth
240 |
241 | stub_request(:post, INGEST_API_ENDPOINT)
242 | .to_return(body: '', status: status)
243 | end
244 | end
245 |
--------------------------------------------------------------------------------
/test/lib/webmock/README.md:
--------------------------------------------------------------------------------
1 | There are two reasons why we stub out all these webmock adapters:
2 | * Requiring 'http' (done by the http_rb_adapter) triggers a circular-require warning (http/client <-> http/connection).
3 | * We only need to mock the standard library `net/http`, and we don't want to load a bunch of unused libraries.
4 |
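A hypothetical sketch of why these local files take effect (the lookup below is illustrative, not code from this repo): test/test_helper.rb unshifts test/lib onto $LOAD_PATH, and require takes the first match it finds, so webmock's internal adapter requires resolve to these stubs instead of loading curb, excon, and the other HTTP clients.

```ruby
# Illustrative only: emulate require's first-match-wins lookup on $LOAD_PATH.
feature  = 'webmock/http_lib_adapters/curb_adapter.rb'
resolved = $LOAD_PATH.map { |dir| File.join(dir, feature) }
                     .find { |path| File.exist?(path) }
puts resolved # with test/lib prepended, this points at the local stub file
```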
--------------------------------------------------------------------------------
/test/lib/webmock/http_lib_adapters/curb_adapter.rb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/fluent-plugin-splunk-hec/3fc5ddb3c154782adfe2824cc08e26fdcfd7cf26/test/lib/webmock/http_lib_adapters/curb_adapter.rb
--------------------------------------------------------------------------------
/test/lib/webmock/http_lib_adapters/em_http_request_adapter.rb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/fluent-plugin-splunk-hec/3fc5ddb3c154782adfe2824cc08e26fdcfd7cf26/test/lib/webmock/http_lib_adapters/em_http_request_adapter.rb
--------------------------------------------------------------------------------
/test/lib/webmock/http_lib_adapters/excon_adapter.rb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/fluent-plugin-splunk-hec/3fc5ddb3c154782adfe2824cc08e26fdcfd7cf26/test/lib/webmock/http_lib_adapters/excon_adapter.rb
--------------------------------------------------------------------------------
/test/lib/webmock/http_lib_adapters/http_rb_adapter.rb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/fluent-plugin-splunk-hec/3fc5ddb3c154782adfe2824cc08e26fdcfd7cf26/test/lib/webmock/http_lib_adapters/http_rb_adapter.rb
--------------------------------------------------------------------------------
/test/lib/webmock/http_lib_adapters/manticore_adapter.rb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/fluent-plugin-splunk-hec/3fc5ddb3c154782adfe2824cc08e26fdcfd7cf26/test/lib/webmock/http_lib_adapters/manticore_adapter.rb
--------------------------------------------------------------------------------
/test/lib/webmock/http_lib_adapters/patron_adapter.rb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/fluent-plugin-splunk-hec/3fc5ddb3c154782adfe2824cc08e26fdcfd7cf26/test/lib/webmock/http_lib_adapters/patron_adapter.rb
--------------------------------------------------------------------------------
/test/lib/webmock/http_lib_adapters/typhoeus_hydra_adapter.rb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/splunk/fluent-plugin-splunk-hec/3fc5ddb3c154782adfe2824cc08e26fdcfd7cf26/test/lib/webmock/http_lib_adapters/typhoeus_hydra_adapter.rb
--------------------------------------------------------------------------------
/test/test_helper.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'simplecov'
4 | SimpleCov.start
5 |
6 | $LOAD_PATH.unshift File.expand_path('../lib', __dir__)
7 | $LOAD_PATH.unshift File.expand_path('lib', __dir__)
8 | require 'fluent/plugin/out_splunk_hec'
9 | require 'fluent/plugin/out_splunk_ingest_api'
10 |
11 | require 'fluent/test'
12 | require 'fluent/test/driver/output'
13 | require 'fluent/test/helpers'
14 | require 'minitest/autorun'
15 | require 'webmock/minitest'
16 |
17 | # make assertions from webmock available in minitest/spec
18 | module Minitest::Expectations
19 | infect_an_assertion :assert_requested, :must_be_requested, :reverse
20 | infect_an_assertion :assert_not_requested, :wont_be_requested, :reverse
21 | end
22 |
23 | TEST_HEC_TOKEN = 'some-token'
24 |
25 | module PluginTestHelper
26 | def fluentd_conf_for(*lines)
27 | basic_config = [
28 | "hec_token #{TEST_HEC_TOKEN}"
29 | ]
30 | (basic_config + lines).join("\n")
31 | end
32 |
33 | def create_hec_output_driver(*configs)
34 | Fluent::Test::Driver::Output.new(Fluent::Plugin::SplunkHecOutput).tap do |d|
35 | d.configure(fluentd_conf_for(*configs))
36 | end
37 | end
38 |
39 | def stub_hec_request(endpoint)
40 | stub_request(:post, "#{endpoint}/services/collector")
41 | .with(headers: { 'Authorization' => "Splunk #{TEST_HEC_TOKEN}",
42 | 'User-Agent' => "fluent-plugin-splunk_hec_out/#{Fluent::Plugin::SplunkHecOutput::VERSION}" })
43 | .to_return(body: '{"text":"Success","code":0}')
44 | end
45 |
46 | def stub_hec_gzip_request(endpoint)
47 | stub_request(:post, "#{endpoint}/services/collector")
48 | .with(headers: {
49 | 'Authorization' => "Splunk #{TEST_HEC_TOKEN}",
50 | 'User-Agent' => "fluent-plugin-splunk_hec_out/#{Fluent::Plugin::SplunkHecOutput::VERSION}",
51 | 'Content-Encoding' => "gzip"
52 | },
53 | )
54 | .to_return(body: '{"text":"GzipSuccess","code":0}')
55 | end
56 | end
57 |
--------------------------------------------------------------------------------