├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .fixtures.yml ├── .gitattributes ├── .githooks └── pre-commit ├── .github ├── pull_request_template.md └── workflows │ ├── ci.yml │ ├── latest_testing.yml │ ├── lts_testing.yml │ ├── nightly_testing.yml │ ├── release.yml │ └── release_prep.yml ├── .gitignore ├── .pdkignore ├── .puppet-lint.rc ├── .rspec ├── .rubocop.yml ├── .rubocop_todo.yml ├── .sync.yml ├── .vscode └── extensions.json ├── .yardopts ├── CHANGELOG.md ├── CODEOWNERS ├── CONTRIBUTING.md ├── Gemfile ├── HISTORY.md ├── Jenkinsfile ├── LICENSE ├── README.md ├── REFERENCE.md ├── Rakefile ├── appveyor.yml ├── docs ├── advanced_puppet_configuration.md ├── advanced_splunk_configuration.md ├── custom_installation.md ├── customized_reporting.md ├── fact_terminus_support.md ├── images │ ├── hec_token.png │ └── puppet_report_viewer_config.png ├── puppet_metrics_collector_support.md ├── running_the_tests.md ├── ssl_support.md └── troubleshooting_and_verification.md ├── examples ├── foo.json ├── orchestrator_metrics.json ├── remote_splunk.yml └── splunk_hec.yaml ├── files ├── hec_secrets.yaml.epp └── splunk_hec.rb ├── lib ├── facter │ ├── splunk_hec_agent_only_node.rb │ └── splunk_hec_is_pe.rb └── puppet │ ├── application │ └── splunk_hec.rb │ ├── functions │ └── splunk_hec │ │ └── secure.rb │ ├── indirector │ └── facts │ │ └── splunk_hec.rb │ ├── reports │ └── splunk_hec.rb │ └── util │ └── splunk_hec.rb ├── manifests ├── init.pp └── v2_cleanup.pp ├── metadata.json ├── pdk.yaml ├── plans ├── acceptance │ ├── oss_server_setup.pp │ ├── pe_server_setup.pp │ ├── provision_machines.pp │ └── server_setup.pp └── examples │ ├── apply_example.pp │ └── result_example.pp ├── provision.yaml ├── rakelib └── helpers.rake ├── spec ├── acceptance │ ├── .gitkeep │ ├── class_spec.rb │ └── events_processor_spec.rb ├── classes │ └── init_spec.rb ├── default_facts.yml ├── spec_helper.rb ├── spec_helper_acceptance.rb ├── spec_helper_acceptance_local.rb ├── support │ ├── acceptance │ │ ├── example_events_data.json │ │ ├── helpers.rb │ │ ├── install_pe.sh │ │ ├── splunk │ │ │ ├── defaults │ │ │ │ └── default.yml │ │ │ └── docker-compose.yml │ │ ├── splunk_hec.yaml │ │ └── start_splunk_instance.sh │ └── unit │ │ └── reports │ │ └── splunk_hec_spec_helpers.rb └── unit │ └── reports │ └── splunk_hec_spec.rb ├── tasks └── examples │ ├── bolt_apply.json │ ├── bolt_apply.rb │ ├── bolt_result.json │ ├── bolt_result.rb │ ├── cleanup_tokens.json │ └── cleanup_tokens.sh └── templates ├── settings.yaml.epp ├── splunk_hec_routes.yaml.epp └── util_splunk_hec.erb /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM puppet/pdk:latest 2 | 3 | # [Optional] Uncomment this section to install additional packages. 
4 | # RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 5 | # && apt-get -y install --no-install-recommends 6 | 7 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Puppet Development Kit (Community)", 3 | "dockerFile": "Dockerfile", 4 | 5 | "settings": { 6 | "terminal.integrated.profiles.linux": { 7 | "bash": { 8 | "path": "bash" 9 | } 10 | } 11 | }, 12 | 13 | "extensions": [ 14 | "puppet.puppet-vscode", 15 | "rebornix.Ruby" 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /.fixtures.yml: -------------------------------------------------------------------------------- 1 | --- 2 | fixtures: 3 | forge_modules: 4 | ruby_task_helper: 5 | repo: "puppetlabs/ruby_task_helper" 6 | ref: "0.5.1" 7 | inifile: 8 | repo: "puppetlabs/inifile" 9 | ref: "4.2.0" 10 | repositories: 11 | stdlib: 'https://github.com/puppetlabs/puppetlabs-stdlib' 12 | translate: 'https://github.com/puppetlabs/puppetlabs-translate' 13 | facts: 'https://github.com/puppetlabs/puppetlabs-facts' 14 | puppet_agent: 'https://github.com/puppetlabs/puppetlabs-puppet_agent' 15 | provision: 'https://github.com/puppetlabs/provision' 16 | puppet_conf: 'https://github.com/puppetlabs/puppet_conf' 17 | pe_event_forwarding: 'https://github.com/puppetlabs/puppetlabs-pe_event_forwarding' 18 | deploy_pe: 'https://github.com/jarretlavallee/puppet-deploy_pe' 19 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.rb eol=lf 2 | *.erb eol=lf 3 | *.pp eol=lf 4 | *.sh eol=lf 5 | *.epp eol=lf 6 | -------------------------------------------------------------------------------- /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if git rev-parse --verify HEAD >/dev/null 2>&1 4 | then 5 | against=HEAD 6 | else 7 | # Initial commit: diff against an empty tree object 8 | against=$(git hash-object -t tree /dev/null) 9 | fi 10 | 11 | # Whitespace check copied from the template pre-commit hook. 12 | git diff-index --check --cached $against -- 13 | 14 | bundle exec rake \ 15 | syntax \ 16 | lint \ 17 | metadata_lint \ 18 | check:symlinks \ 19 | check:git_ignore \ 20 | check:dot_underscore \ 21 | check:test_file \ 22 | rubocop \ 23 | spec 24 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | # Detailed Description 4 | 5 | 11 | 12 | # Checklist 13 | 14 | [ ] Draft PR? 
15 | [ ] Ensure README is updated 16 | [ ] Any changes to existing documentation 17 | [ ] Anything new added 18 | [ ] Link to external Puppet documentation 19 | [ ] Review [Support Playbook](https://confluence.puppetlabs.com/display/SUP/Splunk+HEC+Module+Support+Playbook) for any needed updates 20 | [ ] Tags 21 | [ ] Unit Tests 22 | [ ] Acceptance Tests 23 | [ ] PR title is "(Ticket|Maint) Short Description" 24 | [ ] Commit title matches PR title 25 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | setup_matrix: 7 | name: "Setup Test Matrix" 8 | runs-on: ubuntu-latest 9 | outputs: 10 | spec_matrix: ${{ steps.get-matrix.outputs.spec_matrix }} 11 | 12 | steps: 13 | - name: Checkout Source 14 | uses: actions/checkout@v4 15 | if: ${{ github.repository_owner == 'puppetlabs' }} 16 | 17 | - name: Activate Ruby 3.1 18 | uses: ruby/setup-ruby@v1 19 | if: ${{ github.repository_owner == 'puppetlabs' }} 20 | with: 21 | ruby-version: "3.1" 22 | bundler-cache: true 23 | 24 | - name: Print bundle environment 25 | if: ${{ github.repository_owner == 'puppetlabs' }} 26 | run: | 27 | echo ::group::bundler environment 28 | bundle env 29 | echo ::endgroup:: 30 | - name: Setup Spec Test Matrix 31 | id: get-matrix 32 | if: ${{ github.repository_owner == 'puppetlabs' }} 33 | run: | 34 | bundle exec matrix_from_metadata_v3 35 | 36 | Spec: 37 | name: "Spec Tests (Puppet: ${{matrix.puppet_version}}, Ruby Ver: ${{matrix.ruby_version}})" 38 | needs: 39 | - setup_matrix 40 | if: ${{ needs.setup_matrix.outputs.spec_matrix != '{}' }} 41 | 42 | runs-on: ubuntu-latest 43 | strategy: 44 | fail-fast: false 45 | matrix: ${{fromJson(needs.setup_matrix.outputs.spec_matrix)}} 46 | 47 | env: 48 | PUPPET_GEM_VERSION: ${{ matrix.puppet_version }} 49 | FACTER_GEM_VERSION: 'https://github.com/puppetlabs/facter#main' 50 | 51 | steps: 52 | - name: Checkout Source 53 | uses: actions/checkout@v4 54 | 55 | - name: "Activate Ruby ${{ matrix.ruby_version }}" 56 | uses: ruby/setup-ruby@v1 57 | with: 58 | ruby-version: ${{matrix.ruby_version}} 59 | bundler-cache: true 60 | 61 | - name: Print bundle environment 62 | run: | 63 | echo ::group::bundler environment 64 | bundle env 65 | echo ::endgroup:: 66 | - name: Run Static & Syntax Tests 67 | if: ${{ github.repository_owner == 'puppetlabs' }} 68 | run: | 69 | bundle exec rake syntax lint metadata_lint check:symlinks check:git_ignore check:dot_underscore check:test_file rubocop 70 | - name: Run parallel_spec tests 71 | run: | 72 | bundle exec rake parallel_spec 73 | -------------------------------------------------------------------------------- /.github/workflows/latest_testing.yml: -------------------------------------------------------------------------------- 1 | name: "PE Latest Testing" 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | setup_matrix: 7 | name: "Setup Test Matrix" 8 | runs-on: ubuntu-latest 9 | outputs: 10 | matrix: ${{ steps.set-matrix.outputs.acc_matrix }} 11 | 12 | steps: 13 | - name: Checkout Source 14 | uses: actions/checkout@v4 15 | if: ${{ github.repository_owner == 'puppetlabs' }} 16 | 17 | - name: Activate Ruby 3.1 18 | uses: ruby/setup-ruby@v1 19 | if: ${{ github.repository_owner == 'puppetlabs' }} 20 | with: 21 | ruby-version: "3.1" 22 | bundler-cache: true 23 | 24 | - name: Print bundle environment 25 | if: ${{ github.repository_owner == 'puppetlabs' }} 26 | run: | 
27 | echo ::group::bundler environment 28 | bundle env 29 | echo ::endgroup:: 30 | - name: Curl Forge for PE versions 31 | id: curl_forge 32 | run: | 33 | echo "forge_response=$(curl https://forge.puppet.com/private/versions/pe)" >> $GITHUB_OUTPUT 34 | - name: Set PE latest release 35 | id: latest_release 36 | run: | 37 | out=$(jq -c '[.[] | select(.lts == false)][0].latest | {"pe_version": [.]}' <<<'${{ steps.curl_forge.outputs.forge_response }}') 38 | echo "latest=$out" >> $GITHUB_OUTPUT 39 | - name: Build Test Matrix 40 | id: build-matrix 41 | run: | 42 | bundle exec matrix_from_metadata_v3 \ 43 | --provision-exclude docker \ 44 | --arch-exclude arm \ 45 | --platform-exclude debian \ 46 | --platform-exclude redhat-8 \ 47 | --platform-exclude sles \ 48 | --platform-exclude ubuntu 49 | - name: Setup Acceptance Test Matrix 50 | id: set-matrix 51 | run: | 52 | out=$(echo '${{ steps.build-matrix.outputs.matrix }}' | jq -c --argjson latest '${{ steps.latest_release.outputs.latest }}' '.collection = $latest.pe_version') 53 | echo "acc_matrix=$out" >> $GITHUB_OUTPUT 54 | Integration: 55 | name: "${{matrix.platforms.label}}, ${{matrix.collection}}" 56 | needs: 57 | - setup_matrix 58 | 59 | runs-on: ubuntu-latest 60 | strategy: 61 | fail-fast: false 62 | matrix: ${{ fromJson(needs.setup_matrix.outputs.matrix) }} 63 | 64 | steps: 65 | - name: Checkout Source 66 | uses: actions/checkout@v4 67 | 68 | - name: Activate Ruby 3.1 69 | uses: ruby/setup-ruby@v1 70 | with: 71 | ruby-version: "3.1" 72 | bundler-cache: true 73 | 74 | - name: Print bundle environment 75 | run: | 76 | echo ::group::bundler environment 77 | bundle env 78 | echo ::endgroup:: 79 | - name: Create the fixtures directory 80 | run: | 81 | echo ::group::Create the fixtures directory 82 | bundle exec rake spec_prep 83 | echo ::endgroup:: 84 | - name: Provision test environment 85 | run: | 86 | bundle exec bolt --modulepath spec/fixtures/modules plan run splunk_hec::acceptance::provision_machines using='provision_service' image='${{ matrix.platforms.image }}' 87 | echo ::group::=== REQUEST === 88 | cat request.json || true 89 | echo 90 | echo ::endgroup:: 91 | echo ::group::=== INVENTORY === 92 | if [ -f 'spec/fixtures/litmus_inventory.yaml' ]; 93 | then 94 | FILE='spec/fixtures/litmus_inventory.yaml' 95 | elif [ -f 'inventory.yaml' ]; 96 | then 97 | FILE='inventory.yaml' 98 | fi 99 | sed -e 's/password: .*/password: "[redacted]"/' < $FILE || true 100 | echo ::endgroup:: 101 | echo INVENTORY_PATH=$FILE >> $GITHUB_ENV 102 | - name: Install server 103 | run: | 104 | bundle exec bolt --modulepath spec/fixtures/modules plan run splunk_hec::acceptance::server_setup puppet_version='${{ matrix.collection }}' -i ./$INVENTORY_PATH --stream 105 | - name: Install module 106 | run: | 107 | bundle exec rake 'litmus:install_module' 108 | - name: Set up Splunk instance 109 | run: | 110 | bundle exec rake acceptance:setup_splunk_targets 111 | - name: Run acceptance tests 112 | run: | 113 | bundle exec rake acceptance:ci_run_tests 114 | - name: Remove test environment 115 | if: ${{ always() }} 116 | continue-on-error: true 117 | run: | 118 | if [[ -f inventory.yaml || -f spec/fixtures/litmus_inventory.yaml ]]; then 119 | bundle exec rake 'litmus:tear_down' 120 | echo ::group::=== REQUEST === 121 | cat request.json || true 122 | echo 123 | echo ::endgroup:: 124 | fi 125 | -------------------------------------------------------------------------------- /.github/workflows/lts_testing.yml: 
-------------------------------------------------------------------------------- 1 | name: "PE LTS Testing" 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | setup_matrix: 7 | name: "Setup Test Matrix" 8 | runs-on: ubuntu-latest 9 | outputs: 10 | matrix: ${{ steps.build-matrix.outputs.matrix }} 11 | 12 | steps: 13 | - name: Checkout Source 14 | uses: actions/checkout@v4 15 | if: ${{ github.repository_owner == 'puppetlabs' }} 16 | 17 | - name: Activate Ruby 3.1 18 | uses: ruby/setup-ruby@v1 19 | if: ${{ github.repository_owner == 'puppetlabs' }} 20 | with: 21 | ruby-version: "3.1" 22 | bundler-cache: true 23 | 24 | - name: Print bundle environment 25 | if: ${{ github.repository_owner == 'puppetlabs' }} 26 | run: | 27 | echo ::group::bundler environment 28 | bundle env 29 | echo ::endgroup:: 30 | 31 | - name: Build Test Matrix 32 | id: build-matrix 33 | run: | 34 | bundle exec matrix_from_metadata_v3 \ 35 | --provision-exclude docker \ 36 | --arch-exclude arm \ 37 | --platform-exclude debian \ 38 | --platform-exclude redhat-7 \ 39 | --platform-exclude redhat-8 \ 40 | --platform-exclude sles \ 41 | --platform-exclude ubuntu \ 42 | --puppet-exclude 7 \ 43 | --puppet-exclude 8 \ 44 | --pe-include 45 | 46 | Integration: 47 | name: "${{matrix.platforms.label}}, ${{matrix.collection}}" 48 | needs: 49 | - setup_matrix 50 | if: ${{ needs.setup_matrix.outputs.matrix != '{}' }} 51 | 52 | runs-on: ubuntu-latest 53 | strategy: 54 | fail-fast: false 55 | matrix: ${{fromJson(needs.setup_matrix.outputs.matrix)}} 56 | 57 | steps: 58 | - name: Checkout Source 59 | uses: actions/checkout@v4 60 | 61 | - name: Activate Ruby 3.1 62 | uses: ruby/setup-ruby@v1 63 | with: 64 | ruby-version: "3.1" 65 | bundler-cache: true 66 | 67 | - name: Print bundle environment 68 | run: | 69 | echo ::group::bundler environment 70 | bundle env 71 | echo ::endgroup:: 72 | - name: Create the fixtures directory 73 | run: | 74 | echo ::group::Create the fixtures directory 75 | bundle exec rake spec_prep 76 | echo ::endgroup:: 77 | - name: Provision test environment 78 | run: | 79 | bundle exec bolt --modulepath spec/fixtures/modules plan run splunk_hec::acceptance::provision_machines using='provision_service' image='${{ matrix.platforms.image }}' 80 | echo ::group::=== REQUEST === 81 | cat request.json || true 82 | echo 83 | echo ::endgroup:: 84 | echo ::group::=== INVENTORY === 85 | if [ -f 'spec/fixtures/litmus_inventory.yaml' ]; 86 | then 87 | FILE='spec/fixtures/litmus_inventory.yaml' 88 | elif [ -f 'inventory.yaml' ]; 89 | then 90 | FILE='inventory.yaml' 91 | fi 92 | sed -e 's/password: .*/password: "[redacted]"/' < $FILE || true 93 | echo ::endgroup:: 94 | echo INVENTORY_PATH=$FILE >> $GITHUB_ENV 95 | - name: Install server 96 | run: | 97 | bundle exec bolt --modulepath spec/fixtures/modules plan run splunk_hec::acceptance::server_setup puppet_version='${{ matrix.collection }}' -i ./$INVENTORY_PATH --stream 98 | - name: Install module 99 | run: | 100 | bundle exec rake 'litmus:install_module' 101 | - name: Set up Splunk instance 102 | run: | 103 | bundle exec rake acceptance:setup_splunk_targets 104 | - name: Run acceptance tests 105 | run: | 106 | bundle exec rake acceptance:ci_run_tests 107 | - name: Remove test environment 108 | if: ${{ always() }} 109 | continue-on-error: true 110 | run: | 111 | if [[ -f inventory.yaml || -f spec/fixtures/litmus_inventory.yaml ]]; then 112 | bundle exec rake 'litmus:tear_down' 113 | echo ::group::=== REQUEST === 114 | cat request.json || true 115 | echo 116 | echo ::endgroup:: 117 | fi 118 | 
-------------------------------------------------------------------------------- /.github/workflows/nightly_testing.yml: -------------------------------------------------------------------------------- 1 | name: "Puppet Server Nightly Testing" 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | setup_matrix: 7 | name: "Setup Test Matrix" 8 | runs-on: ubuntu-latest 9 | outputs: 10 | matrix: ${{ steps.build-matrix.outputs.matrix }} 11 | 12 | steps: 13 | - name: Checkout Source 14 | uses: actions/checkout@v4 15 | if: ${{ github.repository_owner == 'puppetlabs' }} 16 | 17 | - name: Activate Ruby 3.1 18 | uses: ruby/setup-ruby@v1 19 | if: ${{ github.repository_owner == 'puppetlabs' }} 20 | with: 21 | ruby-version: "3.1" 22 | bundler-cache: true 23 | 24 | - name: Print bundle environment 25 | if: ${{ github.repository_owner == 'puppetlabs' }} 26 | run: | 27 | echo ::group::bundler environment 28 | bundle env 29 | echo ::endgroup:: 30 | - name: Get Test Matrix 31 | id: get-matrix 32 | run: | 33 | bundle exec matrix_from_metadata_v3 34 | - name: Set nightly releases 35 | id: nightly_release 36 | run: | 37 | out=$(echo '${{ steps.get-matrix.outputs.matrix }}' | jq -c '.collection') 38 | echo "latest=$out" >> $GITHUB_OUTPUT 39 | - name: Setup Spec Test Matrix 40 | id: set-matrix 41 | run: | 42 | echo "matrix={\"platforms\":[\"rhel-7\",\"rhel-9\"]}" >> $GITHUB_OUTPUT 43 | - name: Setup Acceptance Test Matrix 44 | id: build-matrix 45 | run: | 46 | out=$(echo '${{ steps.set-matrix.outputs.matrix }}' | jq -c --argjson nightly '${{ steps.nightly_release.outputs.latest }}' '.collection += $nightly') 47 | echo "matrix=$out" >> $GITHUB_OUTPUT 48 | Integration: 49 | name: "${{matrix.platforms}}, ${{matrix.collection}}" 50 | needs: 51 | - setup_matrix 52 | if: ${{ needs.setup_matrix.outputs.matrix != '{}' }} 53 | 54 | runs-on: ubuntu-latest 55 | strategy: 56 | fail-fast: false 57 | matrix: ${{fromJson(needs.setup_matrix.outputs.matrix)}} 58 | 59 | steps: 60 | - name: Checkout Source 61 | uses: actions/checkout@v4 62 | 63 | - name: Activate Ruby 3.1 64 | uses: ruby/setup-ruby@v1 65 | with: 66 | ruby-version: "3.1" 67 | bundler-cache: true 68 | 69 | - name: Print bundle environment 70 | run: | 71 | echo ::group::bundler environment 72 | bundle env 73 | echo ::endgroup:: 74 | - name: Create the fixtures directory 75 | run: | 76 | echo ::group::Create the fixtures directory 77 | bundle exec rake spec_prep 78 | echo ::endgroup:: 79 | - name: Provision test environment 80 | run: | 81 | bundle exec bolt --modulepath spec/fixtures/modules plan run splunk_hec::acceptance::provision_machines using='provision_service' image='${{ matrix.platforms }}' 82 | echo ::group::=== REQUEST === 83 | cat request.json || true 84 | echo 85 | echo ::endgroup:: 86 | echo ::group::=== INVENTORY === 87 | if [ -f 'spec/fixtures/litmus_inventory.yaml' ]; 88 | then 89 | FILE='spec/fixtures/litmus_inventory.yaml' 90 | elif [ -f 'inventory.yaml' ]; 91 | then 92 | FILE='inventory.yaml' 93 | fi 94 | sed -e 's/password: .*/password: "[redacted]"/' < $FILE || true 95 | echo ::endgroup:: 96 | echo INVENTORY_PATH=$FILE >> $GITHUB_ENV 97 | - name: Install server 98 | run: | 99 | bundle exec bolt --modulepath spec/fixtures/modules plan run splunk_hec::acceptance::server_setup puppet_version='${{ matrix.collection }}' -i ./$INVENTORY_PATH --stream 100 | - name: Install module 101 | run: | 102 | bundle exec rake 'litmus:install_module' 103 | - name: Set up Splunk instance 104 | run: | 105 | bundle exec rake acceptance:setup_splunk_targets 106 | - name: Run 
acceptance tests 107 | run: | 108 | bundle exec rake acceptance:ci_run_tests 109 | - name: Remove test environment 110 | if: ${{ always() }} 111 | continue-on-error: true 112 | run: | 113 | if [[ -f inventory.yaml || -f spec/fixtures/litmus_inventory.yaml ]]; then 114 | bundle exec rake 'litmus:tear_down' 115 | echo ::group::=== REQUEST === 116 | cat request.json || true 117 | echo 118 | echo ::endgroup:: 119 | fi 120 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: 6 | - v[0-9]+.[0-9]+.[0-9]+ 7 | 8 | jobs: 9 | deploy-forge: 10 | name: Deploy to Forge 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v2 15 | with: 16 | ref: ${{ github.ref }} 17 | clean: true 18 | - name: "PDK Build" 19 | uses: docker://puppet/pdk:nightly 20 | with: 21 | args: 'build' 22 | - name: "Push to Forge" 23 | uses: docker://puppet/pdk:nightly 24 | with: 25 | args: 'release publish --forge-token ${{ secrets.FORGE_API_KEY }} --force' 26 | -------------------------------------------------------------------------------- /.github/workflows/release_prep.yml: -------------------------------------------------------------------------------- 1 | name: release_prep 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | module_version: 7 | description: 'The new version you would like to release' 8 | required: true 9 | 10 | jobs: 11 | release_prep: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Extract branch name 15 | run: echo "GITHUB_BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV 16 | 17 | - name: Set up Ruby 18 | uses: ruby/setup-ruby@v1 19 | with: 20 | ruby-version: 3.1 21 | 22 | - name: Update Rubygems 23 | run: gem update --system 3.1.0 24 | 25 | - name: Clone repository 26 | uses: actions/checkout@v2 27 | with: 28 | ref: ${{ env.GITHUB_BRANCH }} 29 | # Needed to fetch all the tags 30 | fetch-depth: 0 31 | 32 | - name: Get most recent tag 33 | id: most_recent_tag 34 | uses: "WyriHaximus/github-action-get-previous-tag@master" 35 | # An error in this step likely means that a most recent tag doesn't exist. 36 | # This just means that our job corresponds to the first release, so continuing 37 | # on error is OK. 38 | continue-on-error: true 39 | 40 | - name: Calculate the release prep commit's COMMIT_TITLE, COMMIT_BODY_MAIN, COMMIT_BODY_NOTE 41 | run: | 42 | echo "COMMIT_TITLE=$(echo 'Release prep to ${{ github.event.inputs.module_version }}')" >> $GITHUB_ENV 43 | echo "COMMIT_BODY_MAIN=$(echo -n 'You will need to manually update the \`CHANGELOG.md\` file. First, checkout this PR on your local machine via something like \`git fetch upstream; git checkout upstream/release_prep\`. Then, once you have updated the \`CHANGELOG.md\` file, \`git commit --amend\` your update, then push your changes to this PR via something like \`git push --set-upstream upstream.\`')" >> $GITHUB_ENV 44 | if [ ! 
-z '${{ steps.most_recent_tag.outputs.tag }}' ]; then 45 | echo "COMMIT_BODY_NOTE=$(echo -n "${commit_body} **Note:** You can use https://github.com/${{ github.repository }}/compare/${{ steps.most_recent_tag.outputs.tag }}...${{ env.GITHUB_BRANCH }} to see all the new commits that have landed since the previous release.")" >> $GITHUB_ENV 46 | fi 47 | 48 | - name: Release Prep 49 | uses: docker://puppet/pdk:nightly 50 | with: 51 | args: 'release prep --version=${{ github.event.inputs.module_version }} --skip-changelog' 52 | 53 | - name: Generate the release prep commit 54 | run: | 55 | git checkout -b release_prep 56 | bundle install 57 | git add . 58 | git -c user.name=${{ github.actor }} -c user.email=${{ github.actor }}@users.noreply.github.com commit -m "${{ env.COMMIT_TITLE }}" -m "${{ env.COMMIT_BODY_MAIN }}" -m '' -m '${{ env.COMMIT_BODY_NOTE }}' 59 | git push --set-upstream origin release_prep --force 60 | 61 | - name: Generate the release prep PR 62 | uses: repo-sync/pull-request@v2 63 | with: 64 | github_token: ${{ secrets.GITHUB_TOKEN }} 65 | source_branch: 'release_prep' 66 | destination_branch: ${{ env.GITHUB_BRANCH }} 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | .*.sw[op] 3 | .metadata 4 | .yardoc 5 | .yardwarns 6 | *.iml 7 | /.bundle/ 8 | /.idea/ 9 | /.vagrant/ 10 | /coverage/ 11 | /bin/ 12 | /doc/ 13 | /Gemfile.local 14 | /Gemfile.lock 15 | /junit/ 16 | /log/ 17 | /pkg/ 18 | /spec/fixtures/manifests/ 19 | /spec/fixtures/modules/* 20 | /tmp/ 21 | /vendor/ 22 | /.vendor/ 23 | /convert_report.txt 24 | /update_report.txt 25 | .DS_Store 26 | .project 27 | .envrc 28 | /inventory.yaml 29 | /spec/fixtures/litmus_inventory.yaml 30 | .resource_types 31 | .modules 32 | .task_cache.json 33 | .plan_cache.json 34 | .rerun.json 35 | bolt-debug.log 36 | -------------------------------------------------------------------------------- /.pdkignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | .*.sw[op] 3 | .metadata 4 | .yardoc 5 | .yardwarns 6 | *.iml 7 | /.bundle/ 8 | /.idea/ 9 | /.vagrant/ 10 | /coverage/ 11 | /bin/ 12 | /doc/ 13 | /Gemfile.local 14 | /Gemfile.lock 15 | /junit/ 16 | /log/ 17 | /pkg/ 18 | /spec/fixtures/manifests/ 19 | /spec/fixtures/modules/* 20 | /tmp/ 21 | /vendor/ 22 | /.vendor/ 23 | /convert_report.txt 24 | /update_report.txt 25 | .DS_Store 26 | .project 27 | .envrc 28 | /inventory.yaml 29 | /spec/fixtures/litmus_inventory.yaml 30 | .resource_types 31 | .modules 32 | .task_cache.json 33 | .plan_cache.json 34 | .rerun.json 35 | bolt-debug.log 36 | /.fixtures.yml 37 | /Gemfile 38 | /.gitattributes 39 | /.github/ 40 | /.gitignore 41 | /.pdkignore 42 | /.puppet-lint.rc 43 | /Rakefile 44 | /rakelib/ 45 | /.rspec 46 | /..yml 47 | /.yardopts 48 | /spec/ 49 | /.vscode/ 50 | /.sync.yml 51 | /.devcontainer/ 52 | -------------------------------------------------------------------------------- /.puppet-lint.rc: -------------------------------------------------------------------------------- 1 | --relative 2 | -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --color 2 | --format documentation 3 | -------------------------------------------------------------------------------- /.rubocop_todo.yml: 
-------------------------------------------------------------------------------- 1 | Lint/ScriptPermission: 2 | Enabled: false 3 | -------------------------------------------------------------------------------- /.sync.yml: -------------------------------------------------------------------------------- 1 | .rubocop.yml: 2 | default_configs: 3 | inherit_from: .rubocop_todo.yml 4 | 5 | Gemfile: 6 | optional: 7 | ':development': 8 | - gem: 'puppet_litmus' 9 | platforms: 10 | - ruby 11 | - mswin 12 | - mingw 13 | - x64_mingw 14 | condition: "ENV['PUPPET_GEM_VERSION'].nil? or ENV['PUPPET_GEM_VERSION'] !~ %r{ 5}" 15 | - gem: 'hiera-eyaml' 16 | from_env: 'HIERA_EYAML_GEM_VERSION' 17 | - gem: 'github_changelog_generator' 18 | git: 'https://github.com/skywinder/github-changelog-generator' 19 | ref: '20ee04ba1234e9e83eb2ffb5056e23d641c7a018' 20 | condition: "Gem::Version.new(RUBY_VERSION.dup) >= Gem::Version.new('2.2.2')" 21 | - gem: 'rspec_junit_formatter' 22 | - gem: 'rspec-puppet-utils' 23 | - gem: 'rb-readline' 24 | version: 25 | '= 0.5.5' 26 | platforms: 27 | - mswin 28 | - mingw 29 | - x64_mingw 30 | 31 | .travis.yml: 32 | delete: true 33 | 34 | .gitlab-ci.yml: 35 | delete: true 36 | 37 | Rakefile: 38 | requires: 39 | use_litmus_tasks: true 40 | 41 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "puppet.puppet-vscode", 4 | "Shopify.ruby-lsp" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /.yardopts: -------------------------------------------------------------------------------- 1 | --markup markdown 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change log 2 | 3 | All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org). 4 | 5 | ## [Unreleased](https://github.com/puppetlabs/puppetlabs-splunk_hec) 6 | 7 | [Current Diff](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v2.1.0..main) 8 | 9 | ## [2.1.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v2.1.0) (2025-06-03) 10 | 11 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v2.0.1..v2.1.0) 12 | 13 | ### Fixed 14 | 15 | - System metrics are now formatted with the metric name as the key and the metric value as the value. [#220](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/220) 16 | 17 | ## [2.0.1](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v2.0.1) (2024-11-26) 18 | 19 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v2.0.0..v2.0.1) 20 | 21 | ### Fixed 22 | 23 | - Ensure the report processor is using the correct file to retrieve the token value. [#217](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/217) 24 | 25 | ## [2.0.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v2.0.0) (2024-3-27) 26 | 27 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v1.4.0..v2.0.0) 28 | 29 | ### Added 30 | 31 | - New parameters `token_events` and `url_events` can now be used to store events from `pe_event_forwarding` in a different index. 
[#212](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/212) 32 | 33 | - The parameter `ignore_system_cert_store` is now named `include_system_cert_store` and defaults to **false**. [#208](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/208) 34 | 35 | - Credential data provided to this module is now written to a separate configuration file utilizing the Sensitive data type to ensure redaction from Puppet logs and reports. [#204](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/204) 36 | 37 | - Configuration files created by this module are now placed in a `splunk_hec` subdirectory. [#204](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/204) 38 | 39 | - New private subclass `splunk_hec::v2_cleanup` ensures old configuration files are removed. [#204](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/204) 40 | 41 | - New custom function to convert sensitive user provided data from a String to Puppet's [Sensitive](https://www.puppet.com/docs/puppet/latest/lang_data_sensitive.html) data type. [#203](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/203) 42 | 43 | - Add support for Puppet 8. [#200](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/200) 44 | 45 | ### Removed 46 | 47 | - The deprecated `reports` parameter has been removed in favor of having the module automatically add the **splunk_hec** setting to `puppet.conf`. [#212](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/212) 48 | 49 | ### Fixed 50 | 51 | - Bug fix preventing `sar` metrics collected by `puppet_metrics_collector::system` from being shipped to Splunk. [#214](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/214) 52 | 53 | - Removed logic preventing `job_id` and `code_id` from being added to report data. [#213](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/213) 54 | 55 | - The `collect_facts` parameter has been renamed to `facts_allowlist` to align with the `facts_blocklist` parameter. [#212](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/212) 56 | 57 | - No longer utilizing `parse_legacy_metrics` function for metrics collected with older versions of `puppet_metrics_collector`. [#211](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/211) 58 | 59 | - False positive when attempting to rescue required facts from an unconfigured `splunk_hec::facts_blocklist`. [#210](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/210) 60 | 61 | - Settings are now removed from `puppet.conf` when `splunk_hec::disabled` is set to **true**. [#205](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/205) 62 | 63 | ## [v1.4.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v1.4.0) (2023-4-17) 64 | 65 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v1.3.0..v1.4.0) 66 | 67 | ### Fixed 68 | 69 | - Event filtering for `pe-console` and `code-manager` events are no longer ignored. [#196](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/196) 70 | 71 | ## [v1.3.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v1.3.0) (2022-4-27) 72 | 73 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v1.2.1..v1.3.0) 74 | 75 | ### Added 76 | 77 | - Added PE LTS version to testing matrix. [#185](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/185) 78 | 79 | - SSL config options for FIPS. [#186](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/186) 80 | 81 | - Ability to send reports that only contain changes. 
[#187](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/187) 82 | 83 | ### Fixed 84 | 85 | - Rescue required facts that have been added to the block list. [#188](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/188) 86 | 87 | ## [v1.2.1](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v1.2.1) (2022-3-14) 88 | 89 | ### Added 90 | 91 | - Allow RBAC events to be disabled. [#179](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/179) 92 | 93 | ### Fixed 94 | 95 | - Allow custom confdirs when used with pe_event_forwarding [#180](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/180) 96 | 97 | - Fix syntax error when SSL is enabled. [#181](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/181) 98 | 99 | ## [v1.2.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v1.2.0) (2022-2-16) 100 | 101 | ### Added 102 | 103 | - Ability to collect all facts against a blocklist. [#170](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/170) 104 | 105 | ### Fixed 106 | 107 | - Prevent the `event_types` parameter from being configured unless `events_reporting_enabled` is set to **true**. [#174](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/174) 108 | 109 | - The `splunk_hec_agent_only_node` fact now properly resolves to **false** on infrastructure nodes running Puppet Server. [#175](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/175) 110 | 111 | - Prevent the deprecated `reports` parameter from removing configured settings in `puppet.conf`. [#176](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/176) 112 | 113 | ## [v1.1.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v1.1.0) (2021-11-09) 114 | 115 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v1.0.1..v1.1.0) 116 | 117 | ### Added 118 | 119 | - Forwarding from non-Puppet Server nodes. [#154](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/154) 120 | - Filtering of event types data. [#156](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/156) 121 | - FIPS compatibility [#159](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/159) 122 | 123 | ## [v1.0.1](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v1.0.1) (2021-10-04) 124 | 125 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v1.0.0..v1.0.1) 126 | 127 | ### Fixed 128 | 129 | - Removed hardcoded certname in util_splunk_hec template. [#149](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/149) 130 | - Updated sourcetype from common_events to pe_event_forwarding in util_splunk_hec template. [#149](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/149) 131 | 132 | ### Added 133 | 134 | - Added `event_types` parameter to limit the event types sent to Splunk. [#152](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/152) 135 | 136 | ## [v1.0.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v1.0.0) (2021-09-29) 137 | 138 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.10.0...v1.0.0) 139 | 140 | ### Added 141 | 142 | - Event Forwarding Processor to handle events from PE Event Forwarding. [#142](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/142) 143 | 144 | ## [v0.10.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.10.0) (2021-08-23) 145 | 146 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.9.2...v0.10.0) 147 | 148 | ### Added 149 | 150 | - Ignore System CA Certificate Store. 
[#137](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/137) 151 | 152 | ## [v0.9.2](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.9.2) (2021-08-02) 153 | 154 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.9.1...v0.9.2) 155 | 156 | ### Fixed 157 | 158 | - Fixed sourcetypetime to allow metrics to be sent without issue. [#135](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/135) 159 | 160 | - Module metadata now supports latest versions of Puppet and Puppet Metrics Collector 161 | 162 | ## [v0.9.1](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.9.1) (2021-07-07) 163 | 164 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.9.0...v0.9.1) 165 | 166 | ### Fixed 167 | 168 | - Timestamp now matches timestamp value in the console [#130](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/130) 169 | 170 | ## [v0.9.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.9.0) (2021-06-29) 171 | 172 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.8.1...v0.9.0) 173 | 174 | ### Fixed 175 | 176 | - Puppet open source compatibility [\#76](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/76) Thanks [@southalc](https://github.com/southalc) 177 | 178 | - Deprecation warning only when report parameter defined [\#85](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/85) 179 | 180 | ### Added 181 | 182 | - Added array resource format option [\#40](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/40) 183 | 184 | - Added Puppet Alert Actions documentation to README.md [\#115](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/115) Thanks [@coreymbe](https://github.com/coreymbe) 185 | 186 | - Added splunk_hec disabling feature [\#120](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/120) 187 | 188 | ### Changed 189 | 190 | - Project issues URL changed in metadata to point to JIRA to create tickets instead of at GitHub to create issues [\#62](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/62) 191 | 192 | - Switch to the `pe_ini_subsetting` resource for adding the report processor setting [\#51](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/51) 193 | 194 | ### Deprecated 195 | 196 | - 'report' setting is now dynamically calculated. 
[\#49](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/49) 197 | 198 | 199 | ## [v0.8.1](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.8.1) (2020-05-11) 200 | 201 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.8.0...v0.8.1) 202 | 203 | ### Fixed 204 | 205 | - Replace relative docs links with static links [\#45](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/45) ([gsparks](https://github.com/gsparks)) 206 | 207 | ## [v0.8.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.8.0) (2020-05-07) 208 | 209 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.7.1...v0.8.0) 210 | 211 | ### Fixed 212 | 213 | - fix single quote issue in classifier [\#42](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/42) ([mrzarquon](https://github.com/mrzarquon)) 214 | - \(PIE-178\) Parse line-delimited JSON metrics [\#39](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/39) ([Sharpie](https://github.com/Sharpie)) 215 | - PIE-178 Multiple Metrics in stdin [\#36](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/36) ([mrzarquon](https://github.com/mrzarquon)) 216 | 217 | ## [v0.7.1](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.7.1) (2019-07-01) 218 | 219 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/v0.7.0...v0.7.1) 220 | 221 | ### Fixed 222 | 223 | - Fixes metrics uploading on splunk\_hec application [\#30](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/30) ([mrzarquon](https://github.com/mrzarquon)) 224 | 225 | ## [v0.7.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/v0.7.0) (2019-06-25) 226 | 227 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/0.7.0...v0.7.0) 228 | 229 | ## [0.7.0](https://github.com/puppetlabs/puppetlabs-splunk_hec/tree/0.7.0) (2019-06-17) 230 | 231 | [Full Changelog](https://github.com/puppetlabs/puppetlabs-splunk_hec/compare/0.6.0...0.7.0) 232 | 233 | ### Added 234 | 235 | - Setup for github-changelog-generator [\#21](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/21) ([HelenCampbell](https://github.com/HelenCampbell)) 236 | 237 | ### Fixed 238 | 239 | - Adds troubleshooting documentation [\#22](https://github.com/puppetlabs/puppetlabs-splunk_hec/pull/22) ([mrzarquon](https://github.com/mrzarquon)) 240 | 241 | ## 0.6.0 242 | (2019/06/13) 243 | 244 | ### Changed 245 | - The splunk_hec module now supports customizing the `fact_terminus` and `facts_cache_terminus` names in the custom routes.yaml it deploys. If you are using a different facts_terminus (ie, not PuppetDB), you will want to set that parameter. Please note that this will come with a breaking change in functionality - Switches to the fact terminus cache setting via routes.yaml to ensure compatibility with CD4PE, see Fact Terminus Support for guides on how to change it. 
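As a purely illustrative sketch of that terminus setting (the class and parameter names follow the entry above; the `url`/`token` values and the `yaml` terminus name are assumed examples, not module defaults):

```puppet
# Hypothetical values for illustration only; adjust to your environment.
class { 'splunk_hec':
  url                  => 'https://splunk.example.com:8088/services/collector',
  token                => 'example-hec-token',
  fact_terminus        => 'yaml',        # assumed non-PuppetDB terminus name
  facts_cache_terminus => 'splunk_hec',
}
```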
246 | 247 | ## 0.5.0 248 | (2019/06/11) 249 | 250 | ### Added 251 | - Advanced configuration for puppet:summary, puppet:facts and puppet:metrics to allow for the support of multiple indexes 252 | - Additional documentation updates 253 | - Added support for individual sourcetype urls 254 | - Added ability to define multiple hec tokens on a sourcetype basis 255 | - Addition of basic acceptance testing using Litmus 256 | - The module is now under the power of the PDK 257 | - Addition of the `saved_report` flag for the splunk_hec application - Allows the user to test some of the splunk_hec functionality, submits the result directly to the splunk configuration 258 | - Addition of the splunk_hec puppet face/app allowing for a cat json | puppet splunk_hec like workflow. The primary functionality of this code is to enable sending pe metrics data to Splunk using the current CS best practices for collecting the CS data. 259 | - Major changes to module were done to enable the Fact Terminus: 260 | - util/splunk_hec.rb created for common access methods 261 | - consistent info and error handling for both reports and facts 262 | - performance profile support for Fact Terminus 263 | - Documentation updated with guide and default facts listed 264 | - Module updated to optionally manage reports setting in puppet.conf 265 | - Module updated to add new parameters and template values 266 | - Fact collection time added to puppet report processor 267 | - SSL handling and documentation improved 268 | 269 | ### Fixed 270 | - Minor fixes to output dialog 271 | 272 | ### Changed 273 | - url parameter now expects a full URI of https://servername:8088/services/collector 274 | 275 | ## 0.4.1 276 | 277 | - A small maintenance release to fix some broken links in metadata 278 | 279 | ## 0.4.0 280 | 281 | Initial release 282 | 283 | * SSL checking of target Splunk HEC is possible 284 | * Submits Puppet Summary report 285 | * Tasks for Bolt Apply and Bolt Result included 286 | * Example Plans for above included 287 | 288 | 289 | \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)* 290 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Default to Partner Integrations Engineering (PIE) team 2 | * @puppetlabs/pie 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Releasing the module 2 | 3 | Run a `release_prep` job with the branch set to `main` and `module_version` set to the module version that will be released 4 | 5 | > You can access the `release_prep` job via the `Actions` tab at the repository home page 6 | 7 | The `release_prep` job will run the `pdk release prep` command, push its changes up to the `release_prep` branch on the repo, and then generate a PR against `main` for review. Follow the instructions in the PR body to properly update the `CHANGELOG.md` file. 8 | 9 | Once the release prep PR's been merged to `main`, run a `release` job with the branch set to `main`. The `release` job will tag the module at the current `metadata.json` version, push the tag upstream, then build and publish the module to the Forge. 
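For reference, the manual `CHANGELOG.md` update described in the release prep PR body amounts to roughly the following sketch (the `upstream` remote name and the local branch handling are assumptions about your checkout):

```sh
# Check out the generated prep branch locally (assumes a remote named "upstream").
git fetch upstream
git checkout -b release_prep upstream/release_prep
# Edit CHANGELOG.md for the new version, then fold the edit into the prep commit.
git add CHANGELOG.md
git commit --amend --no-edit
git push --force upstream release_prep
```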
10 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source ENV['GEM_SOURCE'] || 'https://rubygems.org' 2 | 3 | def location_for(place_or_version, fake_version = nil) 4 | git_url_regex = %r{\A(?<url>(https?|git)[:@][^#]*)(#(?<branch>.*))?} 5 | file_url_regex = %r{\Afile:\/\/(?<path>.*)} 6 | 7 | if place_or_version && (git_url = place_or_version.match(git_url_regex)) 8 | [fake_version, { git: git_url[:url], branch: git_url[:branch], require: false }].compact 9 | elsif place_or_version && (file_url = place_or_version.match(file_url_regex)) 10 | ['>= 0', { path: File.expand_path(file_url[:path]), require: false }] 11 | else 12 | [place_or_version, { require: false }] 13 | end 14 | end 15 | 16 | group :development do 17 | gem "json", '= 2.1.0', require: false if Gem::Requirement.create(['>= 2.5.0', '< 2.7.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 18 | gem "json", '= 2.3.0', require: false if Gem::Requirement.create(['>= 2.7.0', '< 3.0.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 19 | gem "json", '= 2.5.1', require: false if Gem::Requirement.create(['>= 3.0.0', '< 3.0.5']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 20 | gem "json", '= 2.6.1', require: false if Gem::Requirement.create(['>= 3.1.0', '< 3.1.3']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 21 | gem "json", '= 2.6.3', require: false if Gem::Requirement.create(['>= 3.2.0', '< 4.0.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 22 | gem "racc", '~> 1.4.0', require: false if Gem::Requirement.create(['>= 2.7.0', '< 3.0.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 23 | gem "deep_merge", '~> 1.2.2', require: false 24 | gem "voxpupuli-puppet-lint-plugins", '~> 5.0', require: false 25 | gem "facterdb", '~> 2.1', require: false 26 | gem "metadata-json-lint", '~> 4.0', require: false 27 | gem "rspec-puppet-facts", '~> 4.0', require: false 28 | gem "dependency_checker", '~> 1.0.0', require: false 29 | gem "parallel_tests", '= 3.12.1', require: false 30 | gem "pry", '~> 0.10', require: false 31 | gem "simplecov-console", '~> 0.9', require: false 32 | gem "puppet-debugger", '~> 1.0', require: false 33 | gem "rubocop", '~> 1.50.0', require: false 34 | gem "rubocop-performance", '= 1.16.0', require: false 35 | gem "rubocop-rspec", '= 2.19.0', require: false 36 | gem "rb-readline", '= 0.5.5', require: false, platforms: [:mswin, :mingw, :x64_mingw] 37 | gem "rexml", '>= 3.0.0', '< 3.2.7', require: false 38 | gem "hiera-eyaml" 39 | gem "github_changelog_generator", require: false, git: 'https://github.com/skywinder/github-changelog-generator', ref: '20ee04ba1234e9e83eb2ffb5056e23d641c7a018' if Gem::Version.new(RUBY_VERSION.dup) >= Gem::Version.new('2.2.2') 40 | gem "rspec_junit_formatter", require: false 41 | gem "rspec-puppet-utils", require: false 42 | end 43 | group :development, :release_prep do 44 | gem "puppet-strings", '~> 4.0', require: false 45 | gem "puppetlabs_spec_helper", '~> 7.0', require: false 46 | end 47 | group :system_tests do 48 | gem "puppet_litmus", '~> 1.0', require: false, platforms: [:ruby, :x64_mingw] 49 | gem "CFPropertyList", '< 3.0.7', require: false, platforms: [:mswin, :mingw, :x64_mingw] 50 | gem "serverspec", '~> 2.41', require: false 51 | end 52 | 53 | puppet_version = ENV['PUPPET_GEM_VERSION'] 54 | facter_version = ENV['FACTER_GEM_VERSION'] 55 | hiera_version = ENV['HIERA_GEM_VERSION'] 56 | 57 | gems = {} 58 | 59 | gems['puppet'] =
location_for(puppet_version) 60 | 61 | # If facter or hiera versions have been specified via the environment 62 | # variables 63 | 64 | gems['facter'] = location_for(facter_version) if facter_version 65 | gems['hiera'] = location_for(hiera_version) if hiera_version 66 | 67 | gems.each do |gem_name, gem_params| 68 | gem gem_name, *gem_params 69 | end 70 | 71 | # Evaluate Gemfile.local and ~/.gemfile if they exist 72 | extra_gemfiles = [ 73 | "#{__FILE__}.local", 74 | File.join(Dir.home, '.gemfile'), 75 | ] 76 | 77 | extra_gemfiles.each do |gemfile| 78 | if File.file?(gemfile) && File.readable?(gemfile) 79 | eval(File.read(gemfile), binding) 80 | end 81 | end 82 | # vim: syntax=ruby 83 | -------------------------------------------------------------------------------- /HISTORY.md: -------------------------------------------------------------------------------- 1 | ## 0.6.0 2 | (2019/06/13) 3 | 4 | ### Changed 5 | - The splunk_hec module now supports customizing the `fact_terminus` and `facts_cache_terminus` names in the custom routes.yaml it deploys. If you are using a different facts_terminus (ie, not PuppetDB), you will want to set that parameter. Please note that this will come with a breaking change in functionality - Switches to the fact terminus cache setting via routes.yaml to ensure compatibility with CD4PE, see Fact Terminus Support for guides on how to change it. 6 | 7 | ## 0.5.0 8 | (2019/06/11) 9 | 10 | ### Added 11 | - Advanced configuration for puppet:summary, puppet:facts and puppet:metrics to allow for the support of multiple indexes 12 | - Additional documentation updates 13 | - Added support for individual sourcetype urls 14 | - Added ability to define multiple hec tokens on a sourcetype basis 15 | - Addition of basic acceptance testing using Litmus 16 | - The module is now under the power of the PDK 17 | - Addition of the `saved_report` flag for the splunk_hec application - Allows the user to test some of the splunk_hec functionality, submits the result directly to the splunk configuration 18 | - Addition of the splunk_hec puppet face/app allowing for a cat json | puppet splunk_hec like workflow. The primary functionality of this code is to enable sending pe metrics data to Splunk using the current CS best practices for collecting the CS data. 
19 | - Major changes to module were done to enable the Fact Terminus: 20 | - util/splunk_hec.rb created for common access methods 21 | - consistent info and error handling for both reports and facts 22 | - performance profile support for Fact Terminus 23 | - Documentation updated with guide and default facts listed 24 | - Module updated to optionally manage reports setting in puppet.conf 25 | - Module updated to add new parameters and template values 26 | - Fact collection time added to puppet report processor 27 | - SSL handling and documentation improved 28 | 29 | ### Fixed 30 | - Minor fixes to output dialog 31 | 32 | ### Changed 33 | - url parameter now expects a full URI of https://servername:8088/services/collector 34 | 35 | ## 0.4.1 36 | 37 | - A small maintenance release to fix some broken links in metadata 38 | 39 | ## 0.4.0 40 | 41 | Initial release 42 | 43 | * SSL checking of target Splunk HEC is possible 44 | * Submits Puppet Summary report 45 | * Tasks for Bolt Apply and Bolt Result included 46 | * Example Plans for above included 47 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | // Update this once Bill's helpers are merged into the shared library repo 2 | @Library('puppet_jenkins_shared_libraries') _ 3 | 4 | pipeline{ 5 | agent { 6 | label 'k8s-worker' 7 | } 8 | environment { 9 | RUBY_VERSION='2.5.7' 10 | GEM_SOURCE='https://artifactory.delivery.puppetlabs.net/artifactory/api/gems/rubygems/' 11 | RAKE_SETUP_TASK='rake acceptance:setup' 12 | RAKE_TEST_TASK='rake acceptance:run_tests' 13 | RAKE_TEARDOWN_TASK='rake acceptance:tear_down' 14 | CI='true' 15 | RESULTS_FILE_NAME='rspec_junit_results.xml' 16 | } 17 | stages{ 18 | 19 | stage('Setup') { 20 | steps { 21 | echo 'Bundle Install' 22 | bundleInstall env.RUBY_VERSION 23 | bundleExec env.RUBY_VERSION, env.RAKE_SETUP_TASK 24 | } 25 | } 26 | 27 | stage('Run Tests') { 28 | steps { 29 | echo 'Run Tests' 30 | bundleExec env.RUBY_VERSION, env.RAKE_TEST_TASK 31 | } 32 | } 33 | } 34 | post{ 35 | always { 36 | script { 37 | if(fileExists(env.RESULTS_FILE_NAME)) { 38 | junit testResults: env.RESULTS_FILE_NAME, allowEmptyResults: true 39 | } 40 | } 41 | } 42 | cleanup { 43 | bundleExec env.RUBY_VERSION, env.RAKE_TEARDOWN_TASK 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "{}" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2019 Puppet, Inc. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
-------------------------------------------------------------------------------- /REFERENCE.md: -------------------------------------------------------------------------------- 1 | # Reference 2 | 3 | 4 | 5 | ## Table of Contents 6 | 7 | ### Classes 8 | 9 | #### Public Classes 10 | 11 | * [`splunk_hec`](#splunk_hec): Simple class to manage your splunk_hec connectivity 12 | 13 | #### Private Classes 14 | 15 | * `splunk_hec::v2_cleanup`: Class to remove old configuration files 16 | 17 | ### Functions 18 | 19 | * [`splunk_hec::secure`](#splunk_hecsecure): Custom function to mark sensitive data utilized by this module as Sensitive types in the Puppet language. Sensitive data is redacted from Pup 20 | 21 | ### Plans 22 | 23 | #### Public Plans 24 | 25 | * [`splunk_hec::examples::apply_example`](#splunk_hec--examples--apply_example): Example of submitting a report from apply Function to Splunk. 26 | * [`splunk_hec::examples::result_example`](#splunk_hec--examples--result_example): An example of submitting a Task or Functions results to Splunk as a Task. 27 | 28 | #### Private Plans 29 | 30 | * `splunk_hec::acceptance::oss_server_setup`: Installs open source Puppet. 31 | * `splunk_hec::acceptance::pe_server_setup`: Install PE Server 32 | * `splunk_hec::acceptance::provision_machines`: Provisions machines 33 | * `splunk_hec::acceptance::server_setup`: Install PE Server 34 | 35 | ## Classes 36 | 37 | ### `splunk_hec` 38 | 39 | Simple class to manage your splunk_hec connectivity 40 | 41 | #### Examples 42 | 43 | ##### 44 | 45 | ```puppet 46 | include splunk_hec 47 | ``` 48 | 49 | #### Parameters 50 | 51 | The following parameters are available in the `splunk_hec` class: 52 | 53 | - [Reference](#reference) 54 | - [Table of Contents](#table-of-contents) 55 | - [Classes](#classes) 56 | - [Public Classes](#public-classes) 57 | - [Private Classes](#private-classes) 58 | - [Functions](#functions) 59 | - [Plans](#plans) 60 | - [Public Plans](#public-plans) 61 | - [Private Plans](#private-plans) 62 | - [Classes](#classes-1) 63 | - [`splunk_hec`](#splunk_hec) 64 | - [Examples](#examples) 65 | - [](#) 66 | - [Parameters](#parameters) 67 | - [`url`](#url) 68 | - [`token`](#token) 69 | - [`facts_allowlist`](#facts_allowlist) 70 | - [`enable_reports`](#enable_reports) 71 | - [`record_event`](#record_event) 72 | - [`disabled`](#disabled) 73 | - [`only_changes`](#only_changes) 74 | - [`manage_routes`](#manage_routes) 75 | - [`events_reporting_enabled`](#events_reporting_enabled) 76 | - [`facts_terminus`](#facts_terminus) 77 | - [`facts_cache_terminus`](#facts_cache_terminus) 78 | - [`facts_blocklist`](#facts_blocklist) 79 | - [`pe_console`](#pe_console) 80 | - [`timeout`](#timeout) 81 | - [`ssl_ca`](#ssl_ca) 82 | - [`include_system_cert_store`](#include_system_cert_store) 83 | - [`fips_crl_check`](#fips_crl_check) 84 | - [`fips_verify_peer`](#fips_verify_peer) 85 | - [`token_summary`](#token_summary) 86 | - [`token_facts`](#token_facts) 87 | - [`token_metrics`](#token_metrics) 88 | - [`token_events`](#token_events) 89 | - [`url_summary`](#url_summary) 90 | - [`url_facts`](#url_facts) 91 | - [`url_metrics`](#url_metrics) 92 | - [`url_events`](#url_events) 93 | - [`include_logs_status`](#include_logs_status) 94 | - [`include_logs_catalog_failure`](#include_logs_catalog_failure) 95 | - [`include_logs_corrective_change`](#include_logs_corrective_change) 96 | - [`include_resources_status`](#include_resources_status) 97 | - [`include_resources_corrective_change`](#include_resources_corrective_change) 98 | - 
[`summary_resources_format`](#summary_resources_format) 99 | - [`event_types`](#event_types) 100 | - [`orchestrator_data_filter`](#orchestrator_data_filter) 101 | - [`rbac_data_filter`](#rbac_data_filter) 102 | - [`classifier_data_filter`](#classifier_data_filter) 103 | - [`pe_console_data_filter`](#pe_console_data_filter) 104 | - [`code_manager_data_filter`](#code_manager_data_filter) 105 | - [Plans](#plans-1) 106 | - [`splunk_hec::examples::apply_example`](#splunk_hecexamplesapply_example) 107 | - [Parameters](#parameters-1) 108 | - [`plan_guid`](#plan_guid) 109 | - [`plan_name`](#plan_name) 110 | - [`splunk_hec::examples::result_example`](#splunk_hecexamplesresult_example) 111 | 112 | ##### `url` 113 | 114 | Data type: `Optional[String]` 115 | 116 | The url of the server that PE is running on 117 | 118 | ##### `token` 119 | 120 | Data type: `Optional[String]` 121 | 122 | The default Splunk HEC token 123 | Note: The value of the token is converted to Puppet's Sensitive data type during catalog application. 124 | 125 | ##### `facts_allowlist` 126 | 127 | Data type: `Array` 128 | 129 | The list of facts that will be collected in the report. To collect all facts available add the special value 'all.facts'. 130 | 131 | Default value: `['dmi','disks','partitions','processors','networking']` 132 | 133 | ##### `enable_reports` 134 | 135 | Data type: `Boolean` 136 | 137 | Adds splunk_hec to the list of report processors 138 | 139 | Default value: `false` 140 | 141 | ##### `record_event` 142 | 143 | Data type: `Boolean` 144 | 145 | If set to true, will call store_event and save report as json 146 | 147 | Default value: `false` 148 | 149 | ##### `disabled` 150 | 151 | Data type: `Boolean` 152 | 153 | Removes settings to send reports and facts to Splunk 154 | 155 | Default value: `false` 156 | 157 | ##### `only_changes` 158 | 159 | Data type: `Boolean` 160 | 161 | When true, only reports with a changed status with be send to Splunk 162 | 163 | Default value: `false` 164 | 165 | ##### `manage_routes` 166 | 167 | Data type: `Boolean` 168 | 169 | When false, will not automatically send facts to splunk_hec 170 | 171 | Default value: `false` 172 | 173 | ##### `events_reporting_enabled` 174 | 175 | Data type: `Boolean` 176 | 177 | When true, will send data from PE Event Forwarding module to Splunk 178 | 179 | Default value: `false` 180 | 181 | ##### `facts_terminus` 182 | 183 | Data type: `String` 184 | 185 | Ensure that facts get saved to puppetdb 186 | 187 | Default value: `'puppetdb'` 188 | 189 | ##### `facts_cache_terminus` 190 | 191 | Data type: `String` 192 | 193 | Makes sure that the facts get sent to splunk_hec 194 | 195 | Default value: `'splunk_hec'` 196 | 197 | ##### `facts_blocklist` 198 | 199 | Data type: `Optional[Array]` 200 | 201 | The list of facts that will not be collected in the report 202 | 203 | Default value: `undef` 204 | 205 | ##### `pe_console` 206 | 207 | Data type: `String` 208 | 209 | The FQDN for the PE console 210 | 211 | Default value: `$settings::report_server` 212 | 213 | ##### `timeout` 214 | 215 | Data type: `Optional[Integer]` 216 | 217 | Timeout limit for for both open and read sessions 218 | 219 | Default value: `undef` 220 | 221 | ##### `ssl_ca` 222 | 223 | Data type: `Optional[String]` 224 | 225 | The name of the ca certification/bundle for ssl validation of the splunk_hec endpoint 226 | 227 | Default value: `undef` 228 | 229 | ##### `include_system_cert_store` 230 | 231 | Data type: `Boolean` 232 | 233 | By default, the certificates in the local system cert store 
are ignored. To include 234 | these certificates for ssl validation of the splunk_hec endpoint set to True 235 | 236 | Default value: `false` 237 | 238 | ##### `fips_crl_check` 239 | 240 | Data type: `Boolean` 241 | 242 | By default, the Puppet HTTP Client will attempt to check the Splunk CA against the Splunk CRL. 243 | Unless the Splunk HEC endpoint is configured with a certificate generated by the Puppet CA, set 244 | this parameter to false to allow metrics to successfully send. 245 | 246 | Default value: `true` 247 | 248 | ##### `fips_verify_peer` 249 | 250 | Data type: `Boolean` 251 | 252 | By default, the Puppet HTTP Client will attempt peer verfication. When utilizing a self-signed 253 | certificate set this parameter to false to allow metrics to successfully send. 254 | 255 | Default value: `true` 256 | 257 | ##### `token_summary` 258 | 259 | Data type: `Optional[String]` 260 | 261 | Corresponds to puppet:summary in the Puppet Report Viewer 262 | When storing summary in a different index than the default token 263 | Note: The value of the token is converted to Puppet's Sensitive data type during catalog application. 264 | 265 | Default value: `undef` 266 | 267 | ##### `token_facts` 268 | 269 | Data type: `Optional[String]` 270 | 271 | Corresponds to puppet:facts in the Puppet Report Viewer 272 | When storing facts in a different index than the default token 273 | Note: The value of the token is converted to Puppet's Sensitive data type during catalog application. 274 | 275 | Default value: `undef` 276 | 277 | ##### `token_metrics` 278 | 279 | Data type: `Optional[String]` 280 | 281 | Corresponds to puppet:metrics in the Puppet Report Viewer 282 | When storing metrics in a different index than the default token 283 | Note: The value of the token is converted to Puppet's Sensitive data type during catalog application. 284 | 285 | Default value: `undef` 286 | 287 | ##### `token_events` 288 | 289 | Data type: `Optional[String]` 290 | 291 | When storing events from pe_event_forwarding in a different index than the default token 292 | Note: The value of the token is converted to Puppet's Sensitive data type during catalog application. 
293 | 294 | Default value: `undef` 295 | 296 | ##### `url_summary` 297 | 298 | Data type: `Optional[String]` 299 | 300 | Similar to token_summary; used to store summary in a different index than the default url 301 | 302 | Default value: `undef` 303 | 304 | ##### `url_facts` 305 | 306 | Data type: `Optional[String]` 307 | 308 | Similar to token_facts; used to store facts in a different index than the default url 309 | 310 | Default value: `undef` 311 | 312 | ##### `url_metrics` 313 | 314 | Data type: `Optional[String]` 315 | 316 | Similar to token_metrics; used to store metrics in a different index than the default url 317 | 318 | Default value: `undef` 319 | 320 | ##### `url_events` 321 | 322 | Data type: `Optional[String]` 323 | 324 | Similar to token_events; used to store events from pe_event_forwarding in a different index than the default url 325 | 326 | Default value: `undef` 327 | 328 | ##### `include_logs_status` 329 | 330 | Data type: `Optional[Array]` 331 | 332 | Determines if puppet logs should be included based on the return status of the puppet agent run 333 | Can be none, one, or any of the following: failed, changed, unchanged 334 | 335 | Default value: `undef` 336 | 337 | ##### `include_logs_catalog_failure` 338 | 339 | Data type: `Boolean` 340 | 341 | Include logs if catalog fails to compile 342 | 343 | Default value: `false` 344 | 345 | ##### `include_logs_corrective_change` 346 | 347 | Data type: `Boolean` 348 | 349 | Include logs if there is a corrective change 350 | Only a PE feature 351 | 352 | Default value: `false` 353 | 354 | ##### `include_resources_status` 355 | 356 | Data type: `Optional[Array]` 357 | 358 | Determines if resource events should be included based on return status of puppet agent run 359 | Does not include 'unchanged' status reports 360 | Allowed values are: failed, changed, unchanged 361 | 362 | Default value: `undef` 363 | 364 | ##### `include_resources_corrective_change` 365 | 366 | Data type: `Boolean` 367 | 368 | Include resource events if there is a corrective change 369 | Only a PE feature 370 | 371 | Default value: `false` 372 | 373 | ##### `summary_resources_format` 374 | 375 | Data type: `String` 376 | 377 | If include_resource_corrective_change or include_resources_status is set and thus resource_events 378 | are being sent as part of puppet:summary events, then can choose format. 
379 | Allowed values are: 'hash', 'array' 380 | 381 | Default value: `'hash'` 382 | 383 | ##### `event_types` 384 | 385 | Data type: `Array` 386 | 387 | Determines which events should be forwarded to Splunk 388 | Allowed values are: 'orchestrator','rbac','classifier','pe-console','code-manager' 389 | 390 | Default value: `['orchestrator','rbac','classifier','pe-console','code-manager']` 391 | 392 | ##### `orchestrator_data_filter` 393 | 394 | Data type: `Optional[Array]` 395 | 396 | Filters the jobs event data 397 | 398 | Default value: `undef` 399 | 400 | ##### `rbac_data_filter` 401 | 402 | Data type: `Optional[Array]` 403 | 404 | Filters the rbac event data 405 | 406 | Default value: `undef` 407 | 408 | ##### `classifier_data_filter` 409 | 410 | Data type: `Optional[Array]` 411 | 412 | Filters the classifier event data 413 | 414 | Default value: `undef` 415 | 416 | ##### `pe_console_data_filter` 417 | 418 | Data type: `Optional[Array]` 419 | 420 | Filters the pe_console event data 421 | 422 | Default value: `undef` 423 | 424 | ##### `code_manager_data_filter` 425 | 426 | Data type: `Optional[Array]` 427 | 428 | Filters the code_manager event data 429 | 430 | Default value: `undef` 431 | 432 | ## Plans 433 | 434 | ### `splunk_hec::examples::apply_example` 435 | 436 | Example of submitting a report from apply Function to Splunk. 437 | 438 | #### Parameters 439 | 440 | The following parameters are available in the `splunk_hec::examples::apply_example` plan: 441 | 442 | * [`plan_guid`](#-splunk_hec--examples--apply_example--plan_guid) 443 | * [`plan_name`](#-splunk_hec--examples--apply_example--plan_name) 444 | 445 | ##### `plan_guid` 446 | 447 | Data type: `Optional[String[1]]` 448 | 449 | A guid used to identify invocation of the plan (should change each run) 450 | 451 | ##### `plan_name` 452 | 453 | Data type: `Optional[String[1]]` 454 | 455 | The name of the plan being run (shouldn't change each run) 456 | 457 | ### `splunk_hec::examples::result_example` 458 | 459 | An example of submitting a Task or Functions results to Splunk as a Task. 460 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'bundler' 4 | require 'puppet_litmus/rake_tasks' if Gem.loaded_specs.key? 'puppet_litmus' 5 | require 'puppetlabs_spec_helper/rake_tasks' 6 | require 'puppet-syntax/tasks/puppet-syntax' 7 | require 'puppet-strings/tasks' if Gem.loaded_specs.key? 
'puppet-strings' 8 | 9 | PuppetLint.configuration.send('disable_relative') 10 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 1.1.x.{build} 3 | skip_branch_with_pr: true 4 | branches: 5 | only: 6 | - main 7 | - release 8 | skip_commits: 9 | message: /^\(?doc\)?.*/ 10 | clone_depth: 10 11 | init: 12 | - SET 13 | - 'mkdir C:\ProgramData\PuppetLabs\code && exit 0' 14 | - 'mkdir C:\ProgramData\PuppetLabs\facter && exit 0' 15 | - 'mkdir C:\ProgramData\PuppetLabs\hiera && exit 0' 16 | - 'mkdir C:\ProgramData\PuppetLabs\puppet\var && exit 0' 17 | environment: 18 | matrix: 19 | - 20 | RUBY_VERSION: 25-x64 21 | CHECK: validate lint check rubocop 22 | - 23 | PUPPET_GEM_VERSION: ~> 6.0 24 | RUBY_VERSION: 25 25 | CHECK: parallel_spec 26 | - 27 | PUPPET_GEM_VERSION: ~> 6.0 28 | RUBY_VERSION: 25-x64 29 | CHECK: parallel_spec 30 | matrix: 31 | fast_finish: true 32 | install: 33 | - set PATH=C:\Ruby%RUBY_VERSION%\bin;%PATH% 34 | - bundle install --jobs 4 --retry 2 --without system_tests 35 | - type Gemfile.lock 36 | build: off 37 | test_script: 38 | - bundle exec puppet -V 39 | - ruby -v 40 | - gem -v 41 | - bundle -v 42 | - bundle exec rake %CHECK% 43 | notifications: 44 | - provider: Email 45 | to: 46 | - nobody@nowhere.com 47 | on_build_success: false 48 | on_build_failure: false 49 | on_build_status_changed: false 50 | -------------------------------------------------------------------------------- /docs/advanced_puppet_configuration.md: -------------------------------------------------------------------------------- 1 | ## Advanced Puppet Configuration 2 | 3 | The `splunk_hec` module also supports customizing the `facts_terminus` and `facts_cache_terminus` names in the custom `splunk_hec_routes.yaml` it deploys. If you are using a different `facts_terminus` (i.e. not PuppetDB), you will want to configure that parameter. 4 | 5 | If you are already using a custom `routes.yaml`, these are the equivalent instructions of what the `splunk_hec` module does, the most important setting is configuring `cache: splunk_hec`. 6 | 7 | * Create a custom `splunk_hec_routes.yaml` file to override where facts are cached: 8 | 9 | ``` 10 | --- 11 | master: 12 | facts: 13 | terminus: puppetdb 14 | cache: splunk_hec 15 | ``` 16 | 17 | * Set this routes file instead of the default one with the following command: 18 | * `puppet config set route_file /etc/puppetlabs/puppet/splunk_hec_routes.yaml --section master` 19 | -------------------------------------------------------------------------------- /docs/advanced_splunk_configuration.md: -------------------------------------------------------------------------------- 1 | ## Advanced Splunk Configuration Options 2 | 3 | The `splunk_hec` class and data processors support setting individual HEC tokens and URLs for the following data types: 4 | 5 | * **Summary Reports**: Corresponds to the `puppet:summary` source type in the Puppet Report Viewer. Use the `token_summary` and `url_summary` parameters to configure them in the `splunk_hec.yaml` file. 6 | * **Fact Data**: Corresponds to the `puppet:facts` source type in the Puppet Report Viewer. Use the `token_facts` and `url_facts` parameters to configure them in the `splunk_hec.yaml` file. 7 | * **PE Metrics**: Corresponds to the `puppet:metrics` source type in the Puppet Report Viewer. 
Use the `token_metrics` and `url_metrics` parameters to configure them in the `splunk_hec.yaml` file. 8 | 9 | Different URLs only need to be specified if different HEC systems entirely are being used. If one is using one collecter server, but multiple HECs, just use the single `url` parameter as before, and specify each source type's corresponding HEC token. 10 | 11 | **Note**: Making these changes here assumes that you know how to properly use indexes and update the advanced search macros in Splunk to ensure that the Report Viewer can load data from those indexes. 12 | -------------------------------------------------------------------------------- /docs/custom_installation.md: -------------------------------------------------------------------------------- 1 | ## Custom Installation 2 | 3 | > **Please Note**: If you are installing this module using a [`control-repo`](https://puppet.com/docs/pe/latest/control_repo.html) you must have `splunk_hec` in your production environment's [`Puppetfile`](https://puppet.com/docs/pe/latest/puppetfile.html) so the Puppet Server process can properly load the required libraries. You can then create a feature branch to enable them and test the configuration, but the libraries **must be** in `production`; otherwise the feature branch won't work as expected. If your Puppet Server is in a different environment, please add this module to the `Puppetfile` in that environment as well. 4 | 5 | The steps below will help install and troubleshoot the report processor on a standard Puppet Primary Server; including manual steps to configure compilers (Puppet Servers), and to use the included `splunk_hec` class. Because one is modifying production machines, these steps allow you to validate your settings before deploying the changes live. 6 | 7 | 1. Install the Puppet Report Viewer app in Splunk. This will import the needed source types to configure Splunk's HTTP Endpoint Collector (HEC) and provide a dashboard that will show the reports once they are sent to Splunk. 8 | 9 | 2. Create a Splunk HEC Token or use an existing one that sends to `main` index and **does not** have acknowledgement enabled. Follow the steps provided by Splunk's [Getting Data In Guide](http://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) if you are new to HTTP Endpoint Collectors. 10 | 11 | 3. [Install this Puppet module](https://puppet.com/docs/puppet/latest/modules_installing.html) in the environment that your Puppet Servers are using (e.g. `production`). 12 | 13 | 4. Run `puppet plugin download` on your Puppet Servers to sync the content. Some users with strict permissions may need to run `umask 022` first. 14 | 15 | * **Please Note**: If permissions are too restrictive you may see the following error in the Puppet Server logs: 16 | 17 | ``` 18 | Could not find terminus splunk_hec for indirection facts 19 | ``` 20 | 21 | 5. Create `/etc/puppetlabs/puppet/splunk_hec.yaml` (see the [examples directory](https://github.com/puppetlabs/puppetlabs-splunk_hec/main/examples/splunk_hec.yaml)), adding your Splunk Server URL to the `url` parameter (e.g. `https://splunk-dev:8088/services/collector`) and HEC Token created during step 2 to the `splunk_token` parameter. 22 | * You can add `timeout` as an optional parameter. The **default value** is `1` second for both open and read sessions, so take value x2 for real world use. 23 | * **PE Only**: Provide the `pe_console` parameter value. 
This is the FQDN for the PE console, which Splunk can use to lookup further information if the installation utilizes compilers (it is best practice to set this if you're anticipating scaling the installation in the future). 24 | 25 | ``` 26 | --- 27 | "url" : "https://splunk-dev.testing.local:8088/services/collector" 28 | "token" : "13311780-EC29-4DD0-A796-9F0CDC56F2AD" 29 | ``` 30 | (**Note**: If [Disaster Recovery](https://puppet.com/docs/pe/latest/dr_overview.html) is enabled you will need to ensure these settings exist on the Replica node as well. This is often done through the `PE HA Replica` node group.) 31 | 32 | 6. Run `puppet apply -e 'notify { "hello world": }' --reports=splunk_hec` from the Puppet Server, this will load the report processor and test your configuration settings without actually modifying your Puppet Server's running configuration. If you are using the Puppet Report Viewer app in Splunk then you will see the page update with new data. If not, perform a search by the `sourcetype` you provided with your HEC configuration. 33 | 34 | 7. If configured properly the Puppet Report Viewer app in Splunk will show 1 node in the `Overview` tab. 35 | 36 | 8. Now it is time to roll these settings out to the fleet of Puppet Servers in the installation. For PE users: 37 | * In the [PE console](https://puppet.com/docs/pe/latest/console_accessing.html), navigate to `Node groups` and expand `PE Infrastructure`. 38 | * Select `PE Master` and navigate to the `Classes` tab. 39 | * Click **Refresh** to ensure that the `splunk_hec` class is loaded. 40 | * Add new class `splunk_hec`. 41 | * From the `Parameter` drop down list you will need to configure at least `url` and `token`, providing the same values from the testing configuration file. 42 | * Optionally set `enable_reports` to `true` if there isn't another component managing the servers reports setting. Otherwise manually add `splunk_hec` to the settings as described in the [manual steps](#manual-steps) below. 43 | * Commit changes and run Puppet. It is best to navigate to the `PE Certificate Authority` node group and run Puppet there first, before running Puppet on the remaining nodes. 44 | 45 | 9. For Inventory support in the Puppet Report Viewer, see [Fact Terminus Support](https://github.com/puppetlabs/puppetlabs-splunk_hec/blob/main/docs/fact_terminus_support.md). 46 | 47 | #### Manual Steps: 48 | 49 | * Add `splunk_hec` to `reports` under the `[master]` configuration block in `/etc/puppetlabs/puppet/puppet.conf`: 50 | 51 | ``` 52 | [master] 53 | node_terminus = classifier 54 | storeconfigs = true 55 | storeconfigs_backend = puppetdb 56 | reports = puppetdb,splunk_hec 57 | ``` 58 | 59 | * [Restart the `pe-puppetserver`](https://puppet.com/docs/puppetserver/latest/restarting.html) process (`puppet-server` for Open Source Puppet) for it to reload the configuration and the plugin. 60 | 61 | * Run `puppet agent -t` on an agent; if you are using the suggested name, use `source="http:puppet-report-summary"` in your Splunk search field to show the reports as they arrive. 
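
If you would rather have the module manage the `reports` setting than edit `puppet.conf` by hand, a minimal class declaration along the lines below achieves the same result. This is a sketch only: the URL and token are the placeholder values from step 5, and `pe_console` should be replaced with your own console's FQDN.

```
# Sketch: let the module add splunk_hec to the reports setting itself.
# URL, token, and pe_console are placeholders from the steps above.
class { 'splunk_hec':
  url            => 'https://splunk-dev.testing.local:8088/services/collector',
  token          => '13311780-EC29-4DD0-A796-9F0CDC56F2AD',
  enable_reports => true,
  pe_console     => 'puppet.company.com',
}
```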
62 | -------------------------------------------------------------------------------- /docs/customized_reporting.md: -------------------------------------------------------------------------------- 1 | ## Customized Reporting 2 | 3 | As of `0.8.0` and later the report processor can be configured to include [**Logs**](https://puppet.com/docs/puppet/latest/format_report.html#puppet::util::log) and [**Resource Events**](https://puppet.com/docs/puppet/latest/format_report.html#puppet::resource::status) along with the existing summary data. Because this data varies between runs and agents in Puppet, it is difficult to predict how much data you will use in Splunk as a result. However, this removes the need for configuring the **Detailed Report Generation** alerts in Splunk to retrieve that information; which may be useful for large installations that need to retrieve a large amount of data. You can now just send the information from Puppet directly. 4 | 5 | Add one or more of these parameters based on the desired outcome, these apply to the state of the puppet runs. You cannot filter by facts on which nodes these are in effect for. As such, you can get ***logs when a puppet run fails***, but not *logs when a `windows` server puppet run fails*. 6 | 7 | By default this type of reporting is not enabled. 8 | 9 | **Parameters**: 10 | 11 | ##### event_types (Requires `puppetlabs-pe_event_forwarding` module) 12 | 13 | `Array`: Determines which event types should be forwarded to Splunk. Default value includes all event types. This can be one, or any of the following: 14 | 15 | * `classifier` 16 | * `code-manager` 17 | * `orchestrator` 18 | * `pe-console` 19 | * `rbac` 20 | 21 | ##### include_logs_status 22 | 23 | `Array`: Determines if [logs](https://puppet.com/docs/puppet/latest/format_report.html#puppet::util::log) should be included based on the return status of the puppet agent run. This can be none, one, or any of the following: 24 | 25 | * `failed` 26 | * `changed` 27 | * `unchanged` 28 | 29 | ##### include_logs_catalog_failure 30 | 31 | `Boolean`: Include logs if a [catalog](https://puppet.com/docs/puppet/latest/subsystem_catalog_compilation.html) fails to compile. This is a more specific type of failure that indicates a server-side issue. 32 | 33 | * `true` 34 | * `false` 35 | 36 | ##### include_logs_corrective_change 37 | 38 | `Boolean`: Include logs if a there is a [corrective change](https://puppet.com/docs/pe/latest/analyze_changes_across_runs.html) (a PE only feature) - indicating drift was detected from the last time puppet ran on the system. 39 | 40 | * `true` 41 | * `false` 42 | 43 | ##### include_resources_status 44 | 45 | `Array`: Determines if [resource events](https://puppet.com/docs/puppet/latest/format_report.html#puppet::resource::status) should be included based on the return status of the puppet agent run. **Note**: This only includes resources whose status is not `unchanged` - not the entire catalog. The can be none, one, or any of the following: 46 | 47 | * `failed` 48 | * `changed` 49 | * `unchanged` 50 | 51 | ##### include_resources_corrective_change 52 | 53 | `Boolean`: Include resource events if a there is a corrective change (a **PE only** feature) - indicating drift was detected from the last time puppet ran on the system. 
54 | 55 | * `true` 56 | * `false` 57 | 58 | ##### summary_resources_format 59 | 60 | `String`: If `include_resources_corrective_change` or `include_resources_status` is set and therefore `resource_events` are being sent as part of `puppet:summary` events, we can choose what format they should be sent in. Depending on your usage within Splunk, different formats may be preferable. The possible values are: 61 | 62 | * `hash` :: **Default Value** 63 | * `array` 64 | 65 | Here is an example of the data that will be forwarded to Splunk in each instance: 66 | 67 | **`hash`**: 68 | 69 | ```json 70 | { 71 | "resource_events": { 72 | "File[/etc/something.conf]": { 73 | "resource": "File[/etc/something.conf]", 74 | "failed": false, 75 | "out_of_sync": true 76 | } 77 | } 78 | } 79 | ``` 80 | 81 | **`array`**: 82 | 83 | ```json 84 | { 85 | "resource_events": [ 86 | { 87 | "resource": "File[/etc/something.conf]", 88 | "failed": false, 89 | "out_of_sync": true 90 | } 91 | ] 92 | } 93 | ``` 94 | -------------------------------------------------------------------------------- /docs/fact_terminus_support.md: -------------------------------------------------------------------------------- 1 | ## Fact Terminus Support 2 | 3 | The `splunk_hec` module provides a fact terminus that will send a configurable set of facts to the same HEC that the report processor is using, with the `puppet:facts` source type. 4 | 5 | * Set the parameter `splunk_hec::manage_routes` to `true`. 6 | * In the PE console, this would be done by adding the `manage_routes` parameter in the node group configured with the `splunk_hec` class. 7 | * Run Puppet on the machines in that node group. 8 | * The `pe-puppetserver` service will restart once the new routes.yaml is deployed and configured. 9 | 10 | To configure which facts to collect add the `collect_facts` parameter to the `splunk_hec` class and modify the array of facts presented. 11 | 12 | * To collect **all facts** available at the time of the Puppet run, add the special value `all.facts` to the `collect_facts` array. 13 | * When collecting **all facts**, you can configure the optional parameter `facts_blocklist` with an array of facts that should not be collected. 
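
As a sketch of what that fact configuration could look like (note that this guide refers to the parameter as `collect_facts` while the class reference lists `facts_allowlist`, so match the name to your module version; the blocked fact names below are only placeholders):

```
# Sketch: collect every available fact except those in the blocklist.
# Parameter names follow this guide; adjust to your module version.
class { 'splunk_hec':
  manage_routes   => true,
  collect_facts   => ['all.facts'],
  facts_blocklist => ['ec2_metadata', 'gce'],
}
```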
14 | 15 | **Note**: The following facts are collected regardless as this data is utilized in a number of the dashboards in the Puppet Report Viewer: 16 | 17 | ``` 18 | 'os' 19 | 'memory' 20 | 'puppetversion' 21 | 'system_uptime' 22 | 'load_averages 23 | 'ipaddress' 24 | 'fqdn' 25 | 'trusted' 26 | 'producer' 27 | 'environment' 28 | ``` 29 | -------------------------------------------------------------------------------- /docs/images/hec_token.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/puppetlabs-splunk_hec/4e443283a7a714034d96dc6a61333450a39e4d67/docs/images/hec_token.png -------------------------------------------------------------------------------- /docs/images/puppet_report_viewer_config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/puppetlabs-splunk_hec/4e443283a7a714034d96dc6a61333450a39e4d67/docs/images/puppet_report_viewer_config.png -------------------------------------------------------------------------------- /docs/puppet_metrics_collector_support.md: -------------------------------------------------------------------------------- 1 | ## Puppet Metrics Collector Support 2 | 3 | This module can be utilized in conjunction with the [Puppet Metrics Collector](https://forge.puppet.com/puppetlabs/puppet_metrics_collector) module to populate the dashboards in the Metrics tab of the Puppet Report Viewer app. 4 | 5 | To enable this, once reporting is working with this module and the Metrics Collector module installed, set the `puppet_metrics_collector::metrics_server_type` parameter to `splunk_hec`. 6 | 7 | --- 8 | 9 | > In PE **2019.8.7+**, with `splunk_hec` and the Puppet Report Viewer properly configured, you will want to configure the following parameters within the `puppet_enterprise` class in the **PE Infrastructure** node group: 10 | 11 | > * `puppet_enterprise::enable_metrics_collection: true` 12 | > * `puppet_enterprise::enable_system_metrics_collection: true` 13 | 14 | >In your hiera data you will then want to configure the `metrics_server_type` parameter: 15 | 16 | > * `puppet_metrics_collector::metrics_server_type: ‘splunk_hec’` 17 | 18 | --- 19 | 20 | For more information please refer to the metrics collectors [documentation](https://forge.puppet.com/modules/puppetlabs/puppet_metrics_collector#metrics_server_type). -------------------------------------------------------------------------------- /docs/running_the_tests.md: -------------------------------------------------------------------------------- 1 | ## Running the tests 2 | ----------- 3 | ### Linter 4 | `bundle exec rubocop` 5 | 6 | ### Puppet class tests 7 | `bundle exec rspec spec/classes` 8 | 9 | ### Acceptance tests 10 | The acceptance tests use puppet-litmus in a multi-node fashion. The nodes consist of a 'master' node representing the PE master (and agent), and a Splunk node that runs the Splunk docker container. All nodes are stored in a generated `inventory.yaml` file (relative to the project root) so that they can be used with Bolt. 11 | 12 | To setup the test infrastructure, use `bundle exec rake acceptance:setup`. This will: 13 | 14 | * **Provision the master VM** 15 | * **Setup PE on the VM** 16 | * **Setup the Splunk instance.** This is just a Docker container on the master VM that runs splunk enterprise. Its code is contained in `spec/support/acceptance/splunk`. 
17 | * **Install the module on the master** 18 | 19 | Each setup step is its own task; `acceptance:setup`'s implementation consists of calling these tasks. Also, all setup tasks are idempotent. That means its safe to run them (and hence `acceptance:setup`) multiple times. 20 | 21 | To run the tests after setup, you can do `bundle exec rspec spec/acceptance`. To teardown the infrastructure, do `bundle exec rake acceptance:tear_down`. 22 | 23 | Below is an example acceptance test workflow: 24 | 25 | ``` 26 | bundle exec rake acceptance:setup 27 | bundle exec rspec spec/acceptance 28 | bundle exec rake acceptance:tear_down 29 | ``` 30 | 31 | **Note:** Remember to run `bundle exec rake acceptance:install_module` whenever you make updates to the module code. This ensures that the tests run against the latest version of the module. 32 | 33 | #### Debugging the acceptance tests 34 | Since the high-level setup is separate from the tests, you should be able to re-run a failed test multiple times via `bundle exec rspec spec/acceptance/path/to/test.rb`. 35 | 36 | **Note:** Sometimes, the modules in `spec/fixtures/modules` could be out-of-sync. If you see a weird error related to one of those modules, try running `bundle exec rake spec_prep` to make sure they're updated. 37 | -------------------------------------------------------------------------------- /docs/ssl_support.md: -------------------------------------------------------------------------------- 1 | ## SSL Configuration 2 | 3 | Configuring SSL support for this report processor and tasks requires that the Splunk HEC service being used has a [properly configured SSL certificate](https://docs.splunk.com/Documentation/Splunk/latest/Security/AboutsecuringyourSplunkconfigurationwithSSL). Once the HEC service has a valid SSL certificate, the CA will need to be made available to the report processor to load. The supported path is to install a copy of the Splunk CA to a directory called `/etc/puppetlabs/puppet/splunk_hec/` and provide the file name to `splunk_hec` class. 4 | 5 | You can manually update the `splunk_hec.yaml` file with these settings: 6 | 7 | ``` 8 | "ssl_ca" : "splunk_ca.cert" 9 | ``` 10 | 11 | Alternatively, you can create a [profile class](https://puppet.com/docs/pe/latest/osp/the_roles_and_profiles_method.html) that copies the `splunk_ca.cert` as part of invoking the splunk_hec class: 12 | 13 | ``` 14 | class profile::splunk_hec { 15 | file { '/etc/puppetlabs/puppet/splunk_hec': 16 | ensure => directory, 17 | owner => 'pe-puppet', 18 | group => 'pe-puppet', 19 | mode => 0644, 20 | } 21 | file { '/etc/puppetlabs/puppet/splunk_hec/splunk_ca.cert': 22 | ensure => file, 23 | owner => 'pe-puppet', 24 | group => 'pe-puppet', 25 | mode => '0644', 26 | source => 'puppet:///modules/profile/splunk_hec/splunk_ca.cert', 27 | } 28 | } 29 | ``` 30 | 31 | The certificate provided to the `ssl_ca` parameter is a supplement to the system ca certificates store. By default, the Ruby classes that perform certificate validation will attempt to use the system certificates first, and then if the certificate cannot be validated there, it will load the ca file in `ssl_ca`. Occasionally, the system cert store will cause validation errors prior to checking the file at `ssl_ca`. To avoid this you can set `ignore_system_cert_store` to `true`. This will allow the code to use ONLY the file at `ssl_ca` to perform certificate validation. 
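
Putting it together, a hedged example of declaring the class with the copied CA is shown below. The URL and token are the placeholder values used elsewhere in these docs, and the certificate file name must match whatever the profile above placed in `/etc/puppetlabs/puppet/splunk_hec/`.

```
# Sketch: validate the Splunk HEC endpoint against the bundled Splunk CA.
# URL, token, and CA file name are placeholders.
class { 'splunk_hec':
  url    => 'https://splunk-dev.testing.local:8088/services/collector',
  token  => '13311780-EC29-4DD0-A796-9F0CDC56F2AD',
  ssl_ca => 'splunk_ca.cert',
}
```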
32 | -------------------------------------------------------------------------------- /docs/troubleshooting_and_verification.md: -------------------------------------------------------------------------------- 1 | ## Troubleshooting and Verification 2 | 3 | ### Puppet 4 | 5 | Custom report processors and fact terminus indirectors run inside the Puppet Server process. For both Puppet Enterprise (PE) and Open Source Puppet (OSP) the Puppet Server logs are located at `/var/log/puppetlabs/puppetserver/puppetserver.log`. 6 | 7 | With versions `0.5.0+` of the `splunk_hec` module configured, a healthy system would log entries like the ones below: 8 | 9 | ``` 10 | # grep -i splunk /var/log/puppetlabs/puppetserver/puppetserver.log 11 | 2019-06-17T12:44:47.729Z INFO [qtp1685349172-4356] [puppetserver] Puppet Submitting facts to Splunk at https://splunk-dev.c.splunk-217321.internal:8088/services/collector 12 | 2019-06-17T12:44:48.322Z INFO [qtp1685349172-15004] [puppetserver] Puppet Submitting report to Splunk at https://splunk-dev.c.splunk-217321.internal:8088/services/collector 13 | 2019-06-17T12:45:25.913Z INFO [qtp1685349172-28874] [puppetserver] Puppet Submitting facts to Splunk at https://splunk-dev.c.splunk-217321.internal:8088/services/collector 14 | ``` 15 | 16 | Versions prior to `0.5.0`, or `0.5.0+` without the fact terminus configured, a healthy system would log entries like the ones below: 17 | 18 | ``` 19 | # grep -i splunk /var/log/puppetlabs/puppetserver/puppetserver.log 20 | 2019-06-17T12:48:21.646Z INFO [qtp1685349172-4354] [puppetserver] Puppet Submitting report to Splunk at https://splunk-dev.c.splunk-217321.internal:8088/services/collector 21 | 2019-06-17T12:48:31.689Z INFO [qtp1685349172-4354] [puppetserver] Puppet Submitting report to Splunk at https://splunk-dev.c.splunk-217321.internal:8088/services/collector 22 | 2019-06-17T12:49:22.881Z INFO [qtp1685349172-4356] [puppetserver] Puppet Submitting report to Splunk at https://splunk-dev.c.splunk-217321.internal:8088/services/collector 23 | ``` 24 | 25 | If neither of those entries appears in the log, then the Puppet Server has yet to be configured. Check the `reports` and `route_file` settings in the `puppet.conf` to ensure report processing and fact indirection for `splunk_hec` is properly configured on all of the infrastructure nodes in the installation (e.g. Primary Server, Replica, Compilers). This can be confirmed with the following command. 26 | 27 | ``` 28 | # puppet config print reports route_file --section master 29 | reports = puppetdb,splunk_hec 30 | route_file = /etc/puppetlabs/puppet/splunk_hec_routes.yaml 31 | ``` 32 | 33 | --- 34 | 35 | ### Splunk 36 | 37 | To verify that reports and facts from Puppet are properly ingested by Splunk, search all indexes for the source type `puppet:*`. 38 | 39 | ``` 40 | index=* sourcetype=puppet:* 41 | ``` 42 | 43 | The number of events corresponds to the number of Puppet runs, doubled (1 event for the report and 1 event for the facts collected), that have occured during that time period; not the number of hosts. To verify all hosts in an environment have submitted reports and facts, you would need to `dedup` the events by `host` to get an accurate count. 
44 | 45 | Once Puppet has been sending data to Splunk for ~60 minutes, set the time range picker to the last 60 minutes and use the following search: 46 | 47 | ``` 48 | index=* sourcetype=puppet:summary | dedup host 49 | ``` 50 | 51 | The resulting event count should match the number of nodes listed in the Puppet Enterprise console. If you are utilizing multiple Puppet installations you will need to filter by the `pe_console` value: 52 | 53 | ``` 54 | index=* sourcetype=puppet:summary | pe_console=puppet.company.com | dedup host 55 | ``` 56 | 57 | In the event the above steps have confirmed that the reports/facts are being sent to Splunk and stored appropriately by the correct source types; and you are experiencing issues with detailed reports or display issues in the Splunk Console, please see the documentation for the [Puppet Report Viewer](https://github.com/puppetlabs/ta-puppet-report-viewer). 58 | 59 | --- -------------------------------------------------------------------------------- /examples/foo.json: -------------------------------------------------------------------------------- 1 | {"timestamp":"2019-05-03T05:55:30+00:00","servers":{"127-0-0-1":{"asdfsafsdf":{"bas":"bin"}}}} -------------------------------------------------------------------------------- /examples/orchestrator_metrics.json: -------------------------------------------------------------------------------- 1 | {"sourcetype":"non-existant_sourcetype","timestamp":"2019-05-03T02:59:33Z","servers":{"puppet-c-splunk-217321-internal":{"orchestrator":{"broker-service":{"service_version":"1.5.3","service_status_version":1,"detail_level":"debug","state":"running","status":{"metrics":{"puppetlabs.pcp.on-close":{"rates":{"1":2.788760636356196e-20,"5":0.000033877101432950975,"15":0.011065951062135926,"total":1},"mean":417353931,"std-dev":0,"percentiles":{"0.75":417353931,"0.95":417353931,"0.99":417353931,"0.999":417353931,"1.0":417353931},"largest":417353931,"smallest":417353931},"puppetlabs.pcp.on-connect":{"rates":{"1":6.888187206729161e-20,"5":0.000040696306647189904,"15":0.011763559299922392,"total":12},"mean":41894024.417897455,"std-dev":109757370.42593586,"percentiles":{"0.75":28301604,"0.95":491414497,"0.99":491414497,"0.999":491414497,"1.0":491414497},"largest":491414497,"smallest":4684587},"puppetlabs.pcp.on-message":{"rates":{"1":2.81218054725494e-7,"5":0.002505776531083986,"15":0.005911137972860372,"total":22},"mean":2758247.0595337525,"std-dev":1528318.9077125278,"percentiles":{"0.75":3784909,"0.95":4057499,"0.99":4543275,"0.999":4543275,"1.0":50167287},"largest":50167287,"smallest":424824},"puppetlabs.pcp.on-send":{"rates":{"1":2.8121805472550845e-7,"5":0.0025082327356744194,"15":0.006163991430922259,"total":26},"mean":349634.0596412754,"std-dev":87947.06774548466,"percentiles":{"0.75":385273,"0.95":471062,"0.99":539459,"0.999":539459,"1.0":30715567},"largest":30715567,"smallest":189908}},"threads":{"ThreadCount":93,"ObjectMonitorUsageSupported":true,"PeakThreadCount":99,"ThreadAllocatedMemoryEnabled":true,"ThreadContentionMonitoringSupported":true,"DaemonThreadCount":18,"CurrentThreadCpuTime":1260545,"ThreadCpuTimeEnabled":true,"ThreadCpuTimeSupported":true,"SynchronizerUsageSupported":true,"TotalStartedThreadCount":365,"ThreadAllocatedMemorySupported":true,"ThreadContentionMonitoringEnabled":false,"CurrentThreadUserTime":0,"CurrentThreadCpuTimeSupported":true},"memory":{"Verbose":true,"ObjectPendingFinalizationCount":0,"HeapMemoryUsage":{"committed":725090304,"init":738197504,"max":725090304,"used":109287864},"N
onHeapMemoryUsage":{"committed":155369472,"init":2555904,"max":-1,"used":152582728}}},"active_alerts":[]},"orchestrator-service":{"service_version":"2019.1.0.62","service_status_version":1,"detail_level":"debug","state":"running","status":{"db_up":true,"classifier_up":true,"rbac_up":true,"puppetserver_up":true,"puppetdb_up":true,"pxp_up":true,"replication":{"mode":"source","status":"none"},"metrics":{"routes":{"routes":{"orchestrator-v1-tasks-:module-:task-name":{"route-id":"orchestrator-v1-tasks-:module-:task-name","count":0,"mean":0,"aggregate":0},"orchestrator-v1-tasks-:module-:task-name-permitted":{"route-id":"orchestrator-v1-tasks-:module-:task-name-permitted","count":0,"mean":0,"aggregate":0},"orchestrator-v1-jobs-:job-id-nodes":{"route-id":"orchestrator-v1-jobs-:job-id-nodes","count":5,"mean":56,"aggregate":280},"orchestrator-v1-scheduled_jobs":{"route-id":"orchestrator-v1-scheduled_jobs","count":0,"mean":0,"aggregate":0},"orchestrator-v1-environments-:environment":{"route-id":"orchestrator-v1-environments-:environment","count":0,"mean":0,"aggregate":0},"orchestrator-v1-environments":{"route-id":"orchestrator-v1-environments","count":0,"mean":0,"aggregate":0},"orchestrator-v1-environments-:environment-applications":{"route-id":"orchestrator-v1-environments-:environment-applications","count":0,"mean":0,"aggregate":0},"other":{"route-id":"other","count":0,"mean":0,"aggregate":0},"orchestrator-v1-plan_jobs-:job-id-events":{"route-id":"orchestrator-v1-plan_jobs-:job-id-events","count":6264,"mean":18,"aggregate":112752},"orchestrator-v1-scheduled_jobs-:job-id":{"route-id":"orchestrator-v1-scheduled_jobs-:job-id","count":0,"mean":0,"aggregate":0},"orchestrator-v1-usage":{"route-id":"orchestrator-v1-usage","count":1,"mean":1272,"aggregate":1272},"orchestrator-v1-inventory-:node":{"route-id":"orchestrator-v1-inventory-:node","count":1,"mean":102,"aggregate":102},"orchestrator-v1-tasks":{"route-id":"orchestrator-v1-tasks","count":2,"mean":206,"aggregate":412},"orchestrator-v1-internal-:command-name":{"route-id":"orchestrator-v1-internal-:command-name","count":0,"mean":0,"aggregate":0},"orchestrator-v1-jobs-:job-id-report":{"route-id":"orchestrator-v1-jobs-:job-id-report","count":0,"mean":0,"aggregate":0},"orchestrator-v1-dumplings-/1-9_d*/":{"route-id":"orchestrator-v1-dumplings-/1-9_d*/","count":0,"mean":0,"aggregate":0},"total":{"route-id":"total","count":6527,"mean":18,"aggregate":117486},"orchestrator-v1-plan_jobs-:job-id":{"route-id":"orchestrator-v1-plan_jobs-:job-id","count":0,"mean":0,"aggregate":0},"orchestrator-v1-jobs-:job-id-catalog":{"route-id":"orchestrator-v1-jobs-:job-id-catalog","count":0,"mean":0,"aggregate":0},"orchestrator-v1-jobs":{"route-id":"orchestrator-v1-jobs","count":4,"mean":84,"aggregate":336},"orchestrator-v1-jobs-:job-id":{"route-id":"orchestrator-v1-jobs-:job-id","count":14,"mean":57,"aggregate":798},"orchestrator-v1-swagger_json":{"route-id":"orchestrator-v1-swagger_json","count":0,"mean":0,"aggregate":0},"orchestrator-v1-command-:command-name":{"route-id":"orchestrator-v1-command-:command-name","count":4,"mean":476,"aggregate":1904},"orchestrator-v1-plan_jobs":{"route-id":"orchestrator-v1-plan_jobs","count":17,"mean":47,"aggregate":799},"orchestrator-v1-dumplings":{"route-id":"orchestrator-v1-dumplings","count":0,"mean":0,"aggregate":0},"/*/":{"route-id":"/*/","count":0,"mean":0,"aggregate":0},"orchestrator-v1-environments-:environment-instances":{"route-id":"orchestrator-v1-environments-:environment-instances","count":0,"mean":0,"aggregate":0},"orchestrator-
v1-//":{"route-id":"orchestrator-v1-//","count":0,"mean":0,"aggregate":0},"orchestrator-v1-status":{"route-id":"orchestrator-v1-status","count":0,"mean":0,"aggregate":0},"orchestrator-v1-jobs-:job-id-events":{"route-id":"orchestrator-v1-jobs-:job-id-events","count":214,"mean":38,"aggregate":8132},"orchestrator-v1-inventory":{"route-id":"orchestrator-v1-inventory","count":1,"mean":88,"aggregate":88}},"sorted-routes":[{"route-id":"total","count":6527,"mean":18,"aggregate":117486},{"route-id":"orchestrator-v1-plan_jobs-:job-id-events","count":6264,"mean":18,"aggregate":112752},{"route-id":"orchestrator-v1-jobs-:job-id-events","count":214,"mean":38,"aggregate":8132},{"route-id":"orchestrator-v1-command-:command-name","count":4,"mean":476,"aggregate":1904},{"route-id":"orchestrator-v1-usage","count":1,"mean":1272,"aggregate":1272},{"route-id":"orchestrator-v1-plan_jobs","count":17,"mean":47,"aggregate":799},{"route-id":"orchestrator-v1-jobs-:job-id","count":14,"mean":57,"aggregate":798},{"route-id":"orchestrator-v1-tasks","count":2,"mean":206,"aggregate":412},{"route-id":"orchestrator-v1-jobs","count":4,"mean":84,"aggregate":336},{"route-id":"orchestrator-v1-jobs-:job-id-nodes","count":5,"mean":56,"aggregate":280},{"route-id":"orchestrator-v1-inventory-:node","count":1,"mean":102,"aggregate":102},{"route-id":"orchestrator-v1-inventory","count":1,"mean":88,"aggregate":88},{"route-id":"orchestrator-v1-tasks-:module-:task-name","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-tasks-:module-:task-name-permitted","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-scheduled_jobs","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-environments-:environment","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-environments","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-environments-:environment-applications","count":0,"mean":0,"aggregate":0},{"route-id":"other","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-scheduled_jobs-:job-id","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-internal-:command-name","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-jobs-:job-id-report","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-dumplings-/1-9_d*/","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-plan_jobs-:job-id","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-jobs-:job-id-catalog","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-swagger_json","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-dumplings","count":0,"mean":0,"aggregate":0},{"route-id":"/*/","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-environments-:environment-instances","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-//","count":0,"mean":0,"aggregate":0},{"route-id":"orchestrator-v1-status","count":0,"mean":0,"aggregate":0}]},"app":{"deploy-queue.length":0,"jobs-created":3,"puppet-run-time":0}}},"active_alerts":[]},"status-service":{"service_version":"1.1.0","service_status_version":1,"detail_level":"debug","state":"running","status":{"experimental":{"jvm-metrics":{"cpu-usage":1.5996801,"up-time-ms":2632100,"gc-cpu-usage":0,"threading":{"thread-count":90,"peak-thread-count":99},"heap-memory":{"committed":725090304,"init":738197504,"max":725090304,"used":108667120},"gc-stats":{"PS Scavenge":{"count":57,"total-time-ms":1504,"last-gc-info":{"duration-ms":11}},"PS 
MarkSweep":{"count":3,"total-time-ms":503,"last-gc-info":{"duration-ms":287}}},"start-time-ms":1556849741982,"file-descriptors":{"used":165,"max":100000},"non-heap-memory":{"committed":155369472,"init":2555904,"max":-1,"used":152588464}}}},"active_alerts":[]},"error":[],"error_count":0,"api-query-start":"2019-05-03T02:59:33Z","api-query-duration":0.299507934}},"foo":{"bar":{}}}} -------------------------------------------------------------------------------- /examples/remote_splunk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nodes: 3 | - name: splunk_hec 4 | config: 5 | transport: remote 6 | remote: 7 | hostname: 8 | token: 9 | port: 8088 -------------------------------------------------------------------------------- /examples/splunk_hec.yaml: -------------------------------------------------------------------------------- 1 | # managed by puppet 2 | --- 3 | "url" : "https://splunk-dev.testing.local:8088/services/collector" 4 | "token" : "13311780-EC29-4DD0-A796-9F0CDC56F2AD" 5 | "pe_console": "puppetdb.foo.bar.com" -------------------------------------------------------------------------------- /files/hec_secrets.yaml.epp: -------------------------------------------------------------------------------- 1 | <%- | Optional[Sensitive[String]] $token = undef, 2 | Optional[Sensitive[String]] $token_summary = undef, 3 | Optional[Sensitive[String]] $token_facts = undef, 4 | Optional[Sensitive[String]] $token_metrics = undef, 5 | Optional[Sensitive[String]] $token_events = undef 6 | | -%> 7 | # managed by splunk_hec module 8 | --- 9 | <% if $token { -%> 10 | "token" : "<%= $token %>" 11 | <% } -%> 12 | <% if $token_summary { -%> 13 | "token_summary" : "<%= $token_summary %>" 14 | <% } -%> 15 | <% if $token_facts { -%> 16 | "token_facts" : "<%= $token_facts %>" 17 | <% } -%> 18 | <% if $token_metrics { -%> 19 | "token_metrics" : "<%= $token_metrics %>" 20 | <% } -%> 21 | <% if $token_events { -%> 22 | "token_events" : "<%= $token_events %>" 23 | <% } -%> 24 | -------------------------------------------------------------------------------- /files/splunk_hec.rb: -------------------------------------------------------------------------------- 1 | #!/opt/puppetlabs/puppet/bin/ruby 2 | 3 | require_relative './splunk_hec/util_splunk_hec' 4 | require 'json' 5 | 6 | data_path = ARGV[0] 7 | 8 | data = JSON.parse(File.read(data_path)) 9 | 10 | data_to_send = '' 11 | 12 | EVENT_SOURCETYPE = INDICES.select { |index| settings['event_types'].include? index } 13 | 14 | EVENT_SOURCETYPE.each_key do |index| 15 | # A nil value indicates that there were no new events. 16 | # A negative value indicates that the sourcetype has been disabled from the pe_event_forwarding module. 17 | next if data[index].nil? || data[index] == -1 18 | data_to_send << extract_events(data[index], INDICES[index], settings["#{index}_data_filter"]) 19 | end 20 | 21 | submit_request data_to_send 22 | -------------------------------------------------------------------------------- /lib/facter/splunk_hec_agent_only_node.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | Facter.add(:splunk_hec_agent_only_node) do 4 | setcode do 5 | if Facter.value(:os)['family'].eql?('RedHat') || Facter.value(:os)['family'].eql?('Suse') 6 | Dir['/etc/sysconfig/*puppetserver'].empty? 7 | else 8 | Dir['/etc/default/*puppetserver'].empty? 
9 | end 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /lib/facter/splunk_hec_is_pe.rb: -------------------------------------------------------------------------------- 1 | Facter.add(:splunk_hec_is_pe) do 2 | setcode do 3 | File.readable?('/opt/puppetlabs/server/pe_version') 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /lib/puppet/application/splunk_hec.rb: -------------------------------------------------------------------------------- 1 | require 'puppet/application' 2 | require File.dirname(__FILE__) + '/../util/splunk_hec.rb' 3 | 4 | # rubocop:disable Style/ClassAndModuleCamelCase 5 | # splunk_hec.rb 6 | class Puppet::Application::Splunk_hec < Puppet::Application 7 | include Puppet::Util::Splunk_hec 8 | 9 | RUN_HELP = _("Run 'puppet splunk_hec --help' for more details").freeze 10 | 11 | run_mode :master 12 | 13 | # Options for splunk_hec 14 | 15 | option('--sourcetype SOURCETYPE') do |format| 16 | options[:sourcetype] = format.downcase.to_sym 17 | end 18 | 19 | option('--pe_metrics') 20 | 21 | option('--saved_report') 22 | 23 | option('--debug', '-d') 24 | 25 | def get_name(servername) 26 | name = if servername.to_s == '127-0-0-1' 27 | Puppet[:certname].to_s 28 | else 29 | servername 30 | end 31 | name.to_s 32 | end 33 | 34 | def send_pe_metrics(data, sourcetype) 35 | timestamp = sourcetypetime(data['timestamp']) 36 | event_template = { 37 | 'time' => timestamp, 38 | 'sourcetype' => sourcetype.to_s, 39 | 'event' => {}, 40 | } 41 | data['servers'].each_key do |server| 42 | name = get_name(server.to_s) 43 | content = data['servers'][server.to_s] 44 | content.each_key do |serv| 45 | event = event_template.clone 46 | event['host'] = name 47 | if content[serv.to_s].is_a?(Array) 48 | event['event'] = {} 49 | event['event']['metrics'] = [] 50 | content[serv.to_s].each do |metric| 51 | event['event']['metrics'] << { metric['name'].to_s => metric['value'].to_f } 52 | end 53 | else 54 | event['event'] = content[serv.to_s] 55 | end 56 | event['event']['pe_console'] = pe_console 57 | event['event']['pe_service'] = serv.to_s 58 | Puppet.info 'Submitting metrics to Splunk' 59 | submit_request(event) 60 | end 61 | end 62 | end 63 | 64 | def upload_report(data, sourcetype) 65 | data['sourcetype'] = sourcetype 66 | submit_request(data) 67 | end 68 | 69 | def main 70 | data = begin 71 | STDIN.each_line.map { |l| JSON.parse(l) } 72 | rescue StandardError => e 73 | Puppet.info 'Unable to parse json from stdin' 74 | Puppet.info e.message 75 | Puppet.info e.backtrace.inspect 76 | 77 | [] 78 | end 79 | 80 | sourcetype = options[:sourcetype].to_s 81 | data.each do |server| 82 | send_pe_metrics(server, sourcetype) if options[:pe_metrics] 83 | upload_report(server, sourcetype) if options[:saved_report] 84 | end 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /lib/puppet/functions/splunk_hec/secure.rb: -------------------------------------------------------------------------------- 1 | # Custom function to mark sensitive data utilized by 2 | # this module as Sensitive types in the Puppet language. 3 | # Sensitive data is redacted from Puppet logs and reports. 4 | Puppet::Functions.create_function(:'splunk_hec::secure') do 5 | dispatch :secure do 6 | param 'Hash', :secrets 7 | end 8 | 9 | def secure(secrets) 10 | secrets.each do |key, value| 11 | unless value.nil? 
12 | secrets[key] = Puppet::Pops::Types::PSensitiveType::Sensitive.new(value) 13 | end 14 | end 15 | secrets 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/puppet/indirector/facts/splunk_hec.rb: -------------------------------------------------------------------------------- 1 | require 'puppet/indirector/facts/yaml' 2 | require 'puppet/util/profiler' 3 | require File.dirname(__FILE__) + '/../../util/splunk_hec.rb' 4 | 5 | # rubocop:disable Style/ClassAndModuleCamelCase 6 | # splunk_hec.rb 7 | class Puppet::Node::Facts::Splunk_hec < Puppet::Node::Facts::Yaml 8 | desc "Save facts to Splunk over HEC and then to yamlcache. 9 | It uses PuppetDB to retrieve facts for catalog compilation." 10 | 11 | include Puppet::Util::Splunk_hec 12 | 13 | def get_trusted_info(node) 14 | trusted = Puppet.lookup(:trusted_information) do 15 | Puppet::Context::TrustedInformation.local(node) 16 | end 17 | trusted.to_h 18 | end 19 | 20 | def profile(message, metric_id, &block) 21 | message = 'Splunk_hec: ' + message 22 | arity = Puppet::Util::Profiler.method(:profile).arity 23 | case arity 24 | when 1 25 | Puppet::Util::Profiler.profile(message, &block) 26 | when 2, -2 27 | Puppet::Util::Profiler.profile(message, metric_id, &block) 28 | end 29 | end 30 | 31 | def save(request) 32 | # yaml cache goes first 33 | super(request) 34 | 35 | profile('splunk_facts#save', [:splunk, :facts, :save, request.key]) do 36 | host = request.instance.name.dup 37 | incoming_facts = request.instance.values.dup 38 | transaction_uuid = request.options[:transaction_uuid] 39 | 40 | hardcoded = [ 41 | 'os', 42 | 'memory', 43 | 'puppetversion', 44 | 'system_uptime', 45 | 'load_averages', 46 | 'ipaddress', 47 | 'fqdn', 48 | ] 49 | 50 | # lets ensure user provided fact names are downcased 51 | allow_list = (settings['facts.allowlist'].map(&:downcase) + hardcoded).uniq 52 | block_list = settings['facts.blocklist'].nil? ? [] : settings['facts.blocklist'].map(&:downcase) 53 | # lets rescue any hardcoded facts that have been added to the blocklist 54 | rescued_facts = block_list.select { |k| hardcoded.include?(k) } 55 | 56 | facts = if allow_list.include?('all.facts') 57 | unless rescued_facts.empty? 
58 | Puppet.warning "Rescued required facts - Please remove the following facts from splunk_hec::facts_blocklist: #{rescued_facts}" 59 | end 60 | final_block = block_list.reject { |k| hardcoded.include?(k) } 61 | incoming_facts.reject { |k, _v| final_block.include?(k) } 62 | else 63 | incoming_facts.select { |k, _v| allow_list.include?(k) } 64 | end 65 | 66 | facts['trusted'] = get_trusted_info(request.node) 67 | facts['environment'] = request.options[:environment] || request.environment.to_s 68 | facts['producer'] = Puppet[:certname] 69 | facts['pe_console'] = pe_console 70 | facts['transaction_uuid'] = transaction_uuid 71 | 72 | event = { 73 | 'host' => host, 74 | 'sourcetype' => 'puppet:facts', 75 | 'event' => facts 76 | } 77 | 78 | Puppet.info "Submitting facts to Splunk at #{get_splunk_url('facts')}" 79 | submit_request event 80 | rescue StandardError => e 81 | Puppet.err "Could not send facts to Splunk: #{e}\n#{e.backtrace}" 82 | end 83 | end 84 | end 85 | -------------------------------------------------------------------------------- /lib/puppet/reports/splunk_hec.rb: -------------------------------------------------------------------------------- 1 | require File.dirname(__FILE__) + '/../util/splunk_hec.rb' 2 | 3 | Puppet::Reports.register_report(:splunk_hec) do 4 | desc 'Submits just a report summary to Splunk HEC endpoint' 5 | # Next, define and configure the report processor. 6 | 7 | include Puppet::Util::Splunk_hec 8 | def process 9 | # This prevents the processor from running if disabled 10 | return 0 if settings['disabled'] 11 | 12 | # This prevents the processor from running when there are no changes to the report 13 | if settings['only_changes'] && (status != 'changed') 14 | Puppet.warning "Not submitting report to Splunk, report contains no changes! 
Status: #{status}" 15 | return 0 16 | end 17 | 18 | # now we can create the event with the timestamp from the report 19 | epoch = sourcetypetime(time, metrics['time']['total']) 20 | 21 | # pass simple metrics for report processing later 22 | # STATES = [:skipped, :failed, :failed_to_restart, :restarted, :changed, :out_of_sync, :scheduled, :corrective_change] 23 | metrics = { 24 | 'time' => { 25 | 'config_retrieval' => self.metrics['time']['config_retrieval'], 26 | 'fact_generation' => self.metrics['time']['fact_generation'], 27 | 'catalog_application' => self.metrics['time']['catalog_application'], 28 | 'total' => self.metrics['time']['total'], 29 | }, 30 | 'resources' => { 31 | 'total' => self.metrics['resources']['total'], 32 | }, 33 | 'changes' => { 34 | 'total' => self.metrics['changes']['total'], 35 | }, 36 | } 37 | 38 | event = { 39 | 'host' => host, 40 | 'time' => epoch, 41 | 'sourcetype' => 'puppet:summary', 42 | 'event' => { 43 | 'cached_catalog_status' => cached_catalog_status, 44 | 'catalog_uuid' => catalog_uuid, 45 | 'certname' => host, 46 | 'code_id' => code_id, 47 | 'configuration_version' => configuration_version, 48 | 'corrective_change' => corrective_change, 49 | 'environment' => environment, 50 | 'job_id' => job_id, 51 | 'metrics' => metrics, 52 | 'noop' => noop, 53 | 'noop_pending' => noop_pending, 54 | 'pe_console' => pe_console, 55 | 'producer' => Puppet[:certname], 56 | 'puppet_version' => puppet_version, 57 | 'report_format' => report_format, 58 | 'server_used' => server_used, 59 | 'status' => status, 60 | 'time' => (time + metrics['time']['total']).iso8601(3), 61 | 'transaction_uuid' => transaction_uuid, 62 | }, 63 | } 64 | 65 | include_logs = false 66 | 67 | if settings['include_logs_status'] 68 | include_logs_status = settings['include_logs_status'] 69 | if include_logs_status.include? status 70 | include_logs = true 71 | end 72 | end 73 | 74 | if settings['include_logs_catalog_failure'] && catalog_uuid.to_s.strip.empty? 75 | include_logs = true 76 | end 77 | 78 | if settings['include_logs_corrective_change'] && corrective_change 79 | include_logs = true 80 | end 81 | 82 | if include_logs 83 | event['event']['logs'] = logs 84 | end 85 | 86 | # the i'm tired way to prevent doing this twice 87 | add_resources = false 88 | 89 | if settings['include_resources_status'] 90 | include_resources_status = settings['include_resources_status'] 91 | if include_resources_status.include? status 92 | add_resources = true 93 | end 94 | end 95 | 96 | if settings['include_resources_corrective_change'] && corrective_change 97 | add_resources = true 98 | end 99 | 100 | if add_resources 101 | resource_events_hash = resource_statuses.select { |_k, v| v.events.count > 0 } 102 | # We may want to return this as a hash or an array. 
Splunk deals better 103 | # with arrays but we need to give people the choice so as to not break 104 | # existing reports 105 | event['event']['resource_events'] = if settings['summary_resources_format'] == 'array' 106 | resource_events_hash.map { |_k, v| v } 107 | else 108 | resource_events_hash 109 | end 110 | end 111 | 112 | Puppet.info "Submitting report to Splunk at #{get_splunk_url('summary')}" 113 | submit_request event 114 | 115 | if record_event 116 | store_event event 117 | end 118 | rescue StandardError => e 119 | Puppet.err "Could not send report to Splunk: #{e}\n#{e.backtrace}" 120 | end 121 | end 122 | -------------------------------------------------------------------------------- /lib/puppet/util/splunk_hec.rb: -------------------------------------------------------------------------------- 1 | require 'puppet' 2 | require 'puppet/util' 3 | require 'fileutils' 4 | require 'net/http' 5 | require 'uri' 6 | require 'yaml' 7 | require 'json' 8 | require 'time' 9 | 10 | # rubocop:disable Style/ClassAndModuleCamelCase 11 | # splunk_hec.rb 12 | module Puppet::Util::Splunk_hec 13 | def settings 14 | return @settings if @settings 15 | @settings_file = Puppet[:confdir] + '/splunk_hec/settings.yaml' 16 | 17 | @settings = YAML.load_file(@settings_file) 18 | end 19 | 20 | def secrets 21 | return @secrets if @secrets 22 | @secrets_file = Puppet[:confdir] + '/splunk_hec/hec_secrets.yaml' 23 | 24 | @secrets = YAML.load_file(@secrets_file) 25 | end 26 | 27 | def build_ca_store(cert_store_file_path) 28 | store = OpenSSL::X509::Store.new 29 | store.add_file(cert_store_file_path) 30 | store 31 | end 32 | 33 | def create_http(source_type) 34 | splunk_url = get_splunk_url(source_type) 35 | @uri = URI.parse(splunk_url) 36 | timeout = settings['timeout'] || '1' 37 | http = Net::HTTP.new(@uri.host, @uri.port) 38 | http.open_timeout = timeout.to_i 39 | http.read_timeout = timeout.to_i 40 | http.use_ssl = @uri.scheme == 'https' 41 | if http.use_ssl? 42 | if (settings['ssl_ca'] && !settings['ssl_ca'].empty?) 
|| settings['include_system_cert_store'] 43 | ssl_ca_file = if settings['ssl_ca'] 44 | File.join(Puppet[:confdir], 'splunk_hec', settings['ssl_ca']) 45 | elsif !settings['ssl_ca'] && Facter.value(:os)['family'].eql?('RedHat') 46 | '/etc/ssl/certs/ca-bundle.crt' 47 | elsif !settings['ssl_ca'] && Facter.value(:os)['family'].eql?('Suse') 48 | '/etc/ssl/ca-bundle.pem' 49 | else 50 | '/etc/ssl/certs/ca-certificates.crt' 51 | end 52 | message = if settings['ssl_ca'] 53 | "will verify #{splunk_url} SSL identity against Splunk HEC SSL #{settings['ssl_ca']}" 54 | else 55 | "will verify #{splunk_url} SSL identity against system store" 56 | end 57 | 58 | unless File.exist?(ssl_ca_file) && !File.zero?(ssl_ca_file) 59 | raise Puppet::Error, 60 | "CA file #{ssl_ca_file} is an empty file or does not exist" 61 | end 62 | ssl_ca = build_ca_store(ssl_ca_file) 63 | http.cert_store = ssl_ca 64 | http.verify_mode = OpenSSL::SSL::VERIFY_PEER 65 | Puppet.warn_once('hec_ssl', 'ssl_ca', message, :default, :default, :info) 66 | else 67 | message = "will NOT verify #{splunk_url} SSL identity" 68 | Puppet.warn_once('hec_ssl', 'no_ssl', message, :default, :default, :info) 69 | http.verify_mode = OpenSSL::SSL::VERIFY_NONE 70 | end 71 | end 72 | 73 | http 74 | end 75 | 76 | def send_with_fips(body, source_type, token) 77 | splunk_url = URI.parse(get_splunk_url(source_type)) 78 | headers = { 79 | 'Authorization' => "Splunk #{token}", 80 | 'Content-Type' => 'application/json' 81 | } 82 | ssl_options = { 83 | ssl_context: { 84 | certificate_revocation: settings['certificate_revocation'], 85 | verify_peer: settings['verify_peer'] 86 | } 87 | } 88 | 89 | client = Puppet.runtime[:http] 90 | client.post(splunk_url, body.to_json, headers: headers, options: ssl_options) 91 | end 92 | 93 | def send_with_nonfips(body, source_type, token) 94 | http = create_http(source_type) 95 | req = Net::HTTP::Post.new(@uri.path.to_str) 96 | req.add_field('Authorization', "Splunk #{token}") 97 | req.add_field('Content-Type', 'application/json') 98 | req.content_type = 'application/json' 99 | req.body = body.to_json 100 | http.request(req) 101 | end 102 | 103 | def submit_request(body) 104 | # we want users to be able to provide different tokens per sourcetype if they want 105 | source_type = body['sourcetype'].split(':')[1] 106 | token_name = "token_#{source_type}" 107 | token = secrets[token_name] || secrets['token'] || raise(Puppet::Error, 'Must provide token parameter to splunk class') 108 | if settings['fips_enabled'] 109 | send_with_fips(body, source_type, token) 110 | else 111 | send_with_nonfips(body, source_type, token) 112 | end 113 | end 114 | 115 | def store_event(event) 116 | host = event['host'] 117 | epoch = event['time'].to_f 118 | 119 | timestamp = Time.at(epoch).to_datetime 120 | 121 | filename = timestamp.strftime('%F-%H-%M-%S-%L') + '.json' 122 | 123 | dir = File.join(Puppet[:reportdir], host) 124 | 125 | unless Puppet::FileSystem.exist?(dir) 126 | FileUtils.mkdir_p(dir) 127 | FileUtils.chmod_R(0o750, dir) 128 | end 129 | 130 | file = File.join(dir, filename) 131 | 132 | begin 133 | File.open(file, 'w') do |f| 134 | f.write(event.to_json) 135 | end 136 | rescue => detail 137 | Puppet.log_exception(detail, "Could not write report for #{host} at #{file}: #{detail}") 138 | end 139 | end 140 | 141 | private 142 | 143 | def get_splunk_url(source_type) 144 | url_name = "url_#{source_type}" 145 | settings[url_name] || settings['url'] || raise(Puppet::Error, 'Must provide url parameter to splunk class') 146 | end 147 | 148 | def 
pe_console 149 | settings['pe_console'] || Puppet[:certname] 150 | end 151 | 152 | def record_event 153 | result = if settings['record_event'] == 'true' 154 | true 155 | else 156 | false 157 | end 158 | result 159 | end 160 | 161 | # standard function to make sure we're using the same time format our sourcetypes are set to parse 162 | def sourcetypetime(time, duration = 0) 163 | parsed_time = time.is_a?(String) ? Time.parse(time) : time 164 | total = Time.parse((parsed_time + duration).iso8601(3)) 165 | '%10.3f' % total.to_f 166 | end 167 | 168 | # Legacy function to parse pretty-printed output produced by puppet_metrics_collector prior to v5.3.0 169 | def parse_legacy_metrics(input) 170 | cleaned = input.gsub("\n}{\n", "\n},{\n") 171 | cleaned = cleaned.insert(0, '[') 172 | cleaned = cleaned.insert(-1, ']') 173 | 174 | result = begin 175 | JSON.parse(cleaned) 176 | rescue StandardError => e 177 | Puppet.info 'Unable to parse json from stdin' 178 | Puppet.info e.message 179 | Puppet.info e.backtrace.inspect 180 | 181 | [] 182 | end 183 | 184 | result 185 | end 186 | end 187 | -------------------------------------------------------------------------------- /manifests/v2_cleanup.pp: -------------------------------------------------------------------------------- 1 | # @summary Class to remove old configuration files 2 | # 3 | # @api private 4 | # 5 | # Private subclass to remove configurations utilized by v1 of this module 6 | # 7 | # @example 8 | # include splunk_hec::v2_cleanup 9 | class splunk_hec::v2_cleanup { 10 | file { "${settings::confdir}/splunk_hec.yaml": 11 | ensure => absent, 12 | } 13 | 14 | file { "${settings::confdir}/splunk_hec_routes.yaml": 15 | ensure => absent, 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "puppetlabs-splunk_hec", 3 | "version": "2.1.0", 4 | "author": "puppetlabs", 5 | "summary": "Puppet report processor using Splunk HEC", 6 | "license": "Apache-2.0", 7 | "source": "https://github.com/puppetlabs/puppetlabs-splunk_hec", 8 | "project_page": "https://github.com/puppetlabs/puppetlabs-splunk_hec", 9 | "issues_url": "https://github.com/puppetlabs/puppetlabs-splunk_hec/issues", 10 | "dependencies": [ 11 | { 12 | "name": "puppetlabs-puppet_metrics_collector", 13 | "version_requirement": ">= 6.0.0 < 9.0.0" 14 | }, 15 | { 16 | "name": "puppetlabs-pe_event_forwarding", 17 | "version_requirement": ">= 1.1.0 < 3.0.0" 18 | } 19 | ], 20 | "operatingsystem_support": [ 21 | { 22 | "operatingsystem": "AlmaLinux", 23 | "operatingsystemrelease": [ 24 | "8", 25 | "9" 26 | ] 27 | }, 28 | { 29 | "operatingsystem": "AmazonLinux", 30 | "operatingsystemrelease": [ 31 | "2" 32 | ] 33 | }, 34 | { 35 | "operatingsystem": "Debian", 36 | "operatingsystemrelease": [ 37 | "9", 38 | "10", 39 | "11" 40 | ] 41 | }, 42 | { 43 | "operatingsystem": "OracleLinux", 44 | "operatingsystemrelease": [ 45 | "7", 46 | "8" 47 | ] 48 | }, 49 | { 50 | "operatingsystem": "RedHat", 51 | "operatingsystemrelease": [ 52 | "7", 53 | "8", 54 | "9" 55 | ] 56 | }, 57 | { 58 | "operatingsystem": "RockyLinux", 59 | "operatingsystemrelease": [ 60 | "8", 61 | "9" 62 | ] 63 | }, 64 | { 65 | "operatingsystem": "SLES", 66 | "operatingsystemrelease": [ 67 | "12", 68 | "15" 69 | ] 70 | }, 71 | { 72 | "operatingsystem": "Ubuntu", 73 | "operatingsystemrelease": [ 74 | "18.04", 75 | "20.04", 76 | "22.04" 77 | ] 78 | } 79 | ], 80 | "requirements": [ 81 | { 82 | 
"name": "puppet", 83 | "version_requirement": ">= 6.16.0 < 9.0.0" 84 | } 85 | ], 86 | "pdk-version": "3.3.0", 87 | "template-url": "https://github.com/puppetlabs/pdk-templates#3.3.0", 88 | "template-ref": "tags/3.3.0-0-g5d17ec1" 89 | } 90 | -------------------------------------------------------------------------------- /pdk.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ignore: [] 3 | -------------------------------------------------------------------------------- /plans/acceptance/oss_server_setup.pp: -------------------------------------------------------------------------------- 1 | # This plan installs open source Puppet adds Puppet to the path variable, and 2 | # adds a puppet hosts entry. It also restarts the Puppet service and starts a 3 | # puppet agent run. 4 | # @summary Installs open source Puppet. 5 | # @api private 6 | # 7 | # @param [Optional[String]] collection 8 | # puppet version collection name 9 | plan splunk_hec::acceptance::oss_server_setup( 10 | Optional[String] $collection = 'puppet7' 11 | ) { 12 | # get server 13 | $server = get_targets('*').filter |$n| { $n.vars['role'] == 'server' } 14 | $localhost = get_targets('localhost') 15 | 16 | # get facts 17 | $puppetserver_facts = facts($server[0]) 18 | $platform = $puppetserver_facts['platform'] 19 | 20 | # machines are not yet ready at time of installing the puppetserver, so we wait 15s 21 | run_command('sleep 15', $localhost) 22 | 23 | # install puppetserver and start on master 24 | run_task( 25 | 'provision::install_puppetserver', 26 | $server, 27 | 'install and configure server', 28 | { 'collection' => $collection, 'platform' => $platform } 29 | ) 30 | 31 | $os_name = $puppetserver_facts['provisioner'] ? { 32 | 'docker' => split($puppetserver_facts['platform'], Regexp['[/:-]'])[1], 33 | 'docker_exp' => split($puppetserver_facts['platform'], Regexp['[/:-]'])[1], 34 | default => split($puppetserver_facts['platform'], Regexp['[/:-]'])[0] 35 | } 36 | 37 | $os_family = $os_name ? 
{ 38 | /(^redhat|rhel|centos|scientific|oraclelinux)/ => 'redhat', 39 | /(^debian|ubuntu)/ => 'debian', 40 | default => 'unsupported' 41 | } 42 | 43 | if $os_family == 'unsupported' { 44 | fail_plan('Not supported platform!') 45 | } 46 | 47 | if $os_family == 'debian' { 48 | run_task('provision::fix_secure_path', $server, 'fix secure path') 49 | } 50 | 51 | run_command('echo "export PATH=$PATH:/opt/puppetlabs/bin" > /etc/environment', $server) 52 | run_command('echo "127.0.0.1 puppet" >> /etc/hosts', $server) 53 | 54 | $fqdn = run_command('facter fqdn', $server).to_data[0]['value']['stdout'] 55 | run_task('puppet_conf', $server, action => 'set', section => 'main', setting => 'server', value => $fqdn) 56 | 57 | run_command('systemctl start puppetserver', $server, '_catch_errors' => true) 58 | run_command('systemctl enable puppetserver', $server, '_catch_errors' => true) 59 | run_command('puppet agent -t', $server, '_catch_errors' => true) 60 | } 61 | -------------------------------------------------------------------------------- /plans/acceptance/pe_server_setup.pp: -------------------------------------------------------------------------------- 1 | # @summary Installs a PE server 2 | # @api private 3 | # 4 | # Installs a Puppet Enterprise server for acceptance testing 5 | # 6 | # @example 7 | # splunk_hec::acceptance::pe_server_setup 8 | # 9 | # @param [Optional[String]] version 10 | # Sets the version of PE to install 11 | # @param [Optional[Hash]] pe_settings 12 | # Sets PE settings, including the password 13 | plan splunk_hec::acceptance::pe_server_setup( 14 | Optional[String] $version = '2021.7.5', 15 | Optional[Hash] $pe_settings = { password => 'puppetlabsPi3!', configure_tuning => false } 16 | ) { 17 | # machines are not yet ready at time of installing the puppetserver, so we wait 15s 18 | $localhost = get_targets('localhost') 19 | run_command('sleep 15', $localhost) 20 | 21 | # identify the PE server node 22 | $puppet_server = get_targets('*').filter |$n| { $n.vars['role'] == 'server' } 23 | 24 | # extract pe version from matrix_from_metadata_v3 output 25 | $pe_version = regsubst($version, '-puppet_enterprise', '') 26 | 27 | # install pe server 28 | run_plan( 29 | 'deploy_pe::provision_master', 30 | $puppet_server, 31 | 'version' => $pe_version, 32 | 'pe_settings' => $pe_settings 33 | ) 34 | 35 | $cmd = @("CMD") 36 | echo 'puppetlabsPi3!' 
| puppet access login -l 1y --username admin 37 | puppet infrastructure tune | sed "s,\\x1B\\[[0-9;]*[a-zA-Z],,g" > /etc/puppetlabs/code/environments/production/data/common.yaml 38 | puppet agent -t 39 | | CMD 40 | 41 | run_command($cmd, $puppet_server, '_catch_errors' => true) 42 | } 43 | -------------------------------------------------------------------------------- /plans/acceptance/provision_machines.pp: -------------------------------------------------------------------------------- 1 | # @summary Provisions machines 2 | # @api private 3 | # 4 | # @param [Optional[String]] using 5 | # provision service 6 | # @param [Optional[String]] image 7 | # os image 8 | plan splunk_hec::acceptance::provision_machines( 9 | Optional[String] $using = 'abs', 10 | Optional[String] $image = 'centos-7-x86_64' 11 | ) { 12 | # provision machines, set roles 13 | run_task("provision::${using}", 'localhost', action => 'provision', platform => $image, vars => 'role: server') 14 | } 15 | -------------------------------------------------------------------------------- /plans/acceptance/server_setup.pp: -------------------------------------------------------------------------------- 1 | # @summary Sets up a Puppet server (PE or open source) for acceptance testing 2 | # @api private 3 | # 4 | # Installs Puppet Enterprise, or open source Puppet Server when a '-nightly' collection is requested 5 | # 6 | # @example 7 | # splunk_hec::acceptance::server_setup 8 | # 9 | # @param [Optional[String]] puppet_version 10 | # Sets the PE version to install, or a '-nightly' collection name for open source Puppet 11 | plan splunk_hec::acceptance::server_setup( 12 | Optional[String] $puppet_version = '2021.7.5', 13 | ) { 14 | # machines are not yet ready at time of installing the puppetserver, so we wait 15s 15 | $localhost = get_targets('localhost') 16 | run_command('sleep 15', $localhost) 17 | 18 | if $puppet_version =~ /-nightly/ { 19 | run_plan( 20 | 'splunk_hec::acceptance::oss_server_setup', 21 | 'collection' => $puppet_version 22 | ) 23 | } else { 24 | run_plan( 25 | 'splunk_hec::acceptance::pe_server_setup', 26 | 'version' => $puppet_version 27 | ) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /plans/examples/apply_example.pp: -------------------------------------------------------------------------------- 1 | # Example of submitting a report from the apply() function to Splunk. 
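# A minimal invocation sketch, assuming a Bolt project with this module (plus an
# ntp module, which the apply block includes) on its modulepath, PuppetDB configured
# for puppetdb_query, and the PCP transport available; the plan_guid value below is
# only a placeholder:
#
#   bolt plan run splunk_hec::examples::apply_example \
#     plan_guid=00000000-0000-0000-0000-000000000001 plan_name=apply_example
#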
2 | # @param [Optional[String[1]]] plan_guid 3 | # A guid used to identify invocation of the plan (should change each run) 4 | # @param [Optional[String[1]]] plan_name 5 | # The name of the plan being run (shouldn't change each run) 6 | plan splunk_hec::examples::apply_example ( 7 | Optional[String[1]] $plan_guid, 8 | Optional[String[1]] $plan_name, 9 | ) { 10 | $result_ca = puppetdb_query('nodes [ certname ]{}') 11 | $ca = $result_ca.map |$r| { $r["certname"] } 12 | $pcpca = $ca.map |$n| { "pcp://${n}" } 13 | 14 | apply_prep ($pcpca) 15 | 16 | $results = apply ($pcpca) { 17 | include ntp 18 | notify { 'hello config test': } 19 | } 20 | 21 | $results.each |$result| { 22 | $node = $result.target.name 23 | if $result.ok { 24 | #notice("${node} returned a value: ${result.report}") 25 | notice("sending ${node}'s report to splunk") 26 | # this will use facts[clientcert] because we don't pass host 27 | run_task('splunk_hec::bolt_apply', 'splunk_hec', 28 | report => $result.report, 29 | facts => $result.target.facts, 30 | plan_guid => $plan_guid, 31 | plan_name => $plan_name 32 | ) 33 | # this will set host to $node value - note: this will include the URI of pcp://$name vs $name as result from $clientcert value 34 | # run_task("splunk_hec::bolt_apply", 'splunk_bolt_apply', report => $result.report, facts => $result.target.facts, host => $node) 35 | } else { 36 | notice("${node} errored with a message: ${result.error}") 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /plans/examples/result_example.pp: -------------------------------------------------------------------------------- 1 | # An example of submitting a Task or Functions results to Splunk as a Task. 2 | plan splunk_hec::examples::result_example { 3 | $result_ca = puppetdb_query('nodes [ certname ]{}') 4 | $ca = $result_ca.map |$r| { $r["certname"] } 5 | $pcpca = $ca.map |$n| { "pcp://${n}" } 6 | 7 | $results = run_task('package', $pcpca, action => status, name => 'splunkforwarder') 8 | 9 | $results.each |$result| { 10 | $node = $result.target.name 11 | if $result.ok { 12 | notice("${node} returned a value: ${result.value}") 13 | notice("sending ${node}'s report to splunk") 14 | $result_hash = { 15 | value => $result.value, 16 | target => $result.target.name, 17 | } 18 | run_task('splunk_hec::bolt_result', 'splunk_hec', result => $result_hash) 19 | } else { 20 | notice("${node} errored with a message: ${result.error}") 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /provision.yaml: -------------------------------------------------------------------------------- 1 | acceptance: 2 | provisioner: abs 3 | images: 4 | - centos-7-x86_64 5 | - redhat-8-x86_64 6 | - redhat-9-x86_64 7 | - ubuntu-2004-x86_64 8 | vars: | 9 | role: server 10 | fips_acceptance: 11 | provisioner: abs 12 | images: 13 | - redhat-fips-7-x86_64 14 | - ubuntu-2004-x86_64 15 | -------------------------------------------------------------------------------- /rakelib/helpers.rake: -------------------------------------------------------------------------------- 1 | namespace :acceptance do 2 | require_relative '../spec/support/acceptance/helpers' 3 | include TargetHelpers 4 | 5 | desc 'Provisions the VMs. 
This is currently just the puppetserver' 6 | task :provision_vms do 7 | if File.exist?('spec/fixtures/litmus_inventory.yaml') 8 | # Check if a puppetserver VM's already been setup 9 | begin 10 | uri = puppetserver.uri 11 | puts("A puppetserver VM at '#{uri}' has already been set up") 12 | next 13 | rescue TargetNotFoundError 14 | # Pass-thru, this means that we haven't set up the puppetserver VM 15 | end 16 | end 17 | 18 | provision_list = ENV['PROVISION_LIST'] || 'acceptance' 19 | Rake::Task['litmus:provision_list'].invoke(provision_list) 20 | inventory_hash = inventory_hash_from_inventory_file 21 | begin 22 | # If a fips node is present, assign the correct roles to the fips node and the splunk node 23 | fips_node = inventory_hash['groups'].detect {|g| g['name'] == 'ssh_nodes'}['targets'].detect {|t| t['facts']['platform'].match(/fips/)} 24 | fips_node['vars'] = {'role' => 'server'} 25 | splunk_node = inventory_hash['groups'].detect {|g| g['name'] == 'ssh_nodes'}['targets'].detect {|t| !t['facts']['platform'].match(/fips/)} 26 | splunk_node['vars'] = {'role' => 'splunk_node'} 27 | rescue => exception 28 | puts 'no fips node found.' 29 | end 30 | 31 | # Remove bad username and password keys as a result of a provision module bug 32 | inventory_hash['groups'].detect {|g| g['name'] == 'ssh_nodes'}['targets'].each do |target| 33 | target['config']['ssh'].delete("password") if target['config']['ssh']['password'].nil? 34 | target['config']['ssh'].delete("user") if target['config']['ssh']['user'].nil? 35 | end 36 | write_to_inventory_file(inventory_hash, 'spec/fixtures/litmus_inventory.yaml') 37 | end 38 | 39 | desc 'clone puppetlabs-pe_event_forwarding module to test host' 40 | task :upload_pe_event_forwarding_module do 41 | puppetserver.each do |target| 42 | message = "Installing puppetlabs-pe_event_forwarding module on #{target.uri} !" 43 | spinner = start_spinner(message) 44 | target.run_shell('rm /etc/puppetlabs/code/environments/production/modules/pe_event_forwarding -rf', expect_failures: true) 45 | target.bolt_upload_file('./spec/fixtures/modules/pe_event_forwarding', '/etc/puppetlabs/code/environments/production/modules') 46 | stop_spinner(spinner) 47 | end 48 | end 49 | 50 | desc 'Sets up PE on the server' 51 | task :setup_pe do 52 | include ::BoltSpec::Run 53 | inventory_hash = inventory_hash_from_inventory_file 54 | target_nodes = find_targets(inventory_hash, 'ssh_nodes') 55 | 56 | config = { 'modulepath' => File.join(Dir.pwd, 'spec', 'fixtures', 'modules') } 57 | params = {} 58 | params.merge(puppet_version: ENV['PUPPET_VERSION']) unless ENV['PUPPET_VERSION'].nil? 59 | 60 | message = "Installing Puppet Enterprise on targets in litmus_inventory.yaml !" 
61 | install_spinner = start_spinner(message) 62 | bolt_result = run_plan('splunk_hec::acceptance::server_setup', params, config: config, inventory: inventory_hash.clone) 63 | stop_spinner(install_spinner) 64 | puts bolt_result['status'] 65 | end 66 | 67 | desc 'Sets up the Splunk instance' 68 | task :setup_splunk_targets do 69 | inventory_hash = LitmusHelpers.inventory_hash_from_inventory_file 70 | splunk_setup_target = begin 71 | splunk_node 72 | rescue TargetNotFoundError 73 | puppetserver 74 | end 75 | splunk_setup_target.each_with_index do |splunk_target, i| 76 | message = "Starting the Splunk instance at the puppetserver (#{splunk_target.uri})" 77 | splunk_spinner = start_spinner(message) 78 | splunk_target.bolt_upload_file('./spec/support/acceptance/splunk', '/tmp/splunk') 79 | result = splunk_target.bolt_run_script('spec/support/acceptance/start_splunk_instance.sh').stdout.chomp 80 | stop_spinner(splunk_spinner) 81 | puts result 82 | 83 | # HEC token is hard coded because it will always be the same in the splunk container 84 | instance, hec_token = "#{splunk_target.uri}:8088", 'abcd1234' 85 | 86 | # Update the inventory file 87 | message = "Updating the inventory.yaml file with the Splunk HEC credentials for #{splunk_target.uri}" 88 | inventory_spinner = start_spinner(message) 89 | splunk_group = inventory_hash['groups'].find { |g| g['name'] =~ %r{splunk} } 90 | unless splunk_group 91 | splunk_group = { 'name' => 'splunk_nodes' } 92 | inventory_hash['groups'].push(splunk_group) 93 | splunk_group['targets'] = [] 94 | end 95 | splunk_group['targets'][i] = { 96 | 'uri' => instance, 97 | 'config' => { 98 | 'transport' => 'remote', 99 | 'remote' => { 100 | 'hec_token' => hec_token, 101 | } 102 | }, 103 | 'facts' => { 104 | 'platform' => 'splunk_hec', 105 | 'provisioner' => 'docker', 106 | 'container_name' => 'splunk_enterprise_1' 107 | }, 108 | 'vars' => { 109 | 'role' => ['splunk_instance'], 110 | } 111 | } 112 | stop_spinner(inventory_spinner) 113 | end 114 | write_to_inventory_file(inventory_hash, 'spec/fixtures/litmus_inventory.yaml') 115 | end 116 | 117 | desc 'Installs the module on the puppetserver' 118 | task :install_module do 119 | puppetserver.each do |target| 120 | Rake::Task['litmus:install_module'].invoke(target.uri) 121 | Rake::Task['litmus:install_module'].reenable 122 | end 123 | end 124 | 125 | desc 'Runs the tests' 126 | task :run_tests do 127 | rspec_command = 'bundle exec rspec ./spec/acceptance --format documentation' 128 | rspec_command += ' --format RspecJunitFormatter --out rspec_junit_results.xml' if ENV['CLOUD_CI'] == 'true' 129 | puts("Running the tests ...\n") 130 | unless system(rspec_command) 131 | # system returned false which means rspec failed. 
So exit 1 here 132 | exit 1 133 | end 134 | end 135 | 136 | desc 'Set up the test infrastructure' 137 | task :setup do 138 | tasks = [ 139 | :provision_vms, 140 | :setup_pe, 141 | :setup_splunk_targets, 142 | :install_module, 143 | :upload_pe_event_forwarding_module, 144 | ] 145 | 146 | tasks.each do |task| 147 | task = "acceptance:#{task}" 148 | puts("Invoking #{task}") 149 | Rake::Task[task].invoke 150 | puts("") 151 | end 152 | end 153 | 154 | desc 'Tear down the setup' 155 | task :tear_down do 156 | puts("Tearing down the test infrastructure ...\n") 157 | Rake::Task['litmus:tear_down'].invoke 158 | FileUtils.rm_f('spec/fixtures/litmus_inventory.yaml') 159 | end 160 | 161 | desc 'Task to run rspec tests against multiple targets' 162 | task :ci_run_tests do 163 | include ::BoltSpec::Run 164 | inventory_hash = inventory_hash_from_inventory_file 165 | 166 | # Run the tests 167 | config = { 'modulepath' => File.join(Dir.pwd, 'spec', 'fixtures', 'modules') } 168 | puppetserver.each do |server| 169 | message = "Running rspec tests against #{server.uri} !" 170 | spec_spinner = start_spinner(message) 171 | params = { 'sut' => server.uri, 'format' => 'documentation' } 172 | bolt_result = run_task('provision::run_tests', 'localhost', params, config: config) 173 | stop_spinner(spec_spinner) 174 | puts "Finished running rspec tests against #{server.uri} !\n" 175 | if bolt_result[0]['value'].has_key?('_error') 176 | test_result = bolt_result[0]['value']['_error']['msg'].to_json 177 | puts JSON.parse(test_result) 178 | exit 1 179 | else 180 | test_result = bolt_result[0]['value']['result'].to_json 181 | puts JSON.parse(test_result) 182 | end 183 | end 184 | end 185 | 186 | desc 'Task for CI' 187 | task :ci_tests do 188 | begin 189 | Rake::Task['acceptance:setup'].invoke 190 | Rake::Task['acceptance:ci_run_tests'].invoke 191 | ensure 192 | Rake::Task['acceptance:tear_down'].invoke 193 | end 194 | end 195 | end 196 | -------------------------------------------------------------------------------- /spec/acceptance/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/puppetlabs-splunk_hec/4e443283a7a714034d96dc6a61333450a39e4d67/spec/acceptance/.gitkeep -------------------------------------------------------------------------------- /spec/acceptance/class_spec.rb: -------------------------------------------------------------------------------- 1 | 2 | require 'spec_helper_acceptance' 3 | 4 | describe 'Verify the minimum install' do 5 | let(:earliest) { Time.now.utc } 6 | let(:server) { ENV['TARGET_HOST'] } 7 | 8 | before(:all) do 9 | server_agent_run(setup_manifest) 10 | end 11 | 12 | context 'with a basic test' do 13 | it 'Successfully sends a report to splunk' do 14 | before_run = earliest 15 | trigger_puppet_run(server) 16 | after_run = Time.now.utc 17 | report_count = report_count(get_splunk_report(before_run, after_run)) 18 | expect(report_count).to be 1 19 | end 20 | 21 | it 'Successfully sends facts to Splunk' do 22 | before_run = earliest 23 | trigger_puppet_run(server) 24 | after_run = Time.now.utc 25 | report_count = report_count(get_splunk_report(before_run, after_run, 'puppet:facts')) 26 | expect(report_count).to be >= 1 27 | end 28 | 29 | it 'Records events with record_event set to true' 30 | 31 | it 'Successfully sends data to an http endpoint' do 32 | server.run_shell('cat /etc/puppetlabs/code/environments/production/modules/splunk_hec/examples/orchestrator_metrics.json | puppet splunk_hec --sourcetype 
puppet:summary --saved_report') 33 | end 34 | 35 | it 'Fails when given a bad endpoint' do 36 | server_agent_run(setup_manifest(url: 'notanendpoint/nicetry')) 37 | cmd = 'cat /etc/puppetlabs/code/environments/production/modules/splunk_hec/examples/foo.json | puppet splunk_hec --sourcetype puppet:summary --saved_report' 38 | results = server.run_shell(cmd, expect_failures: true).to_s 39 | expect(results).to match %r{exit_code=1} 40 | end 41 | 42 | it 'Does not run report processor when disabled set to true' do 43 | before_run = earliest 44 | server_agent_run(setup_manifest(disabled: true)) 45 | after_run = Time.now.utc 46 | expect(report_count(get_splunk_report(before_run, after_run))).to be 0 47 | end 48 | end 49 | 50 | context 'with logs' do 51 | it '# Configure splunk_hec::include_logs_status with ["changed"]' 52 | end 53 | 54 | context 'with resource events' do 55 | it '# Configure splunk_hec::include_resources_status with ["changed"]' 56 | end 57 | 58 | context 'with SSL configuration' do 59 | let(:server_log) { '/var/log/puppetlabs/puppetserver/puppetserver.log' } 60 | 61 | it 'Verifies SSL certificate with ssl_ca configured' do 62 | configure_ssl 63 | server_agent_run(setup_manifest(ssl_ca: 'ca.pem')) 64 | expect(log_count('Splunk HEC SSL', server_log)).to be 1 65 | end 66 | 67 | it 'Verifies SSL against the system store w/ include_system_cert_store set to true' do 68 | configure_ssl(cert_store: true) 69 | server_agent_run(setup_manifest(cert_store: true)) 70 | expect(log_count('system store', server_log)).to be 1 71 | end 72 | end 73 | end 74 | -------------------------------------------------------------------------------- /spec/acceptance/events_processor_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper_acceptance' 2 | 3 | describe 'Event Forwarding' do 4 | is_pe = puppet_user == 'pe-puppet' 5 | let(:earliest) { Time.now.utc } 6 | let(:server) { ENV['TARGET_HOST'] } 7 | 8 | context 'With event forwarding enabled', if: is_pe do 9 | before(:all) do 10 | server_agent_run(setup_manifest(with_event_forwarding: true)) 11 | end 12 | 13 | context 'with orchestrator event_types set' do 14 | let(:report) do 15 | before_run = Time.now.utc 16 | server.run_shell("LC_ALL=en_US.UTF-8 puppet task run facts --nodes #{host_name}") 17 | server.run_shell("#{EVENT_FORWARDING_CONFDIR}/collect_api_events.rb") 18 | after_run = Time.now.utc 19 | get_splunk_report(before_run, after_run, 'puppet:jobs') 20 | end 21 | 22 | it 'does not send report on first run' do 23 | server.run_shell('rm /etc/puppetlabs/pe_event_forwarding/pe_event_forwarding_indexes.yaml', expect_failures: true) 24 | count = report_count(report) 25 | expect(count).to be 0 26 | end 27 | 28 | it 'Successfully sends an orchestrator event to splunk' do 29 | # ensure the indexes.yaml file is created 30 | server.run_shell("#{EVENT_FORWARDING_CONFDIR}/collect_api_events.rb") 31 | count = report_count(report) 32 | expect(count).to be 1 33 | end 34 | 35 | it 'Sets event properties correctly' do 36 | data = report[0]['result'] 37 | event = JSON.parse(data['_raw']) 38 | 39 | expect(data['source']).to eql('http:splunk_hec_token') 40 | expect(data['sourcetype']).to eql('puppet:jobs') 41 | expect(event['options']['scope']['nodes']).to eql([host_name]) 42 | expect(event['options']['blah']).to be_nil 43 | expect(event['environment']['name']).to eql('production') 44 | expect(event['options']['transport']).to be_nil 45 | end 46 | end 47 | 48 | context 'with rbac event_types set' do 49 | it 
'does not send report on first run' 50 | it 'Successfully sends an RBAC event to splunk' 51 | it 'Sets event properties correctly' 52 | end 53 | 54 | context 'with classifier event_types set' do 55 | it 'does not send report on first run' 56 | it 'Successfully sends a classifier event to splunk' 57 | it 'Sets event properties correctly' 58 | end 59 | 60 | context 'with pe_console event_types set' do 61 | let(:report) do 62 | before_run = Time.now.utc 63 | server.run_shell("LC_ALL=en_US.UTF-8 puppet task run facts --nodes #{host_name}") 64 | server.run_shell("#{EVENT_FORWARDING_CONFDIR}/collect_api_events.rb") 65 | after_run = Time.now.utc 66 | get_splunk_report(before_run, after_run, 'puppet:activities_console') 67 | end 68 | 69 | it 'does not send report on first run' do 70 | server.run_shell('rm /etc/puppetlabs/pe_event_forwarding/pe_event_forwarding_indexes.yaml', expect_failures: true) 71 | count = report_count(report) 72 | expect(count).to be 0 73 | end 74 | 75 | it 'Successfully sends a pe_console event to splunk' do 76 | # ensure the indexes.yaml file is created 77 | server.run_shell("#{EVENT_FORWARDING_CONFDIR}/collect_api_events.rb") 78 | count = report_count(report) 79 | expect(count).to be 1 80 | end 81 | 82 | it 'Sets event properties correctly' do 83 | data = report[0]['result'] 84 | event = JSON.parse(data['_raw']) 85 | 86 | expect(data['source']).to eql('http:splunk_hec_token') 87 | expect(data['sourcetype']).to eql('puppet:activities_console') 88 | expect(event['events'][0]['type']).to eql('run_task') 89 | expect(event['subject']['blah']).to be_nil 90 | expect(event['subject']['name']).to eql('admin') 91 | end 92 | end 93 | 94 | context 'with code_manager event_types set' do 95 | it 'does not send report on first run' 96 | it 'Successfully sends a code manager event to splunk' 97 | it 'Sets event properties correctly' 98 | end 99 | end 100 | end 101 | -------------------------------------------------------------------------------- /spec/classes/init_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | require 'rspec-puppet-utils' 5 | 6 | describe 'splunk_hec' do 7 | let(:pre_condition) do 8 | <<-MANIFEST 9 | # Define the pe-puppetserver service 10 | Service { 'pe-puppetserver': 11 | } 12 | 13 | # Define the puppetserver service 14 | Service { 'puppetserver': 15 | } 16 | 17 | # pe_ini_setting is a PE-only resource. Since PE modules are private, we 18 | # make the resource a defined type for the unit tests. Note that we still 19 | # have to use pe_ini_setting instead of ini_setting for backwards compatibility. 
20 | define pe_ini_setting ( 21 | Optional[Any] $ensure = undef, 22 | Optional[Any] $path = undef, 23 | Optional[Any] $section = undef, 24 | Optional[Any] $setting = undef, 25 | Optional[Any] $value = undef, 26 | ) { 27 | ini_setting { $title: 28 | ensure => $ensure, 29 | path => $path, 30 | section => $section, 31 | setting => $setting, 32 | value => $value, 33 | } 34 | } 35 | 36 | # Ditto for pe_ini_subsetting 37 | define pe_ini_subsetting ( 38 | Optional[Any] $ensure = undef, 39 | Optional[Any] $path = undef, 40 | Optional[Any] $section = undef, 41 | Optional[Any] $setting = undef, 42 | Optional[Any] $subsetting = undef, 43 | Optional[Any] $subsetting_separator = undef, 44 | ) { 45 | ini_subsetting { $title: 46 | ensure => $ensure, 47 | path => $path, 48 | section => $section, 49 | subsetting => $subsetting, 50 | subsetting_separator => $subsetting_separator, 51 | } 52 | } 53 | 54 | class pe_event_forwarding ( 55 | Optional[String] $confdir = "/tmp", 56 | ) {} 57 | 58 | class {pe_event_forwarding:} 59 | MANIFEST 60 | end 61 | 62 | let(:params) do 63 | { 64 | 'url' => 'foo_url', 65 | 'token' => 'foo_token', 66 | } 67 | end 68 | 69 | let(:confdir) { '/tmp' } 70 | let(:event_forwarding_base) { "#{confdir}/pe_event_forwarding/processors.d" } 71 | let(:facts) do 72 | { 73 | splunk_hec_agent_only_node: false 74 | } 75 | end 76 | 77 | context 'on a server node' do 78 | let(:facts) do 79 | { 80 | splunk_hec_is_pe: true 81 | } 82 | end 83 | 84 | context 'enable_reports is false' do 85 | let(:params) do 86 | p = super() 87 | p['enable_reports'] = false 88 | p 89 | end 90 | 91 | it { is_expected.to have_pe_ini_setting_resource_count(0) } 92 | it { is_expected.to have_pe_ini_subsetting_resource_count(0) } 93 | it { is_expected.not_to contain_file("#{event_forwarding_base}/splunk_hec") } 94 | it { is_expected.not_to contain_file("#{event_forwarding_base}/splunk_hec/util_splunk_hec.rb") } 95 | it { is_expected.not_to contain_file("#{event_forwarding_base}/splunk_hec.rb") } 96 | end 97 | 98 | context 'enable_reports is true' do 99 | let(:params) do 100 | p = super() 101 | p['enable_reports'] = true 102 | p 103 | end 104 | 105 | context "sets 'reports' setting to 'splunk_hec' (default behavior)" do 106 | it { is_expected.to contain_pe_ini_subsetting('enable splunk_hec').with_subsetting('splunk_hec') } 107 | it { is_expected.to have_pe_ini_setting_resource_count(0) } 108 | it { is_expected.to have_pe_ini_subsetting_resource_count(1) } 109 | end 110 | end 111 | 112 | context 'disabled is set to true' do 113 | let(:params) do 114 | p = super() 115 | p['disabled'] = true 116 | p['enable_reports'] = true 117 | p['manage_routes'] = true 118 | p 119 | end 120 | 121 | it { is_expected.to contain_pe_ini_subsetting('enable splunk_hec').with_setting('reports').with_ensure('absent') } 122 | it { is_expected.to contain_pe_ini_setting('enable splunk_hec_routes.yaml').with_setting('route_file').with_ensure('absent') } 123 | it { is_expected.to have_pe_ini_subsetting_resource_count(1) } 124 | it { is_expected.to have_pe_ini_setting_resource_count(1) } 125 | end 126 | 127 | context 'events_reporting_enabled' do 128 | let(:params) do 129 | p = super() 130 | p['events_reporting_enabled'] = true 131 | p 132 | end 133 | 134 | it { 135 | is_expected.to contain_file("#{event_forwarding_base}/splunk_hec") 136 | .with(ensure: 'directory') 137 | } 138 | 139 | it { 140 | is_expected.to contain_file("#{event_forwarding_base}/splunk_hec/util_splunk_hec.rb") 141 | .with( 142 | ensure: 'file', 143 | mode: '0755', 144 | ) 145 | } 146 
| 147 | it { 148 | is_expected.to contain_file("#{event_forwarding_base}/splunk_hec.rb") 149 | .with( 150 | ensure: 'file', 151 | mode: '0755', 152 | ) 153 | } 154 | end 155 | end 156 | 157 | context 'on an agent node' do 158 | # enable_reports should always be false on an agent node. 159 | let(:params) do 160 | p = super() 161 | p['enable_reports'] = false 162 | p 163 | end 164 | let(:facts) do 165 | { 166 | splunk_hec_agent_only_node: true 167 | } 168 | end 169 | 170 | context 'events_reporting not enabled' do 171 | it { is_expected.to have_pe_ini_setting_resource_count(0) } 172 | it { is_expected.to have_pe_ini_subsetting_resource_count(0) } 173 | it { is_expected.not_to contain_file("#{event_forwarding_base}/splunk_hec") } 174 | it { is_expected.not_to contain_file("#{event_forwarding_base}/splunk_hec/util_splunk_hec.rb") } 175 | it { is_expected.not_to contain_file("#{event_forwarding_base}/splunk_hec.rb") } 176 | end 177 | 178 | context 'events_reporting enabled' do 179 | let(:params) do 180 | p = super() 181 | p['events_reporting_enabled'] = true 182 | p 183 | end 184 | 185 | it { is_expected.to have_pe_ini_setting_resource_count(0) } 186 | it { is_expected.to have_pe_ini_subsetting_resource_count(0) } 187 | it { 188 | is_expected.to contain_file("#{confdir}/splunk_hec/settings.yaml") 189 | .with( 190 | owner: 'root', 191 | group: 'root', 192 | ) 193 | } 194 | it { 195 | is_expected.to contain_file("#{confdir}/splunk_hec/hec_secrets.yaml") 196 | .with( 197 | owner: 'root', 198 | group: 'root', 199 | ) 200 | } 201 | it { is_expected.to contain_file("#{event_forwarding_base}/splunk_hec") } 202 | it { is_expected.to contain_file("#{event_forwarding_base}/splunk_hec/util_splunk_hec.rb") } 203 | it { is_expected.to contain_file("#{event_forwarding_base}/splunk_hec.rb") } 204 | end 205 | end 206 | end 207 | -------------------------------------------------------------------------------- /spec/default_facts.yml: -------------------------------------------------------------------------------- 1 | # Use default_module_facts.yml for module specific facts. 2 | # 3 | # Facts specified here will override the values provided by rspec-puppet-facts. 
4 | --- 5 | networking: 6 | ip: "172.16.254.254" 7 | ip6: "FE80:0000:0000:0000:AAAA:AAAA:AAAA" 8 | mac: "AA:AA:AA:AA:AA:AA" 9 | is_pe: false 10 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | RSpec.configure do |c| 4 | c.mock_with :rspec 5 | end 6 | 7 | require 'puppetlabs_spec_helper/module_spec_helper' 8 | require 'rspec-puppet-facts' 9 | 10 | require 'spec_helper_local' if File.file?(File.join(File.dirname(__FILE__), 'spec_helper_local.rb')) 11 | 12 | include RspecPuppetFacts 13 | 14 | default_facts = { 15 | puppetversion: Puppet.version, 16 | facterversion: Facter.version, 17 | } 18 | 19 | default_fact_files = [ 20 | File.expand_path(File.join(File.dirname(__FILE__), 'default_facts.yml')), 21 | File.expand_path(File.join(File.dirname(__FILE__), 'default_module_facts.yml')), 22 | ] 23 | 24 | default_fact_files.each do |f| 25 | next unless File.exist?(f) && File.readable?(f) && File.size?(f) 26 | 27 | begin 28 | require 'deep_merge' 29 | default_facts.deep_merge!(YAML.safe_load(File.read(f), permitted_classes: [], permitted_symbols: [], aliases: true)) 30 | rescue StandardError => e 31 | RSpec.configuration.reporter.message "WARNING: Unable to load #{f}: #{e}" 32 | end 33 | end 34 | 35 | # read default_facts and merge them over what is provided by facterdb 36 | default_facts.each do |fact, value| 37 | add_custom_fact fact, value, merge_facts: true 38 | end 39 | 40 | RSpec.configure do |c| 41 | c.default_facts = default_facts 42 | c.before :each do 43 | # set to strictest setting for testing 44 | # by default Puppet runs at warning level 45 | Puppet.settings[:strict] = :warning 46 | Puppet.settings[:strict_variables] = true 47 | end 48 | c.filter_run_excluding(bolt: true) unless ENV['GEM_BOLT'] 49 | c.after(:suite) do 50 | RSpec::Puppet::Coverage.report!(0) 51 | end 52 | 53 | # Filter backtrace noise 54 | backtrace_exclusion_patterns = [ 55 | %r{spec_helper}, 56 | %r{gems}, 57 | ] 58 | 59 | if c.respond_to?(:backtrace_exclusion_patterns) 60 | c.backtrace_exclusion_patterns = backtrace_exclusion_patterns 61 | elsif c.respond_to?(:backtrace_clean_patterns) 62 | c.backtrace_clean_patterns = backtrace_exclusion_patterns 63 | end 64 | end 65 | 66 | # Ensures that a module is defined 67 | # @param module_name Name of the module 68 | def ensure_module_defined(module_name) 69 | module_name.split('::').reduce(Object) do |last_module, next_module| 70 | last_module.const_set(next_module, Module.new) unless last_module.const_defined?(next_module, false) 71 | last_module.const_get(next_module, false) 72 | end 73 | end 74 | 75 | # 'spec_overrides' from sync.yml will appear below this line 76 | # 77 | -------------------------------------------------------------------------------- /spec/spec_helper_acceptance.rb: -------------------------------------------------------------------------------- 1 | 2 | # frozen_string_literal: true 3 | 4 | require 'spec_helper_acceptance_local' if File.file?(File.join(File.dirname(__FILE__), 'spec_helper_acceptance_local.rb')) 5 | -------------------------------------------------------------------------------- /spec/spec_helper_acceptance_local.rb: -------------------------------------------------------------------------------- 1 | require 'serverspec' 2 | require 'puppet_litmus' 3 | require 'support/acceptance/helpers.rb' 4 | 5 | include PuppetLitmus 6 | PuppetLitmus.configure! 
7 | 8 | EVENT_FORWARDING_CONFDIR = '/etc/puppetlabs/pe_event_forwarding'.freeze 9 | DIR_TEST_COMMAND = '[[ -d /etc/puppetlabs/code/environments/production/modules/pe_event_forwarding ]] '\ 10 | '&& rm /etc/puppetlabs/code/environments/production/modules/pe_event_forwarding -rf'.freeze 11 | EVENT_FORWARDING_LOCAL_PATH = './spec/fixtures/modules/pe_event_forwarding'.freeze 12 | EVENT_FORWARDING_REMOTE_PATH = '/etc/puppetlabs/code/environments/production/modules'.freeze 13 | 14 | TARGET_SERVER = ENV['TARGET_HOST'] 15 | 16 | RSpec.configure do |config| 17 | include TargetHelpers 18 | 19 | config.before(:suite) do 20 | # Stop the puppet service on the puppetserver to avoid edge-case conflicting 21 | # Puppet runs (one triggered by service vs one we trigger) 22 | shell_command = 'puppet resource service puppet ensure=stopped; '\ 23 | 'puppet module install puppetlabs-inifile --version 5.1.0' 24 | TARGET_SERVER.run_shell(shell_command) 25 | TARGET_SERVER.run_shell(DIR_TEST_COMMAND, expect_failures: true) 26 | TARGET_SERVER.bolt_upload_file(EVENT_FORWARDING_LOCAL_PATH, EVENT_FORWARDING_REMOTE_PATH) 27 | end 28 | end 29 | 30 | # TODO: This will cause some problems if we run the tests 31 | # in parallel. For example, what happens if two targets 32 | # try to modify site.pp at the same time? 33 | def set_sitepp_content(manifest) 34 | content = <<-HERE 35 | node default { 36 | #{manifest} 37 | } 38 | HERE 39 | 40 | TARGET_SERVER.write_file(content, '/etc/puppetlabs/code/environments/production/manifests/site.pp') 41 | TARGET_SERVER.run_shell("chown #{puppet_user}:#{puppet_user} /etc/puppetlabs/code/environments/production/manifests/site.pp") 42 | end 43 | 44 | def trigger_puppet_run(target, acceptable_exit_codes: [0, 2]) 45 | result = target.run_shell('puppet agent -t --detailed-exitcodes', expect_failures: true) 46 | unless acceptable_exit_codes.include?(result[:exit_code]) 47 | raise "Puppet run failed\nstdout: #{result[:stdout]}\nstderr: #{result[:stderr]}" 48 | end 49 | result 50 | end 51 | 52 | def declare(type, title, params = {}) 53 | params = params.map do |name, value| 54 | value = "'#{value}'" if value.is_a?(String) 55 | " #{name} => #{value}," 56 | end 57 | 58 | <<-HERE 59 | #{type} { '#{title}': 60 | #{params.join("\n")} 61 | } 62 | HERE 63 | end 64 | 65 | def to_manifest(*declarations) 66 | declarations.join("\n") 67 | end 68 | 69 | def host_name 70 | @puppetserver_hostname ||= TARGET_SERVER.run_shell('facter fqdn').stdout.chomp 71 | end 72 | 73 | def report_dir 74 | cmd = "reportdir=`puppet config print reportdir --section server` \n"\ 75 | "hostname=`facter fqdn` \n"\ 76 | 'echo \"$reportdir/$hostname\"' 77 | @report_dir ||= TARGET_SERVER.run_shell(cmd).stdout.chomp 78 | end 79 | 80 | def setup_manifest(disabled: false, cert_store: false, ssl_ca: nil, url: nil, with_event_forwarding: false) 81 | if url.nil? 
82 | # This block is for checking whether we are testing locally or on the CloudCI 83 | # splunk_node.uri will work locally, but bc of the discrepancy of uri and hostname 84 | # on the CloudCI, and we use localhost 85 | begin 86 | splunk_runner = splunk_node.uri 87 | rescue 88 | splunk_runner = 'localhost' 89 | end 90 | url = "https://#{splunk_runner}:8088/services/collector/event" 91 | end 92 | 93 | manifest = '' 94 | params = { 95 | url: url, 96 | token: 'abcd1234', 97 | enable_reports: true, 98 | manage_routes: true, 99 | facts_terminus: 'yaml', 100 | record_event: true, 101 | pe_console: 'localhost', 102 | disabled: disabled, 103 | include_system_cert_store: cert_store, 104 | } 105 | 106 | params[:ssl_ca] = ssl_ca unless ssl_ca.nil? 107 | 108 | if with_event_forwarding 109 | manifest << add_event_forwarding 110 | params[:events_reporting_enabled] = true 111 | params[:orchestrator_data_filter] = ['options.scope.nodes', 'options.scope.blah', 'environment.name'] 112 | params[:pe_console_data_filter] = ['subject.name', 'subject.blah', 'events'] 113 | end 114 | 115 | manifest << declare(:class, :splunk_hec, params) 116 | manifest << add_service_resource unless puppet_user == 'pe-puppet' 117 | manifest 118 | end 119 | 120 | def add_service_resource 121 | params = { 122 | ensure: :running, 123 | hasrestart: true, 124 | restart: 'puppetserver reload' 125 | } 126 | declare(:service, :puppetserver, params) 127 | end 128 | 129 | def add_event_forwarding 130 | token = TARGET_SERVER.run_shell('puppet access show').stdout.chomp 131 | params = { 132 | pe_token: token, 133 | disabled: true 134 | } 135 | declare(:class, :pe_event_forwarding, params) 136 | end 137 | 138 | def configure_ssl(cert_store: false) 139 | inventory_hash = LitmusHelpers.inventory_hash_from_inventory_file 140 | image = LitmusHelpers.facts_from_node(inventory_hash, TARGET_SERVER) 141 | cmd = if cert_store 142 | if image['platform'].include?('ubuntu') 143 | 'cp $(puppet config print localcacert) /usr/local/share/ca-certificates/splunk_hec.crt && update-ca-certificates' 144 | else 145 | 'cp $(puppet config print localcacert) /etc/pki/ca-trust/source/anchors/splunk_hec.pem && update-ca-trust' 146 | end 147 | else 148 | 'cp $(puppet config print localcacert) /etc/puppetlabs/puppet/splunk_hec/' 149 | end 150 | TARGET_SERVER.run_shell(cmd) 151 | end 152 | 153 | def puppet_user 154 | @service_name ||= query_puppet_user 155 | end 156 | 157 | def query_puppet_user 158 | service_name = '' 159 | TARGET_SERVER.run_shell('[ -f /opt/puppetlabs/server/pe_version ]', expect_failures: true) do |result| 160 | service_name = (result.exit_code == 0) ? 'pe-puppet' : 'puppet' 161 | end 162 | service_name 163 | end 164 | 165 | def get_splunk_report(earliest, latest, sourcetype = 'puppet:summary') 166 | start_time = earliest.strftime('%m/%d/%Y:%H:%M:%S') 167 | end_time = (latest + 2).strftime('%m/%d/%Y:%H:%M:%S') 168 | query_command = 'curl -u admin:piepiepie -k '\ 169 | 'https://localhost:8089/services/search/v2/jobs/export -d output_mode=json '\ 170 | "-d search='search sourcetype=\"#{sourcetype}\" AND earliest=\"#{start_time}\" AND latest=\"#{end_time}\"'" 171 | sleep 1 172 | begin 173 | splunk_runner = splunk_node 174 | rescue 175 | splunk_runner = TARGET_SERVER 176 | end 177 | response = splunk_runner.run_shell(query_command).stdout 178 | JSON.parse("[#{response.split.join(',')}]") 179 | end 180 | 181 | def report_count(report) 182 | report[0]['result'].nil? ? 
0 : report.count 183 | end 184 | 185 | def log_count(message, log) 186 |   cmd = "grep '#{message}' #{log} -c" 187 |   TARGET_SERVER.run_shell(cmd, expect_failures: true).stdout.chomp.to_i 188 | end 189 | 190 | def server_agent_run(manifest) 191 |   set_sitepp_content(manifest) 192 |   trigger_puppet_run(TARGET_SERVER) 193 | end 194 | 195 | def console_host_fqdn 196 |   @console_host_fqdn ||= TARGET_SERVER.run_shell('hostname -A').stdout.strip 197 | end 198 | -------------------------------------------------------------------------------- /spec/support/acceptance/helpers.rb: -------------------------------------------------------------------------------- 1 | require 'puppet_litmus' 2 | PuppetLitmus.configure! 3 | 4 | # The Target class and TargetHelpers module are a useful way 5 | # for tests to reuse Litmus' helpers when they want to do stuff 6 | # on nodes that may not be the current target host (e.g. 7 | # the master or the Splunk instance). 8 | # 9 | # NOTE: The code here is Litmus' recommended approach for multi-node 10 | # testing (see https://github.com/puppetlabs/puppet_litmus/issues/72). 11 | # We should revisit it once Litmus has a standardized pattern for 12 | # multi-node testing. 13 | 14 | class Target 15 |   include PuppetLitmus 16 | 17 |   attr_reader :uri 18 | 19 |   def initialize(uri) 20 |     @uri = uri 21 |   end 22 | 23 |   def bolt_config 24 |     inventory_hash = LitmusHelpers.inventory_hash_from_inventory_file 25 |     LitmusHelpers.config_from_node(inventory_hash, @uri) 26 |   end 27 | 28 |   # Make sure that ENV['TARGET_HOST'] is set to uri 29 |   # before each PuppetLitmus method call. This makes it 30 |   # so if we have an array of targets, say 'agents', then 31 |   # code like agents.each { |agent| agent.bolt_upload_file(...) } 32 |   # will work as expected. Otherwise if we do this in, say, the 33 |   # constructor, then the code will only work for the agent that 34 |   # most recently set the TARGET_HOST variable. 35 |   PuppetLitmus.instance_methods.each do |name| 36 |     m = PuppetLitmus.instance_method(name) 37 |     define_method(name) do |*args, **kwargs, &block| 38 |       ENV['TARGET_HOST'] = uri 39 |       m.bind(self).call(*args, **kwargs, &block) 40 |     end 41 |   end 42 | end 43 | 44 | class TargetNotFoundError < StandardError; end 45 | 46 | module TargetHelpers 47 |   def puppetserver 48 |     target('puppetserver', 'acceptance:provision_vms', 'server') 49 |   end 50 |   module_function :puppetserver 51 | 52 |   def splunk_instance 53 |     target('Splunk instance', 'acceptance:setup_splunk_targets', 'splunk_instance') 54 |   end 55 |   module_function :splunk_instance 56 | 57 |   def splunk_node 58 |     target('Splunk Node', 'acceptance:setup_splunk_instance', 'splunk_node') 59 |   end 60 |   module_function :splunk_node 61 | 62 |   def target(name, setup_task, role) 63 |     @targets ||= {} 64 | 65 |     unless @targets[name] 66 |       # Find the targets 67 |       inventory_hash = LitmusHelpers.inventory_hash_from_inventory_file 68 |       targets = LitmusHelpers.nodes_with_role(role, inventory_hash) 69 |       unless targets 70 |         raise TargetNotFoundError, "none of the targets in 'inventory.yaml' have the '#{role}' role set. Did you forget to run 'rake #{setup_task}'?" 
71 |       end 72 |       @targets[name] = [] 73 |       targets.each_with_index do |target, i| 74 |         @targets[name][i] = Target.new(target) 75 |       end 76 |     end 77 | 78 |     @targets[name] 79 |   end 80 |   module_function :target 81 | end 82 | 83 | module LitmusHelpers 84 |   extend PuppetLitmus 85 | end 86 | -------------------------------------------------------------------------------- /spec/support/acceptance/install_pe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | version=`puppet --version` 4 | 5 | if [ -z "$version" ]; then 6 |   PE_RELEASE=2019.8.7 7 |   PE_LATEST=$(curl https://artifactory.delivery.puppetlabs.net/artifactory/generic_enterprise__local/archives/releases/${PE_RELEASE}/LATEST) 8 |   PE_FILE_NAME=puppet-enterprise-${PE_LATEST}-ubuntu-18.04-amd64 9 |   TAR_FILE=${PE_FILE_NAME}.tar 10 |   DOWNLOAD_URL=https://artifactory.delivery.puppetlabs.net/artifactory/generic_enterprise__local/archives/releases/${PE_RELEASE}/${TAR_FILE} 11 | 12 |   ## Download PE 13 |   curl -o ${TAR_FILE} ${DOWNLOAD_URL} 14 |   if [[ $? -ne 0 ]];then 15 |     echo "Error: curl failed to download [${DOWNLOAD_URL}]" 16 |     exit 2 17 |   fi 18 | 19 |   ## Install PE 20 |   tar xvf ${TAR_FILE} 21 |   if [[ $? -ne 0 ]];then 22 |     echo "Error: Failed to untar [${TAR_FILE}]" 23 |     exit 2 24 |   fi 25 | 26 |   cd ${PE_FILE_NAME} 27 |   printf 'y' | ./puppet-enterprise-installer 28 |   if [[ $? -ne 0 ]];then 29 |     echo "Error: Failed to install Puppet Enterprise. Please check the logs and call Bryan." 30 |     exit 2 31 |   fi 32 | 33 |   ## Finalize configuration 34 |   echo "Finalize PE install" 35 |   puppet agent -t 36 | 37 |   puppet infra console_password --password=pie 38 | 39 |   ## Create and configure Certs 40 |   echo "autosign = true" >> /etc/puppetlabs/puppet/puppet.conf 41 | 42 |   ## Setup the RBAC token 43 |   echo 'pie' | puppet access login --lifetime 1y --username admin 44 | fi 45 | 46 | version=`puppet --version` 47 | 48 | if [ -z "$version" ]; then 49 |   echo 'puppet install failed' 50 |   exit 1 51 | fi 52 | -------------------------------------------------------------------------------- /spec/support/acceptance/splunk/defaults/default.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This file is used by the Splunk container to configure settings that would 3 | # typically be set in the web interface. 4 | # https://splunk.github.io/docker-splunk/ADVANCED.html 5 | retry_num: 100 6 | splunk: 7 |   opt: /opt 8 |   home: /opt/splunk 9 |   user: splunk 10 |   group: splunk 11 |   exec: /opt/splunk/bin/splunk 12 |   pid: /opt/splunk/var/run/splunk/splunkd.pid 13 |   password: "{{ splunk_password | default() }}" 14 |   svc_port: 8089 15 |   s2s_port: 9997 16 |   http_port: 8000 17 |   hec: 18 |     enable: true 19 |     ssl: true 20 |     port: 8088 21 |     token: abcd1234 22 |   conf: 23 |     - key: inputs 24 |       value: 25 |         directory: /opt/splunk/etc/apps/splunk_httpinput/local 26 |         content: 27 |           http: 28 |             serverCert: 29 |   smartstore: null 30 | -------------------------------------------------------------------------------- /spec/support/acceptance/splunk/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.0" 2 | 3 | services: 4 |   enterprise: 5 |     image: splunk/splunk 6 |     hostname: splunk_instance 7 |     environment: 8 |       - SPLUNK_START_ARGS=--accept-license 9 |       # The splunkbase credentials are to download and install the Puppet 10 |       # Report Viewer from Splunkbase. 
11 | # We could alternatively download the packaged app from a location 12 | # like github for testing. 13 | - SPLUNK_APPS_URL=https://github.com/puppetlabs/TA-puppet-report-viewer/tarball/main 14 | - SPLUNK_PASSWORD=piepiepie 15 | volumes: 16 | # default.yml is a mechanism to load splunk settings that would normally 17 | # be configured through the ui. 18 | - ./defaults:/tmp/defaults 19 | ports: 20 | # localhost:8000 will bring up the web interface 21 | - "0.0.0.0:8000:8000" 22 | # 8088 is the hec endpoint 23 | - "0.0.0.0:8088:8088" 24 | - "0.0.0.0:8089:8089" 25 | -------------------------------------------------------------------------------- /spec/support/acceptance/splunk_hec.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | "url" : "http://localhost:8088/services/collector/event" 3 | "token" : "abcd1234" 4 | "enable_reports" : "true" 5 | "manage_routes" : "true" 6 | -------------------------------------------------------------------------------- /spec/support/acceptance/start_splunk_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function cleanup() { 3 | # bolt_upload_file isn't idempotent, so remove this directory 4 | # to ensure that later invocations of the setup_servicenow_instance 5 | # task _are_ idempotent 6 | rm -rf /tmp/splunk 7 | } 8 | trap cleanup EXIT 9 | 10 | function start_splunk() { 11 | id=`docker ps -q -f name=splunk-enterprise-1 -f status=running` 12 | 13 | if [ ! -z "$id" ] 14 | then 15 | echo "Killing the current Splunk container (id = ${id}) ..." 16 | docker rm --force ${id} 17 | fi 18 | 19 | docker compose -f /tmp/splunk/docker-compose.yml up -d --remove-orphans 20 | 21 | id=`docker ps -q -f name=splunk-enterprise-1 -f status=running` 22 | 23 | if [ -z "$id" ] 24 | then 25 | echo 'Splunk container start failed.' 26 | exit 1 27 | fi 28 | echo 'Splunk container starting...' 29 | } 30 | 31 | function yum_install_docker() { 32 | yum install -y yum-utils 33 | yum-config-manager \ 34 | --add-repo \ 35 | https://download.docker.com/linux/centos/docker-ce.repo 36 | yum install docker-ce docker-ce-cli containerd.io docker-compose-plugin -y 37 | systemctl start docker 38 | } 39 | 40 | function compose_starting() { 41 | docker ps -f name=splunk-enterprise-1 | grep starting 42 | } 43 | 44 | function wait_for_compose() { 45 | r=0 46 | while [ ! -z "$(compose_starting)" ] && [ $r -lt 10 ] 47 | do 48 | sleep 30 49 | ((r++)) 50 | done 51 | } 52 | 53 | function setup_hec_ssl() { 54 | echo "Setting up HEC SSL..." 55 | certs=$(puppet config print certdir) 56 | keys="$(puppet config print privatekeydir)" 57 | s_cert='/tmp/splunk/puppet_hec.pem' 58 | s_apps='/opt/splunk/etc/apps' 59 | s_auth='/opt/splunk/etc/auth' 60 | /opt/puppetlabs/bin/puppetserver ca generate --certname localhost &>2 61 | cat "$certs/localhost.pem" "$keys/localhost.pem" "$certs/ca.pem" > $s_cert 62 | docker cp $s_cert splunk-enterprise-1:$s_auth 63 | docker exec -u root splunk-enterprise-1 sed -i "/Cert/c\serverCert = $s_auth/puppet_hec.pem" $s_apps/splunk_httpinput/local/inputs.conf 64 | } 65 | 66 | function splunk_set_minfreemb() { 67 | echo "Setting Splunk custom configs..." 68 | # This is a workaround for issues on Ubuntu where searches fail due to hitting default minfreemb of 5GB. 69 | docker exec -u root splunk-enterprise-1 /opt/splunk/bin/splunk set minfreemb 500 -auth admin:piepiepie &>2 70 | # We have to restart Splunk for the changes to get picked up. 
71 | docker exec -u root splunk-enterprise-1 /opt/splunk/bin/splunk restart 72 | } 73 | 74 | YUM=$(cat /etc/*-release | grep 'CentOS\|rhel') 75 | 76 | nodocker=$(which docker 2>&1 | grep "no docker") 77 | status=$? 78 | 79 | if [ ! -z "$nodocker" ] 80 | then 81 | if [ ! -z "$YUM" ]; then 82 | yum_install_docker 83 | fi 84 | else 85 | # Add Docker repo for Ubuntu 86 | mkdir -m 0755 -p /etc/apt/keyrings 87 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg 88 | echo \ 89 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 90 | $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null 91 | # Install Docker and Docker Compose 92 | apt-get -qq update -y 1>&- 2>&- 93 | apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y 1>&- 2>&- 94 | fi 95 | 96 | printenv 97 | docker system info 98 | start_splunk 99 | wait_for_compose 100 | setup_hec_ssl 101 | splunk_set_minfreemb 102 | exit 0 103 | -------------------------------------------------------------------------------- /spec/support/unit/reports/splunk_hec_spec_helpers.rb: -------------------------------------------------------------------------------- 1 | require 'json' 2 | require 'spec_helper' 3 | require 'puppet/reports' 4 | 5 | def new_processor 6 | processor = Puppet::Transaction::Report.new('apply') 7 | processor.extend(Puppet::Reports.report(:splunk_hec)) 8 | 9 | allow(processor).to receive(:host).and_return 'host' 10 | allow(processor).to receive(:environment).and_return 'production' 11 | allow(processor).to receive(:job_id).and_return '1' 12 | allow(processor).to receive(:time).and_return(run_start_time) 13 | allow(processor).to receive(:metrics).and_return(metrics_hash) 14 | # The report processor logs all exceptions to Puppet.err. Thus, we mock it out 15 | # so that we can see them (and avoid false-positives). 
16 | allow(Puppet).to receive(:err) do |msg| 17 | raise msg 18 | end 19 | processor 20 | end 21 | 22 | def metrics_hash 23 | { 24 | 'time' => { 'total' => 5 }, 25 | 'resources' => { 'total' => 0 }, 26 | 'changes' => { 'total' => 0 }, 27 | } 28 | end 29 | 30 | def run_total_time 31 | (run_start_time + metrics_hash['time']['total']).iso8601(3) 32 | end 33 | 34 | def epoch_time 35 | '%10.3f' % Time.parse(run_total_time).to_f 36 | end 37 | 38 | def default_credentials 39 | { 40 | user: 'test_user', 41 | password: 'test_password', 42 | } 43 | end 44 | 45 | def default_settings_hash 46 | { 47 | 'url' => 'splunk_testing.com', 48 | 'token' => 'test_token', 49 | 'collect_facts' => ['dmi', 'disks', 'partitions', 'processors', 'networking'], 50 | 'enable_reports' => true, 51 | 'record_event' => true, 52 | 'disabled' => false, 53 | 'managed_routes' => true, 54 | 'facts_terminus' => 'puppetdb', 55 | 'facts_cache_terminus' => 'splunk_hec', 56 | } 57 | end 58 | 59 | def mock_settings_file(settings_hash) 60 | allow(YAML).to receive(:load_file).with(%r{(.*)(settings|hec_secrets).yaml}).and_return(settings_hash) 61 | end 62 | 63 | def new_mock_response(status, body) 64 | response = instance_double('mock HTTP response') 65 | allow(response).to receive(:code).and_return(status.to_s) 66 | allow(response).to receive(:body).and_return(body) 67 | response 68 | end 69 | 70 | def new_mock_event(event_fields = {}) 71 | event_fields[:property] = 'message' 72 | event_fields[:message] = 'defined \'message\' as \'hello\'' 73 | Puppet::Transaction::Event.new(property: event_fields[:property], message: event_fields[:message], status: event_fields[:event_status], corrective_change: event_fields[:event_corrective_change]) 74 | end 75 | 76 | def new_mock_resource_status(events, status_changed, status_failed) 77 | status = instance_double('resource status') 78 | allow(status).to receive(:events).and_return(events) 79 | allow(status).to receive(:out_of_sync).and_return(status_changed) 80 | allow(status).to receive(:failed).and_return(status_failed) 81 | allow(status).to receive(:containment_path).and_return(['foo', 'bar']) 82 | allow(status).to receive(:file).and_return('site.pp') 83 | allow(status).to receive(:line).and_return(1) 84 | allow(status).to receive(:resource).and_return('resource') 85 | allow(status).to receive(:resource_type).and_return('resource_type') 86 | allow(status).to receive(:corrective_change).and_return(true) 87 | allow(status).to receive(:intentional_change).and_return(false) 88 | status 89 | end 90 | 91 | def mock_events(processor, *events) 92 | allow(processor).to receive(:resource_statuses).and_return('mock_resource' => new_mock_resource_status(events, true, false)) 93 | end 94 | 95 | def mock_event_as_resource_status(processor, event_status, event_corrective_change, status_changed = true, status_failed = false) 96 | mock_events = [new_mock_event(status: event_status, corrective_change: event_corrective_change)] 97 | mock_resource_status = new_mock_resource_status(mock_events, status_changed, status_failed) 98 | allow(processor).to receive(:resource_statuses).and_return('mock_resource' => mock_resource_status) 99 | end 100 | 101 | def expect_sent_event(_expected_credentials = {}) 102 | # will only be called to send an event 103 | expect(processor).to receive(:submit_request) do |request_body| 104 | yield request_body 105 | new_mock_response(200, '') 106 | end 107 | end 108 | 109 | def expect_requested_client(client) 110 | case client 111 | when :non_fips 112 | expect(processor).to 
receive(:send_with_nonfips).and_return(new_mock_response(200, '')) 113 | expect(processor).not_to receive(:send_with_fips) 114 | when :fips 115 | expect(processor).to receive(:send_with_fips).and_return(new_mock_response(200, '')) 116 | expect(processor).not_to receive(:send_with_nonfips) 117 | end 118 | end 119 | 120 | def default_facts 121 | { 122 | 'host' => 'foo.splunk.c.internal', 123 | 'time' => 'epoch', 124 | 'sourcetype' => 'puppet:summary', 125 | 'event' => { 126 | 'cached_catalog_status' => 'not_used', 127 | 'catalog_uuid' => '12345asdf', 128 | 'certname' => 'foo.splunk.internal', 129 | 'code_id' => 'null', 130 | 'configuration_version' => '123456789', 131 | 'corrective_change' => 'false', 132 | 'environment' => 'production', 133 | 'job_id' => 'null', 134 | 'metrics' => metrics_hash, 135 | 'noop' => 'false', 136 | 'noop_pending' => 'false', 137 | 'pe_console' => 'https://localhost', 138 | 'producer' => 'foo.splunk-1234.c.internal', 139 | 'puppet_version' => '6.22.1', 140 | 'report_format' => '11', 141 | 'status' => 'changed', 142 | 'time' => '2021-06-07T20:10:42.696Z', 143 | 'transaction_uuid' => 'a1s2d3f4g56', 144 | }, 145 | } 146 | end 147 | -------------------------------------------------------------------------------- /spec/unit/reports/splunk_hec_spec.rb: -------------------------------------------------------------------------------- 1 | require 'support/unit/reports/splunk_hec_spec_helpers' 2 | 3 | describe 'Splunk_hec report processor: miscellaneous tests' do 4 | let(:processor) { new_processor } 5 | let(:settings_hash) { default_settings_hash } 6 | let(:expected_credentials) { default_credentials } 7 | let(:facts) { default_facts } 8 | let(:run_start_time) { Time.now } 9 | 10 | before(:each) do 11 | mock_settings_file(settings_hash) 12 | allow(processor).to receive(:facts).and_return(facts) 13 | end 14 | 15 | context 'metrics' do 16 | it 'sends the correct timestamp' do 17 | expect_sent_event do |event| 18 | expect(event['time']).to eql(epoch_time) 19 | expect(event['event']['time']).to eql(run_total_time) 20 | end 21 | processor.process 22 | end 23 | end 24 | 25 | context 'testing splunk_hec disabling feature' do 26 | before(:each) do 27 | allow(processor).to receive(:status).and_return('changed') 28 | mock_event_as_resource_status(processor, 'success', false) 29 | end 30 | 31 | context 'when disabled is set to true' do 32 | let(:settings_hash) { super().merge('disabled' => true) } 33 | 34 | it 'does not run report processor' do 35 | expect(processor).not_to receive(:submit_request) 36 | processor.process 37 | end 38 | end 39 | 40 | context 'when disabled is set to false' do 41 | let(:settings_hash) { super().merge('disabled' => false) } 42 | 43 | it 'does run report processor' do 44 | expect_sent_event(expected_credentials) do |actual_event| 45 | expect(actual_event['event']['status']).to eql('changed') 46 | end 47 | processor.process 48 | end 49 | end 50 | end 51 | 52 | context 'testing splunk_hec only_changes features' do 53 | before(:each) do 54 | allow(processor).to receive(:status).and_return('unchanged') 55 | mock_event_as_resource_status(processor, 'success', false) 56 | end 57 | 58 | context 'when only_changes is set to true' do 59 | let(:settings_hash) { super().merge('only_changes' => true) } 60 | 61 | it 'does not run report processor' do 62 | expect(processor).not_to receive(:submit_request) 63 | processor.process 64 | end 65 | end 66 | 67 | context 'when only_changes is set to false' do 68 | let(:settings_hash) { super().merge('only_changes' => false) } 
69 | 70 |     it 'does run report processor' do 71 |       expect_sent_event(expected_credentials) do |actual_event| 72 |         expect(actual_event['event']['status']).to eql('unchanged') 73 |       end 74 |       processor.process 75 |     end 76 |   end 77 | end 78 | 79 | context 'when fips is enabled' do 80 |   let(:settings_hash) { super().merge('fips_enabled' => true) } 81 | 82 |   it 'the correct function gets called' do 83 |     expect_requested_client(:fips) 84 |     processor.process 85 |   end 86 | end 87 | 88 | context 'when fips is not enabled' do 89 |   let(:settings_hash) { super().merge('fips_enabled' => false) } 90 | 91 |   it 'the correct function is called' do 92 |     expect_requested_client(:non_fips) 93 |     processor.process 94 |   end 95 | end 96 | end 97 | -------------------------------------------------------------------------------- /tasks/examples/bolt_apply.json: -------------------------------------------------------------------------------- 1 | { 2 |   "description": "This task submits a bolt apply report (and other data) to Splunk's HEC", 3 |   "input_method": "stdin", 4 |   "remote": true, 5 |   "parameters": { 6 |     "report": { 7 |       "description": "A hash of the report, the bulk of this will be used for the splunk event", 8 |       "type": "Hash" 9 |     }, 10 |     "facts": { 11 |       "description": "A hash of facts, name => value", 12 |       "type": "Hash" 13 |     }, 14 |     "host": { 15 |       "description": "Target's identifying name; will use clientcert from facts if not provided", 16 |       "type": "Optional[String[1]]" 17 |     }, 18 |     "plan_guid": { 19 |       "description": "A guid used to identify invocation of the plan (should change each run)", 20 |       "type": "Optional[String[1]]" 21 |     }, 22 |     "plan_name": { 23 |       "description": "The name of the plan being run (shouldn't change each run)", 24 |       "type": "Optional[String[1]]" 25 |     } 26 |   } 27 | } -------------------------------------------------------------------------------- /tasks/examples/bolt_apply.rb: -------------------------------------------------------------------------------- 1 | #!/opt/puppetlabs/bolt/bin/ruby 2 | 3 | require 'yaml' 4 | require 'json' 5 | require 'date' 6 | require 'net/https' 7 | 8 | params = JSON.parse(STDIN.read) 9 | 10 | def make_error(msg) 11 |   error = { 12 |     '_error' => { 13 |       'kind' => 'execution error', 14 |       'msg' => msg, 15 |       'details' => {}, 16 |     }, 17 |   } 18 |   error 19 | end 20 | 21 | target = params['_target'] 22 | 23 | splunk_server = target['hostname'] 24 | splunk_token = target['token'] 25 | 26 | splunk_port = target['port'] || '8088' 27 | 28 | report = params['report'] 29 | facts = params['facts'] 30 | 31 | # now we can create the event with the timestamp from the report 32 | time = DateTime.parse(report['time']) 33 | epoch = time.strftime('%Q').to_str.insert(-4, '.') 34 | 35 | event = report 36 | 37 | hostname = report['host'] 38 | 39 | event['facts'] = facts 40 | event['event_type'] = 'bolt_apply' 41 | 42 | unless params['plan_guid'].nil? 43 |   event['plan_guid'] = params['plan_guid'] 44 | end 45 | 46 | unless params['plan_name'].nil? 
47 | event['plan_name'] = params['plan_name'] 48 | end 49 | 50 | # remove duplicate host from event body 51 | event.delete('host') 52 | 53 | splunk_event = { 54 | 'host' => hostname, 55 | 'time' => epoch, 56 | 'event' => event, 57 | } 58 | 59 | # create header here 60 | # header = "Authorization: Splunk #{splunk_token}" 61 | 62 | request = Net::HTTP::Post.new("https://#{splunk_server}:#{splunk_port}/services/collector") 63 | request.add_field('Authorization', "Splunk #{splunk_token}") 64 | request.add_field('Content-Type', 'application/json') 65 | request.body = splunk_event.to_json 66 | 67 | client = Net::HTTP.new(splunk_server, splunk_port) 68 | 69 | client.use_ssl = true 70 | client.verify_mode = OpenSSL::SSL::VERIFY_NONE 71 | 72 | client.request(request) 73 | -------------------------------------------------------------------------------- /tasks/examples/bolt_result.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "This task submits a bolt task result to Splunk's HEC", 3 | "input_method": "stdin", 4 | "remote": true, 5 | "parameters": { 6 | "result": { 7 | "description": "The individual result from a result set", 8 | "type": "Hash" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /tasks/examples/bolt_result.rb: -------------------------------------------------------------------------------- 1 | #!/opt/puppetlabs/bolt/bin/ruby 2 | 3 | require 'yaml' 4 | require 'json' 5 | require 'date' 6 | require 'net/https' 7 | require 'uri' 8 | 9 | params = JSON.parse(STDIN.read) 10 | 11 | def make_error(msg) 12 | error = { 13 | '_error' => { 14 | 'kind' => 'execution error', 15 | 'msg' => msg, 16 | 'details' => {}, 17 | }, 18 | } 19 | error 20 | end 21 | 22 | target = params['_target'] 23 | 24 | splunk_server = target['hostname'] 25 | splunk_token = target['token'] 26 | 27 | splunk_port = target['port'] || '8088' 28 | 29 | result = params['result'] 30 | 31 | puts result 32 | 33 | # facts = params['facts'] 34 | 35 | # now we can create the event with the timestamp from the report 36 | # time = DateTime.parse(report['time']) 37 | # epoch = time.strftime('%Q').to_str.insert(-4, '.') 38 | 39 | uri = URI(result['target']) 40 | 41 | host = uri.host 42 | 43 | result['event_type'] = 'bolt_result' 44 | 45 | splunk_event = { 46 | 'host' => host, 47 | 'event' => result, 48 | } 49 | 50 | # create header here 51 | # header = "Authorization: Splunk #{splunk_token}" 52 | 53 | request = Net::HTTP::Post.new("https://#{splunk_server}:#{splunk_port}/services/collector") 54 | request.add_field('Authorization', "Splunk #{splunk_token}") 55 | request.add_field('Content-Type', 'application/json') 56 | request.body = splunk_event.to_json 57 | 58 | client = Net::HTTP.new(splunk_server, splunk_port) 59 | 60 | client.use_ssl = true 61 | client.verify_mode = OpenSSL::SSL::VERIFY_NONE 62 | 63 | client.request(request) 64 | -------------------------------------------------------------------------------- /tasks/examples/cleanup_tokens.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Revokes the tokens generated by the Splunk App Puppet Report Viewer", 3 | "input_method": "environment", 4 | "parameters": { 5 | "username": { 6 | "description": "Username configured in Puppet Report Viewer", 7 | "type": "String" 8 | } 9 | } 10 | } -------------------------------------------------------------------------------- /tasks/examples/cleanup_tokens.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | declare -x PUPPET='/opt/puppetlabs/bin/puppet' 4 | declare -x CURL='/bin/curl' 5 | 6 | SSLDIR=$($PUPPET config print ssldir --section master) 7 | CERTNAME=$($PUPPET config print certname --section master) 8 | 9 | USERNAME="$PT_username" 10 | 11 | $CURL -X DELETE "https://$CERTNAME:4433/rbac-api/v2/tokens" \ 12 | --tlsv1 \ 13 | --cacert $SSLDIR/certs/ca.pem \ 14 | --cert $SSLDIR/certs/$CERTNAME.pem \ 15 | --key $SSLDIR/private_keys/$CERTNAME.pem \ 16 | -d "{\"revoke_tokens_by_usernames\": [\"$USERNAME\"]}" -------------------------------------------------------------------------------- /templates/settings.yaml.epp: -------------------------------------------------------------------------------- 1 | # managed by splunk_hec module 2 | --- 3 | <% if $splunk_hec::url { -%> 4 | "url" : "<%= $splunk_hec::url %>" 5 | <% } -%> 6 | "facts.allowlist" : 7 | <% $splunk_hec::facts_allowlist.each |$fact| {-%> 8 | - <%= $fact %> 9 | <% } -%> 10 | <% unless $splunk_hec::facts_blocklist == undef { -%> 11 | "facts.blocklist" : 12 | <% [$splunk_hec::facts_blocklist].flatten.each |$fact| { -%> 13 | - <%= $fact %> 14 | <% } -%> 15 | <% } -%> 16 | <% if $splunk_hec::pe_console { -%> 17 | "pe_console" : "<%= $splunk_hec::pe_console %>" 18 | <% } -%> 19 | <% if $splunk_hec::timeout { -%> 20 | "timeout" : "<%= $splunk_hec::timeout %>" 21 | <% } -%> 22 | <% if $splunk_hec::ssl_ca { -%> 23 | "ssl_ca" : "<%= $splunk_hec::ssl_ca %>" 24 | <% } -%> 25 | <% if $splunk_hec::include_system_cert_store { -%> 26 | "include_system_cert_store" : "<%= $splunk_hec::include_system_cert_store %>" 27 | <% } -%> 28 | <% if $splunk_hec::record_event { -%> 29 | "record_event" : "<%= $splunk_hec::record_event %>" 30 | <% } -%> 31 | <% if $splunk_hec::url_summary { -%> 32 | "url_summary" : "<%= $splunk_hec::url_summary %>" 33 | <% } -%> 34 | <% if $splunk_hec::url_facts { -%> 35 | "url_facts" : "<%= $splunk_hec::url_facts %>" 36 | <% } -%> 37 | <% if $splunk_hec::url_metrics { -%> 38 | "url_metrics" : "<%= $splunk_hec::url_metrics %>" 39 | <% } -%> 40 | <% if $splunk_hec::url_events { -%> 41 | "url_events" : "<%= $splunk_hec::url_events %>" 42 | <% } -%> 43 | <% if $splunk_hec::include_logs_status { -%> 44 | "include_logs_status" : 45 | <% $splunk_hec::include_logs_status.each |$status| {-%> 46 | - <%= $status %> 47 | <% } -%> 48 | <% } -%> 49 | <% if $splunk_hec::include_logs_catalog_failure { -%> 50 | "include_logs_catalog_failure" : "<%= $splunk_hec::include_logs_catalog_failure %>" 51 | <% } -%> 52 | <% if $splunk_hec::include_logs_corrective_change { -%> 53 | "include_logs_corrective_change" : "<%= $splunk_hec::include_logs_corrective_change %>" 54 | <% } -%> 55 | <% if $splunk_hec::include_resources_status { -%> 56 | "include_resources_status" : 57 | <% $splunk_hec::include_resources_status.each |$status| {-%> 58 | - <%= $status %> 59 | <% } -%> 60 | <% } -%> 61 | <% if $splunk_hec::include_resources_corrective_change { -%> 62 | "include_resources_corrective_change" : "<%= $splunk_hec::include_resources_corrective_change %>" 63 | <% } -%> 64 | <% if $splunk_hec::summary_resources_format { -%> 65 | "summary_resources_format" : "<%= $splunk_hec::summary_resources_format %>" 66 | <% } -%> 67 | <% if $splunk_hec::disabled { -%> 68 | "disabled" : "<%= $splunk_hec::disabled %>" 69 | <% } -%> 70 | <% if $splunk_hec::only_changes { -%> 71 | "only_changes" : "<%= $splunk_hec::only_changes %>" 72 | <% } -%> 73 | <% if 
$splunk_hec::events_reporting_enabled { -%> 74 | "events_reporting_enabled" : "<%= $splunk_hec::events_reporting_enabled %>" 75 | "event_types" : "<%= $splunk_hec::event_types %>" 76 | <% } -%> 77 | <% if $splunk_hec::orchestrator_data_filter { -%> 78 | "orchestrator_data_filter" : 79 | <% $splunk_hec::orchestrator_data_filter.each |$subset| {-%> 80 | - <%= $subset %> 81 | <% } -%> 82 | <% } -%> 83 | <% if $splunk_hec::rbac_data_filter { -%> 84 | "rbac_data_filter" : 85 | <% $splunk_hec::rbac_data_filter.each |$subset| {-%> 86 | - <%= $subset %> 87 | <% } -%> 88 | <% } -%> 89 | <% if $splunk_hec::classifier_data_filter { -%> 90 | "classifier_data_filter" : 91 | <% $splunk_hec::classifier_data_filter.each |$subset| {-%> 92 | - <%= $subset %> 93 | <% } -%> 94 | <% } -%> 95 | <% if $splunk_hec::pe_console_data_filter { -%> 96 | "pe-console_data_filter" : 97 | <% $splunk_hec::pe_console_data_filter.each |$subset| {-%> 98 | - <%= $subset %> 99 | <% } -%> 100 | <% } -%> 101 | <% if $splunk_hec::code_manager_data_filter { -%> 102 | "code-manager_data_filter" : 103 | <% $splunk_hec::code_manager_data_filter.each |$subset| {-%> 104 | - <%= $subset %> 105 | <% } -%> 106 | <% } -%> 107 | <% if $facts['fips_enabled'] { -%> 108 | "fips_enabled" : "true" 109 | "certificate_revocation" : <%= $splunk_hec::fips_crl_check %> 110 | "verify_peer" : <%= $splunk_hec::fips_verify_peer %> 111 | <% } -%> 112 | -------------------------------------------------------------------------------- /templates/splunk_hec_routes.yaml.epp: -------------------------------------------------------------------------------- 1 | # managed by puppet splunk_hec module 2 | --- 3 | master: 4 | facts: 5 | terminus: "<%= $splunk_hec::facts_terminus %>" 6 | cache: "<%= $splunk_hec::facts_cache_terminus %>" 7 | -------------------------------------------------------------------------------- /templates/util_splunk_hec.erb: -------------------------------------------------------------------------------- 1 | #!/opt/puppetlabs/puppet/bin/ruby 2 | 3 | require 'facter' 4 | require 'fileutils' 5 | require 'net/http' 6 | require 'net/https' 7 | require 'uri' 8 | require 'yaml' 9 | require 'json' 10 | require 'time' 11 | 12 | # Rails has deep merge, but pure ruby does not. So we need to implement it ourselves. 13 | class ::Hash 14 | def deep_merge(second) 15 | merger = proc { |key, v1, v2| Hash === v1 && Hash === v2 ? 
v1.merge(v2, &merger) : v2 } 16 | self.merge(second, &merger) 17 | end 18 | end 19 | 20 | @confdir = '/etc/puppetlabs/puppet/splunk_hec' 21 | 22 | INDICES = { 23 | 'orchestrator' => 'puppet:jobs', 24 | 'rbac' => 'puppet:activities_rbac', 25 | 'classifier' => 'puppet:activities_classifier', 26 | 'pe-console' => 'puppet:activities_console', 27 | 'code-manager' => 'puppet:activities_code_manager' 28 | } 29 | 30 | @settings_file = "#{@confdir}/settings.yaml" 31 | @secrets_file = "#{@confdir}/hec_secrets.yaml" 32 | 33 | def settings 34 | @settings ||= YAML.load_file(@settings_file) 35 | end 36 | 37 | def secrets 38 | @secrets ||= YAML.load_file(@secrets_file) 39 | end 40 | 41 | def build_ca_store(cert_store_file_path) 42 | store = OpenSSL::X509::Store.new 43 | store.add_file(cert_store_file_path) 44 | store 45 | end 46 | 47 | def create_http(source_type) 48 | splunk_url = get_splunk_url(source_type) 49 | @uri = URI.parse(splunk_url) 50 | timeout = settings['timeout'] || '5' 51 | http = Net::HTTP.new(@uri.host, @uri.port) 52 | http.open_timeout = timeout.to_i 53 | http.read_timeout = timeout.to_i 54 | http.use_ssl = @uri.scheme == 'https' 55 | if http.use_ssl? 56 | if (settings['ssl_ca'] && !settings['ssl_ca'].empty?) || settings['include_system_cert_store'] 57 | ssl_ca_file = if settings['ssl_ca'] 58 | File.join(@confdir, settings['ssl_ca']) 59 | elsif !settings['ssl_ca'] && Facter.value(:os)['family'].eql?('RedHat') 60 | '/etc/ssl/certs/ca-bundle.crt' 61 | elsif !settings['ssl_ca'] && Facter.value(:os)['family'].eql?('Suse') 62 | '/etc/ssl/ca-bundle.pem' 63 | else 64 | '/etc/ssl/certs/ca-certificates.crt' 65 | end 66 | message = if settings['ssl_ca'] 67 | "Puppet will verify #{splunk_url} SSL identity against Splunk HEC SSL #{settings['ssl_ca']}" 68 | else 69 | "Puppet will verify #{splunk_url} SSL identity against system store" 70 | end 71 | 72 | unless File.exist?(ssl_ca_file) && !File.zero?(ssl_ca_file) 73 | raise Puppet::Error, 74 | "CA file #{ssl_ca_file} is an empty file or does not exist" 75 | end 76 | 77 | ssl_ca = build_ca_store(ssl_ca_file) 78 | http.cert_store = ssl_ca 79 | http.verify_mode = OpenSSL::SSL::VERIFY_PEER 80 | puts message 81 | else 82 | message = "Puppet will NOT verify #{splunk_url} SSL identity" 83 | puts message 84 | http.verify_mode = OpenSSL::SSL::VERIFY_NONE 85 | end 86 | end 87 | 88 | http 89 | end 90 | 91 | def submit_request(body) 92 | # we want users to be able to provide different tokens per sourcetype if they want 93 | source_type = 'events' 94 | token_name = "token_#{source_type}" 95 | http = create_http(source_type) 96 | token = secrets[token_name] || secrets['token'] || raise('Must provide token parameter to splunk class') 97 | req = Net::HTTP::Post.new(@uri.path.to_str) 98 | req.add_field('Authorization', "Splunk #{token}") 99 | req.add_field('Content-Type', 'application/json') 100 | req.content_type = 'application/json' 101 | # req.body = body.to_json 102 | req.body = body 103 | http.request(req) 104 | end 105 | 106 | def get_splunk_url(source_type) 107 | url_name = "url_#{source_type}" 108 | settings[url_name] || settings['url'] || raise('Must provide url parameter to splunk class') 109 | 110 | end 111 | 112 | def pe_console 113 | settings['pe_console'] || certname 114 | end 115 | 116 | def record_event 117 | result = if settings['record_event'] == 'true' 118 | true 119 | else 120 | false 121 | end 122 | result 123 | end 124 | 125 | # standard function to make sure we're using the same time format our sourcetypes are set to parse 126 | def 
sourcetypetime(time, duration = 0) 127 | parsed_time = time.is_a?(String) ? Time.parse(time) : time 128 | total = Time.parse((parsed_time + duration).iso8601(3)) 129 | '%10.3f' % total.to_f 130 | end 131 | 132 | def extract_events(events_data, index, selectors) 133 | events_collector = [] 134 | 135 | return unless !events_data['events'].nil? 136 | 137 | events_data['events'].map do |event| 138 | collector = {} 139 | data = {} 140 | 141 | selectors.each do |selector| 142 | build_data(collector, event, selector.split('.')) 143 | data = data.deep_merge(collector) 144 | end unless selectors.nil? 145 | 146 | events_collector << { 147 | 'time' => sourcetypetime(event['created_timestamp'] || event['timestamp']), 148 | 'host' => settings['pe_console'], 149 | 'sourcetype' => index, 150 | 'event' => data.empty? ? event : data 151 | }.to_json 152 | end 153 | 154 | "#{events_collector.join("\n")}\n" 155 | end 156 | 157 | def build_data(final_data, event, path) 158 | if path.count == 1 159 | if event[path[0]].nil? 160 | puts "ERROR with last FILTER KEY; Check your filter parameter" 161 | end 162 | final_data[path[0]] = event[path[0]] 163 | final_data 164 | else 165 | begin 166 | dig_result = event.dig(*path[0,1]) 167 | final_data[path[0]] = {} 168 | build_data(final_data[path[0]], dig_result, path[1..-1]) 169 | rescue => e 170 | puts "Potential ERROR with middle FILTER KEY: #{e.backtrace}" 171 | end 172 | end 173 | end 174 | --------------------------------------------------------------------------------