├── .dockerignore
├── .github
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   ├── bug-report.md
│   │   ├── config.yml
│   │   ├── feature-request.md
│   │   └── story.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── ct.yaml
│   ├── renovate.json5
│   └── workflows
│       ├── changelog.yml
│       ├── e2e.yaml
│       ├── nightly.yml
│       ├── push_pr.yml
│       ├── release-chart.yml
│       ├── release-integration.yml
│       ├── repolinter.yml
│       ├── security.yml
│       └── trigger_release.yml
├── .gitignore
├── .golangci.yml
├── .trivyignore
├── CHANGELOG.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── THIRD_PARTY_NOTICES.md
├── assets
│   └── licence
│       ├── THIRD_PARTY_NOTICES.md.tmpl
│       ├── overrides
│       └── rules.json
├── charts
│   ├── internal
│   │   └── e2e-resources
│   │       ├── Chart.yaml
│   │       ├── README.md
│   │       ├── templates
│   │       │   └── deployment.yaml
│   │       └── values.yaml
│   └── nri-kube-events
│       ├── Chart.lock
│       ├── Chart.yaml
│       ├── README.md
│       ├── README.md.gotmpl
│       ├── ci
│       │   ├── test-bare-minimum-values.yaml
│       │   ├── test-custom-attributes-as-map.yaml
│       │   ├── test-custom-attributes-as-string.yaml
│       │   └── test-values.yaml
│       ├── templates
│       │   ├── NOTES.txt
│       │   ├── _helpers.tpl
│       │   ├── _helpers_compatibility.tpl
│       │   ├── agent-configmap.yaml
│       │   ├── clusterrole.yaml
│       │   ├── clusterrolebinding.yaml
│       │   ├── configmap.yaml
│       │   ├── deployment.yaml
│       │   ├── secret.yaml
│       │   └── serviceaccount.yaml
│       ├── tests
│       │   ├── agent_configmap_test.yaml
│       │   ├── configmap_test.yaml
│       │   ├── deployment_test.yaml
│       │   ├── images_test.yaml
│       │   └── security_context_test.yaml
│       └── values.yaml
├── cla.md
├── cmd
│   └── nri-kube-events
│       ├── config.go
│       ├── config_test.go
│       └── main.go
├── deploy
│   └── local.yaml.example
├── e2e
│   ├── 1_28-exceptions.yml
│   ├── 1_29-exceptions.yml
│   ├── 1_30-exceptions.yml
│   ├── 1_31-exceptions.yml
│   ├── 1_32-exceptions.yml
│   ├── README.md
│   ├── e2e-tests.sh
│   ├── e2e-values.yml
│   └── test-specs.yml
├── go.mod
├── go.sum
├── pkg
│   ├── common
│   │   ├── objects.go
│   │   ├── utils.go
│   │   └── utils_test.go
│   ├── descriptions
│   │   └── router.go
│   ├── events
│   │   ├── router.go
│   │   └── router_test.go
│   ├── router
│   │   └── config.go
│   └── sinks
│       ├── new_relic_infra.go
│       ├── new_relic_infra_test.go
│       ├── sinks.go
│       ├── stdout.go
│       └── testdata
│           └── event_data.json
├── skaffold.yaml
├── skaffold_build.sh
└── test
    └── integration
        ├── integration_test.go
        └── test_agent_sink.go
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git/*
2 | Dockerfile
3 | deploy
4 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This is a comment.
2 | # Each line is a file pattern followed by one or more owners.
3 |
4 | # These owners will be the default owners for everything in
5 | # the repo, unless a later match takes precedence.
6 |
7 | * @newrelic/k8s-agents
8 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | ^^ Provide a general summary of the issue in the title above. ^^
11 |
12 | ## Description
13 | Describe the problem you're encountering.
14 | TIP: Do NOT share sensitive information, whether personal, proprietary, or otherwise!
15 |
16 | ## Expected Behavior
17 | Tell us what you expected to happen.
18 |
19 | ## [Troubleshooting](https://discuss.newrelic.com/t/troubleshooting-frameworks/108787) or [NR Diag](https://docs.newrelic.com/docs/using-new-relic/cross-product-functions/troubleshooting/new-relic-diagnostics) results
20 | Provide any other relevant log data.
21 | TIP: Scrub logs and diagnostic information for sensitive information
22 |
23 | ## Steps to Reproduce
24 | Please be as specific as possible.
25 | TIP: Link a sample application that demonstrates the issue.
26 |
27 | ## Your Environment
28 | Include as many relevant details about your environment as possible including the running version of New Relic software and any relevant configurations.
29 |
30 | ## Additional context
31 | Add any other context about the problem here. For example, relevant community posts or support tickets.
32 |
33 | ## For Maintainers Only or Hero Triaging this bug
34 | *Suggested Priority (P1,P2,P3,P4,P5):*
35 | *Suggested T-Shirt size (S, M, L, XL, Unknown):*
36 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Troubleshooting
4 | url: https://github.com/newrelic/nri-kube-events/blob/main/README.md#support
5 | about: Check out the README for troubleshooting directions
6 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: 'feature request'
6 | assignees: ''
7 | priority: ''
8 | ---
9 | ### Description
10 | _A clear and concise description of the feature you want or need_
11 |
12 | ### Acceptance Criteria
13 | _What tasks need to be accomplished to achieve the goal?_
14 |
15 | ### Describe Alternatives
16 | _A clear and concise description of any alternative solutions or features you've considered_
17 | _Are there examples you could link us to?_
18 |
19 | ### Dependencies
20 | _Do any other teams or parts of the New Relic product need to be considered?_
21 | _Some common areas: UI, collector, documentation_
22 |
23 | ### Additional context
24 | _What else should we know about this story that might not fit into the other categories?_
25 |
26 | ### Estimates
27 | _Please provide initial t-shirt size. S = 1-3 days, M = 3-5 days (1 week), L = 1-2 weeks (1 sprint)_
28 |
29 | ## For Maintainers Only or Hero Triaging this bug
30 | *Suggested Priority (P1,P2,P3,P4,P5):*
31 | *Suggested T-Shirt size (S, M, L, XL, Unknown):*
32 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/story.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Story
3 | about: Issue describing development work to fulfill a feature request
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 | priority: ''
8 | ---
9 | ### Description
10 | _What's the goal of this unit of work? What is included? What isn't included?_
11 |
12 | ### Acceptance Criteria
13 | _What tasks need to be accomplished to achieve the goal?_
14 |
15 | ### Design Consideration/Limitations
16 | _Why is this the route we should take to achieve our goal?_
17 | _What can't be achieved within this story?_
18 |
19 | ### Dependencies
20 | _Do any other teams or parts of the New Relic product need to be considered?_
21 | _Some common areas: UI, collector, documentation_
22 |
23 | ### Additional context
24 | _What else should we know about this story that might not fit into the other categories?_
25 |
26 | ### Estimates
27 | _Please provide initial t-shirt size. S = 1-3 days, M = 3-5 days (1 week), L = 1-2 weeks (1 sprint)_
28 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 |
4 | ## Type of change
5 |
6 |
7 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
8 | - [ ] New feature / enhancement (non-breaking change which adds functionality)
9 | - [ ] Security fix
10 | - [ ] Bug fix (non-breaking change which fixes an issue)
11 |
12 | ## Checklist:
13 |
14 |
15 | - [ ] Add changelog entry following the [contributing guide](../CONTRIBUTING.md#pull-requests)
16 | - [ ] Documentation has been updated
17 | - [ ] This change requires changes in testing:
18 | - [ ] unit tests
19 | - [ ] E2E tests
20 |
--------------------------------------------------------------------------------
/.github/ct.yaml:
--------------------------------------------------------------------------------
1 | # Chart linter defaults to `master` branch so we need to specify this as the default branch
2 | # or `ct` will fail with a not-so-helpful error that says:
3 | # "Error linting charts: Error identifying charts to process: Error running process: exit status 128"
4 | target-branch: main
5 |
6 | chart-repos:
7 | - newrelic=https://helm-charts.newrelic.com
8 |
9 | # Charts will be released manually.
10 | check-version-increment: false
11 |
12 |
--------------------------------------------------------------------------------
/.github/renovate.json5:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "github>newrelic/k8s-agents-automation:renovate-base.json5"
4 | ],
5 | }
6 |
--------------------------------------------------------------------------------
/.github/workflows/changelog.yml:
--------------------------------------------------------------------------------
1 | # This action requires that any PR should touch at
2 | # least one CHANGELOG file.
3 |
4 | name: changelog
5 |
6 | on:
7 | pull_request:
8 | types: [opened, synchronize, reopened, labeled, unlabeled]
9 |
10 | jobs:
11 | check-changelog:
12 | uses: newrelic/k8s-agents-automation/.github/workflows/reusable-changelog.yml@main
13 |
--------------------------------------------------------------------------------
/.github/workflows/e2e.yaml:
--------------------------------------------------------------------------------
1 | name: E2E over minikube
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches:
7 | - main
8 | - renovate/**
9 |
10 | jobs:
11 | e2eTests:
12 | name: Run E2E Tests
13 | # Do not run e2e tests if PR has skip-e2e label
14 | if: ${{ ! contains(github.event.pull_request.labels.*.name, 'ci/skip-e2e') }}
15 | runs-on: ubuntu-latest
16 | strategy:
17 | matrix:
18 | # Latest patch version can be found in https://kubernetes.io/releases/patch-releases/
19 | # Some versions might not be available yet in https://storage.googleapis.com/kubernetes-release/release/v1.X.Y/bin/linux/amd64/kubelet
20 | k8sVersion: ["v1.32.0", "v1.31.0", "v1.30.0", "v1.29.5", "v1.28.3"]
21 | cri: [ containerd ]
22 | steps:
23 | - name: Checkout repository
24 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
25 |
26 | - name: Setup Go
27 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
28 | with:
29 | go-version-file: 'go.mod'
30 |
31 | - name: Setup Minikube
32 | uses: manusa/actions-setup-minikube@b589f2d61bf96695c546929c72b38563e856059d # v2.14.0
33 | with:
34 | minikube version: v1.36.0
35 | kubernetes version: ${{ matrix.k8sVersion }}
36 | driver: docker
37 | start args: "--container-runtime=${{ matrix.cri }}"
38 |
39 | - name: Build and load Images
40 | run: |
41 | docker buildx build --load . --tag e2e/nri-kube-events:e2e
42 | minikube image load e2e/nri-kube-events:e2e
43 |
44 | - name: Setup Helm
45 | # First command avoids getting following warning:
46 | # "WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /home/runner/.kube/config"
47 | run: |
48 | chmod go-r /home/runner/.kube/config
49 | helm repo add nri-kube-events https://newrelic.github.io/nri-kube-events
50 | helm repo update
51 |
52 | - name: Select metrics exception file
53 | id: exceptions-version
54 | run: |
55 | MINOR=$(echo "${{ matrix.k8sVersion }}" | sed -n 's/v\([0-9]\)\.\([0-9][0-9]*\)\.[0-9].*/\1_\2/p')
56 |
57 | echo $MINOR
58 | echo "exceptions=$MINOR" >> $GITHUB_OUTPUT
59 |
60 | - name: Run E2E tests
61 | uses: newrelic/newrelic-integration-e2e-action@c548a33a0c2941a4db4899ded766923eb3178e0e # v1.12.0
62 | env:
63 | EXCEPTIONS_SOURCE_FILE: ${{ steps.exceptions-version.outputs.exceptions }}-exceptions.yml
64 | with:
65 | retry_seconds: 60
66 | retry_attempts: 5
67 | agent_enabled: false
68 | spec_path: e2e/test-specs.yml
69 | account_id: ${{ secrets.K8S_AGENTS_E2E_ACCOUNT_ID }}
70 | api_key: ${{ secrets.K8S_AGENTS_E2E_API_KEY }}
71 | license_key: ${{ secrets.K8S_AGENTS_E2E_LICENSE_KEY }}
72 |
--------------------------------------------------------------------------------
/.github/workflows/nightly.yml:
--------------------------------------------------------------------------------
1 | name: Nightly build
2 |
3 | on:
4 | schedule:
5 | - cron: "0 3 * * *"
6 | push:
7 | branches:
8 | - master
9 | - main
10 |
11 | env:
12 | ORIGINAL_REPO_NAME: ${{ github.event.repository.full_name }}
13 |
14 | jobs:
15 | release:
16 | name: Build and push docker images
17 | runs-on: ubuntu-latest
18 | steps:
19 | - uses: actions/checkout@v4
20 | - name: Build args
21 | run: |
22 | echo "DATE=`date`" >> $GITHUB_ENV
23 | - name: Set up QEMU
24 | uses: docker/setup-qemu-action@v3
25 | - name: Set up Docker Buildx
26 | uses: docker/setup-buildx-action@v3
27 | - uses: docker/login-action@v3
28 | with:
29 | username: ${{ secrets.K8S_AGENTS_DOCKERHUB_USERNAME }}
30 | password: ${{ secrets.K8S_AGENTS_DOCKERHUB_TOKEN }}
31 | - name: Build and push docker image
32 | uses: docker/build-push-action@v6
33 | with:
34 | build-args: |
35 | "COMMIT=${{ github.sha }}"
36 | "DATE=${{ env.DATE }}"
37 | "TAG=nightly"
38 | cache-from: type=gha
39 | cache-to: type=gha,mode=max
40 | context: .
41 | push: true
42 | platforms: |
43 | linux/amd64
44 | linux/arm64
45 | linux/arm
46 | tags: newrelic/nri-kube-events:nightly
47 |
48 | notify-failure:
49 | if: ${{ always() && failure() }}
50 | needs: [release]
51 | runs-on: ubuntu-latest
52 | steps:
53 | - name: Notify failure via Slack
54 | uses: archive/github-actions-slack@v2.10.1
55 | with:
56 | slack-bot-user-oauth-access-token: ${{ secrets.K8S_AGENTS_SLACK_TOKEN }}
57 | slack-channel: ${{ secrets.K8S_AGENTS_SLACK_CHANNEL }}
58 | slack-text: "❌ `${{ env.ORIGINAL_REPO_NAME }}`: <${{ github.server_url }}/${{ env.ORIGINAL_REPO_NAME }}/actions/runs/${{ github.run_id }}|'Nightly build' failed>."
59 |
--------------------------------------------------------------------------------
/.github/workflows/push_pr.yml:
--------------------------------------------------------------------------------
1 | name: Build, lint and test
2 | on:
3 | push:
4 | branches:
5 | - main
6 | - renovate/**
7 | pull_request:
8 | jobs:
9 | build:
10 | name: Build integration for
11 | runs-on: ubuntu-latest
12 | strategy:
13 | matrix:
14 | goos: [ linux ]
15 | goarch: [ amd64, arm64, arm ]
16 | steps:
17 | - uses: actions/checkout@v4
18 | - uses: actions/setup-go@v5
19 | with:
20 | go-version-file: 'go.mod'
21 | - name: Build integration
22 | env:
23 | GOOS: ${{ matrix.goos }}
24 | GOARCH: ${{ matrix.goarch }}
25 | run: |
26 | make compile
27 |
28 | chart-lint:
29 | name: Helm chart Lint
30 | runs-on: ubuntu-latest
31 | timeout-minutes: 10
32 | steps:
33 | - uses: actions/checkout@v4
34 | with:
35 | fetch-depth: 0
36 | - uses: helm/chart-testing-action@v2.7.0
37 |
38 | - name: Lint charts
39 | run: ct --config .github/ct.yaml lint --debug
40 |
41 | - name: Check for changed installable charts
42 | id: list-changed
43 | run: |
44 | changed=$(ct --config .github/ct.yaml list-changed)
45 | if [[ -n "$changed" ]]; then
46 | echo "changed=true" >> $GITHUB_OUTPUT
47 | fi
48 | - name: Run helm unit tests
49 | if: steps.list-changed.outputs.changed == 'true'
50 | run: |
51 | helm plugin install https://github.com/helm-unittest/helm-unittest --version=0.3.1
52 | for chart in $(ct --config .github/ct.yaml list-changed); do
53 | if [ -d "$chart/tests/" ]; then
54 | helm unittest $chart
55 | else
56 | echo "No unit tests found for $chart"
57 | fi
58 | done
59 |
60 | docker-build:
61 | name: Build docker image for integration tests
62 | runs-on: ubuntu-latest
63 | timeout-minutes: 10
64 | env:
65 | DOCKER_BUILDKIT: '1'
66 | steps:
67 | - uses: actions/checkout@v4
68 | with:
69 | fetch-depth: 0
70 | - name: Set up QEMU
71 | uses: docker/setup-qemu-action@v3
72 | - name: Set up Docker Buildx
73 | uses: docker/setup-buildx-action@v3
74 | - name: Build docker image
75 | uses: docker/build-push-action@v6
76 | with:
77 | cache-from: type=gha
78 | cache-to: type=gha,mode=max
79 | context: .
80 | outputs: type=docker,dest=nri-kube-events.tar
81 | platforms: |
82 | linux/amd64
83 | tags: e2e/nri-kube-events:test
84 | - name: Upload Image
85 | uses: actions/upload-artifact@v4
86 | with:
87 | name: nri-kube-events
88 | path: nri-kube-events.tar
89 | retention-days: 1
90 |
91 | static-analysis:
92 | name: Run all static analysis checks
93 | runs-on: ubuntu-latest
94 | steps:
95 | - uses: actions/checkout@v4
96 | - uses: actions/setup-go@v5
97 | with:
98 | go-version-file: 'go.mod'
99 | - uses: newrelic/newrelic-infra-checkers@v1
100 | with:
101 | golangci-lint-config: golangci-lint-limited
102 | # - name: Semgrep
103 | # uses: returntocorp/semgrep-action@v1
104 | # with:
105 | # auditOn: push
106 | - name: golangci-lint
107 | uses: golangci/golangci-lint-action@v8
108 | continue-on-error: ${{ github.event_name != 'pull_request' }}
109 | with:
110 | only-new-issues: true
111 | skip-pkg-cache: true
112 | skip-build-cache: true
113 |
114 | test:
115 | name: Run unit tests
116 | runs-on: ubuntu-latest
117 | needs: [ build ]
118 | steps:
119 | - uses: actions/checkout@v4
120 | - uses: actions/setup-go@v5
121 | with:
122 | go-version-file: 'go.mod'
123 | - name: Test
124 | run: make test-unit
125 | - name: Upload coverage to Codecov
126 | uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
127 | with:
128 | fail_ci_if_error: false
129 | env:
130 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
131 |
132 | test-integration:
133 | name: Run integration tests
134 | runs-on: ubuntu-latest
135 | needs: [ docker-build ]
136 | timeout-minutes: 10
137 | env:
138 | DOCKER_BUILDKIT: '1'
139 | strategy:
140 | max-parallel: 5
141 | fail-fast: false
142 | matrix:
143 | k8sVersion: ["v1.32.0", "v1.31.0", "v1.30.0", "v1.29.5", "v1.28.3"]
144 | cri: [ containerd ]
145 | steps:
146 | - uses: actions/checkout@v4
147 | with:
148 | fetch-depth: 0
149 | - uses: actions/setup-go@v5
150 | with:
151 | go-version-file: 'go.mod'
152 | - name: Setup Minikube
153 | uses: manusa/actions-setup-minikube@v2.14.0
154 | with:
155 | minikube version: v1.36.0
156 | kubernetes version: ${{ matrix.k8sVersion }}
157 | driver: docker
158 | github token: ${{ secrets.GITHUB_TOKEN }}
159 | start args: "--container-runtime=${{ matrix.cri }}"
160 |       - name: Run integration tests
161 | run: make test-integration
162 | - name: Download Image
163 | uses: actions/download-artifact@v4
164 | with:
165 | name: nri-kube-events
166 | - name: Load image for chart testing
167 | run: |
168 | minikube image load nri-kube-events.tar
169 | minikube image ls
170 | kubectl create ns ct
171 | - uses: actions/setup-python@v5
172 | with:
173 | python-version: '3.13'
174 | - uses: helm/chart-testing-action@v2.7.0
175 | - name: Test install charts
176 | run: ct install --namespace ct --config .github/ct.yaml --debug
177 | - name: Test upgrade charts
178 | run: ct install --namespace ct --config .github/ct.yaml --debug --upgrade
179 |
--------------------------------------------------------------------------------
/.github/workflows/release-chart.yml:
--------------------------------------------------------------------------------
1 | name: Release nri-kube-events chart
2 | on:
3 | push:
4 | branches:
5 | - main
6 |
7 | jobs:
8 | release-chart:
9 | permissions:
10 | contents: write
11 | uses: newrelic/k8s-agents-automation/.github/workflows/reusable-release-chart.yml@main
12 | secrets:
13 | gh_token: "${{ secrets.GITHUB_TOKEN }}"
14 | slack_channel: ${{ secrets.K8S_AGENTS_SLACK_CHANNEL }}
15 | slack_token: ${{ secrets.K8S_AGENTS_SLACK_TOKEN }}
16 |
--------------------------------------------------------------------------------
/.github/workflows/release-integration.yml:
--------------------------------------------------------------------------------
1 | name: Pre-release and Release pipeline
2 |
3 | on:
4 | release:
5 | types: [prereleased, released]
6 | tags:
7 | - 'v*'
8 | workflow_dispatch:
9 |
10 | jobs:
11 | release-integration:
12 | permissions:
13 | contents: write
14 | pull-requests: write
15 | uses: newrelic/k8s-agents-automation/.github/workflows/reusable-release-integration.yml@main
16 | with:
17 | repo_name: nri-kube-events
18 | artifact_path: bin/
19 | docker_image_name: newrelic/nri-kube-events
20 | chart_directory: charts/nri-kube-events
21 | secrets:
22 | dockerhub_username: ${{ secrets.K8S_AGENTS_DOCKERHUB_USERNAME }}
23 | dockerhub_token: ${{ secrets.K8S_AGENTS_DOCKERHUB_TOKEN }}
24 | bot_token: ${{ secrets.K8S_AGENTS_BOT_TOKEN }}
25 | slack_channel: ${{ secrets.K8S_AGENTS_SLACK_CHANNEL }}
26 | slack_token: ${{ secrets.K8S_AGENTS_SLACK_TOKEN }}
27 |
--------------------------------------------------------------------------------
/.github/workflows/repolinter.yml:
--------------------------------------------------------------------------------
1 | # NOTE: This file should always be named `repolinter.yml` to allow
2 | # workflow_dispatch to work properly
3 | name: Repolinter Action
4 |
5 | # NOTE: This workflow will ONLY check the default branch!
6 | # Currently there is no elegant way to specify the default
7 | # branch in the event filtering, so branches are instead
8 | # filtered in the "Test Default Branch" step.
9 | on: [push, workflow_dispatch]
10 |
11 | jobs:
12 | repolint:
13 | name: Run Repolinter
14 | runs-on: ubuntu-latest
15 | steps:
16 | - name: Test Default Branch
17 | id: default-branch
18 | uses: actions/github-script@v7
19 | with:
20 | script: |
21 | const data = await github.rest.repos.get(context.repo)
22 | return data.data && data.data.default_branch === context.ref.split('/').slice(-1)[0]
23 | - name: Checkout Self
24 | if: ${{ steps.default-branch.outputs.result == 'true' }}
25 | uses: actions/checkout@v4
26 | - name: Run Repolinter
27 | if: ${{ steps.default-branch.outputs.result == 'true' }}
28 | uses: newrelic/repolinter-action@v1
29 | with:
30 | config_url: https://raw.githubusercontent.com/newrelic/.github/main/repolinter-rulesets/community-plus.yml
31 | output_type: issue
32 |
--------------------------------------------------------------------------------
/.github/workflows/security.yml:
--------------------------------------------------------------------------------
1 | name: Security Scan
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | - main
8 | - renovate/**
9 | pull_request:
10 | schedule:
11 | - cron: "0 3 * * *"
12 |
13 | jobs:
14 | trivy:
15 | permissions:
16 | contents: read # for actions/checkout to fetch code
17 | security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
18 | uses: newrelic/k8s-agents-automation/.github/workflows/reusable-security.yaml@main
19 | secrets:
20 | slack_channel: ${{ secrets.K8S_AGENTS_SLACK_CHANNEL }}
21 | slack_token: ${{ secrets.K8S_AGENTS_SLACK_TOKEN }}
22 |
--------------------------------------------------------------------------------
/.github/workflows/trigger_release.yml:
--------------------------------------------------------------------------------
1 | name: Trigger release creation
2 |
3 | # This workflow triggers the creation of a release, with the changelog and release notes generated by the release toolkit.
4 | # This workflow should only be triggered from the default branch.
5 | # For more details about how to release follow https://github.com/newrelic/coreint-automation/blob/main/docs/release_runbook.md
6 |
7 | on:
8 | workflow_dispatch:
9 | schedule:
10 | - cron: "0 12 * * 1" # Monday at 12pm UTC or 5am PT
11 |
12 | jobs:
13 | trigger-release:
14 | uses: newrelic/k8s-agents-automation/.github/workflows/reusable-trigger-release.yml@main
15 | with:
16 | bot_email: '${{ vars.K8S_AGENTS_BOT_EMAIL }}'
17 | bot_name: '${{ vars.K8S_AGENTS_BOT_NAME }}'
18 | secrets:
19 | bot_token: ${{ secrets.K8S_AGENTS_BOT_TOKEN }}
20 | slack_channel: ${{ secrets.K8S_AGENTS_SLACK_CHANNEL }}
21 | slack_token: ${{ secrets.K8S_AGENTS_SLACK_TOKEN }}
22 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | cmd/nri-kube-events/config.yaml
3 | bin/
4 | deploy/local.yaml
5 | .DS_STORE
6 |
7 | # Downloaded chart dependencies
8 | **/charts/*.tgz
9 |
10 | # Release toolkit
11 | CHANGELOG.partial.md
12 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | linters:
3 | default: none
4 | enable:
5 | - bodyclose
6 | - copyloopvar
7 | - depguard
8 | - dogsled
9 | - dupl
10 | - errcheck
11 | - errorlint
12 | - exhaustive
13 | - gocognit
14 | - gocritic
15 | - gocyclo
16 | - goprintffuncname
17 | - gosec
18 | - govet
19 | - ineffassign
20 | - misspell
21 | - mnd
22 | - nestif
23 | - nilerr
24 | - noctx
25 | - prealloc
26 | - revive
27 | - rowserrcheck
28 | - staticcheck
29 | - unconvert
30 | - unparam
31 | - unused
32 | - whitespace
33 | settings:
34 | dupl:
35 | threshold: 100
36 | gocyclo:
37 | min-complexity: 20
38 | govet:
39 | enable:
40 | - shadow
41 | - fieldalignment
42 | misspell:
43 | locale: US
44 | mnd:
45 | checks:
46 | - argument
47 | - case
48 | - condition
49 | - return
50 | exclusions:
51 | generated: lax
52 | presets:
53 | - comments
54 | - common-false-positives
55 | - legacy
56 | - std-error-handling
57 | paths:
58 | - third_party$
59 | - builtin$
60 | - examples$
61 | formatters:
62 | enable:
63 | - gofmt
64 | - goimports
65 | settings:
66 | goimports:
67 | local-prefixes:
68 | - github.com/newrelic/nri-kube-events
69 | exclusions:
70 | generated: lax
71 | paths:
72 | - third_party$
73 | - builtin$
74 | - examples$
75 |
--------------------------------------------------------------------------------
/.trivyignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/newrelic/nri-kube-events/7e9442bace7a5123df70ed7d898a1bb50d77216e/.trivyignore
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Contributions are always welcome. Before contributing, please read the
4 | [code of conduct](https://opensource.newrelic.com/code-of-conduct/) and [search the issue tracker](../../issues); your issue may have already been discussed or fixed in `main`. To contribute,
5 | [fork](https://help.github.com/articles/fork-a-repo/) this repository, commit your changes, and [send a Pull Request](https://help.github.com/articles/using-pull-requests/).
6 |
7 | Note that our [code of conduct](./CODE_OF_CONDUCT.md) applies to all platforms and venues related to this project; please follow it in all your interactions with the project and its participants.
8 |
9 | ## Feature Requests
10 |
11 | Feature requests should be submitted in the [Issue tracker](../../issues), with a description of the expected behavior & use case, where they’ll remain closed until sufficient interest, [e.g. :+1: reactions](https://help.github.com/articles/about-discussions-in-issues-and-pull-requests/), has been [shown by the community](../../issues?q=label%3A%22votes+needed%22+sort%3Areactions-%2B1-desc).
12 | Before submitting an Issue, please search for similar ones in the
13 | [closed issues](../../issues?q=is%3Aissue+is%3Aclosed+label%3Aenhancement).
14 |
15 | ## Pull Requests
16 |
17 | 1. Ensure any install or build dependencies are removed before the end of the layer when doing a build.
18 | 2. Increase the version numbers in any examples files and the README.md to the new version that this Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/).
19 | 3. Add an entry as an unordered list item to the CHANGELOG under the `Unreleased` section, under an L3 header that specifies the type of your PR. If there is no L3 header for your type of PR already in the Unreleased section, add a new one. Include your GitHub handle and a link to your PR in the entry. Here's an example of how it should look:
20 | ```md
21 | ## Unreleased
22 |
23 | ### bugfix
24 | - Fix some bug in some file @yourGithubHandle [#123](linkToThisPR)
25 | ```
26 |
27 | - Here are the accepted L3 headers (case sensitive)
28 | + `breaking`
29 | + `security`
30 | + `enhancement`
31 | + `bugfix`
32 | + `dependency`
33 |
34 | - You can skip the changelog requirement by using the "Skip Changelog" label if your pull request is only updating files related to the CI/CD process or minor doc changes.
35 |
36 | 4. You may merge the Pull Request in once you have the sign-off of one other developer, or if you do not have permission to do that, you may request the other reviewer to merge it for you.
37 |
38 | ## Contributor License Agreement
39 |
40 | Keep in mind that when you submit your Pull Request, you'll need to sign the CLA via the click-through using CLA-Assistant. If you'd like to execute our corporate CLA, or if you have any questions, please drop us an email at opensource@newrelic.com.
41 |
42 | For more information about CLAs, please check out Alex Russell’s excellent post,
43 | [“Why Do I Need to Sign This?”](https://infrequently.org/2008/06/why-do-i-need-to-sign-this/).
44 |
45 | ## Slack
46 |
47 | We host a public Slack with a dedicated channel for contributors and maintainers of open source projects hosted by New Relic. If you are contributing to this project, you're welcome to request access to the #oss-contributors channel in the newrelicusers.slack.com workspace. To request access, see https://join.slack.com/t/newrelicusers/shared_invite/zt-1ayj69rzm-~go~Eo1whIQGYnu3qi15ng.
48 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM --platform=$BUILDPLATFORM golang:1.24-alpine AS build
2 |
3 | # Set by docker automatically
4 | ARG TARGETOS TARGETARCH
5 |
6 | # Need to be set manually
7 | ARG TAG=dev
8 | ARG COMMIT=unknown
9 | ARG DATE="Sun Jan 1 00:00:00 UTC 2023"
10 |
11 | ARG GOOS=$TARGETOS
12 | ARG GOARCH=$TARGETARCH
13 |
14 | WORKDIR /src
15 |
16 | # We don't expect the go.mod/go.sum to change frequently.
17 | # So splitting out the mod download helps create another layer
18 | # that should cache well.
19 | COPY go.mod .
20 | COPY go.sum .
21 | RUN go mod download
22 |
23 | COPY . .
24 | RUN go build \
25 | -ldflags="-X 'main.integrationVersion=${TAG}' -X 'main.gitCommit=${COMMIT}' -X 'main.buildDate=${DATE}'" \
26 | -o bin/nri-kube-events ./cmd/nri-kube-events
27 |
28 | FROM alpine:3.22.0
29 | WORKDIR /app
30 |
31 | RUN apk add --no-cache --upgrade \
32 | tini ca-certificates \
33 | && addgroup -g 2000 nri-kube-events \
34 | && adduser -D -H -u 1000 -G nri-kube-events nri-kube-events
35 | EXPOSE 8080
36 |
37 | USER nri-kube-events
38 |
39 | COPY --chown=nri-kube-events:nri-kube-events --from=build /src/bin/nri-kube-events ./
40 |
41 | # Enable custom attributes decoration in the infra SDK
42 | ENV METADATA=true
43 |
44 | ENTRYPOINT ["/sbin/tini", "--", "./nri-kube-events"]
45 | CMD ["--config", "config.yaml", "-promaddr", "0.0.0.0:8080"]
46 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Copyright 2019 New Relic Corporation. All rights reserved.
2 | # SPDX-License-Identifier: Apache-2.0
3 | INTEGRATION = nri-kube-events
4 | DOCKER_IMAGE_NAME ?= newrelic/nri-kube-events
5 | BIN_DIR = ./bin
6 | BUILD_TARGET ?= $(BIN_DIR)/$(INTEGRATION)
7 | TEST_COVERAGE_DIR := $(BIN_DIR)/test-coverage
8 |
9 | DATE := $(shell date)
10 | TAG ?= dev
11 | COMMIT ?= $(shell git rev-parse HEAD || echo "unknown")
12 |
13 | LDFLAGS ?= -ldflags="-X 'main.integrationVersion=$(TAG)' -X 'main.gitCommit=$(COMMIT)' -X 'main.buildDate=$(DATE)' "
14 |
15 | all: build
16 |
17 | build: clean test compile
18 |
19 | clean:
20 | @echo "=== $(INTEGRATION) === [ clean ]: Removing binaries and coverage file..."
21 | @rm -rfv $(BIN_DIR)
22 |
23 | fmt:
24 | @echo "=== $(INTEGRATION) === [ fmt ]: Running Gofmt...."
25 | @go fmt ./...
26 |
27 | compile:
28 | @echo "=== $(INTEGRATION) === [ compile ]: Building $(INTEGRATION)..."
29 | @go build $(LDFLAGS) -o $(BUILD_TARGET) ./cmd/nri-kube-events
30 |
31 | test: test-unit
32 | test-unit:
33 | @echo "=== $(INTEGRATION) === [ test ]: Running unit tests..."
34 | @mkdir -p $(TEST_COVERAGE_DIR)
35 | @go test ./... -v -count=1 -coverprofile=$(TEST_COVERAGE_DIR)/coverage.out -covermode=count
36 |
37 | test-integration:
38 | @echo "=== $(INTEGRATION) === [ test ]: Running integration tests..."
39 | @go test -v -tags integration ./test/integration
40 |
41 | docker:
42 | @docker buildx build --build-arg "TAG=$(TAG)" --build-arg "DATE=$(DATE)" --build-arg "COMMIT=$(COMMIT)" --load . -t "$(DOCKER_IMAGE_NAME)"
43 |
44 | docker-multiarch:
45 | @docker buildx build --build-arg "TAG=$(TAG)" --build-arg "DATE=$(DATE)" --build-arg "COMMIT=$(COMMIT)" --platform linux/amd64,linux/arm64,linux/arm . -t "$(DOCKER_IMAGE_NAME)"
46 |
47 | buildThirdPartyNotice:
48 | @go list -m -json all | go-licence-detector -rules ./assets/licence/rules.json -noticeTemplate ./assets/licence/THIRD_PARTY_NOTICES.md.tmpl -noticeOut THIRD_PARTY_NOTICES.md -includeIndirect -overrides ./assets/licence/overrides
49 |
50 | # rt-update-changelog runs the release-toolkit run.sh script by piping it into bash to update the CHANGELOG.md.
51 | # It also passes down to the script all the flags added to the make target. To check all the accepted flags,
52 | # see: https://github.com/newrelic/release-toolkit/blob/main/contrib/ohi-release-notes/run.sh
53 | # e.g. `make rt-update-changelog -- -v`
54 | rt-update-changelog:
55 | curl "https://raw.githubusercontent.com/newrelic/release-toolkit/v1/contrib/ohi-release-notes/run.sh" | bash -s -- $(filter-out $@,$(MAKECMDGOALS))
56 |
57 | .PHONY: all build clean fmt compile test test-unit docker docker-multiarch buildThirdPartyNotice rt-update-changelog
58 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # New Relic integration for Kubernetes events [](https://codecov.io/gh/newrelic/nri-kube-events)
4 |
5 | This repository contains a simple event router for the Kubernetes project.
6 | The event router serves as an active watcher of event resources in the Kubernetes system,
7 | taking those events and pushing them to a list of configured sinks.
8 |
9 | ## Table of contents
10 |
11 | - [Table of contents](#table-of-contents)
12 | - [Installation](#installation)
13 | - [Getting started](#getting-started)
14 | - [Development flow](#development-flow)
15 | - [Running locally](#running-locally)
16 | - [Configuration](#configuration)
17 | - [Available sinks](#available-sinks)
18 | - [stdout](#stdout)
19 | - [newRelicInfra](#newrelicinfra)
20 | - [Support](#support)
21 | - [Contributing](#contributing)
22 | - [License](#license)
23 |
24 | ## Installation
25 |
26 | For installation instructions see our [docs](https://docs.newrelic.com/docs/integrations/kubernetes-integration/kubernetes-events/install-kubernetes-events-integration).
27 |
28 | ## Getting started
29 |
30 | Once you've installed the integration, and you've configured the New Relic sink,
31 | you can find your events in New Relic using this query:
32 |
33 | ```
34 | FROM InfrastructureEvent
35 | SELECT event.involvedObject.kind, event.involvedObject.name, event.type, event.message, event.reason
36 | WHERE category = 'kubernetes' AND clusterName='YOUR_CLUSTER_NAME'
37 | ```
38 |
39 | ## Helm chart
40 |
41 | You can install this chart using [`nri-bundle`](https://github.com/newrelic/helm-charts/tree/master/charts/nri-bundle) located in the
42 | [helm-charts repository](https://github.com/newrelic/helm-charts) or directly from this repository by adding this Helm repository:
43 |
44 | ```shell
45 | helm repo add nri-kube-events https://newrelic.github.io/nri-kube-events
46 | helm upgrade --install nri-kube-events nri-kube-events/nri-kube-events -f your-custom-values.yaml
47 | ```
48 |
49 | For further information about configuring the chart, see the [chart's README](/charts/nri-kube-events/README.md).
50 |
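As a rough sketch, `your-custom-values.yaml` could set a handful of chart options; the key names below are illustrative assumptions, so check the chart's README and `values.yaml` for the authoritative list:

```yaml
# your-custom-values.yaml (illustrative keys; verify against charts/nri-kube-events/values.yaml)
cluster: YOUR_CLUSTER_NAME
licenseKey: YOUR_NEW_RELIC_LICENSE_KEY
```
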
51 | ## Development flow
52 |
53 | This project uses a Makefile for the most common development tasks.
54 | 
55 | Some available commands include:
56 |
57 | ```sh
58 | make test # run the unit tests
59 | make lint # lint the code using golangci-lint
60 | make compile # compile the project into a single binary
61 | ```
62 |
63 | ### Running locally
64 |
65 | The easiest way to get started is by using [Skaffold](https://skaffold.dev)
66 | and [Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/).
67 |
68 | Follow these steps to run this project:
69 |
70 | - Ensure Minikube is running
71 | ```sh
72 | $ minikube status
73 | host: Running
74 | kubelet: Running
75 | apiserver: Running
76 | kubectl: Correctly Configured: pointing to minikube-vm at 192.168.x.x
77 | ```
78 |
79 | - Copy the example configuration and configure the placeholders marked with ``
80 | ```sh
81 | cp deploy/local.yaml.example deploy/local.yaml
82 |
83 | # Command to see all placeholders that need to be configured:
84 | grep -nrie '' deploy/local.yaml
85 | ```
86 |
87 | - Start the project with the following command
88 | ```sh
89 | $ skaffold dev
90 | Generating tags...
91 | - quay.io/newrelic/nri-kube-events -> quay.io/newrelic/nri-kube-events:latest
92 | Tags generated in 684.354µs
93 | Checking cache...
94 | - quay.io/newrelic/nri-kube-events: Not found. Building
95 | Cache check complete in 39.444528ms
96 | ... more
97 | ```
98 |
99 | This might take up to a minute, but it should start the application
100 | in your Minikube cluster with 2 sinks enabled!
101 |
102 | ### E2E Tests
103 | See the [E2E README](./e2e/README.md) for more details regarding running E2E tests.
104 |
105 | ## Configuration
106 |
107 | nri-kube-events uses a YAML file to configure the application. The structure is
108 | as follows. See [Available Sinks](#available-sinks) for a list of sinks.
109 |
110 | ```yaml
111 | sinks:
112 | - name: sink1
113 | config:
114 | config_key_1: config_value_1
115 | config_key_2: config_value_2
116 | - name: newRelicInfra
117 | config:
118 | agentEndpoint: http://infra-agent.default:8001/v1/data
119 | clusterName: minikube
120 | ```
121 |
122 | ## Available sinks
123 |
124 | | Name | Description |
125 | | ------------------------------- | ----------------------------------------------------------- |
126 | | [stdout](#stdout) | Logs all events to standard output |
127 | | [newRelicInfra](#newrelicinfra) | Sends all events to a locally running New Relic infrastructure agent |
128 |
129 |
130 | ### stdout
131 |
132 | The stdout sink has no configuration.
133 |
134 | ### newRelicInfra
135 |
136 | | Key              | Type                                                    | Description                                                | Required | Default value (if any) |
137 | | ---------------- | ------------------------------------------------------- | ---------------------------------------------------------- | -------- | ---------------------- |
138 | | clusterName      | string                                                  | The name of your Kubernetes cluster                         | ✅        |                        |
139 | | agentEndpoint    | string                                                  | URL of the locally running New Relic infrastructure agent   | ✅        |                        |
140 | | agentHTTPTimeout | [duration](https://golang.org/pkg/time/#ParseDuration)  | HTTP timeout for sending HTTP requests to the agent         |          | 10s                    |
141 |
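Putting the table together, a minimal sketch of a `newRelicInfra` sink entry in the config file, following the structure shown in [Configuration](#configuration) (the endpoint URL, cluster name, and timeout value are illustrative placeholders):

```yaml
sinks:
  - name: newRelicInfra
    config:
      # Required: the name of your Kubernetes cluster
      clusterName: minikube
      # Required: URL of the locally running New Relic infrastructure agent
      agentEndpoint: http://infra-agent.default:8001/v1/data
      # Optional: overrides the default 10s HTTP timeout
      agentHTTPTimeout: 30s
```
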
142 | ## Support
143 |
144 | New Relic hosts and moderates an online forum where customers can interact with
145 | New Relic employees as well as other customers to get help and share best
146 | practices. Like all official New Relic open source projects, there's a related
147 | Community topic in the New Relic Explorers Hub. You can find this project's
148 | topic/threads here:
149 |
150 | https://forum.newrelic.com/t/new-relic-kube-events-integration/109094
151 |
152 | ## Contributing
153 |
154 | Contributions to improve the New Relic
155 | integration for Kubernetes events are encouraged! Keep in mind that when you submit
156 | your pull request, you'll need to sign the CLA via the click-through using
157 | CLA-Assistant. You only have to sign the CLA one time per project. To execute
158 | our corporate CLA, which is required if your contribution is on behalf of a
159 | company, or if you have any questions, please drop us an email at
160 | opensource@newrelic.com.
161 |
162 | **A note about vulnerabilities**
163 |
164 | As noted in our [security policy](../../security/policy), New Relic is committed
165 | to the privacy and security of our customers and their data. We believe that
166 | providing coordinated disclosure by security researchers and engaging with the
167 | security community are important means to achieve our security goals.
168 |
169 | If you believe you have found a security vulnerability in this project or any of
170 | New Relic's products or websites, we welcome and greatly appreciate you reporting
171 | it to New Relic through [Bugcrowd](https://bugcrowd.com/engagements/newrelic-mbb-og-public).
172 |
173 | If you would like to contribute to this project, please review [these guidelines](./CONTRIBUTING.md).
174 |
175 | To all contributors, we thank you! Without your contribution, this project would
176 | not be what it is today.
177 |
178 | ## License
179 | The New Relic integration for Kubernetes events is licensed under the [Apache
180 | 2.0](http://apache.org/licenses/LICENSE-2.0.txt) License.
181 |
182 | The New Relic integration for Kubernetes events also uses source code from
183 | third party libraries. Full details on which libraries are used and the terms
184 | under which they are licensed can be found in the third party notices document.
185 |
--------------------------------------------------------------------------------
/assets/licence/THIRD_PARTY_NOTICES.md.tmpl:
--------------------------------------------------------------------------------
1 | {{- define "depInfo" -}}
2 | {{- range $i, $dep := . }}
3 |
4 | ## [{{ $dep.Name }}]({{ $dep.URL }})
5 |
6 | Distributed under the following license(s):
7 |
8 | * {{ $dep.LicenceType }}
9 |
10 | {{ end }}
11 | {{- end -}}
12 |
13 | # Third Party Notices
14 |
15 | The New Relic integration for Kubernetes Events uses source code from third party libraries which carry
16 | their own copyright notices and license terms. These notices are provided
17 | below.
18 |
19 | In the event that a required notice is missing or incorrect, please notify us
20 | either by [opening an issue](https://github.com/newrelic/nri-kube-events/issues/new),
21 | or by e-mailing [open-source@newrelic.com](mailto:open-source@newrelic.com).
22 |
23 | For any licenses that require the disclosure of source code, the source code
24 | can be found at https://github.com/newrelic/infrastructure-agent/.
25 |
26 |
27 | {{ template "depInfo" .Direct }}
28 |
29 | {{ if .Indirect }}
30 |
31 | Indirect dependencies
32 |
33 | {{ template "depInfo" .Indirect }}
34 | {{ end }}
35 |
--------------------------------------------------------------------------------
/assets/licence/overrides:
--------------------------------------------------------------------------------
1 | {"name": "github.com/go-errors/errors", "licenceType": "MIT"}
2 | {"name": "github.com/munnerz/goautoneg", "licenceType": "BSD-3-Clause"}
3 |
--------------------------------------------------------------------------------
/assets/licence/rules.json:
--------------------------------------------------------------------------------
1 | {
2 | "allowlist": [
3 | "Apache-2.0",
4 | "MIT",
5 | "ISC",
6 | "BSD-2-Clause-FreeBSD",
7 | "BSD-2-Clause-NetBSD",
8 | "BSD-2-Clause",
9 | "BSD-3-Clause-Attribution",
10 | "BSD-3-Clause-Clear",
11 | "BSD-3-Clause-LBNL",
12 | "BSD-3-Clause",
13 | "BSD-4-Clause-UC",
14 | "BSD-4-Clause",
15 | "BSD-Protection",
16 | "MS-PL",
17 | "Ruby",
18 | "ISC",
19 | "CC0-1.0",
20 | "Zlib",
21 | "MPL-2.0"
22 | ]
23 | }
24 |
--------------------------------------------------------------------------------
/charts/internal/e2e-resources/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | version: 1.0.0-devel
3 | description: This chart creates E2E resources for nri-kube-events.
4 | name: e2e-resources
5 |
6 | maintainers:
7 | - name: juanjjaramillo
8 | url: https://github.com/juanjjaramillo
9 | - name: csongnr
10 | url: https://github.com/csongnr
11 | - name: dbudziwojskiNR
12 | url: https://github.com/dbudziwojskiNR
13 |
--------------------------------------------------------------------------------
/charts/internal/e2e-resources/README.md:
--------------------------------------------------------------------------------
1 | # e2e-resources
2 |
3 | 
4 |
5 | This chart creates E2E resources for nri-kube-events.
6 |
7 | ## Maintainers
8 |
9 | | Name | Email | Url |
10 | | ---- | ------ | --- |
11 | | juanjjaramillo | | https://github.com/juanjjaramillo |
12 | | csongnr | | https://github.com/csongnr |
13 | | dbudziwojskiNR | | https://github.com/dbudziwojskiNR |
14 |
15 | ## Values
16 |
17 | | Key | Type | Default | Description |
18 | |-----|------|---------|-------------|
19 | | deployment.enabled | bool | `true` | |
20 | | fileSystemTest.fileName | string | `"pi.txt"` | |
21 |
22 | ----------------------------------------------
23 | Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
24 |
--------------------------------------------------------------------------------
/charts/internal/e2e-resources/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.deployment.enabled }}
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: {{ .Release.Name }}-deployment
6 | spec:
7 | paused: false
8 | minReadySeconds: 6
9 | replicas: 2
10 | strategy:
11 | type: RollingUpdate
12 | rollingUpdate:
13 | maxSurge: 1
14 | maxUnavailable: 1
15 | selector:
16 | matchLabels:
17 | app: deployment
18 | template:
19 | metadata:
20 | labels:
21 | app: deployment
22 | spec:
23 | containers:
24 | - name: compute-pi-digits
25 | image: perl:5.34.0
26 | imagePullPolicy: IfNotPresent
27 | resources:
28 | requests:
29 | cpu: 40m
30 | memory: 15Mi
31 | limits:
32 | cpu: 80m
33 | memory: 30Mi
34 | command:
35 | - perl
36 | - -Mbignum=bpi
37 | - -wle
38 | - while (true) { open(FH, '>', './{{ .Values.fileSystemTest.fileName }}') or die "Cannot open file - $!"; print FH bpi(100); close(FH); print bpi(2700) }
39 | volumeMounts:
40 | - mountPath: /output
41 | name: storage
42 | - name: failing-container
43 | image: docker.io/library/bash:5
44 | command: ["bash"]
45 | args:
46 | - -c
47 | - echo "Hello world! I'm going to exit with 42 to simulate a software bug." && sleep 30 && exit 42
48 | volumes:
49 | - name: storage
50 | emptyDir:
51 | sizeLimit: 30Mi
52 | {{- end }}
53 |
--------------------------------------------------------------------------------
/charts/internal/e2e-resources/values.yaml:
--------------------------------------------------------------------------------
1 | # Deploy a dummy deployment
2 | deployment:
3 | enabled: true
4 |
5 | # Variables for filesystem testing
6 | fileSystemTest:
7 | fileName: 'pi.txt'
8 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/Chart.lock:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - name: common-library
3 | repository: https://helm-charts.newrelic.com
4 | version: 1.3.1
5 | digest: sha256:cfa7bfb136b9bcfe87e37d3556c3fedecc58f42685c4ce39485da106408b6619
6 | generated: "2025-01-09T00:16:28.768416201Z"
7 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: nri-kube-events
3 | description: A Helm chart to deploy the New Relic Kube Events router
4 | home: https://docs.newrelic.com/docs/integrations/kubernetes-integration/kubernetes-events/install-kubernetes-events-integration
5 | icon: https://newrelic.com/themes/custom/curio/assets/mediakit/NR_logo_Horizontal.svg
6 | sources:
7 | - https://github.com/newrelic/nri-kube-events/
8 | - https://github.com/newrelic/nri-kube-events/tree/main/charts/nri-kube-events
9 | - https://github.com/newrelic/infrastructure-agent/
10 | version: 3.13.1
11 | appVersion: 2.13.1
12 | dependencies:
13 | - name: common-library
14 | version: 1.3.1
15 | repository: "https://helm-charts.newrelic.com"
16 | maintainers:
17 | - name: danielstokes
18 | url: https://github.com/danielstokes
19 | - name: dbudziwojskiNR
20 | url: https://github.com/dbudziwojskiNR
21 | - name: kondracek-nr
22 | url: https://github.com/kondracek-nr
23 | - name: kpattaswamy
24 | url: https://github.com/kpattaswamy
25 | - name: Philip-R-Beckwith
26 | url: https://github.com/Philip-R-Beckwith
27 | - name: TmNguyen12
28 | url: https://github.com/TmNguyen12
29 | keywords:
30 | - infrastructure
31 | - newrelic
32 | - monitoring
33 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/README.md:
--------------------------------------------------------------------------------
1 | # nri-kube-events
2 |
3 |  
4 |
5 | A Helm chart to deploy the New Relic Kube Events router
6 |
7 | **Homepage:** <https://docs.newrelic.com/docs/integrations/kubernetes-integration/kubernetes-events/install-kubernetes-events-integration>
8 |
9 | # Helm installation
10 |
11 | You can install this chart using [`nri-bundle`](https://github.com/newrelic/helm-charts/tree/master/charts/nri-bundle) located in the
12 | [helm-charts repository](https://github.com/newrelic/helm-charts) or directly from this repository by adding this Helm repository:
13 |
14 | ```shell
15 | helm repo add nri-kube-events https://newrelic.github.io/nri-kube-events
16 | helm upgrade --install nri-kube-events nri-kube-events/nri-kube-events -f your-custom-values.yaml
17 | ```
18 |
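If you prefer not to keep a separate values file, the mandatory `cluster` and `licenseKey` settings can also be passed on the command line. A minimal sketch (the release name, namespace, cluster name and license key below are placeholders):

```shell
helm upgrade --install nri-kube-events nri-kube-events/nri-kube-events \
  --namespace newrelic --create-namespace \
  --set cluster=my-cluster \
  --set licenseKey=my-license-key
```
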
19 | ## Source Code
20 |
21 | * <https://github.com/newrelic/nri-kube-events/>
22 | * <https://github.com/newrelic/nri-kube-events/tree/main/charts/nri-kube-events>
23 | * <https://github.com/newrelic/infrastructure-agent/>
24 |
25 | ## Values managed globally
26 |
27 | This chart implements [New Relic's common Helm library](https://github.com/newrelic/helm-charts/tree/master/library/common-library), which
28 | means that it honors a wide range of defaults and globals common to most New Relic Helm charts.
29 | 
30 | Options that can be defined globally include `affinity`, `nodeSelector`, `tolerations`, `proxy` and others. The full list can be found in the
31 | [common library's user guide](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md).
32 |
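For example, a values sketch setting one of these options globally for every New Relic chart in the release and overriding it locally for this chart (the keys and values below are illustrative; chart-local values are expected to take precedence over the `global` ones):

```yaml
# Shared by every New Relic chart in the release:
global:
  cluster: my-cluster
  licenseKey: my-license-key
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"

# Chart-local override, applied to nri-kube-events only:
tolerations: []
```
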
33 | ## Values
34 |
35 | | Key | Type | Default | Description |
36 | |-----|------|---------|-------------|
37 | | affinity | object | `{}` | Sets pod/node affinities. Can be configured also with `global.affinity` |
38 | | agentHTTPTimeout | string | `"30s"` | Amount of time to wait before timing out when sending metrics to the metric forwarder |
39 | | cluster | string | `""` | Name of the Kubernetes cluster monitored. Mandatory. Can be configured also with `global.cluster` |
40 | | containerSecurityContext | object | `{}` | Sets security context (at container level). Can be configured also with `global.containerSecurityContext` |
41 | | customAttributes | object | `{}` | Adds extra attributes to the cluster and all the metrics emitted to the backend. Can be configured also with `global.customAttributes` |
42 | | customSecretLicenseKey | string | `""` | In case you don't want to have the license key in your values, this allows you to point to the secret key where the license key is located. Can be configured also with `global.customSecretLicenseKey` |
43 | | customSecretName | string | `""` | In case you don't want to have the license key in your values, this allows you to point to a user-created secret to get the key from there. Can be configured also with `global.customSecretName` |
44 | | deployment.annotations | object | `{}` | Annotations to add to the Deployment. |
45 | | dnsConfig | object | `{}` | Sets pod's dnsConfig. Can be configured also with `global.dnsConfig` |
46 | | fedramp.enabled | bool | `false` | Enables FedRAMP. Can be configured also with `global.fedramp.enabled` |
47 | | forwarder | object | `{"resources":{}}` | Resources for the forwarder sidecar container |
48 | | fullnameOverride | string | `""` | Override the full name of the release |
49 | | hostNetwork | bool | `false` | Sets pod's hostNetwork. Can be configured also with `global.hostNetwork` |
50 | | images | object | See `values.yaml` | Images used by the chart for the integration and agents |
51 | | images.agent | object | See `values.yaml` | Image for the New Relic Infrastructure Agent sidecar |
52 | | images.integration | object | See `values.yaml` | Image for the New Relic Kubernetes integration |
53 | | images.pullSecrets | list | `[]` | The secrets that are needed to pull images from a custom registry. |
54 | | labels | object | `{}` | Additional labels for chart objects |
55 | | licenseKey | string | `""` | Sets the license key to use. Can be configured also with `global.licenseKey` |
56 | | nameOverride | string | `""` | Override the name of the chart |
57 | | nodeSelector | object | `{}` | Sets pod's node selector. Can be configured also with `global.nodeSelector` |
58 | | nrStaging | bool | `false` | Send the metrics to the staging backend. Requires a valid staging license key. Can be configured also with `global.nrStaging` |
59 | | podAnnotations | object | `{}` | Annotations to add to the pod. |
60 | | podLabels | object | `{}` | Additional labels for chart pods |
61 | | podSecurityContext | object | `{}` | Sets security context (at pod level). Can be configured also with `global.podSecurityContext` |
62 | | priorityClassName | string | `""` | Sets pod's priorityClassName. Can be configured also with `global.priorityClassName` |
63 | | proxy | string | `""` | Configures the integration to send all HTTP/HTTPS requests through the proxy at that URL. The URL should have a standard format like `https://user:password@hostname:port`. Can be configured also with `global.proxy` |
64 | | rbac.create | bool | `true` | Specifies whether RBAC resources should be created |
65 | | resources | object | `{}` | Resources for the integration container |
66 | | scrapers | object | See `values.yaml` | Configure the various kinds of scrapers that should be run. |
67 | | serviceAccount | object | See `values.yaml` | Settings controlling ServiceAccount creation |
68 | | serviceAccount.create | bool | `true` | Specifies whether a ServiceAccount should be created |
69 | | sinks | object | See `values.yaml` | Configures where the metrics will be written. Mostly for debugging purposes. |
70 | | sinks.newRelicInfra | bool | `true` | The newRelicInfra sink sends all events to New Relic. |
71 | | sinks.stdout | bool | `false` | Enable the stdout sink to also see all events in the logs. |
72 | | tolerations | list | `[]` | Sets pod's tolerations to node taints. Can be configured also with `global.tolerations` |
73 | | verboseLog | bool | `false` | Enables debug logs for this integration, or for all integrations if it is set globally. Can be configured also with `global.verboseLog` |
74 |
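As a concrete illustration, `customAttributes` accepts either a YAML map or a JSON-encoded string; the sketch below mirrors the chart's own CI values files (`ci/test-custom-attributes-as-map.yaml` and `ci/test-custom-attributes-as-string.yaml`):

```yaml
# As a YAML map:
customAttributes:
  test_tag_label: test_tag_value

# Or, equivalently, as a JSON-encoded string (use one form or the other):
# customAttributes: '{"test_tag_label": "test_tag_value"}'
```
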
75 | ## Maintainers
76 |
77 | * [danielstokes](https://github.com/danielstokes)
78 | * [dbudziwojskiNR](https://github.com/dbudziwojskiNR)
79 | * [kondracek-nr](https://github.com/kondracek-nr)
80 | * [kpattaswamy](https://github.com/kpattaswamy)
81 | * [Philip-R-Beckwith](https://github.com/Philip-R-Beckwith)
82 | * [TmNguyen12](https://github.com/TmNguyen12)
83 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/README.md.gotmpl:
--------------------------------------------------------------------------------
1 | {{ template "chart.header" . }}
2 | {{ template "chart.deprecationWarning" . }}
3 |
4 | {{ template "chart.badgesSection" . }}
5 |
6 | {{ template "chart.description" . }}
7 |
8 | {{ template "chart.homepageLine" . }}
9 |
10 | # Helm installation
11 |
12 | You can install this chart using [`nri-bundle`](https://github.com/newrelic/helm-charts/tree/master/charts/nri-bundle) located in the
13 | [helm-charts repository](https://github.com/newrelic/helm-charts) or directly from this repository by adding this Helm repository:
14 |
15 | ```shell
16 | helm repo add nri-kube-events https://newrelic.github.io/nri-kube-events
17 | helm upgrade --install nri-kube-events nri-kube-events/nri-kube-events -f your-custom-values.yaml
18 | ```
19 |
20 | {{ template "chart.sourcesSection" . }}
21 |
22 | ## Values managed globally
23 |
24 | This chart implements [New Relic's common Helm library](https://github.com/newrelic/helm-charts/tree/master/library/common-library), which
25 | means that it honors a wide range of defaults and globals common to most New Relic Helm charts.
26 | 
27 | Options that can be defined globally include `affinity`, `nodeSelector`, `tolerations`, `proxy` and others. The full list can be found in the
28 | [common library's user guide](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md).
29 |
30 | {{ template "chart.valuesSection" . }}
31 |
32 | {{ if .Maintainers }}
33 | ## Maintainers
34 | {{ range .Maintainers }}
35 | {{- if .Name }}
36 | {{- if .Url }}
37 | * [{{ .Name }}]({{ .Url }})
38 | {{- else }}
39 | * {{ .Name }}
40 | {{- end }}
41 | {{- end }}
42 | {{- end }}
43 | {{- end }}
44 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/ci/test-bare-minimum-values.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | licenseKey: 1234567890abcdef1234567890abcdef12345678
3 | cluster: test-cluster
4 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/ci/test-custom-attributes-as-map.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | licenseKey: 1234567890abcdef1234567890abcdef12345678
3 | cluster: test-cluster
4 |
5 | customAttributes:
6 | test_tag_label: test_tag_value
7 |
8 | image:
9 | kubeEvents:
10 | repository: e2e/nri-kube-events
11 | tag: test
12 | pullPolicy: IfNotPresent
13 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/ci/test-custom-attributes-as-string.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | licenseKey: 1234567890abcdef1234567890abcdef12345678
3 | cluster: test-cluster
4 |
5 | customAttributes: '{"test_tag_label": "test_tag_value"}'
6 |
7 | image:
8 | kubeEvents:
9 | repository: e2e/nri-kube-events
10 | tag: test
11 | pullPolicy: IfNotPresent
12 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/ci/test-values.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | licenseKey: 1234567890abcdef1234567890abcdef12345678
3 | cluster: test-cluster
4 |
5 | sinks:
6 | # Enable the stdout sink to also see all events in the logs.
7 | stdout: true
8 | # The newRelicInfra sink sends all events to New Relic.
9 | newRelicInfra: true
10 |
11 | customAttributes:
12 | test_tag_label: test_tag_value
13 |
14 | config:
15 | accountID: 111
16 | region: EU
17 |
18 | rbac:
19 | create: true
20 |
21 | serviceAccount:
22 | create: true
23 |
24 | podAnnotations:
25 | annotation1: "annotation"
26 |
27 | nodeSelector:
28 | kubernetes.io/os: linux
29 |
30 | tolerations:
31 | - key: "key1"
32 | effect: "NoSchedule"
33 | operator: "Exists"
34 |
35 | affinity:
36 | nodeAffinity:
37 | requiredDuringSchedulingIgnoredDuringExecution:
38 | nodeSelectorTerms:
39 | - matchExpressions:
40 | - key: kubernetes.io/os
41 | operator: In
42 | values:
43 | - linux
44 |
45 | hostNetwork: true
46 |
47 | dnsConfig:
48 | nameservers:
49 | - 1.2.3.4
50 | searches:
51 | - my.dns.search.suffix
52 | options:
53 | - name: ndots
54 | value: "1"
55 |
56 | image:
57 | kubeEvents:
58 | repository: e2e/nri-kube-events
59 | tag: test
60 | pullPolicy: IfNotPresent
61 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | {{ include "nri-kube-events.compatibility.message.securityContext.runAsUser" . }}
2 |
3 | {{ include "nri-kube-events.compatibility.message.images" . }}
4 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 |
3 | {{- define "nri-kube-events.securityContext.pod" -}}
4 | {{- $defaults := fromYaml ( include "nriKubernetes.securityContext.podDefaults" . ) -}}
5 | {{- $compatibilityLayer := include "nri-kube-events.compatibility.securityContext.pod" . | fromYaml -}}
6 | {{- $commonLibrary := fromYaml ( include "newrelic.common.securityContext.pod" . ) -}}
7 |
8 | {{- $finalSecurityContext := dict -}}
9 | {{- if $commonLibrary -}}
10 | {{- $finalSecurityContext = mustMergeOverwrite $commonLibrary $compatibilityLayer -}}
11 | {{- else -}}
12 | {{- $finalSecurityContext = mustMergeOverwrite $defaults $compatibilityLayer -}}
13 | {{- end -}}
14 | {{- toYaml $finalSecurityContext -}}
15 | {{- end -}}
16 |
17 |
18 |
19 | {{- /* These are the defaults that are used for all the containers in this chart */ -}}
20 | {{- define "nriKubernetes.securityContext.podDefaults" -}}
21 | runAsUser: 1000
22 | runAsNonRoot: true
23 | {{- end -}}
24 |
25 |
26 |
27 | {{- define "nri-kube-events.securityContext.container" -}}
28 | {{- if include "newrelic.common.securityContext.container" . -}}
29 | {{- include "newrelic.common.securityContext.container" . -}}
30 | {{- else -}}
31 | privileged: false
32 | allowPrivilegeEscalation: false
33 | readOnlyRootFilesystem: true
34 | {{- end -}}
35 | {{- end -}}
36 |
37 |
38 |
39 | {{- /* */ -}}
40 | {{- define "nri-kube-events.agentConfig" -}}
41 | is_forward_only: true
42 | http_server_enabled: true
43 | http_server_port: 8001
44 | {{ include "newrelic.common.agentConfig.defaults" . }}
45 | {{- end -}}
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/_helpers_compatibility.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Returns a dictionary with legacy runAsUser config.
3 | We know that it only has "one line" but it is separated from the rest of the helpers because it is a temporary thing
4 | that we should EOL. The EOL time of this will be marked when we GA the deprecation of Helm v2.
5 | */}}
6 | {{- define "nri-kube-events.compatibility.securityContext.pod" -}}
7 | {{- if .Values.runAsUser -}}
8 | runAsUser: {{ .Values.runAsUser }}
9 | {{- end -}}
10 | {{- end -}}
11 |
12 |
13 |
14 | {{- /*
15 | Functions to get values from the globals instead of the common library
16 | We do this because it could be difficult to see what is going on under
17 | the hood if we used the common-library here. This way it is easier to read something
18 | like:
19 | {{- $registry := $oldRegistry | default $newRegistry | default $globalRegistry -}}
20 | */ -}}
21 | {{- define "nri-kube-events.compatibility.global.registry" -}}
22 | {{- if .Values.global -}}
23 | {{- if .Values.global.images -}}
24 | {{- if .Values.global.images.registry -}}
25 | {{- .Values.global.images.registry -}}
26 | {{- end -}}
27 | {{- end -}}
28 | {{- end -}}
29 | {{- end -}}
30 |
31 | {{- /* Functions to fetch integration image configuration from the old .Values.image */ -}}
32 | {{- /* integration's old registry */ -}}
33 | {{- define "nri-kube-events.compatibility.old.integration.registry" -}}
34 | {{- if .Values.image -}}
35 | {{- if .Values.image.kubeEvents -}}
36 | {{- if .Values.image.kubeEvents.registry -}}
37 | {{- .Values.image.kubeEvents.registry -}}
38 | {{- end -}}
39 | {{- end -}}
40 | {{- end -}}
41 | {{- end -}}
42 |
43 | {{- /* integration's old repository */ -}}
44 | {{- define "nri-kube-events.compatibility.old.integration.repository" -}}
45 | {{- if .Values.image -}}
46 | {{- if .Values.image.kubeEvents -}}
47 | {{- if .Values.image.kubeEvents.repository -}}
48 | {{- .Values.image.kubeEvents.repository -}}
49 | {{- end -}}
50 | {{- end -}}
51 | {{- end -}}
52 | {{- end -}}
53 |
54 | {{- /* integration's old tag */ -}}
55 | {{- define "nri-kube-events.compatibility.old.integration.tag" -}}
56 | {{- if .Values.image -}}
57 | {{- if .Values.image.kubeEvents -}}
58 | {{- if .Values.image.kubeEvents.tag -}}
59 | {{- .Values.image.kubeEvents.tag -}}
60 | {{- end -}}
61 | {{- end -}}
62 | {{- end -}}
63 | {{- end -}}
64 |
65 | {{- /* integration's old imagePullPolicy */ -}}
66 | {{- define "nri-kube-events.compatibility.old.integration.pullPolicy" -}}
67 | {{- if .Values.image -}}
68 | {{- if .Values.image.kubeEvents -}}
69 | {{- if .Values.image.kubeEvents.pullPolicy -}}
70 | {{- .Values.image.kubeEvents.pullPolicy -}}
71 | {{- end -}}
72 | {{- end -}}
73 | {{- end -}}
74 | {{- end -}}
75 |
76 | {{- /* Functions to fetch agent image configuration from the old .Values.image */ -}}
77 | {{- /* agent's old registry */ -}}
78 | {{- define "nri-kube-events.compatibility.old.agent.registry" -}}
79 | {{- if .Values.image -}}
80 | {{- if .Values.image.infraAgent -}}
81 | {{- if .Values.image.infraAgent.registry -}}
82 | {{- .Values.image.infraAgent.registry -}}
83 | {{- end -}}
84 | {{- end -}}
85 | {{- end -}}
86 | {{- end -}}
87 |
88 | {{- /* agent's old repository */ -}}
89 | {{- define "nri-kube-events.compatibility.old.agent.repository" -}}
90 | {{- if .Values.image -}}
91 | {{- if .Values.image.infraAgent -}}
92 | {{- if .Values.image.infraAgent.repository -}}
93 | {{- .Values.image.infraAgent.repository -}}
94 | {{- end -}}
95 | {{- end -}}
96 | {{- end -}}
97 | {{- end -}}
98 |
99 | {{- /* agent's old tag */ -}}
100 | {{- define "nri-kube-events.compatibility.old.agent.tag" -}}
101 | {{- if .Values.image -}}
102 | {{- if .Values.image.infraAgent -}}
103 | {{- if .Values.image.infraAgent.tag -}}
104 | {{- .Values.image.infraAgent.tag -}}
105 | {{- end -}}
106 | {{- end -}}
107 | {{- end -}}
108 | {{- end -}}
109 |
110 | {{- /* agent's old imagePullPolicy */ -}}
111 | {{- define "nri-kube-events.compatibility.old.agent.pullPolicy" -}}
112 | {{- if .Values.image -}}
113 | {{- if .Values.image.infraAgent -}}
114 | {{- if .Values.image.infraAgent.pullPolicy -}}
115 | {{- .Values.image.infraAgent.pullPolicy -}}
116 | {{- end -}}
117 | {{- end -}}
118 | {{- end -}}
119 | {{- end -}}
120 |
121 |
122 |
123 | {{/*
124 | Creates the image string needed to pull the integration image respecting the breaking change we made in the values file
125 | */}}
126 | {{- define "nri-kube-events.compatibility.images.integration" -}}
127 | {{- $globalRegistry := include "nri-kube-events.compatibility.global.registry" . -}}
128 | {{- $oldRegistry := include "nri-kube-events.compatibility.old.integration.registry" . -}}
129 | {{- $newRegistry := .Values.images.integration.registry -}}
130 | {{- $registry := $oldRegistry | default $newRegistry | default $globalRegistry -}}
131 |
132 | {{- $oldRepository := include "nri-kube-events.compatibility.old.integration.repository" . -}}
133 | {{- $newRepository := .Values.images.integration.repository -}}
134 | {{- $repository := $oldRepository | default $newRepository }}
135 |
136 | {{- $oldTag := include "nri-kube-events.compatibility.old.integration.tag" . -}}
137 | {{- $newTag := .Values.images.integration.tag -}}
138 | {{- $tag := $oldTag | default $newTag | default .Chart.AppVersion -}}
139 |
140 | {{- if $registry -}}
141 | {{- printf "%s/%s:%s" $registry $repository $tag -}}
142 | {{- else -}}
143 | {{- printf "%s:%s" $repository $tag -}}
144 | {{- end -}}
145 | {{- end -}}
146 |
147 |
148 |
149 | {{/*
150 | Creates the image string needed to pull the agent's image respecting the breaking change we made in the values file
151 | */}}
152 | {{- define "nri-kube-events.compatibility.images.agent" -}}
153 | {{- $globalRegistry := include "nri-kube-events.compatibility.global.registry" . -}}
154 | {{- $oldRegistry := include "nri-kube-events.compatibility.old.agent.registry" . -}}
155 | {{- $newRegistry := .Values.images.agent.registry -}}
156 | {{- $registry := $oldRegistry | default $newRegistry | default $globalRegistry -}}
157 |
158 | {{- $oldRepository := include "nri-kube-events.compatibility.old.agent.repository" . -}}
159 | {{- $newRepository := .Values.images.agent.repository -}}
160 | {{- $repository := $oldRepository | default $newRepository }}
161 |
162 | {{- $oldTag := include "nri-kube-events.compatibility.old.agent.tag" . -}}
163 | {{- $newTag := .Values.images.agent.tag -}}
164 | {{- $tag := $oldTag | default $newTag -}}
165 |
166 | {{- if $registry -}}
167 | {{- printf "%s/%s:%s" $registry $repository $tag -}}
168 | {{- else -}}
169 | {{- printf "%s:%s" $repository $tag -}}
170 | {{- end -}}
171 | {{- end -}}
172 |
173 |
174 |
175 | {{/*
176 | Returns the pull policy for the integration image taking into account that we made a breaking change on the values path.
177 | */}}
178 | {{- define "nri-kube-events.compatibility.images.pullPolicy.integration" -}}
179 | {{- $old := include "nri-kube-events.compatibility.old.integration.pullPolicy" . -}}
180 | {{- $new := .Values.images.integration.pullPolicy -}}
181 |
182 | {{- $old | default $new -}}
183 | {{- end -}}
184 |
185 |
186 |
187 | {{/*
188 | Returns the pull policy for the agent image taking into account that we made a breaking change on the values path.
189 | */}}
190 | {{- define "nri-kube-events.compatibility.images.pullPolicy.agent" -}}
191 | {{- $old := include "nri-kube-events.compatibility.old.agent.pullPolicy" . -}}
192 | {{- $new := .Values.images.agent.pullPolicy -}}
193 |
194 | {{- $old | default $new -}}
195 | {{- end -}}
196 |
197 |
198 |
199 | {{/*
200 | Returns a merged list of pull secrets ready to be used
201 | */}}
202 | {{- define "nri-kube-events.compatibility.images.renderPullSecrets" -}}
203 | {{- $list := list -}}
204 |
205 | {{- if .Values.image -}}
206 | {{- if .Values.image.pullSecrets -}}
207 | {{- $list = append $list .Values.image.pullSecrets }}
208 | {{- end -}}
209 | {{- end -}}
210 |
211 | {{- if .Values.images.pullSecrets -}}
212 | {{- $list = append $list .Values.images.pullSecrets -}}
213 | {{- end -}}
214 |
215 | {{- include "newrelic.common.images.renderPullSecrets" ( dict "pullSecrets" $list "context" .) }}
216 | {{- end -}}
217 |
218 |
219 |
220 | {{- /* Message to show to the user saying that the image value is not supported anymore */ -}}
221 | {{- define "nri-kube-events.compatibility.message.images" -}}
222 | {{- $oldIntegrationRegistry := include "nri-kube-events.compatibility.old.integration.registry" . -}}
223 | {{- $oldIntegrationRepository := include "nri-kube-events.compatibility.old.integration.repository" . -}}
224 | {{- $oldIntegrationTag := include "nri-kube-events.compatibility.old.integration.tag" . -}}
225 | {{- $oldIntegrationPullPolicy := include "nri-kube-events.compatibility.old.integration.pullPolicy" . -}}
226 | {{- $oldAgentRegistry := include "nri-kube-events.compatibility.old.agent.registry" . -}}
227 | {{- $oldAgentRepository := include "nri-kube-events.compatibility.old.agent.repository" . -}}
228 | {{- $oldAgentTag := include "nri-kube-events.compatibility.old.agent.tag" . -}}
229 | {{- $oldAgentPullPolicy := include "nri-kube-events.compatibility.old.agent.pullPolicy" . -}}
230 |
231 | {{- if or $oldIntegrationRegistry $oldIntegrationRepository $oldIntegrationTag $oldIntegrationPullPolicy $oldAgentRegistry $oldAgentRepository $oldAgentTag $oldAgentPullPolicy }}
232 | Configuring image repository and tag under 'image' is no longer supported.
233 | This is the list of values that we no longer support:
234 | - image.kubeEvents.registry
235 | - image.kubeEvents.repository
236 | - image.kubeEvents.tag
237 | - image.kubeEvents.pullPolicy
238 | - image.infraAgent.registry
239 | - image.infraAgent.repository
240 | - image.infraAgent.tag
241 | - image.infraAgent.pullPolicy
242 |
243 | Please set:
244 | - images.agent.* to configure the infrastructure-agent forwarder.
245 | - images.integration.* to configure the image in charge of scraping k8s data.
246 |
247 | ------
248 | {{- end }}
249 | {{- end -}}
250 |
251 |
252 |
253 | {{- /* Message to show to the user saying that the runAsUser value is deprecated */ -}}
254 | {{- define "nri-kube-events.compatibility.message.securityContext.runAsUser" -}}
255 | {{- if .Values.runAsUser }}
256 | WARNING: `runAsUser` is deprecated
257 | ==================================
258 |
259 | We have automatically translated your `runAsUser` setting to the new format, but this shimming will be removed in the
260 | future. Please migrate your configs to the new format in the `securityContext` key.
261 | {{- end }}
262 | {{- end -}}
263 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/agent-configmap.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.sinks.newRelicInfra -}}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | labels:
6 | {{- include "newrelic.common.labels" . | nindent 4 }}
7 | name: {{ include "newrelic.common.naming.fullname" . }}-agent-config
8 | namespace: {{ .Release.Namespace }}
9 | data:
10 | newrelic-infra.yml: |
11 | {{- include "nri-kube-events.agentConfig" . | nindent 4 }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/clusterrole.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.create }}
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | {{- include "newrelic.common.labels" . | nindent 4 }}
7 | name: {{ include "newrelic.common.naming.fullname" . }}
8 | rules:
9 | - apiGroups:
10 | - ""
11 | resources:
12 | - events
13 | - namespaces
14 | - nodes
15 | - jobs
16 | - persistentvolumes
17 | - persistentvolumeclaims
18 | - pods
19 | - services
20 | verbs:
21 | - get
22 | - watch
23 | - list
24 | - apiGroups:
25 | - apps
26 | resources:
27 | - daemonsets
28 | - deployments
29 | verbs:
30 | - get
31 | - watch
32 | - list
33 | - apiGroups:
34 | - batch
35 | resources:
36 | - cronjobs
37 | - jobs
38 | verbs:
39 | - get
40 | - watch
41 | - list
42 | {{- end -}}
43 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.create }}
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | labels:
6 | {{- include "newrelic.common.labels" . | nindent 4 }}
7 | name: {{ include "newrelic.common.naming.fullname" . }}
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: ClusterRole
11 | name: {{ include "newrelic.common.naming.fullname" . }}
12 | subjects:
13 | - kind: ServiceAccount
14 | name: {{ include "newrelic.common.serviceAccount.name" . }}
15 | namespace: {{ .Release.Namespace }}
16 | {{- end -}}
17 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | labels:
5 | {{- include "newrelic.common.labels" . | nindent 4 }}
6 | name: {{ include "newrelic.common.naming.fullname" . }}-config
7 | namespace: {{ .Release.Namespace }}
8 | data:
9 | config.yaml: |-
10 | sinks:
11 | {{- if .Values.sinks.stdout }}
12 | - name: stdout
13 | {{- end }}
14 | {{- if .Values.sinks.newRelicInfra }}
15 | - name: newRelicInfra
16 | config:
17 | agentEndpoint: http://localhost:8001/v1/data
18 | clusterName: {{ include "newrelic.common.cluster" . }}
19 | agentHTTPTimeout: {{ .Values.agentHTTPTimeout }}
20 | {{- end }}
21 | captureDescribe: {{ .Values.scrapers.descriptions.enabled }}
22 | describeRefresh: {{ .Values.scrapers.descriptions.resyncPeriod | default "24h" }}
23 | captureEvents: {{ .Values.scrapers.events.enabled }}
24 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "newrelic.common.naming.fullname" . }}
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{- include "newrelic.common.labels" . | nindent 4 }}
8 | annotations:
9 | {{- if .Values.deployment.annotations }}
10 | {{- toYaml .Values.deployment.annotations | nindent 4 }}
11 | {{- end }}
12 | spec:
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/name: {{ include "newrelic.common.naming.name" . }}
16 | template:
17 | metadata:
18 | {{- if .Values.podAnnotations }}
19 | annotations:
20 | {{- toYaml .Values.podAnnotations | nindent 8}}
21 | {{- end }}
22 | labels:
23 | {{- include "newrelic.common.labels.podLabels" . | nindent 8 }}
24 | spec:
25 | {{- with include "nri-kube-events.compatibility.images.renderPullSecrets" . }}
26 | imagePullSecrets:
27 | {{- . | nindent 8 }}
28 | {{- end }}
29 | {{- with include "nri-kube-events.securityContext.pod" . }}
30 | securityContext:
31 | {{- . | nindent 8 }}
32 | {{- end }}
33 | containers:
34 | - name: kube-events
35 | image: {{ include "nri-kube-events.compatibility.images.integration" . }}
36 | imagePullPolicy: {{ include "nri-kube-events.compatibility.images.pullPolicy.integration" . }}
37 | {{- with include "nri-kube-events.securityContext.container" . }}
38 | securityContext:
39 | {{- . | nindent 12 }}
40 | {{- end }}
41 | {{- if .Values.resources }}
42 | resources:
43 | {{- toYaml .Values.resources | nindent 12 }}
44 | {{- end }}
45 | args: ["-config", "/app/config/config.yaml", "-loglevel", "debug"]
46 | volumeMounts:
47 | - name: config-volume
48 | mountPath: /app/config
49 | {{- if .Values.sinks.newRelicInfra }}
50 | - name: forwarder
51 | image: {{ include "nri-kube-events.compatibility.images.agent" . }}
52 | imagePullPolicy: {{ include "nri-kube-events.compatibility.images.pullPolicy.agent" . }}
53 | {{- with include "nri-kube-events.securityContext.container" . }}
54 | securityContext:
55 | {{- . | nindent 12 }}
56 | {{- end }}
57 | ports:
58 | - containerPort: {{ get (fromYaml (include "nri-kube-events.agentConfig" .)) "http_server_port" }}
59 | env:
60 | - name: NRIA_LICENSE_KEY
61 | valueFrom:
62 | secretKeyRef:
63 | name: {{ include "newrelic.common.license.secretName" . }}
64 | key: {{ include "newrelic.common.license.secretKeyName" . }}
65 |
66 | - name: NRIA_OVERRIDE_HOSTNAME_SHORT
67 | valueFrom:
68 | fieldRef:
69 | apiVersion: v1
70 | fieldPath: spec.nodeName
71 |
72 | volumeMounts:
73 | - mountPath: /var/db/newrelic-infra/data
74 | name: tmpfs-data
75 | - mountPath: /var/db/newrelic-infra/user_data
76 | name: tmpfs-user-data
77 | - mountPath: /tmp
78 | name: tmpfs-tmp
79 | - name: config
80 | mountPath: /etc/newrelic-infra.yml
81 | subPath: newrelic-infra.yml
82 | {{- if ((.Values.forwarder).resources) }}
83 | resources:
84 | {{- toYaml .Values.forwarder.resources | nindent 12 }}
85 | {{- end }}
86 | {{- end }}
87 | serviceAccountName: {{ include "newrelic.common.serviceAccount.name" . }}
88 | volumes:
89 | {{- if .Values.sinks.newRelicInfra }}
90 | - name: config
91 | configMap:
92 | name: {{ include "newrelic.common.naming.fullname" . }}-agent-config
93 | items:
94 | - key: newrelic-infra.yml
95 | path: newrelic-infra.yml
96 | {{- end }}
97 | - name: config-volume
98 | configMap:
99 | name: {{ include "newrelic.common.naming.fullname" . }}-config
100 | - name: tmpfs-data
101 | emptyDir: {}
102 | - name: tmpfs-user-data
103 | emptyDir: {}
104 | - name: tmpfs-tmp
105 | emptyDir: {}
106 | {{- with include "newrelic.common.priorityClassName" . }}
107 | priorityClassName: {{ . }}
108 | {{- end }}
109 | nodeSelector:
110 | kubernetes.io/os: linux
111 | {{ include "newrelic.common.nodeSelector" . | nindent 8 }}
112 | {{- with include "newrelic.common.tolerations" . }}
113 | tolerations:
114 | {{- . | nindent 8 }}
115 | {{- end }}
116 | {{- with include "newrelic.common.affinity" . }}
117 | affinity:
118 | {{- . | nindent 8 }}
119 | {{- end }}
120 | hostNetwork: {{ include "newrelic.common.hostNetwork.value" . }}
121 | {{- with include "newrelic.common.dnsConfig" . }}
122 | dnsConfig:
123 | {{- . | nindent 8 }}
124 | {{- end }}
125 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/secret.yaml:
--------------------------------------------------------------------------------
1 | {{- /* Common library will take care of creating the secret or not. */}}
2 | {{- include "newrelic.common.license.secret" . }}
3 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if include "newrelic.common.serviceAccount.create" . }}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | {{- include "newrelic.common.labels" . | nindent 4 }}
7 | name: {{ include "newrelic.common.serviceAccount.name" . }}
8 | namespace: {{ .Release.Namespace }}
9 | annotations:
10 | {{ include "newrelic.common.serviceAccount.annotations" . | indent 4 }}
11 | {{- end -}}
12 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/tests/agent_configmap_test.yaml:
--------------------------------------------------------------------------------
1 | suite: test configmap for newrelic infra agent
2 | templates:
3 | - templates/agent-configmap.yaml
4 | release:
5 | name: my-release
6 | namespace: my-namespace
7 | tests:
8 | - it: has the correct default values
9 | set:
10 | cluster: test-cluster
11 | licenseKey: us-whatever
12 | asserts:
13 | - equal:
14 | path: data["newrelic-infra.yml"]
15 | value: |
16 | is_forward_only: true
17 | http_server_enabled: true
18 | http_server_port: 8001
19 |
20 | - it: integrates properly with the common library
21 | set:
22 | cluster: test-cluster
23 | licenseKey: us-whatever
24 | fedramp.enabled: true
25 | verboseLog: true
26 | asserts:
27 | - equal:
28 | path: data["newrelic-infra.yml"]
29 | value: |
30 | is_forward_only: true
31 | http_server_enabled: true
32 | http_server_port: 8001
33 |
34 | log:
35 | level: trace
36 | fedramp: true
37 |
38 | - it: does not template if the http sink is disabled
39 | set:
40 | cluster: test-cluster
41 | licenseKey: us-whatever
42 | sinks:
43 | newRelicInfra: false
44 | asserts:
45 | - hasDocuments:
46 | count: 0
47 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/tests/configmap_test.yaml:
--------------------------------------------------------------------------------
1 | suite: test configmap for sinks
2 | templates:
3 | - templates/configmap.yaml
4 | release:
5 | name: my-release
6 | namespace: my-namespace
7 | tests:
8 | - it: has the correct sinks when default values used
9 | set:
10 | licenseKey: us-whatever
11 | cluster: a-cluster
12 | asserts:
13 | - equal:
14 | path: data["config.yaml"]
15 | value: |-
16 | sinks:
17 | - name: newRelicInfra
18 | config:
19 | agentEndpoint: http://localhost:8001/v1/data
20 | clusterName: a-cluster
21 | agentHTTPTimeout: 30s
22 | captureDescribe: true
23 | describeRefresh: 24h
24 | captureEvents: true
25 |
26 | - it: honors agentHTTPTimeout
27 | set:
28 | licenseKey: us-whatever
29 | cluster: a-cluster
30 | agentHTTPTimeout: 10s
31 | asserts:
32 | - equal:
33 | path: data["config.yaml"]
34 | value: |-
35 | sinks:
36 | - name: newRelicInfra
37 | config:
38 | agentEndpoint: http://localhost:8001/v1/data
39 | clusterName: a-cluster
40 | agentHTTPTimeout: 10s
41 | captureDescribe: true
42 | describeRefresh: 24h
43 | captureEvents: true
44 |
45 | - it: has the correct sinks defined in local values
46 | set:
47 | licenseKey: us-whatever
48 | cluster: a-cluster
49 | sinks:
50 | stdout: true
51 | newRelicInfra: false
52 | asserts:
53 | - equal:
54 | path: data["config.yaml"]
55 | value: |-
56 | sinks:
57 | - name: stdout
58 | captureDescribe: true
59 | describeRefresh: 24h
60 | captureEvents: true
61 |
62 | - it: allows enabling/disabling event scraping
63 | set:
64 | licenseKey: us-whatever
65 | cluster: a-cluster
66 | scrapers:
67 | events:
68 | enabled: false
69 | asserts:
70 | - equal:
71 | path: data["config.yaml"]
72 | value: |-
73 | sinks:
74 | - name: newRelicInfra
75 | config:
76 | agentEndpoint: http://localhost:8001/v1/data
77 | clusterName: a-cluster
78 | agentHTTPTimeout: 30s
79 | captureDescribe: true
80 | describeRefresh: 24h
81 | captureEvents: false
82 |
83 | - it: allows enabling/disabling description scraping
84 | set:
85 | licenseKey: us-whatever
86 | cluster: a-cluster
87 | scrapers:
88 | descriptions:
89 | enabled: false
90 | asserts:
91 | - equal:
92 | path: data["config.yaml"]
93 | value: |-
94 | sinks:
95 | - name: newRelicInfra
96 | config:
97 | agentEndpoint: http://localhost:8001/v1/data
98 | clusterName: a-cluster
99 | agentHTTPTimeout: 30s
100 | captureDescribe: false
101 | describeRefresh: 24h
102 | captureEvents: true
103 |
104 | - it: allows changing description resync intervals
105 | set:
106 | licenseKey: us-whatever
107 | cluster: a-cluster
108 | scrapers:
109 | descriptions:
110 | resyncPeriod: 4h
111 | asserts:
112 | - equal:
113 | path: data["config.yaml"]
114 | value: |-
115 | sinks:
116 | - name: newRelicInfra
117 | config:
118 | agentEndpoint: http://localhost:8001/v1/data
119 | clusterName: a-cluster
120 | agentHTTPTimeout: 30s
121 | captureDescribe: true
122 | describeRefresh: 4h
123 | captureEvents: true
124 |
125 | - it: has another document generated with the proper config set
126 | set:
127 | licenseKey: us-whatever
128 | cluster: a-cluster
129 | sinks:
130 | stdout: false
131 | newRelicInfra: false
132 | asserts:
133 | - equal:
134 | path: data["config.yaml"]
135 | value: |-
136 | sinks:
137 | captureDescribe: true
138 | describeRefresh: 24h
139 | captureEvents: true
140 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/tests/deployment_test.yaml:
--------------------------------------------------------------------------------
1 | suite: test deployment images
2 | templates:
3 | - templates/deployment.yaml
4 | release:
5 | name: my-release
6 | namespace: my-namespace
7 | tests:
8 | - it: deployment image uses pullSecrets
9 | set:
10 | cluster: my-cluster
11 | licenseKey: us-whatever
12 | images:
13 | pullSecrets:
14 | - name: regsecret
15 | asserts:
16 | - equal:
17 | path: spec.template.spec.imagePullSecrets
18 | value:
19 | - name: regsecret
20 |
21 | - it: deployment images use the proper image tag
22 | set:
23 | cluster: test-cluster
24 | licenseKey: us-whatever
25 | images:
26 | integration:
27 | repository: newrelic/nri-kube-events
28 | tag: "latest"
29 | agent:
30 | repository: newrelic/k8s-events-forwarder
31 | tag: "latest"
32 | asserts:
33 | - matchRegex:
34 | path: spec.template.spec.containers[0].image
35 | pattern: .*newrelic/nri-kube-events:latest$
36 | - matchRegex:
37 | path: spec.template.spec.containers[1].image
38 | pattern: .*newrelic/k8s-events-forwarder:latest$
39 |
40 |
41 | - it: by default the agent forwarder templates
42 | set:
43 | cluster: test-cluster
44 | licenseKey: us-whatever
45 | asserts:
46 | - contains:
47 | path: spec.template.spec.containers
48 | any: true
49 | content:
50 | name: forwarder
51 | - contains:
52 | path: spec.template.spec.volumes
53 | content:
54 | name: config
55 | configMap:
56 | name: my-release-nri-kube-events-agent-config
57 | items:
58 | - key: newrelic-infra.yml
59 | path: newrelic-infra.yml
60 |
61 | - it: agent does not template if the sink is disabled
62 | set:
63 | cluster: test-cluster
64 | licenseKey: us-whatever
65 | sinks:
66 | newRelicInfra: false
67 | asserts:
68 | - notContains:
69 | path: spec.template.spec.containers
70 | any: true
71 | content:
72 | name: forwarder
73 | - notContains:
74 | path: spec.template.spec.volumes
75 | content:
76 | name: config
77 | configMap:
78 | name: my-release-nri-kube-events-agent-config
79 | items:
80 | - key: newrelic-infra.yml
81 | path: newrelic-infra.yml
82 |
83 | - it: has a linux node selector by default
84 | set:
85 | cluster: my-cluster
86 | licenseKey: us-whatever
87 | asserts:
88 | - equal:
89 | path: spec.template.spec.nodeSelector
90 | value:
91 | kubernetes.io/os: linux
92 |
93 | - it: has a linux node selector and additional selectors
94 | set:
95 | cluster: my-cluster
96 | licenseKey: us-whatever
97 | nodeSelector:
98 | aCoolTestLabel: aCoolTestValue
99 | asserts:
100 | - equal:
101 | path: spec.template.spec.nodeSelector
102 | value:
103 | kubernetes.io/os: linux
104 | aCoolTestLabel: aCoolTestValue
105 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/tests/images_test.yaml:
--------------------------------------------------------------------------------
1 | suite: test image compatibility layer
2 | templates:
3 | - templates/deployment.yaml
4 | release:
5 | name: my-release
6 | namespace: my-namespace
7 | tests:
8 | - it: by default the tag is not nil
9 | set:
10 | cluster: test-cluster
11 | licenseKey: us-whatever
12 | asserts:
13 | - notMatchRegex:
14 | path: spec.template.spec.containers[0].image
15 | pattern: ".*nil.*"
16 | - notMatchRegex:
17 | path: spec.template.spec.containers[1].image
18 | pattern: ".*nil.*"
19 |
20 | - it: templates image correctly from the new values
21 | set:
22 | cluster: test-cluster
23 | licenseKey: us-whatever
24 | images:
25 | integration:
26 | registry: ireg
27 | repository: irep
28 | tag: itag
29 | agent:
30 | registry: areg
31 | repository: arep
32 | tag: atag
33 | asserts:
34 | - equal:
35 | path: spec.template.spec.containers[0].image
36 | value: ireg/irep:itag
37 | - equal:
38 | path: spec.template.spec.containers[1].image
39 | value: areg/arep:atag
40 |
41 | - it: templates image correctly from old values
42 | set:
43 | cluster: test-cluster
44 | licenseKey: us-whatever
45 | image:
46 | kubeEvents:
47 | registry: ireg
48 | repository: irep
49 | tag: itag
50 | infraAgent:
51 | registry: areg
52 | repository: arep
53 | tag: atag
54 | asserts:
55 | - equal:
56 | path: spec.template.spec.containers[0].image
57 | value: ireg/irep:itag
58 | - equal:
59 | path: spec.template.spec.containers[1].image
60 | value: areg/arep:atag
61 |
62 | - it: old image values take precedence
63 | set:
64 | cluster: test-cluster
65 | licenseKey: us-whatever
66 | images:
67 | integration:
68 | registry: inew
69 | repository: inew
70 | tag: inew
71 | agent:
72 | registry: anew
73 | repository: anew
74 | tag: anew
75 | image:
76 | kubeEvents:
77 | registry: iold
78 | repository: iold
79 | tag: iold
80 | infraAgent:
81 | registry: aold
82 | repository: aold
83 | tag: aold
84 | asserts:
85 | - equal:
86 | path: spec.template.spec.containers[0].image
87 | value: iold/iold:iold
88 | - equal:
89 | path: spec.template.spec.containers[1].image
90 | value: aold/aold:aold
91 |
92 | - it: pullImagePolicy templates correctly from the new values
93 | set:
94 | cluster: test-cluster
95 | licenseKey: us-whatever
96 | images:
97 | integration:
98 | pullPolicy: new
99 | agent:
100 | pullPolicy: new
101 | asserts:
102 | - equal:
103 | path: spec.template.spec.containers[0].imagePullPolicy
104 | value: new
105 | - equal:
106 | path: spec.template.spec.containers[1].imagePullPolicy
107 | value: new
108 |
109 | - it: pullImagePolicy templates correctly from old values
110 | set:
111 | cluster: test-cluster
112 | licenseKey: us-whatever
113 | image:
114 | kubeEvents:
115 | pullPolicy: old
116 | infraAgent:
117 | pullPolicy: old
118 | asserts:
119 | - equal:
120 | path: spec.template.spec.containers[0].imagePullPolicy
121 | value: old
122 | - equal:
123 | path: spec.template.spec.containers[1].imagePullPolicy
124 | value: old
125 |
126 | - it: old imagePullPolicy values take precedence
127 | set:
128 | cluster: test-cluster
129 | licenseKey: us-whatever
130 | images:
131 | integration:
132 | pullPolicy: new
133 | agent:
134 | pullPolicy: new
135 | image:
136 | kubeEvents:
137 | pullPolicy: old
138 | infraAgent:
139 | pullPolicy: old
140 | asserts:
141 | - equal:
142 | path: spec.template.spec.containers[0].imagePullPolicy
143 | value: old
144 | - equal:
145 | path: spec.template.spec.containers[1].imagePullPolicy
146 | value: old
147 |
148 | - it: imagePullSecrets merge properly
149 | set:
150 | cluster: test-cluster
151 | licenseKey: us-whatever
152 | global:
153 | images:
154 | pullSecrets:
155 | - global: global
156 | images:
157 | pullSecrets:
158 | - images: images
159 | image:
160 | pullSecrets:
161 | - image: image
162 | asserts:
163 | - equal:
164 | path: spec.template.spec.imagePullSecrets
165 | value:
166 | - global: global
167 | - image: image
168 | - images: images
169 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/tests/security_context_test.yaml:
--------------------------------------------------------------------------------
1 | suite: test deployment security context
2 | templates:
3 | - templates/deployment.yaml
4 | release:
5 | name: my-release
6 | namespace: my-namespace
7 | tests:
8 | - it: pod securityContext set to defaults when no values provided
9 | set:
10 | cluster: my-cluster
11 | licenseKey: us-whatever
12 | asserts:
13 | - equal:
14 | path: spec.template.spec.securityContext
15 | value:
16 | runAsUser: 1000
17 | runAsNonRoot: true
18 | - it: pod securityContext set common-library values
19 | set:
20 | cluster: test-cluster
21 | licenseKey: us-whatever
22 | podSecurityContext:
23 | foobar: true
24 | asserts:
25 | - equal:
26 | path: spec.template.spec.securityContext.foobar
27 | value: true
28 | - it: pod securityContext compatibility layer overrides values from common-library
29 | set:
30 | cluster: test-cluster
31 | licenseKey: us-whatever
32 | runAsUser: 1001
33 | podSecurityContext:
34 | runAsUser: 1000
35 | runAsNonRoot: false
36 | asserts:
37 | - equal:
38 | path: spec.template.spec.securityContext
39 | value:
40 | runAsUser: 1001
41 | runAsNonRoot: false
42 | - it: pod securityContext compatibility layer overrides defaults
43 | set:
44 | cluster: test-cluster
45 | licenseKey: us-whatever
46 | runAsUser: 1001
47 | asserts:
48 | - equal:
49 | path: spec.template.spec.securityContext.runAsUser
50 | value: 1001
51 | - it: set to defaults when no containerSecurityContext set
52 | set:
53 | cluster: my-cluster
54 | licenseKey: us-whatever
55 | asserts:
56 | - equal:
57 | path: spec.template.spec.containers[0].securityContext
58 | value:
59 | allowPrivilegeEscalation: false
60 | privileged: false
61 | readOnlyRootFilesystem: true
62 | - equal:
63 | path: spec.template.spec.containers[1].securityContext
64 | value:
65 | allowPrivilegeEscalation: false
66 | privileged: false
67 | readOnlyRootFilesystem: true
68 | - it: set containerSecurityContext custom values
69 | set:
70 | cluster: test-cluster
71 | licenseKey: us-whatever
72 | containerSecurityContext:
73 | foobar: true
74 | asserts:
75 | - equal:
76 | path: spec.template.spec.containers[0].securityContext.foobar
77 | value: true
78 |
--------------------------------------------------------------------------------
/charts/nri-kube-events/values.yaml:
--------------------------------------------------------------------------------
1 | # -- Override the name of the chart
2 | nameOverride: ""
3 | # -- Override the full name of the release
4 | fullnameOverride: ""
5 |
6 | # -- Name of the Kubernetes cluster monitored. Mandatory. Can be configured also with `global.cluster`
7 | cluster: ""
9 | # -- Sets the license key to use. Can be configured also with `global.licenseKey`
9 | licenseKey: ""
10 | # -- In case you don't want to have the license key in your values, this allows you to point to a user-created secret to get the key from there. Can be configured also with `global.customSecretName`
11 | customSecretName: ""
12 | # -- In case you don't want to have the license key in your values, this allows you to point to the secret key where the license key is located. Can be configured also with `global.customSecretLicenseKey`
13 | customSecretLicenseKey: ""
14 |
15 | # -- Images used by the chart for the integration and agents
16 | # @default -- See `values.yaml`
17 | images:
18 | # -- Image for the New Relic Kubernetes integration
19 | # @default -- See `values.yaml`
20 | integration:
21 | registry:
22 | repository: newrelic/nri-kube-events
23 | tag:
24 | pullPolicy: IfNotPresent
25 | # -- Image for the New Relic Infrastructure Agent sidecar
26 | # @default -- See `values.yaml`
27 | agent:
28 | registry:
29 | repository: newrelic/k8s-events-forwarder
30 | tag: 1.64.0
31 | pullPolicy: IfNotPresent
32 | # -- The secrets that are needed to pull images from a custom registry.
33 | pullSecrets: []
34 | # - name: regsecret
35 |
36 | # -- Resources for the integration container
37 | resources: {}
38 | # limits:
39 | # cpu: 100m
40 | # memory: 128Mi
41 | # requests:
42 | # cpu: 100m
43 | # memory: 128Mi
44 |
45 | # -- Resources for the forwarder sidecar container
46 | forwarder:
47 | resources: {}
48 | # limits:
49 | # cpu: 100m
50 | # memory: 128Mi
51 | # requests:
52 | # cpu: 100m
53 | # memory: 128Mi
54 |
55 | rbac:
56 | # -- Specifies whether RBAC resources should be created
57 | create: true
58 |
59 | # -- Settings controlling ServiceAccount creation
60 | # @default -- See `values.yaml`
61 | serviceAccount:
62 | # serviceAccount.create -- (bool) Specifies whether a ServiceAccount should be created
63 | # @default -- `true`
64 | create:
65 | # If not set and create is true, a name is generated using the fullname template
66 | name: ""
67 | # Specify any annotations to add to the ServiceAccount
68 | annotations:
69 |
70 | # -- Annotations to add to the pod.
71 | podAnnotations: {}
72 | deployment:
73 | # deployment.annotations -- Annotations to add to the Deployment.
74 | annotations: {}
75 | # -- Additional labels for chart pods
76 | podLabels: {}
77 | # -- Additional labels for chart objects
78 | labels: {}
79 |
80 | # -- Amount of time to wait before timing out when sending metrics to the metric forwarder
81 | agentHTTPTimeout: "30s"
82 |
83 | # -- Configures where the metrics will be written. Mostly for debugging purposes.
84 | # @default -- See `values.yaml`
85 | sinks:
86 | # -- Enable the stdout sink to also see all events in the logs.
87 | stdout: false
88 | # -- The newRelicInfra sink sends all events to New Relic.
89 | newRelicInfra: true
90 |
91 | # -- Configure the various kinds of scrapers that should be run.
92 | # @default -- See `values.yaml`
93 | scrapers:
94 | descriptions:
95 | enabled: true
96 | resyncPeriod: "24h"
97 | events:
98 | enabled: true
99 |
100 | # -- Sets pod's priorityClassName. Can be configured also with `global.priorityClassName`
101 | priorityClassName: ""
102 | # -- (bool) Sets pod's hostNetwork. Can be configured also with `global.hostNetwork`
103 | # @default -- `false`
104 | hostNetwork:
105 | # -- Sets pod's dnsConfig. Can be configured also with `global.dnsConfig`
106 | dnsConfig: {}
107 | # -- Sets security context (at pod level). Can be configured also with `global.podSecurityContext`
108 | podSecurityContext: {}
109 | # -- Sets security context (at container level). Can be configured also with `global.containerSecurityContext`
110 | containerSecurityContext: {}
111 |
112 | # -- Sets pod/node affinities. Can be configured also with `global.affinity`
113 | affinity: {}
114 | # -- Sets pod's node selector. Can be configured also with `global.nodeSelector`
115 | nodeSelector: {}
116 | # -- Sets pod's tolerations to node taints. Can be configured also with `global.tolerations`
117 | tolerations: []
118 |
119 | # -- Adds extra attributes to the cluster and all the metrics emitted to the backend. Can be configured also with `global.customAttributes`
120 | customAttributes: {}
121 |
122 | # -- Configures the integration to send all HTTP/HTTPS requests through the proxy at that URL. The URL should have a standard format like `https://user:password@hostname:port`. Can be configured also with `global.proxy`
123 | proxy: ""
124 |
125 | # -- (bool) Send the metrics to the staging backend. Requires a valid staging license key. Can be configured also with `global.nrStaging`
126 | # @default -- `false`
127 | nrStaging:
128 | fedramp:
129 | # -- (bool) Enables FedRAMP. Can be configured also with `global.fedramp.enabled`
130 | # @default -- `false`
131 | enabled:
132 |
133 | # -- (bool) Enables debug logs for this integration, or for all integrations if it is set globally. Can be configured also with `global.verboseLog`
134 | # @default -- `false`
135 | verboseLog:
136 |
--------------------------------------------------------------------------------
/cla.md:
--------------------------------------------------------------------------------
1 | # NEW RELIC, INC.
2 | ## INDIVIDUAL CONTRIBUTOR LICENSE AGREEMENT
3 | Thank you for your interest in contributing to the open source projects of New Relic, Inc. (“New Relic”). In order to clarify the intellectual property license granted with Contributions from any person or entity, New Relic must have a Contributor License Agreement ("Agreement") on file that has been signed by each Contributor, indicating agreement to the license terms below. This Agreement is for your protection as a Contributor as well as the protection of New Relic; it does not change your rights to use your own Contributions for any other purpose.
4 |
5 | You accept and agree to the following terms and conditions for Your present and future Contributions submitted to New Relic. Except for the licenses granted herein to New Relic and recipients of software distributed by New Relic, You reserve all right, title, and interest in and to Your Contributions.
6 |
7 | ## Definitions.
8 | 1. "You" (or "Your") shall mean the copyright owner or legal entity authorized by the copyright owner that is entering into this Agreement with New Relic. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
9 | 2. "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to New Relic for inclusion in, or documentation of, any of the products managed or maintained by New Relic (the "Work"). For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to New Relic or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, New Relic for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
10 | 3. Grant of Copyright License. Subject to the terms and conditions of this Agreement, You hereby grant to New Relic and to recipients of software distributed by New Relic a perpetual, worldwide, non-exclusive, no-charge, royalty-free, transferable, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works.
11 | 4. Grant of Patent License. Subject to the terms and conditions of this Agreement, You hereby grant to New Relic and to recipients of software distributed by New Relic a perpetual, worldwide, non-exclusive, no-charge, royalty-free, transferable, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contributions alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that Your Contribution, or the Work to which You have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
12 | 5. You represent that You are legally entitled to grant the above licenses. If Your employer(s) has rights to intellectual property that You create that includes Your Contributions, You represent that You have received permission to make Contributions on behalf of that employer, that Your employer has waived such rights for Your Contributions to New Relic, or that Your employer has executed a separate Agreement with New Relic.
13 | 6. You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which You are personally aware and which are associated with any part of Your Contributions.
14 | 7. You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON- INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
15 | 8. Should You wish to submit work that is not Your original creation, You may submit it to New Relic separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which You are personally aware, and conspicuously marking the work as "Submitted on behalf of a third-party: [named here]".
16 | 9. You agree to notify New Relic of any facts or circumstances of which You become aware that would make these representations inaccurate in any respect.
17 |
--------------------------------------------------------------------------------
/cmd/nri-kube-events/config.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 New Relic Corporation. All rights reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | package main
4 |
5 | import (
6 | "fmt"
7 | "io"
8 | "os"
9 | "time"
10 |
11 | "github.com/sirupsen/logrus"
12 | "gopkg.in/yaml.v3"
13 |
14 | "github.com/newrelic/nri-kube-events/pkg/sinks"
15 | )
16 |
17 | const DefaultDescribeRefresh = 24 * time.Hour
18 |
19 | type config struct {
20 | WorkQueueLength *int `yaml:"workQueueLength"`
21 | Sinks []sinks.SinkConfig
22 |
23 | CaptureEvents *bool `yaml:"captureEvents"`
24 | CaptureDescribe *bool `yaml:"captureDescribe"`
25 | DescribeRefresh *time.Duration `yaml:"describeRefresh"`
26 | }
27 |
28 | func loadConfig(file io.Reader) (config, error) {
29 | var cfg config
30 |
31 | contents, err := io.ReadAll(file)
32 | if err != nil {
33 | return cfg, fmt.Errorf("could not read configuration file: %w", err)
34 | }
35 |
36 | err = yaml.Unmarshal(contents, &cfg)
37 | if err != nil {
38 | return cfg, fmt.Errorf("could not parse configuration file: %w", err)
39 | }
40 |
41 | return cfg, nil
42 | }
43 |
44 | func mustLoadConfigFile(configFile string) config {
45 | f, err := os.Open(configFile)
46 | if err != nil {
47 | logrus.Fatalf("could not open configuration file: %v", err)
48 | }
49 |
50 | cfg, err := loadConfig(f)
51 |
52 | if errClose := f.Close(); errClose != nil {
53 | logrus.Warningf("error closing config file: %v", errClose)
54 | }
55 |
56 | if err != nil {
57 | logrus.Fatalf("could not parse configuration file: %v", err)
58 | }
59 |
60 | return cfg
61 | }
62 |
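
Note that every field in `config` is a pointer, so an omitted key stays `nil` and `main.go` can treat it as "enabled by default" (see the `cfg.CaptureEvents == nil || *cfg.CaptureEvents` checks later in this file set). A minimal, hypothetical test sketch illustrating that behaviour; it is not part of the repository, but could sit next to `config_test.go` and reuse its imports (`strings`, `testing`, `assert`):

```go
// Hypothetical test, not part of the repository.
func TestOmittedKeysStayNil(t *testing.T) {
	cfg, err := loadConfig(strings.NewReader("captureDescribe: false\n"))
	assert.NoError(t, err)

	// Omitted key -> nil pointer -> main() treats the scraper as enabled.
	assert.Nil(t, cfg.CaptureEvents)
	// Explicitly set key -> pointer to the parsed value.
	assert.False(t, *cfg.CaptureDescribe)
}
```
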
--------------------------------------------------------------------------------
/cmd/nri-kube-events/config_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 New Relic Corporation. All rights reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | package main
4 |
5 | import (
6 | "strings"
7 | "testing"
8 | "time"
9 |
10 | "github.com/stretchr/testify/assert"
11 |
12 | "github.com/newrelic/nri-kube-events/pkg/sinks"
13 | )
14 |
15 | var testConf = `
16 | captureEvents: false
17 | captureDescribe: true
18 | describeRefresh: 3h
19 | workQueueLength: 1337
20 | sinks:
21 | - name: stdout
22 | config:
23 | verbose: true
24 | - name: newRelicInfra
25 | config:
26 | agentEndpoint: "http://infra-agent.default:8001/v1/data"
27 | clusterName: "minikube"
28 | `
29 |
30 | func TestConfigParse(t *testing.T) {
31 | captureEvents := false
32 | captureDescribe := true
33 | describeRefresh := 3 * time.Hour
34 | workQueueLength := 1337
35 |
36 | tests := []struct {
37 | serialized string
38 | parsed config
39 | }{
40 | {
41 | serialized: testConf,
42 | parsed: config{
43 | CaptureEvents: &captureEvents,
44 | CaptureDescribe: &captureDescribe,
45 | DescribeRefresh: &describeRefresh,
46 | WorkQueueLength: &workQueueLength,
47 | Sinks: []sinks.SinkConfig{
48 | {
49 | Name: "stdout",
50 | Config: map[string]string{
51 | "verbose": "true",
52 | },
53 | },
54 | {
55 | Name: "newRelicInfra",
56 | Config: map[string]string{
57 | "clusterName": "minikube",
58 | "agentEndpoint": "http://infra-agent.default:8001/v1/data",
59 | },
60 | },
61 | },
62 | },
63 | },
64 | {
65 | serialized: "",
66 | parsed: config{
67 | WorkQueueLength: nil,
68 | Sinks: []sinks.SinkConfig(nil),
69 | },
70 | },
71 | }
72 |
73 | for _, test := range tests {
74 | conf := strings.NewReader(test.serialized)
75 | got, err := loadConfig(conf)
76 | assert.NoError(t, err)
77 | assert.Equal(t, test.parsed, got)
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/cmd/nri-kube-events/main.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 New Relic Corporation. All rights reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | package main
4 |
5 | import (
6 | "context"
7 | "flag"
8 | "fmt"
9 | "net/http"
10 | "os"
11 | "os/signal"
12 | "runtime"
13 | "sync"
14 | "syscall"
15 | "time"
16 |
17 | "github.com/prometheus/client_golang/prometheus/promhttp"
18 | "github.com/sirupsen/logrus"
19 | "k8s.io/client-go/informers"
20 | "k8s.io/client-go/kubernetes"
21 | restclient "k8s.io/client-go/rest"
22 | "k8s.io/client-go/tools/cache"
23 | "k8s.io/client-go/tools/clientcmd"
24 |
25 | "github.com/newrelic/nri-kube-events/pkg/descriptions"
26 | "github.com/newrelic/nri-kube-events/pkg/events"
27 | "github.com/newrelic/nri-kube-events/pkg/router"
28 | "github.com/newrelic/nri-kube-events/pkg/sinks"
29 | )
30 |
31 | var (
32 | integrationVersion = "0.0.0"
33 | gitCommit = ""
34 | buildDate = ""
35 | )
36 |
37 | var (
38 | configFile = flag.String("config", "config.yaml", "location of the configuration file")
39 | kubeConfig = flag.String("kubeconfig", "", "location of the k8s configuration file. Usually in ~/.kube/config")
40 | logLevel = flag.String("loglevel", "info", "Log level: [warning, info, debug]")
41 | promAddr = flag.String("promaddr", "0.0.0.0:8080", "Address to serve prometheus metrics on")
42 | )
43 |
44 | func main() {
45 | flag.Parse()
46 | setLogLevel(*logLevel, logrus.InfoLevel)
47 |
48 | logrus.Infof(
49 | "New Relic Kube Events integration Version: %s, Platform: %s, GoVersion: %s, GitCommit: %s, BuildDate: %s",
50 | integrationVersion,
51 | fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
52 | runtime.Version(),
53 | gitCommit,
54 | buildDate)
55 | cfg := mustLoadConfigFile(*configFile)
56 |
57 | activeSinks, err := sinks.Create(cfg.Sinks, integrationVersion)
58 | if err != nil {
59 | logrus.Fatalf("could not create sinks: %v", err)
60 | }
61 |
62 | wg := &sync.WaitGroup{}
63 | stopChan := listenForStopSignal()
64 |
65 | opts := []router.ConfigOption{
66 | router.WithWorkQueueLength(cfg.WorkQueueLength), // will ignore null values
67 | }
68 |
69 | if cfg.CaptureEvents == nil || *cfg.CaptureEvents {
70 | eventsInformer := createEventsInformer(stopChan)
71 | activeEventHandlers := make(map[string]events.EventHandler)
72 |
73 | for name, sink := range activeSinks {
74 | activeEventHandlers[name] = sink
75 | }
76 |
77 | eventRouter := events.NewRouter(eventsInformer, activeEventHandlers, opts...)
78 | wg.Add(1)
79 | go func() {
80 | defer wg.Done()
81 | eventRouter.Run(stopChan)
82 | }()
83 | }
84 |
85 | if cfg.CaptureDescribe == nil || *cfg.CaptureDescribe {
86 | resync := DefaultDescribeRefresh
87 | if cfg.DescribeRefresh != nil {
88 | resync = *cfg.DescribeRefresh
89 | }
90 | resourceInformers := createInformers(stopChan, resync)
91 | activeObjectHandlers := make(map[string]descriptions.ObjectHandler)
92 |
93 | for name, sink := range activeSinks {
94 | activeObjectHandlers[name] = sink
95 | }
96 |
97 | descRouter := descriptions.NewRouter(resourceInformers, activeObjectHandlers, opts...)
98 |
99 | wg.Add(1)
100 | go func() {
101 | defer wg.Done()
102 | descRouter.Run(stopChan)
103 | }()
104 | }
105 |
106 | wg.Add(1)
107 | go func() {
108 | defer wg.Done()
109 | servePrometheus(*promAddr, stopChan)
110 | }()
111 |
112 | wg.Wait()
113 | logrus.Infoln("Shutdown complete")
114 | }
115 |
116 | func servePrometheus(addr string, stopChan <-chan struct{}) {
117 | logrus.Infof("Serving Prometheus metrics on %s", addr)
118 |
119 | server := &http.Server{
120 | Addr: addr,
121 | ReadHeaderTimeout: 3 * time.Second,
122 | Handler: promhttp.Handler(),
123 | }
124 |
125 | go func() {
126 | err := server.ListenAndServe()
127 | logrus.Fatalf("Could not serve Prometheus on %s: %v", addr, err)
128 | }()
129 |
130 | <-stopChan
131 | err := server.Shutdown(context.Background())
132 | logrus.WithError(err).Warn("Failed to gracefully shutdown prometheus server")
133 | }
134 |
135 | // listenForStopSignal returns a channel that will be closed
136 | // when a SIGINT or SIGTERM signal is received
137 | func listenForStopSignal() <-chan struct{} {
138 | stopChan := make(chan struct{})
139 | go func() {
140 | c := make(chan os.Signal, 1)
141 | signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
142 | sig := <-c
143 |
144 | logrus.Infof("%s signal detected, stopping server.", sig)
145 | close(stopChan)
146 | }()
147 |
148 | return stopChan
149 | }
150 |
151 | // createEventsInformer creates a SharedIndexInformer that will listen for Events.
152 | // Only events happening after creation will be returned, existing events are discarded.
153 | func createEventsInformer(stopChan <-chan struct{}) cache.SharedIndexInformer {
154 | clientset, err := getClientset(*kubeConfig)
155 | if err != nil {
156 | logrus.Fatalf("could not create kubernetes client: %v", err)
157 | }
158 |
159 | // Setting resync to 0 means the SharedInformer will never refresh its internal cache against the API Server.
160 | // This is important, because later on we clear the initial cache.
161 | resync := time.Duration(0)
162 | sharedInformers := informers.NewSharedInformerFactory(clientset, resync)
163 | eventsInformer := sharedInformers.Core().V1().Events().Informer()
164 |
165 | sharedInformers.Start(stopChan)
166 |
167 | // wait for the internal cache to sync. This is the only time the cache will be filled,
168 | // since we've set resync to 0. This behavior is very important,
169 | // because we will delete the cache to prevent duplicate events from being sent.
170 | // If we removed this cache-deletion and nri-kube-events was restarted, we would send lots of duplicate events
171 | sharedInformers.WaitForCacheSync(stopChan)
172 |
173 | // There doesn't seem to be a way to start a SharedInformer without local cache,
174 | // So we manually delete the cached events. We are only interested in new events.
175 | for _, obj := range eventsInformer.GetStore().List() {
176 | if err := eventsInformer.GetStore().Delete(obj); err != nil {
177 | logrus.Warningln("Unable to delete cached event, duplicated event is possible")
178 | }
179 | }
180 |
181 | return eventsInformer
182 | }
183 |
184 | // createInformers creates SharedIndexInformers that will listen for the resources we care about.
185 | func createInformers(stopChan <-chan struct{}, resync time.Duration) []cache.SharedIndexInformer {
186 | clientset, err := getClientset(*kubeConfig)
187 | if err != nil {
188 | logrus.Fatalf("could not create kubernetes client: %v", err)
189 | }
190 |
191 | sharedInformers := informers.NewSharedInformerFactory(clientset, resync)
192 |
193 | cronjobsInformer := sharedInformers.Batch().V1().CronJobs().Informer()
194 | daemonsetsInformer := sharedInformers.Apps().V1().DaemonSets().Informer()
195 | deploymentInformer := sharedInformers.Apps().V1().Deployments().Informer()
196 | namespacesInformer := sharedInformers.Core().V1().Namespaces().Informer()
197 | nodesInformer := sharedInformers.Core().V1().Nodes().Informer()
198 | jobsInformer := sharedInformers.Batch().V1().Jobs().Informer()
199 | pvInformer := sharedInformers.Core().V1().PersistentVolumes().Informer()
200 | pvcInformer := sharedInformers.Core().V1().PersistentVolumeClaims().Informer()
201 | podsInformer := sharedInformers.Core().V1().Pods().Informer()
202 | servicesInformer := sharedInformers.Core().V1().Services().Informer()
203 |
204 | sharedInformers.Start(stopChan)
205 |
206 | return []cache.SharedIndexInformer{
207 | cronjobsInformer,
208 | daemonsetsInformer,
209 | deploymentInformer,
210 | namespacesInformer,
211 | nodesInformer,
212 | jobsInformer,
213 | pvInformer,
214 | pvcInformer,
215 | podsInformer,
216 | servicesInformer,
217 | }
218 | }
219 |
220 | // getClientset returns a kubernetes clientset.
221 | // It loads a kubeconfig file if the kubeconfig parameter is set
222 | // If it's not set, it will try to load the InClusterConfig
223 | func getClientset(kubeconfig string) (*kubernetes.Clientset, error) {
224 | var conf *restclient.Config
225 | var err error
226 |
227 | if kubeconfig != "" {
228 | conf, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
229 | } else {
230 | conf, err = restclient.InClusterConfig()
231 | }
232 |
233 | if err != nil {
234 | return nil, fmt.Errorf("cannot load kubernetes client configuration: %w", err)
235 | }
236 |
237 | return kubernetes.NewForConfig(conf)
238 | }
239 |
240 | func setLogLevel(logLevel string, fallback logrus.Level) {
241 | level, err := logrus.ParseLevel(logLevel)
242 | if err != nil {
243 | logrus.Warningf("invalid loglevel %s, defaulting to %s.", logLevel, fallback.String())
244 | level = fallback
245 | }
246 |
247 | logrus.SetLevel(level)
248 | }
249 |
--------------------------------------------------------------------------------
/deploy/local.yaml.example:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: nri-dev
6 | ---
7 | apiVersion: v1
8 | kind: ServiceAccount
9 | metadata:
10 | name: nri-kube-events
11 | namespace: nri-dev
12 | ---
13 | apiVersion: rbac.authorization.k8s.io/v1
14 | kind: ClusterRole
15 | metadata:
16 | name: nri-kube-events
17 | rules:
18 | - apiGroups:
19 | - ""
20 | resources:
21 | - events
22 | - namespaces
23 | - nodes
24 | - jobs
25 | - persistentvolumes
26 | - persistentvolumeclaims
27 | - pods
28 | - services
29 | verbs:
30 | - get
31 | - watch
32 | - list
33 | - apiGroups:
34 | - apps
35 | resources:
36 | - daemonsets
37 | - deployments
38 | verbs:
39 | - get
40 | - watch
41 | - list
42 | - apiGroups:
43 | - batch
44 | resources:
45 | - cronjobs
46 | - jobs
47 | verbs:
48 | - get
49 | - watch
50 | - list
51 | ---
52 | apiVersion: rbac.authorization.k8s.io/v1
53 | kind: ClusterRoleBinding
54 | metadata:
55 | name: nri-kube-events
56 | roleRef:
57 | apiGroup: rbac.authorization.k8s.io
58 | kind: ClusterRole
59 | name: nri-kube-events
60 | subjects:
61 | - kind: ServiceAccount
62 | name: nri-kube-events
63 | namespace: nri-dev
64 | ---
65 | apiVersion: v1
66 | data:
67 | config.yaml: |-
68 | sinks:
69 | - name: stdout
70 | - name: newRelicInfra
71 | config:
72 | agentEndpoint: http://localhost:8001/v1/data
73 | clusterName:
74 | agentHTTPTimeout: 30s
75 | kind: ConfigMap
76 | metadata:
77 | name: nri-kube-events
78 | namespace: nri-dev
79 | ---
80 | apiVersion: apps/v1
81 | kind: Deployment
82 | metadata:
83 | name: nri-kube-events
84 | namespace: nri-dev
85 | labels:
86 | app: nri-kube-events
87 | app.kubernetes.io/name: nri-kube-events
88 | spec:
89 | replicas: 1
90 | selector:
91 | matchLabels:
92 | app.kubernetes.io/name: nri-kube-events
93 | template:
94 | metadata:
95 | labels:
96 | app.kubernetes.io/name: nri-kube-events
97 | spec:
98 | containers:
99 | - name: kube-events
100 | image: newrelic/nri-kube-events:latest
101 | resources:
102 | limits:
103 | memory: "128Mi"
104 | cpu: "500m"
105 | requests:
106 | memory: "128Mi"
107 | cpu: "100m"
108 | imagePullPolicy: IfNotPresent
109 | args: ["-config", "/app/config/config.yaml", "-loglevel", "debug"]
110 | volumeMounts:
111 | - name: config-volume
112 | mountPath: /app/config
113 | - name: infra-agent
114 | image: newrelic/k8s-events-forwarder:1.42.0
115 | resources:
116 | limits:
117 | memory: 128Mi
118 | cpu: 500m
119 | requests:
120 | memory: 128Mi
121 | cpu: 100m
122 | securityContext:
123 | privileged: false
124 | runAsUser: 1000 # nri-kube-events
125 | runAsGroup: 2000 # nri-kube-events
126 | runAsNonRoot: false
127 | allowPrivilegeEscalation: false
128 | readOnlyRootFilesystem: true
129 | ports:
130 | - containerPort: 8001
131 | env:
132 | - name: "NRIA_LICENSE_KEY"
133 | value: ""
134 | # - name: "NRIA_VERBOSE"
135 | # value: "1"
136 | - name: NRIA_STAGING
137 | value: "true"
138 | - name: NRIA_COLLECTOR_URL
139 | value: "https://staging-infra-api.newrelic.com"
140 | volumeMounts:
141 | - mountPath: /var/db/newrelic-infra/data
142 | name: tmpfs-data
143 | - mountPath: /var/db/newrelic-infra/user_data
144 | name: tmpfs-user-data
145 | - mountPath: /tmp
146 | name: tmpfs-tmp
147 | serviceAccountName: nri-kube-events
148 | volumes:
149 | - name: config-volume
150 | configMap:
151 | name: nri-kube-events
152 | - name: tmpfs-data
153 | emptyDir: {}
154 | - name: tmpfs-user-data
155 | emptyDir: {}
156 | - name: tmpfs-tmp
157 | emptyDir: {}
158 |
--------------------------------------------------------------------------------
/e2e/1_28-exceptions.yml:
--------------------------------------------------------------------------------
1 | except_metrics: []
2 |
--------------------------------------------------------------------------------
/e2e/1_29-exceptions.yml:
--------------------------------------------------------------------------------
1 | except_metrics: []
2 |
--------------------------------------------------------------------------------
/e2e/1_30-exceptions.yml:
--------------------------------------------------------------------------------
1 | except_metrics: []
2 |
--------------------------------------------------------------------------------
/e2e/1_31-exceptions.yml:
--------------------------------------------------------------------------------
1 | except_metrics: []
2 |
--------------------------------------------------------------------------------
/e2e/1_32-exceptions.yml:
--------------------------------------------------------------------------------
1 | except_metrics: []
2 |
--------------------------------------------------------------------------------
/e2e/README.md:
--------------------------------------------------------------------------------
1 | # E2E tests
2 | You can run E2E tests on any cluster, but note that scraping the control plane may not be possible, or may require specific values, depending on the flavour.
3 |
4 | ## Automated local tests in Minikube cluster
5 | In order to run E2E tests locally, you can use `e2e-tests.sh`. To get help on usage, call the script with the `--help` flag:
6 | ```shell
7 | ./e2e-tests.sh --help
8 | ```
9 | Please note that the script expects a New Relic account in the production environment.
10 |
11 | ## Personalized tests
12 | Sometimes you may need extra flexibility on how to run tests. While the following description uses Minikube as an example, you can personalize the example as needed to use the cluster of your choosing, or to run tests in the staging environment.
13 |
14 | Initialize a test cluster.
15 | ```shell
16 | minikube start --container-runtime=containerd --kubernetes-version=v1.XX.X
17 | minikube addons enable metrics-server
18 | ```
19 |
20 | Note that the control plane flags in `e2e-values.yml` have been set to match the minikube specifications.
21 |
22 | Then you need to build and load the image:
23 | ```shell
24 | docker buildx build --load . --tag e2e/nri-kube-events:e2e
25 | minikube image load e2e/nri-kube-events:e2e
26 | ```
27 |
28 | Then, add the required Helm repositories.
29 | ```shell
30 | helm repo add nri-kube-events https://newrelic.github.io/nri-kube-events
31 | helm repo update
32 | ```
33 |
34 | You also need to install the binary from `https://github.com/newrelic/newrelic-integration-e2e-action/tree/main/newrelic-integration-e2e`, which is used by the e2e tests:
35 | ```shell
36 | go install github.com/newrelic/newrelic-integration-e2e-action@latest
37 | ```
38 |
39 | You need New Relic's `LICENSE_KEY` (Ingest - License), `API_KEY` (User key) and `ACCOUNT_ID` before running the tests. For more information on how to find these keys, please see [this documentation](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/).
40 |
41 | Set the following environment variables:
42 | ```shell
43 | export EXCEPTIONS_SOURCE_FILE="*-exceptions.yml"
44 | export LICENSE_KEY=xxx
45 | export API_KEY=xxx
46 | export ACCOUNT_ID=xxx
47 | ```
48 |
49 | Since some metrics are added and removed depending on the k8s version, `EXCEPTIONS_SOURCE_FILE` should point to the `*-exceptions.yml` file that matches the k8s version you are testing on.
50 |
51 | Run the following command to execute the test, and make sure that it is run at the root of the repo:
52 |
53 | ```shell
54 | EXCEPTIONS_SOURCE_FILE=${EXCEPTIONS_SOURCE_FILE} LICENSE_KEY=${LICENSE_KEY} go run github.com/newrelic/newrelic-integration-e2e-action@latest \
55 | --commit_sha=test-string --retry_attempts=5 --retry_seconds=60 \
56 | --account_id=${ACCOUNT_ID} --api_key=${API_KEY} --license_key=${LICENSE_KEY} \
57 | --spec_path=test-specs.yml --verbose_mode=true --agent_enabled="false"
58 | ```
59 |
60 | ### Notes specific to staging environment
61 | In order to enable testing against the staging environment, the following modifications need to be made:
62 | - Open the `./test-specs.yml` file and add `--set global.nrStaging=true` to the end of **all** occurrences of this line: `- helm upgrade --install ${SCENARIO_TAG} --namespace nr-${SCENARIO_TAG} --create-namespace ../charts/nri-kube-events ...`.
63 | - Add `--region="Staging"` to the command that executes the tests. For example:
64 | ```shell
65 | EXCEPTIONS_SOURCE_FILE=${EXCEPTIONS_SOURCE_FILE} LICENSE_KEY=${LICENSE_KEY} go run github.com/newrelic/newrelic-integration-e2e-action@latest \
66 | --commit_sha=test-string --retry_attempts=5 --retry_seconds=60 \
67 | --account_id=${ACCOUNT_ID} --api_key=${API_KEY} --license_key=${LICENSE_KEY} \
68 | --spec_path=test-specs.yml --verbose_mode=true --agent_enabled="false" --region="Staging"
69 | ```
70 |
71 | You can check the [e2e workflow](../.github/workflows/e2e.yaml) for more details about how this is used in the development workflow.
72 |
--------------------------------------------------------------------------------
/e2e/e2e-tests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Test cluster
4 | CLUSTER_NAME=""
5 | K8S_VERSION=""
6 |
7 | # Metric exceptions
8 | EXCEPTIONS_SOURCE_FILE=""
9 |
10 | # New Relic account (production) details
11 | ACCOUNT_ID=""
12 | API_KEY=""
13 | LICENSE_KEY=""
14 |
15 | # Unset if you only want to setup a test cluster with E2E specifications
16 | # Set to true if you additionally want to run tests
17 | RUN_TESTS=""
18 |
19 | function main() {
20 | parse_args "$@"
21 | create_cluster
22 | if [[ "$RUN_TESTS" == "true" ]]; then
23 | run_tests
24 | teardown
25 | fi
26 | }
27 |
28 | function parse_args() {
29 | totalArgs=$#
30 |
31 | # Arguments are passed by value, so other functions
32 | # are not affected by destructive processing
33 | while [[ $# -gt 0 ]]; do
34 | case $1 in
35 | --account_id)
36 | shift
37 | ACCOUNT_ID="$1"
38 | ;;
39 | --api_key)
40 | shift
41 | API_KEY="$1"
42 | ;;
43 | --exceptions)
44 | shift
45 | EXCEPTIONS_SOURCE_FILE="$1"
46 | ;;
47 | --help)
48 | help
49 | exit 0
50 | ;;
51 | --k8s_version)
52 | shift
53 | K8S_VERSION="$1"
54 | ;;
55 | --license_key)
56 | shift
57 | LICENSE_KEY="$1"
58 | ;;
59 | --run_tests)
60 | RUN_TESTS="true"
61 | ;;
62 | -*|--*|*)
63 | echo "Unknown field: $1"
64 | exit 1
65 | ;;
66 | esac
67 | shift
68 | done
69 |
70 | if [[ totalArgs -lt 10 ]]; then
71 | help
72 | exit 1
73 | fi
74 | }
75 |
76 | function help() {
77 | cat <<END
78 | Usage:
79 | ./e2e-tests.sh --k8s_version <k8s version> --exceptions <exceptions file>
80 |                --account_id <account id> --api_key <api key>
81 |                --license_key <license key> [--run_tests]
82 |
83 | --k8s_version: valid Kubernetes cluster version. It is highly recommended to use same versions as E2E tests
84 | --exceptions: choose one '*-exceptions.yml' file
85 | --account_id: New Relic account in production
86 | --api_key: key type 'USER'
87 | --license_key: key type 'INGEST - LICENSE'
88 | --run_tests: if unset, create a cluster with specifications matching E2E tests
89 | otherwise run tests in addition to setting up cluster
90 | END
91 | }
92 |
93 | function create_cluster() {
94 | cd ..
95 |
96 | echo "🔄 Setup"
97 | minikube delete --all > /dev/null
98 | now=$( date "+%Y-%m-%d-%H-%M-%S" )
99 | CLUSTER_NAME=${now}-e2e-tests
100 |
101 | echo "🔄 Creating cluster ${CLUSTER_NAME}"
102 | minikube start --container-runtime=containerd --kubernetes-version=v${K8S_VERSION} --profile ${CLUSTER_NAME} > /dev/null
103 |
104 | echo "🔄 Building Docker image"
105 | docker buildx build --load . --tag e2e/nri-kube-events:e2e --quiet > /dev/null
106 |
107 | echo "🔄 Loading image into cluster"
108 | minikube image load e2e/nri-kube-events:e2e --profile ${CLUSTER_NAME} > /dev/null
109 |
110 | echo "🔄 Adding Helm repositories"
111 | helm repo add nri-kube-events https://newrelic.github.io/nri-kube-events > /dev/null
112 | helm repo update > /dev/null
113 |
114 | cd e2e/
115 | }
116 |
117 | function run_tests() {
118 | echo "🔄 Installing E2E action"
119 | go install github.com/newrelic/newrelic-integration-e2e-action@latest > /dev/null
120 |
121 | echo "🔄 Starting E2E tests"
122 | export EXCEPTIONS_SOURCE_FILE=${EXCEPTIONS_SOURCE_FILE}
123 | export ACCOUNT_ID=${ACCOUNT_ID}
124 | export API_KEY=${API_KEY}
125 | export LICENSE_KEY=${LICENSE_KEY}
126 | EXCEPTIONS_SOURCE_FILE=${EXCEPTIONS_SOURCE_FILE} LICENSE_KEY=${LICENSE_KEY} go run github.com/newrelic/newrelic-integration-e2e-action@latest \
127 | --commit_sha=test-string --retry_attempts=5 --retry_seconds=60 \
128 | --account_id=${ACCOUNT_ID} --api_key=${API_KEY} --license_key=${LICENSE_KEY} \
129 | --spec_path=test-specs.yml --verbose_mode=true --agent_enabled="false"
130 | }
131 |
132 | function teardown() {
133 | echo "🔄 Teardown"
134 | minikube delete --all > /dev/null
135 | }
136 |
137 | main "$@"
138 |
--------------------------------------------------------------------------------
/e2e/e2e-values.yml:
--------------------------------------------------------------------------------
1 | images:
2 | integration:
3 | repository: e2e/nri-kube-events
4 | tag: e2e
5 | pullPolicy: Never
6 |
--------------------------------------------------------------------------------
/e2e/test-specs.yml:
--------------------------------------------------------------------------------
1 | description: |
2 | End-to-end tests for nri-kube-events
3 |
4 | custom_test_key: clusterName
5 |
6 | scenarios:
7 | - description: |
8 | This scenario will verify that metrics from a k8s Cluster are correctly collected.
9 | before:
10 | - helm dependency update ../charts/internal/e2e-resources
11 | - helm dependency update ../charts/nri-kube-events
12 | - helm upgrade --install ${SCENARIO_TAG}-resources --namespace nr-${SCENARIO_TAG} --create-namespace ../charts/internal/e2e-resources
13 | - helm upgrade --install ${SCENARIO_TAG} --namespace nr-${SCENARIO_TAG} --create-namespace ../charts/nri-kube-events --values e2e-values.yml --set global.licenseKey=${LICENSE_KEY} --set global.cluster=${SCENARIO_TAG}
14 | after:
15 | - kubectl logs --selector app.kubernetes.io/name=nri-kube-events --namespace nr-${SCENARIO_TAG} --all-containers --prefix=true
16 | - kubectl get pods --namespace nr-${SCENARIO_TAG}
17 | - helm delete ${SCENARIO_TAG}-resources --namespace nr-${SCENARIO_TAG}
18 | - helm delete ${SCENARIO_TAG} --namespace nr-${SCENARIO_TAG}
19 | tests:
20 | nrqls:
21 | - query: FROM InfrastructureEvent SELECT latest(event.involvedObject.kind) AS 'Kind' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
22 | expected_results:
23 | - key: "Kind"
24 | value: "Pod"
25 | - query: FROM InfrastructureEvent SELECT latest(event.reason) AS 'Reason' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
26 | expected_results:
27 | - key: "Reason"
28 | value: "BackOff"
29 | - query: FROM InfrastructureEvent SELECT latest(event.count) AS 'Count' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
30 | expected_results:
31 | - key: "Count"
32 | lowerBoundedValue: 1.0
33 | - query: FROM InfrastructureEvent SELECT latest(event.involvedObject.apiVersion) AS 'API Version' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
34 | expected_results:
35 | - key: "API Version"
36 | value: "v1"
37 | - query: FROM InfrastructureEvent SELECT latest(event.involvedObject.fieldPath) AS 'Field Path' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
38 | expected_results:
39 | - key: "Field Path"
40 | value: "spec.containers{failing-container}"
41 | - query: FROM InfrastructureEvent SELECT latest(event.source.component) AS 'Component' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
42 | expected_results:
43 | - key: "Component"
44 | value: "kubelet"
45 | - query: FROM InfrastructureEvent SELECT latest(eventRouterVersion) AS 'Router Version' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
46 | expected_results:
47 | - key: "Router Version"
48 | value: "dev"
49 | - query: FROM InfrastructureEvent SELECT latest(integrationName) AS 'Integration Name' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
50 | expected_results:
51 | - key: "Integration Name"
52 | value: "kube_events"
53 | - query: FROM InfrastructureEvent SELECT latest(verb) AS 'Verb' WHERE category = 'kubernetes' AND event.type = 'Warning' AND event.metadata.namespace = 'nr-${SCENARIO_TAG}' AND event.involvedObject.name like '${SCENARIO_TAG}-resources-deployment-%'
54 | expected_results:
55 | - key: "Verb"
56 | value: "UPDATE"
57 | entities: []
58 | metrics: []
59 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/newrelic/nri-kube-events
2 |
3 | go 1.24.3
4 |
5 | require (
6 | github.com/newrelic/infra-integrations-sdk v3.8.2+incompatible
7 | github.com/prometheus/client_golang v1.22.0
8 | github.com/prometheus/client_model v0.6.2
9 | github.com/sethgrid/pester v1.2.0
10 | github.com/sirupsen/logrus v1.9.3
11 | github.com/stretchr/testify v1.10.0
12 | gopkg.in/yaml.v3 v3.0.1
13 | k8s.io/api v0.32.3
14 | k8s.io/apimachinery v0.32.3
15 | k8s.io/client-go v0.32.3
16 | // Remove the replace directives at the bottom of this file
17 | // when upgrading to v0.28 or above
18 | k8s.io/kubectl v0.32.3
19 | )
20 |
21 | require (
22 | github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
23 | github.com/beorn7/perks v1.0.1 // indirect
24 | github.com/blang/semver/v4 v4.0.0 // indirect
25 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
26 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
27 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect
28 | github.com/fatih/camelcase v1.0.0 // indirect
29 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect
30 | github.com/go-errors/errors v1.4.2 // indirect
31 | github.com/go-logr/logr v1.4.2 // indirect
32 | github.com/go-openapi/jsonpointer v0.21.0 // indirect
33 | github.com/go-openapi/jsonreference v0.20.2 // indirect
34 | github.com/go-openapi/swag v0.23.0 // indirect
35 | github.com/gogo/protobuf v1.3.2 // indirect
36 | github.com/golang/protobuf v1.5.4 // indirect
37 | github.com/google/btree v1.0.1 // indirect
38 | github.com/google/gnostic-models v0.6.8 // indirect
39 | github.com/google/go-cmp v0.7.0 // indirect
40 | github.com/google/gofuzz v1.2.0 // indirect
41 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
42 | github.com/google/uuid v1.6.0 // indirect
43 | github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
44 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
45 | github.com/josharian/intern v1.0.0 // indirect
46 | github.com/json-iterator/go v1.1.12 // indirect
47 | github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
48 | github.com/mailru/easyjson v0.7.7 // indirect
49 | github.com/moby/term v0.5.0 // indirect
50 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
51 | github.com/modern-go/reflect2 v1.0.2 // indirect
52 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
53 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
54 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
55 | github.com/pkg/errors v0.9.1 // indirect
56 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
57 | github.com/prometheus/common v0.62.0 // indirect
58 | github.com/prometheus/procfs v0.15.1 // indirect
59 | github.com/spf13/cobra v1.8.1 // indirect
60 | github.com/spf13/pflag v1.0.5 // indirect
61 | github.com/stretchr/objx v0.5.2 // indirect
62 | github.com/x448/float16 v0.8.4 // indirect
63 | github.com/xlab/treeprint v1.2.0 // indirect
64 | golang.org/x/net v0.36.0 // indirect
65 | golang.org/x/oauth2 v0.24.0 // indirect
66 | golang.org/x/sync v0.11.0 // indirect
67 | golang.org/x/sys v0.30.0 // indirect
68 | golang.org/x/term v0.29.0 // indirect
69 | golang.org/x/text v0.22.0 // indirect
70 | golang.org/x/time v0.7.0 // indirect
71 | google.golang.org/protobuf v1.36.6 // indirect
72 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
73 | gopkg.in/inf.v0 v0.9.1 // indirect
74 | k8s.io/cli-runtime v0.32.3 // indirect
75 | k8s.io/klog/v2 v2.130.1 // indirect
76 | k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
77 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
78 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
79 | sigs.k8s.io/kustomize/api v0.18.0 // indirect
80 | sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
81 | sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
82 | sigs.k8s.io/yaml v1.4.0 // indirect
83 | )
84 |
85 | // These can be removed once kubectl v0.28 or above is released
86 | // We need a kubectl release that includes https://github.com/kubernetes/kubectl/commit/4bd3f1b03dacd034c35d9b7f188e0c5bacf91fad
87 | // The apimachinery replacement is for compatibility with the given kubectl version.
88 | replace (
89 | k8s.io/apimachinery => k8s.io/apimachinery v0.32.3
90 | k8s.io/kubectl => k8s.io/kubectl v0.32.3
91 | )
92 |
--------------------------------------------------------------------------------
/pkg/common/objects.go:
--------------------------------------------------------------------------------
1 | // Package common ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package common
5 |
6 | import (
7 | v1 "k8s.io/api/core/v1"
8 | "k8s.io/apimachinery/pkg/runtime"
9 | )
10 |
11 | // KubeEvent represents a Kubernetes event. It specifies if this is the first
12 | // time the event is seen or if it's an update to a previous event.
13 | type KubeEvent struct {
14 | Verb string `json:"verb"`
15 | Event *v1.Event `json:"event"`
16 | OldEvent *v1.Event `json:"old_event,omitempty"`
17 | }
18 |
19 | // KubeObject represents a Kubernetes runtime object.
20 | // It specifies if this is the first time the object is seen or
21 | // if it's an update to a previous object.
22 | type KubeObject struct {
23 | Verb string `json:"verb"`
24 | Obj runtime.Object `json:"obj"`
25 | OldObj runtime.Object `json:"old_obj,omitempty"`
26 | }
27 |
--------------------------------------------------------------------------------
/pkg/common/utils.go:
--------------------------------------------------------------------------------
1 | // Package common ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package common
5 |
6 | import (
7 | "encoding/json"
8 | "errors"
9 | "fmt"
10 | "unicode/utf8"
11 |
12 | log "github.com/sirupsen/logrus"
13 | "k8s.io/apimachinery/pkg/api/meta"
14 | "k8s.io/apimachinery/pkg/runtime"
15 | "k8s.io/apimachinery/pkg/runtime/schema"
16 | "k8s.io/kubectl/pkg/scheme"
17 | )
18 |
19 | const SplitMaxCols = 16
20 | const NRDBLimit = 4095
21 |
22 | // LimitSplit splits the input string into multiple strings at the specified limit
23 | // taking care not to split mid-rune.
24 | func LimitSplit(input string, limit int) []string {
25 | if limit <= 0 {
26 | return []string{input}
27 | }
28 |
29 | var splits []string
30 | for len(input) > limit {
31 | boundary := limit
32 | // Check if this is a rune boundary, else go backwards up to UTFMax bytes to look for
33 | // a boundary. If one isn't found in max bytes, give up and split anyway.
34 | for !utf8.RuneStart(input[boundary]) && boundary >= limit-utf8.UTFMax {
35 | boundary--
36 | }
37 | splits = append(splits, input[:boundary])
38 | input = input[boundary:]
39 | }
40 | if len(input) > 0 {
41 | splits = append(splits, input)
42 | }
43 | return splits
44 | }
45 |
46 | // K8SObjGetGVK gets the GVK for the given object.
47 | func K8SObjGetGVK(obj runtime.Object) schema.GroupVersionKind {
48 | gvks, _, err := scheme.Scheme.ObjectKinds(obj)
49 | if err != nil {
50 | log.Warnf("missing apiVersion or kind and cannot assign it; %v", err)
51 | return schema.GroupVersionKind{}
52 | }
53 |
54 | for _, gvk := range gvks {
55 | if len(gvk.Kind) == 0 {
56 | continue
57 | }
58 | if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal {
59 | continue
60 | }
61 | return gvk
62 | }
63 | return schema.GroupVersionKind{}
64 | }
65 |
66 | func GetObjNamespaceAndName(obj runtime.Object) (string, string, error) {
67 | accessor := meta.NewAccessor()
68 | var errs []error
69 |
70 | ns, err := accessor.Namespace(obj)
71 | errs = append(errs, err)
72 |
73 | name, err := accessor.Name(obj)
74 | errs = append(errs, err)
75 |
76 | return ns, name, errors.Join(errs...)
77 | }
78 |
79 | func FlattenStruct(v interface{}) (map[string]interface{}, error) {
80 | m := make(map[string]interface{})
81 |
82 | data, err := json.Marshal(v)
83 | if err != nil {
84 | return nil, err
85 | }
86 | var unflattened map[string]interface{}
87 | err = json.Unmarshal(data, &unflattened)
88 | if err != nil {
89 | return nil, err
90 | }
91 |
92 | var doFlatten func(string, interface{}, map[string]interface{})
93 |
94 | doFlatten = func(key string, v interface{}, m map[string]interface{}) {
95 | switch parsedType := v.(type) {
96 | case map[string]interface{}:
97 | for k, n := range parsedType {
98 | doFlatten(key+"."+k, n, m)
99 | }
100 | case []interface{}:
101 | for i, n := range parsedType {
102 | doFlatten(key+fmt.Sprintf("[%d]", i), n, m)
103 | }
104 | case string:
105 | // ignore empty strings
106 | if parsedType == "" {
107 | return
108 | }
109 |
110 | m[key] = v
111 |
112 | default:
113 | // ignore nil values
114 | if v == nil {
115 | return
116 | }
117 |
118 | m[key] = v
119 | }
120 | }
121 |
122 | for k, v := range unflattened {
123 | doFlatten(k, v, m)
124 | }
125 |
126 | return m, nil
127 | }
128 |
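
The constants and helpers above suggest one obvious combination: flatten an object into dotted keys, then split any string value longer than `NRDBLimit` into at most `SplitMaxCols` chunks. The following is a rough, standalone sketch of that idea; the package name and the `key_2`, `key_3`, ... column-naming scheme are assumptions for illustration, not the behaviour of the actual sink in `pkg/sinks`:

```go
// Standalone sketch combining FlattenStruct and LimitSplit (hypothetical, not part of the repository).
package sketch

import (
	"fmt"

	"github.com/newrelic/nri-kube-events/pkg/common"
)

// flattenWithSplits flattens v and breaks over-long string values into
// numbered columns so that each value stays under common.NRDBLimit.
func flattenWithSplits(v interface{}) (map[string]interface{}, error) {
	flat, err := common.FlattenStruct(v)
	if err != nil {
		return nil, err
	}

	out := make(map[string]interface{}, len(flat))
	for key, value := range flat {
		s, ok := value.(string)
		if !ok || len(s) <= common.NRDBLimit {
			out[key] = value
			continue
		}
		// Hypothetical naming: key, key_2, key_3, ... capped at SplitMaxCols chunks.
		for i, chunk := range common.LimitSplit(s, common.NRDBLimit) {
			if i >= common.SplitMaxCols {
				break
			}
			name := key
			if i > 0 {
				name = fmt.Sprintf("%s_%d", key, i+1)
			}
			out[name] = chunk
		}
	}
	return out, nil
}
```
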
--------------------------------------------------------------------------------
/pkg/common/utils_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 New Relic Corporation. All rights reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | package common_test
4 |
5 | import (
6 | "testing"
7 |
8 | "github.com/stretchr/testify/assert"
9 | v1 "k8s.io/api/core/v1"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 |
12 | "github.com/newrelic/nri-kube-events/pkg/common"
13 | )
14 |
15 | func TestLimitSplit(t *testing.T) {
16 | tests := []struct {
17 | input string
18 | limit int
19 | output []string
20 | }{
21 | {
22 | input: "short string",
23 | limit: 20,
24 | output: []string{"short string"},
25 | },
26 | {
27 | input: "very very very long string",
28 | limit: 20,
29 | output: []string{"very very very long ", "string"},
30 | },
31 | {
32 | input: "",
33 | limit: 20,
34 | output: nil,
35 | },
36 | {
37 | input: "short",
38 | limit: 0,
39 | output: []string{"short"},
40 | },
41 | {
42 | input: "日本語",
43 | limit: 4,
44 | output: []string{"日", "本", "語"},
45 | },
46 | {
47 | input: "bad utf8 \xbd\xb2\x3d\xbc\x20\xe2\x8c\x98",
48 | limit: 8,
49 | output: []string{"bad utf8", " \xbd\xb2\x3d\xbc\x20", "\xe2\x8c\x98"},
50 | },
51 | }
52 |
53 | for _, test := range tests {
54 | assert.Equal(t, test.output, common.LimitSplit(test.input, test.limit))
55 | }
56 | }
57 |
58 | func TestFlattenStruct(t *testing.T) {
59 | got, _ := common.FlattenStruct(common.KubeEvent{Verb: "UPDATE", Event: &v1.Event{
60 | ObjectMeta: metav1.ObjectMeta{
61 | Name: "test",
62 | Labels: map[string]string{
63 | "test_label1": "test_value1",
64 | "test_label2": "test_value2",
65 | },
66 | Finalizers: []string{"1", "2"},
67 | },
68 | Count: 10,
69 | InvolvedObject: v1.ObjectReference{
70 | Kind: "Pod",
71 | Namespace: "test_namespace",
72 | },
73 | }})
74 |
75 | want := map[string]interface{}{
76 | "event.count": float64(10),
77 | "event.metadata.name": "test",
78 | "event.metadata.labels.test_label1": "test_value1",
79 | "event.metadata.labels.test_label2": "test_value2",
80 | "event.involvedObject.kind": "Pod",
81 | "event.involvedObject.namespace": "test_namespace",
82 | "event.metadata.finalizers[0]": "1",
83 | "event.metadata.finalizers[1]": "2",
84 | "verb": "UPDATE",
85 | }
86 |
87 | assert.Equal(t, want, got)
88 | }
89 |
--------------------------------------------------------------------------------
/pkg/descriptions/router.go:
--------------------------------------------------------------------------------
1 | // Package descriptions ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package descriptions
5 |
6 | import (
7 | "time"
8 |
9 | "github.com/prometheus/client_golang/prometheus"
10 | "github.com/prometheus/client_golang/prometheus/promauto"
11 | "github.com/sirupsen/logrus"
12 | "k8s.io/apimachinery/pkg/runtime"
13 | "k8s.io/client-go/tools/cache"
14 |
15 | "github.com/newrelic/nri-kube-events/pkg/common"
16 | "github.com/newrelic/nri-kube-events/pkg/router"
17 | )
18 |
19 | var (
20 | requestDurationSeconds = promauto.NewHistogramVec(prometheus.HistogramOpts{
21 | Namespace: "nr",
22 | Subsystem: "k8s_descriptions",
23 | Name: "sink_request_duration_seconds",
24 | Help: "Duration of requests for each sink",
25 | }, []string{"sink"})
26 | descsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
27 | Namespace: "nr",
28 | Subsystem: "k8s_descriptions",
29 | Name: "received",
30 | Help: "Total amount of descriptions received per sink, including failures",
31 | }, []string{"sink"})
32 | descsFailuresTotal = promauto.NewCounterVec(prometheus.CounterOpts{
33 | Namespace: "nr",
34 | Subsystem: "k8s_descriptions",
35 | Name: "failed",
36 | Help: "Total amount of failed descriptions per sink",
37 | }, []string{"sink"})
38 | )
39 |
40 | type ObjectHandler interface {
41 | HandleObject(kubeEvent common.KubeObject) error
42 | }
43 |
44 | // Router listens for events coming from a SharedIndexInformer,
45 | // and forwards them to the registered sinks
46 | type Router struct {
47 | // list of handlers to send events to
48 | handlers map[string]ObjectHandler
49 |
50 | // all updates & adds will be appended to this queue
51 | workQueue chan common.KubeObject
52 | }
53 |
54 | type observedObjectHandler struct {
55 | ObjectHandler
56 | prometheus.Observer
57 | }
58 |
59 | func (o *observedObjectHandler) HandleObject(kubeObject common.KubeObject) error {
60 | t := time.Now()
61 | defer func() { o.Observer.Observe(time.Since(t).Seconds()) }()
62 |
63 | return o.ObjectHandler.HandleObject(kubeObject)
64 | }
65 |
66 | // NewRouter returns a new Router which listens to the given SharedIndexInformer,
67 | // and forwards all incoming events to the given sinks
68 | func NewRouter(informers []cache.SharedIndexInformer, handlers map[string]ObjectHandler, opts ...router.ConfigOption) *Router {
69 | config, err := router.NewConfig(opts...)
70 | if err != nil {
71 | logrus.Fatalf("Error with Router configuration: %v", err)
72 | }
73 |
74 | workQueue := make(chan common.KubeObject, config.WorkQueueLength())
75 |
76 | for _, informer := range informers {
77 | _, err = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
78 | AddFunc: func(obj interface{}) {
79 | workQueue <- common.KubeObject{
80 | Obj: obj.(runtime.Object),
81 | Verb: "ADDED",
82 | }
83 | },
84 | UpdateFunc: func(oldObj, newObj interface{}) {
85 | workQueue <- common.KubeObject{
86 | Obj: newObj.(runtime.Object),
87 | OldObj: oldObj.(runtime.Object),
88 | Verb: "UPDATE",
89 | }
90 | },
91 | })
92 |
93 | if err != nil {
94 | logrus.Warnf("Error with add informer event handlers: %v", err)
95 | }
96 | }
97 |
98 | // instrument all sinks with histogram observation
99 | observedSinks := map[string]ObjectHandler{}
100 | for name, handler := range handlers {
101 | observedSinks[name] = &observedObjectHandler{
102 | ObjectHandler: handler,
103 | Observer: requestDurationSeconds.WithLabelValues(name),
104 | }
105 | }
106 |
107 | return instrument(&Router{
108 | handlers: observedSinks,
109 | workQueue: workQueue,
110 | })
111 | }
112 |
113 | func instrument(r *Router) *Router {
114 | if err := prometheus.Register(prometheus.NewGaugeFunc(
115 | prometheus.GaugeOpts{
116 | Namespace: "nr",
117 | Subsystem: "k8s_descriptions",
118 | Name: "workqueue_length",
119 | Help: "Number of k8s objects currently queued in the workqueue.",
120 | },
121 | func() float64 {
122 | return float64(len(r.workQueue))
123 | },
124 | )); err != nil {
125 | logrus.Warningf("could not register workqueue_length prometheus gauge")
126 | }
127 |
128 | return r
129 | }
130 |
131 | // Run listens to the workQueue and forwards incoming objects
132 | // to all registered sinks
133 | func (r *Router) Run(stopChan <-chan struct{}) {
134 | logrus.Infof("Router started")
135 | defer logrus.Infof("Router stopped")
136 |
137 | for {
138 | select {
139 | case <-stopChan:
140 | return
141 | case event := <-r.workQueue:
142 | r.publishObjectDescription(event)
143 | }
144 | }
145 | }
146 |
147 | func (r *Router) publishObjectDescription(kubeObject common.KubeObject) {
148 | for name, handler := range r.handlers {
149 | descsReceivedTotal.WithLabelValues(name).Inc()
150 |
151 | if err := handler.HandleObject(kubeObject); err != nil {
152 | logrus.Warningf("Sink %s HandleObject error: %v", name, err)
153 | descsFailuresTotal.WithLabelValues(name).Inc()
154 | }
155 | }
156 | }
157 |
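
Any value implementing `ObjectHandler` can be registered with `descriptions.NewRouter`, exactly like the built-in sinks. Below is a minimal, hypothetical handler that is not part of the repository; it just counts described objects by verb. The same pattern applies to the `events.EventHandler` interface shown in `pkg/events/router.go`:

```go
// Hypothetical example handler (not part of the repository).
package sketch

import (
	"sync"

	"github.com/newrelic/nri-kube-events/pkg/common"
	"github.com/newrelic/nri-kube-events/pkg/descriptions"
)

// countingHandler tallies received objects by verb ("ADDED" or "UPDATE").
type countingHandler struct {
	mu     sync.Mutex
	counts map[string]int
}

func (c *countingHandler) HandleObject(obj common.KubeObject) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.counts == nil {
		c.counts = map[string]int{}
	}
	c.counts[obj.Verb]++
	return nil
}

// Compile-time check that the sketch satisfies the interface.
var _ descriptions.ObjectHandler = (*countingHandler)(nil)

// Registering it alongside the real sinks would look like:
//
//	handlers := map[string]descriptions.ObjectHandler{"counter": &countingHandler{}}
//	router := descriptions.NewRouter(resourceInformers, handlers)
//	go router.Run(stopChan)
```
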
--------------------------------------------------------------------------------
/pkg/events/router.go:
--------------------------------------------------------------------------------
1 | // Package events ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package events
5 |
6 | import (
7 | "time"
8 |
9 | "github.com/prometheus/client_golang/prometheus"
10 | "github.com/prometheus/client_golang/prometheus/promauto"
11 | "github.com/sirupsen/logrus"
12 | v1 "k8s.io/api/core/v1"
13 | "k8s.io/client-go/tools/cache"
14 |
15 | "github.com/newrelic/nri-kube-events/pkg/common"
16 | "github.com/newrelic/nri-kube-events/pkg/router"
17 | )
18 |
19 | var (
20 | requestDurationSeconds = promauto.NewHistogramVec(prometheus.HistogramOpts{
21 | Namespace: "nr",
22 | Subsystem: "kube_events",
23 | Name: "sink_request_duration_seconds",
24 | Help: "Duration of requests for each sink",
25 | }, []string{"sink"})
26 | eventsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
27 | Namespace: "nr",
28 | Subsystem: "kube_events",
29 | Name: "received_events_total",
30 | Help: "Total amount of events received per sink, including failures",
31 | }, []string{"sink"})
32 | eventsFailuresTotal = promauto.NewCounterVec(prometheus.CounterOpts{
33 | Namespace: "nr",
34 | Subsystem: "kube_events",
35 | Name: "failed_events_total",
36 | Help: "Total amount of failed events per sink",
37 | }, []string{"sink"})
38 | )
39 |
40 | type EventHandler interface {
41 | HandleEvent(kubeEvent common.KubeEvent) error
42 | }
43 |
44 | // Router listens for events coming from a SharedIndexInformer,
45 | // and forwards them to the registered sinks
46 | type Router struct {
47 | // list of handlers to send events to
48 | handlers map[string]EventHandler
49 |
50 | // all updates & adds will be appended to this queue
51 | workQueue chan common.KubeEvent
52 | }
53 |
54 | type observedEventHandler struct {
55 | EventHandler
56 | prometheus.Observer
57 | }
58 |
59 | func (o *observedEventHandler) HandleEvent(kubeEvent common.KubeEvent) error {
60 | t := time.Now()
61 | defer func() { o.Observer.Observe(time.Since(t).Seconds()) }()
62 |
63 | return o.EventHandler.HandleEvent(kubeEvent)
64 | }
65 |
66 | // NewRouter returns a new Router which listens to the given SharedIndexInformer,
67 | // and forwards all incoming events to the given sinks
68 | func NewRouter(informer cache.SharedIndexInformer, handlers map[string]EventHandler, opts ...router.ConfigOption) *Router {
69 | config, err := router.NewConfig(opts...)
70 | if err != nil {
71 | logrus.Fatalf("Error with Router configuration: %v", err)
72 | }
73 |
74 | // According to the shared_informer source code it's not designed to
75 | // wait for the event handlers to finish, they should return quickly
76 | // Therefore we push to a queue and handle it in another goroutine
77 | // See: https://github.com/kubernetes/client-go/blob/c8dc69f8a8bf8d8640493ce26688b26c7bfde8e6/tools/cache/shared_informer.go#L111
78 | workQueue := make(chan common.KubeEvent, config.WorkQueueLength())
79 |
80 | _, err = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
81 | AddFunc: func(obj interface{}) {
82 | workQueue <- common.KubeEvent{
83 | Event: obj.(*v1.Event),
84 | Verb: "ADDED",
85 | }
86 | },
87 | UpdateFunc: func(oldObj, newObj interface{}) {
88 | workQueue <- common.KubeEvent{
89 | Event: newObj.(*v1.Event),
90 | OldEvent: oldObj.(*v1.Event),
91 | Verb: "UPDATE",
92 | }
93 | },
94 | })
95 |
96 | if err != nil {
97 | logrus.Warnf("Error with add informer event handlers: %v", err)
98 | }
99 |
100 | // instrument all sinks with histogram observation
101 | observedSinks := map[string]EventHandler{}
102 | for name, handler := range handlers {
103 | observedSinks[name] = &observedEventHandler{
104 | EventHandler: handler,
105 | Observer: requestDurationSeconds.WithLabelValues(name),
106 | }
107 | }
108 |
109 | return instrument(&Router{
110 | handlers: observedSinks,
111 | workQueue: workQueue,
112 | })
113 | }
114 |
115 | func instrument(r *Router) *Router {
116 | if err := prometheus.Register(prometheus.NewGaugeFunc(
117 | prometheus.GaugeOpts{
118 | Namespace: "nr",
119 | Subsystem: "kube_events",
120 | Name: "workqueue_length",
121 | Help: "Number of k8s events currently queued in the workqueue.",
122 | },
123 | func() float64 {
124 | return float64(len(r.workQueue))
125 | },
126 | )); err != nil {
127 | logrus.Warningf("could not register workqueue_length prometheus gauge")
128 | }
129 |
130 | return r
131 | }
132 |
133 | // Run listens to the workQueue and forwards incoming events
134 | // to all registered sinks
135 | func (r *Router) Run(stopChan <-chan struct{}) {
136 | logrus.Infof("Router started")
137 | defer logrus.Infof("Router stopped")
138 |
139 | for {
140 | select {
141 | case <-stopChan:
142 | return
143 | case event := <-r.workQueue:
144 | r.publishEvent(event)
145 | }
146 | }
147 | }
148 |
149 | func (r *Router) publishEvent(kubeEvent common.KubeEvent) {
150 | for name, handler := range r.handlers {
151 | eventsReceivedTotal.WithLabelValues(name).Inc()
152 |
153 | if err := handler.HandleEvent(kubeEvent); err != nil {
154 | logrus.Warningf("Sink %s HandleEvent error: %v", name, err)
155 | eventsFailuresTotal.WithLabelValues(name).Inc()
156 | }
157 | }
158 | }
159 |
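The pieces above are normally wired to a client-go informer factory, much like the integration test later in this dump does. A minimal sketch, assuming a ready *kubernetes.Clientset, handlers built elsewhere, and an illustrative stop channel (none of this caller code is part of the repository):

package example

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"

	"github.com/newrelic/nri-kube-events/pkg/events"
	"github.com/newrelic/nri-kube-events/pkg/router"
)

// runRouter wires the Router to a core/v1 Events informer and blocks until stopCh is closed.
func runRouter(client *kubernetes.Clientset, handlers map[string]events.EventHandler, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 0) // no periodic resync
	eventsInformer := factory.Core().V1().Events().Informer()

	// Optionally widen the work queue beyond the 1024 default.
	queueLen := 2048
	r := events.NewRouter(eventsInformer, handlers, router.WithWorkQueueLength(&queueLen))

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	r.Run(stopCh)
}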
--------------------------------------------------------------------------------
/pkg/events/router_test.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | import (
4 | "errors"
5 | "sync"
6 | "testing"
7 | "time"
8 |
9 | "github.com/prometheus/client_golang/prometheus"
10 | dto "github.com/prometheus/client_model/go"
11 | log "github.com/sirupsen/logrus"
12 | "github.com/stretchr/testify/assert"
13 | "github.com/stretchr/testify/mock"
14 | v1 "k8s.io/api/core/v1"
15 | "k8s.io/client-go/tools/cache"
16 |
17 | "github.com/newrelic/nri-kube-events/pkg/common"
18 | )
19 |
20 | func TestNewRouter(t *testing.T) {
21 | type args struct {
22 | informer *MockSharedIndexInformer
23 | handlers map[string]EventHandler
24 | }
25 | tests := []struct {
26 | name string
27 | args args
28 | assert func(t *testing.T, args args, r *Router)
29 | }{
30 | {
31 | name: "AddEventHandler AddFunc",
32 | args: args{
33 | informer: new(MockSharedIndexInformer),
34 | },
35 | assert: func(t *testing.T, args args, r *Router) {
36 | assert.Len(t, args.informer.Calls, 1)
37 | hf := args.informer.Calls[0].Arguments.Get(0).(cache.ResourceEventHandlerFuncs)
38 | added := new(v1.Event)
39 | assert.NotNil(t, hf.AddFunc)
40 | go hf.AddFunc(added)
41 | select {
42 | case ke := <-r.workQueue:
43 | assert.NotNil(t, ke)
44 | assert.Equal(t, "ADDED", ke.Verb)
45 | assert.Equal(t, ke.Event, added)
46 | assert.Nil(t, ke.OldEvent)
47 | case <-time.After(1 * time.Second):
48 | assert.Fail(t, "Nothing on worker queue")
49 | }
50 | },
51 | },
52 | {
53 | name: "AddEventHandler UpdateFunc",
54 | args: args{
55 | informer: new(MockSharedIndexInformer),
56 | },
57 | assert: func(t *testing.T, args args, r *Router) {
58 | assert.Len(t, args.informer.Calls, 1)
59 | hf := args.informer.Calls[0].Arguments.Get(0).(cache.ResourceEventHandlerFuncs)
60 | oldObj := &v1.Event{
61 | Action: "Some old action",
62 | }
63 | newObj := &v1.Event{
64 | Action: "Some new action",
65 | }
66 | assert.NotNil(t, hf.UpdateFunc)
67 | go hf.UpdateFunc(oldObj, newObj)
68 | select {
69 | case ke := <-r.workQueue:
70 | assert.NotNil(t, ke)
71 | assert.Equal(t, "UPDATE", ke.Verb)
72 | assert.Equal(t, ke.Event, newObj)
73 | assert.Equal(t, ke.OldEvent, oldObj)
74 | case <-time.After(1 * time.Second):
75 | assert.Fail(t, "Nothing on worker queue")
76 | }
77 | },
78 | },
79 | {
80 | name: "workQueue",
81 | args: args{
82 | informer: new(MockSharedIndexInformer),
83 | },
84 | assert: func(t *testing.T, args args, r *Router) {
85 | assert.Equal(t, 1024, cap(r.workQueue), "Wrong default work queue length")
86 | },
87 | },
88 | {
89 | name: "sinks",
90 | args: args{
91 | informer: new(MockSharedIndexInformer),
92 | handlers: map[string]EventHandler{
93 | "stub": &stubSink{stubData: "some data"},
94 | },
95 | },
96 | assert: func(t *testing.T, args args, r *Router) {
97 | assert.Len(t, r.handlers, 1)
98 | s, ok := r.handlers["stub"]
99 | assert.True(t, ok)
100 | assert.NotNil(t, s)
101 | assert.Equal(t, args.handlers["stub"], s.(*observedEventHandler).EventHandler)
102 | obs := s.(*observedEventHandler).Observer
103 | assert.NotNil(t, obs)
104 | h := obs.(prometheus.Histogram)
105 | m := dto.Metric{}
106 | assert.NoError(t, h.Write(&m))
107 | name := "sink"
108 | value := "stub"
109 | // Check correct label pair added
110 | assert.Equal(t, []*dto.LabelPair{{Name: &name, Value: &value}}, m.Label)
111 | },
112 | },
113 | }
114 | for _, tt := range tests {
115 | t.Run(tt.name, func(t *testing.T) {
116 | tt.args.informer.
117 | On("AddEventHandler", mock.AnythingOfType("cache.ResourceEventHandlerFuncs")).
118 | Once()
119 |
120 | r := NewRouter(tt.args.informer, tt.args.handlers)
121 | assert.NotNil(t, r)
122 | tt.assert(t, tt.args, r)
123 | tt.args.informer.AssertExpectations(t)
124 | })
125 | }
126 | }
127 |
128 | type MockSharedIndexInformer struct {
129 | mock.Mock
130 | cache.SharedIndexInformer
131 | }
132 |
133 | func (m *MockSharedIndexInformer) SetupMock() {
134 | m.
135 | On("AddEventHandler", mock.AnythingOfType("cache.ResourceEventHandlerFuncs")).
136 | Once()
137 | }
138 |
139 | type stubResourceEventHandlerRegistration struct {
140 | mock.Mock
141 | }
142 |
143 | func (s *stubResourceEventHandlerRegistration) HasSynced() bool {
144 | s.Called()
145 | return true
146 | }
147 |
148 | func (m *MockSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) {
149 | m.Called(handler)
150 | return &stubResourceEventHandlerRegistration{}, nil
151 | }
152 |
153 | type stubSink struct {
154 | mock.Mock
155 | EventHandler
156 | stubData string
157 | }
158 |
159 | func (s *stubSink) HandleEvent(kubeEvent common.KubeEvent) error {
160 | args := s.Called(kubeEvent)
161 | return args.Error(0)
162 | }
163 |
164 | func TestRouter_Run(t *testing.T) {
165 | informer := new(MockSharedIndexInformer)
166 | informer.SetupMock()
167 | stubSink := new(stubSink)
168 | handlers := map[string]EventHandler{
169 | "stub": stubSink,
170 | }
171 |
172 | r := NewRouter(informer, handlers)
173 | stopChan := make(chan struct{})
174 |
175 | wg := sync.WaitGroup{}
176 | wg.Add(1)
177 | go func() {
178 | defer wg.Done()
179 | r.Run(stopChan)
180 | }()
181 |
182 | ke := &v1.Event{
183 | Action: "Some old action",
184 | }
185 |
186 | stubSink.On("HandleEvent", mock.AnythingOfType("KubeEvent")).Run(func(args mock.Arguments) {
187 | log.Info("stub called")
188 | ake := args.Get(0).(common.KubeEvent)
189 | assert.Equal(t, ke, ake.Event)
190 | defer close(stopChan)
191 | }).Return(nil).Once()
192 |
193 | go func() {
194 | r.workQueue <- common.KubeEvent{
195 | Event: ke,
196 | }
197 | }()
198 |
199 | wg.Wait()
200 | stubSink.AssertExpectations(t)
201 | }
202 |
203 | func TestRouter_RunError(t *testing.T) {
204 | informer := new(MockSharedIndexInformer)
205 | informer.SetupMock()
206 | stubSink := new(stubSink)
207 | handlers := map[string]EventHandler{
208 | "stub": stubSink,
209 | }
210 |
211 | r := NewRouter(informer, handlers)
212 | stopChan := make(chan struct{})
213 |
214 | wg := sync.WaitGroup{}
215 | wg.Add(1)
216 | go func() {
217 | defer wg.Done()
218 | r.Run(stopChan)
219 | }()
220 |
221 | ke := &v1.Event{
222 | Action: "Some old action",
223 | }
224 |
225 | expectedError := errors.New("something went wrong")
226 | stubSink.On("HandleEvent", mock.AnythingOfType("KubeEvent")).Run(func(args mock.Arguments) {
227 | defer close(stopChan)
228 | }).Return(expectedError).Once()
229 |
230 | go func() {
231 | r.workQueue <- common.KubeEvent{
232 | Event: ke,
233 | }
234 | }()
235 |
236 | wg.Wait()
237 | stubSink.AssertExpectations(t)
238 | c, err := eventsFailuresTotal.GetMetricWithLabelValues("stub")
239 | assert.NoError(t, err)
240 | m := dto.Metric{}
241 | assert.NoError(t, c.Write(&m))
242 | expCnt := float64(1)
243 | assert.Equal(t, expCnt, *m.Counter.Value)
244 | }
245 |
--------------------------------------------------------------------------------
/pkg/router/config.go:
--------------------------------------------------------------------------------
1 | // Package router ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package router
5 |
6 | import "errors"
7 |
8 | var ErrInvalidWorkQueueLength = errors.New("invalid workQueueLength value: value should be greater than 0")
9 |
10 | type Config struct {
11 | // workQueueLength defines the workQueue's channel backlog.
12 | // It's needed to handle surges of new objects.
13 | workQueueLength int
14 | }
15 |
16 | // ConfigOption set attributes of the `router.Config`.
17 | type ConfigOption func(*Config) error
18 |
19 | func NewConfig(opts ...ConfigOption) (*Config, error) {
20 | c := &Config{
21 | workQueueLength: 1024,
22 | }
23 | for _, opt := range opts {
24 | err := opt(c)
25 | if err != nil {
26 | return c, err
27 | }
28 | }
29 | return c, nil
30 | }
31 |
32 | // WithWorkQueueLength sets the workQueueLength.
33 | // Handle nil values here to make the configuration code more clean.
34 | func WithWorkQueueLength(length *int) ConfigOption {
35 | return func(rc *Config) error {
36 | if length == nil {
37 | return nil
38 | }
39 |
40 | if *length <= 0 {
41 | return ErrInvalidWorkQueueLength
42 | }
43 |
44 | rc.workQueueLength = *length
45 | return nil
46 | }
47 | }
48 |
49 | func (rc *Config) WorkQueueLength() int {
50 | return rc.workQueueLength
51 | }
52 |
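A quick sketch of how these options behave, using only the identifiers defined above (the calling code itself is hypothetical): a nil pointer keeps the default, a positive value overrides it, and zero or negative values are rejected with ErrInvalidWorkQueueLength.

package example

import (
	"errors"
	"fmt"

	"github.com/newrelic/nri-kube-events/pkg/router"
)

func configExample() {
	// nil leaves the default of 1024 in place.
	cfg, _ := router.NewConfig(router.WithWorkQueueLength(nil))
	fmt.Println(cfg.WorkQueueLength()) // 1024

	// A positive value overrides the default.
	n := 4096
	cfg, _ = router.NewConfig(router.WithWorkQueueLength(&n))
	fmt.Println(cfg.WorkQueueLength()) // 4096

	// Zero or negative values are rejected.
	bad := 0
	if _, err := router.NewConfig(router.WithWorkQueueLength(&bad)); errors.Is(err, router.ErrInvalidWorkQueueLength) {
		fmt.Println("invalid work queue length")
	}
}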
--------------------------------------------------------------------------------
/pkg/sinks/new_relic_infra.go:
--------------------------------------------------------------------------------
1 | // Package sinks ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package sinks
5 |
6 | import (
7 | "bytes"
8 | "context"
9 | "encoding/json"
10 | "fmt"
11 | "io"
12 | "net/http"
13 | "strconv"
14 | "strings"
15 | "time"
16 |
17 | sdkArgs "github.com/newrelic/infra-integrations-sdk/args"
18 | sdkAttr "github.com/newrelic/infra-integrations-sdk/data/attribute"
19 | sdkEvent "github.com/newrelic/infra-integrations-sdk/data/event"
20 | sdkIntegration "github.com/newrelic/infra-integrations-sdk/integration"
21 | "github.com/prometheus/client_golang/prometheus"
22 | "github.com/prometheus/client_golang/prometheus/promauto"
23 | "github.com/sethgrid/pester"
24 | "github.com/sirupsen/logrus"
25 | "k8s.io/kubectl/pkg/describe"
26 |
27 | "github.com/newrelic/nri-kube-events/pkg/common"
28 | )
29 |
30 | func init() {
31 | register("newRelicInfra", createNewRelicInfraSink)
32 | }
33 |
34 | const (
35 | newRelicNamespace = "k8s"
36 | newRelicCategory = "kubernetes"
37 | newRelicSDKName = "kube_events"
38 | defaultAgentHTTPTimeout = time.Second * 10
39 |
40 | bucketStart = 1 << 11
41 | bucketFactor = 2
42 | bucketCount = 6
43 | )
44 |
45 | func createNewRelicInfraSink(config SinkConfig, integrationVersion string) (Sink, error) {
46 | clusterName := config.MustGetString("clusterName")
47 | agentEndpoint := config.MustGetString("agentEndpoint")
48 | agentHTTPTimeout := config.GetDurationOr("agentHTTPTimeout", defaultAgentHTTPTimeout)
49 |
50 | args := struct {
51 | sdkArgs.DefaultArgumentList
52 | ClusterName string `help:"Identifier of your cluster. You could use it later to filter data in your New Relic account"`
53 | }{
54 | ClusterName: clusterName,
55 | }
56 |
57 | i, err := sdkIntegration.New(newRelicSDKName, integrationVersion, sdkIntegration.Args(&args))
58 | if err != nil {
59 | return nil, fmt.Errorf("error while initializing New Relic SDK integration: %w", err)
60 | }
61 |
62 | logrus.Debugf("NewRelic sink configuration: agentTimeout=%s, clusterName=%s, agentEndpoint=%s",
63 | agentHTTPTimeout,
64 | clusterName,
65 | agentEndpoint,
66 | )
67 |
68 | p := pester.New()
69 | p.Backoff = pester.ExponentialBackoff
70 | p.LogHook = func(e pester.ErrEntry) {
71 | logrus.Debugf("Pester HTTP error: %#v", e)
72 | }
73 | // 32 is semi-randomly chosen. It should be high enough not to block events coming from the k8s API,
74 | // but not too high, because the number is directly related to the number of goroutines that are running.
75 | p.Concurrency = 32
76 | p.MaxRetries = 3
77 |
78 | return &newRelicInfraSink{
79 | pesterClient: p,
80 | clusterName: clusterName,
81 | sdkIntegration: i,
82 | agentEndpoint: agentEndpoint,
83 | metrics: createNewRelicInfraSinkMetrics(),
84 | }, nil
85 | }
86 |
87 | func createNewRelicInfraSinkMetrics() newRelicInfraSinkMetrics {
88 | return newRelicInfraSinkMetrics{
89 | httpTotalFailures: promauto.NewCounter(prometheus.CounterOpts{
90 | Namespace: "nr",
91 | Subsystem: "http_sink",
92 | Name: "infra_sink_http_failures_total",
93 | Help: "Total amount of http failures connecting to the Agent",
94 | }),
95 | httpResponses: promauto.NewCounterVec(prometheus.CounterOpts{
96 | Namespace: "nr",
97 | Subsystem: "http_sink",
98 | Name: "infra_sink_http_responses_total",
99 | Help: "Total amount of http responses, per code, from the New Relic Infra Agent",
100 | }, []string{"code"}),
101 | descSizes: promauto.NewHistogramVec(prometheus.HistogramOpts{
102 | Namespace: "nr",
103 | Subsystem: "k8s_descriptions",
104 | Name: "size",
105 | Help: "Sizes of the object describe output",
106 | Buckets: prometheus.ExponentialBuckets(bucketStart, bucketFactor, bucketCount),
107 | }, []string{"obj_kind"}),
108 | descErr: promauto.NewCounterVec(prometheus.CounterOpts{
109 | Namespace: "nr",
110 | Subsystem: "k8s_descriptions",
111 | Name: "err",
112 | Help: "Total errors encountered when trying to describe an object",
113 | }, []string{"obj_kind"}),
114 | }
115 | }
116 |
117 | type newRelicInfraSinkMetrics struct {
118 | httpTotalFailures prometheus.Counter
119 | httpResponses *prometheus.CounterVec
120 | descSizes *prometheus.HistogramVec
121 | descErr *prometheus.CounterVec
122 | }
123 |
124 | // The newRelicInfraSink implements the Sink interface.
125 | // It will forward all events to the locally running New Relic Infrastructure Agent
126 | type newRelicInfraSink struct {
127 | pesterClient *pester.Client
128 | sdkIntegration *sdkIntegration.Integration
129 | clusterName string
130 | agentEndpoint string
131 | metrics newRelicInfraSinkMetrics
132 | }
133 |
134 | // HandleObject sends the descriptions for the object to the New Relic Agent
135 | func (ns *newRelicInfraSink) HandleObject(kubeObj common.KubeObject) error {
136 | defer ns.sdkIntegration.Clear()
137 |
138 | gvk := common.K8SObjGetGVK(kubeObj.Obj)
139 | objKind := gvk.Kind
140 |
141 | desc, err := describe.DefaultObjectDescriber.DescribeObject(kubeObj.Obj)
142 | if err != nil {
143 | ns.metrics.descErr.WithLabelValues(objKind).Inc()
144 | return fmt.Errorf("failed to describe object: %w", err)
145 | }
146 | ns.metrics.descSizes.WithLabelValues(objKind).Observe(float64(len(desc)))
147 |
148 | descSplits := common.LimitSplit(desc, common.NRDBLimit)
149 | if len(descSplits) == 0 {
150 | return nil
151 | }
152 |
153 | objNS, objName, err := common.GetObjNamespaceAndName(kubeObj.Obj)
154 | if err != nil {
155 | return fmt.Errorf("failed to get object namespace/name: %w", err)
156 | }
157 |
158 | e, err := ns.sdkIntegration.Entity(objName, fmt.Sprintf("k8s:%s:%s:%s", ns.clusterName, objNS, strings.ToLower(objKind)))
159 | if err != nil {
160 | return fmt.Errorf("failed to create entity: %w", err)
161 | }
162 |
163 | e.AddAttributes(
164 | sdkAttr.Attr("clusterName", ns.clusterName),
165 | sdkAttr.Attr("displayName", e.Metadata.Name),
166 | )
167 |
168 | extraAttrs := make(map[string]interface{})
169 | extraAttrs["clusterName"] = ns.clusterName
170 | extraAttrs["type"] = fmt.Sprintf("%s.Description", objKind)
171 |
172 | summary := descSplits[0]
173 | for i := 0; i < common.SplitMaxCols; i++ {
174 | key := fmt.Sprintf("summary.part[%d]", i)
175 | val := ""
176 | if i < len(descSplits) {
177 | val = descSplits[i]
178 | }
179 | extraAttrs[key] = val
180 | }
181 |
182 | ns.decorateAttrs(extraAttrs)
183 |
184 | err = e.AddEvent(sdkEvent.NewWithAttributes(summary, newRelicCategory, extraAttrs))
185 | if err != nil {
186 | return fmt.Errorf("couldn't add event: %w", err)
187 | }
188 |
189 | err = ns.sendIntegrationPayloadToAgent()
190 | if err != nil {
191 | return fmt.Errorf("error sending data to agent: %w", err)
192 | }
193 |
194 | return nil
195 | }
196 |
197 | // HandleEvent sends the event to the New Relic Agent
198 | func (ns *newRelicInfraSink) HandleEvent(kubeEvent common.KubeEvent) error {
199 | defer ns.sdkIntegration.Clear()
200 |
201 | entityType, entityName := formatEntityID(ns.clusterName, kubeEvent)
202 |
203 | e, err := ns.sdkIntegration.Entity(entityName, entityType)
204 | if err != nil {
205 | return fmt.Errorf("unable to create entity: %w", err)
206 | }
207 |
208 | flattenedEvent, err := common.FlattenStruct(kubeEvent)
209 |
210 | if err != nil {
211 | return fmt.Errorf("could not flatten EventData struct: %w", err)
212 | }
213 |
214 | ns.decorateAttrs(flattenedEvent)
215 |
216 | event := sdkEvent.NewWithAttributes(
217 | kubeEvent.Event.Message,
218 | newRelicCategory,
219 | flattenedEvent,
220 | )
221 | err = e.AddEvent(event)
222 | if err != nil {
223 | return fmt.Errorf("couldn't add event: %w", err)
224 | }
225 |
226 | err = ns.sendIntegrationPayloadToAgent()
227 | if err != nil {
228 | return fmt.Errorf("error sending data to agent: %w", err)
229 | }
230 |
231 | return nil
232 | }
233 |
234 | // formatEntityID returns the entity ID information as a tuple of (entityType, entityName).
235 | //
236 | // Returned values are structured as follows:
237 | // (k8s:<clusterName>:<namespace>:<objectKind>, <objectName>)
238 | //
239 | // Example pod:
240 | // ("k8s:fsi-cluster-explorer:default:pod", "newrelic-infra-s2wh9")
241 | //
242 | // Example node (namespace omitted):
243 | // ("k8s:fsi-cluster-explorer:node", "worker-node-1")
244 | func formatEntityID(clusterName string, kubeEvent common.KubeEvent) (string, string) {
245 | parts := []string{newRelicNamespace}
246 |
247 | parts = append(parts, clusterName)
248 |
249 | if kubeEvent.Event.InvolvedObject.Namespace != "" {
250 | parts = append(parts, kubeEvent.Event.InvolvedObject.Namespace)
251 | }
252 |
253 | parts = append(parts, strings.ToLower(kubeEvent.Event.InvolvedObject.Kind))
254 |
255 | return strings.Join(parts, ":"), kubeEvent.Event.InvolvedObject.Name
256 | }
257 |
258 | func (ns *newRelicInfraSink) sendIntegrationPayloadToAgent() error {
259 | jsonBytes, err := json.Marshal(ns.sdkIntegration)
260 | if err != nil {
261 | return fmt.Errorf("unable to marshal data: %w", err)
262 | }
263 |
264 | ctx, cancel := context.WithCancel(context.Background())
265 | defer cancel()
266 | request, err := http.NewRequestWithContext(ctx, "POST", ns.agentEndpoint, bytes.NewBuffer(jsonBytes))
267 | if err != nil {
268 | return fmt.Errorf("unable to prepare request: %w", err)
269 | }
270 |
271 | resp, err := ns.pesterClient.Do(request)
272 |
273 | if err != nil {
274 | ns.metrics.httpTotalFailures.Inc()
275 | return fmt.Errorf("HTTP transport error: %w", err)
276 | }
277 |
278 | disposeBody(resp)
279 |
280 | ns.metrics.httpResponses.WithLabelValues(strconv.Itoa(resp.StatusCode)).Inc()
281 |
282 | if resp.StatusCode != http.StatusNoContent {
283 | return fmt.Errorf("unexpected statuscode:%s, expected: 204 No Content", resp.Status)
284 | }
285 |
286 | return nil
287 | }
288 |
289 | // disposeBody reads the entire http response body and closes it after.
290 | // This is a performance optimisation. According to the docs:
291 | //
292 | // https://golang.org/pkg/net/http/#Client.Do
293 | // If the returned error is nil, the Response will contain a non-nil Body which the user is expected to close.
294 | // If the Body is not both read to EOF and closed, the Client's underlying RoundTripper (typically Transport)
295 | // may not be able to re-use a persistent TCP connection to the server for a subsequent "keep-alive" request.
296 | func disposeBody(response *http.Response) {
297 | if _, err := io.Copy(io.Discard, response.Body); err != nil {
298 | logrus.Debugf("warning: could not discard response body: %v", err)
299 | }
300 | if err := response.Body.Close(); err != nil {
301 | logrus.Debugf("warning: could not close response body: %v", err)
302 | }
303 | }
304 |
305 | func (ns *newRelicInfraSink) decorateAttrs(attrs map[string]interface{}) {
306 | attrs["eventRouterVersion"] = ns.sdkIntegration.IntegrationVersion
307 | attrs["integrationVersion"] = ns.sdkIntegration.IntegrationVersion
308 | attrs["integrationName"] = ns.sdkIntegration.Name
309 | attrs["clusterName"] = ns.clusterName
310 | }
311 |
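As a quick sanity check on the histogram constants above, this standalone snippet prints the bucket boundaries that prometheus.ExponentialBuckets produces for bucketStart, bucketFactor, and bucketCount (roughly 2 KiB through 64 KiB):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// bucketStart=1<<11, bucketFactor=2, bucketCount=6
	// prints [2048 4096 8192 16384 32768 65536]
	fmt.Println(prometheus.ExponentialBuckets(1<<11, 2, 6))
}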
--------------------------------------------------------------------------------
/pkg/sinks/new_relic_infra_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 New Relic Corporation. All rights reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | package sinks
4 |
5 | import (
6 | "encoding/json"
7 | "io"
8 | "net/http"
9 | "net/http/httptest"
10 | "os"
11 | "strings"
12 | "testing"
13 |
14 | "github.com/stretchr/testify/assert"
15 | v1 "k8s.io/api/core/v1"
16 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
17 |
18 | "github.com/newrelic/nri-kube-events/pkg/common"
19 | )
20 |
21 | func TestFormatEntityID(t *testing.T) {
22 | podObject := v1.ObjectReference{
23 | Kind: "Pod",
24 | Namespace: "test_namespace",
25 | Name: "TestPod",
26 | }
27 |
28 | nodeObject := v1.ObjectReference{
29 | Kind: "Node",
30 | Name: "Worker1c",
31 | }
32 |
33 | tt := []struct {
34 | involvedObject v1.ObjectReference
35 | expectedEntityType, expectedEntityName, clusterName string
36 | }{
37 | {
38 | involvedObject: podObject,
39 | expectedEntityType: "k8s:test_cluster:test_namespace:pod",
40 | expectedEntityName: "TestPod",
41 | clusterName: "test_cluster",
42 | },
43 | {
44 | involvedObject: podObject,
45 | expectedEntityType: "k8s:different_cluster_name:test_namespace:pod",
46 | expectedEntityName: "TestPod",
47 | clusterName: "different_cluster_name",
48 | },
49 | {
50 | involvedObject: nodeObject,
51 | expectedEntityType: "k8s:my_cluster:node",
52 | expectedEntityName: "Worker1c",
53 | clusterName: "my_cluster",
54 | },
55 | }
56 |
57 | for _, testCase := range tt {
58 | entityType, entityName := formatEntityID(
59 | testCase.clusterName,
60 | common.KubeEvent{
61 | Event: &v1.Event{
62 | InvolvedObject: testCase.involvedObject,
63 | },
64 | },
65 | )
66 |
67 | assert.Equal(t, testCase.expectedEntityName, entityName)
68 | assert.Equal(t, testCase.expectedEntityType, entityType)
69 | }
70 | }
71 |
72 | func TestNewRelicSinkIntegration_HandleEvent_Success(t *testing.T) {
73 | _ = os.Setenv("METADATA", "true")
74 | _ = os.Setenv("NRI_KUBE_EVENTS_myCustomAttribute", "attrValue")
75 | defer os.Clearenv()
76 | expectedPostJSON, err := os.ReadFile("./testdata/event_data.json")
77 | if err != nil {
78 | t.Fatalf("could not read test_post_data.json: %v", err)
79 | }
80 | var expectedData interface{}
81 | if err = json.Unmarshal(expectedPostJSON, &expectedData); err != nil {
82 | t.Fatalf("error unmarshalling test_post_data.json: %v", err)
83 | }
84 |
85 | responseHandler := func(w http.ResponseWriter, r *http.Request) {
86 | body, err2 := io.ReadAll(r.Body)
87 |
88 | defer func() {
89 | _ = r.Body.Close()
90 | }()
91 |
92 | if err2 != nil {
93 | t.Fatalf("error reading request body: %v", err2)
94 | }
95 |
96 | var postData interface{}
97 | if err2 = json.Unmarshal(body, &postData); err2 != nil {
98 | t.Fatalf("error unmarshalling request body: %v", err2)
99 | }
100 |
101 | assert.Equal(t, expectedData, postData)
102 | w.WriteHeader(http.StatusNoContent)
103 | }
104 | var testServer = httptest.NewServer(http.HandlerFunc(responseHandler))
105 |
106 | config := SinkConfig{
107 | Config: map[string]string{
108 | "clusterName": "test-cluster",
109 | "agentEndpoint": testServer.URL,
110 | },
111 | }
112 | sink, _ := createNewRelicInfraSink(config, "0.0.0")
113 | err = sink.HandleEvent(common.KubeEvent{
114 | Verb: "ADDED",
115 | Event: &v1.Event{
116 | Message: "The event message",
117 | ObjectMeta: metav1.ObjectMeta{
118 | Name: "test",
119 | Labels: map[string]string{
120 | "test_label1": "test_value1",
121 | "test_label2": "test_value2",
122 | },
123 | Finalizers: []string{"1", "2"},
124 | },
125 | Count: 10,
126 | InvolvedObject: v1.ObjectReference{
127 | Kind: "Pod",
128 | Namespace: "test_namespace",
129 | Name: "TestPod",
130 | },
131 | }})
132 | if err != nil {
133 | t.Errorf("unexpected error handling event: %v", err)
134 | }
135 | }
136 |
137 | func TestNewRelicInfraSink_HandleEvent_AddEventError(t *testing.T) {
138 | t.Skip("Speak to OHAI about global flags automatically registered when we call integration.New")
139 | config := SinkConfig{
140 | Config: map[string]string{
141 | "clusterName": "test-cluster",
142 | "agentEndpoint": "",
143 | },
144 | }
145 | sink, _ := createNewRelicInfraSink(config, "0.0.0")
146 | err := sink.HandleEvent(common.KubeEvent{
147 | Verb: "ADDED",
148 | Event: &v1.Event{
149 | Message: "",
150 | ObjectMeta: metav1.ObjectMeta{
151 | Name: "test",
152 | Labels: map[string]string{
153 | "test_label1": "test_value1",
154 | "test_label2": "test_value2",
155 | },
156 | Finalizers: []string{"1", "2"},
157 | },
158 | Count: 10,
159 | InvolvedObject: v1.ObjectReference{
160 | Kind: "Pod",
161 | Namespace: "test_namespace",
162 | Name: "TestPod",
163 | },
164 | }})
165 | if err == nil {
166 | t.Fatal("expected error, got nothing")
167 | }
168 |
169 | wantedError := "couldn't add event"
170 | if !strings.Contains(err.Error(), wantedError) {
171 | t.Errorf("wanted error with message '%s' got: '%v'", wantedError, err)
172 | }
173 | }
174 |
--------------------------------------------------------------------------------
/pkg/sinks/sinks.go:
--------------------------------------------------------------------------------
1 | // Package sinks ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package sinks
5 |
6 | import (
7 | "fmt"
8 | "time"
9 |
10 | "github.com/sirupsen/logrus"
11 |
12 | "github.com/newrelic/nri-kube-events/pkg/common"
13 | )
14 |
15 | // Sink receives events from the router, processes them, and publishes them to a certain
16 | // destination (stdout, the New Relic platform, etc.).
17 | type Sink interface {
18 | HandleEvent(kubeEvent common.KubeEvent) error
19 | HandleObject(kubeObject common.KubeObject) error
20 | }
21 |
22 | // SinkConfig defines the name and config map of a `Sink`
23 | type SinkConfig struct {
24 | Name string
25 | Config map[string]string
26 | }
27 |
28 | // MustGetString returns the string variable by the given name.
29 | // If it's not present, an error is logged and the application stops.
30 | func (s SinkConfig) MustGetString(name string) string {
31 | val, ok := s.Config[name]
32 | if !ok {
33 | logrus.Fatalf("Required string variable %s not set for %s Sink", name, s.Name)
34 | }
35 | return val
36 | }
37 |
38 | // GetDurationOr returns the duration variable by the given name.
39 | // It will return the fallback in case the duration is not found.
40 | // Invalid durations in configuration are not accepted.
41 | func (s SinkConfig) GetDurationOr(name string, fallback time.Duration) time.Duration {
42 | val, ok := s.Config[name]
43 | if !ok {
44 | return fallback
45 | }
46 |
47 | dur, err := time.ParseDuration(val)
48 | if err != nil {
49 | logrus.Fatalf("Duration config field '%s' has invalid value of '%s' for %s Sink: %v", name, val, s.Name, err)
50 | }
51 |
52 | return dur
53 | }
54 |
55 | type sinkFactory func(config SinkConfig, integrationVersion string) (Sink, error)
56 |
57 | // registeredFactories holds all the registered sinks by this package
58 | var registeredFactories = map[string]sinkFactory{}
59 |
60 | func register(name string, factory sinkFactory) {
61 | if _, ok := registeredFactories[name]; ok {
62 | logrus.Fatal("registered a double sink factory")
63 | }
64 |
65 | registeredFactories[name] = factory
66 | }
67 |
68 | // Create takes a slice of SinkConfigs and attempts
69 | // to initialize the sink handlers.
70 | func Create(configs []SinkConfig, integrationVersion string) (map[string]Sink, error) {
71 | sinks := make(map[string]Sink)
72 |
73 | for _, sinkConf := range configs {
74 | var ok bool
75 | var factory sinkFactory
76 |
77 | if factory, ok = registeredFactories[sinkConf.Name]; !ok {
78 | return sinks, fmt.Errorf("sink not found: %s", sinkConf.Name)
79 | }
80 |
81 | sink, err := factory(sinkConf, integrationVersion)
82 | if err != nil {
83 | return sinks, fmt.Errorf("could not initialize sink %s: %w", sinkConf.Name, err)
84 | }
85 |
86 | logrus.Infof("Created sink: %s", sinkConf.Name)
87 |
88 | sinks[sinkConf.Name] = sink
89 | }
90 |
91 | return sinks, nil
92 | }
93 |
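Taken together, the registry above lets a caller assemble sinks from plain configuration. A minimal sketch using the two sink names registered in this package; the endpoint and timeout values are placeholders chosen for illustration, not defaults from this repository:

package example

import (
	"log"

	"github.com/newrelic/nri-kube-events/pkg/sinks"
)

func buildSinks(version string) map[string]sinks.Sink {
	configs := []sinks.SinkConfig{
		{Name: "stdout"},
		{
			Name: "newRelicInfra",
			Config: map[string]string{
				// Keys read by createNewRelicInfraSink; the endpoint is a placeholder.
				"clusterName":      "my-cluster",
				"agentEndpoint":    "http://infra-agent.example:8001/v1/data",
				"agentHTTPTimeout": "15s",
			},
		},
	}

	created, err := sinks.Create(configs, version)
	if err != nil {
		log.Fatalf("could not create sinks: %v", err)
	}
	return created
}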
--------------------------------------------------------------------------------
/pkg/sinks/stdout.go:
--------------------------------------------------------------------------------
1 | // Package sinks ...
2 | // Copyright 2019 New Relic Corporation. All rights reserved.
3 | // SPDX-License-Identifier: Apache-2.0
4 | package sinks
5 |
6 | import (
7 | "encoding/json"
8 | "fmt"
9 |
10 | "github.com/sirupsen/logrus"
11 |
12 | "github.com/newrelic/nri-kube-events/pkg/common"
13 | )
14 |
15 | func init() {
16 | register("stdout", createStdoutSink)
17 | }
18 |
19 | func createStdoutSink(_ SinkConfig, _ string) (Sink, error) {
20 | return &stdoutSink{}, nil
21 | }
22 |
23 | type stdoutSink struct{}
24 |
25 | func (stdoutSink) HandleEvent(event common.KubeEvent) error {
26 | b, err := json.Marshal(event)
27 |
28 | if err != nil {
29 | return fmt.Errorf("stdoutSink: could not marshal event: %w", err)
30 | }
31 |
32 | logrus.Info(string(b))
33 | return nil
34 | }
35 |
36 | func (stdoutSink) HandleObject(object common.KubeObject) error {
37 | b, err := json.Marshal(object)
38 |
39 | if err != nil {
40 | return fmt.Errorf("stdoutSink: could not marshal object: %w", err)
41 | }
42 |
43 | logrus.Info(string(b))
44 | return nil
45 | }
46 |
--------------------------------------------------------------------------------
/pkg/sinks/testdata/event_data.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kube_events",
3 | "protocol_version": "3",
4 | "integration_version": "0.0.0",
5 | "data": [
6 | {
7 | "entity": {
8 | "name": "TestPod",
9 | "type": "k8s:test-cluster:test_namespace:pod",
10 | "id_attributes": []
11 | },
12 | "metrics": [],
13 | "inventory": {},
14 | "events": [
15 | {
16 | "attributes": {
17 | "event.count": 10,
18 | "event.involvedObject.kind": "Pod",
19 | "event.involvedObject.name": "TestPod",
20 | "event.involvedObject.namespace": "test_namespace",
21 | "event.metadata.finalizers[0]": "1",
22 | "event.metadata.finalizers[1]": "2",
23 | "event.metadata.labels.test_label1": "test_value1",
24 | "event.metadata.labels.test_label2": "test_value2",
25 | "event.metadata.name": "test",
26 | "clusterName": "test-cluster",
27 | "eventRouterVersion": "0.0.0",
28 | "integrationName": "kube_events",
29 | "integrationVersion": "0.0.0",
30 | "verb": "ADDED",
31 | "event.message": "The event message",
32 | "myCustomAttribute": "attrValue"
33 | },
34 | "summary": "The event message",
35 | "category": "kubernetes"
36 | }
37 | ]
38 | }
39 | ]
40 | }
41 |
--------------------------------------------------------------------------------
/skaffold.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: skaffold/v4beta1
2 | kind: Config
3 | metadata:
4 | name: nri-kube-events
5 | build:
6 | artifacts:
7 | - image: newrelic/nri-kube-events
8 | context: .
9 | custom:
10 | buildCommand: ./skaffold_build.sh
11 | dependencies:
12 | paths:
13 | - "**/*.go"
14 | - go.mod
15 | - skaffold_build.sh
16 | tagPolicy:
17 | dateTime: {}
18 | manifests:
19 | rawYaml:
20 | - deploy/local.yaml
21 | deploy:
22 | kubectl: {}
23 |
--------------------------------------------------------------------------------
/skaffold_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | export DOCKER_BUILDKIT=1
4 | export DOCKER_IMAGE_NAME="${IMAGE}"
5 |
6 | pushd "${BUILD_CONTEXT}" > /dev/null
7 | make docker
8 |
9 | if [[ "${PUSH_IMAGE}" = true ]]; then
10 | docker push "${DOCKER_IMAGE_NAME}"
11 | fi
12 |
--------------------------------------------------------------------------------
/test/integration/integration_test.go:
--------------------------------------------------------------------------------
1 | //go:build integration
2 | // +build integration
3 |
4 | // Package integration_test implements a simple integration test against a local cluster, whose config is loaded from the kubeconfig file.
5 | package integration_test
6 |
7 | import (
8 | "context"
9 | "encoding/json"
10 | "os"
11 | "path"
12 | "strings"
13 | "testing"
14 | "time"
15 |
16 | sdkEvent "github.com/newrelic/infra-integrations-sdk/data/event"
17 | "github.com/stretchr/testify/assert"
18 | v1 "k8s.io/api/core/v1"
19 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
20 | "k8s.io/client-go/informers"
21 | "k8s.io/client-go/kubernetes"
22 | "k8s.io/client-go/rest"
23 | "k8s.io/client-go/tools/clientcmd"
24 |
25 | "github.com/newrelic/nri-kube-events/pkg/events"
26 | "github.com/newrelic/nri-kube-events/test/integration"
27 | )
28 |
29 | // We must have a global TestAgentSink because the infrastructure-sdk attempts to register global flags when the
30 | // agent sink is created, which results in a panic if multiple instantiations are attempted.
31 | var testSinkInstance *integration.TestAgentSink
32 |
33 | // Test_Sink_receives_common_Pod_creation_events checks that events related to pod creation are received.
34 | func Test_Sink_receives_common_Pod_creation_events(t *testing.T) {
35 | client, agentMock := initialize(t)
36 |
37 | t.Log("Creating test namespace...")
38 | ns := &v1.Namespace{
39 | ObjectMeta: metav1.ObjectMeta{
40 | GenerateName: nsName(t),
41 | },
42 | }
43 | ns, err := client.CoreV1().Namespaces().Create(contextFromTestDeadline(t), ns, metav1.CreateOptions{})
44 | if err != nil {
45 | t.Fatalf("could not create test namespace '%s': %v", ns, err)
46 | }
47 |
48 | t.Cleanup(func() {
49 | t.Log("Cleaning up test namespace...")
50 | err := client.CoreV1().Namespaces().Delete(contextFromTestDeadline(t), ns.Name, metav1.DeleteOptions{})
51 | if err != nil {
52 | t.Logf("could not delete test namespace '%s': %v", ns.Name, err)
53 | }
54 | })
55 |
56 | t.Log("Creating test pod...")
57 | testpod, err := client.CoreV1().Pods(ns.Name).Create(contextFromTestDeadline(t), &v1.Pod{
58 | ObjectMeta: metav1.ObjectMeta{
59 | Name: "nginx-e2e",
60 | },
61 | Spec: v1.PodSpec{
62 | Containers: []v1.Container{
63 | {
64 | Name: "nginx",
65 | Image: "nginx",
66 | },
67 | },
68 | },
69 | }, metav1.CreateOptions{})
70 | if err != nil {
71 | t.Fatalf("could not create test pod: %v", err)
72 | }
73 |
74 | t.Log("Waiting for events to show up...")
75 | agentMock.Wait(10*time.Second, 1*time.Minute)
76 | for _, event := range []sdkEvent.Event{
77 | // All strings are matched in a very relaxed way, using strings.Contains(real, test)
78 | {
79 | Summary: "Successfully assigned " + ns.Name + "/" + testpod.Name + " to ",
80 | Category: "kubernetes",
81 | Attributes: map[string]interface{}{
82 | "event.metadata.name": testpod.Name + ".",
83 | "event.metadata.namespace": ns.Name,
84 | "event.reason": "Scheduled",
85 | "clusterName": "",
86 | "event.involvedObject.apiVersion": "",
87 | "event.involvedObject.kind": "Pod",
88 | "event.involvedObject.name": testpod.Name,
89 | "event.message": "Successfully assigned " + ns.Name + "/" + testpod.Name + " to ",
90 | "event.type": "Normal",
91 | "verb": "ADDED",
92 | },
93 | },
94 | {
95 | Summary: "Pulling image \"" + testpod.Spec.Containers[0].Image + "\"",
96 | Category: "kubernetes",
97 | Attributes: map[string]interface{}{
98 | "event.metadata.name": testpod.Name + ".",
99 | "event.metadata.namespace": ns.Name,
100 | "event.reason": "Pulling",
101 | "clusterName": "",
102 | "event.involvedObject.apiVersion": "",
103 | "event.involvedObject.kind": "Pod",
104 | "event.involvedObject.name": testpod.Name,
105 | "event.message": "Pulling image \"" + testpod.Spec.Containers[0].Image + "\"",
106 | "event.type": "Normal",
107 | "verb": "ADDED",
108 | },
109 | },
110 | {
111 | Summary: "Successfully pulled image \"" + testpod.Spec.Containers[0].Image + "\"",
112 | Category: "kubernetes",
113 | Attributes: map[string]interface{}{
114 | "event.metadata.name": testpod.Name + ".",
115 | "event.metadata.namespace": ns.Name,
116 | "event.reason": "Pulled",
117 | "clusterName": "",
118 | "event.involvedObject.apiVersion": "",
119 | "event.involvedObject.kind": "Pod",
120 | "event.involvedObject.name": testpod.Name,
121 | "event.message": "Successfully pulled image \"" + testpod.Spec.Containers[0].Image + "\"",
122 | "event.type": "Normal",
123 | "verb": "ADDED",
124 | },
125 | },
126 | {
127 | Summary: "Started container " + testpod.Spec.Containers[0].Name,
128 | Category: "kubernetes",
129 | Attributes: map[string]interface{}{
130 | "event.metadata.name": testpod.Name + ".",
131 | "event.metadata.namespace": ns.Name,
132 | "event.reason": "Started",
133 | "clusterName": "",
134 | "event.involvedObject.apiVersion": "",
135 | "event.involvedObject.kind": "Pod",
136 | "event.involvedObject.name": testpod.Name,
137 | "event.message": "Started container " + testpod.Spec.Containers[0].Name,
138 | "event.type": "Normal",
139 | "verb": "ADDED",
140 | },
141 | },
142 | } {
143 | if agentMock.Has(&event) {
144 | continue
145 | }
146 |
147 | e := json.NewEncoder(os.Stderr)
148 | t.Log("Expected:")
149 | _ = e.Encode(event)
150 | t.Log("Have:")
151 | _ = e.Encode(agentMock.Events())
152 | t.Fatalf("Event was not captured")
153 | }
154 |
155 | // TODO(kpattaswamy): Once the latest 5 versions of Kubernetes contain the same container creation
156 | // events, we should move this logic back up.
157 | // For context, as of version 1.32 in Kubernetes, the event summary and event message contain a colon after
158 | // the word "container". Since these tests aim to provide coverage across the latest 5 versions of
159 | // Kubernetes, we check to see if either an older or newer container creation event exists in the agent.
160 | pre1dot32CreateEvent := sdkEvent.Event{
161 | Summary: "Created container " + testpod.Spec.Containers[0].Name,
162 | Category: "kubernetes",
163 | Attributes: map[string]interface{}{
164 | "event.metadata.name": testpod.Name + ".",
165 | "event.metadata.namespace": ns.Name,
166 | "event.reason": "Created",
167 | "clusterName": "",
168 | "event.involvedObject.apiVersion": "",
169 | "event.involvedObject.kind": "Pod",
170 | "event.involvedObject.name": testpod.Name,
171 | "event.message": "Created container " + testpod.Spec.Containers[0].Name,
172 | "event.type": "Normal",
173 | "verb": "ADDED",
174 | },
175 | }
176 |
177 | post1dot32CreateEvent := sdkEvent.Event{
178 | Summary: "Created container: " + testpod.Spec.Containers[0].Name,
179 | Category: "kubernetes",
180 | Attributes: map[string]interface{}{
181 | "event.metadata.name": testpod.Name + ".",
182 | "event.metadata.namespace": ns.Name,
183 | "event.reason": "Created",
184 | "clusterName": "",
185 | "event.involvedObject.apiVersion": "",
186 | "event.involvedObject.kind": "Pod",
187 | "event.involvedObject.name": testpod.Name,
188 | "event.message": "Created container: " + testpod.Spec.Containers[0].Name,
189 | "event.type": "Normal",
190 | "verb": "ADDED",
191 | },
192 | }
193 |
194 | if !agentMock.Has(&pre1dot32CreateEvent) && !agentMock.Has(&post1dot32CreateEvent) {
195 | e := json.NewEncoder(os.Stderr)
196 | t.Log("Expected:")
197 | _ = e.Encode(pre1dot32CreateEvent)
198 | t.Log("Or")
199 | _ = e.Encode(post1dot32CreateEvent)
200 | t.Fatalf("Event was not captured")
201 | }
202 |
203 | assert.NoError(t, agentMock.Errors())
204 | }
205 |
206 | func Test_Sink_receives_common_Pod_deletion_events(t *testing.T) {
207 | client, agentMock := initialize(t)
208 |
209 | t.Log("Creating test namespace...")
210 | ns := &v1.Namespace{
211 | ObjectMeta: metav1.ObjectMeta{
212 | GenerateName: nsName(t),
213 | },
214 | }
215 | ns, err := client.CoreV1().Namespaces().Create(contextFromTestDeadline(t), ns, metav1.CreateOptions{})
216 | if err != nil {
217 | t.Fatalf("could not create %s namespace: %v", ns, err)
218 | }
219 |
220 | t.Cleanup(func() {
221 | t.Log("Cleaning up test namespace...")
222 | err := client.CoreV1().Namespaces().Delete(contextFromTestDeadline(t), ns.Name, metav1.DeleteOptions{})
223 | if err != nil {
224 | t.Logf("could not delete test namespace '%s': %v", ns.Name, err)
225 | }
226 | })
227 |
228 | t.Log("Creating test pod...")
229 | testpod, err := client.CoreV1().Pods(ns.Name).Create(contextFromTestDeadline(t), &v1.Pod{
230 | ObjectMeta: metav1.ObjectMeta{
231 | Name: "nginx-e2e-killable",
232 | },
233 | Spec: v1.PodSpec{
234 | Containers: []v1.Container{
235 | {
236 | Name: "nginx",
237 | Image: "nginx",
238 | },
239 | },
240 | },
241 | }, metav1.CreateOptions{})
242 | if err != nil {
243 | t.Fatalf("could not create test pod: %v", err)
244 | }
245 |
246 | time.Sleep(7 * time.Second)
247 |
248 | err = client.CoreV1().Pods(ns.Name).Delete(contextFromTestDeadline(t), testpod.Name, metav1.DeleteOptions{})
249 | if err != nil {
250 | t.Fatalf("could not create test pod: %v", err)
251 | }
252 |
253 | t.Log("Waiting for events to show up...")
254 | agentMock.Wait(15*time.Second, 1*time.Minute)
255 | for _, event := range []sdkEvent.Event{
256 | {
257 | Summary: "Stopping container " + testpod.Spec.Containers[0].Name,
258 | Category: "kubernetes",
259 | Attributes: map[string]interface{}{
260 | "event.metadata.name": testpod.Name + ".",
261 | "event.metadata.namespace": ns.Name,
262 | "event.reason": "Killing",
263 | "clusterName": "",
264 | "event.involvedObject.apiVersion": "",
265 | "event.involvedObject.kind": "Pod",
266 | "event.involvedObject.name": testpod.Name,
267 | "event.message": "Stopping container " + testpod.Spec.Containers[0].Name,
268 | "event.type": "Normal",
269 | "verb": "ADDED",
270 | },
271 | },
272 | } {
273 | if agentMock.Has(&event) {
274 | continue
275 | }
276 |
277 | e := json.NewEncoder(os.Stderr)
278 | t.Log("Expected:")
279 | e.Encode(event) // nolint:errcheck
280 | t.Fatalf("Event was not captured")
281 | }
282 | assert.NoError(t, agentMock.Errors())
283 | }
284 |
285 | // nsName performs basic sanitization on the test name to convert it to an acceptable namespace name.
286 | func nsName(t *testing.T) string {
287 | t.Helper()
288 |
289 | return "e2e-" + strings.ReplaceAll(strings.ToLower(t.Name()), "_", "-")
290 | }
291 |
292 | func contextFromTestDeadline(t *testing.T) context.Context {
293 | deadline, hasDeadline := t.Deadline()
294 | if !hasDeadline {
295 | return context.Background()
296 | }
297 |
298 | ctx, _ := context.WithDeadline(context.Background(), deadline)
299 | return ctx
300 | }
301 |
302 | // initialize returns a kubernetes client and a mocked agent sink ready to receive events
303 | func initialize(t *testing.T) (*kubernetes.Clientset, *integration.TestAgentSink) {
304 | t.Helper()
305 |
306 | conf, err := restConfig()
307 | if err != nil {
308 | t.Fatalf("could not build kubernetes config: %v", err)
309 | }
310 |
311 | client, err := kubernetes.NewForConfig(conf)
312 | if err != nil {
313 | t.Fatalf("could not build kubernetes client: %v", err)
314 | }
315 |
316 | sharedInformers := informers.NewSharedInformerFactory(client, time.Duration(0))
317 | eventsInformer := sharedInformers.Core().V1().Events().Informer()
318 | sharedInformers.Start(nil)
319 | sharedInformers.WaitForCacheSync(nil)
320 | for _, obj := range eventsInformer.GetStore().List() {
321 | _ = eventsInformer.GetStore().Delete(obj)
322 | }
323 |
324 | if testSinkInstance == nil {
325 | testSinkInstance = integration.NewTestAgentSink()
326 | }
327 | testSinkInstance.ForgetEvents()
328 |
329 | router := events.NewRouter(eventsInformer, map[string]events.EventHandler{"mock": testSinkInstance})
330 | go router.Run(nil)
331 |
332 | return client, testSinkInstance
333 | }
334 |
335 | // restConfig attempts to build a k8s config from the environment, or the default kubeconfig path
336 | func restConfig() (*rest.Config, error) {
337 | config, err := rest.InClusterConfig()
338 | if err == nil {
339 | return config, nil
340 | }
341 |
342 | config, err = clientcmd.BuildConfigFromFlags("", path.Join(os.ExpandEnv("$HOME"), ".kube", "config"))
343 | if err == nil {
344 | return config, nil
345 | }
346 |
347 | return nil, err
348 | }
349 |
--------------------------------------------------------------------------------
/test/integration/test_agent_sink.go:
--------------------------------------------------------------------------------
1 | package integration
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "net/http/httptest"
10 | "reflect"
11 | "strings"
12 | "sync"
13 | "time"
14 |
15 | sdkEvent "github.com/newrelic/infra-integrations-sdk/data/event"
16 | log "github.com/sirupsen/logrus"
17 |
18 | "github.com/newrelic/nri-kube-events/pkg/common"
19 | "github.com/newrelic/nri-kube-events/pkg/sinks"
20 | )
21 |
22 | // Must be in sync with unexported name in pkg/sinks/new_relic_infra.go:32.
23 | const newRelicInfraSinkID = "newRelicInfra"
24 | const sinkChanBuffer = 128
25 |
26 | // TestAgentSink is an instrumented infra-agent sink for testing e2e reception and processing.
27 | type TestAgentSink struct {
28 | agentSink sinks.Sink
29 | httpServer *httptest.Server
30 | eventReceivedChan chan struct{}
31 | receivedEvents []sdkEvent.Event
32 | mtx *sync.RWMutex
33 |
34 | errs []error
35 | }
36 |
37 | // NewTestAgentSink returns an instrumented infra-agent sink for testing.
38 | func NewTestAgentSink() *TestAgentSink {
39 | mockedAgentSink := &TestAgentSink{
40 | mtx: &sync.RWMutex{},
41 | eventReceivedChan: make(chan struct{}, sinkChanBuffer),
42 | }
43 | mockedAgentSink.httpServer = httptest.NewServer(mockedAgentSink)
44 |
45 | agentSinkConfig := sinks.SinkConfig{
46 | Name: newRelicInfraSinkID,
47 | Config: map[string]string{
48 | "clusterName": "integrationTest",
49 | "agentEndpoint": "http://" + mockedAgentSink.httpServer.Listener.Addr().String(),
50 | },
51 | }
52 |
53 | createdSinks, err := sinks.Create([]sinks.SinkConfig{agentSinkConfig}, "0.0.0")
54 | if err != nil {
55 | log.Fatalf("error creating infra sink: %v", err)
56 | }
57 |
58 | agentSink, ok := createdSinks[newRelicInfraSinkID]
59 | if !ok {
60 | log.Fatal("could not retrieve agent infra sink from map")
61 | }
62 |
63 | mockedAgentSink.agentSink = agentSink
64 |
65 | return mockedAgentSink
66 | }
67 |
68 | // HandleEvent sends a notification to the event received channel and then forwards it to the underlying sink.
69 | func (tas *TestAgentSink) HandleEvent(kubeEvent common.KubeEvent) error {
70 | tas.eventReceivedChan <- struct{}{}
71 | return tas.agentSink.HandleEvent(kubeEvent)
72 | }
73 |
74 | // ServeHTTP handles a request that would otherwise go to the infra-agent and stores the unmarshalled events.
75 | func (tas *TestAgentSink) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
76 | tas.mtx.Lock()
77 | defer tas.mtx.Unlock()
78 |
79 | var ev struct {
80 | Data []struct {
81 | Events []sdkEvent.Event `json:"events"`
82 | } `json:"data"`
83 | }
84 |
85 | body, err := io.ReadAll(r.Body)
86 | _ = r.Body.Close()
87 | if err != nil {
88 | tas.errs = append(tas.errs, fmt.Errorf("error reading request body: %w", err))
89 | }
90 |
91 | err = json.Unmarshal(body, &ev)
92 | if err != nil {
93 | tas.errs = append(tas.errs, fmt.Errorf("error unmarshalling request body: %w", err))
94 | }
95 |
96 | if len(ev.Data) == 0 {
97 | log.Warnf("received payload with no data: %s", string(body))
98 | rw.WriteHeader(http.StatusBadRequest)
99 | return
100 | }
101 |
102 | tas.receivedEvents = append(tas.receivedEvents, ev.Data[0].Events...)
103 | rw.WriteHeader(http.StatusNoContent) // Return 204 as the infra-agent does.
104 | }
105 |
106 | func (tas *TestAgentSink) Errors() error {
107 | return errors.Join(tas.errs...)
108 | }
109 |
110 | // Has checks, using relaxed substring matching, whether the mocked agent has received an event.
111 | func (tas *TestAgentSink) Has(testEvent *sdkEvent.Event) bool {
112 | tas.mtx.RLock()
113 | defer tas.mtx.RUnlock()
114 |
115 | for i := range tas.receivedEvents {
116 | receivedEvent := &tas.receivedEvents[i]
117 |
118 | if isEventSubset(receivedEvent, testEvent) {
119 | return true
120 | }
121 | }
122 |
123 | return false
124 | }
125 |
126 | // Events returns the list of events the mock has captured.
127 | func (tas *TestAgentSink) Events() []sdkEvent.Event {
128 | tas.mtx.RLock()
129 | defer tas.mtx.RUnlock()
130 |
131 | retEvents := make([]sdkEvent.Event, len(tas.receivedEvents))
132 | copy(retEvents, tas.receivedEvents)
133 |
134 | return retEvents
135 | }
136 |
137 | // ForgetEvents erases all the recorded events.
138 | func (tas *TestAgentSink) ForgetEvents() {
139 | tas.mtx.Lock()
140 | defer tas.mtx.Unlock()
141 |
142 | tas.receivedEvents = nil
143 | }
144 |
145 | // Wait blocks until betweenEvents time has passed since the last received event, or up to max time has passed since the call.
146 | // Returns false if we had to exhaust max.
147 | func (tas *TestAgentSink) Wait(betweenEvents, max time.Duration) bool {
148 | eventTimer := time.NewTimer(betweenEvents)
149 | maxTimer := time.NewTimer(max)
150 |
151 | for {
152 | select {
153 | // Reset betweenEvents timer whenever an event is received.
154 | case <-tas.eventReceivedChan:
155 | if !eventTimer.Stop() {
156 | <-eventTimer.C
157 | }
158 | eventTimer.Reset(betweenEvents)
159 |
160 | // Return false if max timeout is reached.
161 | case <-maxTimer.C:
162 | return false
163 |
164 | // Return true when small timeout is reached.
165 | case <-eventTimer.C:
166 | return true
167 | }
168 | }
169 | }
170 |
171 | // isEventSubset checks whether the new event is a subset of the old event.
172 | func isEventSubset(old, new *sdkEvent.Event) bool {
173 | if old == new {
174 | return true
175 | }
176 |
177 | if !strings.Contains(old.Category, new.Category) ||
178 | !strings.Contains(old.Summary, new.Summary) {
179 | return false
180 | }
181 |
182 | // Check new map is a subset of old map.
183 | for nk, nv := range new.Attributes {
184 | // Check the old event contains all keys of the new one.
185 | ov, found := old.Attributes[nk]
186 | if !found {
187 | return false
188 | }
189 |
190 | // Ensure types are equal.
191 | if reflect.TypeOf(ov) != reflect.TypeOf(nv) {
192 | return false
193 | }
194 |
195 | // If both are strings, check the old contains the new (partial matching).
196 | // Otherwise, just check for equality.
197 | switch nvs := nv.(type) {
198 | case string:
199 | ovs := ov.(string)
200 | if !strings.Contains(ovs, nvs) {
201 | return false
202 | }
203 | default:
204 | if !reflect.DeepEqual(ov, nv) {
205 | return false
206 | }
207 | }
208 | }
209 |
210 | return true
211 | }
212 |
--------------------------------------------------------------------------------