├── .eslintrc.js
├── .gitignore
├── LICENSE
├── README.md
├── __mocks__
│   └── mockDashboards.js
├── __tests__
│   ├── addressController.js
│   └── grafanaController.js
├── assets
│   ├── Alerts.gif
│   ├── App-Icons
│   │   ├── .DS_Store
│   │   ├── Icon 1
│   │   │   └── icon_512x512.png
│   │   └── Icon2
│   │       └── appIcon.png
│   ├── CreateDashboard.gif
│   ├── HealthPerformance.gif
│   ├── KafOpticon Sample1.png
│   ├── Submit.gif
│   ├── Watchtower.png
│   ├── kafopticon.png
│   ├── kafopticonBG.png
│   └── kafopticonbg.png
├── docker-test
│   ├── consumer.js
│   ├── docker-compose.yml
│   ├── jmx_prometheus_javaagent-0.20.0.jar
│   ├── kafka-jmx-exporter-config.yml
│   ├── producer.js
│   └── prometheus.yml
├── grafana
│   ├── Dockerfile
│   │   └── provisioning
│   │       ├── dashboards
│   │       │   ├── all.yml
│   │       │   └── dashboard.json
│   │       └── datasources
│   │           └── datasource.yml
│   └── grafana.ini
├── local-test
│   ├── jmx_prometheus_httpserver-0.19.0.jar
│   ├── kafka-config
│   │   ├── server1.properties
│   │   ├── server2.properties
│   │   ├── server3.properties
│   │   └── zookeeper.properties
│   ├── producers-and-consumers
│   │   └── producer.js
│   └── scraping-config
│       ├── jmxConfigTemplate.yml
│       ├── jmxConfigTestFile.yml
│       ├── jmxconfig.yml
│       └── prometheus.yml
├── main
│   ├── controllers
│   │   ├── addressController.js
│   │   ├── alertsController.js
│   │   ├── grafanaController.js
│   │   ├── kafkaMonitoringController.js
│   │   └── prometheus.yml
│   ├── dashboards
│   │   └── bigDashboard.js
│   ├── electron.js
│   ├── expressServer.js
│   └── routers
│       ├── addressRouter.js
│       ├── alertsRouter.js
│       └── kafkaMonitoringRouter.js
├── package-lock.json
├── package.json
├── src
│   ├── app.jsx
│   ├── components
│   │   ├── AlertsContainer.jsx
│   │   ├── Header.jsx
│   │   ├── MainContainer.jsx
│   │   ├── Menu.jsx
│   │   ├── MetricsContainer.jsx
│   │   ├── PerformanceContainer.jsx
│   │   └── UserDashboard.jsx
│   ├── index.html
│   ├── react.jsx
│   └── styles
│       └── styles.css
└── webpack.config.js
/.eslintrc.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | env: {
3 | browser: true,
4 | es2021: true,
5 | node: true,
6 | 'jest/globals': true,
7 | },
8 | extends: 'airbnb',
9 | overrides: [
10 | {
11 | env: {
12 | node: true,
13 | },
14 | files: ['.eslintrc.{js,cjs}'],
15 | parserOptions: {
16 | sourceType: 'script',
17 | },
18 | },
19 | ],
20 | parserOptions: {
21 | ecmaVersion: 'latest',
22 | sourceType: 'module',
23 | },
24 | rules: {},
25 | };
26 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Logs
3 | logs
4 | *.log
5 | npm-debug.log*
6 | yarn-debug.log*
7 | yarn-error.log*
8 | lerna-debug.log*
9 | .pnpm-debug.log*
10 |
11 | # Diagnostic reports (https://nodejs.org/api/report.html)
12 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
13 |
14 | # Runtime data
15 | pids
16 | *.pid
17 | *.seed
18 | *.pid.lock
19 |
20 | # Directory for instrumented libs generated by jscoverage/JSCover
21 | lib-cov
22 |
23 | # Coverage directory used by tools like istanbul
24 | coverage
25 | *.lcov
26 |
27 | # nyc test coverage
28 | .nyc_output
29 |
30 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
31 | .grunt
32 |
33 | # Bower dependency directory (https://bower.io/)
34 | bower_components
35 |
36 | # node-waf configuration
37 | .lock-wscript
38 |
39 | # Compiled binary addons (https://nodejs.org/api/addons.html)
40 | build/Release
41 |
42 | # Dependency directories
43 | node_modules/
44 | jspm_packages/
45 |
46 | # Snowpack dependency directory (https://snowpack.dev/)
47 | web_modules/
48 |
49 | # TypeScript cache
50 | *.tsbuildinfo
51 |
52 | # Optional npm cache directory
53 | .npm
54 |
55 | # Optional eslint cache
56 | .eslintcache
57 |
58 | # Optional stylelint cache
59 | .stylelintcache
60 |
61 | # Microbundle cache
62 | .rpt2_cache/
63 | .rts2_cache_cjs/
64 | .rts2_cache_es/
65 | .rts2_cache_umd/
66 |
67 | # Optional REPL history
68 | .node_repl_history
69 |
70 | # Output of 'npm pack'
71 | *.tgz
72 |
73 | # Yarn Integrity file
74 | .yarn-integrity
75 |
76 | # dotenv environment variable files
77 | .env
78 | .env.development.local
79 | .env.test.local
80 | .env.production.local
81 | .env.local
82 |
83 | # parcel-bundler cache (https://parceljs.org/)
84 | .cache
85 | .parcel-cache
86 |
87 | # Next.js build output
88 | .next
89 | out
90 |
91 | # Nuxt.js build / generate output
92 | .nuxt
93 | dist
94 |
95 | # Gatsby files
96 | .cache/
97 | # Comment in the public line in if your project uses Gatsby and not Next.js
98 | # https://nextjs.org/blog/next-9-1#public-directory-support
99 | # public
100 |
101 | # vuepress build output
102 | .vuepress/dist
103 |
104 | # vuepress v2.x temp and cache directory
105 | .temp
106 | .cache
107 |
108 | # Docusaurus cache and generated files
109 | .docusaurus
110 |
111 | # Serverless directories
112 | .serverless/
113 |
114 | # FuseBox cache
115 | .fusebox/
116 |
117 | # DynamoDB Local files
118 | .dynamodb/
119 |
120 | # TernJS port file
121 | .tern-port
122 |
123 | # Stores VSCode versions used for testing VSCode extensions
124 | .vscode-test
125 |
126 | # yarn v2
127 | .yarn/cache
128 | .yarn/unplugged
129 | .yarn/build-state.yml
130 | .yarn/install-state.gz
131 | .pnp.*
132 |
133 | #DS_Store
134 | main/.DS_Store
135 | .DS_Store
136 |
137 | #env
138 | .env
139 |
140 | #prometheus data folder
141 | data
142 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 OSLabs Beta
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # KafOpticon
4 |
5 | Kafka Cluster Monitor and Visualizer
6 |
7 | ## Built With
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 | # About the Project
28 |
29 | **What:** KafOpticon is an open-source Electron app used for monitoring,
30 | visualizing, and alerting users of Kafka cluster/server metrics. Kafka is a
31 | powerful open-source data stream processing software platform. An article
32 | about KafOpticon can be found
33 | [here](https://medium.com/@gosoriocanales/monitor-and-visualize-your-kafka-clusters-with-kafopticon-3a08b1db822d).
34 |
35 | **Why:** Monitoring relevant cluster metrics is challenging due to the volume
36 | and accessibility of the metrics. KafOpticon aims to solve these problems by
37 | providing REAL-TIME monitoring, visualization, and alert features.
38 | 
39 |
40 | **Submit JMX Port Numbers** 
41 |
42 | Customize your own dashboard with user-selected metrics for more granular
43 | control. 
44 |
45 | **Customize Automatic Email Alerts**
46 | 
47 |
48 | **Dependencies:** Grafana and Prometheus
49 |
50 | # Installation
51 |
52 | To install as an app, navigate to the releases page of our repository, find the
53 | file that matches your OS, and download it.
54 |
55 | Additionally, you could clone this repository, run `npm install` and then
56 | `npm start` to run the app from source code.
57 |
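For reference, running from source looks roughly like this (a minimal sketch, assuming you clone over HTTPS and work from the repository root):

```bash
# Clone the repository, install dependencies, and start the Electron app
git clone https://github.com/oslabs-beta/KafOpticon.git
cd KafOpticon
npm install
npm start
```
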
58 | It is also possible to clone the repository, check out the electron-builder
59 | branch that matches your OS, and execute `npm run dist`. The app will be in the
60 | out/ folder at the repository's root.
61 |
62 | ## Using KafOpticon with Docker-mediated Prometheus and Grafana
63 |
64 | KafOpticon is designed to make monitoring Kafka clusters straightforward. It
65 | automatically generates Prometheus and Grafana containers in Docker, which
66 | connect to the metrics endpoints you provide. This setup uses JMX Exporter's
67 | JMX-Prometheus Java Agent to scrape metric endpoints from each Kafka cluster.
68 |
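Concretely, the Prometheus side boils down to a scrape configuration that targets the JMX endpoints you supply. A rough sketch is below; it is modeled on `docker-test/prometheus.yml` in this repository, and the `host.docker.internal` hostnames are placeholders, not necessarily what KafOpticon generates:

```yaml
# Illustrative only: scrape the JMX Exporter endpoints you provide
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: 'kafka'
    static_configs:
      - targets:
          ['host.docker.internal:9991', 'host.docker.internal:9992', 'host.docker.internal:9993']
```
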
69 | ### Configuration Steps:
70 |
71 | 1. **Set Up Your Kafka Cluster for Monitoring:**
72 |
73 | - To have your own Kafka cluster monitored by KafOpticon, ensure that the
74 | cluster exposes Prometheus-readable metrics data. This is crucial for
75 | KafOpticon’s Prometheus container to connect effectively (a minimal
76 | agent example is shown after this list).
77 | - For more information on setting up JMX Exporter, visit the
78 | [JMX Exporter GitHub page](https://github.com/prometheus/jmx_exporter).
79 |
80 | 2. **Connect KafOpticon to Your Cluster:**
81 | - Open the KafOpticon app.
82 | - Provide a comma-separated list of the exposed JMX Endpoints of your Kafka
83 | cluster (e.g. 9991, 9992, 9993).
84 | - Click on the “send to Docker monitoring” checkbox and hit send.
85 |
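As an illustration of step 1, attaching JMX Exporter's Java agent to a broker you manage yourself might look roughly like the following. The jar path, config path, and port 9991 are placeholders; the repository's `docker-test/docker-compose.yml` does the equivalent for the test cluster via `KAFKA_JVM_PERFORMANCE_OPTS`:

```bash
# Illustrative sketch: expose Prometheus-readable metrics on port 9991 by
# attaching the JMX Prometheus Java agent before starting the broker.
# Adjust the jar path, config path, and port to your own setup.
export KAFKA_OPTS="-javaagent:/path/to/jmx_prometheus_javaagent-0.20.0.jar=9991:/path/to/kafka-jmx-exporter-config.yml"
bin/kafka-server-start.sh config/server.properties
```
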
86 | ### Monitoring with Docker Desktop:
87 |
88 | - After setting up, monitor the process in Docker Desktop. Prometheus and
89 | Grafana containers will be built for monitoring your cluster.
90 | - Allow KafOpticon some time to refresh as these containers are built and
91 | monitoring information is sent to the app.
92 | - If the dashboard is not displaying as expected, press `Command+R` (or `Ctrl+R`
93 | on Windows) to refresh the page.
94 | - Regularly check Docker Desktop to ensure the Prometheus and Grafana containers
95 | are running as expected. Restart any containers that have exited to
96 | troubleshoot potential issues.
97 |
98 | ## Run Docker Test
99 |
100 | If you want to test the app but don't have your own Kafka cluster, follow
101 | the directions below.
102 |
103 | 1. Preconditions:
104 |
105 | - Docker Desktop must be installed and the Docker daemon must be running.
106 | - Ports 3000, 3010, 9090, 9991, 9992, and 9993 must be available.
107 |
108 | 2. Run Kafka Test Cluster in Docker:
109 |
110 | - Navigate to the `docker-test` directory and run `docker-compose up -d` to
111 | start a Kafka cluster in Docker with three brokers, each exposing metrics
112 | on ports 9991, 9992, and 9993, respectively. Ensure the containers are running
113 | without issues in Docker Desktop. Restart any exited containers if
114 | necessary.
115 |
116 | ```bash
117 | docker-compose up -d
118 |
119 | ```
120 |
121 | 3. Run Electron:
122 |
123 | - Run electron with `npm start` in the root directory.
124 |
125 | ```bash
126 | npm start
127 | ```
128 |
129 | 4. Start Monitoring:
130 |
131 | - Provide a comma-separated list of the exposed JMX Endpoints of your Kafka
132 | cluster (9991, 9992, 9993 if using the provided docker-compose file), check
133 | the “send to Docker monitoring” checkbox, and hit send. Monitor the setup in
134 | Docker Desktop and refresh the app as needed using `COMMAND+R`.
135 |
136 | 5. Stop Docker Test:
137 |
138 | - Stop the Prometheus and Grafana containers, and run `docker-compose down`
139 | within the docker-test directory.
140 |
141 | ```bash
142 | docker-compose down
143 | ```
144 |
145 | ## Run local test
146 |
147 | 1. Preconditions:
148 |
149 | - Prometheus and Grafana must be installed
150 | - Ensure that Prometheus and Kafka are in your `$PATH`
151 | - Grafana must already be running (on port 3000) and must be configured to
152 | allow anonymous access with the admin role in the Main Org. (a grafana.ini sketch appears after these steps)
153 | - Ports 3010, 9090, 3030, and 9092 must be available
154 |
155 | 2. Open up two terminals in the root directory.
156 | 3. Run `npm run bootZoo` and `npm run bootKaf1` in the separate terminals.
157 |
158 | ```bash
159 | npm run bootZoo
160 | ```
161 |
162 | ```bash
163 | npm run bootKaf1
164 | ```
165 |
166 | 4. Open up a terminal in the root directory. Run `npm start` in that
167 | directory.
168 |
169 | ```bash
170 | npm start
171 | ```
172 |
173 | 5. In the Electron app, type '2020' into the form, check 'send to
174 | local monitoring', and submit it. Wait, refreshing if necessary. Click into
175 | performance or health metrics. Data on the Kafka cluster should be displayed.
176 |
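For the anonymous-access precondition above, settings along these lines in your `grafana.ini` should work. This is a sketch using standard Grafana options; double-check the keys against your Grafana version:

```ini
# Sketch of the anonymous-access settings referenced in the preconditions
[auth.anonymous]
enabled = true
org_name = Main Org.
org_role = Admin
```
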
177 | ## Customization
178 |
179 | KafOpticon offers flexible customization options for both locally mediated and
180 | Docker-mediated testing environments.
181 |
182 | ### Customizing Monitoring Dashboard:
183 |
184 | - **Pre-configured Panels:** You can configure an additional monitoring tab with
185 | up to 10 panels of pre-selected metrics.
186 | - **Grafana Dashboard Customization:**
187 | 1. Navigate to `localhost:3000` in your browser.
188 | 2. Click into `Dashboards`, and locate a dashboard titled _KafOpticon
189 | Dashboard_.
190 | 3. Use the features of Grafana to customize your dashboard to your
191 | preference.
192 |
193 | ### Making Customizations Persistent:
194 |
195 | - **Saving Changes in Grafana:**
196 |
197 | - After making your desired changes, save them within Grafana.
198 | - Export the dashboard JSON that Grafana provides.
199 |
200 | - **Updating the Source Code:**
201 | - If you are running KafOpticon from the source code, navigate to
202 | `grafana/Dockerfile/provisioning/dashboards`.
203 | - Replace the existing `dashboard.json` with your new one and save the
204 | changes.
205 | - Currently, this will only work if you choose to use KafOpticon's Docker
206 | route to monitor your clusters.
207 |
208 | By following these steps, you can tailor the KafOpticon monitoring experience to
209 | better suit your specific needs and preferences.
210 |
211 | # Alerts
212 |
213 | To enable automatic alerts, an email address and an app password are required. The
214 | easiest way to do this is to provide a Gmail username and an app password in the
215 | relevant fields of the alerts dashboard. After clicking the submit button, you
216 | must restart Grafana for the changes to take effect. Once Grafana has been
217 | restarted, you are free to add any email addresses you want to receive the alerts
218 | to Grafana's alert contact points. You can send a test email using the test
219 | button. Configure and customize thresholds for the alerts within Grafana.
220 | Congratulations! 🏆️ You should now be able to receive automatic email alerts
221 | about your Kafka clusters.
222 |
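Under the hood this amounts to Grafana's SMTP settings. A rough grafana.ini sketch is below; the exact fields KafOpticon writes may differ, and `smtp.gmail.com:587` assumes a Gmail account:

```ini
# Illustrative SMTP settings for email alerts; restart Grafana after editing
[smtp]
enabled = true
host = smtp.gmail.com:587
user = your-address@gmail.com
password = your-app-password
from_address = your-address@gmail.com
```
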
223 | # Road Map
224 |
225 | | Feature | Status |
226 | | --------------------------------------- | ------ |
227 | | Visualize Kafka Cluster | ✅ |
228 | | User Customized Dashboard | ⏳ |
229 | | Testing | ⏳ |
230 | | Windows Distribution | ⏳ |
231 | | Enhance Security | 🙏🏻 |
232 | | Enable Connecting to Remote Cluster | 🙏🏻 |
233 |
234 | - ✅ = Ready to use
235 | - ⏳ = In progress
236 | - 🙏🏻 = Looking for contributors
237 |
238 | # Bugs
239 |
240 | Please report bugs to the [issues](https://github.com/oslabs-beta/KafOpticon/issues) tab.
241 |
242 | # Contributing
243 |
244 | Contributions are what make the open source community a great place! If you
245 | would like to contribute to this project, take the steps below:
246 |
247 | 1. Fork the Project
248 | 2. Create a Feature Branch: use `git checkout -b newFeature`
249 |
250 | ```bash
251 | git checkout -b newFeature
252 | ```
253 |
254 | 3. Commit your Changes: use `git commit -m newFeature`
255 |
256 | ```bash
257 | git commit -m newFeature
258 | ```
259 |
260 | 4. Push to the Branch: use `git push origin newFeature`
261 |
262 | ```bash
263 | git push origin newFeature
264 | ```
265 |
266 | 5. Open a Pull Request
267 |
268 | # Contributors
269 |
270 | - https://github.com/anewatech
271 | - https://github.com/GOsorioCanales
272 | - https://github.com/kelaompachai
273 | - https://github.com/zackweiss
--------------------------------------------------------------------------------
/__tests__/addressController.js:
--------------------------------------------------------------------------------
1 | const controller = require('../main/controllers/addressController');
2 |
3 | const { writeJmxConfig1, writeJmxConfig2, connectToKafka, startPrometheus } = controller;
4 |
5 | // not sure how to test middleware that does nothing other than spawn child processes
6 |
7 | // get fs module in here so that I can mock it
8 | const fs = require('fs');
9 | jest.mock('fs', () => {
10 | return {
11 | promises: {
12 | writeFile: jest.fn().mockResolvedValue('Hello World'),
13 | readFile: jest.fn().mockResolvedValue('cheese')
14 | }}
15 | });
16 |
17 |
18 | describe ('writeJmxConfig1', () => {
19 |
20 | const req = {
21 | body: {
22 | address: 'localhost:2020'
23 | }
24 | };
25 | const res = {locals: {}};
26 | const next = jest.fn();
27 |
28 | beforeAll(() => {
29 | writeJmxConfig1(req, res, next);
30 | });
31 | describe('', () => {
32 |
33 | test('adds jmxConfig property to res.locals that is correct', () => {
34 | expect(res.locals.jmxConfig).toBe('hostPort: localhost:2020\n');
35 | });
36 |
37 | test('next is called once and only once', () => {
38 | expect(next).toHaveBeenCalledTimes(1);
39 | });
40 |
41 | })
42 |
43 | });
44 |
45 | describe('writeJmxConfig2', () => {
46 | const req = {};
47 | const res = {
48 | locals: {
49 | jmxConfig: 'hostPort: localhost:2020\n'
50 | }
51 | };
52 | const next = jest.fn();
53 |
54 | describe('handle successful fs API calls', () => {
55 | beforeAll(async () => {
56 | await writeJmxConfig2(req, res, next);
57 | });
58 |
59 | test('res.locals.jmxConfig is correct', () => {
60 | expect(res.locals.jmxConfig).toBe('hostPort: localhost:2020\ncheese');
61 | });
62 |
63 | test('next is called once and only once', () => {
64 | expect(next).toHaveBeenCalledTimes(1);
65 | });
66 | });
67 |
68 | describe('handle failed fs API calls', () => {
69 |
70 | beforeEach(() => {
71 | next.mockClear();
72 | });
73 | test('calls global error handler if readFile errors out', async () => {
74 | // how to make readFile return a rejected promise?
75 | fs.promises.readFile = jest.fn().mockRejectedValueOnce(new Error('oh no'));
76 |
77 | await writeJmxConfig2(req, res, next);
78 |
79 | expect(next).toHaveBeenLastCalledWith(expect.objectContaining({
80 | log: 'An error occurred in addressController.writeJmxConfig2',
81 | status: 422
82 | }));
83 |
84 | expect(next).toHaveBeenCalledTimes(1);
85 | });
86 |
87 | test('calls global error handler if writeFile errors out', async () => {
88 | fs.promises.writeFile = jest.fn().mockRejectedValueOnce(new Error('oh no'));
89 |
90 | await writeJmxConfig2(req, res, next);
91 |
92 | expect(next).toHaveBeenLastCalledWith(expect.objectContaining({
93 | log: 'An error occurred in addressController.writeJmxConfig2',
94 | status: 422
95 | }));
96 | expect(next).toHaveBeenCalledTimes(1);
97 | });
98 |
99 | test('next is called once and only once in the event of a readFile error', async () => {
100 | // how to do this?
101 | const next = jest.fn();
102 | fs.promises.readFile = jest.fn().mockRejectedValueOnce(new Error('oh no'));
103 | await writeJmxConfig2(req, res, next);
104 |
105 | expect(next).toHaveBeenCalledTimes(1);
106 | });
107 | });
108 | });
109 |
110 | // describe('connectToKafka', () => {
111 | // const req = {};
112 | // const res = {locals: {}};
113 | // const next = jest.fn();
114 | // describe('', () => {
115 | // test.only('confirm that jmx exporter http server has started', async () => {
116 | // // run connectToKafka
117 | // connectToKafka(req, res, next);
118 |
119 | // // wait for a second or two
120 | // const delay = (milliseconds) => {
121 | // return new Promise(resolve => {
122 | // setTimeout(resolve, milliseconds);
123 | // });
124 | // };
125 |
126 | // await delay(1000);
127 |
128 | // // send a get request to localhost:3030
129 | // const response = await fetch('http://localhost:3030');
130 | // // pass the test if the response's status is 200ok, otherwise fail the test
131 | // expect(response.status).toBe(200);
132 |
133 | // console.log(res.locals.PID);
134 | // });
135 | // });
136 | // });
--------------------------------------------------------------------------------
/__tests__/grafanaController.js:
--------------------------------------------------------------------------------
1 | const controller = require('../main/controllers/grafanaController');
2 |
3 | const { getPrometheus, createPromSource, generateDashJson, createDashboard} = controller;
4 | const { answer } = require('../__mocks__/mockDashboards');
5 |
6 | let next = jest.fn();
7 |
8 |
9 |
10 | beforeEach(() => {
11 | fetch = jest.fn(() =>
12 | Promise.resolve({
13 | json: () => Promise.resolve({uid: 'f3re6'})
14 | }));
15 |
16 | next = jest.fn();
17 | fetch.mockClear();
18 | next.mockClear();
19 | });
20 |
21 | describe('getPrometheus', () => {
22 | const req = {};
23 | const res = {
24 | locals: {}
25 | };
26 | const res2 = {
27 | locals: {
28 | prom: true
29 | }
30 | };
31 |
32 | describe('', () => {
33 | test('check whether function skips itself if res.locals.prom is true', async () => {
34 | await getPrometheus(req, res2, next);
35 | expect(next).toHaveBeenCalledTimes(1);
36 | expect(fetch).toHaveBeenCalledTimes(0);
37 | });
38 | });
39 |
40 | describe('handle successful API calls', () => {
41 | beforeEach(async () => {
42 | await getPrometheus(req, res, next);
43 | });
44 |
45 | test('the correct argument is passed to fetch', () => {
46 | expect(fetch).toHaveBeenCalledWith('http://localhost:3000/api/datasources/name/Prometheus');
47 | });
48 |
49 | test('the correct data is added to res.locals', () => {
50 | expect(res.locals.promUid).toBe('f3re6');
51 | expect(res.locals.prom).toBe(true);
52 | });
53 |
54 | test('next is called once and only once', () => {
55 | expect(next).toHaveBeenCalledTimes(1);
56 | });
57 | });
58 |
59 | describe('handle failed API calls', () => {
60 | test('calls global error handler, passing in error object', async () => {
61 | res.locals = {};
62 | fetch = jest.fn().mockRejectedValueOnce(new Error('oh no'));
63 | await getPrometheus(req, res, next);
64 | expect(next).toHaveBeenCalledTimes(1);
65 | expect(next).toHaveBeenLastCalledWith(expect.objectContaining({
66 | log: 'An error occurred in grafanaController.getPrometheus',
67 | status: 500
68 | }));
69 | });
70 | });
71 | });
72 |
73 | describe('createPromSource', () => {
74 | const req = {};
75 | const res = {
76 | locals: {
77 | prom: true
78 | }
79 | };
80 |
81 | describe('', () => {
82 | test('check whether function skips itself if res.locals.prom is true', async () => {
83 | await createPromSource(req, res, next);
84 | expect(next).toHaveBeenCalledTimes(1);
85 | expect(fetch).toHaveBeenCalledTimes(0);
86 | });
87 | });
88 |
89 |
90 | describe('handle failed API calls', () => {
91 | test('calls global error handler, passing in error object', async () => {
92 | res.locals = {};
93 | fetch = jest.fn().mockRejectedValueOnce(new Error('oh no'));
94 | await createPromSource(req, res, next);
95 | expect(next).toHaveBeenCalledTimes(1);
96 | expect(next).toHaveBeenCalledWith(expect.objectContaining({
97 | log: 'An error occurred in grafanaController.createPromSource',
98 | status: 500
99 | }));
100 | });
101 | });
102 | });
103 |
104 | // this test is not good
105 | // it will fail as soon as we change the bigDashboard.js
106 | // maybe we could use environment variables to change it
107 | describe('generateDashJson', () => {
108 |
109 | const req = {};
110 | const res = {locals: {
111 | promUid: 'testing'
112 | }};
113 |
114 | describe('', () => {
115 | test('changes uids to Prometheus\'s uid', async () => {
116 | await generateDashJson(req, res, next);
117 | expect(res.locals.dashboardJSON).toEqual(answer);
118 | });
119 | });
120 | });
121 |
122 | describe('createDashboard', () => {
123 | const req = {};
124 | const res = {locals: {
125 | dashboardJSON: 'cheesy'
126 | }};
127 |
128 | describe('', () => {
129 | test('calls the API with the generatedDashboard', async () => {
130 | await createDashboard(req, res, next);
131 | expect(fetch).toHaveBeenCalledWith('http://localhost:3000/api/dashboards/db', expect.objectContaining({
132 | headers: {
133 | 'Content-Type': 'application/json'
134 | },
135 | method: 'POST',
136 | body: JSON.stringify(res.locals.dashboardJSON)
137 | }));
138 | });
139 | });
140 |
141 | describe('handle successful API calls', () => {
142 | test('the correct data is added to res.locals and next is called only once', async () => {
143 | await createDashboard(req, res, next);
144 | expect(res.locals.grafanaResponse).toEqual({uid: 'f3re6'});
145 | expect(next).toHaveBeenCalledTimes(1);
146 | });
147 | });
148 |
149 | describe('handle failed API calls', () => {
150 | test('calls global error handler, passing in error object', async () => {
151 | fetch = jest.fn().mockRejectedValueOnce(new Error('oh no'))
152 | await createDashboard(req, res, next);
153 | expect(next).toHaveBeenCalledTimes(1);
154 | expect(next).toHaveBeenCalledWith(expect.objectContaining({
155 | log: 'An error occurred in grafanaController.createDashboard',
156 | status: 500,
157 | }));
158 | });
159 | });
160 | });
--------------------------------------------------------------------------------
/assets/Alerts.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/Alerts.gif
--------------------------------------------------------------------------------
/assets/App-Icons/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/App-Icons/.DS_Store
--------------------------------------------------------------------------------
/assets/App-Icons/Icon 1/icon_512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/App-Icons/Icon 1/icon_512x512.png
--------------------------------------------------------------------------------
/assets/App-Icons/Icon2/appIcon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/App-Icons/Icon2/appIcon.png
--------------------------------------------------------------------------------
/assets/CreateDashboard.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/CreateDashboard.gif
--------------------------------------------------------------------------------
/assets/HealthPerformance.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/HealthPerformance.gif
--------------------------------------------------------------------------------
/assets/KafOpticon Sample1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/KafOpticon Sample1.png
--------------------------------------------------------------------------------
/assets/Submit.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/Submit.gif
--------------------------------------------------------------------------------
/assets/Watchtower.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/Watchtower.png
--------------------------------------------------------------------------------
/assets/kafopticon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/kafopticon.png
--------------------------------------------------------------------------------
/assets/kafopticonBG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/kafopticonBG.png
--------------------------------------------------------------------------------
/assets/kafopticonbg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/assets/kafopticonbg.png
--------------------------------------------------------------------------------
/docker-test/consumer.js:
--------------------------------------------------------------------------------
1 | const { Kafka } = require('kafkajs');
2 |
3 | const kafka = new Kafka({
4 | clientId: 'my-consumer',
5 | brokers: ['localhost:19092', 'localhost:29092', 'localhost:39092'],
6 | });
7 |
8 | const consumer = kafka.consumer({ groupId: 'test-group' });
9 |
10 | const runConsumer = async () => {
11 | await consumer.connect();
12 | await consumer.subscribe({ topic: 'test-topic', fromBeginning: true });
13 |
14 | await consumer.run({
15 | eachMessage: async ({ topic, partition, message }) => {
16 | console.log({
17 | value: message.value.toString(),
18 | });
19 | },
20 | });
21 | };
22 |
23 | runConsumer().catch(error => {
24 | console.error('Error in consumer:', error);
25 | process.exit(1);
26 | });
27 |
--------------------------------------------------------------------------------
/docker-test/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | zookeeper:
4 | image: confluentinc/cp-zookeeper:latest
5 | container_name: zookeeper
6 | environment:
7 | ZOOKEEPER_CLIENT_PORT: 2181
8 | ZOOKEEPER_TICK_TIME: 2000
9 | ports:
10 | - '2181:2181'
11 |
12 | schema-registry:
13 | image: confluentinc/cp-schema-registry:latest
14 | hostname: schema-registry
15 | depends_on:
16 | - kafka-broker-1
17 | - kafka-broker-2
18 | - kafka-broker-3
19 | ports:
20 | - '8081:8081'
21 | environment:
22 | SCHEMA_REGISTRY_HOST_NAME: schema-registry
23 | SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
24 | SCHEMA_REGISTRY_LISTENERS: http://schema-registry:8081
25 | SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-broker-2:9092,PLAINTEXT_INTERNAL://localhost:29092
26 | SCHEMA_REGISTRY_DEBUG: 'true'
27 |
28 | kafka-broker-1:
29 | image: confluentinc/cp-kafka:latest
30 | hostname: kafka-broker-1
31 | ports:
32 | - '19092:19092'
33 | - '9991:9999'
34 | volumes:
35 | - ./jmx_prometheus_javaagent-0.20.0.jar:/usr/app/jmx_prometheus_javaagent.jar
36 | - ./kafka-jmx-exporter-config.yml:/usr/app/kafka-jmx-exporter-config.yml
37 | depends_on:
38 | - zookeeper
39 | environment:
40 | KAFKA_BROKER_ID: 1
41 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
42 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
43 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-broker-1:9092,PLAINTEXT_HOST://localhost:19092
44 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
45 | KAFKA_JMX_HOSTNAME: kafka-broker-1
46 | KAFKA_JVM_PERFORMANCE_OPTS: '-javaagent:/usr/app/jmx_prometheus_javaagent.jar=9999:/usr/app/kafka-jmx-exporter-config.yml'
47 | kafka-broker-2:
48 | image: confluentinc/cp-kafka:latest
49 | hostname: kafka-broker-2
50 | ports:
51 | - '29092:29092'
52 | - '9992:9999'
53 | volumes:
54 | - ./jmx_prometheus_javaagent-0.20.0.jar:/usr/app/jmx_prometheus_javaagent.jar
55 | - ./kafka-jmx-exporter-config.yml:/usr/app/kafka-jmx-exporter-config.yml
56 | depends_on:
57 | - zookeeper
58 | environment:
59 | KAFKA_BROKER_ID: 2
60 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
61 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
62 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-broker-2:9092,PLAINTEXT_HOST://localhost:29092
63 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
64 | KAFKA_JMX_HOSTNAME: kafka-broker-2
65 | KAFKA_JVM_PERFORMANCE_OPTS: '-javaagent:/usr/app/jmx_prometheus_javaagent.jar=9999:/usr/app/kafka-jmx-exporter-config.yml'
66 | kafka-broker-3:
67 | image: confluentinc/cp-kafka:latest
68 | hostname: kafka-broker-3
69 | ports:
70 | - '39092:39092'
71 | - '9993:9999'
72 | volumes:
73 | - ./jmx_prometheus_javaagent-0.20.0.jar:/usr/app/jmx_prometheus_javaagent.jar
74 | - ./kafka-jmx-exporter-config.yml:/usr/app/kafka-jmx-exporter-config.yml
75 | depends_on:
76 | - zookeeper
77 | environment:
78 | KAFKA_BROKER_ID: 3
79 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
80 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
81 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-broker-3:9092,PLAINTEXT_HOST://localhost:39092
82 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
83 | KAFKA_JMX_HOSTNAME: kafka-broker-3
84 | KAFKA_JVM_PERFORMANCE_OPTS: '-javaagent:/usr/app/jmx_prometheus_javaagent.jar=9999:/usr/app/kafka-jmx-exporter-config.yml'
85 |
--------------------------------------------------------------------------------
/docker-test/jmx_prometheus_javaagent-0.20.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/docker-test/jmx_prometheus_javaagent-0.20.0.jar
--------------------------------------------------------------------------------
/docker-test/kafka-jmx-exporter-config.yml:
--------------------------------------------------------------------------------
1 | startDelaySeconds: 0
2 | ssl: false
3 | lowercaseOutputName: false
4 | lowercaseOutputLabelNames: false
5 |
6 | rules:
7 | # Special cases and very specific rules
8 | - pattern:
9 |     kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+),
10 |     partition=(.*)><>Value
11 |   name: kafka_server_$1_$2
12 |   type: GAUGE
13 |   labels:
14 |     clientId: '$3'
15 |     topic: '$4'
16 |     partition: '$5'
17 | - pattern:
18 |     kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+),
19 |     brokerPort=(.+)><>Value
20 |   name: kafka_server_$1_$2
21 |   type: GAUGE
22 |   labels:
23 |     clientId: '$3'
24 |     broker: '$4:$5'
25 | - pattern: kafka.coordinator.(\w+)<type=(.+), name=(.+)><>Value
26 |   name: kafka_coordinator_$1_$2_$3
27 |   type: GAUGE
28 |
29 | # Generic per-second counters with 0-2 key/value pairs
30 | - pattern:
31 |     kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
32 |   name: kafka_$1_$2_$3_total
33 |   type: COUNTER
34 |   labels:
35 |     '$4': '$5'
36 |     '$6': '$7'
37 | - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
38 |   name: kafka_$1_$2_$3_total
39 |   type: COUNTER
40 |   labels:
41 |     '$4': '$5'
42 | - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
43 |   name: kafka_$1_$2_$3_total
44 |   type: COUNTER
45 |
46 | # Quota specific rules
47 | - pattern: kafka.server<type=(.+), user=(.+), client-id=(.+)><>([a-z-]+)
48 |   name: kafka_server_quota_$4
49 |   type: GAUGE
50 |   labels:
51 |     resource: '$1'
52 |     user: '$2'
53 |     clientId: '$3'
54 | - pattern: kafka.server<type=(.+), client-id=(.+)><>([a-z-]+)
55 |   name: kafka_server_quota_$3
56 |   type: GAUGE
57 |   labels:
58 |     resource: '$1'
59 |     clientId: '$2'
60 | - pattern: kafka.server<type=(.+), user=(.+)><>([a-z-]+)
61 |   name: kafka_server_quota_$3
62 |   type: GAUGE
63 |   labels:
64 |     resource: '$1'
65 |     user: '$2'
66 |
67 | # Generic gauges with 0-2 key/value pairs
68 | - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
69 |   name: kafka_$1_$2_$3
70 |   type: GAUGE
71 |   labels:
72 |     '$4': '$5'
73 |     '$6': '$7'
74 | - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
75 |   name: kafka_$1_$2_$3
76 |   type: GAUGE
77 |   labels:
78 |     '$4': '$5'
79 | - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
80 |   name: kafka_$1_$2_$3
81 |   type: GAUGE
82 |
83 | # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
84 | #
85 | # Note that these are missing the '_sum' metric!
86 | - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
87 |   name: kafka_$1_$2_$3_count
88 |   type: COUNTER
89 |   labels:
90 |     '$4': '$5'
91 |     '$6': '$7'
92 | - pattern:
93 |     kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
94 |   name: kafka_$1_$2_$3
95 |   type: GAUGE
96 |   labels:
97 |     '$4': '$5'
98 |     '$6': '$7'
99 |     quantile: '0.$8'
100 | - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
101 |   name: kafka_$1_$2_$3_count
102 |   type: COUNTER
103 |   labels:
104 |     '$4': '$5'
105 | - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
106 |   name: kafka_$1_$2_$3
107 |   type: GAUGE
108 |   labels:
109 |     '$4': '$5'
110 |     quantile: '0.$6'
111 | - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
112 |   name: kafka_$1_$2_$3_count
113 |   type: COUNTER
114 | - pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
115 |   name: kafka_$1_$2_$3
116 |   type: GAUGE
117 |   labels:
118 |     quantile: '0.$4'
119 |
120 | # Generic gauges for MeanRate Percent
121 | # Ex) kafka.server<type=KafkaRequestHandlerPool, name=RequestHandlerAvgIdlePercent><>MeanRate
122 | - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
123 |   name: kafka_$1_$2_$3_percent
124 |   type: GAUGE
125 | - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
126 |   name: kafka_$1_$2_$3_percent
127 |   type: GAUGE
128 | - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
129 |   name: kafka_$1_$2_$3_percent
130 |   type: GAUGE
131 |   labels:
132 |     '$4': '$5'
--------------------------------------------------------------------------------
/docker-test/producer.js:
--------------------------------------------------------------------------------
1 | const { Kafka } = require('kafkajs');
2 |
3 | const kafka = new Kafka({
4 | clientId: 'my-producer',
5 | brokers: ['localhost:19092', 'localhost:29092', 'localhost:39092'],
6 | });
7 |
8 | const producer = kafka.producer();
9 |
10 | const runProducer = async () => {
11 | await producer.connect();
12 | let messageCount = 0;
13 |
14 | setInterval(async () => {
15 | try {
16 | await producer.send({
17 | topic: 'test-topic',
18 | messages: [
19 | { value: `Hello KafkaJS user! Message number ${++messageCount}` },
20 | ],
21 | });
22 |
23 | console.log(`Sent message number ${messageCount}`);
24 | } catch (error) {
25 | console.error('Error sending message:', error);
26 | process.exit(1);
27 | }
28 | }, 1000);
29 | };
30 |
31 | runProducer().catch(error => {
32 | console.error('Error in producer:', error);
33 | process.exit(1);
34 | });
35 |
--------------------------------------------------------------------------------
/docker-test/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s
3 | evaluation_interval: 15s
4 |
5 | scrape_configs:
6 | - job_name: 'kafka'
7 | static_configs:
8 | - targets:
9 | ['kafka-broker-1:9999', 'kafka-broker-2:9999', 'kafka-broker-3:9999']
10 |
--------------------------------------------------------------------------------
/grafana/Dockerfile/provisioning/dashboards/all.yml:
--------------------------------------------------------------------------------
1 | # Sets up Prometheus as a data source for Grafana and specifies where dashboards are stored
2 | apiVersion: 1
3 |
4 | providers:
5 | - name: 'Prometheus'
6 | orgId: 1
7 | folder: ''
8 | type: file
9 | disableDeletion: false
10 | editable: false
11 | allowUiUpdates: true
12 | options:
13 | path: /etc/grafana/provisioning/dashboards
--------------------------------------------------------------------------------
/grafana/Dockerfile/provisioning/dashboards/dashboard.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "grafana",
8 | "uid": "-- Grafana --"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "type": "dashboard"
15 | }
16 | ]
17 | },
18 | "editable": true,
19 | "fiscalYearStartMonth": 0,
20 | "graphTooltip": 0,
21 | "id": 1,
22 | "links": [],
23 | "liveNow": false,
24 | "panels": [
25 | {
26 | "datasource": {
27 | "type": "prometheus",
28 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
29 | },
30 | "description": "Subset of replicas for a partition that are considered to be in sync with the leader replica. ",
31 | "fieldConfig": {
32 | "defaults": {
33 | "color": {
34 | "mode": "palette-classic"
35 | },
36 | "custom": {
37 | "axisBorderShow": false,
38 | "axisCenteredZero": false,
39 | "axisColorMode": "text",
40 | "axisLabel": "",
41 | "axisPlacement": "auto",
42 | "barAlignment": 0,
43 | "drawStyle": "line",
44 | "fillOpacity": 0,
45 | "gradientMode": "none",
46 | "hideFrom": {
47 | "legend": false,
48 | "tooltip": false,
49 | "viz": false
50 | },
51 | "insertNulls": false,
52 | "lineInterpolation": "linear",
53 | "lineWidth": 1,
54 | "pointSize": 5,
55 | "scaleDistribution": {
56 | "type": "linear"
57 | },
58 | "showPoints": "auto",
59 | "spanNulls": false,
60 | "stacking": {
61 | "group": "A",
62 | "mode": "none"
63 | },
64 | "thresholdsStyle": {
65 | "mode": "off"
66 | }
67 | },
68 | "mappings": [],
69 | "thresholds": {
70 | "mode": "absolute",
71 | "steps": [
72 | {
73 | "color": "green",
74 | "value": null
75 | },
76 | {
77 | "color": "red",
78 | "value": 80
79 | }
80 | ]
81 | }
82 | },
83 | "overrides": []
84 | },
85 | "gridPos": {
86 | "h": 8,
87 | "w": 12,
88 | "x": 0,
89 | "y": 0
90 | },
91 | "id": 25,
92 | "options": {
93 | "legend": {
94 | "calcs": [],
95 | "displayMode": "list",
96 | "placement": "bottom",
97 | "showLegend": true
98 | },
99 | "tooltip": {
100 | "mode": "single",
101 | "sort": "none"
102 | }
103 | },
104 | "targets": [
105 | {
106 | "datasource": {
107 | "type": "prometheus",
108 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
109 | },
110 | "disableTextWrap": false,
111 | "editorMode": "builder",
112 | "expr": "kafka_cluster_Partition_InSyncReplicasCount{topic!=\"__consumer_offsets\"}",
113 | "fullMetaSearch": false,
114 | "includeNullMetadata": true,
115 | "instant": false,
116 | "legendFormat": "{{instance}}, partition: {{partition}}, topic: {{topic}}",
117 | "range": true,
118 | "refId": "A",
119 | "useBackend": false
120 | }
121 | ],
122 | "title": "In Sync Replicas Count",
123 | "type": "timeseries"
124 | },
125 | {
126 | "datasource": {
127 | "type": "prometheus",
128 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
129 | },
130 | "description": "Number of unreplicated partitions",
131 | "fieldConfig": {
132 | "defaults": {
133 | "color": {
134 | "mode": "continuous-BlPu"
135 | },
136 | "custom": {
137 | "axisBorderShow": false,
138 | "axisCenteredZero": false,
139 | "axisColorMode": "text",
140 | "axisLabel": "",
141 | "axisPlacement": "auto",
142 | "barAlignment": 0,
143 | "drawStyle": "line",
144 | "fillOpacity": 3,
145 | "gradientMode": "none",
146 | "hideFrom": {
147 | "legend": false,
148 | "tooltip": false,
149 | "viz": false
150 | },
151 | "insertNulls": false,
152 | "lineInterpolation": "linear",
153 | "lineStyle": {
154 | "fill": "solid"
155 | },
156 | "lineWidth": 2,
157 | "pointSize": 7,
158 | "scaleDistribution": {
159 | "type": "linear"
160 | },
161 | "showPoints": "auto",
162 | "spanNulls": false,
163 | "stacking": {
164 | "group": "A",
165 | "mode": "none"
166 | },
167 | "thresholdsStyle": {
168 | "mode": "off"
169 | }
170 | },
171 | "mappings": [],
172 | "thresholds": {
173 | "mode": "absolute",
174 | "steps": [
175 | {
176 | "color": "green",
177 | "value": null
178 | },
179 | {
180 | "color": "red",
181 | "value": 80
182 | }
183 | ]
184 | }
185 | },
186 | "overrides": []
187 | },
188 | "gridPos": {
189 | "h": 8,
190 | "w": 12,
191 | "x": 12,
192 | "y": 0
193 | },
194 | "id": 2,
195 | "options": {
196 | "legend": {
197 | "calcs": [],
198 | "displayMode": "list",
199 | "placement": "bottom",
200 | "showLegend": true
201 | },
202 | "tooltip": {
203 | "mode": "single",
204 | "sort": "none"
205 | }
206 | },
207 | "targets": [
208 | {
209 | "datasource": {
210 | "type": "prometheus",
211 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
212 | },
213 | "disableTextWrap": false,
214 | "editorMode": "builder",
215 | "expr": "kafka_cluster_Partition_UnderReplicated{topic!=\"__consumer_offsets\"}",
216 | "fullMetaSearch": false,
217 | "includeNullMetadata": true,
218 | "instant": false,
219 | "legendFormat": "{{instance}}, partition: {{partition}}, topic: {{topic}}",
220 | "range": true,
221 | "refId": "A",
222 | "useBackend": false
223 | }
224 | ],
225 | "title": "Under Replicated Partitions",
226 | "type": "timeseries"
227 | },
228 | {
229 | "datasource": {
230 | "type": "prometheus",
231 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
232 | },
233 | "description": "Number of copies or replicas of a partition that are maintained across different broker nodes in a Kafka cluster. ",
234 | "fieldConfig": {
235 | "defaults": {
236 | "color": {
237 | "mode": "continuous-BlYlRd"
238 | },
239 | "custom": {
240 | "axisBorderShow": false,
241 | "axisCenteredZero": false,
242 | "axisColorMode": "text",
243 | "axisLabel": "",
244 | "axisPlacement": "auto",
245 | "barAlignment": 0,
246 | "drawStyle": "line",
247 | "fillOpacity": 0,
248 | "gradientMode": "none",
249 | "hideFrom": {
250 | "legend": false,
251 | "tooltip": false,
252 | "viz": false
253 | },
254 | "insertNulls": false,
255 | "lineInterpolation": "linear",
256 | "lineWidth": 1,
257 | "pointSize": 5,
258 | "scaleDistribution": {
259 | "type": "linear"
260 | },
261 | "showPoints": "auto",
262 | "spanNulls": false,
263 | "stacking": {
264 | "group": "A",
265 | "mode": "none"
266 | },
267 | "thresholdsStyle": {
268 | "mode": "off"
269 | }
270 | },
271 | "mappings": [],
272 | "thresholds": {
273 | "mode": "absolute",
274 | "steps": [
275 | {
276 | "color": "green",
277 | "value": null
278 | },
279 | {
280 | "color": "red",
281 | "value": 80
282 | }
283 | ]
284 | }
285 | },
286 | "overrides": []
287 | },
288 | "gridPos": {
289 | "h": 8,
290 | "w": 12,
291 | "x": 0,
292 | "y": 8
293 | },
294 | "id": 24,
295 | "options": {
296 | "legend": {
297 | "calcs": [],
298 | "displayMode": "list",
299 | "placement": "bottom",
300 | "showLegend": true
301 | },
302 | "tooltip": {
303 | "mode": "single",
304 | "sort": "none"
305 | }
306 | },
307 | "targets": [
308 | {
309 | "datasource": {
310 | "type": "prometheus",
311 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
312 | },
313 | "disableTextWrap": false,
314 | "editorMode": "builder",
315 | "expr": "kafka_cluster_Partition_ReplicasCount{topic!=\"__consumer_offsets\"}",
316 | "fullMetaSearch": false,
317 | "includeNullMetadata": true,
318 | "instant": false,
319 | "legendFormat": "{{instance}}, partition: {{partition}}, topic: {{topic}}",
320 | "range": true,
321 | "refId": "A",
322 | "useBackend": false
323 | }
324 | ],
325 | "title": "Replicas Count",
326 | "type": "timeseries"
327 | },
328 | {
329 | "datasource": {
330 | "type": "prometheus",
331 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
332 | },
333 | "description": "Number of “unclean” elections per ms",
334 | "fieldConfig": {
335 | "defaults": {
336 | "color": {
337 | "mode": "continuous-BlPu"
338 | },
339 | "mappings": [],
340 | "thresholds": {
341 | "mode": "absolute",
342 | "steps": [
343 | {
344 | "color": "green",
345 | "value": null
346 | },
347 | {
348 | "color": "red",
349 | "value": 80
350 | }
351 | ]
352 | },
353 | "unit": "short"
354 | },
355 | "overrides": []
356 | },
357 | "gridPos": {
358 | "h": 8,
359 | "w": 12,
360 | "x": 12,
361 | "y": 8
362 | },
363 | "id": 4,
364 | "options": {
365 | "colorMode": "background",
366 | "graphMode": "none",
367 | "justifyMode": "auto",
368 | "orientation": "auto",
369 | "reduceOptions": {
370 | "calcs": ["lastNotNull"],
371 | "fields": "",
372 | "values": false
373 | },
374 | "textMode": "auto",
375 | "wideLayout": true
376 | },
377 | "pluginVersion": "10.2.2",
378 | "targets": [
379 | {
380 | "datasource": {
381 | "type": "prometheus",
382 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
383 | },
384 | "disableTextWrap": false,
385 | "editorMode": "builder",
386 | "expr": "kafka_controller_ControllerStats_UncleanLeaderElectionEnableRateAndTimeMs_count",
387 | "fullMetaSearch": false,
388 | "includeNullMetadata": true,
389 | "instant": false,
390 | "legendFormat": "{{instance}}",
391 | "range": true,
392 | "refId": "A",
393 | "useBackend": false
394 | }
395 | ],
396 | "title": "Unclean Leader Election Rate Ms",
397 | "type": "stat"
398 | },
399 | {
400 | "datasource": {
401 | "type": "prometheus",
402 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
403 | },
404 | "description": "Measures the total number of network-related errors that occur during communication between clients and the Kafka broker.",
405 | "fieldConfig": {
406 | "defaults": {
407 | "color": {
408 | "mode": "palette-classic"
409 | },
410 | "custom": {
411 | "hideFrom": {
412 | "legend": false,
413 | "tooltip": false,
414 | "viz": false
415 | }
416 | },
417 | "mappings": []
418 | },
419 | "overrides": []
420 | },
421 | "gridPos": {
422 | "h": 8,
423 | "w": 12,
424 | "x": 0,
425 | "y": 16
426 | },
427 | "id": 23,
428 | "options": {
429 | "displayLabels": ["percent"],
430 | "legend": {
431 | "calcs": [],
432 | "displayMode": "hidden",
433 | "placement": "right",
434 | "showLegend": false,
435 | "values": []
436 | },
437 | "pieType": "donut",
438 | "reduceOptions": {
439 | "calcs": ["lastNotNull"],
440 | "fields": "",
441 | "values": false
442 | },
443 | "tooltip": {
444 | "mode": "single",
445 | "sort": "none"
446 | }
447 | },
448 | "targets": [
449 | {
450 | "datasource": {
451 | "type": "prometheus",
452 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
453 | },
454 | "disableTextWrap": false,
455 | "editorMode": "builder",
456 | "expr": "kafka_network_RequestMetrics_Errors_total",
457 | "fullMetaSearch": false,
458 | "includeNullMetadata": true,
459 | "instant": false,
460 | "legendFormat": "{{instance}}, Error: {{error}}, Request: {{request}}",
461 | "range": true,
462 | "refId": "A",
463 | "useBackend": false
464 | }
465 | ],
466 | "title": "Network Error Total",
467 | "type": "piechart"
468 | },
469 | {
470 | "datasource": {
471 | "type": "prometheus",
472 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
473 | },
474 | "description": "Total time (in ms) to serve the specified request (Produce/Fetch)",
475 | "fieldConfig": {
476 | "defaults": {
477 | "color": {
478 | "mode": "palette-classic"
479 | },
480 | "custom": {
481 | "axisBorderShow": false,
482 | "axisCenteredZero": false,
483 | "axisColorMode": "text",
484 | "axisLabel": "",
485 | "axisPlacement": "auto",
486 | "barAlignment": 0,
487 | "drawStyle": "line",
488 | "fillOpacity": 0,
489 | "gradientMode": "none",
490 | "hideFrom": {
491 | "legend": false,
492 | "tooltip": false,
493 | "viz": false
494 | },
495 | "insertNulls": false,
496 | "lineInterpolation": "linear",
497 | "lineWidth": 1,
498 | "pointSize": 5,
499 | "scaleDistribution": {
500 | "type": "linear"
501 | },
502 | "showPoints": "auto",
503 | "spanNulls": false,
504 | "stacking": {
505 | "group": "A",
506 | "mode": "none"
507 | },
508 | "thresholdsStyle": {
509 | "mode": "off"
510 | }
511 | },
512 | "mappings": [],
513 | "thresholds": {
514 | "mode": "absolute",
515 | "steps": [
516 | {
517 | "color": "green",
518 | "value": null
519 | },
520 | {
521 | "color": "red",
522 | "value": 80
523 | }
524 | ]
525 | }
526 | },
527 | "overrides": []
528 | },
529 | "gridPos": {
530 | "h": 8,
531 | "w": 12,
532 | "x": 12,
533 | "y": 16
534 | },
535 | "id": 5,
536 | "options": {
537 | "legend": {
538 | "calcs": [],
539 | "displayMode": "list",
540 | "placement": "bottom",
541 | "showLegend": true
542 | },
543 | "tooltip": {
544 | "mode": "single",
545 | "sort": "none"
546 | }
547 | },
548 | "targets": [
549 | {
550 | "datasource": {
551 | "type": "prometheus",
552 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
553 | },
554 | "disableTextWrap": false,
555 | "editorMode": "builder",
556 | "expr": "kafka_network_RequestMetrics_TotalTimeMs",
557 | "fullMetaSearch": false,
558 | "includeNullMetadata": true,
559 | "instant": false,
560 | "legendFormat": "{{instance}}, quantile: {{quantile}}, request: {{request}}",
561 | "range": true,
562 | "refId": "A",
563 | "useBackend": false
564 | }
565 | ],
566 | "title": "Total Time Ms",
567 | "type": "timeseries"
568 | },
569 | {
570 | "datasource": {
571 | "type": "prometheus",
572 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
573 | },
574 | "description": "Measures the total number of network requests made to the Kafka broker.",
575 | "fieldConfig": {
576 | "defaults": {
577 | "color": {
578 | "mode": "palette-classic"
579 | },
580 | "custom": {
581 | "hideFrom": {
582 | "legend": false,
583 | "tooltip": false,
584 | "viz": false
585 | }
586 | },
587 | "mappings": []
588 | },
589 | "overrides": []
590 | },
591 | "gridPos": {
592 | "h": 8,
593 | "w": 12,
594 | "x": 0,
595 | "y": 24
596 | },
597 | "id": 22,
598 | "options": {
599 | "displayLabels": ["percent"],
600 | "legend": {
601 | "calcs": [],
602 | "displayMode": "hidden",
603 | "placement": "right",
604 | "showLegend": false,
605 | "values": []
606 | },
607 | "pieType": "pie",
608 | "reduceOptions": {
609 | "calcs": ["lastNotNull"],
610 | "fields": "",
611 | "values": false
612 | },
613 | "tooltip": {
614 | "mode": "single",
615 | "sort": "none"
616 | }
617 | },
618 | "targets": [
619 | {
620 | "datasource": {
621 | "type": "prometheus",
622 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
623 | },
624 | "disableTextWrap": false,
625 | "editorMode": "builder",
626 | "expr": "kafka_network_RequestMetrics_Requests_total",
627 | "fullMetaSearch": false,
628 | "includeNullMetadata": true,
629 | "instant": false,
630 | "legendFormat": "{{instance}}, request: {{request}}, version: {{version}}",
631 | "range": true,
632 | "refId": "A",
633 | "useBackend": false
634 | }
635 | ],
636 | "title": "Request Total",
637 | "type": "piechart"
638 | },
639 | {
640 | "datasource": {
641 | "type": "prometheus",
642 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
643 | },
644 | "description": "Number of failed partitions",
645 | "fieldConfig": {
646 | "defaults": {
647 | "color": {
648 | "mode": "continuous-BlYlRd"
649 | },
650 | "mappings": [],
651 | "thresholds": {
652 | "mode": "absolute",
653 | "steps": [
654 | {
655 | "color": "green",
656 | "value": null
657 | },
658 | {
659 | "color": "red",
660 | "value": 80
661 | }
662 | ]
663 | },
664 | "unit": "short"
665 | },
666 | "overrides": []
667 | },
668 | "gridPos": {
669 | "h": 8,
670 | "w": 12,
671 | "x": 12,
672 | "y": 24
673 | },
674 | "id": 9,
675 | "options": {
676 | "colorMode": "background",
677 | "graphMode": "none",
678 | "justifyMode": "auto",
679 | "orientation": "auto",
680 | "reduceOptions": {
681 | "calcs": ["lastNotNull"],
682 | "fields": "",
683 | "values": false
684 | },
685 | "textMode": "auto",
686 | "wideLayout": true
687 | },
688 | "pluginVersion": "10.2.2",
689 | "targets": [
690 | {
691 | "datasource": {
692 | "type": "prometheus",
693 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
694 | },
695 | "disableTextWrap": false,
696 | "editorMode": "builder",
697 | "expr": "kafka_server_ReplicaFetcherManager_FailedPartitionsCount",
698 | "fullMetaSearch": false,
699 | "includeNullMetadata": true,
700 | "instant": false,
701 | "legendFormat": "{{instance}}, clientId: {{clientId}}",
702 | "range": true,
703 | "refId": "A",
704 | "useBackend": false
705 | }
706 | ],
707 | "title": "Failed Partitions Count",
708 | "type": "stat"
709 | },
710 | {
711 | "datasource": {
712 | "type": "prometheus",
713 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
714 | },
715 | "description": "Number of messages consumer is behind producer on this partition",
716 | "fieldConfig": {
717 | "defaults": {
718 | "color": {
719 | "mode": "continuous-YlBl"
720 | },
721 | "custom": {
722 | "axisBorderShow": false,
723 | "axisCenteredZero": false,
724 | "axisColorMode": "text",
725 | "axisLabel": "",
726 | "axisPlacement": "auto",
727 | "barAlignment": 0,
728 | "drawStyle": "line",
729 | "fillOpacity": 0,
730 | "gradientMode": "none",
731 | "hideFrom": {
732 | "legend": false,
733 | "tooltip": false,
734 | "viz": false
735 | },
736 | "insertNulls": false,
737 | "lineInterpolation": "linear",
738 | "lineWidth": 1,
739 | "pointSize": 5,
740 | "scaleDistribution": {
741 | "type": "linear"
742 | },
743 | "showPoints": "auto",
744 | "spanNulls": false,
745 | "stacking": {
746 | "group": "A",
747 | "mode": "none"
748 | },
749 | "thresholdsStyle": {
750 | "mode": "off"
751 | }
752 | },
753 | "mappings": [],
754 | "thresholds": {
755 | "mode": "absolute",
756 | "steps": [
757 | {
758 | "color": "green",
759 | "value": null
760 | },
761 | {
762 | "color": "red",
763 | "value": 80
764 | }
765 | ]
766 | }
767 | },
768 | "overrides": []
769 | },
770 | "gridPos": {
771 | "h": 8,
772 | "w": 12,
773 | "x": 0,
774 | "y": 32
775 | },
776 | "id": 21,
777 | "options": {
778 | "legend": {
779 | "calcs": [],
780 | "displayMode": "list",
781 | "placement": "bottom",
782 | "showLegend": true
783 | },
784 | "tooltip": {
785 | "mode": "single",
786 | "sort": "none"
787 | }
788 | },
789 | "targets": [
790 | {
791 | "datasource": {
792 | "type": "prometheus",
793 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
794 | },
795 | "disableTextWrap": false,
796 | "editorMode": "builder",
797 | "expr": "kafka_server_FetcherLagMetrics_ConsumerLag{topic!=\"__consumer_offsets\"}",
798 | "fullMetaSearch": false,
799 | "includeNullMetadata": true,
800 | "instant": false,
801 | "legendFormat": "{{instance}}, clientId: {{clientId}}, partition: {{partition}}, topic: {{topic}}",
802 | "range": true,
803 | "refId": "A",
804 | "useBackend": false
805 | }
806 | ],
807 | "title": "Consumer's Lag",
808 | "type": "timeseries"
809 | },
810 | {
811 | "datasource": {
812 | "type": "prometheus",
813 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
814 | },
815 | "description": "Number of (producer|consumer|follower) requests per second",
816 | "fieldConfig": {
817 | "defaults": {
818 | "color": {
819 | "mode": "continuous-BlYlRd"
820 | },
821 | "custom": {
822 | "axisBorderShow": false,
823 | "axisCenteredZero": false,
824 | "axisColorMode": "text",
825 | "axisLabel": "",
826 | "axisPlacement": "auto",
827 | "barAlignment": 0,
828 | "drawStyle": "line",
829 | "fillOpacity": 3,
830 | "gradientMode": "none",
831 | "hideFrom": {
832 | "legend": false,
833 | "tooltip": false,
834 | "viz": false
835 | },
836 | "insertNulls": false,
837 | "lineInterpolation": "linear",
838 | "lineStyle": {
839 | "dash": [10, 10],
840 | "fill": "dash"
841 | },
842 | "lineWidth": 3,
843 | "pointSize": 5,
844 | "scaleDistribution": {
845 | "type": "linear"
846 | },
847 | "showPoints": "auto",
848 | "spanNulls": false,
849 | "stacking": {
850 | "group": "A",
851 | "mode": "none"
852 | },
853 | "thresholdsStyle": {
854 | "mode": "off"
855 | }
856 | },
857 | "mappings": [],
858 | "thresholds": {
859 | "mode": "absolute",
860 | "steps": [
861 | {
862 | "color": "green",
863 | "value": null
864 | },
865 | {
866 | "color": "red",
867 | "value": 80
868 | }
869 | ]
870 | }
871 | },
872 | "overrides": []
873 | },
874 | "gridPos": {
875 | "h": 8,
876 | "w": 12,
877 | "x": 12,
878 | "y": 32
879 | },
880 | "id": 10,
881 | "options": {
882 | "legend": {
883 | "calcs": [],
884 | "displayMode": "list",
885 | "placement": "bottom",
886 | "showLegend": true
887 | },
888 | "tooltip": {
889 | "mode": "single",
890 | "sort": "none"
891 | }
892 | },
893 | "targets": [
894 | {
895 | "datasource": {
896 | "type": "prometheus",
897 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
898 | },
899 | "disableTextWrap": false,
900 | "editorMode": "builder",
901 | "expr": "kafka_network_RequestMetrics_TotalTimeMs",
902 | "fullMetaSearch": false,
903 | "includeNullMetadata": true,
904 | "instant": false,
905 | "legendFormat": "{{instance}}, quantile: {{quantile}}, request: {{request}}",
906 | "range": true,
907 | "refId": "A",
908 | "useBackend": false
909 | }
910 | ],
911 | "title": "Total Request x Ms",
912 | "type": "timeseries"
913 | },
914 | {
915 | "datasource": {
916 | "type": "prometheus",
917 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
918 | },
919 | "description": "Producer failed to send message to broker.",
920 | "fieldConfig": {
921 | "defaults": {
922 | "color": {
923 | "mode": "continuous-BlYlRd"
924 | },
925 | "custom": {
926 | "axisBorderShow": false,
927 | "axisCenteredZero": false,
928 | "axisColorMode": "text",
929 | "axisLabel": "",
930 | "axisPlacement": "auto",
931 | "barAlignment": 0,
932 | "drawStyle": "line",
933 | "fillOpacity": 0,
934 | "gradientMode": "none",
935 | "hideFrom": {
936 | "legend": false,
937 | "tooltip": false,
938 | "viz": false
939 | },
940 | "insertNulls": false,
941 | "lineInterpolation": "linear",
942 | "lineWidth": 1,
943 | "pointSize": 5,
944 | "scaleDistribution": {
945 | "type": "linear"
946 | },
947 | "showPoints": "auto",
948 | "spanNulls": false,
949 | "stacking": {
950 | "group": "A",
951 | "mode": "none"
952 | },
953 | "thresholdsStyle": {
954 | "mode": "off"
955 | }
956 | },
957 | "mappings": [],
958 | "thresholds": {
959 | "mode": "absolute",
960 | "steps": [
961 | {
962 | "color": "green",
963 | "value": null
964 | },
965 | {
966 | "color": "red",
967 | "value": 80
968 | }
969 | ]
970 | }
971 | },
972 | "overrides": []
973 | },
974 | "gridPos": {
975 | "h": 8,
976 | "w": 12,
977 | "x": 0,
978 | "y": 40
979 | },
980 | "id": 20,
981 | "options": {
982 | "legend": {
983 | "calcs": [],
984 | "displayMode": "list",
985 | "placement": "bottom",
986 | "showLegend": true
987 | },
988 | "tooltip": {
989 | "mode": "single",
990 | "sort": "none"
991 | }
992 | },
993 | "targets": [
994 | {
995 | "datasource": {
996 | "type": "prometheus",
997 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
998 | },
999 | "disableTextWrap": false,
1000 | "editorMode": "builder",
1001 | "expr": "kafka_server_BrokerTopicMetrics_FailedProduceRequests_total",
1002 | "fullMetaSearch": false,
1003 | "includeNullMetadata": true,
1004 | "instant": false,
1005 | "legendFormat": "{{instance}}",
1006 | "range": true,
1007 | "refId": "A",
1008 | "useBackend": false
1009 | }
1010 | ],
1011 | "title": "Failed Producer Request",
1012 | "type": "timeseries"
1013 | },
1014 | {
1015 | "datasource": {
1016 | "type": "prometheus",
1017 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1018 | },
1019 | "description": "Aggregate incoming bytes",
1020 | "fieldConfig": {
1021 | "defaults": {
1022 | "color": {
1023 | "mode": "continuous-BlYlRd"
1024 | },
1025 | "custom": {
1026 | "axisBorderShow": false,
1027 | "axisCenteredZero": false,
1028 | "axisColorMode": "text",
1029 | "axisLabel": "",
1030 | "axisPlacement": "auto",
1031 | "barAlignment": 0,
1032 | "drawStyle": "line",
1033 | "fillOpacity": 25,
1034 | "gradientMode": "none",
1035 | "hideFrom": {
1036 | "legend": false,
1037 | "tooltip": false,
1038 | "viz": false
1039 | },
1040 | "insertNulls": false,
1041 | "lineInterpolation": "linear",
1042 | "lineWidth": 1,
1043 | "pointSize": 5,
1044 | "scaleDistribution": {
1045 | "type": "linear"
1046 | },
1047 | "showPoints": "auto",
1048 | "spanNulls": false,
1049 | "stacking": {
1050 | "group": "A",
1051 | "mode": "percent"
1052 | },
1053 | "thresholdsStyle": {
1054 | "mode": "off"
1055 | }
1056 | },
1057 | "mappings": [],
1058 | "thresholds": {
1059 | "mode": "absolute",
1060 | "steps": [
1061 | {
1062 | "color": "green",
1063 | "value": null
1064 | },
1065 | {
1066 | "color": "red",
1067 | "value": 80
1068 | }
1069 | ]
1070 | }
1071 | },
1072 | "overrides": []
1073 | },
1074 | "gridPos": {
1075 | "h": 8,
1076 | "w": 12,
1077 | "x": 12,
1078 | "y": 40
1079 | },
1080 | "id": 6,
1081 | "options": {
1082 | "legend": {
1083 | "calcs": [],
1084 | "displayMode": "hidden",
1085 | "placement": "right",
1086 | "showLegend": false
1087 | },
1088 | "tooltip": {
1089 | "mode": "single",
1090 | "sort": "none"
1091 | }
1092 | },
1093 | "targets": [
1094 | {
1095 | "datasource": {
1096 | "type": "prometheus",
1097 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1098 | },
1099 | "disableTextWrap": false,
1100 | "editorMode": "builder",
1101 | "expr": "label_replace(label_replace(kafka_server_BrokerTopicMetrics_BytesIn_total, \"formatted_topic\", \"TOTAL\", \"topic\", \"^$\"), \"formatted_topic\", \"$1\", \"topic\", \"(.+)\")",
1102 | "fullMetaSearch": false,
1103 | "includeNullMetadata": true,
1104 | "instant": false,
1105 | "legendFormat": "{{instance}}, topic: {{formatted_topic}}",
1106 | "range": true,
1107 | "refId": "A",
1108 | "useBackend": false
1109 | }
1110 | ],
1111 | "title": "Broker Bytes In Total",
1112 | "type": "timeseries"
1113 | },
1114 | {
1115 | "datasource": {
1116 | "type": "prometheus",
1117 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1118 | },
1119 | "description": "The total sum of young or old garbage collection processes executed by the JVM",
1120 | "fieldConfig": {
1121 | "defaults": {
1122 | "color": {
1123 | "mode": "continuous-BlPu"
1124 | },
1125 | "custom": {
1126 | "axisBorderShow": false,
1127 | "axisCenteredZero": false,
1128 | "axisColorMode": "text",
1129 | "axisLabel": "",
1130 | "axisPlacement": "auto",
1131 | "barAlignment": 0,
1132 | "drawStyle": "bars",
1133 | "fillOpacity": 100,
1134 | "gradientMode": "hue",
1135 | "hideFrom": {
1136 | "legend": false,
1137 | "tooltip": false,
1138 | "viz": false
1139 | },
1140 | "insertNulls": false,
1141 | "lineInterpolation": "linear",
1142 | "lineWidth": 1,
1143 | "pointSize": 5,
1144 | "scaleDistribution": {
1145 | "type": "linear"
1146 | },
1147 | "showPoints": "auto",
1148 | "spanNulls": false,
1149 | "stacking": {
1150 | "group": "A",
1151 | "mode": "percent"
1152 | },
1153 | "thresholdsStyle": {
1154 | "mode": "off"
1155 | }
1156 | },
1157 | "mappings": [],
1158 | "thresholds": {
1159 | "mode": "absolute",
1160 | "steps": [
1161 | {
1162 | "color": "green",
1163 | "value": null
1164 | },
1165 | {
1166 | "color": "red",
1167 | "value": 80
1168 | }
1169 | ]
1170 | }
1171 | },
1172 | "overrides": []
1173 | },
1174 | "gridPos": {
1175 | "h": 8,
1176 | "w": 12,
1177 | "x": 0,
1178 | "y": 48
1179 | },
1180 | "id": 19,
1181 | "options": {
1182 | "legend": {
1183 | "calcs": [],
1184 | "displayMode": "hidden",
1185 | "placement": "right",
1186 | "showLegend": false
1187 | },
1188 | "tooltip": {
1189 | "mode": "single",
1190 | "sort": "none"
1191 | }
1192 | },
1193 | "targets": [
1194 | {
1195 | "datasource": {
1196 | "type": "prometheus",
1197 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1198 | },
1199 | "disableTextWrap": false,
1200 | "editorMode": "builder",
1201 | "expr": "jvm_gc_collection_seconds_sum",
1202 | "fullMetaSearch": false,
1203 | "includeNullMetadata": true,
1204 | "instant": false,
1205 | "legendFormat": "{{instance}}, gc: {{gc}}",
1206 | "range": true,
1207 | "refId": "A",
1208 | "useBackend": false
1209 | }
1210 | ],
1211 | "title": "Collection X Seconds Sum",
1212 | "type": "timeseries"
1213 | },
1214 | {
1215 | "datasource": {
1216 | "type": "prometheus",
1217 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1218 | },
1219 | "description": "Cumulative number of requests made by producers to the brokers",
1220 | "fieldConfig": {
1221 | "defaults": {
1222 | "color": {
1223 | "mode": "continuous-BlPu"
1224 | },
1225 | "custom": {
1226 | "axisBorderShow": false,
1227 | "axisCenteredZero": false,
1228 | "axisColorMode": "text",
1229 | "axisLabel": "",
1230 | "axisPlacement": "auto",
1231 | "barAlignment": 0,
1232 | "drawStyle": "line",
1233 | "fillOpacity": 2,
1234 | "gradientMode": "none",
1235 | "hideFrom": {
1236 | "legend": false,
1237 | "tooltip": false,
1238 | "viz": false
1239 | },
1240 | "insertNulls": false,
1241 | "lineInterpolation": "linear",
1242 | "lineWidth": 2,
1243 | "pointSize": 2,
1244 | "scaleDistribution": {
1245 | "type": "linear"
1246 | },
1247 | "showPoints": "auto",
1248 | "spanNulls": false,
1249 | "stacking": {
1250 | "group": "A",
1251 | "mode": "none"
1252 | },
1253 | "thresholdsStyle": {
1254 | "mode": "off"
1255 | }
1256 | },
1257 | "mappings": [],
1258 | "thresholds": {
1259 | "mode": "absolute",
1260 | "steps": [
1261 | {
1262 | "color": "green",
1263 | "value": null
1264 | },
1265 | {
1266 | "color": "red",
1267 | "value": 80
1268 | }
1269 | ]
1270 | }
1271 | },
1272 | "overrides": []
1273 | },
1274 | "gridPos": {
1275 | "h": 8,
1276 | "w": 12,
1277 | "x": 12,
1278 | "y": 48
1279 | },
1280 | "id": 11,
1281 | "options": {
1282 | "legend": {
1283 | "calcs": [],
1284 | "displayMode": "hidden",
1285 | "placement": "right",
1286 | "showLegend": false
1287 | },
1288 | "tooltip": {
1289 | "mode": "single",
1290 | "sort": "none"
1291 | }
1292 | },
1293 | "pluginVersion": "10.2.0",
1294 | "targets": [
1295 | {
1296 | "datasource": {
1297 | "type": "prometheus",
1298 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1299 | },
1300 | "disableTextWrap": false,
1301 | "editorMode": "builder",
1302 | "expr": "label_replace(label_replace(kafka_server_BrokerTopicMetrics_TotalProduceRequests_total, \"formatted_topic\", \"TOTAL\", \"topic\", \"^$\"), \"formatted_topic\", \"$1\", \"topic\", \"(.+)\")",
1303 | "fullMetaSearch": false,
1304 | "includeNullMetadata": true,
1305 | "instant": false,
1306 | "legendFormat": "{{instance}}, topic: {{formatted_topic}}",
1307 | "range": true,
1308 | "refId": "A",
1309 | "useBackend": false
1310 | }
1311 | ],
1312 | "title": "Total Producer Request",
1313 | "type": "timeseries"
1314 | },
1315 | {
1316 | "datasource": {
1317 | "type": "prometheus",
1318 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1319 | },
1320 | "description": "The total amount of time (in milliseconds) the JVM has spent executing young or old garbage collection processes",
1321 | "fieldConfig": {
1322 | "defaults": {
1323 | "color": {
1324 | "mode": "continuous-YlBl"
1325 | },
1326 | "custom": {
1327 | "axisBorderShow": false,
1328 | "axisCenteredZero": false,
1329 | "axisColorMode": "text",
1330 | "axisLabel": "",
1331 | "axisPlacement": "auto",
1332 | "barAlignment": 0,
1333 | "drawStyle": "bars",
1334 | "fillOpacity": 100,
1335 | "gradientMode": "hue",
1336 | "hideFrom": {
1337 | "legend": false,
1338 | "tooltip": false,
1339 | "viz": false
1340 | },
1341 | "insertNulls": false,
1342 | "lineInterpolation": "linear",
1343 | "lineWidth": 1,
1344 | "pointSize": 5,
1345 | "scaleDistribution": {
1346 | "type": "linear"
1347 | },
1348 | "showPoints": "auto",
1349 | "spanNulls": false,
1350 | "stacking": {
1351 | "group": "A",
1352 | "mode": "normal"
1353 | },
1354 | "thresholdsStyle": {
1355 | "mode": "off"
1356 | }
1357 | },
1358 | "mappings": [],
1359 | "thresholds": {
1360 | "mode": "absolute",
1361 | "steps": [
1362 | {
1363 | "color": "green",
1364 | "value": null
1365 | },
1366 | {
1367 | "color": "red",
1368 | "value": 80
1369 | }
1370 | ]
1371 | }
1372 | },
1373 | "overrides": []
1374 | },
1375 | "gridPos": {
1376 | "h": 8,
1377 | "w": 12,
1378 | "x": 0,
1379 | "y": 56
1380 | },
1381 | "id": 18,
1382 | "options": {
1383 | "legend": {
1384 | "calcs": [],
1385 | "displayMode": "hidden",
1386 | "placement": "right",
1387 | "showLegend": false
1388 | },
1389 | "tooltip": {
1390 | "mode": "single",
1391 | "sort": "none"
1392 | }
1393 | },
1394 | "targets": [
1395 | {
1396 | "datasource": {
1397 | "type": "prometheus",
1398 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1399 | },
1400 | "disableTextWrap": false,
1401 | "editorMode": "builder",
1402 | "expr": "jvm_gc_collection_seconds_count",
1403 | "fullMetaSearch": false,
1404 | "includeNullMetadata": true,
1405 | "instant": false,
1406 | "legendFormat": "{{instance}}, gc: {{gc}}",
1407 | "range": true,
1408 | "refId": "A",
1409 | "useBackend": false
1410 | }
1411 | ],
1412 | "title": "Collection X Seconds Count",
1413 | "type": "timeseries"
1414 | },
1415 | {
1416 | "datasource": {
1417 | "type": "prometheus",
1418 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1419 | },
1420 | "description": "Number of offline partitions",
1421 | "fieldConfig": {
1422 | "defaults": {
1423 | "color": {
1424 | "mode": "thresholds"
1425 | },
1426 | "mappings": [],
1427 | "thresholds": {
1428 | "mode": "percentage",
1429 | "steps": [
1430 | {
1431 | "color": "super-light-blue",
1432 | "value": null
1433 | },
1434 | {
1435 | "color": "dark-blue",
1436 | "value": 70
1437 | },
1438 | {
1439 | "color": "dark-purple",
1440 | "value": 85
1441 | }
1442 | ]
1443 | }
1444 | },
1445 | "overrides": []
1446 | },
1447 | "gridPos": {
1448 | "h": 8,
1449 | "w": 12,
1450 | "x": 12,
1451 | "y": 56
1452 | },
1453 | "id": 1,
1454 | "options": {
1455 | "minVizHeight": 75,
1456 | "minVizWidth": 75,
1457 | "orientation": "auto",
1458 | "reduceOptions": {
1459 | "calcs": ["lastNotNull"],
1460 | "fields": "",
1461 | "values": false
1462 | },
1463 | "showThresholdLabels": false,
1464 | "showThresholdMarkers": true
1465 | },
1466 | "pluginVersion": "10.2.2",
1467 | "targets": [
1468 | {
1469 | "datasource": {
1470 | "type": "prometheus",
1471 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1472 | },
1473 | "disableTextWrap": false,
1474 | "editorMode": "builder",
1475 | "expr": "kafka_controller_KafkaController_OfflinePartitionsCount",
1476 | "fullMetaSearch": false,
1477 | "includeNullMetadata": true,
1478 | "instant": false,
1479 | "legendFormat": "{{instance}}",
1480 | "range": true,
1481 | "refId": "A",
1482 | "useBackend": false
1483 | }
1484 | ],
1485 | "title": "Offline Partition Count",
1486 | "type": "gauge"
1487 | },
1488 | {
1489 | "datasource": {
1490 | "type": "prometheus",
1491 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1492 | },
1493 | "description": "Number of requests waiting in producer purgatory/Number of requests waiting in fetch purgatory",
1494 | "fieldConfig": {
1495 | "defaults": {
1496 | "color": {
1497 | "mode": "palette-classic"
1498 | },
1499 | "custom": {
1500 | "axisBorderShow": false,
1501 | "axisCenteredZero": false,
1502 | "axisColorMode": "text",
1503 | "axisLabel": "",
1504 | "axisPlacement": "auto",
1505 | "barAlignment": 0,
1506 | "drawStyle": "line",
1507 | "fillOpacity": 25,
1508 | "gradientMode": "none",
1509 | "hideFrom": {
1510 | "legend": false,
1511 | "tooltip": false,
1512 | "viz": false
1513 | },
1514 | "insertNulls": false,
1515 | "lineInterpolation": "linear",
1516 | "lineWidth": 1,
1517 | "pointSize": 5,
1518 | "scaleDistribution": {
1519 | "type": "linear"
1520 | },
1521 | "showPoints": "auto",
1522 | "spanNulls": false,
1523 | "stacking": {
1524 | "group": "A",
1525 | "mode": "normal"
1526 | },
1527 | "thresholdsStyle": {
1528 | "mode": "off"
1529 | }
1530 | },
1531 | "mappings": [],
1532 | "thresholds": {
1533 | "mode": "absolute",
1534 | "steps": [
1535 | {
1536 | "color": "green",
1537 | "value": null
1538 | },
1539 | {
1540 | "color": "red",
1541 | "value": 80
1542 | }
1543 | ]
1544 | }
1545 | },
1546 | "overrides": []
1547 | },
1548 | "gridPos": {
1549 | "h": 8,
1550 | "w": 12,
1551 | "x": 0,
1552 | "y": 64
1553 | },
1554 | "id": 17,
1555 | "options": {
1556 | "legend": {
1557 | "calcs": [],
1558 | "displayMode": "hidden",
1559 | "placement": "right",
1560 | "showLegend": false
1561 | },
1562 | "tooltip": {
1563 | "mode": "single",
1564 | "sort": "none"
1565 | }
1566 | },
1567 | "targets": [
1568 | {
1569 | "datasource": {
1570 | "type": "prometheus",
1571 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1572 | },
1573 | "disableTextWrap": false,
1574 | "editorMode": "builder",
1575 | "expr": "kafka_server_DelayedOperationPurgatory_PurgatorySize",
1576 | "fullMetaSearch": false,
1577 | "includeNullMetadata": true,
1578 | "instant": false,
1579 | "legendFormat": "{{instance}}, Delayed Operation: {{delayedOperation}}",
1580 | "range": true,
1581 | "refId": "A",
1582 | "useBackend": false
1583 | }
1584 | ],
1585 | "title": "Purgatory Size",
1586 | "type": "timeseries"
1587 | },
1588 | {
1589 | "datasource": {
1590 | "type": "prometheus",
1591 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1592 | },
1593 | "description": "Rate at which the pool of in-sync replicas (ISRs) shrinks/expands",
1594 | "fieldConfig": {
1595 | "defaults": {
1596 | "color": {
1597 | "mode": "palette-classic"
1598 | },
1599 | "custom": {
1600 | "axisBorderShow": false,
1601 | "axisCenteredZero": false,
1602 | "axisColorMode": "text",
1603 | "axisLabel": "",
1604 | "axisPlacement": "auto",
1605 | "barAlignment": 0,
1606 | "drawStyle": "line",
1607 | "fillOpacity": 0,
1608 | "gradientMode": "none",
1609 | "hideFrom": {
1610 | "legend": false,
1611 | "tooltip": false,
1612 | "viz": false
1613 | },
1614 | "insertNulls": false,
1615 | "lineInterpolation": "linear",
1616 | "lineWidth": 1,
1617 | "pointSize": 5,
1618 | "scaleDistribution": {
1619 | "type": "linear"
1620 | },
1621 | "showPoints": "auto",
1622 | "spanNulls": false,
1623 | "stacking": {
1624 | "group": "A",
1625 | "mode": "none"
1626 | },
1627 | "thresholdsStyle": {
1628 | "mode": "off"
1629 | }
1630 | },
1631 | "mappings": [],
1632 | "thresholds": {
1633 | "mode": "absolute",
1634 | "steps": [
1635 | {
1636 | "color": "green",
1637 | "value": null
1638 | },
1639 | {
1640 | "color": "red",
1641 | "value": 80
1642 | }
1643 | ]
1644 | }
1645 | },
1646 | "overrides": []
1647 | },
1648 | "gridPos": {
1649 | "h": 8,
1650 | "w": 12,
1651 | "x": 12,
1652 | "y": 64
1653 | },
1654 | "id": 8,
1655 | "options": {
1656 | "legend": {
1657 | "calcs": [],
1658 | "displayMode": "list",
1659 | "placement": "bottom",
1660 | "showLegend": true
1661 | },
1662 | "tooltip": {
1663 | "mode": "single",
1664 | "sort": "none"
1665 | }
1666 | },
1667 | "pluginVersion": "10.2.2",
1668 | "targets": [
1669 | {
1670 | "datasource": {
1671 | "type": "prometheus",
1672 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1673 | },
1674 | "disableTextWrap": false,
1675 | "editorMode": "builder",
1676 | "expr": "kafka_server_ReplicaManager_IsrShrinks_total",
1677 | "fullMetaSearch": false,
1678 | "hide": false,
1679 | "includeNullMetadata": true,
1680 | "instant": false,
1681 | "legendFormat": "{{__name__}}, {{instance}}",
1682 | "range": true,
1683 | "refId": "B",
1684 | "useBackend": false
1685 | },
1686 | {
1687 | "datasource": {
1688 | "type": "prometheus",
1689 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1690 | },
1691 | "disableTextWrap": false,
1692 | "editorMode": "builder",
1693 | "expr": "kafka_server_ReplicaManager_IsrExpands_total",
1694 | "fullMetaSearch": false,
1695 | "hide": false,
1696 | "includeNullMetadata": true,
1697 | "instant": false,
1698 | "legendFormat": "{{__name__}}, {{instance}}",
1699 | "range": true,
1700 | "refId": "A",
1701 | "useBackend": false
1702 | }
1703 | ],
1704 | "title": "Replica Manager ISR Shrinks/Expands",
1705 | "type": "timeseries"
1706 | },
1707 | {
1708 | "datasource": {
1709 | "type": "prometheus",
1710 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1711 | },
1712 | "description": "The time it takes for Kafka brokers to interact with the Zookeeper ensemble to perform various operations.",
1713 | "fieldConfig": {
1714 | "defaults": {
1715 | "color": {
1716 | "mode": "continuous-BlYlRd"
1717 | },
1718 | "custom": {
1719 | "axisBorderShow": false,
1720 | "axisCenteredZero": false,
1721 | "axisColorMode": "text",
1722 | "axisLabel": "",
1723 | "axisPlacement": "auto",
1724 | "barAlignment": 0,
1725 | "drawStyle": "line",
1726 | "fillOpacity": 0,
1727 | "gradientMode": "none",
1728 | "hideFrom": {
1729 | "legend": false,
1730 | "tooltip": false,
1731 | "viz": false
1732 | },
1733 | "insertNulls": false,
1734 | "lineInterpolation": "linear",
1735 | "lineWidth": 1,
1736 | "pointSize": 5,
1737 | "scaleDistribution": {
1738 | "type": "linear"
1739 | },
1740 | "showPoints": "auto",
1741 | "spanNulls": false,
1742 | "stacking": {
1743 | "group": "A",
1744 | "mode": "none"
1745 | },
1746 | "thresholdsStyle": {
1747 | "mode": "off"
1748 | }
1749 | },
1750 | "mappings": [],
1751 | "thresholds": {
1752 | "mode": "absolute",
1753 | "steps": [
1754 | {
1755 | "color": "green",
1756 | "value": null
1757 | },
1758 | {
1759 | "color": "red",
1760 | "value": 80
1761 | }
1762 | ]
1763 | }
1764 | },
1765 | "overrides": []
1766 | },
1767 | "gridPos": {
1768 | "h": 8,
1769 | "w": 12,
1770 | "x": 0,
1771 | "y": 72
1772 | },
1773 | "id": 16,
1774 | "options": {
1775 | "legend": {
1776 | "calcs": [],
1777 | "displayMode": "list",
1778 | "placement": "bottom",
1779 | "showLegend": true
1780 | },
1781 | "tooltip": {
1782 | "mode": "single",
1783 | "sort": "none"
1784 | }
1785 | },
1786 | "targets": [
1787 | {
1788 | "datasource": {
1789 | "type": "prometheus",
1790 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1791 | },
1792 | "disableTextWrap": false,
1793 | "editorMode": "builder",
1794 | "expr": "kafka_server_ZooKeeperClientMetrics_ZooKeeperRequestLatencyMs",
1795 | "fullMetaSearch": false,
1796 | "includeNullMetadata": true,
1797 | "instant": false,
1798 | "legendFormat": "{{instance}}, quantile: {{quantile}}",
1799 | "range": true,
1800 | "refId": "A",
1801 | "useBackend": false
1802 | }
1803 | ],
1804 | "title": "Zookeeper Request Latency Ms",
1805 | "type": "timeseries"
1806 | },
1807 | {
1808 | "datasource": {
1809 | "type": "prometheus",
1810 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1811 | },
1812 | "description": "Number of active controllers in cluster",
1813 | "fieldConfig": {
1814 | "defaults": {
1815 | "color": {
1816 | "mode": "continuous-YlBl"
1817 | },
1818 | "mappings": [],
1819 | "thresholds": {
1820 | "mode": "absolute",
1821 | "steps": [
1822 | {
1823 | "color": "super-light-blue",
1824 | "value": null
1825 | },
1826 | {
1827 | "color": "dark-blue",
1828 | "value": 80
1829 | }
1830 | ]
1831 | }
1832 | },
1833 | "overrides": []
1834 | },
1835 | "gridPos": {
1836 | "h": 8,
1837 | "w": 12,
1838 | "x": 12,
1839 | "y": 72
1840 | },
1841 | "id": 3,
1842 | "options": {
1843 | "minVizHeight": 75,
1844 | "minVizWidth": 75,
1845 | "orientation": "auto",
1846 | "reduceOptions": {
1847 | "calcs": ["lastNotNull"],
1848 | "fields": "",
1849 | "values": false
1850 | },
1851 | "showThresholdLabels": false,
1852 | "showThresholdMarkers": true
1853 | },
1854 | "pluginVersion": "10.2.2",
1855 | "targets": [
1856 | {
1857 | "datasource": {
1858 | "type": "prometheus",
1859 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1860 | },
1861 | "disableTextWrap": false,
1862 | "editorMode": "builder",
1863 | "expr": "kafka_controller_KafkaController_ActiveControllerCount",
1864 | "fullMetaSearch": false,
1865 | "includeNullMetadata": true,
1866 | "instant": false,
1867 | "legendFormat": "{{instance}}",
1868 | "range": true,
1869 | "refId": "A",
1870 | "useBackend": false
1871 | }
1872 | ],
1873 | "title": "Active Controller Count",
1874 | "type": "gauge"
1875 | },
1876 | {
1877 | "datasource": {
1878 | "type": "prometheus",
1879 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1880 | },
1881 | "gridPos": {
1882 | "h": 8,
1883 | "w": 12,
1884 | "x": 0,
1885 | "y": 80
1886 | },
1887 | "id": 15,
1888 | "options": {
1889 | "bgColor": "super-light-yellow",
1890 | "clockType": "12 hour",
1891 | "countdownSettings": {
1892 | "endCountdownTime": "2023-11-14T23:10:31-06:00",
1893 | "endText": "00:00:00"
1894 | },
1895 | "countupSettings": {
1896 | "beginCountupTime": "2023-11-14T23:10:31-06:00",
1897 | "beginText": "00:00:00"
1898 | },
1899 | "dateSettings": {
1900 | "dateFormat": "YYYY-MM-DD",
1901 | "fontSize": "20px",
1902 | "fontWeight": "normal",
1903 | "locale": "",
1904 | "showDate": true
1905 | },
1906 | "fontMono": false,
1907 | "mode": "time",
1908 | "refresh": "sec",
1909 | "timeSettings": {
1910 | "fontSize": "30px",
1911 | "fontWeight": "bold"
1912 | },
1913 | "timezone": "US/Central",
1914 | "timezoneSettings": {
1915 | "fontSize": "12px",
1916 | "fontWeight": "normal",
1917 | "showTimezone": false,
1918 | "zoneFormat": "offsetAbbv"
1919 | }
1920 | },
1921 | "pluginVersion": "2.1.3",
1922 | "title": "Houston",
1923 | "type": "grafana-clock-panel"
1924 | },
1925 | {
1926 | "datasource": {
1927 | "type": "prometheus",
1928 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1929 | },
1930 | "description": "Number of active brokers.",
1931 | "fieldConfig": {
1932 | "defaults": {
1933 | "color": {
1934 | "mode": "palette-classic"
1935 | },
1936 | "custom": {
1937 | "hideFrom": {
1938 | "legend": false,
1939 | "tooltip": false,
1940 | "viz": false
1941 | }
1942 | },
1943 | "mappings": []
1944 | },
1945 | "overrides": []
1946 | },
1947 | "gridPos": {
1948 | "h": 8,
1949 | "w": 12,
1950 | "x": 12,
1951 | "y": 80
1952 | },
1953 | "id": 12,
1954 | "options": {
1955 | "displayLabels": ["percent"],
1956 | "legend": {
1957 | "calcs": [],
1958 | "displayMode": "hidden",
1959 | "placement": "right",
1960 | "showLegend": false,
1961 | "values": []
1962 | },
1963 | "pieType": "donut",
1964 | "reduceOptions": {
1965 | "calcs": ["lastNotNull"],
1966 | "fields": "",
1967 | "values": false
1968 | },
1969 | "tooltip": {
1970 | "mode": "single",
1971 | "sort": "none"
1972 | }
1973 | },
1974 | "pluginVersion": "10.2.0",
1975 | "targets": [
1976 | {
1977 | "datasource": {
1978 | "type": "prometheus",
1979 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
1980 | },
1981 | "disableTextWrap": false,
1982 | "editorMode": "builder",
1983 | "expr": "kafka_controller_KafkaController_ActiveBrokerCount",
1984 | "fullMetaSearch": false,
1985 | "includeNullMetadata": true,
1986 | "instant": false,
1987 | "legendFormat": "{{instance}}",
1988 | "range": true,
1989 | "refId": "A",
1990 | "useBackend": false
1991 | }
1992 | ],
1993 | "title": "Active Broker Count",
1994 | "type": "piechart"
1995 | },
1996 | {
1997 | "datasource": {
1998 | "type": "prometheus",
1999 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
2000 | },
2001 | "gridPos": {
2002 | "h": 8,
2003 | "w": 12,
2004 | "x": 0,
2005 | "y": 88
2006 | },
2007 | "id": 14,
2008 | "options": {
2009 | "bgColor": "dark-purple",
2010 | "clockType": "12 hour",
2011 | "countdownSettings": {
2012 | "endCountdownTime": "2023-11-14T23:09:08-06:00",
2013 | "endText": "00:00:00"
2014 | },
2015 | "countupSettings": {
2016 | "beginCountupTime": "2023-11-14T23:09:08-06:00",
2017 | "beginText": "00:00:00"
2018 | },
2019 | "dateSettings": {
2020 | "dateFormat": "YYYY-MM-DD",
2021 | "fontSize": "20px",
2022 | "fontWeight": "normal",
2023 | "locale": "",
2024 | "showDate": true
2025 | },
2026 | "fontMono": false,
2027 | "mode": "time",
2028 | "refresh": "sec",
2029 | "timeSettings": {
2030 | "fontSize": "30px",
2031 | "fontWeight": "bold"
2032 | },
2033 | "timezone": "America/Los_Angeles",
2034 | "timezoneSettings": {
2035 | "fontSize": "12px",
2036 | "fontWeight": "normal",
2037 | "showTimezone": false,
2038 | "zoneFormat": "offsetAbbv"
2039 | }
2040 | },
2041 | "pluginVersion": "2.1.3",
2042 | "title": "Los Angeles",
2043 | "type": "grafana-clock-panel"
2044 | },
2045 | {
2046 | "datasource": {
2047 | "type": "prometheus",
2048 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
2049 | },
2050 | "description": "Aggregate outgoing bytes",
2051 | "fieldConfig": {
2052 | "defaults": {
2053 | "color": {
2054 | "mode": "continuous-YlBl"
2055 | },
2056 | "custom": {
2057 | "axisBorderShow": false,
2058 | "axisCenteredZero": false,
2059 | "axisColorMode": "text",
2060 | "axisLabel": "",
2061 | "axisPlacement": "auto",
2062 | "barAlignment": 0,
2063 | "drawStyle": "line",
2064 | "fillOpacity": 12,
2065 | "gradientMode": "none",
2066 | "hideFrom": {
2067 | "legend": false,
2068 | "tooltip": false,
2069 | "viz": false
2070 | },
2071 | "insertNulls": false,
2072 | "lineInterpolation": "linear",
2073 | "lineStyle": {
2074 | "dash": [0, 3, 3],
2075 | "fill": "dot"
2076 | },
2077 | "lineWidth": 2,
2078 | "pointSize": 5,
2079 | "scaleDistribution": {
2080 | "type": "linear"
2081 | },
2082 | "showPoints": "auto",
2083 | "spanNulls": false,
2084 | "stacking": {
2085 | "group": "A",
2086 | "mode": "none"
2087 | },
2088 | "thresholdsStyle": {
2089 | "mode": "off"
2090 | }
2091 | },
2092 | "mappings": [],
2093 | "thresholds": {
2094 | "mode": "absolute",
2095 | "steps": [
2096 | {
2097 | "color": "green",
2098 | "value": null
2099 | },
2100 | {
2101 | "color": "red",
2102 | "value": 80
2103 | }
2104 | ]
2105 | }
2106 | },
2107 | "overrides": []
2108 | },
2109 | "gridPos": {
2110 | "h": 8,
2111 | "w": 12,
2112 | "x": 12,
2113 | "y": 88
2114 | },
2115 | "id": 7,
2116 | "options": {
2117 | "legend": {
2118 | "calcs": [],
2119 | "displayMode": "hidden",
2120 | "placement": "right",
2121 | "showLegend": false
2122 | },
2123 | "tooltip": {
2124 | "mode": "single",
2125 | "sort": "none"
2126 | }
2127 | },
2128 | "pluginVersion": "10.2.0",
2129 | "targets": [
2130 | {
2131 | "datasource": {
2132 | "type": "prometheus",
2133 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
2134 | },
2135 | "disableTextWrap": false,
2136 | "editorMode": "builder",
2137 | "expr": "kafka_server_BrokerTopicMetrics_BytesOut_total",
2138 | "fullMetaSearch": false,
2139 | "includeNullMetadata": true,
2140 | "instant": false,
2141 | "legendFormat": "{{instance}}",
2142 | "range": true,
2143 | "refId": "A",
2144 | "useBackend": false
2145 | }
2146 | ],
2147 | "title": "Broker Bytes Out Total",
2148 | "type": "timeseries"
2149 | },
2150 | {
2151 | "datasource": {
2152 | "type": "prometheus",
2153 | "uid": "f370827f-878f-4256-a0d4-192418827a14"
2154 | },
2155 | "gridPos": {
2156 | "h": 8,
2157 | "w": 12,
2158 | "x": 0,
2159 | "y": 96
2160 | },
2161 | "id": 13,
2162 | "options": {
2163 | "bgColor": "dark-blue",
2164 | "clockType": "12 hour",
2165 | "countdownSettings": {
2166 | "endCountdownTime": "2023-11-14T23:07:27-06:00",
2167 | "endText": "00:00:00"
2168 | },
2169 | "countupSettings": {
2170 | "beginCountupTime": "2023-11-14T23:07:27-06:00",
2171 | "beginText": "00:00:00"
2172 | },
2173 | "dateSettings": {
2174 | "dateFormat": "YYYY-MM-DD",
2175 | "fontSize": "20px",
2176 | "fontWeight": "normal",
2177 | "locale": "",
2178 | "showDate": true
2179 | },
2180 | "fontMono": false,
2181 | "mode": "time",
2182 | "refresh": "sec",
2183 | "timeSettings": {
2184 | "fontSize": "30px",
2185 | "fontWeight": "bold"
2186 | },
2187 | "timezone": "America/New_York",
2188 | "timezoneSettings": {
2189 | "fontSize": "12px",
2190 | "fontWeight": "normal",
2191 | "showTimezone": false,
2192 | "zoneFormat": "offsetAbbv"
2193 | }
2194 | },
2195 | "pluginVersion": "2.1.3",
2196 | "title": "New York",
2197 | "type": "grafana-clock-panel"
2198 | }
2199 | ],
2200 | "refresh": false,
2201 | "schemaVersion": 38,
2202 | "tags": [],
2203 | "templating": {
2204 | "list": []
2205 | },
2206 | "time": {
2207 | "from": "now-1h",
2208 | "to": "now"
2209 | },
2210 | "timepicker": {},
2211 | "timezone": "",
2212 | "title": "KafOpticon Dashboard",
2213 | "uid": "d9098b29-ef80-4e40-86bc-b28bd6e85756",
2214 | "version": 1,
2215 | "weekStart": ""
2216 | }
2217 |
--------------------------------------------------------------------------------
/grafana/Dockerfile/provisioning/datasources/datasource.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | # Tells Grafana which data source to query for metrics and where to reach it
4 | datasources:
5 | - name: Prometheus
6 | type: prometheus
7 | access: proxy
8 | orgId: 1
9 | url: http://prometheus:9090
10 | basicAuth: false
11 | isDefault: true
12 | editable: true
13 | uid: f370827f-878f-4256-a0d4-192418827a14
--------------------------------------------------------------------------------
/grafana/grafana.ini:
--------------------------------------------------------------------------------
1 | # Configures Grafana settings (grafana.ini)
2 | [security]
3 | allow_embedding=true
4 | # Allows the user to see the Grafana clock panel
5 | [plugins]
6 | plugins=grafana-clock-panel
7 | # Supply real SMTP credentials below (or via the GF_SMTP_USER / GF_SMTP_PASSWORD environment variables) to enable alert emails
8 | [smtp]
9 | enabled=true
10 | host=smtp.gmail.com:465
11 | user= null
12 | password= null
13 | # Anonymous access with the Admin role lets embedded dashboards load without a Grafana login
14 | [auth.anonymous]
15 | enabled=true
16 | org_name=Main Org.
17 | org_role=Admin
18 |
--------------------------------------------------------------------------------
/local-test/jmx_prometheus_httpserver-0.19.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/KafOpticon/ccee926e56cbbf52175f4c7e79600ebc8deb97b7/local-test/jmx_prometheus_httpserver-0.19.0.jar
--------------------------------------------------------------------------------
/local-test/kafka-config/server1.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #
17 | # This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required.
18 | # See kafka.server.KafkaConfig for additional details and defaults
19 | #
20 |
21 | ############################# Server Basics #############################
22 |
23 | # The id of the broker. This must be set to a unique integer for each broker.
24 | broker.id=0
25 |
26 | ############################# Socket Server Settings #############################
27 |
28 | # The address the socket server listens on. If not configured, the host name will be equal to the value of
29 | # java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
30 | # FORMAT:
31 | # listeners = listener_name://host_name:port
32 | # EXAMPLE:
33 | # listeners = PLAINTEXT://your.host.name:9092
34 | listeners=PLAINTEXT://:9092
35 | # listeners=SASL_PLAINTEXT://:9092
36 |
37 | # Listener name, hostname and port the broker will advertise to clients.
38 | # If not set, it uses the value for "listeners".
39 | #advertised.listeners=PLAINTEXT://your.host.name:9092
40 | #advertised.listeners=SASL_PLAINTEXT://:9092
41 |
42 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
43 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
44 | # security.inter.broker.protocol=SASL_PLAINTEXT
45 | # sasl.mechanism.inter.broker.protocol=PLAIN
46 | # sasl.enable.mechanisms=PLAIN
47 |
48 | # Normally kept in a separate JAAS file, but the config is listed below for simplicity
49 | # It must be a single line in the server.properties file, as shown below
50 | # sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="kafkapassword" user_kafka="kafka"
51 |
52 |
53 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
54 | num.network.threads=3
55 |
56 | # The number of threads that the server uses for processing requests, which may include disk I/O
57 | num.io.threads=8
58 |
59 | # The send buffer (SO_SNDBUF) used by the socket server
60 | socket.send.buffer.bytes=102400
61 |
62 | # The receive buffer (SO_RCVBUF) used by the socket server
63 | socket.receive.buffer.bytes=102400
64 |
65 | # The maximum size of a request that the socket server will accept (protection against OOM)
66 | socket.request.max.bytes=104857600
67 |
68 |
69 | ############################# Log Basics #############################
70 |
71 | # A comma separated list of directories under which to store log files
72 | log.dirs=/tmp/kafka-logs1
73 |
74 | # The default number of log partitions per topic. More partitions allow greater
75 | # parallelism for consumption, but this will also result in more files across
76 | # the brokers.
77 | num.partitions=1
78 |
79 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
80 | # This value is recommended to be increased for installations with data dirs located in RAID array.
81 | num.recovery.threads.per.data.dir=1
82 |
83 | ############################# Internal Topic Settings #############################
84 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
85 | # For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
86 | offsets.topic.replication.factor=1
87 | transaction.state.log.replication.factor=1
88 | transaction.state.log.min.isr=1
89 |
90 | ############################# Log Flush Policy #############################
91 |
92 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
93 | # the OS cache lazily. The following configurations control the flush of data to disk.
94 | # There are a few important trade-offs here:
95 | # 1. Durability: Unflushed data may be lost if you are not using replication.
96 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
97 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
98 | # The settings below allow one to configure the flush policy to flush data after a period of time or
99 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
100 |
101 | # The number of messages to accept before forcing a flush of data to disk
102 | #log.flush.interval.messages=10000
103 |
104 | # The maximum amount of time a message can sit in a log before we force a flush
105 | #log.flush.interval.ms=1000
106 |
107 | ############################# Log Retention Policy #############################
108 |
109 | # The following configurations control the disposal of log segments. The policy can
110 | # be set to delete segments after a period of time, or after a given size has accumulated.
111 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
112 | # from the end of the log.
113 |
114 | # The minimum age of a log file to be eligible for deletion due to age
115 | log.retention.hours=168
116 |
117 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
118 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
119 | #log.retention.bytes=1073741824
120 |
121 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
122 | #log.segment.bytes=1073741824
123 |
124 | # The interval at which log segments are checked to see if they can be deleted according
125 | # to the retention policies
126 | log.retention.check.interval.ms=300000
127 |
128 | ############################# Zookeeper #############################
129 |
130 | # Zookeeper connection string (see zookeeper docs for details).
131 | # This is a comma separated host:port pairs, each corresponding to a zk
132 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
133 | # You can also append an optional chroot string to the urls to specify the
134 | # root directory for all kafka znodes.
135 | zookeeper.connect=localhost:2181
136 |
137 | # Timeout in ms for connecting to zookeeper
138 | zookeeper.connection.timeout.ms=18000
139 |
140 |
141 | ############################# Group Coordinator Settings #############################
142 |
143 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
144 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
145 | # The default value for this is 3 seconds.
146 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
147 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
148 | group.initial.rebalance.delay.ms=0
149 |
--------------------------------------------------------------------------------
/local-test/kafka-config/server2.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #
17 | # This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required.
18 | # See kafka.server.KafkaConfig for additional details and defaults
19 | #
20 |
21 | ############################# Server Basics #############################
22 |
23 | # The id of the broker. This must be set to a unique integer for each broker.
24 | broker.id=1
25 |
26 | ############################# Socket Server Settings #############################
27 |
28 | # The address the socket server listens on. If not configured, the host name will be equal to the value of
29 | # java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
30 | # FORMAT:
31 | # listeners = listener_name://host_name:port
32 | # EXAMPLE:
33 | # listeners = PLAINTEXT://your.host.name:9092
34 | listeners=PLAINTEXT://:9093
35 |
36 | # Listener name, hostname and port the broker will advertise to clients.
37 | # If not set, it uses the value for "listeners".
38 | #advertised.listeners=PLAINTEXT://your.host.name:9092
39 |
40 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
41 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
42 |
43 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
44 | num.network.threads=3
45 |
46 | # The number of threads that the server uses for processing requests, which may include disk I/O
47 | num.io.threads=8
48 |
49 | # The send buffer (SO_SNDBUF) used by the socket server
50 | socket.send.buffer.bytes=102400
51 |
52 | # The receive buffer (SO_RCVBUF) used by the socket server
53 | socket.receive.buffer.bytes=102400
54 |
55 | # The maximum size of a request that the socket server will accept (protection against OOM)
56 | socket.request.max.bytes=104857600
57 |
58 |
59 | ############################# Log Basics #############################
60 |
61 | # A comma separated list of directories under which to store log files
62 | log.dirs=/tmp/kafka-logs2
63 |
64 | # The default number of log partitions per topic. More partitions allow greater
65 | # parallelism for consumption, but this will also result in more files across
66 | # the brokers.
67 | num.partitions=1
68 |
69 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
70 | # This value is recommended to be increased for installations with data dirs located in RAID array.
71 | num.recovery.threads.per.data.dir=1
72 |
73 | ############################# Internal Topic Settings #############################
74 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
75 | # For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
76 | offsets.topic.replication.factor=1
77 | transaction.state.log.replication.factor=1
78 | transaction.state.log.min.isr=1
79 |
80 | ############################# Log Flush Policy #############################
81 |
82 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
83 | # the OS cache lazily. The following configurations control the flush of data to disk.
84 | # There are a few important trade-offs here:
85 | # 1. Durability: Unflushed data may be lost if you are not using replication.
86 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
87 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
88 | # The settings below allow one to configure the flush policy to flush data after a period of time or
89 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
90 |
91 | # The number of messages to accept before forcing a flush of data to disk
92 | #log.flush.interval.messages=10000
93 |
94 | # The maximum amount of time a message can sit in a log before we force a flush
95 | #log.flush.interval.ms=1000
96 |
97 | ############################# Log Retention Policy #############################
98 |
99 | # The following configurations control the disposal of log segments. The policy can
100 | # be set to delete segments after a period of time, or after a given size has accumulated.
101 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
102 | # from the end of the log.
103 |
104 | # The minimum age of a log file to be eligible for deletion due to age
105 | log.retention.hours=168
106 |
107 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
108 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
109 | #log.retention.bytes=1073741824
110 |
111 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
112 | #log.segment.bytes=1073741824
113 |
114 | # The interval at which log segments are checked to see if they can be deleted according
115 | # to the retention policies
116 | log.retention.check.interval.ms=300000
117 |
118 | ############################# Zookeeper #############################
119 |
120 | # Zookeeper connection string (see zookeeper docs for details).
121 | # This is a comma separated host:port pairs, each corresponding to a zk
122 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
123 | # You can also append an optional chroot string to the urls to specify the
124 | # root directory for all kafka znodes.
125 | zookeeper.connect=localhost:2181
126 |
127 | # Timeout in ms for connecting to zookeeper
128 | zookeeper.connection.timeout.ms=18000
129 |
130 |
131 | ############################# Group Coordinator Settings #############################
132 |
133 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
134 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
135 | # The default value for this is 3 seconds.
136 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
137 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
138 | group.initial.rebalance.delay.ms=0
139 |
--------------------------------------------------------------------------------
/local-test/kafka-config/server3.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #
17 | # This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required.
18 | # See kafka.server.KafkaConfig for additional details and defaults
19 | #
20 |
21 | ############################# Server Basics #############################
22 |
23 | # The id of the broker. This must be set to a unique integer for each broker.
24 | broker.id=2
25 |
26 | ############################# Socket Server Settings #############################
27 |
28 | # The address the socket server listens on. If not configured, the host name will be equal to the value of
29 | # java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
30 | # FORMAT:
31 | # listeners = listener_name://host_name:port
32 | # EXAMPLE:
33 | # listeners = PLAINTEXT://your.host.name:9092
34 | listeners=PLAINTEXT://:9094
35 |
36 | # Listener name, hostname and port the broker will advertise to clients.
37 | # If not set, it uses the value for "listeners".
38 | #advertised.listeners=PLAINTEXT://your.host.name:9092
39 |
40 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
41 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
42 |
43 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
44 | num.network.threads=3
45 |
46 | # The number of threads that the server uses for processing requests, which may include disk I/O
47 | num.io.threads=8
48 |
49 | # The send buffer (SO_SNDBUF) used by the socket server
50 | socket.send.buffer.bytes=102400
51 |
52 | # The receive buffer (SO_RCVBUF) used by the socket server
53 | socket.receive.buffer.bytes=102400
54 |
55 | # The maximum size of a request that the socket server will accept (protection against OOM)
56 | socket.request.max.bytes=104857600
57 |
58 |
59 | ############################# Log Basics #############################
60 |
61 | # A comma separated list of directories under which to store log files
62 | log.dirs=/tmp/kafka-logs3
63 |
64 | # The default number of log partitions per topic. More partitions allow greater
65 | # parallelism for consumption, but this will also result in more files across
66 | # the brokers.
67 | num.partitions=1
68 |
69 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
70 | # This value is recommended to be increased for installations with data dirs located in RAID array.
71 | num.recovery.threads.per.data.dir=1
72 |
73 | ############################# Internal Topic Settings #############################
74 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
75 | # For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
76 | offsets.topic.replication.factor=1
77 | transaction.state.log.replication.factor=1
78 | transaction.state.log.min.isr=1
79 |
80 | ############################# Log Flush Policy #############################
81 |
82 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
83 | # the OS cache lazily. The following configurations control the flush of data to disk.
84 | # There are a few important trade-offs here:
85 | # 1. Durability: Unflushed data may be lost if you are not using replication.
86 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
87 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
88 | # The settings below allow one to configure the flush policy to flush data after a period of time or
89 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
90 |
91 | # The number of messages to accept before forcing a flush of data to disk
92 | #log.flush.interval.messages=10000
93 |
94 | # The maximum amount of time a message can sit in a log before we force a flush
95 | #log.flush.interval.ms=1000
96 |
97 | ############################# Log Retention Policy #############################
98 |
99 | # The following configurations control the disposal of log segments. The policy can
100 | # be set to delete segments after a period of time, or after a given size has accumulated.
101 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
102 | # from the end of the log.
103 |
104 | # The minimum age of a log file to be eligible for deletion due to age
105 | log.retention.hours=168
106 |
107 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
108 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
109 | #log.retention.bytes=1073741824
110 |
111 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
112 | #log.segment.bytes=1073741824
113 |
114 | # The interval at which log segments are checked to see if they can be deleted according
115 | # to the retention policies
116 | log.retention.check.interval.ms=300000
117 |
118 | ############################# Zookeeper #############################
119 |
120 | # Zookeeper connection string (see zookeeper docs for details).
121 | # This is a comma separated host:port pairs, each corresponding to a zk
122 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
123 | # You can also append an optional chroot string to the urls to specify the
124 | # root directory for all kafka znodes.
125 | zookeeper.connect=localhost:2181
126 |
127 | # Timeout in ms for connecting to zookeeper
128 | zookeeper.connection.timeout.ms=18000
129 |
130 |
131 | ############################# Group Coordinator Settings #############################
132 |
133 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
134 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
135 | # The default value for this is 3 seconds.
136 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
137 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
138 | group.initial.rebalance.delay.ms=0
139 |
--------------------------------------------------------------------------------
/local-test/kafka-config/zookeeper.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # the directory where the snapshot is stored.
16 | dataDir=/tmp/zookeeper
17 | # the port at which the clients will connect
18 | clientPort=2181
19 | # disable the per-ip limit on the number of connections since this is a non-production config
20 | maxClientCnxns=0
21 | # Disable the adminserver by default to avoid port conflicts.
22 | # Set the port to something non-conflicting if choosing to enable this
23 | admin.enableServer=false
24 | # admin.serverPort=8080
25 |
--------------------------------------------------------------------------------
/local-test/producers-and-consumers/producer.js:
--------------------------------------------------------------------------------
1 | // require in kafkaJS
2 | const { Kafka } = require('kafkajs');
3 | // get the environment variables
4 | require('dotenv').config();
5 |
6 |
7 | const run = async () => {
8 | // this function pushes logs into the kafka cluster
9 | // it writes one hundred messages to the cluster, waits one second,
10 | // and then writes another hundred
11 |
12 | // get access to the kafka cluster
13 | const kafka = new Kafka({
14 | clientId: 'my-app',
15 | brokers: ['localhost:9092'/* , 'localhost:9093', 'localhost:9094' */],
16 | });
17 |
18 |
19 | // declare a variable counter
20 | let counter = 0;
21 |
22 | // get a producer from kafkajs
23 | const producer = kafka.producer();
24 |
25 | // connect the producer to the cluster
26 | await producer.connect();
27 |
28 | // declare a helper function
29 | const helper = async (iteration) => {
30 | // store the messages in an array
31 | const messageArray = [];
32 |
33 | // fill the array through a loop
34 | for (let i = 0; i < 100; i += 1) {
35 | messageArray.push({ value: `Message ${iteration}.${i}` });
36 | }
37 |
38 | // call the send method on the producer passing in the messageArray into the messages property
39 | await producer.send({
40 | topic: 'testing',
41 | messages: messageArray,
42 | });
43 |
44 |
45 | // increment counter
46 | counter += 1;
47 |
48 | // wait one second, and call the helper function again
49 | setTimeout(() => { helper(counter); }, 1000);
50 | };
51 |
52 | helper(counter);
53 | };
54 |
55 | run().catch((err) => console.error(err));
56 |
--------------------------------------------------------------------------------
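The producer above writes a batch of one hundred messages to the 'testing' topic roughly every second. No matching consumer lives in local-test/producers-and-consumers; a minimal sketch of one, assuming the same single broker on localhost:9092 (the groupId is arbitrary and hypothetical), might look like this:

// consumer sketch (hypothetical): reads back the messages written by producer.js
const { Kafka } = require('kafkajs');

const run = async () => {
  // connect to the same single broker the producer uses
  const kafka = new Kafka({ clientId: 'my-app', brokers: ['localhost:9092'] });
  const consumer = kafka.consumer({ groupId: 'local-test-group' });

  await consumer.connect();
  await consumer.subscribe({ topic: 'testing', fromBeginning: true });

  // print every message the producer wrote to the 'testing' topic
  await consumer.run({
    eachMessage: async ({ partition, message }) => {
      console.log(`partition ${partition}: ${message.value.toString()}`);
    },
  });
};

run().catch((err) => console.error(err));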
/local-test/scraping-config/jmxConfigTemplate.yml:
--------------------------------------------------------------------------------
1 | lowercaseOutputName: true
2 |
3 | rules:
4 | # Special cases and very specific rules
5 | - pattern : kafka.server<>Value
6 | name: kafka_server_$1_$2
7 | type: GAUGE
8 | labels:
9 | clientId: "$3"
10 | topic: "$4"
11 | partition: "$5"
12 | - pattern : kafka.server<>Value
13 | name: kafka_server_$1_$2
14 | type: GAUGE
15 | labels:
16 | clientId: "$3"
17 | broker: "$4:$5"
18 | - pattern : kafka.coordinator.(\w+)<>Value
19 | name: kafka_coordinator_$1_$2_$3
20 | type: GAUGE
21 |
22 | # Generic per-second counters with 0-2 key/value pairs
23 | - pattern: kafka.(\w+)<>Count
24 | name: kafka_$1_$2_$3_total
25 | type: COUNTER
26 | labels:
27 | "$4": "$5"
28 | "$6": "$7"
29 | - pattern: kafka.(\w+)<>Count
30 | name: kafka_$1_$2_$3_total
31 | type: COUNTER
32 | labels:
33 | "$4": "$5"
34 | - pattern: kafka.(\w+)<>Count
35 | name: kafka_$1_$2_$3_total
36 | type: COUNTER
37 |
38 | # Quota specific rules
39 | - pattern: kafka.server<>([a-z-]+)
40 | name: kafka_server_quota_$4
41 | type: GAUGE
42 | labels:
43 | resource: "$1"
44 | user: "$2"
45 | clientId: "$3"
46 | - pattern: kafka.server<>([a-z-]+)
47 | name: kafka_server_quota_$3
48 | type: GAUGE
49 | labels:
50 | resource: "$1"
51 | clientId: "$2"
52 | - pattern: kafka.server<>([a-z-]+)
53 | name: kafka_server_quota_$3
54 | type: GAUGE
55 | labels:
56 | resource: "$1"
57 | user: "$2"
58 |
59 | # Generic gauges with 0-2 key/value pairs
60 | - pattern: kafka.(\w+)<>Value
61 | name: kafka_$1_$2_$3
62 | type: GAUGE
63 | labels:
64 | "$4": "$5"
65 | "$6": "$7"
66 | - pattern: kafka.(\w+)<>Value
67 | name: kafka_$1_$2_$3
68 | type: GAUGE
69 | labels:
70 | "$4": "$5"
71 | - pattern: kafka.(\w+)<>Value
72 | name: kafka_$1_$2_$3
73 | type: GAUGE
74 |
75 | # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
76 | #
77 | # Note that these are missing the '_sum' metric!
78 | - pattern: kafka.(\w+)<>Count
79 | name: kafka_$1_$2_$3_count
80 | type: COUNTER
81 | labels:
82 | "$4": "$5"
83 | "$6": "$7"
84 | - pattern: kafka.(\w+)<>(\d+)thPercentile
85 | name: kafka_$1_$2_$3
86 | type: GAUGE
87 | labels:
88 | "$4": "$5"
89 | "$6": "$7"
90 | quantile: "0.$8"
91 | - pattern: kafka.(\w+)<>Count
92 | name: kafka_$1_$2_$3_count
93 | type: COUNTER
94 | labels:
95 | "$4": "$5"
96 | - pattern: kafka.(\w+)<>(\d+)thPercentile
97 | name: kafka_$1_$2_$3
98 | type: GAUGE
99 | labels:
100 | "$4": "$5"
101 | quantile: "0.$6"
102 | - pattern: kafka.(\w+)<>Count
103 | name: kafka_$1_$2_$3_count
104 | type: COUNTER
105 | - pattern: kafka.(\w+)<>(\d+)thPercentile
106 | name: kafka_$1_$2_$3
107 | type: GAUGE
108 | labels:
109 | quantile: "0.$4"
110 |
111 | # Generic gauges for MeanRate Percent
112 | # Ex) kafka.server<>MeanRate
113 | - pattern: kafka.(\w+)<>MeanRate
114 | name: kafka_$1_$2_$3_percent
115 | type: GAUGE
116 | - pattern: kafka.(\w+)<>Value
117 | name: kafka_$1_$2_$3_percent
118 | type: GAUGE
119 | - pattern: kafka.(\w+)<>Value
120 | name: kafka_$1_$2_$3_percent
121 | type: GAUGE
122 | labels:
123 | "$4": "$5"
--------------------------------------------------------------------------------
/local-test/scraping-config/jmxConfigTestFile.yml:
--------------------------------------------------------------------------------
1 | rules:
2 | - pattern : "kafka.controller.type=KafkaController.N*"
3 |
--------------------------------------------------------------------------------
/local-test/scraping-config/jmxconfig.yml:
--------------------------------------------------------------------------------
1 | hostPort: localhost:2020
2 | lowercaseOutputName: true
3 |
4 | rules:
5 | # Special cases and very specific rules
6 | - pattern : kafka.server<>Value
7 | name: kafka_server_$1_$2
8 | type: GAUGE
9 | labels:
10 | clientId: "$3"
11 | topic: "$4"
12 | partition: "$5"
13 | - pattern : kafka.server<>Value
14 | name: kafka_server_$1_$2
15 | type: GAUGE
16 | labels:
17 | clientId: "$3"
18 | broker: "$4:$5"
19 | - pattern : kafka.coordinator.(\w+)<>Value
20 | name: kafka_coordinator_$1_$2_$3
21 | type: GAUGE
22 |
23 | # Generic per-second counters with 0-2 key/value pairs
24 | - pattern: kafka.(\w+)<>Count
25 | name: kafka_$1_$2_$3_total
26 | type: COUNTER
27 | labels:
28 | "$4": "$5"
29 | "$6": "$7"
30 | - pattern: kafka.(\w+)<>Count
31 | name: kafka_$1_$2_$3_total
32 | type: COUNTER
33 | labels:
34 | "$4": "$5"
35 | - pattern: kafka.(\w+)<>Count
36 | name: kafka_$1_$2_$3_total
37 | type: COUNTER
38 |
39 | # Quota specific rules
40 | - pattern: kafka.server<>([a-z-]+)
41 | name: kafka_server_quota_$4
42 | type: GAUGE
43 | labels:
44 | resource: "$1"
45 | user: "$2"
46 | clientId: "$3"
47 | - pattern: kafka.server<>([a-z-]+)
48 | name: kafka_server_quota_$3
49 | type: GAUGE
50 | labels:
51 | resource: "$1"
52 | clientId: "$2"
53 | - pattern: kafka.server<>([a-z-]+)
54 | name: kafka_server_quota_$3
55 | type: GAUGE
56 | labels:
57 | resource: "$1"
58 | user: "$2"
59 |
60 | # Generic gauges with 0-2 key/value pairs
61 | - pattern: kafka.(\w+)<>Value
62 | name: kafka_$1_$2_$3
63 | type: GAUGE
64 | labels:
65 | "$4": "$5"
66 | "$6": "$7"
67 | - pattern: kafka.(\w+)<>Value
68 | name: kafka_$1_$2_$3
69 | type: GAUGE
70 | labels:
71 | "$4": "$5"
72 | - pattern: kafka.(\w+)<>Value
73 | name: kafka_$1_$2_$3
74 | type: GAUGE
75 |
76 | # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
77 | #
78 | # Note that these are missing the '_sum' metric!
79 | - pattern: kafka.(\w+)<>Count
80 | name: kafka_$1_$2_$3_count
81 | type: COUNTER
82 | labels:
83 | "$4": "$5"
84 | "$6": "$7"
85 | - pattern: kafka.(\w+)<>(\d+)thPercentile
86 | name: kafka_$1_$2_$3
87 | type: GAUGE
88 | labels:
89 | "$4": "$5"
90 | "$6": "$7"
91 | quantile: "0.$8"
92 | - pattern: kafka.(\w+)<>Count
93 | name: kafka_$1_$2_$3_count
94 | type: COUNTER
95 | labels:
96 | "$4": "$5"
97 | - pattern: kafka.(\w+)<>(\d+)thPercentile
98 | name: kafka_$1_$2_$3
99 | type: GAUGE
100 | labels:
101 | "$4": "$5"
102 | quantile: "0.$6"
103 | - pattern: kafka.(\w+)<>Count
104 | name: kafka_$1_$2_$3_count
105 | type: COUNTER
106 | - pattern: kafka.(\w+)<>(\d+)thPercentile
107 | name: kafka_$1_$2_$3
108 | type: GAUGE
109 | labels:
110 | quantile: "0.$4"
111 |
112 | # Generic gauges for MeanRate Percent
113 | # Ex) kafka.server<>MeanRate
114 | - pattern: kafka.(\w+)<>MeanRate
115 | name: kafka_$1_$2_$3_percent
116 | type: GAUGE
117 | - pattern: kafka.(\w+)<>Value
118 | name: kafka_$1_$2_$3_percent
119 | type: GAUGE
120 | - pattern: kafka.(\w+)<>Value
121 | name: kafka_$1_$2_$3_percent
122 | type: GAUGE
123 | labels:
124 | "$4": "$5"
--------------------------------------------------------------------------------
/local-test/scraping-config/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s # set default scrape interval to 15 seconds
3 |
4 | scrape_configs:
5 | # the job name is added as a label to any timeseries scraped from this config
6 | - job_name: kafka
7 |
8 | # override global default scrape interval
9 | scrape_interval: 10s
10 |
11 | static_configs:
12 | # point prometheus at the place to scrape
13 | - targets:
14 | - "localhost:3030"
--------------------------------------------------------------------------------
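This file points Prometheus at the JMX exporter's HTTP server on localhost:3030 and scrapes it every 10 seconds. One quick way to confirm the pipeline is wired up once `npm run exportJmx` and `npm run prometheus` are running is to ask Prometheus's query API whether the kafka job's target is up; a small sketch under those assumptions (the script name and nothing else is new):

// check-scrape.js (hypothetical helper): asks Prometheus whether the kafka job's target is up
const checkTarget = async () => {
  // PromQL 'up' is 1 when the target was scraped successfully, 0 otherwise
  const query = encodeURIComponent('up{job="kafka"}');
  const response = await fetch(`http://localhost:9090/api/v1/query?query=${query}`);
  const data = await response.json();
  console.log(JSON.stringify(data.data.result, null, 2));
};

checkTarget().catch((err) => console.error(err));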
/main/controllers/addressController.js:
--------------------------------------------------------------------------------
1 | // this is where the controller middleware for the address route/endpoint lives
2 |
3 | const fsp = require('fs').promises;
4 | const path = require('path');
5 | const { spawn } = require('child_process');
6 |
7 | const addressController = {};
8 |
9 | // error constructor for this controller
10 | class ACError {
11 | constructor (location, status, message) {
12 | this.log = 'An error occurred in addressController.' + location;
13 | this.status = status;
14 | this.message = {err: message}
15 | }
16 | }
17 |
18 |
19 | addressController.writeJmxConfig1 = (req, res, next) => {
20 | // gets information off the request body and puts a transformation of it onto res.locals
21 | console.log('entered writeJmxConfig1');
22 |
23 | const { address } = req.body;
24 | const fullAddress = 'localhost:' + address;
25 |
26 | res.locals.jmxConfig = `hostPort: ${fullAddress}\n`;
27 |
28 | return next();
29 | };
30 |
31 | addressController.writeJmxConfig2 = async (req, res, next) => {
32 | // write the jmx config file using the user inputted kafka address
33 | console.log('entered writeJmxConfig2');
34 |
35 | // get paths to configuration files
36 | const templateFileAddress = path.join(__dirname, '..', '..', 'local-test', 'scraping-config', 'jmxConfigTemplate.yml');
37 | const destination = path.join(__dirname, '..', '..', 'local-test', 'scraping-config', 'jmxconfig.yml');
38 |
39 | // read the information from the template file and append it to newFileString
40 | try {
41 | const contents = await fsp.readFile(templateFileAddress, 'utf8');
42 | res.locals.jmxConfig += contents;
43 | } catch (err) {
44 | return next(new ACError('writeJmxConfig2', 422, err));
45 | }
46 |
47 | // write the newFileString to the destination file
48 | try {
49 | await fsp.writeFile(destination, res.locals.jmxConfig, 'utf8');
50 | } catch (err) {
51 | return next(new ACError('writeJmxConfig2', 422, err));
52 | }
53 |
54 | return next();
55 | }
56 |
57 | addressController.connectToKafka = (req, res, next) => {
58 | // create child process that runs jmx exporter and connect it to the kafka cluster
59 | console.log('entered connectToKafka');
60 |
61 | const child = spawn('npm run exportJmx', {
62 | shell: true,
63 | // stdio: 'inherit',
64 | cwd: path.join(__dirname, '..', '..', 'local-test')
65 | });
66 |
67 | return next();
68 | };
69 |
70 | addressController.startPrometheus = (req, res, next) => {
71 | // create child process that runs prometheus and connect it to jmx exporter
72 | console.log('entered startPrometheus');
73 |
74 | const child = spawn('npm run prometheus', {
75 | shell: true,
76 | // stdio: 'inherit',
77 | cwd: path.join(__dirname, '..', '..', 'local-test')
78 | });
79 |
80 | return next();
81 | };
82 |
83 |
84 | module.exports = addressController;
--------------------------------------------------------------------------------
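writeJmxConfig1 reads req.body.address and prefixes it with 'localhost:', so the /address route (wired up in addressRouter.js below) can be driven by a plain port string. A hedged sketch of exercising it by hand against the Express server on port 3010; the port value 2020 matches the JMX_PORT used by the bootKaf1 script, but any JMX port your broker exposes would work:

// hypothetical manual test: kick off the JMX exporter / Prometheus / Grafana chain for a local broker
const submitAddress = async () => {
  const response = await fetch('http://localhost:3010/address', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ address: '2020' }), // JMX port; becomes 'localhost:2020' in jmxconfig.yml
  });
  console.log('status:', response.status);
};

submitAddress().catch((err) => console.error(err));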
/main/controllers/alertsController.js:
--------------------------------------------------------------------------------
1 | const path = require('path');
2 | const fs = require('fs')
3 | const ini = require('ini')
4 |
5 | const alertsController = {}
6 |
7 | alertsController.writeAlertsInfo = (req, res, next) => {
8 | try {
9 | const destination = path.join(__dirname, '../../grafana/grafana.ini')
10 |
11 | const { emailAddress, appPassword } = req.body
12 | console.log('alerts controller emailAddress, appPassword accepted!', 'entered email =>', emailAddress, 'enter appPassword =>', appPassword)
13 |
14 | //read grafana.ini file
15 | const config = ini.parse(fs.readFileSync(destination, 'utf-8'))
16 |
17 | //update grafana.ini file
18 | config['smtp'] = config['smtp'] || {};
19 | config['smtp']['user'] = emailAddress;
20 | config['smtp']['password'] = appPassword;
21 | config['smtp']['from_address'] = emailAddress;
22 |
23 | //write to grafana.ini file
24 | fs.writeFileSync(destination, ini.stringify(config));
25 |
26 | return next()
27 | } catch (err) {
28 | return next({ message: 'Error in alertsController =>', err });
29 | }
30 | }
31 |
32 |
33 | module.exports = alertsController
--------------------------------------------------------------------------------
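writeAlertsInfo reads grafana/grafana.ini, fills the [smtp] section's user, password, and from_address from the request body, and writes the file back. A minimal sketch of the request the alerts form needs to send; the field names come from the destructuring above, and the credential values are placeholders:

// hypothetical request: store SMTP credentials for Grafana email alerts
const submitAlertCredentials = async () => {
  const response = await fetch('http://localhost:3010/alerts', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      emailAddress: 'alerts@example.com',   // placeholder sender address
      appPassword: 'app-specific-password', // placeholder SMTP app password
    }),
  });
  console.log('status:', response.status);
};

submitAlertCredentials().catch((err) => console.error(err));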
/main/controllers/grafanaController.js:
--------------------------------------------------------------------------------
1 |
2 | const path = require('path');
3 | const { spawn } = require('child_process');
4 |
5 | const { question } = require('../../__mocks__/mockDashboards')
6 | const dashboardJSON = require('../dashboards/bigDashboard');
7 |
8 | const grafanaController = {};
9 |
10 | class GrafanaError {
11 | constructor(location, status, message) {
12 | this.log = 'An error occurred in grafanaController.' + location;
13 | this.status = status;
14 | this.message = {err: message};
15 | }
16 | }
17 |
18 | grafanaController.getPrometheus = async (req, res, next) => {
19 | console.log('entered getPrometheus');
20 | // check whether the user already has a prometheus datasource for their grafana
21 | // and if they do save its uid
22 |
23 | // since this is called twice, if information has already been gathered, skip it
24 | if (res.locals.prom) return next();
25 | try {
26 | const response = await fetch('http://localhost:3000/api/datasources/name/Prometheus');
27 | const data = await response.json();
28 |
29 | if (data.uid) {
30 | res.locals.promUid = data.uid;
31 | res.locals.prom = true;
32 | }
33 |
34 | } catch (err) {
35 | return next(new GrafanaError('getPrometheus', 500, err));
36 | }
37 |
38 | return next();
39 | };
40 |
41 | grafanaController.createPromSource = async (req, res, next) => {
42 | // if the user does not have prometheus set up as a data source, create it
43 | console.log('entered createPromSource');
44 |
45 | // skip if user has prometheus and we already have its uid
46 | if (res.locals.prom) return next();
47 |
48 | const body = {
49 | name: 'Prometheus',
50 | type: 'prometheus',
51 | url: 'http://localhost:9090',
52 | access: 'proxy',
53 | basicAuth: false
54 | };
55 |
56 | try {
57 | await fetch('http://localhost:3000/api/datasources', {
58 | method: 'POST',
59 | body: JSON.stringify(body),
60 | headers: {
61 | 'Content-Type': 'application/json'
62 | }
63 | });
64 | } catch (err) {
65 | return next(new GrafanaError('createPromSource', 500, err));
66 | }
67 |
68 | return next();
69 | };
70 |
71 | grafanaController.generateDashJson = (req, res, next) => {
72 | // generate the dashboard json based on gathered prometheus uid
73 | console.log('entered generateDashJson');
74 |
75 | try {
76 | const array = (process.env.NODE_ENV === 'test') ? question.dashboard.panels : dashboardJSON.dashboard.panels;
77 | for (let i = 0; i < array.length; i += 1) {
78 | array[i].datasource.uid = res.locals.promUid;
79 | if (array[i].targets) {
80 | for (let j = 0; j < array[i].targets.length; j += 1) {
81 | array[i].targets[j].datasource.uid = res.locals.promUid
82 | }
83 | }
84 | }
85 | } catch (err){
86 | return next(new GrafanaError('generateDashJson', 500, err));
87 | }
88 |
89 | res.locals.dashboardJSON = (process.env.NODE_ENV === 'test') ? question : dashboardJSON;
90 | return next();
91 | };
92 |
93 | grafanaController.createDashboard = async (req, res, next) => {
94 | // create a dashboard in grafana
95 |
96 | console.log('entered createDashboard');
97 | // make post request to grafana dashboard api
98 | try {
99 | const data = await fetch('http://localhost:3000/api/dashboards/db', {
100 | method: 'POST',
101 | body: JSON.stringify(res.locals.dashboardJSON),
102 | headers: {
103 | "Content-Type": 'application/json'
104 | }});
105 |
106 | const text = await data.json();
107 | console.log('grafanaController.createDashboard ~ text:', text);
108 |
109 | // save response to res.locals for testing purposes
110 | res.locals.grafanaResponse = text;
111 |
112 | return next();
113 |
114 | } catch (err) {
115 | return next(new GrafanaError('createDashboard', 500, err));
116 | }
117 | };
118 |
119 | module.exports = grafanaController;
--------------------------------------------------------------------------------
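These handlers call the Grafana HTTP API on localhost:3000 without credentials and assume Grafana is already reachable. A hedged sketch of an extra middleware that could sit at the front of the chain in addressRouter.js, reusing the GrafanaError class above, so a clearer error surfaces when Grafana is not up yet (the middleware name is hypothetical; /api/health is Grafana's standard health endpoint):

// hypothetical pre-flight check: fail fast if Grafana is unreachable before touching its API
grafanaController.checkGrafana = async (req, res, next) => {
  try {
    const response = await fetch('http://localhost:3000/api/health');
    if (!response.ok) throw new Error(`Grafana health check returned ${response.status}`);
    return next();
  } catch (err) {
    return next(new GrafanaError('checkGrafana', 503, err));
  }
};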
/main/controllers/kafkaMonitoringController.js:
--------------------------------------------------------------------------------
1 | const Docker = require('dockerode');
2 | const fs = require('fs');
3 | const path = require('path');
4 | const { promisify } = require('util');
5 | const docker = new Docker();
6 | const pullImage = promisify(docker.pull.bind(docker));
7 |
8 | const kafkaMonitoringController = {};
9 |
10 | class KMError {
11 | constructor(location, status, message) {
12 | this.log = 'An error occurred in kafkaMonitoringController.' + location;
13 | this.status = status;
14 | this.message = { err: message };
15 | }
16 | }
17 |
18 | function followPullProgress(stream) {
19 | return new Promise((resolve, reject) => {
20 | docker.modem.followProgress(stream, (err, res) =>
21 | err ? reject(err) : resolve(res),
22 | );
23 | });
24 | }
25 |
26 | kafkaMonitoringController.pullDockerImages = async (req, res, next) => {
27 | try {
28 | const promStream = await pullImage('prom/prometheus:latest');
29 | await followPullProgress(promStream);
30 | const grafanaStream = await pullImage('grafana/grafana:latest');
31 | await followPullProgress(grafanaStream);
32 | next();
33 | } catch (err) {
34 | next(new KMError('pullDockerImages', 424, err));
35 | }
36 | };
37 |
38 | kafkaMonitoringController.createNetwork = async (req, res, next) => {
39 | try {
40 | const networkName = `monitoring_network_${Date.now()}`;
41 | await docker.createNetwork({ Name: networkName, Driver: 'bridge' });
42 | req.networkName = networkName;
43 | next();
44 | } catch (err) {
45 | next(new KMError('createNetwork', 424, err));
46 | }
47 | };
48 |
49 | kafkaMonitoringController.generatePrometheusConfig = (req, res, next) => {
50 | try {
51 | const jmxTargets = req.body.address.map(
52 | port => `host.docker.internal:${port}`,
53 | );
54 | const prometheusConfigTemplate = `
55 | global:
56 | scrape_interval: 15s
57 |
58 | scrape_configs:
59 | - job_name: 'kafka'
60 | static_configs:
61 | - targets: ${JSON.stringify(jmxTargets)}
62 | `;
63 | fs.writeFileSync(
64 | path.join(__dirname, 'prometheus.yml'),
65 | prometheusConfigTemplate.trim(),
66 | );
67 | next();
68 | } catch (err) {
69 | next(new KMError('generatePrometheusConfig', 424, err));
70 | }
71 | };
72 |
73 | kafkaMonitoringController.stopAndRemoveContainer = async (
74 | containerName,
75 | req,
76 | res,
77 | next,
78 | ) => {
79 | try {
80 | const container = docker.getContainer(containerName);
81 | const data = await container.inspect();
82 | if (data.State.Running) {
83 | await container.stop();
84 | }
85 | await container.remove();
86 | next();
87 | } catch (err) {
88 | if (err.statusCode !== 404) {
89 | next(new KMError('stopAndRemoveContainer', 424, err));
90 | } else {
91 | next();
92 | }
93 | }
94 | };
95 |
96 | kafkaMonitoringController.createPrometheusContainer = async (
97 | req,
98 | res,
99 | next,
100 | ) => {
101 | try {
102 | const promConfigPath = path.join(__dirname, 'prometheus.yml');
103 | const container = await docker.createContainer({
104 | name: 'prometheus',
105 | Image: 'prom/prometheus:latest',
106 | Volumes: { '/etc/prometheus/prometheus.yml': {} },
107 | HostConfig: {
108 | Binds: [`${promConfigPath}:/etc/prometheus/prometheus.yml`],
109 | PortBindings: { '9090/tcp': [{ HostPort: '9090' }] },
110 | },
111 | ExposedPorts: { '9090/tcp': {} },
112 | NetworkingConfig: { EndpointsConfig: { [req.networkName]: {} } },
113 | });
114 | await container.start();
115 | next();
116 | } catch (err) {
117 | next(new KMError('createPrometheusContainer', 424, err));
118 | }
119 | };
120 |
121 | kafkaMonitoringController.createGrafanaContainer = async (req, res, next) => {
122 | try {
123 | const grafanaEnv = [
124 | 'GF_SECURITY_ADMIN_USER=admin',
125 | 'GF_SECURITY_ADMIN_PASSWORD=admin',
126 | 'GF_USERS_ALLOW_SIGN_UP=false',
127 | 'GF_SECURITY_ALLOW_EMBEDDING=true',
128 | 'GF_AUTH_ANONYMOUS_ENABLED=true',
129 | 'GF_INSTALL_PLUGINS=grafana-clock-panel',
130 | ];
131 | const grafanaBinds = [
132 | `${path.join(
133 | __dirname,
134 | '../../grafana/Dockerfile/provisioning/dashboards',
135 | )}:/etc/grafana/provisioning/dashboards`,
136 | `${path.join(
137 | __dirname,
138 | '../../grafana/Dockerfile/provisioning/datasources/datasource.yml',
139 | )}:/etc/grafana/provisioning/datasources/datasource.yml`,
140 | `${path.join(
141 | __dirname,
142 | '../../grafana/grafana.ini',
143 | )}:/etc/grafana/grafana.ini`,
144 | 'grafana-storage:/var/lib/grafana',
145 | ];
146 | const container = await docker.createContainer({
147 | name: 'grafana',
148 | Image: 'grafana/grafana:latest',
149 | Volumes: { '/var/lib/grafana': {} },
150 | ExposedPorts: { '3000/tcp': {} },
151 | HostConfig: {
152 | Binds: grafanaBinds,
153 | PortBindings: { '3000/tcp': [{ HostPort: '3000' }] },
154 | },
155 | Env: grafanaEnv,
156 | NetworkingConfig: { EndpointsConfig: { [req.networkName]: {} } },
157 | });
158 | await container.start();
159 | next();
160 | } catch (err) {
161 | next(new KMError('createGrafanaContainer', 424, err));
162 | }
163 | };
164 |
165 | module.exports = kafkaMonitoringController;
166 |
--------------------------------------------------------------------------------
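createNetwork generates a fresh monitoring_network_<timestamp> on every request, and nothing here removes old networks or the named containers once monitoring is no longer needed. A hedged cleanup sketch with dockerode, assuming pruneNetworks is available (it maps to Docker's /networks/prune); the container names match the ones created above:

// hypothetical cleanup script: stop and remove the monitoring containers, then prune unused networks
const Docker = require('dockerode');
const docker = new Docker();

const cleanup = async () => {
  for (const name of ['prometheus', 'grafana']) {
    try {
      const container = docker.getContainer(name);
      const data = await container.inspect();
      if (data.State.Running) await container.stop();
      await container.remove();
    } catch (err) {
      if (err.statusCode !== 404) throw err; // ignore containers that no longer exist
    }
  }
  // removes dangling monitoring_network_* networks once no container uses them
  await docker.pruneNetworks();
};

cleanup().catch((err) => console.error(err));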
/main/controllers/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s
3 |
4 | scrape_configs:
5 | - job_name: 'kafka'
6 | static_configs:
7 | - targets: ["host.docker.internal:9991","host.docker.internal:9992","host.docker.internal:9993"]
--------------------------------------------------------------------------------
/main/electron.js:
--------------------------------------------------------------------------------
1 | // this is the entry point for the app
2 |
3 | const { app, BrowserWindow, ipcMain } = require('electron');
4 | const path = require('path');
5 |
6 | // require in express server so that it gets booted when electron app is ready
7 | const expressServer = require('./expressServer');
8 |
9 | // electron reloader documentation recommends using a try/catch block to avoid
10 | // crashing the app if the node environment is in production
11 | try {
12 | const electronReloader = require('electron-reloader');
13 | electronReloader(module, {
14 | ignore: [path.join(__dirname), path.join(__dirname, '..', 'src')],
15 | });
16 | } catch {
17 | console.log('electron reloader failed');
18 | }
19 |
20 | const createWindow = () => {
21 | // create a browser window
22 | const win = new BrowserWindow({
23 | height: 600, // look into auto full-screen?
24 | width: 800,
25 | icon: path.join(__dirname, '../assets/App-Icons/Icon2/appIcon.png'),
26 | webPreferences: {
27 | preload: path.join(__dirname, 'preload.js'),
28 | },
29 | });
30 |
31 | // load the index.html into it
32 | win.loadFile(path.join(__dirname, '..', 'dist', 'index.html'));
33 | };
34 |
35 | // when electron is finished initializing and the 'ready' event is
36 | // emitted, boot up express server and run createWindow
37 | app.on('ready', () => {
38 | expressServer.listen(3010, () => {
39 | console.log('Server listening on port 3010');
40 | });
41 | createWindow();
42 | });
43 |
--------------------------------------------------------------------------------
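createWindow points webPreferences.preload at main/preload.js, but no preload file is checked in here; with Electron's default context isolation, the renderer only sees whatever a preload explicitly exposes. A minimal sketch of what such a file might contain (the exposed name and surface are hypothetical; the React code currently reaches the Express server over fetch instead):

// main/preload.js (hypothetical sketch): expose a tiny, explicit API to the renderer
const { contextBridge } = require('electron');

contextBridge.exposeInMainWorld('kafopticon', {
  // static value the renderer could read instead of hard-coding the Express port
  serverPort: 3010,
});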
/main/expressServer.js:
--------------------------------------------------------------------------------
1 | // this is the entry point for our express server
2 |
3 | const express = require('express');
4 | const app = express();
5 | const path = require('path');
6 |
7 | const addressRouter = require('./routers/addressRouter');
8 | const kafkaMonitoringRouter = require('./routers/kafkaMonitoringRouter');
9 | const alertsRouter = require('./routers/alertsRouter')
10 |
11 | const PORT = 3010;
12 |
13 | // parse incoming data
14 | app.use(express.json());
15 | app.use(express.urlencoded({ extended: false }));
16 |
17 | // serve static files
18 | // may be unnecessary for electron but is useful in development if you want to work on
19 | // localhost:3010 in the browser
20 | // app.use(express.static('dist'));
21 |
22 | // handle form data to address route with address router
23 | app.use('/address', addressRouter);
24 |
25 | app.use('/alerts', alertsRouter);
26 |
27 | app.use('/setup-kafka-monitoring', kafkaMonitoringRouter);
28 |
29 | // handle unknown routes
30 | app.use((req, res) => {
31 | res.status(404).send();
32 | });
33 |
34 | // global error handler
35 | app.use((err, req, res, next) => {
36 | const defaultErr = {
37 | log: 'An error occurred',
38 | status: 500,
39 | message: { err: 'Watch out for those errors' },
40 | };
41 |
42 | const trueError = {
43 | ...defaultErr,
44 | ...err,
45 | };
46 | console.log(trueError.message);
47 | console.log(trueError.log);
48 | res.status(trueError.status).send(trueError.message);
49 | });
50 |
51 | // server is started in electron.js--only use this code if testing backend without frontend
52 | // app.listen(PORT, () => {
53 | // console.log('Listening on port: ' + PORT);
54 | // });
55 |
56 | module.exports = app;
57 |
--------------------------------------------------------------------------------
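Each controller passes errors shaped like { log, status, message } to next(), and the global handler spreads them over defaultErr so any missing field falls back to a default. A short illustration of that contract; the error object below is only an example, not taken from the codebase:

// example: what the global error handler does with a controller error
const defaultErr = {
  log: 'An error occurred',
  status: 500,
  message: { err: 'Watch out for those errors' },
};

// a controller might call next({ status: 424, message: { err: 'docker pull failed' } })
const err = { status: 424, message: { err: 'docker pull failed' } };
const trueError = { ...defaultErr, ...err };

console.log(trueError);
// -> { log: 'An error occurred', status: 424, message: { err: 'docker pull failed' } }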
/main/routers/addressRouter.js:
--------------------------------------------------------------------------------
1 | // get express and Router
2 |
3 | const express = require('express');
4 | const addressController = require('../controllers/addressController');
5 | const grafanaController = require('../controllers/grafanaController');
6 |
7 | const router = express.Router();
8 |
9 | router.post('/',
10 | addressController.writeJmxConfig1,
11 | addressController.writeJmxConfig2,
12 | addressController.connectToKafka,
13 | addressController.startPrometheus,
14 | grafanaController.getPrometheus,
15 | grafanaController.createPromSource,
16 | grafanaController.getPrometheus,
17 | grafanaController.generateDashJson,
18 | grafanaController.createDashboard,
19 | (req, res) => {
20 | res.redirect('back');
21 | }
22 | );
23 |
24 | module.exports = router;
--------------------------------------------------------------------------------
/main/routers/alertsRouter.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const alertsController = require('../controllers/alertsController');
3 |
4 | const router = express.Router();
5 |
6 | router.post('/', alertsController.writeAlertsInfo, (req, res) => {
7 | res.sendStatus(200);
8 | })
9 |
10 |
11 |
12 | module.exports = router;
--------------------------------------------------------------------------------
/main/routers/kafkaMonitoringRouter.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const kafkaMonitoringController = require('../controllers/kafkaMonitoringController');
3 | const router = express.Router();
4 |
5 | router.post(
6 | '/',
7 | kafkaMonitoringController.pullDockerImages,
8 | kafkaMonitoringController.createNetwork,
9 | kafkaMonitoringController.generatePrometheusConfig,
10 | (req, res, next) =>
11 | kafkaMonitoringController.stopAndRemoveContainer(
12 | 'prometheus',
13 | req,
14 | res,
15 | next,
16 | ),
17 | (req, res, next) =>
18 | kafkaMonitoringController.stopAndRemoveContainer('grafana', req, res, next),
19 | kafkaMonitoringController.createPrometheusContainer,
20 | kafkaMonitoringController.createGrafanaContainer,
21 | (req, res) => {
22 | res
23 | .status(200)
24 | .json({ message: 'Monitoring setup initiated successfully.' });
25 | },
26 | );
27 |
28 | module.exports = router;
29 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kafopticon",
3 | "version": "1.0.0",
4 | "description": "kafka surveillance",
5 | "scripts": {
6 | "test": "jest --verbose",
7 | "build": "webpack --config ./webpack.config.js --watch",
8 | "start": "concurrently \"npm run build\" \"electron main/electron.js\" --kill-others",
9 | "server": "nodemon main/expressServer.js",
10 | "bootZoo": "zookeeper-server-start.sh ./local-test/kafka-config/zookeeper.properties",
11 | "bootKaf1": "JMX_PORT=2020 kafka-server-start.sh ./local-test/kafka-config/server1.properties",
12 | "bootKaf2": "kafka-server-start.sh ./local-test/kafka-config/server2.properties",
13 | "bootKaf3": "kafka-server-start.sh ./local-test/kafka-config/server3.properties",
14 | "consume": "kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic",
15 | "produce": "node ./local-test/producers-and-consumers/producer.js",
16 | "exportJmx": "java -jar local-test/jmx_prometheus_httpserver-0.19.0.jar 3030 ./local-test/scraping-config/jmxconfig.yml",
17 | "prometheus": "prometheus --config.file=./local-test/scraping-config/prometheus.yml"
18 | },
19 | "repository": {
20 | "type": "git",
21 | "url": "git+https://github.com/oslabs-beta/KafOpticon.git"
22 | },
23 | "author": "Andrew Cirt, Ernesto Osorio, Joseph Paul, Zack Weiss",
24 | "license": "ISC",
25 | "bugs": {
26 | "url": "https://github.com/oslabs-beta/KafOpticon/issues"
27 | },
28 | "homepage": "https://github.com/oslabs-beta/KafOpticon#readme",
29 | "devDependencies": {
30 | "@babel/core": "^7.23.2",
31 | "@babel/preset-env": "^7.23.2",
32 | "@babel/preset-react": "^7.22.15",
33 | "@types/jest": "^29.5.8",
34 | "babel-loader": "^9.1.3",
35 | "concurrently": "^8.2.2",
36 | "css-loader": "^6.8.1",
37 | "electron": "^27.0.3",
38 | "electron-reloader": "^1.2.3",
39 | "file-loader": "^6.2.0",
40 | "html-webpack-plugin": "^5.5.3",
41 | "ini": "^4.1.1",
42 | "jest": "^29.7.0",
43 | "nodemon": "^3.0.1",
44 | "react-router": "^6.18.0",
45 | "react-router-dom": "^6.18.0",
46 | "sass-loader": "^13.3.2",
47 | "style-loader": "^3.3.3",
48 | "url-loader": "^4.1.1",
49 | "webpack": "^5.89.0",
50 | "webpack-cli": "^5.1.4"
51 | },
52 | "dependencies": {
53 | "axios": "^1.6.2",
54 | "dockerode": "^4.0.0",
55 | "express": "^4.18.2",
56 | "kafkajs": "^2.2.4",
57 | "react": "^18.2.0",
58 | "react-dom": "^18.2.0"
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/src/app.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import Header from '../src/components/Header.jsx'
3 | import MainContainer from './components/MainContainer.jsx';
4 | import { HashRouter as Router, Route, Routes } from 'react-router-dom';
5 | import '../src/styles/styles.css'
6 |
7 | //create app element
8 | function App () {
9 | return (
10 | // wraps app elements with react-router
11 |
12 |
16 |
17 |
18 | );
19 | }
20 |
21 | export default App;
--------------------------------------------------------------------------------
/src/components/AlertsContainer.jsx:
--------------------------------------------------------------------------------
1 | import React from "react"
2 |
3 | function alertsContainer(){
4 | const handleClick = () => {
5 | alert('Button Clicked!')
6 | };
7 | //accept email and password credentials to forward alerts from grafana to selected emails
8 | return (
9 |
18 | )
19 | }
20 |
21 | export default alertsContainer;
--------------------------------------------------------------------------------
/src/components/Header.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState } from 'react';
2 |
3 | function Header() {
4 | const [clusterURL, setClusterURL] = useState('');
5 | const [sendToMonitoring, setSendToMonitoring] = useState(false);
6 | const [sendToAddress, setSendToAddress] = useState(false);
7 | const [isLoading, setIsLoading] = useState(false);
8 |
9 | const handleCheckboxChange = e => {
10 | if (e.target.name === 'sendToMonitoring') {
11 | setSendToMonitoring(e.target.checked);
12 | setSendToAddress(!e.target.checked);
13 | } else if (e.target.name === 'sendToAddress') {
14 | setSendToAddress(e.target.checked);
15 | setSendToMonitoring(!e.target.checked);
16 | }
17 | };
18 | const handleSubmit = async e => {
19 | e.preventDefault();
20 | setIsLoading(true);
21 |
22 | const portsArray = clusterURL.split(',').map(port => port.trim());
23 |
24 | const requestOptions = {
25 | method: 'POST',
26 | headers: { 'Content-Type': 'application/json' },
27 | body: JSON.stringify({ address: portsArray }),
28 | };
29 |
30 | try {
31 | let response;
32 | if (sendToMonitoring) {
33 | response = await fetch(
34 | 'http://localhost:3010/setup-kafka-monitoring',
35 | requestOptions,
36 | );
37 | } else if (sendToAddress) {
38 | response = await fetch('http://localhost:3010/address', requestOptions);
39 | }
40 |
41 | if (response && response.ok) {
42 | setIsLoading(false);
43 | } else {
44 | throw new Error('Request failed');
45 | }
46 | } catch (error) {
47 | console.error('Error sending request:', error);
48 | alert('Error sending request');
49 | setIsLoading(false);
50 | }
51 | };
52 |
53 | return (
54 |
99 | );
100 | }
101 |
102 | export default Header;
103 |
--------------------------------------------------------------------------------
/src/components/MainContainer.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Route, Routes } from 'react-router-dom';
3 | import Menu from './Menu.jsx';
4 | import PerformanceContainer from './PerformanceContainer.jsx';
5 | import AlertsContainer from './AlertsContainer.jsx';
6 | import MetricsContainer from './MetricsContainer.jsx';
7 | import UserDashboard from './UserDashboard.jsx'
8 |
9 | //creates maincontainer
10 | function MainContainer() {
11 | return (
12 |
13 |
14 | {/* react-router routes/paths for all the different dashboards and alerts */}
15 |
16 | } />
17 | } />
18 | } />
19 | } />
20 |
21 |
22 | );
23 | }
24 |
25 | export default MainContainer;
26 |
--------------------------------------------------------------------------------
/src/components/Menu.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Link } from 'react-router-dom';
3 |
4 | //creates the main menu
5 | function Menu() {
6 | return (
7 |
18 | )
19 | };
20 |
21 | export default Menu;
--------------------------------------------------------------------------------
/src/components/MetricsContainer.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | //creates the main metrics container/health container
4 | function MetricsContainer() {
5 | return (
6 |
7 | {/* renders a section element with an iframe that contains a grafana dashboard with a specific metric */}
8 |
16 |
23 |
30 |
37 |
44 |
51 |
52 | );
53 | }
54 |
55 | export default MetricsContainer;
56 |
--------------------------------------------------------------------------------
/src/components/PerformanceContainer.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | //creates performance metrics container
4 | function PerformanceContainer() {
5 | return (
6 |
7 | {/* renders a section element with an iframe that contains a grafana dashboard with a specific metric */}
8 |
9 | {/* panel url from grafana */}
10 |
11 |
12 |
15 |
18 |
21 |
24 |
27 |
28 | )
29 | };
30 |
31 | export default PerformanceContainer;
--------------------------------------------------------------------------------
/src/components/UserDashboard.jsx:
--------------------------------------------------------------------------------
1 | import React, { useEffect, useState } from 'react';
2 |
3 | //creates user customizable dashboard
4 | function UserContainer() {
5 | //state for iframes and dropdown menu
6 | const [iframe, setIframe] = useState([])
7 | const [menuOpen, setMenuOpen] = useState(false);
8 |
9 | //changes iframe's state with a new iframe that contains a unique id and a metric key
10 | const handleAdd = () => {
11 | const newIframe = [...iframe, { id: Date.now(), metric: '' }]
12 | setIframe(newIframe)
13 | }
14 | //deletes an iframe based on iframe unique id
15 | const deleteIframe = (id) => {
16 | const delIframe = iframe.filter(frame => frame.id !== id);
17 | setIframe(delIframe);
18 | }
19 | //sets state for dropdown menu show/hide
20 | const menu = () => {
21 | setMenuOpen(!menuOpen);
22 | }
23 |
24 | //contains the urls of ten pre-selected metric panels
25 | const metricsCont = {
26 | 1: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=17",
27 | 2: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=18",
28 | 3: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=16",
29 | 4: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=19",
30 | 5: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=20",
31 | 6: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=21",
32 | 7: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=22",
33 | 8: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=23",
34 | 9: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=24",
35 | 10: "http://localhost:3000/d-solo/d9098b29-ef80-4e40-86bc-b28bd6e85756/test-dash?refresh=10s&orgId=1&from=now-1h&to=now&panelId=25"
36 | }
37 |
38 | //sets the metric key in iframe to the matching url for each metric in the dropdown menu
39 | const metrichandle = (iframeIndex, metric) => {
40 | const newIframe = [...iframe];
41 | newIframe[iframeIndex].metric = metricsCont[metric];
42 | setIframe(newIframe)
43 | }
44 |
45 | //renders user dashboard elements
46 | return (
47 |
48 |
49 |
53 |
76 | {/* renders iframe elements */}
77 | {iframe.map(({ id, metric },i) => {
78 | return (
79 |
80 |
81 | {/* iframe that contains the url to the panel that corresponds to selected metric */}
82 |
83 |
84 |
88 |
89 | )
90 | })}
91 |
92 |
93 |
94 | )
95 | };
96 |
97 | export default UserContainer;
--------------------------------------------------------------------------------
/src/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | React App
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/src/react.jsx:
--------------------------------------------------------------------------------
1 | // this file is react's entry point into the application
2 |
3 | import React from 'react';
4 | import { createRoot } from 'react-dom/client';
5 | import App from './app.jsx';
6 |
7 |
8 |
9 | const main = document.querySelector('#root');
10 |
11 | const root = createRoot(main);
12 | root.render(<App />);
13 |
--------------------------------------------------------------------------------
/src/styles/styles.css:
--------------------------------------------------------------------------------
1 | body {
2 | background-color: rgb(5, 5, 46);
3 | margin: 0 auto;
4 | width: 100vw;
5 | height: 100vh;
6 | background-repeat: no-repeat;
7 | background-position: center;
8 | background-attachment: fixed;
9 | background-size: cover;
10 | }
11 |
12 | #root {
13 | position: relative;
14 | height: 100%;
15 | }
16 |
17 | #headerLog {
18 | display: flex;
19 | /* position: relative; */
20 | justify-content: space-between;
21 |
22 | align-items: center;
23 | top: 10px;
24 | background-color: rgb(60, 114, 186, 0.3);
25 | height: 100px;
26 | width: 96.5%;
27 | margin-left: 20px;
28 | border-style: none;
29 | box-shadow: 10px 5px 5px rgb(10, 135, 213);
30 | border-color: black;
31 | border-radius: 10px;
32 | }
33 |
34 | #mainContainer {
35 | position: relative;
36 | display: flex;
37 | flex-direction: row;
38 | justify-content: space-between;
39 | top: 20px;
40 | }
41 |
42 | #metricsContainer {
43 | position: relative;
44 | display: flex;
45 | flex-direction: row;
46 | flex-wrap: wrap;
47 | justify-content: space-between;
48 | align-content: flex-start;
49 | background-color: rgb(60, 114, 186, 0.3);
50 | margin-right: 30px;
51 | border-style: none;
52 | border-color: black;
53 | box-shadow: 10px 5px 5px rgb(1, 116, 190);
54 | border-radius: 10px;
55 | }
56 |
57 | #alertsContainer {
58 | position: relative;
59 | display: flex;
60 | flex-direction: row;
61 | flex-wrap: wrap;
62 | /* flex-grow: 1; */
63 | justify-content: flex-start;
64 | align-content: flex-start;
65 | background-color: rgb(60, 114, 186, 0.3);
66 | margin-right: 30px;
67 | border-style: none;
68 | border-color: black;
69 | box-shadow: 10px 5px 5px rgb(1, 116, 190);
70 | /* height: 100%; */
71 | width: 100%;
72 | border-radius: 10px;
73 | }
74 |
75 | #menu {
76 | position: relative;
77 | display: flex;
78 | flex-direction: column;
79 | justify-content: space-between;
80 | margin-left: 20px;
81 | margin-right: 20px;
82 | background-color: rgb(60, 114, 186, 0.3);
83 | border-style: none;
84 | border-color: black;
85 | height: 650px;
86 | width: 300px;
87 | border-radius: 10px;
88 | box-shadow: 10px 5px 5px rgb(1, 116, 190);
89 | }
90 |
91 | .metricBox {
92 | position: relative;
93 | top: 0;
94 | height: 30%;
95 | width: 45%;
96 | margin: 10px;
97 | text-align: center;
98 | justify-content: space-between;
99 | background-color: none;
100 | border-style: solid;
101 | border-color: rgb(80, 185, 250);
102 | border-width: 4px;
103 | min-width: 500px;
104 | min-height: 300px;
105 | max-height: 300px;
106 | }
107 | #kafopticonbg {
108 | height: 100px;
109 | }
110 |
111 | .formContainer {
112 | position: relative;
113 | display: flex;
114 | flex-direction: row;
115 | align-items: center;
116 | /* justify-content: flex-end; */
117 | margin-top: 15px;
118 | margin-right: 30px;
119 | height: 70px;
120 | width: 600px;
121 | }
122 |
123 | #submitbutton {
124 | height: 40px;
125 | width: 150px;
126 | background-color: rgb(60, 175, 247);
127 | padding: 10px;
128 | border-radius: 7px;
129 | font-size: 14px;
130 | font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande',
131 | 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
132 | color: white;
133 | font-weight: bold;
134 | border-style: solid;
135 | border-color: white;
136 | margin-left: 5px;
137 | }
138 |
139 | #mainform {
140 | display: flex;
141 | align-items: center;
142 | position: relative;
143 | height: 70px;
144 | width: 550px;
145 | margin-top: 10px;
146 | }
147 |
148 | #input {
149 | padding: 10px;
150 | border-radius: 7px;
151 | border-style: solid;
152 | width: 360px;
153 | border-color: white;
154 | margin-right: 5px;
155 | }
156 |
157 | #chart {
158 | position: relative;
159 | margin-top: 0px;
160 | }
161 |
162 | .menuButton {
163 | height: 40px;
164 | width: 200px;
165 | background-color: rgb(60, 175, 247);
166 | padding: 10px;
167 | border-radius: 7px;
168 | font-size: 14px;
169 | font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande',
170 | 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
171 | color: white;
172 | font-weight: bold;
173 | border-style: solid;
174 | border-color: white;
175 | margin-left: 50px;
176 | }
177 |
178 | .clock {
179 | position: relative;
180 | margin-left: 25px;
181 | }
182 |
183 | .checkbox-label {
184 | color: yellow;
185 | }
186 |
187 | .checkboxes {
188 | display: flex;
189 | flex-direction: column;
190 | margin-right: 10px;
191 | }
192 |
193 | #alerts {
194 | background-color: orange;
195 | }
196 |
197 | #addPanel {
198 | height: 40px;
199 | width: 200px;
200 | background-color: rgb(60, 175, 247);
201 | padding: 10px;
202 | border-radius: 7px;
203 | font-size: 14px;
204 | font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande',
205 | 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
206 | color: white;
207 | font-weight: bold;
208 | border-style: solid;
209 | border-color: white;
210 | margin-left: 10px;
211 | }
212 |
213 | #dropMenu {
214 | height: 40px;
215 | width: 200px;
216 | background-color: rgb(60, 175, 247);
217 | padding: 10px;
218 | border-radius: 7px;
219 | font-size: 14px;
220 | font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande',
221 | 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
222 | color: white;
223 | font-weight: bold;
224 | border-style: solid;
225 | border-color: white;
226 | margin-left: 10px;
227 | }
228 |
229 | #deletePanel {
230 | height: 40px;
231 | width: 200px;
232 | background-color: rgb(60, 175, 247);
233 | padding: 10px;
234 | border-radius: 7px;
235 | font-size: 14px;
236 | font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande',
237 | 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
238 | color: white;
239 | font-weight: bold;
240 | border-style: solid;
241 | border-color: white;
242 | margin-left: 50px;
243 | }
244 |
245 | #dropDownMenu {
246 | width: 75%;
247 | min-width: 800px;
248 | }
249 |
250 | #menuSection {
251 | position: relative;
252 | background: orange;
253 | border-style: solid;
254 | border-color: white;
255 | width: 300px;
256 | border-radius: 5px;
257 | margin-left: 50px;
258 | max-height: 70px;
259 | overflow-y: auto;
260 | }
261 |
262 | .menu {
263 | position: relative;
264 | list-style: none;
265 | color: white;
266 | font-size: 14px;
267 | font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande',
268 | 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
269 | font-weight: bold;
270 | margin-right: 20px;
271 | }
272 |
273 | .menu li:hover {
274 | color: rgb(60, 114, 186);
275 | cursor: pointer;
276 | }
277 |
278 | button:hover {
279 | cursor: pointer;
280 | }
281 | .loading-alert {
282 | position: fixed;
283 | top: 0;
284 | left: 0;
285 | width: 100%;
286 | height: 100%;
287 | z-index: 1000; /* High z-index to be on top */
288 | background: rgba(255, 255, 255, 0.8); /* Semi-transparent background */
289 | display: flex;
290 | justify-content: center;
291 | align-items: center;
292 | }
293 |
294 | #addedPanel {
295 | position: relative;
296 | min-width: 500px;
297 | min-height: 350px;
298 | max-height: 350px;
299 | width: 48%;
300 | height: 33%;
301 | }
302 |
303 | .userMetricBox {
304 | position: relative;
305 | top: 0;
306 | height: 81%;
307 | width: 95%;
308 | margin: 10px;
309 | text-align: center;
310 | justify-content: space-between;
311 | background-color: none;
312 | border-style: solid;
313 | border-color: rgb(80, 185, 250);
314 | border-width: 4px;
315 | }
316 |
317 | #panelDeleteButton {
318 | position: relative;
319 | }
320 |
321 | #userDashboardContainer {
322 | position: relative;
323 | display: flex;
324 | flex-direction: row;
325 | flex-wrap: wrap;
326 | justify-content: space-between;
327 | align-content: flex-start;
328 | background-color: rgb(60, 114, 186, 0.3);
329 | margin-right: 30px;
330 | border-style: none;
331 | border-color: black;
332 | box-shadow: 10px 5px 5px rgb(1, 116, 190);
333 | border-radius: 10px;
334 | overflow-y: auto;
335 | width: 80%;
336 | }
337 |
338 | #addPanelButton {
339 | width: 10%;
340 | }
341 |
--------------------------------------------------------------------------------
/webpack.config.js:
--------------------------------------------------------------------------------
1 | const path = require('path');
2 | const HtmlWebpackPlugin = require('html-webpack-plugin');
3 |
4 | module.exports = [
5 | {
6 | mode: 'development',
7 | entry: path.join(__dirname, 'src', 'react.jsx'),
8 | target: 'electron-renderer',
9 | output: {
10 | path: path.join(__dirname, 'dist'),
11 | filename: 'react.js',
12 | },
13 | module: {
14 | rules: [
15 | {
16 | test: /\.jsx?$/,
17 | exclude: /node_modules/,
18 | use: {
19 | loader: 'babel-loader',
20 | options: {
21 | presets: ['@babel/preset-env', '@babel/preset-react'],
22 | },
23 | },
24 | },
25 | {
26 | test: /\.s[ac]ss$/i,
27 | use: ['style-loader', 'css-loader', 'sass-loader'],
28 | },
29 | {
30 | test: /\.css$/,
31 | use: ['style-loader', 'css-loader'],
32 | },
33 | {
34 | test: /\.(png|jpg|jpeg|gif|svg)$/,
35 | use: [
36 | {
37 | loader: 'file-loader',
38 | options: {
39 | name: '[name].[ext]',
40 | outputPath: 'images',
41 | },
42 | },
43 | ],
44 | },
45 | ],
46 | },
47 |
48 | plugins: [
49 | new HtmlWebpackPlugin({
50 | template: path.join(__dirname, 'src', 'index.html'),
51 | }),
52 | ],
53 | },
54 | ];
55 |
--------------------------------------------------------------------------------