├── README.md
├── comparisons
│   └── prometheus-compression
│       ├── Makefile
│       ├── README.md
│       ├── docker-compose.yml
│       ├── influxdb
│       │   └── ddl.txt
│       ├── prometheus
│       │   └── prometheus.yml
│       └── telegraf
│           └── telegraf.conf
├── http
│   └── stress-testing
│       └── gatling
│           ├── Makefile
│           ├── README.md
│           ├── docker-compose.yml
│           ├── gatling.conf
│           └── telegraf.conf
├── influxdb
│   ├── centralising-logs
│   │   ├── README.md
│   │   ├── docker-compose.yml
│   │   ├── nginx.conf
│   │   ├── screenshot.png
│   │   └── telegraf.conf
│   ├── ddl-dml
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── ddl.txt
│   │   ├── dml.txt
│   │   ├── docker-compose.yml
│   │   └── together.txt
│   ├── rollups
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── continuous-query.txt
│   │   ├── ddl.txt
│   │   ├── docker-compose.yml
│   │   ├── influxdb.conf
│   │   └── telegraf.conf
│   └── storing-logs
│       ├── README.md
│       ├── docker-compose.yml
│       ├── screenshot.png
│       └── telegraf.conf
├── influxdb2
│   ├── consuming-query-api-from-js
│   │   ├── .gitignore
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── package.json
│   │   ├── public
│   │   │   ├── favicon.ico
│   │   │   ├── index.html
│   │   │   ├── logo192.png
│   │   │   ├── logo512.png
│   │   │   ├── manifest.json
│   │   │   └── robots.txt
│   │   ├── src
│   │   │   ├── App.css
│   │   │   ├── App.js
│   │   │   ├── App.test.js
│   │   │   ├── index.css
│   │   │   ├── index.js
│   │   │   ├── logo.svg
│   │   │   ├── serviceWorker.js
│   │   │   └── setupTests.js
│   │   └── yarn.lock
│   └── python-query-example
│       ├── Dockerfile
│       ├── Makefile
│       ├── Pipfile
│       ├── Pipfile.lock
│       ├── README.md
│       ├── docker-compose.yml
│       └── src
│           └── read.py
├── influxdbv2-helm-chart
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── templates
│   │   ├── NOTES.txt
│   │   ├── _helpers.tpl
│   │   ├── job-setup-admin.yaml
│   │   ├── persistent-volume-claim.yaml
│   │   ├── secret.yaml
│   │   ├── service.yaml
│   │   └── statefulset.yaml
│   └── values.yaml
├── jenkins_cloud.conf
├── jsonnet
│   └── community-template-linux
│       ├── .gitignore
│       ├── Makefile
│       └── src
│           └── main.jsonnet
├── kapacitor
│   └── removing-a-tag
│       ├── README.md
│       ├── define-tasks.sh
│       ├── docker-compose.yml
│       ├── historical-batch.tick
│       ├── live-stream.tick
│       └── telegraf.conf
├── kubernetes
│   ├── .envrc
│   ├── Makefile
│   ├── control-plane
│   │   ├── telegraf-config.yaml
│   │   └── telegraf.yaml
│   ├── gitops-alerting
│   │   ├── README.md
│   │   ├── influxdb
│   │   │   └── resources.yaml
│   │   └── kubernetes
│   │       └── resources.yaml
│   ├── influxdb
│   │   ├── influxdb-setup.yaml
│   │   └── influxdb.yaml
│   ├── nodes
│   │   ├── Dockerfile
│   │   ├── telegraf.yaml
│   │   └── telegraf
│   │       └── telegraf.conf
│   ├── prometheus
│   │   ├── exporters
│   │   │   └── consul-exporter.yml
│   │   ├── telegraf-config.yml
│   │   └── telegraf.yaml
│   └── setup
│       ├── namespaces.yaml
│       └── rbac.yaml
├── python-jenkins.py
└── telegraf
    ├── archive.luftdaten.info
    │   ├── .gitignore
    │   ├── Makefile
    │   ├── docker-compose.yml
    │   └── telegraf.conf
    ├── csv
    │   ├── README.md
    │   ├── data.csv
    │   ├── docker-compose.yml
    │   └── telegraf.conf
    ├── influxdays-sf
    │   ├── 2.sh
    │   ├── Dockerfile
    │   ├── autocurl.sh
    │   ├── curl.sh
    │   ├── docker-compose.yml
    │   ├── echo.sh
    │   ├── prometheus.sh
    │   ├── python-app
    │   │   ├── Dockerfile
    │   │   ├── main.py
    │   │   └── requirements.txt
    │   ├── t-consume.conf
    │   └── t-edge.conf
    ├── influxdb-ha
    │   ├── Dockerfile
    │   ├── docker-compose.yml
    │   └── telegraf.conf
    ├── iso8601
    │   ├── README.md
    │   ├── docker-compose.yml
    │   ├── input.json
    │   └── telegraf.conf
    ├── json-over-http
    │   ├── README.md
    │   ├── docker-compose.yml
    │   └── telegraf.conf
    ├── metric-buffer
    │   ├── README.md
    │   ├── docker-compose.yml
    │   └── telegraf.conf
    ├── mqtt
    │   ├── Dockerfile
    │   ├── Makefile
    │   ├── README.md
    │   ├── docker-compose.yml
    │   ├── main.py
    │   ├── mosquitto.conf
    │   ├── requirements.txt
    │   └── telegraf.conf
    ├── reusable-config
    │   ├── README.md
    │   ├── docker-compose.yml
    │   ├── shared
    │   │   ├── input-system.conf
    │   │   └── output-influxdb.conf
    │   ├── telegraf-1
    │   │   └── telegraf.conf
    │   └── telegraf-2
    │       └── telegraf.conf
    ├── socket_listener
    │   ├── README.md
    │   ├── docker-compose.yml
    │   ├── send_metrics.sh
    │   └── telegraf.conf
    ├── speedtest-cli
    │   ├── Dockerfile
    │   ├── docker-compose.yml
    │   └── telegraf.conf
    ├── sqlserver-monitoring
    │   ├── docker-compose.yml
    │   └── telegraf.conf
    ├── traefik
    │   ├── docker-compose.yml
    │   ├── telegraf.conf
    │   └── test-from-host.sh
    ├── write-to-elasticsearch
    │   ├── README.md
    │   ├── docker-compose.yml
    │   ├── telegraf-as-influxdb.conf
    │   └── telegraf.conf
    └── x509
        ├── README.md
        ├── docker-compose.yml
        ├── screenshot.png
        └── telegraf.conf
/README.md:
--------------------------------------------------------------------------------
1 | # InfluxDB Examples
2 |
3 | **Warning:** These examples are typically minimum-viable configurations. They should never be considered production examples of how to do something, except where explicitly stated.
4 |
--------------------------------------------------------------------------------
/comparisons/prometheus-compression/Makefile:
--------------------------------------------------------------------------------
1 | setup: containers
2 | @docker-compose exec influxdb influx -import -path=/tmp/influxdb/ddl.txt
3 | @docker-compose exec influxdb2 influx setup -f --host 'http://localhost:9999' -b storagecomparison -o storagecomparison -p storagecomparison -u storagecomparison -t storagecomparison
4 |
5 | containers:
6 | @docker-compose up -d
7 |
--------------------------------------------------------------------------------
/comparisons/prometheus-compression/README.md:
--------------------------------------------------------------------------------
1 | # Compression Comparison
2 |
3 | This directory contains a small setup for comparing the on-disk compression of InfluxDB and Prometheus.
4 |
5 | ## Setup
6 |
7 | ### Configure InfluxDB
8 |
9 | InfluxDB has been configured to use a retention policy of 15d and a shard duration of 2 hours.
10 |
11 | ## Notes
12 |
13 | - Prometheus retention is 15d, but this can be configured
14 | - Prometheus shard duration is 2 hours and cannot be changed
15 | - InfluxDB's shard duration changes depending on the retention policy duration:
16 |   - The default retention is forever, which has a shard duration of 1w
17 |   - Retention policy durations of less than 2 days mean a shard duration of 1h
18 |   - Retention policy durations longer than 2 days, but less than 6 months, mean a shard duration of 1d
19 |
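20 | ## Comparing On-Disk Size
21 |
22 | A minimal sketch for eyeballing the result once both systems have been ingesting for a while. It assumes the default data paths of the `influxdb:1.7` and `prom/prometheus` images (`/var/lib/influxdb/data` and `/prometheus` respectively):
23 |
24 | ```shell
25 | docker-compose exec influxdb du -sh /var/lib/influxdb/data
26 | docker-compose exec prometheus du -sh /prometheus
27 | ```
28 |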
--------------------------------------------------------------------------------
/comparisons/prometheus-compression/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.4"
2 |
3 | services:
4 | chronograf:
5 | image: chronograf:1.7
6 | command: chronograf --influxdb-url=http://influxdb:8086 --kapacitor-url=http://kapacitor:9092
7 | ports:
8 | - 8888:8888
9 | depends_on:
10 | - influxdb
11 |
12 | influxdb:
13 | image: influxdb:1.7
14 | volumes:
15 | - ./influxdb:/tmp/influxdb/
16 |
17 | influxdb2:
18 | image: quay.io/influxdb/influxdb:2.0.0-alpha
19 | ports:
20 | - 9999:9999
21 |
22 | prometheus:
23 | image: prom/prometheus:v2.15.2
24 | ports:
25 | - 9090:9090
26 | volumes:
27 | - ./prometheus:/etc/prometheus/
28 |
29 | telegraf:
30 | image: telegraf:1.13
31 | volumes:
32 | - ./telegraf/:/etc/telegraf/
33 | depends_on:
34 | - influxdb
35 | - influxdb2
36 | - prometheus
37 |
--------------------------------------------------------------------------------
/comparisons/prometheus-compression/influxdb/ddl.txt:
--------------------------------------------------------------------------------
1 | # DDL
2 | CREATE DATABASE storagecomparison
3 | CREATE RETENTION POLICY fifteen ON storagecomparison DURATION 15d REPLICATION 1 SHARD DURATION 2h DEFAULT
4 |
--------------------------------------------------------------------------------
/comparisons/prometheus-compression/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 2s
3 |
4 | scrape_configs:
5 | - job_name: "prometheus"
6 | static_configs:
7 | - targets: ["telegraf:9090"]
8 |
--------------------------------------------------------------------------------
/comparisons/prometheus-compression/telegraf/telegraf.conf:
--------------------------------------------------------------------------------
1 | [agent]
2 | interval = "2s"
3 |
4 | [[inputs.cpu]]
5 | [[inputs.disk]]
6 | [[inputs.diskio]]
7 | [[inputs.internal]]
8 | [[inputs.kernel]]
9 | [[inputs.kernel_vmstat]]
10 | [[inputs.mem]]
11 | [[inputs.net]]
12 | [[inputs.net_response]]
13 | protocol = "tcp"
14 | address = "duckduckgo.com:80"
15 | [[inputs.ping]]
16 | urls = ["google.com"]
17 | [[inputs.processes]]
18 | [[inputs.system]]
19 | [[inputs.swap]]
20 | [[inputs.temp]]
21 |
22 | [[outputs.influxdb]]
23 | urls = ["http://influxdb:8086"]
24 | database = "storagecomparison"
25 |
26 | [[outputs.influxdb_v2]]
27 | urls = ["http://influxdb2:9999"]
28 | token = "storagecomparison"
29 | organization = "storagecomparison"
30 | bucket = "storagecomparison"
31 |
32 | [[outputs.prometheus_client]]
33 | listen = ":9090"
34 |
--------------------------------------------------------------------------------
/http/stress-testing/gatling/Makefile:
--------------------------------------------------------------------------------
1 | up:
2 | @docker-compose up -d chronograf influxdb telegraf
3 | @docker-compose run --rm --entrypoint=gatling.sh gatling
4 |
--------------------------------------------------------------------------------
/http/stress-testing/gatling/README.md:
--------------------------------------------------------------------------------
1 | # Gatling Metrics
2 |
3 | **Gatling currently isn't sending metrics to Telegraf**
4 |
5 | ## InfluxDB through Telegraf
6 |
7 | This demo uses Telegraf as a Graphite endpoint, rather than InfluxDB directly, to emulate writing to InfluxCloud v1.
8 |
9 | ## Running
10 |
11 | ```shell
12 | make up
13 |
14 | # Follow interactive prompts
15 | # Then open browser to http://localhost:8888 and explore the telegraf database
16 | ```
17 |
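18 | ## Verifying the Graphite Endpoint
19 |
20 | While debugging, it helps to confirm that Telegraf's Graphite listener is reachable at all. A minimal smoke test, assuming Telegraf's `socket_listener` is bound to the conventional Graphite port 2003 and published to the host:
21 |
22 | ```shell
23 | echo "gatling.users.smoke.test 1 $(date +%s)" | nc -w 1 localhost 2003
24 | ```
25 |
26 | The point should then show up in the `telegraf` database via Chronograf.
27 |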
--------------------------------------------------------------------------------
/http/stress-testing/gatling/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.4"
2 |
3 | services:
4 | chronograf:
5 | image: chronograf:1.7
6 | command: chronograf --influxdb-url=http://influxdb:8086
7 | ports:
8 | - 8888:8888
9 | depends_on:
10 | influxdb:
11 | condition: service_healthy
12 |
13 | influxdb:
14 | image: influxdb:1.7
15 | healthcheck:
16 | test: bash -c "
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/README.md:
--------------------------------------------------------------------------------
1 | This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
2 |
3 | ## Available Scripts
4 |
5 | In the project directory, you can run:
6 |
7 | ### `yarn start`
8 |
9 | Runs the app in the development mode.<br />
10 | Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
11 |
12 | The page will reload if you make edits.
13 | You will also see any lint errors in the console.
14 |
15 | ### `yarn test`
16 |
17 | Launches the test runner in the interactive watch mode.
18 | See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
19 |
20 | ### `yarn build`
21 |
22 | Builds the app for production to the `build` folder.
23 | It correctly bundles React in production mode and optimizes the build for the best performance.
24 |
25 | The build is minified and the filenames include the hashes.
26 | Your app is ready to be deployed!
27 |
28 | See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
29 |
30 | ### `yarn eject`
31 |
32 | **Note: this is a one-way operation. Once you `eject`, you can’t go back!**
33 |
34 | If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
35 |
36 | Instead, it will copy all the configuration files and the transitive dependencies (Webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own.
37 |
38 | You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it.
39 |
40 | ## Learn More
41 |
42 | You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
43 |
44 | To learn React, check out the [React documentation](https://reactjs.org/).
45 |
46 | ### Code Splitting
47 |
48 | This section has moved here: https://facebook.github.io/create-react-app/docs/code-splitting
49 |
50 | ### Analyzing the Bundle Size
51 |
52 | This section has moved here: https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size
53 |
54 | ### Making a Progressive Web App
55 |
56 | This section has moved here: https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app
57 |
58 | ### Advanced Configuration
59 |
60 | This section has moved here: https://facebook.github.io/create-react-app/docs/advanced-configuration
61 |
62 | ### Deployment
63 |
64 | This section has moved here: https://facebook.github.io/create-react-app/docs/deployment
65 |
66 | ### `yarn build` fails to minify
67 |
68 | This section has moved here: https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify
69 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "consuming-query-api-from-js",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@testing-library/jest-dom": "^4.2.4",
7 | "@testing-library/react": "^9.3.2",
8 | "@testing-library/user-event": "^7.1.2",
9 | "react": "^16.12.0",
10 | "react-dom": "^16.12.0",
11 | "react-scripts": "3.3.0"
12 | },
13 | "scripts": {
14 | "start": "react-scripts start",
15 | "build": "react-scripts build",
16 | "test": "react-scripts test",
17 | "eject": "react-scripts eject"
18 | },
19 | "eslintConfig": {
20 | "extends": "react-app"
21 | },
22 | "browserslist": {
23 | "production": [
24 | ">0.2%",
25 | "not dead",
26 | "not op_mini all"
27 | ],
28 | "development": [
29 | "last 1 chrome version",
30 | "last 1 firefox version",
31 | "last 1 safari version"
32 | ]
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rawkode/influxdb-examples/6b15758da935284749a156cb039da0961f3d1743/influxdb2/consuming-query-api-from-js/public/favicon.ico
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/public/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html lang="en">
3 |   <head>
4 |     <meta charset="utf-8" />
5 |     <link rel="shortcut icon" href="%PUBLIC_URL%/favicon.ico" />
6 |     <meta name="viewport" content="width=device-width, initial-scale=1" />
7 |     <meta name="theme-color" content="#000000" />
8 |     <meta
9 |       name="description"
10 |       content="Web site created using create-react-app"
11 |     />
12 |     <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
13 |     <!--
14 |       manifest.json provides metadata used when your web app is installed on a
15 |       user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
16 |     -->
17 |     <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
18 |     <!--
19 |       Notice the use of %PUBLIC_URL% in the tags above.
20 |       It will be replaced with the URL of the `public` folder during the build.
21 |       Only files inside the `public` folder can be referenced from the HTML.
22 |
23 |       Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
24 |       work correctly both with client-side routing and a non-root public URL.
25 |       Learn how to configure a non-root public URL by running `npm run build`.
26 |     -->
27 |     <title>React App</title>
28 |   </head>
29 |   <body>
30 |     <noscript>You need to enable JavaScript to run this app.</noscript>
31 |     <div id="root"></div>
32 |     <!--
33 |       This HTML file is a template.
34 |       If you open it directly in the browser, you will see an empty page.
35 |
36 |       You can add webfonts, meta tags, or analytics to this file.
37 |       The build step will place the bundled scripts into the <body> tag.
38 |
39 |       To begin the development, run `npm start` or `yarn start`.
40 |       To create a production bundle, use `npm run build` or `yarn build`.
41 |     -->
42 |   </body>
43 | </html>
44 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/public/logo192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rawkode/influxdb-examples/6b15758da935284749a156cb039da0961f3d1743/influxdb2/consuming-query-api-from-js/public/logo192.png
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/public/logo512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rawkode/influxdb-examples/6b15758da935284749a156cb039da0961f3d1743/influxdb2/consuming-query-api-from-js/public/logo512.png
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | },
10 | {
11 | "src": "logo192.png",
12 | "type": "image/png",
13 | "sizes": "192x192"
14 | },
15 | {
16 | "src": "logo512.png",
17 | "type": "image/png",
18 | "sizes": "512x512"
19 | }
20 | ],
21 | "start_url": ".",
22 | "display": "standalone",
23 | "theme_color": "#000000",
24 | "background_color": "#ffffff"
25 | }
26 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/public/robots.txt:
--------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/App.css:
--------------------------------------------------------------------------------
1 | .App {
2 | text-align: center;
3 | }
4 |
5 | .App-logo {
6 | height: 40vmin;
7 | pointer-events: none;
8 | }
9 |
10 | @media (prefers-reduced-motion: no-preference) {
11 | .App-logo {
12 | animation: App-logo-spin infinite 20s linear;
13 | }
14 | }
15 |
16 | .App-header {
17 | background-color: #282c34;
18 | min-height: 100vh;
19 | display: flex;
20 | flex-direction: column;
21 | align-items: center;
22 | justify-content: center;
23 | font-size: calc(10px + 2vmin);
24 | color: white;
25 | }
26 |
27 | .App-link {
28 | color: #61dafb;
29 | }
30 |
31 | @keyframes App-logo-spin {
32 | from {
33 | transform: rotate(0deg);
34 | }
35 | to {
36 | transform: rotate(360deg);
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/App.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import logo from "./logo.svg";
3 | import "./App.css";
4 |
5 | function App() {
6 | fetch("http://influxdb.com:9999/api/v2/query?org=rawkode", {
7 | headers: {
8 | "Content-Type": "application/vnd.flux",
9 | Origin: "http://myapp.com",
10 | Authorization:
11 | "Token WwCEGtBcz6NqwDbkLsIRE84xOXYg_F3Dkmu7USPniMn8JyEAMlob33Nq_3D0wg0d5HfyDvZWd3586HxK9HOS3Q=="
12 | }
13 | })
14 | .then(response => {
15 | return response.json();
16 | })
17 | .then(myJson => {
18 | console.log(myJson);
19 | });
20 |
21 | return (
22 |
38 | );
39 | }
40 |
41 | export default App;
42 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/App.test.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from '@testing-library/react';
3 | import App from './App';
4 |
5 | test('renders learn react link', () => {
6 | const { getByText } = render();
7 | const linkElement = getByText(/learn react/i);
8 | expect(linkElement).toBeInTheDocument();
9 | });
10 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | margin: 0;
3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
5 | sans-serif;
6 | -webkit-font-smoothing: antialiased;
7 | -moz-osx-font-smoothing: grayscale;
8 | }
9 |
10 | code {
11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
12 | monospace;
13 | }
14 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import './index.css';
4 | import App from './App';
5 | import * as serviceWorker from './serviceWorker';
6 |
7 | ReactDOM.render(, document.getElementById('root'));
8 |
9 | // If you want your app to work offline and load faster, you can change
10 | // unregister() to register() below. Note this comes with some pitfalls.
11 | // Learn more about service workers: https://bit.ly/CRA-PWA
12 | serviceWorker.unregister();
13 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/logo.svg:
--------------------------------------------------------------------------------
1 |
8 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/serviceWorker.js:
--------------------------------------------------------------------------------
1 | // This optional code is used to register a service worker.
2 | // register() is not called by default.
3 |
4 | // This lets the app load faster on subsequent visits in production, and gives
5 | // it offline capabilities. However, it also means that developers (and users)
6 | // will only see deployed updates on subsequent visits to a page, after all the
7 | // existing tabs open on the page have been closed, since previously cached
8 | // resources are updated in the background.
9 |
10 | // To learn more about the benefits of this model and instructions on how to
11 | // opt-in, read https://bit.ly/CRA-PWA
12 |
13 | const isLocalhost = Boolean(
14 | window.location.hostname === 'localhost' ||
15 | // [::1] is the IPv6 localhost address.
16 | window.location.hostname === '[::1]' ||
17 | // 127.0.0.0/8 are considered localhost for IPv4.
18 | window.location.hostname.match(
19 | /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
20 | )
21 | );
22 |
23 | export function register(config) {
24 | if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
25 | // The URL constructor is available in all browsers that support SW.
26 | const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href);
27 | if (publicUrl.origin !== window.location.origin) {
28 | // Our service worker won't work if PUBLIC_URL is on a different origin
29 | // from what our page is served on. This might happen if a CDN is used to
30 | // serve assets; see https://github.com/facebook/create-react-app/issues/2374
31 | return;
32 | }
33 |
34 | window.addEventListener('load', () => {
35 | const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;
36 |
37 | if (isLocalhost) {
38 | // This is running on localhost. Let's check if a service worker still exists or not.
39 | checkValidServiceWorker(swUrl, config);
40 |
41 | // Add some additional logging to localhost, pointing developers to the
42 | // service worker/PWA documentation.
43 | navigator.serviceWorker.ready.then(() => {
44 | console.log(
45 | 'This web app is being served cache-first by a service ' +
46 | 'worker. To learn more, visit https://bit.ly/CRA-PWA'
47 | );
48 | });
49 | } else {
50 | // Is not localhost. Just register service worker
51 | registerValidSW(swUrl, config);
52 | }
53 | });
54 | }
55 | }
56 |
57 | function registerValidSW(swUrl, config) {
58 | navigator.serviceWorker
59 | .register(swUrl)
60 | .then(registration => {
61 | registration.onupdatefound = () => {
62 | const installingWorker = registration.installing;
63 | if (installingWorker == null) {
64 | return;
65 | }
66 | installingWorker.onstatechange = () => {
67 | if (installingWorker.state === 'installed') {
68 | if (navigator.serviceWorker.controller) {
69 | // At this point, the updated precached content has been fetched,
70 | // but the previous service worker will still serve the older
71 | // content until all client tabs are closed.
72 | console.log(
73 | 'New content is available and will be used when all ' +
74 | 'tabs for this page are closed. See https://bit.ly/CRA-PWA.'
75 | );
76 |
77 | // Execute callback
78 | if (config && config.onUpdate) {
79 | config.onUpdate(registration);
80 | }
81 | } else {
82 | // At this point, everything has been precached.
83 | // It's the perfect time to display a
84 | // "Content is cached for offline use." message.
85 | console.log('Content is cached for offline use.');
86 |
87 | // Execute callback
88 | if (config && config.onSuccess) {
89 | config.onSuccess(registration);
90 | }
91 | }
92 | }
93 | };
94 | };
95 | })
96 | .catch(error => {
97 | console.error('Error during service worker registration:', error);
98 | });
99 | }
100 |
101 | function checkValidServiceWorker(swUrl, config) {
102 |   // Check if the service worker can be found. If it can't, reload the page.
103 | fetch(swUrl, {
104 | headers: { 'Service-Worker': 'script' }
105 | })
106 | .then(response => {
107 | // Ensure service worker exists, and that we really are getting a JS file.
108 | const contentType = response.headers.get('content-type');
109 | if (
110 | response.status === 404 ||
111 | (contentType != null && contentType.indexOf('javascript') === -1)
112 | ) {
113 | // No service worker found. Probably a different app. Reload the page.
114 | navigator.serviceWorker.ready.then(registration => {
115 | registration.unregister().then(() => {
116 | window.location.reload();
117 | });
118 | });
119 | } else {
120 | // Service worker found. Proceed as normal.
121 | registerValidSW(swUrl, config);
122 | }
123 | })
124 | .catch(() => {
125 | console.log(
126 | 'No internet connection found. App is running in offline mode.'
127 | );
128 | });
129 | }
130 |
131 | export function unregister() {
132 | if ('serviceWorker' in navigator) {
133 | navigator.serviceWorker.ready.then(registration => {
134 | registration.unregister();
135 | });
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/influxdb2/consuming-query-api-from-js/src/setupTests.js:
--------------------------------------------------------------------------------
1 | // jest-dom adds custom jest matchers for asserting on DOM nodes.
2 | // allows you to do things like:
3 | // expect(element).toHaveTextContent(/react/i)
4 | // learn more: https://github.com/testing-library/jest-dom
5 | import '@testing-library/jest-dom/extend-expect';
6 |
--------------------------------------------------------------------------------
/influxdb2/python-query-example/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine AS dshell
2 |
3 | RUN apk add --update curl make
4 | RUN pip install pipenv
5 |
--------------------------------------------------------------------------------
/influxdb2/python-query-example/Makefile:
--------------------------------------------------------------------------------
1 | dshell:
2 | @docker-compose run --rm --service-ports --entrypoint=ash python
3 |
4 | setup:
5 | @curl -XPOST \
6 | -H "Content-type: application/json" \
7 | -d '{"username": "rawkode", "token": "rawkode123", "password": "rawkode123", "org": "rawkode", "bucket": "rawkode", "retentionPeriodHrs": 0 }' \
8 | http://influxdb:9999/api/v2/setup
9 |
10 | load-data:
11 | @curl -XPOST \
12 | -H "Authorization: Token rawkode123" \
13 | -d 'measurement,name=rawkode random=457373' \
14 | "http://influxdb:9999/api/v2/write?org=rawkode&bucket=rawkode"
15 |
--------------------------------------------------------------------------------
/influxdb2/python-query-example/Pipfile:
--------------------------------------------------------------------------------
1 | [requires]
2 | python_version = '3.8'
3 |
4 | [packages]
5 | requests = { }
6 |
--------------------------------------------------------------------------------
/influxdb2/python-query-example/Pipfile.lock:
--------------------------------------------------------------------------------
1 | {
2 | "_meta": {
3 | "hash": {
4 | "sha256": "f02d08e037f11b86f47b699a687ba21fe1bf2f5244edcc3f08dee55e0c02b997"
5 | },
6 | "pipfile-spec": 6,
7 | "requires": {
8 | "python_version": "3.8"
9 | },
10 | "sources": [
11 | {
12 | "name": "pypi",
13 | "url": "https://pypi.org/simple",
14 | "verify_ssl": true
15 | }
16 | ]
17 | },
18 | "default": {
19 | "certifi": {
20 | "hashes": [
21 | "sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304",
22 | "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519"
23 | ],
24 | "version": "==2020.4.5.1"
25 | },
26 | "chardet": {
27 | "hashes": [
28 | "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
29 | "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
30 | ],
31 | "version": "==3.0.4"
32 | },
33 | "idna": {
34 | "hashes": [
35 | "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb",
36 | "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa"
37 | ],
38 | "version": "==2.9"
39 | },
40 | "requests": {
41 | "hashes": [
42 | "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee",
43 | "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6"
44 | ],
45 | "index": "pypi",
46 | "version": "==2.23.0"
47 | },
48 | "urllib3": {
49 | "hashes": [
50 | "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527",
51 | "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115"
52 | ],
53 | "version": "==1.25.9"
54 | }
55 | },
56 | "develop": {}
57 | }
58 |
--------------------------------------------------------------------------------
/influxdb2/python-query-example/README.md:
--------------------------------------------------------------------------------
1 | # Python Client Library Example for InfluxDB 2
2 |
3 | ## Setup
4 |
5 | Currently, this example can be set up with the following commands:
6 |
7 | ```console
8 | # This puts you inside a container
9 | make dshell
10 |
11 | # From inside the container, run:
12 | make setup
13 | make load-data
14 | ```
15 |
16 | ## Dependencies
17 |
18 | ```console
19 | pipenv install
20 | ```
21 |
22 | ## Read
23 |
24 | ```console
25 | pipenv run python src/read.py
26 | ```
27 |
28 | ## Write
29 |
30 | I will add Python write examples in time.
31 |
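32 | Until then, a write is a single HTTP call with line protocol in the body; this is the same request the Makefile's `load-data` target makes:
33 |
34 | ```console
35 | curl -XPOST \
36 |   -H "Authorization: Token rawkode123" \
37 |   -d 'measurement,name=rawkode random=457373' \
38 |   "http://influxdb:9999/api/v2/write?org=rawkode&bucket=rawkode"
39 | ```
40 |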
--------------------------------------------------------------------------------
/influxdb2/python-query-example/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.4"
2 |
3 | services:
4 | python:
5 | build:
6 | target: dshell
7 | context: .
8 | working_dir: /code
9 | volumes:
10 | - .:/code
11 | depends_on:
12 | influxdb:
13 | condition: service_started
14 |
15 | influxdb:
16 | image: quay.io/influxdb/influxdb:2.0.0-beta
17 | ports:
18 | - 9999:9999
--------------------------------------------------------------------------------
/influxdb2/python-query-example/src/read.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from pprint import pprint
3 |
4 | url="http://influxdb:9999/api/v2/query?org=rawkode"
5 |
6 | headers = {
7 | "Authorization": "Token rawkode123",
8 | "Accept" : "application/csv",
9 | "Content-Type" : "application/vnd.flux"
10 | }
11 |
12 | data = """
13 | from(bucket: "rawkode")
14 | |> range(start: -2h)
15 | """
16 |
17 | response = requests.post(url, headers=headers, data=data)
18 |
19 | pprint(response.status_code)
20 | pprint(response.text)
21 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | appVersion: 2.0.0-alpha
3 |
4 | name: influxdb
5 | description: A Helm chart for InfluxDB v2
6 | type: application
7 | version: 1.0.0
8 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | InfluxDB 2 is deployed as a StatefulSet on your cluster.
2 |
3 | You can access it by using the service name: {{ template "influxdb.fullname" . }}
4 |
5 | Admin password and token are available in the secret: {{ template "influxdb.fullname" . }}-auth
6 |
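7 | To retrieve the generated admin token, you can, for example, run:
8 |
9 |   kubectl get secret {{ template "influxdb.fullname" . }}-auth -o jsonpath='{.data.admin-token}' | base64 --decode
10 |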
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart.
4 | */}}
5 | {{- define "influxdb.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 |
9 | {{/*
10 | Create a default fully qualified app name.
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name.
13 | */}}
14 | {{- define "influxdb.fullname" -}}
15 | {{- if .Values.fullnameOverride -}}
16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
17 | {{- else -}}
18 | {{- $name := default .Chart.Name .Values.nameOverride -}}
19 | {{- if contains $name .Release.Name -}}
20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
21 | {{- else -}}
22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
23 | {{- end -}}
24 | {{- end -}}
25 | {{- end -}}
26 |
27 | {{/*
28 | Create chart name and version as used by the chart label.
29 | */}}
30 | {{- define "influxdb.chart" -}}
31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
32 | {{- end -}}
33 |
34 | {{/*
35 | Common labels
36 | */}}
37 | {{- define "influxdb.labels" -}}
38 | app.kubernetes.io/name: {{ include "influxdb.name" . }}
39 | helm.sh/chart: {{ include "influxdb.chart" . }}
40 | app.kubernetes.io/instance: {{ .Release.Name }}
41 | {{- if .Chart.AppVersion }}
42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
43 | {{- end }}
44 | app.kubernetes.io/managed-by: {{ .Release.Service }}
45 | {{- end -}}
46 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/templates/job-setup-admin.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | labels:
5 | app: "{{ template "influxdb.fullname" . }}"
6 | chart: "{{ template "influxdb.chart" . }}"
7 | release: "{{ .Release.Name }}"
8 | heritage: "{{ .Release.Service }}"
9 | name: {{ template "influxdb.fullname" . }}-create-admin-user
10 | annotations:
11 | "helm.sh/hook": post-install
12 |     "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
13 |     # remove the job after success, or before re-running the hook
14 | spec:
15 | activeDeadlineSeconds: 30
16 | template:
17 | metadata:
18 | labels:
19 | app: {{ template "influxdb.fullname" . }}
20 | release: "{{ .Release.Name }}"
21 | spec:
22 | containers:
23 | - name: {{ template "influxdb.fullname" . }}-create-admin-user
24 | image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
25 | env:
26 | - name: INFLUXDB_PASSWORD
27 | valueFrom:
28 | secretKeyRef:
29 | name: {{ template "influxdb.fullname" . }}-auth
30 | key: admin-password
31 | - name: INFLUXDB_TOKEN
32 | valueFrom:
33 | secretKeyRef:
34 | name: {{ template "influxdb.fullname" . }}-auth
35 | key: admin-token
36 | command:
37 | - bash
38 | args:
39 | - -c
40 | - |
41 | influx setup -f --host \
42 | http://{{ template "influxdb.fullname" . }}:{{ .Values.service.port }} \
43 | -b {{ .Values.adminUser.bucket }} -o {{ .Values.adminUser.organization }} \
44 | -p ${INFLUXDB_PASSWORD} -u {{ .Values.adminUser.user }} -t $INFLUXDB_TOKEN
45 | restartPolicy: OnFailure
46 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/templates/persistent-volume-claim.yaml:
--------------------------------------------------------------------------------
1 | {{- if and (.Values.persistence.enabled) (not .Values.persistence.useExisting) }}
2 | kind: PersistentVolumeClaim
3 | apiVersion: v1
4 | metadata:
5 | name: "{{- if not (empty .Values.persistence.name) }}{{ .Values.persistence.name }}{{- else }}{{ template "influxdb.fullname" . }}{{- end }}"
6 | labels:
7 | app: "{{- if not (empty .Values.persistence.name) }}{{ .Values.persistence.name }}{{- else }}{{ template "influxdb.fullname" . }}{{- end }}"
8 | chart: "{{ template "influxdb.chart" . }}"
9 | release: "{{ .Release.Name }}"
10 | heritage: "{{ .Release.Service }}"
11 | spec:
12 | accessModes:
13 | - {{ .Values.persistence.accessMode | quote }}
14 | resources:
15 | requests:
16 | storage: {{ .Values.persistence.size | quote }}
17 | {{- if .Values.persistence.storageClass }}
18 | {{- if (eq "-" .Values.persistence.storageClass) }}
19 | storageClassName: ""
20 | {{- else }}
21 | storageClassName: "{{ .Values.persistence.storageClass }}"
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/templates/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | labels:
5 | app: "{{ template "influxdb.fullname" . }}"
6 | chart: "{{ template "influxdb.chart" . }}"
7 | heritage: "{{ .Release.Service }}"
8 | release: "{{ .Release.Name }}"
9 | name: {{ template "influxdb.fullname" . }}-auth
10 | data:
11 | admin-password: {{ randAlphaNum 32 | b64enc | quote }}
12 | admin-token: {{ randAlphaNum 32 | b64enc | quote }}
13 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ template "influxdb.fullname" . }}
5 | labels:
6 | {{ include "influxdb.labels" . | indent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: 9999
12 | protocol: TCP
13 | name: http
14 | selector:
15 | app.kubernetes.io/name: {{ include "influxdb.name" . }}
16 | app.kubernetes.io/instance: {{ .Release.Name }}
17 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/templates/statefulset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 |   name: {{ template "influxdb.fullname" . }}
5 |   labels:
6 | {{ include "influxdb.labels" . | indent 4 }}
7 | spec:
8 |   serviceName: {{ template "influxdb.fullname" . }}
9 |   replicas: 1
10 |   selector:
11 |     matchLabels:
12 |       app.kubernetes.io/name: {{ include "influxdb.name" . }}
13 |       app.kubernetes.io/instance: {{ .Release.Name }}
14 |   template:
15 |     metadata:
16 |       labels:
17 |         app.kubernetes.io/name: {{ include "influxdb.name" . }}
18 |         app.kubernetes.io/instance: {{ .Release.Name }}
19 |     spec:
20 |       volumes:
21 |         - name: data
22 | {{- if .Values.persistence.enabled }}
23 | {{- if not (empty .Values.persistence.name) }}
24 |           persistentVolumeClaim:
25 |             claimName: {{ .Values.persistence.name }}
26 | {{- else }}
27 |           persistentVolumeClaim:
28 |             claimName: {{ template "influxdb.fullname" . }}
29 | {{- end }}
30 | {{- else }}
31 |           emptyDir: {}
32 | {{- end }}
33 |       containers:
34 |         - name: {{ .Chart.Name }}
35 |           image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
36 |           imagePullPolicy: {{ .Values.image.pullPolicy }}
37 |           ports:
38 |             - name: http
39 |               containerPort: 9999
40 |               protocol: TCP
41 |           livenessProbe:
42 |             httpGet:
43 |               path: /health
44 |               port: http
45 |           readinessProbe:
46 |             httpGet:
47 |               path: /health
48 |               port: http
49 |           volumeMounts:
50 |             - name: data
51 |               mountPath: /root/.influxdbv2
52 |           resources:
53 | {{ toYaml .Values.resources | indent 12 }}
54 | {{- with .Values.nodeSelector }}
55 |       nodeSelector:
56 | {{ toYaml . | indent 8 }}
57 | {{- end }}
58 | {{- with .Values.affinity }}
59 |       affinity:
60 | {{ toYaml . | indent 8 }}
61 | {{- end }}
62 | {{- with .Values.tolerations }}
63 |       tolerations:
64 | {{ toYaml . | indent 8 }}
65 | {{- end }}
66 |
--------------------------------------------------------------------------------
/influxdbv2-helm-chart/values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | repository: quay.io/influxdb/influxdb
3 | pullPolicy: IfNotPresent
4 |
5 | nameOverride: ""
6 | fullnameOverride: ""
7 |
8 | service:
9 | type: ClusterIP
10 | port: 80
11 |
12 | resources: {}
13 | # We usually recommend not to specify default resources and to leave this as a conscious
14 | # choice for the user. This also increases chances charts run on environments with little
15 | # resources, such as Minikube. If you do want to specify resources, uncomment the following
16 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
17 | # limits:
18 | # cpu: 100m
19 | # memory: 128Mi
20 | # requests:
21 | # cpu: 100m
22 | # memory: 128Mi
23 |
24 | nodeSelector: {}
25 |
26 | tolerations: []
27 |
28 | affinity: {}
29 |
30 | ## Create default user through Kubernetes job
31 | ## Defaults indicated below
32 | ##
33 | adminUser:
34 | organization: "influxdata"
35 | bucket: "default"
36 | user: "admin"
37 |
38 | ## Persist data to a persistent volume
39 | ##
40 | persistence:
41 | enabled: true
42 | ## If true will use an existing PVC instead of creating one
43 | # useExisting: false
44 | ## Name of existing PVC to be used in the influx deployment
45 | # name:
46 | ## influxdb data Persistent Volume Storage Class
47 | ## If defined, storageClassName:
48 | ## If set to "-", storageClassName: "", which disables dynamic provisioning
49 | ## If undefined (the default) or set to null, no storageClassName spec is
50 | ## set, choosing the default provisioner. (gp2 on AWS, standard on
51 | ## GKE, AWS & OpenStack)
52 | ##
53 | # storageClass: "-"
54 | accessMode: ReadWriteOnce
55 | size: 8Gi
56 |
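57 | ## Example install with overrides (hypothetical release name and values):
58 | ##   helm install influxdb . --set persistence.size=20Gi --set service.type=NodePort
59 |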
--------------------------------------------------------------------------------
/jenkins_cloud.conf:
--------------------------------------------------------------------------------
1 | # Telegraf Configuration
2 | #
3 | # Telegraf is entirely plugin driven. All metrics are gathered from the
4 | # declared inputs, and sent to the declared outputs.
5 | #
6 | # Plugins must be declared in here to be active.
7 | # To deactivate a plugin, comment out the name and any variables.
8 | #
9 | # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
10 | # file would generate.
11 | #
12 | # Environment variables can be used anywhere in this config file, simply prepend
13 | # them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
14 | # for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
15 |
16 |
17 | # Global tags can be specified here in key="value" format.
18 | [global_tags]
19 | # dc = "us-east-1" # will tag all metrics with dc=us-east-1
20 | # rack = "1a"
21 | ## Environment variables can be used as tags, and throughout the config file
22 | # user = "$USER"
23 |
24 |
25 | # Configuration for telegraf agent
26 | [agent]
27 | ## Default data collection interval for all inputs
28 | interval = "10s"
29 | ## Rounds collection interval to 'interval'
30 | ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
31 | round_interval = true
32 |
33 | ## Telegraf will send metrics to outputs in batches of at most
34 | ## metric_batch_size metrics.
35 | ## This controls the size of writes that Telegraf sends to output plugins.
36 | metric_batch_size = 1000
37 |
38 | ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
39 | ## output, and will flush this buffer on a successful write. Oldest metrics
40 | ## are dropped first when this buffer fills.
41 | ## This buffer only fills when writes fail to output plugin(s).
42 | metric_buffer_limit = 10000
43 |
44 | ## Collection jitter is used to jitter the collection by a random amount.
45 | ## Each plugin will sleep for a random time within jitter before collecting.
46 | ## This can be used to avoid many plugins querying things like sysfs at the
47 | ## same time, which can have a measurable effect on the system.
48 | collection_jitter = "0s"
49 |
50 | ## Default flushing interval for all outputs. Maximum flush_interval will be
51 | ## flush_interval + flush_jitter
52 | flush_interval = "10s"
53 | ## Jitter the flush interval by a random amount. This is primarily to avoid
54 | ## large write spikes for users running a large number of telegraf instances.
55 | ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
56 | flush_jitter = "0s"
57 |
58 | ## By default or when set to "0s", precision will be set to the same
59 | ## timestamp order as the collection interval, with the maximum being 1s.
60 | ## ie, when interval = "10s", precision will be "1s"
61 | ## when interval = "250ms", precision will be "1ms"
62 | ## Precision will NOT be used for service inputs. It is up to each individual
63 | ## service input to set the timestamp at the appropriate precision.
64 | ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
65 | precision = ""
66 |
67 | ## Logging configuration:
68 | ## Run telegraf with debug log messages.
69 | debug = false
70 | ## Run telegraf in quiet mode (error log messages only).
71 | quiet = false
72 | ## Specify the log file name. The empty string means to log to stderr.
73 | logfile = ""
74 |
75 | ## Override default hostname, if empty use os.Hostname()
76 | hostname = ""
77 |   ## If set to true, do not set the "host" tag in the telegraf agent.
78 | omit_hostname = false
79 |
80 |
81 | ###############################################################################
82 | # OUTPUT PLUGINS #
83 | ###############################################################################
84 |
85 | # Configuration for sending metrics to InfluxDB
86 | [[outputs.influxdb_v2]]
87 | ## The URLs of the InfluxDB cluster nodes.
88 | ##
89 | ## Multiple URLs can be specified for a single cluster, only ONE of the
90 | ## urls will be written to each interval.
91 | urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
92 |
93 | ## Token for authentication.
94 | token = "$token"
95 |
96 | ## Organization is the name of the organization you wish to write to; must exist.
97 | organization = "anais@influxdata.com"
98 |
99 | ## Destination bucket to write into.
100 | bucket = "Jenkins"
101 |
102 | ## The value of this tag will be used to determine the bucket. If this
103 | ## tag is not set the 'bucket' option is used as the default.
104 | # bucket_tag = ""
105 |
106 | ## Timeout for HTTP messages.
107 | # timeout = "5s"
108 |
109 | ## Additional HTTP headers
110 | # http_headers = {"X-Special-Header" = "Special-Value"}
111 |
112 | ## HTTP Proxy override, if unset values the standard proxy environment
113 | ## variables are consulted to determine which proxy, if any, should be used.
114 | # http_proxy = "http://corporate.proxy:3128"
115 |
116 | ## HTTP User-Agent
117 | # user_agent = "telegraf"
118 |
119 | ## Content-Encoding for write request body, can be set to "gzip" to
120 | ## compress body or "identity" to apply no encoding.
121 | # content_encoding = "gzip"
122 |
123 |   ## Enable or disable uint support for writing uints to InfluxDB 2.0.
124 | # influx_uint_support = false
125 |
126 | ## Optional TLS Config for use on HTTP connections.
127 | # tls_ca = "/etc/telegraf/ca.pem"
128 | # tls_cert = "/etc/telegraf/cert.pem"
129 | # tls_key = "/etc/telegraf/key.pem"
130 | ## Use TLS but skip chain & host verification
131 | # insecure_skip_verify = false
132 |
133 |
134 |
135 | ###############################################################################
136 | # PROCESSOR PLUGINS #
137 | ###############################################################################
138 |
139 | # # Convert values to another metric value type
140 | # [[processors.converter]]
141 | # ## Tags to convert
142 | # ##
143 | # ## The table key determines the target type, and the array of key-values
144 | # ## select the keys to convert. The array may contain globs.
145 | # ##   <target-type> = [<tag-key>...]
146 | # [processors.converter.tags]
147 | # string = []
148 | # integer = []
149 | # unsigned = []
150 | # boolean = []
151 | # float = []
152 | #
153 | # ## Fields to convert
154 | # ##
155 | # ## The table key determines the target type, and the array of key-values
156 | # ## select the keys to convert. The array may contain globs.
157 | # ##   <target-type> = [<field-key>...]
158 | # [processors.converter.fields]
159 | # tag = []
160 | # string = []
161 | # integer = []
162 | # unsigned = []
163 | # boolean = []
164 | # float = []
165 |
166 |
167 | # # Map enum values according to given table.
168 | # [[processors.enum]]
169 | # [[processors.enum.mapping]]
170 | # ## Name of the field to map
171 | # field = "status"
172 | #
173 | # ## Destination field to be used for the mapped value. By default the source
174 | # ## field is used, overwriting the original value.
175 | # # dest = "status_code"
176 | #
177 | # ## Default value to be used for all values not contained in the mapping
178 | # ## table. When unset, the unmodified value for the field will be used if no
179 | # ## match is found.
180 | # # default = 0
181 | #
182 | # ## Table of mappings
183 | # [processors.enum.mapping.value_mappings]
184 | # green = 1
185 | # yellow = 2
186 | # red = 3
187 |
188 |
189 | # # Apply metric modifications using override semantics.
190 | # [[processors.override]]
191 | # ## All modifications on inputs and aggregators can be overridden:
192 | # # name_override = "new_name"
193 | # # name_prefix = "new_name_prefix"
194 | # # name_suffix = "new_name_suffix"
195 | #
196 | # ## Tags to be added (all values must be strings)
197 | # # [processors.override.tags]
198 | # # additional_tag = "tag_value"
199 |
200 |
201 | # # Parse a value in a specified field/tag(s) and add the result in a new metric
202 | # [[processors.parser]]
203 | # ## The name of the fields whose value will be parsed.
204 | # parse_fields = []
205 | #
206 | # ## If true, incoming metrics are not emitted.
207 | # drop_original = false
208 | #
209 | # ## If set to override, emitted metrics will be merged by overriding the
210 | # ## original metric using the newly parsed metrics.
211 | # merge = "override"
212 | #
213 | # ## The dataformat to be read from files
214 | # ## Each data format has its own unique set of configuration options, read
215 | # ## more about them here:
216 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
217 | # data_format = "influx"
218 |
219 |
220 | # # Print all metrics that pass through this filter.
221 | # [[processors.printer]]
222 |
223 |
224 | # # Transforms tag and field values with regex pattern
225 | # [[processors.regex]]
226 | # ## Tag and field conversions defined in a separate sub-tables
227 | # # [[processors.regex.tags]]
228 | # # ## Tag to change
229 | # # key = "resp_code"
230 | # # ## Regular expression to match on a tag value
231 | # # pattern = "^(\\d)\\d\\d$"
232 | # # ## Pattern for constructing a new value (${1} represents first subgroup)
233 | # # replacement = "${1}xx"
234 | #
235 | # # [[processors.regex.fields]]
236 | # # key = "request"
237 | # # ## All the power of the Go regular expressions available here
238 | # # ## For example, named subgroups
239 | # #     pattern = "^/api(?P<method>/[\\w/]+)\\S*"
240 | # # replacement = "${method}"
241 | # # ## If result_key is present, a new field will be created
242 | # # ## instead of changing existing field
243 | # # result_key = "method"
244 | #
245 | # ## Multiple conversions may be applied for one field sequentially
246 | # ## Let's extract one more value
247 | # # [[processors.regex.fields]]
248 | # # key = "request"
249 | # # pattern = ".*category=(\\w+).*"
250 | # # replacement = "${1}"
251 | # # result_key = "search_category"
252 |
253 |
254 | # # Rename measurements, tags, and fields that pass through this filter.
255 | # [[processors.rename]]
256 |
257 |
258 | # # Perform string processing on tags, fields, and measurements
259 | # [[processors.strings]]
260 | # ## Convert a tag value to uppercase
261 | # # [[processors.strings.uppercase]]
262 | # # tag = "method"
263 | #
264 | # ## Convert a field value to lowercase and store in a new field
265 | # # [[processors.strings.lowercase]]
266 | # # field = "uri_stem"
267 | # # dest = "uri_stem_normalised"
268 | #
269 | # ## Trim leading and trailing whitespace using the default cutset
270 | # # [[processors.strings.trim]]
271 | # # field = "message"
272 | #
273 | # ## Trim leading characters in cutset
274 | # # [[processors.strings.trim_left]]
275 | # # field = "message"
276 | # # cutset = "\t"
277 | #
278 | # ## Trim trailing characters in cutset
279 | # # [[processors.strings.trim_right]]
280 | # # field = "message"
281 | # # cutset = "\r\n"
282 | #
283 | # ## Trim the given prefix from the field
284 | # # [[processors.strings.trim_prefix]]
285 | # # field = "my_value"
286 | # # prefix = "my_"
287 | #
288 | # ## Trim the given suffix from the field
289 | # # [[processors.strings.trim_suffix]]
290 | # # field = "read_count"
291 | # # suffix = "_count"
292 | #
293 | # ## Replace all non-overlapping instances of old with new
294 | # # [[processors.strings.replace]]
295 | # # measurement = "*"
296 | # # old = ":"
297 | # # new = "_"
298 |
299 |
300 | # # Print all metrics that pass through this filter.
301 | # [[processors.topk]]
302 | # ## How many seconds between aggregations
303 | # # period = 10
304 | #
305 | # ## How many top metrics to return
306 | # # k = 10
307 | #
308 | # ## Over which tags should the aggregation be done. Globs can be specified, in
309 | # ## which case any tag matching the glob will be aggregated over. If set to an
310 | # ## empty list, no aggregation over tags is done
311 | # # group_by = ['*']
312 | #
313 | # ## Over which fields are the top k are calculated
314 | # # fields = ["value"]
315 | #
316 | # ## What aggregation to use. Options: sum, mean, min, max
317 | # # aggregation = "mean"
318 | #
319 | # ## Instead of the top k largest metrics, return the bottom k lowest metrics
320 | # # bottomk = false
321 | #
322 | # ## The plugin assigns each metric a GroupBy tag generated from its name and
323 | # ## tags. If this setting is different than "" the plugin will add a
324 | # ## tag (which name will be the value of this setting) to each metric with
325 | # ## the value of the calculated GroupBy tag. Useful for debugging
326 | # # add_groupby_tag = ""
327 | #
328 | # ## These settings provide a way to know the position of each metric in
329 | # ## the top k. The 'add_rank_fields' setting allows to specify for which
330 | # ## fields the position is required. If the list is non empty, then a field
331 | # ## will be added to each and every metric for each string present in this
332 | # ## setting. This field will contain the ranking of the group that
333 | # ## the metric belonged to when aggregated over that field.
334 | # ## The name of the field will be set to the name of the aggregation field,
335 | # ## suffixed with the string '_topk_rank'
336 | # # add_rank_fields = []
337 | #
338 | # ## These settings provide a way to know what values the plugin is generating
339 | # ## when aggregating metrics. The 'add_aggregate_fields' setting allows to
340 | # ## specify for which fields the final aggregation value is required. If the
341 | # ## list is non empty, then a field will be added to each every metric for
342 | # ## each field present in this setting. This field will contain
343 | # ## the computed aggregation for the group that the metric belonged to when
344 | # ## aggregated over that field.
345 | # ## The name of the field will be set to the name of the aggregation field,
346 | # ## suffixed with the string '_topk_aggregate'
347 | # # add_aggregate_fields = []
348 |
349 |
350 |
351 | ###############################################################################
352 | # AGGREGATOR PLUGINS #
353 | ###############################################################################
354 |
355 | # # Keep the aggregate basicstats of each metric passing through.
356 | # [[aggregators.basicstats]]
357 | # ## The period on which to flush & clear the aggregator.
358 | # period = "30s"
359 | # ## If true, the original metric will be dropped by the
360 | # ## aggregator and will not get sent to the output plugins.
361 | # drop_original = false
362 | #
363 | # ## Configures which basic stats to push as fields
364 | # # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
365 |
366 |
367 | # # Create aggregate histograms.
368 | # [[aggregators.histogram]]
369 | # ## The period in which to flush the aggregator.
370 | # period = "30s"
371 | #
372 | # ## If true, the original metric will be dropped by the
373 | # ## aggregator and will not get sent to the output plugins.
374 | # drop_original = false
375 | #
376 | # ## Example config that aggregates all fields of the metric.
377 | # # [[aggregators.histogram.config]]
378 | # # ## The set of buckets.
379 | # # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
380 | # # ## The name of metric.
381 | # # measurement_name = "cpu"
382 | #
383 | # ## Example config that aggregates only specific fields of the metric.
384 | # # [[aggregators.histogram.config]]
385 | # # ## The set of buckets.
386 | # # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
387 | # # ## The name of metric.
388 | # # measurement_name = "diskio"
389 | # # ## The concrete fields of metric
390 | # # fields = ["io_time", "read_time", "write_time"]
391 |
392 |
393 | # # Keep the aggregate min/max of each metric passing through.
394 | # [[aggregators.minmax]]
395 | # ## General Aggregator Arguments:
396 | # ## The period on which to flush & clear the aggregator.
397 | # period = "30s"
398 | # ## If true, the original metric will be dropped by the
399 | # ## aggregator and will not get sent to the output plugins.
400 | # drop_original = false
401 |
402 |
403 | # # Count the occurrence of values in fields.
404 | # [[aggregators.valuecounter]]
405 | # ## General Aggregator Arguments:
406 | # ## The period on which to flush & clear the aggregator.
407 | # period = "30s"
408 | # ## If true, the original metric will be dropped by the
409 | # ## aggregator and will not get sent to the output plugins.
410 | # drop_original = false
411 | # ## The fields for which the values will be counted
412 | # fields = []
413 |
414 |
415 |
416 | ###############################################################################
417 | # INPUT PLUGINS #
418 | ###############################################################################
419 |
420 | # Read jobs and cluster metrics from Jenkins instances
421 | [[inputs.jenkins]]
422 | ## The Jenkins URL
423 | url = "http://localhost:8080"
424 | username = "admin"
425 | password = "$jenkins"
426 |
427 | ## Set response_timeout
428 | response_timeout = "5s"
429 |
430 | ## Optional TLS Config
431 | # tls_ca = "/etc/telegraf/ca.pem"
432 | # tls_cert = "/etc/telegraf/cert.pem"
433 | # tls_key = "/etc/telegraf/key.pem"
434 |   ## Use TLS but skip chain & host verification
435 | # insecure_skip_verify = false
436 |
437 | ## Optional Max Job Build Age filter
438 | ## Default 1 hour, ignore builds older than max_build_age
439 | # max_build_age = "1h"
440 |
441 | ## Optional Sub Job Depth filter
442 |   ## Jenkins can have unlimited layers of sub jobs
443 |   ## This config limits how many layers are pulled; the default value 0 means
444 |   ## unlimited pulling until there are no more sub jobs
445 | # max_subjob_depth = 0
446 |
447 | ## Optional Sub Job Per Layer
448 | ## In workflow-multibranch-plugin, each branch will be created as a sub job.
449 |   ## This config limits collection to only the latest branches in each layer;
450 |   ## if empty, the default value 10 is used
451 | # max_subjob_per_layer = 10
452 |
453 | ## Jobs to exclude from gathering
454 | # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
455 |
456 | ## Nodes to exclude from gathering
457 | # node_exclude = [ "node1", "node2" ]
458 |
459 |   ## Worker pool for the jenkins plugin only
460 |   ## Leaving this field empty will use the default value 5
461 | # max_connections = 5
462 |
463 |
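464 | # A quick smoke test for this config (hypothetical invocation; assumes a
465 | # local Jenkins and the "jenkins" environment variable set to the admin
466 | # password referenced above):
467 | #
468 | #   jenkins=<password> telegraf --config jenkins_cloud.conf --test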
--------------------------------------------------------------------------------
/jsonnet/community-template-linux/.gitignore:
--------------------------------------------------------------------------------
1 | /influxdb-jsonnet
--------------------------------------------------------------------------------
/jsonnet/community-template-linux/Makefile:
--------------------------------------------------------------------------------
1 | setup:
2 | @git clone https://github.com/influxdata/influxdb-jsonnet
3 |
4 | influxdb:
5 | @docker container run --detach --rm --publish 9999:9999 quay.io/influxdb/influxdb:2.0.0-beta
6 |
7 | influxdb-setup:
8 | @influx setup --force -u rawkode -p rawkode123 -t rawkode -o rawkode -b rawkode -r 0
9 |
10 | compile:
11 | @jsonnet -J ./influxdb-jsonnet ./src/main.jsonnet
12 |
--------------------------------------------------------------------------------
/jsonnet/community-template-linux/src/main.jsonnet:
--------------------------------------------------------------------------------
1 | local influxdb = import '../influxdb-jsonnet/src/influxdb.libsonnet';
2 |
3 | local inputLabelColor = "#326BBA";
4 | local outputLabelColor = "#108174";
5 |
6 | local labelLinuxSystemTemplate = influxdb.label.new(name="Linux System Template", color="#7A65F2");
7 |
8 | local labelInputsCPU = influxdb.label.new(name="inputs.cpu", color=inputLabelColor);
9 | local labelInputsDisk = influxdb.label.new(name="inputs.disk", color=inputLabelColor);
10 | local labelInputsDiskIO = influxdb.label.new(name="inputs.diskio", color=inputLabelColor);
11 | local labelInputsKernel = influxdb.label.new(name="inputs.kernel", color=inputLabelColor);
12 | local labelInputsMem = influxdb.label.new(name="inputs.mem", color=inputLabelColor);
13 | local labelInputsNet = influxdb.label.new(name="inputs.net", color=inputLabelColor);
14 | local labelInputsProcesses = influxdb.label.new(name="inputs.processes", color=inputLabelColor);
15 | local labelInputsSwap = influxdb.label.new(name="inputs.swap", color=inputLabelColor);
16 | local labelInputsSystem = influxdb.label.new(name="inputs.system", color=inputLabelColor);
17 | local labelOutputsInfluxDB2 = influxdb.label.new(name="outputs.influxdb_v2", color=outputLabelColor);
18 |
19 | local variableBucket = influxdb.variable.query.new(name="bucket", language="flux", query=|||
20 | buckets()
21 | |> filter(fn: (r) => r.name !~ /^_/)
22 | |> rename(columns: {name: "_value"})
23 | |> keep(columns: ["_value"])
24 | |||);
25 |
26 | local variableHost = influxdb.variable.query.new(name="linux_host", language="flux", query=|||
27 | import "influxdata/influxdb/v1"
28 | v1.measurementTagValues(bucket: v.bucket, measurement: "cpu", tag: "host")
29 | |||);
30 |
31 | [
32 | labelLinuxSystemTemplate,
33 | labelInputsCPU,
34 | labelInputsDisk,
35 | labelInputsDiskIO,
36 | labelInputsKernel,
37 | labelInputsMem,
38 | labelInputsNet,
39 | labelInputsProcesses,
40 | labelInputsSwap,
41 | labelInputsSystem,
42 | labelOutputsInfluxDB2,
43 |
44 | variableBucket,
45 | variableHost,
46 |
47 | influxdb.bucket.new(name="telegraf", retentionRules=[{type: "expire", everySeconds: 604800}]),
48 |
49 | influxdb.telegraf.new(name="Linux System Monitoring", labels=[labelInputsCPU, labelInputsSystem, labelInputsSwap, labelInputsProcesses, labelInputsMem, labelInputsKernel, labelInputsDisk, labelInputsDiskIO, labelLinuxSystemTemplate, labelOutputsInfluxDB2], config=|||
50 | # Telegraf Configuration
51 | #
52 | # Telegraf is entirely plugin driven. All metrics are gathered from the
53 | # declared inputs, and sent to the declared outputs.
54 | #
55 | # Plugins must be declared in here to be active.
56 | # To deactivate a plugin, comment out the name and any variables.
57 | #
58 | # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
59 | # file would generate.
60 | #
61 | # Environment variables can be used anywhere in this config file, simply surround
62 | # them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
63 | # for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
64 |
65 |
66 | # Global tags can be specified here in key="value" format.
67 | [global_tags]
68 | # dc = "us-east-1" # will tag all metrics with dc=us-east-1
69 | # rack = "1a"
70 | ## Environment variables can be used as tags, and throughout the config file
71 | # user = "$USER"
72 |
73 |
74 | # Configuration for telegraf agent
75 | [agent]
76 | ## Default data collection interval for all inputs
77 | interval = "10s"
78 | ## Rounds collection interval to 'interval'
79 | ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
80 | round_interval = true
81 |
82 | ## Telegraf will send metrics to outputs in batches of at most
83 | ## metric_batch_size metrics.
84 | ## This controls the size of writes that Telegraf sends to output plugins.
85 | metric_batch_size = 1000
86 |
87 | ## Maximum number of unwritten metrics per output. Increasing this value
88 | ## allows for longer periods of output downtime without dropping metrics at the
89 | ## cost of higher maximum memory usage.
90 | metric_buffer_limit = 10000
91 |
92 | ## Collection jitter is used to jitter the collection by a random amount.
93 | ## Each plugin will sleep for a random time within jitter before collecting.
94 | ## This can be used to avoid many plugins querying things like sysfs at the
95 | ## same time, which can have a measurable effect on the system.
96 | collection_jitter = "0s"
97 |
98 | ## Default flushing interval for all outputs. Maximum flush_interval will be
99 | ## flush_interval + flush_jitter
100 | flush_interval = "10s"
101 | ## Jitter the flush interval by a random amount. This is primarily to avoid
102 | ## large write spikes for users running a large number of telegraf instances.
103 | ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
104 | flush_jitter = "0s"
105 |
106 | ## By default or when set to "0s", precision will be set to the same
107 | ## timestamp order as the collection interval, with the maximum being 1s.
108 | ## ie, when interval = "10s", precision will be "1s"
109 | ## when interval = "250ms", precision will be "1ms"
110 | ## Precision will NOT be used for service inputs. It is up to each individual
111 | ## service input to set the timestamp at the appropriate precision.
112 | ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
113 | precision = ""
114 |
115 | ## Log at debug level.
116 | # debug = false
117 | ## Log only error level messages.
118 | # quiet = false
119 |
120 | ## Log target controls the destination for logs and can be one of "file",
121 | ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
122 | ## is determined by the "logfile" setting.
123 | # logtarget = "file"
124 |
125 | ## Name of the file to be logged to when using the "file" logtarget. If set to
126 | ## the empty string then logs are written to stderr.
127 | # logfile = ""
128 |
129 | ## The logfile will be rotated after the time interval specified. When set
130 | ## to 0 no time based rotation is performed. Logs are rotated only when
131 |   ## written to; if there is no log activity, rotation may be delayed.
132 | # logfile_rotation_interval = "0d"
133 |
134 | ## The logfile will be rotated when it becomes larger than the specified
135 | ## size. When set to 0 no size based rotation is performed.
136 | # logfile_rotation_max_size = "0MB"
137 |
138 | ## Maximum number of rotated archives to keep, any older logs are deleted.
139 | ## If set to -1, no archives are removed.
140 | # logfile_rotation_max_archives = 5
141 |
142 | ## Override default hostname, if empty use os.Hostname()
143 | hostname = ""
144 |   ## If set to true, do not set the "host" tag in the telegraf agent.
145 | omit_hostname = false
146 |
147 |
148 | ###############################################################################
149 | # OUTPUT PLUGINS #
150 | ###############################################################################
151 |
152 | # Configuration for sending metrics to InfluxDB
153 | [[outputs.influxdb_v2]]
154 | ## The URLs of the InfluxDB cluster nodes.
155 | ##
156 |   ## Multiple URLs can be specified for a single cluster; only ONE of the
157 | ## urls will be written to each interval.
158 | ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
159 | urls = ["$INFLUX_HOST"]
160 | ## Token for authentication.
161 | token = "$INFLUX_TOKEN"
162 | ## Organization is the name of the organization you wish to write to; must exist.
163 | organization = "$INFLUX_ORG"
164 | ## Destination bucket to write into.
165 | bucket = "telegraf"
166 | ## The value of this tag will be used to determine the bucket. If this
167 |   ## tag is not set, the 'bucket' option is used as the default.
168 | # bucket_tag = ""
169 | ## If true, the bucket tag will not be added to the metric.
170 | # exclude_bucket_tag = false
171 | ## Timeout for HTTP messages.
172 | # timeout = "5s"
173 | ## Additional HTTP headers
174 | # http_headers = {"X-Special-Header" = "Special-Value"}
175 |   ## HTTP Proxy override; if unset, the standard proxy environment
176 | ## variables are consulted to determine which proxy, if any, should be used.
177 | # http_proxy = "http://corporate.proxy:3128"
178 | ## HTTP User-Agent
179 | # user_agent = "telegraf"
180 | ## Content-Encoding for write request body, can be set to "gzip" to
181 | ## compress body or "identity" to apply no encoding.
182 | # content_encoding = "gzip"
183 |   ## Enable or disable uint support for writing uints to InfluxDB 2.0.
184 | # influx_uint_support = false
185 | ## Optional TLS Config for use on HTTP connections.
186 | # tls_ca = "/etc/telegraf/ca.pem"
187 | # tls_cert = "/etc/telegraf/cert.pem"
188 | # tls_key = "/etc/telegraf/key.pem"
189 | ## Use TLS but skip chain & host verification
190 | # insecure_skip_verify = false
191 |
192 | ###############################################################################
193 | # INPUT PLUGINS #
194 | ###############################################################################
195 |
196 |
197 | # Read metrics about cpu usage
198 | [[inputs.cpu]]
199 | ## Whether to report per-cpu stats or not
200 | percpu = true
201 | ## Whether to report total system cpu stats or not
202 | totalcpu = true
203 | ## If true, collect raw CPU time metrics.
204 | collect_cpu_time = false
205 | ## If true, compute and report the sum of all non-idle CPU states.
206 | report_active = false
207 |
208 |
209 | # Read metrics about disk usage by mount point
210 | [[inputs.disk]]
211 | ## By default stats will be gathered for all mount points.
212 |   ## Setting mount_points will restrict the stats to only the specified mount points.
213 | # mount_points = ["/"]
214 |
215 | ## Ignore mount points by filesystem type.
216 | ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
217 |
218 |
219 | # Read metrics about disk IO by device
220 | [[inputs.diskio]]
221 | ## By default, telegraf will gather stats for all devices including
222 | ## disk partitions.
223 | ## Setting devices will restrict the stats to the specified devices.
224 | # devices = ["sda", "sdb", "vd*"]
225 | ## Uncomment the following line if you need disk serial numbers.
226 | # skip_serial_number = false
227 | #
228 | ## On systems which support it, device metadata can be added in the form of
229 | ## tags.
230 | ## Currently only Linux is supported via udev properties. You can view
231 | ## available properties for a device by running:
232 | ## 'udevadm info -q property -n /dev/sda'
233 | ## Note: Most, but not all, udev properties can be accessed this way. Properties
234 | ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
235 | # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
236 | #
237 | ## Using the same metadata source as device_tags, you can also customize the
238 | ## name of the device via templates.
239 | ## The 'name_templates' parameter is a list of templates to try and apply to
240 | ## the device. The template may contain variables in the form of '$PROPERTY' or
241 | ## '${PROPERTY}'. The first template which does not contain any variables not
242 | ## present for the device is used as the device name tag.
243 | ## The typical use case is for LVM volumes, to get the VG/LV name instead of
244 | ## the near-meaningless DM-0 name.
245 | # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
246 |
247 |
248 | # Get kernel statistics from /proc/stat
249 | [[inputs.kernel]]
250 | # no configuration
251 |
252 |
253 | # Read metrics about memory usage
254 | [[inputs.mem]]
255 | # no configuration
256 |
257 | # Read metrics about network interface usage
258 | [[inputs.net]]
259 | ## By default, telegraf gathers stats from any up interface (excluding loopback)
260 | ## Setting interfaces will tell it to gather these explicit interfaces,
261 | ## regardless of status.
262 | ##
263 | # interfaces = ["eth0"]
264 | ##
265 | ## On linux systems telegraf also collects protocol stats.
266 | ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
267 | ##
268 | # ignore_protocol_stats = false
269 | ##
270 |
271 | # Get the number of processes and group them by status
272 | [[inputs.processes]]
273 | # no configuration
274 |
275 |
276 | # Read metrics about swap memory usage
277 | [[inputs.swap]]
278 | # no configuration
279 |
280 |
281 | # Read metrics about system load & uptime
282 | [[inputs.system]]
283 | ## Uncomment to remove deprecated metrics.
284 | # fielddrop = ["uptime_format"]
285 | |||),
286 |
287 | influxdb.dashboard.new(name="Linux System", charts=[
288 | influxdb.dashboard.charts.singleStat.new(
289 | name="System Uptime",
290 | queries=[
291 | influxdb.query.new(flux=|||
292 | from(bucket: v.%(bucketName)s)
293 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
294 | |> filter(fn: (r) => r._measurement == "system")
295 | |> filter(fn: (r) => r._field == "uptime")
296 | |> filter(fn: (r) => r.host == v.linux_host)
297 | |> last()
298 | |> map(fn: (r) => ({ _value: float(v: r._value) / 86400.00 }))
299 | ||| % { bucketName: variableBucket.metadata.name})
300 | ],
301 | note="System Uptime"
302 | )
303 | ])
304 | ]
305 | // - colors:
306 | // - hex: '#00C9FF'
307 | // name: laser
308 | // type: text
309 | // height: 1
310 | // suffix: ' days'
311 | // width: 3
312 | // yPos: 1
313 |
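314 | // A possible workflow (untested sketch; assumes the 2.0-beta CLI's pkg command):
315 | //
316 | //   jsonnet -J ./influxdb-jsonnet ./src/main.jsonnet > template.json
317 | //   influx pkg -f template.json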
--------------------------------------------------------------------------------
/kapacitor/removing-a-tag/README.md:
--------------------------------------------------------------------------------
1 | # Removing Tags with Kapacitor
2 |
3 | ```shell
4 | docker-compose up -d
5 |
6 | # Wait 10 minutes, so we have some "old" data that won't be caught by the stream task
7 | docker-compose exec kapacitor /tmp/define-tasks.sh
8 | ```
9 |
10 | This should clone all points from `telegraf.autogen.cpu` to `telegraf.autogen.notag`, stripping the `host` tag.
11 |
12 | New data is handled in real time by the stream task; historical data is handled by the batch task.
13 |
14 | See `define-tasks.sh`.
15 |
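16 | For reference, a minimal sketch of the kind of TICKscript the stream task
17 | could use (illustrative only; the real script is `live-stream.tick`):
18 |
19 | ```
20 | stream
21 |     |from()
22 |         .database('telegraf')
23 |         .retentionPolicy('autogen')
24 |         .measurement('cpu')
25 |     |delete()
26 |         .tag('host')
27 |     |influxDBOut()
28 |         .database('telegraf')
29 |         .retentionPolicy('autogen')
30 |         .measurement('notag')
31 | ```
32 |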
--------------------------------------------------------------------------------
/kapacitor/removing-a-tag/define-tasks.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | kapacitor define notag-stream -tick /tmp/live-stream.tick
3 | kapacitor enable notag-stream
4 | kapacitor define notag-batch -tick /tmp/historical-batch.tick
5 | kapacitor replay-live batch -task notag-batch -past 90d -rec-time
6 |
--------------------------------------------------------------------------------
/kapacitor/removing-a-tag/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.4"
2 |
3 | services:
4 | chronograf:
5 | image: chronograf:1.7
6 | command: chronograf --influxdb-url=http://influxdb:8086 --kapacitor-url=http://kapacitor:9092
7 | ports:
8 | - 8888:8888
9 | depends_on:
10 | influxdb:
11 | condition: service_healthy
12 |
13 | influxdb:
14 | image: influxdb:1.7
15 | healthcheck:
16 | test: bash -c "
11 | import "http"
12 | import "json"
13 |
14 | from(bucket: "kubernetes")
15 | |> range(start: -1m)
16 | |> filter(fn: (r) => r._measurement == "kubernetes_pod_container")
17 | |> filter(fn: (r) => r.namespace == "gitops")
18 | |> filter(fn: (r) => not exists r["gitops.com/sha"])
19 | |> group(columns: ["pod_name"])
20 | |> first()
21 | |> map(fn: (r) => {
22 | payload = {
23 | message: "There's an unexpected pod running in our cluster!",
24 | pod_name: r.pod_name,
25 | namespace: r.namespace,
26 | node_name: r.node_name
27 | }
28 |
29 | _ = http.post(url: "https://rbox.app/box/request/7a0f9021-e16a-40f1-a96f-968b6d4778f3", data: json.encode(v: payload))
30 |
31 | return {r with message: string(v: json.encode(v: payload))}
32 | })
33 |
--------------------------------------------------------------------------------
/kubernetes/gitops-alerting/kubernetes/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: gitops
5 | ---
6 | apiVersion: apps/v1
7 | kind: Deployment
8 | metadata:
9 | name: nginx
10 | namespace: gitops
11 | spec:
12 | selector:
13 | matchLabels:
14 | app: nginx
15 | template:
16 | metadata:
17 | labels:
18 | app: nginx
19 | gitops.com/sha: fake_sha
20 | spec:
21 | containers:
22 | - name: nginx
23 | image: nginx
24 | resources: {}
25 | ports:
26 | - containerPort: 80
27 | ---
28 | apiVersion: apps/v1
29 | kind: Deployment
30 | metadata:
31 | name: nginx2
32 | namespace: gitops
33 | spec:
34 | selector:
35 | matchLabels:
36 | app: nginx2
37 | template:
38 | metadata:
39 | labels:
40 | app: nginx2
41 | spec:
42 | containers:
43 | - name: nginx2
44 | image: nginx
45 | resources: {}
46 | ports:
47 | - containerPort: 80
48 |
--------------------------------------------------------------------------------
/kubernetes/influxdb/influxdb-setup.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: influxdb-setup
5 | namespace: monitoring
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: create-credentials
11 | image: quay.io/influxdb/influxdb:2.0.0-alpha
12 | command:
13 | - influx
14 | args:
15 | - setup
16 | - --host
17 | - http://influxdb.monitoring:9999
18 | - -b
19 | - kubernetes
20 | - -o
21 | - InfluxData
22 | - -p
23 | - cloudnative
24 | - -u
25 | - rawkode
26 | - -t
27 | - secret-token
28 | - -f
29 | restartPolicy: Never
30 |
--------------------------------------------------------------------------------
/kubernetes/influxdb/influxdb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: influxdb
5 | namespace: monitoring
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: influxdb
10 | template:
11 | metadata:
12 | labels:
13 | app: influxdb
14 | spec:
15 | containers:
16 | - name: influxdb
17 | image: quay.io/influxdb/influxdb:2.0.0-alpha
18 | resources:
19 | limits:
20 | memory: "128Mi"
21 | cpu: "1000m"
22 | ports:
23 | - containerPort: 9999
24 | ---
25 | apiVersion: v1
26 | kind: Service
27 | metadata:
28 | name: influxdb
29 | namespace: monitoring
30 | spec:
31 | type: ClusterIP
32 | selector:
33 | app: influxdb
34 | ports:
35 | - port: 9999
36 |
--------------------------------------------------------------------------------
/kubernetes/nodes/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rawkode/telegraf:byo AS build
2 |
3 | FROM alpine:3.7 AS telegraf
4 |
5 | COPY --from=build /etc/telegraf /etc/telegraf
6 | COPY --from=build /go/src/github.com/influxdata/telegraf/telegraf /bin/telegraf
7 | ENTRYPOINT [ "/bin/telegraf" ]
8 |
--------------------------------------------------------------------------------
/kubernetes/nodes/telegraf.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: telegraf-nodes
5 | namespace: monitoring
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: telegraf-nodes
10 | template:
11 | metadata:
12 | labels:
13 | app: telegraf-nodes
14 | spec:
15 | serviceAccount: telegraf
16 | volumes:
17 | - name: telegraf-config
18 | configMap:
19 | name: telegraf-nodes
20 | containers:
21 | - name: telegraf-nodes
22 | image: telegraf:master
23 | command:
24 | - telegraf
25 | imagePullPolicy: IfNotPresent
26 | resources: {}
27 | env:
28 | - name: HOSTIP
29 | valueFrom:
30 | fieldRef:
31 | fieldPath: status.hostIP
32 |
--------------------------------------------------------------------------------
/kubernetes/nodes/telegraf/telegraf.conf:
--------------------------------------------------------------------------------
1 | [[outputs.influxdb_v2]]
2 | urls = ["http://influxdb.monitoring:9999"]
3 |
4 | organization = "InfluxData"
5 | bucket = "kubernetes"
6 | token = "secret-token"
7 |
8 | [[inputs.kubernetes]]
9 | url = "https://$HOSTIP:10250"
10 | bearer_token = "/run/secrets/kubernetes.io/serviceaccount/token"
11 | insecure_skip_verify = true
12 | label_include = ["gitops.com/sha"]
13 | label_exclude = []
14 |
15 | [[outputs.file]]
16 | files = ["stdout"]
17 |
--------------------------------------------------------------------------------
/kubernetes/prometheus/exporters/consul-exporter.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: consul-exporter
5 | namespace: prometheus
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: consul-exporter
10 | template:
11 | metadata:
12 | labels:
13 | app: consul-exporter
14 | annotations:
15 | prometheus.io/scrape: "true"
16 | prometheus.io/scheme: "http"
17 | prometheus.io/path: "/metrics"
18 | prometheus.io/port: "9107"
19 | spec:
20 | containers:
21 | - name: consul-exporter
22 | image: prom/consul-exporter
23 | args:
24 | - --consul.server=consul.default:8500
25 | resources: {}
26 | ports:
27 | - containerPort: 9107
28 |
--------------------------------------------------------------------------------
/kubernetes/prometheus/telegraf-config.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: telegraf-prometheus
5 | namespace: monitoring
6 | data:
7 | telegraf.conf: |
8 | [[outputs.influxdb_v2]]
9 | urls = ["http://influxdb.monitoring:9999"]
10 |
11 | organization = "InfluxData"
12 | bucket = "kubernetes"
13 | token = "secret-token"
14 |
15 | [[inputs.prometheus]]
16 | monitor_kubernetes_pods = true
17 | # monitor_kubernetes_pods_namespace = ""
18 |
19 | bearer_token = "/run/secrets/kubernetes.io/serviceaccount/token"
20 | insecure_skip_verify = true
21 |
--------------------------------------------------------------------------------
/kubernetes/prometheus/telegraf.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: telegraf-prometheus
5 | namespace: monitoring
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: telegraf-prometheus
10 | template:
11 | metadata:
12 | labels:
13 | app: telegraf-prometheus
14 | spec:
15 | serviceAccount: telegraf
16 | volumes:
17 | - name: telegraf-config
18 | configMap:
19 | name: telegraf-prometheus
20 | containers:
21 | - name: telegraf-prometheus
22 | image: telegraf:1.11
23 | resources: {}
24 | volumeMounts:
25 | - name: telegraf-config
26 | mountPath: /etc/telegraf/telegraf.conf
27 | subPath: telegraf.conf
28 |
--------------------------------------------------------------------------------
/kubernetes/setup/namespaces.yaml:
--------------------------------------------------------------------------------
1 | kind: Namespace
2 | apiVersion: v1
3 | metadata:
4 | name: monitoring
5 | ---
6 | kind: Namespace
7 | apiVersion: v1
8 | metadata:
9 | name: prometheus
10 |
--------------------------------------------------------------------------------
/kubernetes/setup/rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: telegraf
5 | namespace: monitoring
6 | automountServiceAccountToken: true
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: ClusterRole
10 | metadata:
11 | name: telegraf
12 | rules:
13 | - apiGroups:
14 | - "*"
15 | resources:
16 | - "*"
17 | verbs:
18 | - "*"
19 | - nonResourceURLs:
20 | - "*"
21 | verbs:
22 | - "get"
23 | - "list"
24 | - "watch"
25 | ---
26 | apiVersion: rbac.authorization.k8s.io/v1
27 | kind: ClusterRoleBinding
28 | metadata:
29 | name: telegraf
30 | subjects:
31 | - kind: ServiceAccount
32 | name: telegraf
33 | namespace: monitoring
34 | apiGroup: ""
35 | roleRef:
36 | kind: ClusterRole
37 | name: telegraf
38 | apiGroup: rbac.authorization.k8s.io
39 |
--------------------------------------------------------------------------------
/python-jenkins.py:
--------------------------------------------------------------------------------
1 | import jenkins
2 |
3 | # Connect to a local Jenkins instance (assumes anonymous API access)
4 | server = jenkins.Jenkins('http://localhost:8080')
5 | print(server.jobs_count())
6 |
7 | # Reuse the config XML of an existing job named 'Test' as a template
8 | my_job = server.get_job_config('Test')
9 |
10 | def create_jenkins_jobs(number):
11 |     """Create, enable, and build `number` copies of the template job."""
12 |     job_names = ["Test" + str(x) for x in range(number)]
13 |     for job in job_names:
14 |         server.create_job(job, my_job)
15 |         server.enable_job(job)
16 |         server.build_job(job)
17 |
18 |
19 | def disable_jenkins_jobs(number):
20 |     """Disable the jobs created by create_jenkins_jobs."""
21 |     job_names = ["Test" + str(x) for x in range(number)]
22 |     for job in job_names:
23 |         server.disable_job(job)
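24 |
25 | # Hypothetical usage (not in the original script): generate Jenkins load so
26 | # the Telegraf jenkins input has jobs and builds to report on.
27 | # create_jenkins_jobs(100)
28 | # disable_jenkins_jobs(100)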
--------------------------------------------------------------------------------
/telegraf/archive.luftdaten.info/.gitignore:
--------------------------------------------------------------------------------
1 | *.csv
2 | *.zip
--------------------------------------------------------------------------------
/telegraf/archive.luftdaten.info/Makefile:
--------------------------------------------------------------------------------
1 | 2019-11_dht22.csv:
2 | @curl http://archive.luftdaten.info/csv_per_month/2019-11/2019-11_dht22.zip -o 2019-11_dht22.zip
3 | @unzip 2019-11_dht22.zip
4 |
5 | setup: 2019-11_dht22.csv
6 | @docker-compose up -d influxdb
7 | @sleep 10
8 | @docker-compose exec influxdb influx setup -f --host 'http://localhost:9999' -b luftdaten -o luftdaten -p luftdaten123 -u luftdaten -t luftdaten
9 |
10 | import:
11 | @docker-compose run --rm telegraf
12 |
13 | clean:
14 | @docker-compose down -v
--------------------------------------------------------------------------------
/telegraf/archive.luftdaten.info/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.4"
2 |
3 | services:
4 | influxdb:
5 | image: "quay.io/influxdb/influxdb:2.0.0-beta"
6 | ports:
7 | - "9999:9999"
8 |
9 | telegraf:
10 | image: "telegraf:1.14"
11 | mem_limit: "512m"
12 | volumes:
13 | - "./telegraf.conf:/etc/telegraf/telegraf.conf"
14 | - "./2019-11_dht22.csv:/data/2019-11_dht22.csv"
15 |
--------------------------------------------------------------------------------
/telegraf/archive.luftdaten.info/telegraf.conf:
--------------------------------------------------------------------------------
1 | [agent]
2 | interval = "5s"
3 | flush_interval = "5s"
4 | round_interval = false
5 | debug = true
6 | omit_hostname = true
7 | metric_batch_size = 10000
8 | metric_buffer_limit = 10000
9 |
10 | [[outputs.influxdb_v2]]
11 | urls = ["http://influxdb:9999"]
12 | token = "luftdaten"
13 | organization = "luftdaten"
14 | bucket = "luftdaten"
15 |
16 | [[inputs.internal]]
17 |
18 | [[inputs.tail]]
19 | files = ["/data/2019-11_dht22.csv"]
20 | name_override = "sensor"
21 | from_beginning = true
22 | data_format = "csv"
23 |
24 | # sensor_id;sensor_type;location;lat;lon;timestamp;temperature;humidity
25 | # 3794;DHT22;1912;51.358;12.282;2019-11-01T00:00:00;-0.50;1.00
26 | csv_header_row_count = 1
27 | csv_delimiter = ";"
28 | csv_trim_space = false
29 | csv_tag_columns = ["sensor_id", "sensor_type", "location", "lat", "lon"]
30 | csv_timestamp_column = "timestamp"
31 | csv_timestamp_format = "2006-01-02T15:04:05"
32 |
33 | fielddrop = ["timestamp"]
34 | tagexclude = ["path"]
35 |
36 | [[processors.converter]]
37 | [processors.converter.fields]
38 | float = ["humidity", "temperature"]
39 |
40 | [[processors.s2geo]]
41 | ## The name of the lat and lon fields containing WGS-84 latitude and
42 | ## longitude in decimal degrees.
43 | lat_field = "lat"
44 | lon_field = "lon"
45 |
46 | ## New tag to create
47 | tag_key = "s2_cell_id"
48 |
49 | ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
50 | cell_level = 9
51 |
--------------------------------------------------------------------------------
/telegraf/csv/README.md:
--------------------------------------------------------------------------------
1 | # CSV Parsing with Tail Input
2 |
3 | Uses the `tail` input plugin, instead of `file`, so that the entire file isn't re-processed every `interval`.
4 |
5 | Commented out the timestamp examples from [this Reddit thread](https://www.reddit.com/r/influxdb/comments/bkttj8/requesting_help_only_first_line_from_csv_file_is/), as I don't think the `Time` column can be parsed reliably in such a weak format.
6 |
7 | ## Output
8 |
9 | ```
10 | > select * from tail
11 | name: tail
12 | time ComputerName Id IsOpen ProcessName Time UserName host path
13 | ---- ------------ -- ------ ----------- ---- -------- ---- ----
14 | 1557087344019300200 Computer1 11524 1 Adobe Desktop Service 3.5.2019 12.17.00 User1 ee9ef3e20439 /data/data.csv
15 | 1557087344019398000 Computer2 11524 0 Adobe Desktop Service 3.5.2019 14.17.59 User1 ee9ef3e20439 /data/data.csv
16 | 1557087344019467500 Computer3 11524 0 Adobe Desktop Service 3.5.2019 14.17.59 User1 ee9ef3e20439 /data/data.csv
17 | 1557087344019550100 Computer4 11524 0 Adobe Desktop Service 3.5.2019 14.17.59 User1 ee9ef3e20439 /data/data.csv
18 | 1557087344019584100 Computer5 11524 0 Adobe Desktop Service 3.5.2019 14.17.59 User1 ee9ef3e20439 /data/data.csv
19 | ```
20 |
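21 | That said, Go's reference-time layout tokens `1` (month) and `2` (day) match
22 | values without leading zeros, so a layout like the untested sketch below
23 | *might* parse the `Time` column after all:
24 |
25 | ```
26 | [[inputs.tail]]
27 |   files = ["/data/**.csv"]
28 |   from_beginning = true
29 |   data_format = "csv"
30 |   csv_header_row_count = 1
31 |   # "2.1.2006" = non-padded day.month.year; "15.04.05" = dot-separated 24h time
32 |   csv_timestamp_column = "Time"
33 |   csv_timestamp_format = "2.1.2006 15.04.05"
34 | ```
35 |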
--------------------------------------------------------------------------------
/telegraf/csv/data.csv:
--------------------------------------------------------------------------------
1 | "ComputerName","UserName","ProcessName","Id","Time","IsOpen"
2 | "Computer1","User1","Adobe Desktop Service","11524","3.5.2019 12.17.00","1"
3 | "Computer2","User1","Adobe Desktop Service","11524","3.5.2019 14.17.59","0"
4 | "Computer3","User1","Adobe Desktop Service","11524","3.5.2019 14.17.59","0"
5 | "Computer4","User1","Adobe Desktop Service","11524","3.5.2019 14.17.59","0"
6 | "Computer5","User1","Adobe Desktop Service","11524","3.5.2019 14.17.59","0"
7 |
--------------------------------------------------------------------------------
/telegraf/csv/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 |
3 | services:
4 | chronograf:
5 | image: chronograf:1.7
6 | command: chronograf --influxdb-url=http://influxdb:8086
7 | ports:
8 | - 8888:8888
9 |
10 | influxdb:
11 | image: influxdb:1.7
12 |
13 | telegraf:
14 | image: telegraf
15 | volumes:
16 | - ./telegraf.conf:/etc/telegraf/telegraf.conf
17 | - ./data.csv:/data/data.csv
18 |
--------------------------------------------------------------------------------
/telegraf/csv/telegraf.conf:
--------------------------------------------------------------------------------
1 | [agent]
2 | debug = true
3 |
4 | [[outputs.influxdb]]
5 | urls = ["http://influxdb:8086"]
6 | database = "telegraf"
7 |
8 | [[inputs.tail]]
9 | files = ["/data/**.csv"]
10 | from_beginning = true
11 |
12 | data_format = "csv"
13 | csv_header_row_count = 1
14 | csv_trim_space = true
15 |
16 | csv_tag_columns = ["ComputerName", "UserName", "ProcessName"]
17 |
18 | # csv_timestamp_column = "Time"
19 | # csv_timestamp_format = "02.01.2006 15.04.05"
20 |
--------------------------------------------------------------------------------
/telegraf/influxdays-sf/2.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | docker-compose exec influxdb2 influx setup --host http://localhost:9999 \
3 | -b influxdays \
4 | -o influxdata \
5 | -p influx123 \
6 | -u influx \
7 | -t influx \
8 | -f
9 |
--------------------------------------------------------------------------------
/telegraf/influxdays-sf/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM telegraf:1.12-alpine
2 |
3 | RUN apk add --update curl netcat-openbsd
4 |
--------------------------------------------------------------------------------
/telegraf/influxdays-sf/autocurl.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | while true; do
3 | sleep 1
4 | curl http://python:5555
5 | done
6 |
7 |
--------------------------------------------------------------------------------
/telegraf/influxdays-sf/curl.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | echo curl -X POST http://localhost:8080/telegraf -d '{"name": "test", "some_value": 1, "some_other_value": 2}'
3 | curl -X POST http://localhost:8080/telegraf -d '{"name": "test", "some_value": 1, "some_other_value": 2}'
4 | echo curl -X POST http://localhost:8080/telegraf -d '{"name": "metric", "my_tag": "rawkode", "sum": 1}'
5 | curl -X POST http://localhost:8080/telegraf -d '{"name": "metric", "my_tag": "rawkode", "sum": 1}'
6 |
--------------------------------------------------------------------------------
/telegraf/influxdays-sf/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.4"
2 |
3 | services:
4 | telegraf-edge:
5 | build: .
6 | ports:
7 | - 5559:5559
8 | volumes:
9 | - ./t-edge.conf:/etc/telegraf/telegraf.conf
10 | - ./curl.sh:/usr/bin/tcurl
11 | - ./echo.sh:/usr/bin/techo
12 | - ./prometheus.sh:/usr/bin/prom
13 |
14 | telegraf-consume:
15 | image: telegraf:1.12
16 | volumes:
17 | - ./t-consume.conf:/etc/telegraf/telegraf.conf
18 |
19 | python:
20 | build: ./python-app
21 | healthcheck:
22 | test: ["CMD", "curl", "-f", "http://telegraf-edge:5559"]
23 | interval: 1s
24 | timeout: 1s
25 | retries: 1
26 | start_period: 1s
27 | restart: always
28 | ports:
29 | - 5555:5555
30 |
31 | chronograf:
32 | image: chronograf:1.7
33 | command: chronograf --influxdb-url=http://influxdb:8086 --kapacitor-url=http://kapacitor:9092
34 | ports:
35 | - 8888:8888
36 | depends_on:
37 | influxdb:
38 | condition: service_healthy
39 |
40 | influxdb:
41 | image: influxdb:1.7
42 | healthcheck:
43 | test: bash -c "