├── .env ├── .env-latest ├── .env-nightlies ├── .gitignore ├── LICENSE ├── README.md ├── docker-compose.yml ├── documentation ├── Dockerfile ├── builds │ └── documentation ├── chart │ └── jackserver │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ ├── _helpers.tpl │ │ ├── deployment.yaml │ │ ├── hpa.yaml │ │ ├── ingress.yaml │ │ └── service.yaml │ │ └── values.yaml ├── cmd │ └── main.go └── static │ ├── images │ ├── add-password-chronograf.png │ ├── alert-chronograf.png │ ├── alert-slack.png │ ├── alert-threshold.png │ ├── alert-time-series.png │ ├── alert-type.png │ ├── configure-chronograf.png │ ├── configure-influxdb.png │ ├── configure-kapacitor.png │ ├── connect-to-flux.png │ ├── connect-to-influxdb.png │ ├── dashboard.png │ ├── flux-editor-explore.png │ ├── flux-editor-script.png │ ├── flux-editor.png │ ├── flux-function-description-covariance.png │ ├── flux-function-explorer.png │ ├── flux-script-wizard.png │ ├── host-list.png │ ├── kapacitor-config.png │ ├── kapacitor-rules.png │ ├── landing-page.png │ ├── logo-black.png │ ├── logo.png │ ├── query-requests.png │ ├── rename-alert.png │ ├── sandbox-dashboard.png │ ├── slack-integration.png │ └── wikipedia.png │ ├── js │ └── highlight.pack.js │ ├── stylesheets │ └── main.css │ ├── templates │ ├── article.html │ ├── footer.html │ ├── header.html │ ├── hero.html │ └── index.html │ └── tutorials │ ├── create-alert.md │ ├── enable-auth.md │ ├── flux-getting-started.md │ ├── index.md │ ├── telegraf-socket-listener.md │ └── understanding-sandbox.md ├── images ├── chronograf │ ├── latest │ │ └── Dockerfile │ ├── nightly │ │ └── Dockerfile │ ├── sandbox-kapa.kap │ └── sandbox.src ├── influxdb │ ├── latest │ │ └── Dockerfile │ └── nightly │ │ └── Dockerfile ├── kapacitor │ ├── latest │ │ └── Dockerfile │ └── nightly │ │ └── Dockerfile └── telegraf │ ├── latest │ └── Dockerfile │ └── nightly │ └── Dockerfile ├── influxdb └── config │ └── influxdb.conf ├── kapacitor └── config │ └── kapacitor.conf ├── sandbox 
├── sandbox.bat └── telegraf └── telegraf.conf /.env: -------------------------------------------------------------------------------- 1 | TELEGRAF_TAG=telegraf:latest 2 | INFLUXDB_TAG=influxdb:1.8 3 | CHRONOGRAF_TAG=chronograf:latest 4 | KAPACITOR_TAG=kapacitor:latest 5 | 6 | -------------------------------------------------------------------------------- /.env-latest: -------------------------------------------------------------------------------- 1 | export TELEGRAF_TAG=latest 2 | export INFLUXDB_TAG=1.8 3 | export CHRONOGRAF_TAG=latest 4 | export KAPACITOR_TAG=latest 5 | export TYPE=latest 6 | -------------------------------------------------------------------------------- /.env-nightlies: -------------------------------------------------------------------------------- 1 | export TELEGRAF_TAG=latest 2 | export INFLUXDB_TAG=nightly 3 | export CHRONOGRAF_TAG=nightly 4 | export KAPACITOR_TAG=latest 5 | export TYPE=nightly 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | chronograf/data/ 2 | influxdb/data/ 3 | kapacitor/data/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 InfluxData Inc. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # InfluxData 1.x Sandbox 2 | 3 | ***[InfluxDB 2.x is now available](https://portal.influxdata.com/downloads/) and available via [Docker Hub](https://hub.docker.com/_/influxdb). 4 | InfluxDB 2.x includes a native user interface, batch-style task processing and more. [Get Started Here!](https://docs.influxdata.com/influxdb/v2.0/get-started/)*** 5 | 6 | This repo is a quick way to get the entire 1.x TICK Stack spun up and working together. It uses [Docker](https://www.docker.com/) to spin up the full TICK stack in a connected 7 | fashion. This is heavily tested on MacOS and should mostly work on Linux and Windows. 8 | 9 | To get started you need a running docker installation. 
If you don't have one, you can download Docker for [Mac](https://www.docker.com/docker-mac) or [Windows](https://www.docker.com/docker-windows), or follow the installation instructions for Docker CE for your [Linux distribution](https://docs.docker.com/engine/installation/#server). 10 | 11 | ### Running 12 | 13 | To run the `sandbox`, simply use the convenient cli: 14 | 15 | ```bash 16 | $ ./sandbox 17 | sandbox commands: 18 | up -> spin up the sandbox environment (add -nightly to grab the latest nightly builds of InfluxDB and Chronograf) 19 | down -> tear down the sandbox environment 20 | restart -> restart the sandbox 21 | influxdb -> attach to the influx cli 22 | flux -> attach to the flux REPL 23 | 24 | enter (influxdb||kapacitor||chronograf||telegraf) -> enter the specified container 25 | logs (influxdb||kapacitor||chronograf||telegraf) -> stream logs for the specified container 26 | 27 | delete-data -> delete all data created by the TICK Stack 28 | docker-clean -> stop and remove all running docker containers 29 | rebuild-docs -> rebuild the documentation container to see updates 30 | ``` 31 | 32 | To get started just run `./sandbox up`. You browser will open two tabs: 33 | 34 | - `localhost:8888` - Chronograf's address. You will use this as a management UI for the full stack 35 | - `localhost:3010` - Documentation server. This contains a simple markdown server for tutorials and documentation. 36 | 37 | > NOTE: Make sure to stop any existing installations of `influxdb`, `kapacitor` or `chronograf`. If you have them running the Sandbox will run into port conflicts and fail to properly start. In this case stop the existing processes and run `./sandbox restart`. Also make sure you are **not** using _Docker Toolbox_. 38 | 39 | Once the Sandbox launches, you should see your dashboard appear in your browser: 40 | 41 | ![Dashboard](./documentation/static/images/landing-page.png) 42 | 43 | You are ready to get started with the TICK Stack! 
44 | 45 | Click the Host icon in the left navigation bar to see your host (named `telegraf-getting-started`) and its overall status. 46 | ![Host List](./documentation/static/images/host-list.png) 47 | 48 | You can click on `system` hyperlink to see a pre-built dashboard visualizing the basic system stats for your 49 | host, then check out the tutorials at `http://localhost:3010/tutorials`. 50 | 51 | If you are using the nightly builds and want to get started with Flux, make sure you check out the [Getting Started with Flux](./documentation/static/tutorials/flux-getting-started.md) tutorial. 52 | 53 | > Note: see [influx-stress](https://github.com/influxdata/influx-stress) to create data for your Sandbox. 54 | 55 | ![Dashboard](./documentation/static/images/sandbox-dashboard.png) 56 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | influxdb: 4 | # Full tag list: https://hub.docker.com/r/library/influxdb/tags/ 5 | build: 6 | context: ./images/influxdb/ 7 | dockerfile: ./${TYPE}/Dockerfile 8 | args: 9 | INFLUXDB_TAG: ${INFLUXDB_TAG} 10 | image: "influxdb" 11 | volumes: 12 | # Mount for influxdb data directory 13 | - ./influxdb/data:/var/lib/influxdb 14 | # Mount for influxdb configuration 15 | - ./influxdb/config/:/etc/influxdb/ 16 | ports: 17 | # The API for InfluxDB is served on port 8086 18 | - "8086:8086" 19 | - "8082:8082" 20 | # UDP Port 21 | - "8089:8089/udp" 22 | 23 | telegraf: 24 | # Full tag list: https://hub.docker.com/r/library/telegraf/tags/ 25 | build: 26 | context: ./images/telegraf/ 27 | dockerfile: ./${TYPE}/Dockerfile 28 | args: 29 | TELEGRAF_TAG: ${TELEGRAF_TAG} 30 | image: "telegraf" 31 | environment: 32 | HOSTNAME: "telegraf-getting-started" 33 | # Telegraf requires network access to InfluxDB 34 | links: 35 | - influxdb 36 | volumes: 37 | # Mount for telegraf configuration 38 | - 
./telegraf/:/etc/telegraf/ 39 | # Mount for Docker API access 40 | - /var/run/docker.sock:/var/run/docker.sock 41 | depends_on: 42 | - influxdb 43 | 44 | kapacitor: 45 | # Full tag list: https://hub.docker.com/r/library/kapacitor/tags/ 46 | build: 47 | context: ./images/kapacitor/ 48 | dockerfile: ./${TYPE}/Dockerfile 49 | args: 50 | KAPACITOR_TAG: ${KAPACITOR_TAG} 51 | image: "kapacitor" 52 | volumes: 53 | # Mount for kapacitor data directory 54 | - ./kapacitor/data/:/var/lib/kapacitor 55 | # Mount for kapacitor configuration 56 | - ./kapacitor/config/:/etc/kapacitor/ 57 | # Kapacitor requires network access to Influxdb 58 | links: 59 | - influxdb 60 | ports: 61 | # The API for Kapacitor is served on port 9092 62 | - "9092:9092" 63 | 64 | chronograf: 65 | # Full tag list: https://hub.docker.com/r/library/chronograf/tags/ 66 | build: 67 | context: ./images/chronograf 68 | dockerfile: ./${TYPE}/Dockerfile 69 | args: 70 | CHRONOGRAF_TAG: ${CHRONOGRAF_TAG} 71 | image: "chrono_config" 72 | environment: 73 | RESOURCES_PATH: "/usr/share/chronograf/resources" 74 | volumes: 75 | # Mount for chronograf database 76 | - ./chronograf/data/:/var/lib/chronograf/ 77 | links: 78 | # Chronograf requires network access to InfluxDB and Kapacitor 79 | - influxdb 80 | - kapacitor 81 | ports: 82 | # The WebUI for Chronograf is served on port 8888 83 | - "8888:8888" 84 | depends_on: 85 | - kapacitor 86 | - influxdb 87 | - telegraf 88 | 89 | documentation: 90 | build: 91 | context: ./documentation 92 | ports: 93 | - "3010:3000" 94 | -------------------------------------------------------------------------------- /documentation/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.12 2 | 3 | EXPOSE 3010:3000 4 | 5 | RUN mkdir -p /documentation 6 | 7 | COPY builds/documentation /documentation/ 8 | COPY static/ /documentation/static 9 | 10 | CMD ["/documentation/documentation", "-filePath", "/documentation/"] 11 | 
-------------------------------------------------------------------------------- /documentation/builds/documentation: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/builds/documentation -------------------------------------------------------------------------------- /documentation/chart/jackserver/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /documentation/chart/jackserver/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart for Kubernetes 3 | name: jackserver 4 | version: 0.1.0 5 | -------------------------------------------------------------------------------- /documentation/chart/jackserver/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 24 -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
12 | */}} 13 | {{- define "fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 24 -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /documentation/chart/jackserver/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "fullname" . }} 5 | labels: 6 | app: {{ template "fullname" . }} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | spec: 11 | replicas: {{ .Values.replicas.initial }} 12 | template: 13 | metadata: 14 | labels: 15 | app: {{ template "fullname" . }} 16 | spec: 17 | containers: 18 | - name: {{ .Chart.Name }} 19 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 20 | imagePullPolicy: {{ .Values.image.pullPolicy }} 21 | ports: 22 | - containerPort: {{ .Values.service.containerPort }} 23 | livenessProbe: 24 | httpGet: 25 | path: / 26 | port: {{ .Values.service.containerPort }} 27 | readinessProbe: 28 | httpGet: 29 | path: / 30 | port: {{ .Values.service.containerPort }} 31 | resources: 32 | {{ toYaml .Values.resources | indent 10 }} 33 | -------------------------------------------------------------------------------- /documentation/chart/jackserver/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: {{ template "fullname" . }} 5 | labels: 6 | app: {{ template "fullname" . }} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | spec: 11 | scaleTargetRef: 12 | apiVersion: extensions/v1beta1 13 | kind: Deployment 14 | name: {{ template "fullname" . 
}} 15 | minReplicas: {{ .Values.replicas.initial }} 16 | maxReplicas: {{ .Values.replicas.max }} 17 | targetCPUUtilizationPercentage: {{ .Values.replicas.cpuUtilization }} -------------------------------------------------------------------------------- /documentation/chart/jackserver/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled }} 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: {{ template "fullname" . }} 6 | labels: 7 | app: {{ template "fullname" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | annotations: 12 | {{ toYaml .Values.ingress.annotations | indent 4 }} 13 | spec: 14 | tls: 15 | - hosts: 16 | {{- range $domain := .Values.ingress.domains }} 17 | - {{ $domain }} 18 | {{- end }} 19 | secretName: {{ .Values.ingress.secretName }} 20 | rules: 21 | {{- $scope := . }} 22 | {{- range $domain := .Values.ingress.domains }} 23 | - host: {{ $domain }} 24 | http: 25 | paths: 26 | - path: / 27 | backend: 28 | serviceName: {{ template "fullname" $scope }} 29 | servicePort: {{ $scope.Values.service.servicePort }} 30 | {{- end }} 31 | {{- end }} -------------------------------------------------------------------------------- /documentation/chart/jackserver/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "fullname" . }} 5 | labels: 6 | app: {{ template "fullname" . }} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - port: {{ .Values.service.servicePort }} 14 | targetPort: {{ .Values.service.containerPort }} 15 | selector: 16 | app: {{ template "fullname" . 
}} -------------------------------------------------------------------------------- /documentation/chart/jackserver/values.yaml: -------------------------------------------------------------------------------- 1 | ## replicas controls how many pods will be run and at what CPU Utilization the deployment will be scaled 2 | ## 3 | replicas: 4 | initial: 2 5 | max: 20 6 | cpuUtilization: 70 7 | 8 | ## image controls which image is used for the pods 9 | ## 10 | image: 11 | repository: gcr.io/jackzampolin-web/jackserver 12 | tag: 0.0.8 13 | pullPolicy: Always 14 | 15 | ## Resources controls max resources allocated to the individual pods 16 | ## 17 | resources: 18 | limits: 19 | cpu: 1 20 | memory: 2Gi 21 | requests: 22 | cpu: 100m 23 | memory: 128Mi 24 | 25 | ## service controls the application ports 26 | ## 27 | service: 28 | containerPort: 3010 29 | servicePort: 80 30 | 31 | ## ingress controls the ingress resource settings 32 | ## 33 | ingress: 34 | enabled: true 35 | secretName: "jackserver-tls" 36 | domains: 37 | - "jackzampolin.com" 38 | - "www.jackzampolin.com" 39 | annotations: 40 | kubernetes.io/tls-acme: "true" 41 | kubernetes.io/ingress.class: "nginx" 42 | -------------------------------------------------------------------------------- /documentation/cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "html/template" 7 | "io/ioutil" 8 | "log" 9 | "net/http" 10 | "os" 11 | 12 | "github.com/gorilla/mux" 13 | "github.com/shurcooL/github_flavored_markdown" 14 | ) 15 | 16 | var ( 17 | port *string 18 | filePath *string 19 | ) 20 | 21 | func init() { 22 | gopath := os.Getenv("GOPATH") 23 | files := fmt.Sprintf("%v/src/github.com/jackzampolin/sandbox/documentation/", gopath) 24 | port = flag.String("port", ":3010", "specify port to run server") 25 | filePath = flag.String("filePath", files, "path where assets live") 26 | flag.Parse() 27 | } 28 | 29 | func mp(add 
string) string { 30 | return fmt.Sprintf("%v%v", *filePath, add) 31 | } 32 | 33 | func main() { 34 | log.Printf("Files in path %v\n", *filePath) 35 | log.Printf("Server started. Listening on port %v\n", *port) 36 | RunRouter() 37 | } 38 | 39 | // RunRouter is the router 40 | func RunRouter() { 41 | mux := mux.NewRouter() 42 | mux.StrictSlash(true) 43 | mux.HandleFunc("/", HomeHandler) 44 | mux.HandleFunc("/tutorials", TutorialIndexHandler) 45 | mux.HandleFunc("/tutorials/{article:[-a-zA-Z]+}", TemplateHandler).Methods("GET") 46 | mux.HandleFunc("/healthz", HealthzHandler).Methods("GET") 47 | 48 | // Static files 49 | mux.PathPrefix("/").Handler(http.FileServer(http.Dir(mp("static/")))) 50 | 51 | http.Handle("/", mux) 52 | http.ListenAndServe(*port, nil) 53 | } 54 | 55 | // ################## 56 | // # HANDLERS # 57 | // ################## 58 | 59 | // RenderArticle is a helper that renders an article from templates 60 | func RenderArticle(w http.ResponseWriter, r *http.Request, body []byte) { 61 | fp := mp("static/templates/article.html") 62 | articleTemplate, err := ioutil.ReadFile(fp) 63 | if err != nil { 64 | log.Fatalf("failed to open %v", fp) 65 | } 66 | vars := map[string]interface{}{ 67 | "Header": template.HTML(loadTemplatePart("header")), 68 | "Hero": template.HTML(loadTemplatePart("hero")), 69 | "Body": template.HTML(body), 70 | "Footer": template.HTML(loadTemplatePart("footer")), 71 | } 72 | t := template.New("article template") 73 | t, _ = t.Parse(string(articleTemplate)) 74 | t.Execute(w, vars) 75 | } 76 | 77 | // TemplateHandler handles the /templates/:id route 78 | func TemplateHandler(w http.ResponseWriter, r *http.Request) { 79 | requestVars := mux.Vars(r) 80 | articleName := string(requestVars["article"]) 81 | article := loadArticle(articleName) 82 | if article == nil { 83 | errorHandler(w, r, http.StatusNotFound) 84 | return 85 | } 86 | RenderArticle(w, r, article.Body) 87 | } 88 | 89 | // HealthzHandler handles the /healthz 90 | func 
HealthzHandler(w http.ResponseWriter, r *http.Request) { 91 | w.WriteHeader(200) 92 | } 93 | 94 | // HomeHandler handles the / route 95 | func HomeHandler(w http.ResponseWriter, r *http.Request) { 96 | fp := mp("static/templates/index.html") 97 | indexTemplate, err := ioutil.ReadFile(fp) 98 | if err != nil { 99 | log.Fatalf("failed to open %v", fp) 100 | } 101 | t := template.New("index-template") 102 | t, _ = t.Parse(string(indexTemplate)) 103 | t.Execute(w, indexTemplate) 104 | } 105 | 106 | // TutorialIndexHandler handles the /templates route 107 | func TutorialIndexHandler(w http.ResponseWriter, r *http.Request) { 108 | article := loadArticle("index") 109 | RenderArticle(w, r, article.Body) 110 | } 111 | 112 | // TODO: write 404 page. 113 | func errorHandler(w http.ResponseWriter, r *http.Request, status int) { 114 | w.WriteHeader(status) 115 | if status == http.StatusNotFound { 116 | 117 | } 118 | } 119 | 120 | // ################## 121 | // # HELPERS # 122 | // ################## 123 | 124 | // Article is used to build the github_flavored_markdown for pages 125 | type Article struct { 126 | Body []byte 127 | } 128 | 129 | // buildArticlePath is a path helper for articles 130 | func buildArticlePath(fileName string) string { 131 | return mp(fmt.Sprintf("static/tutorials/%v.md", fileName)) 132 | } 133 | 134 | // loadArticle pulls the article file from disk and renders the markdown 135 | func loadArticle(fileName string) *Article { 136 | fp := buildArticlePath(fileName) 137 | body, err := ioutil.ReadFile(fp) 138 | if err != nil { 139 | log.Fatalf("failed to open %v", fp) 140 | } 141 | markdown := github_flavored_markdown.Markdown(body) 142 | return &Article{Body: markdown} 143 | } 144 | 145 | // loadTemplatePart loads the parts of the template into memory 146 | func loadTemplatePart(part string) string { 147 | fp := mp(fmt.Sprintf("static/templates/%v.html", part)) 148 | content, err := ioutil.ReadFile(fp) 149 | if err != nil { 150 | log.Fatalf("failed to open 
%v", fp) 151 | } 152 | return string(content) 153 | } 154 | -------------------------------------------------------------------------------- /documentation/static/images/add-password-chronograf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/add-password-chronograf.png -------------------------------------------------------------------------------- /documentation/static/images/alert-chronograf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/alert-chronograf.png -------------------------------------------------------------------------------- /documentation/static/images/alert-slack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/alert-slack.png -------------------------------------------------------------------------------- /documentation/static/images/alert-threshold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/alert-threshold.png -------------------------------------------------------------------------------- /documentation/static/images/alert-time-series.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/alert-time-series.png -------------------------------------------------------------------------------- /documentation/static/images/alert-type.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/alert-type.png -------------------------------------------------------------------------------- /documentation/static/images/configure-chronograf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/configure-chronograf.png -------------------------------------------------------------------------------- /documentation/static/images/configure-influxdb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/configure-influxdb.png -------------------------------------------------------------------------------- /documentation/static/images/configure-kapacitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/configure-kapacitor.png -------------------------------------------------------------------------------- /documentation/static/images/connect-to-flux.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/connect-to-flux.png -------------------------------------------------------------------------------- /documentation/static/images/connect-to-influxdb.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/connect-to-influxdb.png -------------------------------------------------------------------------------- /documentation/static/images/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/dashboard.png -------------------------------------------------------------------------------- /documentation/static/images/flux-editor-explore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/flux-editor-explore.png -------------------------------------------------------------------------------- /documentation/static/images/flux-editor-script.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/flux-editor-script.png -------------------------------------------------------------------------------- /documentation/static/images/flux-editor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/flux-editor.png -------------------------------------------------------------------------------- /documentation/static/images/flux-function-description-covariance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/flux-function-description-covariance.png 
-------------------------------------------------------------------------------- /documentation/static/images/flux-function-explorer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/flux-function-explorer.png -------------------------------------------------------------------------------- /documentation/static/images/flux-script-wizard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/flux-script-wizard.png -------------------------------------------------------------------------------- /documentation/static/images/host-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/host-list.png -------------------------------------------------------------------------------- /documentation/static/images/kapacitor-config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/kapacitor-config.png -------------------------------------------------------------------------------- /documentation/static/images/kapacitor-rules.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/kapacitor-rules.png -------------------------------------------------------------------------------- /documentation/static/images/landing-page.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/landing-page.png -------------------------------------------------------------------------------- /documentation/static/images/logo-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/logo-black.png -------------------------------------------------------------------------------- /documentation/static/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/logo.png -------------------------------------------------------------------------------- /documentation/static/images/query-requests.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/query-requests.png -------------------------------------------------------------------------------- /documentation/static/images/rename-alert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/rename-alert.png -------------------------------------------------------------------------------- /documentation/static/images/sandbox-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/sandbox-dashboard.png -------------------------------------------------------------------------------- 
/documentation/static/images/slack-integration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/slack-integration.png -------------------------------------------------------------------------------- /documentation/static/images/wikipedia.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/influxdata/sandbox/ce55e5847251a37b6befec4f1edaa0914433f761/documentation/static/images/wikipedia.png -------------------------------------------------------------------------------- /documentation/static/js/highlight.pack.js: -------------------------------------------------------------------------------- 1 | /*! highlight.js v9.1.0 | BSD3 License | git.io/hljslicense */ 2 | !function(e){"undefined"!=typeof exports?e(exports):(self.hljs=e({}),"function"==typeof define&&define.amd&&define("hljs",[],function(){return self.hljs}))}(function(e){function n(e){return e.replace(/&/gm,"&").replace(//gm,">")}function t(e){return e.nodeName.toLowerCase()}function r(e,n){var t=e&&e.exec(n);return t&&0==t.index}function a(e){return/^(no-?highlight|plain|text)$/i.test(e)}function i(e){var n,t,r,i=e.className+" ";if(i+=e.parentNode?e.parentNode.className:"",t=/\blang(?:uage)?-([\w-]+)\b/i.exec(i))return E(t[1])?t[1]:"no-highlight";for(i=i.split(/\s+/),n=0,r=i.length;r>n;n++)if(E(i[n])||a(i[n]))return i[n]}function o(e,n){var t,r={};for(t in e)r[t]=e[t];if(n)for(t in n)r[t]=n[t];return r}function u(e){var n=[];return function r(e,a){for(var i=e.firstChild;i;i=i.nextSibling)3==i.nodeType?a+=i.nodeValue.length:1==i.nodeType&&(n.push({event:"start",offset:a,node:i}),a=r(i,a),t(i).match(/br|hr|img|input/)||n.push({event:"stop",offset:a,node:i}));return a}(e,0),n}function c(e,r,a){function i(){return e.length&&r.length?e[0].offset!=r[0].offset?e[0].offset"}function 
u(e){l+=""}function c(e){("start"==e.event?o:u)(e.node)}for(var s=0,l="",f=[];e.length||r.length;){var g=i();if(l+=n(a.substr(s,g[0].offset-s)),s=g[0].offset,g==e){f.reverse().forEach(u);do c(g.splice(0,1)[0]),g=i();while(g==e&&g.length&&g[0].offset==s);f.reverse().forEach(o)}else"start"==g[0].event?f.push(g[0].node):f.pop(),c(g.splice(0,1)[0])}return l+n(a.substr(s))}function s(e){function n(e){return e&&e.source||e}function t(t,r){return new RegExp(n(t),"m"+(e.cI?"i":"")+(r?"g":""))}function r(a,i){if(!a.compiled){if(a.compiled=!0,a.k=a.k||a.bK,a.k){var u={},c=function(n,t){e.cI&&(t=t.toLowerCase()),t.split(" ").forEach(function(e){var t=e.split("|");u[t[0]]=[n,t[1]?Number(t[1]):1]})};"string"==typeof a.k?c("keyword",a.k):Object.keys(a.k).forEach(function(e){c(e,a.k[e])}),a.k=u}a.lR=t(a.l||/\b\w+\b/,!0),i&&(a.bK&&(a.b="\\b("+a.bK.split(" ").join("|")+")\\b"),a.b||(a.b=/\B|\b/),a.bR=t(a.b),a.e||a.eW||(a.e=/\B|\b/),a.e&&(a.eR=t(a.e)),a.tE=n(a.e)||"",a.eW&&i.tE&&(a.tE+=(a.e?"|":"")+i.tE)),a.i&&(a.iR=t(a.i)),void 0===a.r&&(a.r=1),a.c||(a.c=[]);var s=[];a.c.forEach(function(e){e.v?e.v.forEach(function(n){s.push(o(e,n))}):s.push("self"==e?a:e)}),a.c=s,a.c.forEach(function(e){r(e,a)}),a.starts&&r(a.starts,i);var l=a.c.map(function(e){return e.bK?"\\.?("+e.b+")\\.?":e.b}).concat([a.tE,a.i]).map(n).filter(Boolean);a.t=l.length?t(l.join("|"),!0):{exec:function(){return null}}}}r(e)}function l(e,t,a,i){function o(e,n){for(var t=0;t";return i+=e+'">',i+n+o}function p(){if(!L.k)return n(M);var e="",t=0;L.lR.lastIndex=0;for(var r=L.lR.exec(M);r;){e+=n(M.substr(t,r.index-t));var a=g(L,r);a?(B+=a[1],e+=h(a[0],n(r[0]))):e+=n(r[0]),t=L.lR.lastIndex,r=L.lR.exec(M)}return e+n(M.substr(t))}function d(){var e="string"==typeof L.sL;if(e&&!R[L.sL])return n(M);var t=e?l(L.sL,M,!0,y[L.sL]):f(M,L.sL.length?L.sL:void 0);return L.r>0&&(B+=t.r),e&&(y[L.sL]=t.top),h(t.language,t.value,!1,!0)}function b(){return void 0!==L.sL?d():p()}function v(e,t){var 
r=e.cN?h(e.cN,"",!0):"";e.rB?(k+=r,M=""):e.eB?(k+=n(t)+r,M=""):(k+=r,M=t),L=Object.create(e,{parent:{value:L}})}function m(e,t){if(M+=e,void 0===t)return k+=b(),0;var r=o(t,L);if(r)return k+=b(),v(r,t),r.rB?0:t.length;var a=u(L,t);if(a){var i=L;i.rE||i.eE||(M+=t),k+=b();do L.cN&&(k+=""),B+=L.r,L=L.parent;while(L!=a.parent);return i.eE&&(k+=n(t)),M="",a.starts&&v(a.starts,""),i.rE?0:t.length}if(c(t,L))throw new Error('Illegal lexeme "'+t+'" for mode "'+(L.cN||"")+'"');return M+=t,t.length||1}var N=E(e);if(!N)throw new Error('Unknown language: "'+e+'"');s(N);var w,L=i||N,y={},k="";for(w=L;w!=N;w=w.parent)w.cN&&(k=h(w.cN,"",!0)+k);var M="",B=0;try{for(var C,j,I=0;;){if(L.t.lastIndex=I,C=L.t.exec(t),!C)break;j=m(t.substr(I,C.index-I),C[0]),I=C.index+j}for(m(t.substr(I)),w=L;w.parent;w=w.parent)w.cN&&(k+="");return{r:B,value:k,language:e,top:L}}catch(O){if(-1!=O.message.indexOf("Illegal"))return{r:0,value:n(t)};throw O}}function f(e,t){t=t||x.languages||Object.keys(R);var r={r:0,value:n(e)},a=r;return t.forEach(function(n){if(E(n)){var t=l(n,e,!1);t.language=n,t.r>a.r&&(a=t),t.r>r.r&&(a=r,r=t)}}),a.language&&(r.second_best=a),r}function g(e){return x.tabReplace&&(e=e.replace(/^((<[^>]+>|\t)+)/gm,function(e,n){return n.replace(/\t/g,x.tabReplace)})),x.useBR&&(e=e.replace(/\n/g,"
")),e}function h(e,n,t){var r=n?w[n]:t,a=[e.trim()];return e.match(/\bhljs\b/)||a.push("hljs"),-1===e.indexOf(r)&&a.push(r),a.join(" ").trim()}function p(e){var n=i(e);if(!a(n)){var t;x.useBR?(t=document.createElementNS("http://www.w3.org/1999/xhtml","div"),t.innerHTML=e.innerHTML.replace(/\n/g,"").replace(//g,"\n")):t=e;var r=t.textContent,o=n?l(n,r,!0):f(r),s=u(t);if(s.length){var p=document.createElementNS("http://www.w3.org/1999/xhtml","div");p.innerHTML=o.value,o.value=c(s,u(p),r)}o.value=g(o.value),e.innerHTML=o.value,e.className=h(e.className,n,o.language),e.result={language:o.language,re:o.r},o.second_best&&(e.second_best={language:o.second_best.language,re:o.second_best.r})}}function d(e){x=o(x,e)}function b(){if(!b.called){b.called=!0;var e=document.querySelectorAll("pre code");Array.prototype.forEach.call(e,p)}}function v(){addEventListener("DOMContentLoaded",b,!1),addEventListener("load",b,!1)}function m(n,t){var r=R[n]=t(e);r.aliases&&r.aliases.forEach(function(e){w[e]=n})}function N(){return Object.keys(R)}function E(e){return e=(e||"").toLowerCase(),R[e]||R[w[e]]}var x={classPrefix:"hljs-",tabReplace:null,useBR:!1,languages:void 0},R={},w={};return e.highlight=l,e.highlightAuto=f,e.fixMarkup=g,e.highlightBlock=p,e.configure=d,e.initHighlighting=b,e.initHighlightingOnLoad=v,e.registerLanguage=m,e.listLanguages=N,e.getLanguage=E,e.inherit=o,e.IR="[a-zA-Z]\\w*",e.UIR="[a-zA-Z_]\\w*",e.NR="\\b\\d+(\\.\\d+)?",e.CNR="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",e.BNR="\\b(0b[01]+)",e.RSR="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",e.BE={b:"\\\\[\\s\\S]",r:0},e.ASM={cN:"string",b:"'",e:"'",i:"\\n",c:[e.BE]},e.QSM={cN:"string",b:'"',e:'"',i:"\\n",c:[e.BE]},e.PWM={b:/\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|like)\b/},e.C=function(n,t,r){var 
a=e.inherit({cN:"comment",b:n,e:t,c:[]},r||{});return a.c.push(e.PWM),a.c.push({cN:"doctag",b:"(?:TODO|FIXME|NOTE|BUG|XXX):",r:0}),a},e.CLCM=e.C("//","$"),e.CBCM=e.C("/\\*","\\*/"),e.HCM=e.C("#","$"),e.NM={cN:"number",b:e.NR,r:0},e.CNM={cN:"number",b:e.CNR,r:0},e.BNM={cN:"number",b:e.BNR,r:0},e.CSSNM={cN:"number",b:e.NR+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",r:0},e.RM={cN:"regexp",b:/\//,e:/\/[gimuy]*/,i:/\n/,c:[e.BE,{b:/\[/,e:/\]/,r:0,c:[e.BE]}]},e.TM={cN:"title",b:e.IR,r:0},e.UTM={cN:"title",b:e.UIR,r:0},e});hljs.registerLanguage("css",function(e){var c="[a-zA-Z-][a-zA-Z0-9_-]*",t={b:/[A-Z\_\.\-]+\s*:/,rB:!0,e:";",eW:!0,c:[{cN:"attribute",b:/\S/,e:":",eE:!0,starts:{eW:!0,eE:!0,c:[{b:/[\w-]+\s*\(/,rB:!0,c:[{cN:"built_in",b:/[\w-]+/}]},e.CSSNM,e.QSM,e.ASM,e.CBCM,{cN:"number",b:"#[0-9A-Fa-f]+"},{cN:"meta",b:"!important"}]}}]};return{cI:!0,i:/[=\/|'\$]/,c:[e.CBCM,{cN:"selector-id",b:/#[A-Za-z0-9_-]+/},{cN:"selector-class",b:/\.[A-Za-z0-9_-]+/},{cN:"selector-attr",b:/\[/,e:/\]/,i:"$"},{cN:"selector-pseudo",b:/:(:)?[a-zA-Z0-9\_\-\+\(\)"'.]+/},{b:"@(font-face|page)",l:"[a-z-]+",k:"font-face page"},{b:"@",e:"[{;]",c:[{cN:"keyword",b:/\S+/},{b:/\s/,eW:!0,eE:!0,r:0,c:[e.ASM,e.QSM,e.CSSNM]}]},{cN:"selector-tag",b:c,r:0},{b:"{",e:"}",i:/\S/,c:[e.CBCM,t]}]}});hljs.registerLanguage("json",function(e){var t={literal:"true false null"},i=[e.QSM,e.CNM],r={e:",",eW:!0,eE:!0,c:i,k:t},s={b:"{",e:"}",c:[{cN:"attr",b:'\\s*"',e:'"\\s*:\\s*',eB:!0,eE:!0,c:[e.BE],i:"\\n",starts:r}],i:"\\S"},n={b:"\\[",e:"\\]",c:[e.inherit(r)],i:"\\S"};return i.splice(i.length,0,s,n),{c:i,k:t,i:"\\S"}});hljs.registerLanguage("python",function(e){var r={cN:"meta",b:/^(>>>|\.\.\.) 
/},b={cN:"string",c:[e.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[r],r:10},{b:/(u|b)?r?"""/,e:/"""/,c:[r],r:10},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)"/,e:/"/,r:10},{b:/(b|br)'/,e:/'/},{b:/(b|br)"/,e:/"/},e.ASM,e.QSM]},a={cN:"number",r:0,v:[{b:e.BNR+"[lLjJ]?"},{b:"\\b(0o[0-7]+)[lLjJ]?"},{b:e.CNR+"[lLjJ]?"}]},l={cN:"params",b:/\(/,e:/\)/,c:["self",r,a,b]};return{aliases:["py","gyp"],k:{keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10 None True False",built_in:"Ellipsis NotImplemented"},i:/(<\/|->|\?)/,c:[r,a,b,e.HCM,{v:[{cN:"function",bK:"def",r:10},{cN:"class",bK:"class"}],e:/:/,i:/[${=;\n,]/,c:[e.UTM,l,{b:/->/,eW:!0,k:"None"}]},{cN:"meta",b:/^[\t ]*@/,e:/$/},{b:/\b(print|exec)\(/}]}});hljs.registerLanguage("xml",function(s){var t="[A-Za-z0-9\\._:-]+",e={b:/<\?(php)?(?!\w)/,e:/\?>/,sL:"php"},r={eW:!0,i:/]+/}]}]}]};return{aliases:["html","xhtml","rss","atom","xsl","plist"],cI:!0,c:[{cN:"meta",b:"",r:10,c:[{b:"\\[",e:"\\]"}]},s.C("",{r:10}),{b:"<\\!\\[CDATA\\[",e:"\\]\\]>",r:10},{cN:"tag",b:"|$)",e:">",k:{name:"style"},c:[r],starts:{e:"",rE:!0,sL:["css","xml"]}},{cN:"tag",b:"|$)",e:">",k:{name:"script"},c:[r],starts:{e:"",rE:!0,sL:["actionscript","javascript","handlebars","xml"]}},e,{cN:"meta",b:/<\?\w+/,e:/\?>/,r:10},{cN:"tag",b:"",c:[{cN:"name",b:/[^\/><\s]+/,r:0},r]}]}});hljs.registerLanguage("http",function(e){var t="HTTP/[0-9\\.]+";return{aliases:["https"],i:"\\S",c:[{b:"^"+t,e:"$",c:[{cN:"number",b:"\\b\\d{3}\\b"}]},{b:"^[A-Z]+ (.*?) "+t+"$",rB:!0,e:"$",c:[{cN:"string",b:" ",e:" ",eB:!0,eE:!0},{b:t},{cN:"keyword",b:"[A-Z]+"}]},{cN:"attribute",b:"^\\w",e:": ",eE:!0,i:"\\n|\\s|=",starts:{e:"$",r:0}},{b:"\\n\\n",starts:{sL:[],eW:!0}}]}});hljs.registerLanguage("clojure",function(e){var t={"builtin-name":"def defonce cond apply if-not if-let if not not= = < > <= >= == + / * - rem quot neg? pos? delay? symbol? keyword? true? 
false? integer? empty? coll? list? set? ifn? fn? associative? sequential? sorted? counted? reversible? number? decimal? class? distinct? isa? float? rational? reduced? ratio? odd? even? char? seq? vector? string? map? nil? contains? zero? instance? not-every? not-any? libspec? -> ->> .. . inc compare do dotimes mapcat take remove take-while drop letfn drop-last take-last drop-while while intern condp case reduced cycle split-at split-with repeat replicate iterate range merge zipmap declare line-seq sort comparator sort-by dorun doall nthnext nthrest partition eval doseq await await-for let agent atom send send-off release-pending-sends add-watch mapv filterv remove-watch agent-error restart-agent set-error-handler error-handler set-error-mode! error-mode shutdown-agents quote var fn loop recur throw try monitor-enter monitor-exit defmacro defn defn- macroexpand macroexpand-1 for dosync and or when when-not when-let comp juxt partial sequence memoize constantly complement identity assert peek pop doto proxy defstruct first rest cons defprotocol cast coll deftype defrecord last butlast sigs reify second ffirst fnext nfirst nnext defmulti defmethod meta with-meta ns in-ns create-ns import refer keys select-keys vals key val rseq name namespace promise into transient persistent! conj! assoc! dissoc! pop! disj! use class type num float double short byte boolean bigint biginteger bigdec print-method print-dup throw-if printf format load compile get-in update-in pr pr-on newline flush read slurp read-line subvec with-open memfn time re-find re-groups rand-int rand mod locking assert-valid-fdecl alias resolve ref deref refset swap! reset! set-validator! compare-and-set! alter-meta! reset-meta! commute get-validator alter ref-set ref-history-count ref-min-history ref-max-history ensure sync io! new next conj set! 
to-array future future-call into-array aset gen-class reduce map filter find empty hash-map hash-set sorted-map sorted-map-by sorted-set sorted-set-by vec vector seq flatten reverse assoc dissoc list disj get union difference intersection extend extend-type extend-protocol int nth delay count concat chunk chunk-buffer chunk-append chunk-first chunk-rest max min dec unchecked-inc-int unchecked-inc unchecked-dec-inc unchecked-dec unchecked-negate unchecked-add-int unchecked-add unchecked-subtract-int unchecked-subtract chunk-next chunk-cons chunked-seq? prn vary-meta lazy-seq spread list* str find-keyword keyword symbol gensym force rationalize"},r="a-zA-Z_\\-!.?+*=<>&#'",n="["+r+"]["+r+"0-9/;:]*",a="[-+]?\\d+(\\.\\d+)?",o={b:n,r:0},s={cN:"number",b:a,r:0},i=e.inherit(e.QSM,{i:null}),c=e.C(";","$",{r:0}),d={cN:"literal",b:/\b(true|false|nil)\b/},l={b:"[\\[\\{]",e:"[\\]\\}]"},m={cN:"comment",b:"\\^"+n},p=e.C("\\^\\{","\\}"),u={cN:"symbol",b:"[:]"+n},f={b:"\\(",e:"\\)"},h={eW:!0,r:0},y={k:t,l:n,cN:"name",b:n,starts:h},b=[f,i,m,p,c,u,l,s,d,o];return f.c=[e.C("comment",""),y,h],h.c=b,l.c=b,{aliases:["clj"],i:/\S/,c:[f,i,m,p,c,u,l,s,d]}});hljs.registerLanguage("scala",function(e){var t={cN:"meta",b:"@[A-Za-z]+"},a={cN:"subst",v:[{b:"\\$[A-Za-z0-9_]+"},{b:"\\${",e:"}"}]},r={cN:"string",v:[{b:'"',e:'"',i:"\\n",c:[e.BE]},{b:'"""',e:'"""',r:10},{b:'[a-z]+"',e:'"',i:"\\n",c:[e.BE,a]},{cN:"string",b:'[a-z]+"""',e:'"""',c:[a],r:10}]},c={cN:"symbol",b:"'\\w[\\w\\d_]*(?!')"},i={cN:"type",b:"\\b[A-Z][A-Za-z0-9_]*",r:0},s={cN:"title",b:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t "'(),.`{}\[\]:;=]/,r:0},n={cN:"class",bK:"class object trait type",e:/[:={\[\n;]/,eE:!0,c:[{bK:"extends with",r:10},{b:/\[/,e:/\]/,eB:!0,eE:!0,r:0,c:[i]},{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,r:0,c:[i]},s]},l={cN:"function",bK:"def",e:/[:={\[(\n;]/,eE:!0,c:[s]};return{k:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private trait object if 
forSome for while throw finally protected extends import final return else break new catch super class case package default try this match continue throws implicit"},c:[e.CLCM,e.CBCM,r,c,i,l,n,e.CNM,t]}});hljs.registerLanguage("haskell",function(e){var i={v:[e.C("--","$"),e.C("{-","-}",{c:["self"]})]},a={cN:"meta",b:"{-#",e:"#-}"},l={cN:"meta",b:"^#",e:"$"},c={cN:"type",b:"\\b[A-Z][\\w']*",r:0},n={b:"\\(",e:"\\)",i:'"',c:[a,l,{cN:"type",b:"\\b[A-Z][\\w]*(\\((\\.\\.|,|\\w+)\\))?"},e.inherit(e.TM,{b:"[_a-z][\\w']*"}),i]},s={b:"{",e:"}",c:n.c};return{aliases:["hs"],k:"let in if then else case of where do module import hiding qualified type data newtype deriving class instance as default infix infixl infixr foreign export ccall stdcall cplusplus jvm dotnet safe unsafe family forall mdo proc rec",c:[{bK:"module",e:"where",k:"module where",c:[n,i],i:"\\W\\.|;"},{b:"\\bimport\\b",e:"$",k:"import qualified as hiding",c:[n,i],i:"\\W\\.|;"},{cN:"class",b:"^(\\s*)?(class|instance)\\b",e:"where",k:"class family instance where",c:[c,n,i]},{cN:"class",b:"\\b(data|(new)?type)\\b",e:"$",k:"data family type newtype deriving",c:[a,c,n,s,i]},{bK:"default",e:"$",c:[c,n,i]},{bK:"infix infixl infixr",e:"$",c:[e.CNM,i]},{b:"\\bforeign\\b",e:"$",k:"foreign import export ccall stdcall cplusplus jvm dotnet safe unsafe",c:[c,e.QSM,i]},{cN:"meta",b:"#!\\/usr\\/bin\\/env runhaskell",e:"$"},a,l,e.QSM,e.CNM,c,e.inherit(e.TM,{b:"^[_a-z][\\w']*"}),i,{b:"->|<-"}]}});hljs.registerLanguage("bash",function(e){var t={cN:"variable",v:[{b:/\$[\w\d#@][\w\d_]*/},{b:/\$\{(.*?)}/}]},s={cN:"string",b:/"/,e:/"/,c:[e.BE,t,{cN:"variable",b:/\$\(/,e:/\)/,c:[e.BE]}]},a={cN:"string",b:/'/,e:/'/};return{aliases:["sh","zsh"],l:/-?[a-z\.]+/,k:{keyword:"if then else elif fi for while in do done case esac function",literal:"true false",built_in:"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local 
logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp",_:"-ne -eq -lt -gt -f -d -e -s -l -a"},c:[{cN:"meta",b:/^#![^\n]+sh\s*$/,r:10},{cN:"function",b:/\w[\w\d_]*\s*\(\s*\)\s*\{/,rB:!0,c:[e.inherit(e.TM,{b:/\w[\w\d_]*/})],r:0},e.HCM,s,a,t]}});hljs.registerLanguage("java",function(e){var a=e.UIR+"(<"+e.UIR+"(\\s*,\\s*"+e.UIR+")*>)?",t="false synchronized int abstract float private char boolean static null if const for true while long strictfp finally protected import native final void enum else break transient catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private",r="\\b(0[bB]([01]+[01_]+[01]+|[01]+)|0[xX]([a-fA-F0-9]+[a-fA-F0-9_]+[a-fA-F0-9]+|[a-fA-F0-9]+)|(([\\d]+[\\d_]+[\\d]+|[\\d]+)(\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))?|\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))([eE][-+]?\\d+)?)[lLfF]?",c={cN:"number",b:r,r:0};return{aliases:["jsp"],k:t,i:/<\/|#/,c:[e.C("/\\*\\*","\\*/",{r:0,c:[{b:/\w+@/,r:0},{cN:"doctag",b:"@[A-Za-z]+"}]}),e.CLCM,e.CBCM,e.ASM,e.QSM,{cN:"class",bK:"class interface",e:/[{;=]/,eE:!0,k:"class interface",i:/[:"\[\]]/,c:[{bK:"extends implements"},e.UTM]},{bK:"new throw return else",r:0},{cN:"function",b:"("+a+"\\s+)+"+e.UIR+"\\s*\\(",rB:!0,e:/[{;=]/,eE:!0,k:t,c:[{b:e.UIR+"\\s*\\(",rB:!0,r:0,c:[e.UTM]},{cN:"params",b:/\(/,e:/\)/,k:t,r:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]},c,{cN:"meta",b:"@[A-Za-z]+"}]}}); 
-------------------------------------------------------------------------------- /documentation/static/stylesheets/main.css: -------------------------------------------------------------------------------- 1 | * { 2 | -webkit-font-smoothing: antialiased; 3 | font-smoothing: always; 4 | } 5 | 6 | /** Medium */ 7 | @font-face { 8 | font-family: "San Francisco"; 9 | font-weight: normal; 10 | src: url("https://applesocial.s3.amazonaws.com/assets/styles/fonts/sanfrancisco/sanfranciscodisplay-medium-webfont.woff2"); 11 | } 12 | 13 | /** Semi Bold */ 14 | @font-face { 15 | font-family: "San Francisco"; 16 | font-weight: 500; 17 | src: url("https://applesocial.s3.amazonaws.com/assets/styles/fonts/sanfrancisco/sanfranciscodisplay-semibold-webfont.woff2"); 18 | } 19 | 20 | /** Bold */ 21 | @font-face { 22 | font-family: "San Francisco"; 23 | font-weight: bold; 24 | src: url("https://applesocial.s3.amazonaws.com/assets/styles/fonts/sanfrancisco/sanfranciscodisplay-bold-webfont.woff2"); 25 | } 26 | 27 | body { 28 | font-family: 'San Francisco', "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "Lucida Grande", sans-serif; 29 | font-size: 14px; 30 | color: #444; 31 | line-height: 1.5; 32 | } 33 | 34 | h1, h2, h3 { color: #222; font-weight: 600; } 35 | 36 | a, a:hover { color: #513CC6; } 37 | 38 | ul { margin: 0; padding: 0; list-style: none; } 39 | 40 | .container { 41 | max-width: 968px; 42 | margin: 0 auto; 43 | font-size: 16px; 44 | } 45 | 46 | article { 47 | clear: both; 48 | padding: 24px 0 48px; 49 | max-width: 1024px; 50 | margin: 0 auto; 51 | } 52 | 53 | article img { 54 | margin: 24px 0; 55 | max-width: 100%; 56 | } 57 | 58 | article ul { list-style: disc; } 59 | article li { list-style: disc; margin-left: 24px; } 60 | 61 | blockquote { 62 | border-left: 10px solid #ccc; 63 | margin: 24px ; 64 | padding: 24px; 65 | quotes: "\201C""\201D""\2018""\2019"; 66 | font-style: italic; 67 | } 68 | 69 | #hero { 70 | width: 100%; 71 | height: 
100%; 72 | background: #222 url(https://www.influxdata.com/wp-content/uploads/Background.png) no-repeat center fixed; 73 | background-size: cover; 74 | position: relative; 75 | overflow: hidden; // firefox bug 76 | } 77 | 78 | #hero h1, #intro h2, #hero a { color: #fff; } 79 | 80 | #hero a { text-decoration: none; } 81 | 82 | #hero h2 { color: #ccc; } 83 | 84 | #hero .overlay { 85 | background: rgba(0, 0, 0, 0.4); 86 | width: 100%; 87 | height: 100%; 88 | position: absolute; 89 | top: 0; 90 | left: 0; 91 | } 92 | 93 | #hero .container { z-index: 2; position: relative; height: 100%; } 94 | 95 | #hero .fa { 96 | color: white; 97 | opacity: 0.7; 98 | font-size: 2em; 99 | transition: opacity .25s ease-in-out; 100 | -moz-transition: opacity .25s ease-in-out; 101 | -webkit-transition: opacity .25s ease-in-out; 102 | } 103 | 104 | #hero .fa:hover { 105 | opacity: 1; 106 | } 107 | 108 | #hero .social { 109 | text-align: center; 110 | margin: 48px 0 0 0; 111 | } 112 | 113 | #hero .social li { 114 | display: inline; 115 | margin: 0 12px 0 0; 116 | } 117 | 118 | #hero h1 { 119 | font-size: 2.8em; 120 | } 121 | 122 | #hero nav h1 { 123 | color: white; 124 | margin: 0; 125 | position: relative; 126 | top: 24px; 127 | } 128 | 129 | #hero nav { 130 | height: 96px; 131 | } 132 | 133 | #hero.article-hero { 134 | background: white; 135 | height: 80px; 136 | border-bottom: 2px solid #eee; 137 | } 138 | 139 | #hero.article-hero h1 { 140 | color: #333; 141 | } 142 | 143 | #hero.article-hero a { 144 | color: #046FB3; 145 | } 146 | 147 | #hero .logo { font-size: 24px; } 148 | 149 | .menu h1 { float: left; } 150 | .menu ul { float: right; position: relative; top: 31px; } 151 | .menu ul li { display: inline-block; margin-left: 24px; } 152 | .menu ul li a { text-decoration: none; color: white; color: rgba(255,255,255,0.8); } 153 | 154 | #hero .inner { 155 | margin: 172px 0 296px 0; 156 | text-align: center; 157 | color: white; 158 | } 159 | 160 | #hero h2 { 161 | opacity: 0.7; 162 | 
font-size: 1.2em; 163 | font-weight: normal; 164 | } 165 | 166 | #logo { 167 | border-radius: 100%; 168 | border: 6px solid white; 169 | padding: 24px; 170 | width: 60px; 171 | height: 60px; 172 | text-align: center; 173 | margin: 0 auto; 174 | } 175 | 176 | #intro { 177 | background: #513CC6; 178 | text-align: center; 179 | padding: 48px 0 96px 0; 180 | color: white; 181 | } 182 | 183 | #intro .fa { 184 | margin-right: 12px; 185 | font-size: 2em; 186 | } 187 | 188 | .social a { color: transparent; } 189 | 190 | #intro .fa-envelope-o { font-size: 1.5em; } 191 | 192 | #intro a { 193 | color: white; 194 | font-weight: bold; 195 | } 196 | 197 | #intro h2 { 198 | font-size: 2.4em; 199 | font-weight: 300; 200 | } 201 | 202 | #intro p { 203 | font-size: 1.2em; 204 | color: rgba(255,255,255,0.9); 205 | } 206 | 207 | #intro .contact { 208 | margin: 48px 0; 209 | } 210 | 211 | #intro .contact li { 212 | margin: 0 12px 0 0; 213 | font-weight: bold; 214 | display: inline;o 215 | } 216 | 217 | #main { 218 | background: white; 219 | min-height: 400px; 220 | padding: 24px 0; 221 | } 222 | 223 | #main li { margin: 0 0 12px 0; } 224 | 225 | #main h2 { 226 | color: #2f2f30; 227 | } 228 | 229 | #footer { 230 | padding: 24px; 231 | background: #333; 232 | color: white; 233 | } 234 | 235 | @media screen and (max-width: 768px) { 236 | 237 | .container { 238 | margin: 0 24px; 239 | } 240 | 241 | #hero .inner { 242 | margin: 72px 0 120px 0; 243 | } 244 | 245 | #hero h1 { font-size: 1.8em; } 246 | 247 | #hero h2, #intro p { font-size: 1em; } 248 | 249 | #intro h2 { 250 | font-size: 1.4em; 251 | } 252 | 253 | #intro .contact li { display: block; margin-bottom: 12px; } 254 | } 255 | 256 | .hljs { 257 | display: block; 258 | background: white; 259 | padding: 0.5em; 260 | color: #333333; 261 | overflow-x: auto; 262 | } 263 | 264 | .hljs-comment, 265 | .hljs-meta { 266 | color: #969896; 267 | } 268 | 269 | .hljs-string, 270 | .hljs-variable, 271 | .hljs-template-variable, 272 | .hljs-strong, 
273 | .hljs-emphasis, 274 | .hljs-quote { 275 | color: #df5000; 276 | } 277 | 278 | .hljs-keyword, 279 | .hljs-selector-tag, 280 | .hljs-type { 281 | color: #a71d5d; 282 | } 283 | 284 | .hljs-literal, 285 | .hljs-symbol, 286 | .hljs-bullet, 287 | .hljs-attribute { 288 | color: #0086b3; 289 | } 290 | 291 | .hljs-section, 292 | .hljs-name { 293 | color: #63a35c; 294 | } 295 | 296 | .hljs-tag { 297 | color: #333333; 298 | } 299 | 300 | .hljs-title, 301 | .hljs-attr, 302 | .hljs-selector-id, 303 | .hljs-selector-class, 304 | .hljs-selector-attr, 305 | .hljs-selector-pseudo { 306 | color: #795da3; 307 | } 308 | 309 | .hljs-addition { 310 | color: #55a532; 311 | background-color: #eaffea; 312 | } 313 | 314 | .hljs-deletion { 315 | color: #bd2c00; 316 | background-color: #ffecec; 317 | } 318 | 319 | .hljs-link { 320 | text-decoration: underline; 321 | } 322 | 323 | .logo-icon { 324 | height: 3vw; 325 | width: 3vw; 326 | } -------------------------------------------------------------------------------- /documentation/static/templates/article.html: -------------------------------------------------------------------------------- 1 | {{.Header}} 2 | {{.Hero}} 3 |
4 |
5 | {{.Body}} 6 |
7 |
8 | {{.Footer}} 9 | -------------------------------------------------------------------------------- /documentation/static/templates/footer.html: -------------------------------------------------------------------------------- 1 | 2 | 12 | 13 | -------------------------------------------------------------------------------- /documentation/static/templates/header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | InfluxData Sandbox Tutorials 6 | 8 | 9 | 10 | 12 | 13 | 14 | 15 | 16 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /documentation/static/templates/hero.html: -------------------------------------------------------------------------------- 1 |
2 |
3 | 9 |
10 |
11 | -------------------------------------------------------------------------------- /documentation/static/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | InfluxData Sandbox Tutorials 6 | 8 | 9 | 10 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 |
20 |
21 |
22 | 28 |
29 |

InfluxData Sandbox

30 |

Documentation:

31 |

InfluxDBKapacitorTelegrafChronografFlux

32 |

--

33 |

InfluxDB 2.0 is here now! Check out the latest and greatest version.

34 | 58 |
59 |
60 |
61 |
62 |
63 |

The TICK Stack is The Modern Engine for Metrics

64 |

Want to buy a cluster? Contact Sales!

65 | 71 |
72 |
73 |
74 |
75 | 76 | 86 | 87 | -------------------------------------------------------------------------------- /documentation/static/tutorials/create-alert.md: -------------------------------------------------------------------------------- 1 | # Create (and trigger) your first Kapacitor Alert 2 | 3 | Chronograf makes creating alerts in Kapacitor quick and easy! For our first alert we are going to send a message to slack when there is significant query load on our InfluxDB server. First we are going to need to configure the slack output. You will need to create an [incoming webhook integration](https://api.slack.com/incoming-webhooks) for slack before starting this tutorial. You will need just the `url` which will be in the following format: `https://hooks.slack.com/services/XXXXXXXXX/XXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXX` 4 | 5 | ### Configure Slack integration 6 | 7 | First open the Kapacitor configuration section in Chronograf, by selecting the Configuration option ("gear" icon) within the left-hand navigation panel. 8 | Then, select the drop list on the data source under the Active Kapacitor description. From within the drop list you can 9 | click the "pencil" icon to access the Kapacitor configuration options: 10 | 11 | ![Kapacitor Configuration](../images/kapacitor-config.png) 12 | 13 | Then select the Slack tab and add the webhook URL and Slack channel to post to: 14 | 15 | ![Slack Integration](../images/slack-integration.png) 16 | 17 | ### Use the rule builder 18 | 19 | Navigate to the Rule Builder by selecting `Create` from the Alert option ("warning" icon) within the left-hand navigation panel. Next, click on the `Build Rule` button in the upper right portion of the page: 20 | 21 | ![Kapacitor Alerts](../images/kapacitor-rules.png) 22 | 23 | The first step in building a rule is to give it a name. Let's name this one: `Queries/Second` 24 | 25 | ![Rename alert](../images/rename-alert.png) 26 | 27 | Next, select the alert type. 
In this example, we will use the default value, `threshold`. 28 | 29 | ![Alert type](../images/alert-type.png) 30 | 31 | Make the following choices in the `Time Series` section. We are using the [`spread`](https://docs.influxdata.com/influxdb/v1.4/query_language/functions/#spread) function: 32 | 33 | ``` 34 | database: 'telegraf' 35 | measurement: 'influxdb_httpd' 36 | field: 'queryReq' 37 | function: 'spread' 38 | groupByTime: '10s' 39 | groupByTags: 'host' 40 | ``` 41 | Once completed, the screen shown should look as follows: 42 | 43 | ![Time series](../images/alert-time-series.png) 44 | 45 | The InfluxQL query generated looks like this: 46 | ```sql 47 | SELECT spread("queryReq") AS "spread_queryReq" 48 | FROM "telegraf"."autogen"."influxdb_httpd" 49 | WHERE time > now() - 15m 50 | GROUP BY host, time(10s) 51 | ``` 52 | 53 | In the `Conditions` section you need to `Send Alert where queryReq is Greater Than 10`. You should see a visual representation of the alert below that: 54 | 55 | ![Alert Threshold](../images/alert-threshold.png) 56 | 57 | Finally you need to set the message you want to send with the alert: `Alert {{ .ID }} is {{ .Level }} -> {{ index .Fields "value" }}`. Clicking `Save Rule` at the top of the page [defines and enables](https://docs.influxdata.com/chronograf/v1.4/guides/create-a-kapacitor-alert/#step-6-save-the-alert-rule) the rule. 58 | 59 | ### Trigger it! See some alerts! 60 | 61 | The easiest way to trigger an alert from this rule is to go to the `influxdb` canned dashboard under the `telegraf-getting-started` host and refresh the page ~5-10 times in quick succession. 
You should see the precanned `InfluxDB - Query Requests` graph spike: 62 | 63 | ![Query Requests](../images/query-requests.png) 64 | 65 | Then check the `Alerting -> Alert History` section of kapacitor or your output Slack channel to see the resulting alerts: 66 | 67 | ## Slack 68 | ![Slack Alerts](../images/alert-slack.png) 69 | 70 | ## Chronograf 71 | ![Chronograf Alerts](../images/alert-chronograf.png) 72 | -------------------------------------------------------------------------------- /documentation/static/tutorials/enable-auth.md: -------------------------------------------------------------------------------- 1 | # Enabling Authentication 2 | 3 | First you will need to create an [admin user](https://docs.influxdata.com/influxdb/v1.4/query_language/authentication_and_authorization/#authorization) on your influxdb instance: 4 | 5 | ``` 6 | $ ./sandbox influxdb 7 | ... 8 | > CREATE USER "stanley" WITH PASSWORD 'stellllAAAA' WITH ALL PRIVILEGES 9 | > SHOW USERS 10 | user admin 11 | ---- ----- 12 | stanley true 13 | ``` 14 | 15 | Then change the [`[http] auth-enabled` variable](https://docs.influxdata.com/influxdb/v1.4/query_language/authentication_and_authorization/#set-up-authentication) in the configuration file at `./influxdb/config/influxdb.conf` to true. 
Then you will need to add this username and password to both the telegraf and kapacitor configuration files: 16 | 17 | ``` 18 | # ./influxdb/config/influxdb.conf 19 | [http] 20 | auth-enabled = true 21 | 22 | # ./kapacitor/config/kapacitor.conf 23 | [[influxdb]] 24 | username = 'stanley' 25 | password = 'stellllAAAA' 26 | 27 | # ./telegraf/telegraf.conf 28 | [[outputs.influxdb]] 29 | username = 'stanley' 30 | password = 'stellllAAAA' 31 | ``` 32 | 33 | Also make sure to add the credentials in the Chronograf instance: 34 | 35 | ![add creds to chronograf](../images/add-password-chronograf.png) 36 | 37 | Once those steps are completed then you need to restart the Sandbox to ensure authentication is enforced: 38 | 39 | ``` 40 | $ ./sandbox restart 41 | Stopping all processes... 42 | Starting all processes... 43 | Services available! 44 | $ 45 | ``` 46 | -------------------------------------------------------------------------------- /documentation/static/tutorials/flux-getting-started.md: -------------------------------------------------------------------------------- 1 | # Get Started with Flux 2 | Flux is InfluxData's new data language designed for querying, analyzing, and acting on data stored in InfluxDB. 3 | It takes the power of InfluxQL and the TICKscript and combines them into a single, unified data scripting language. 4 | 5 | ## Using Flux with the Sandbox 6 | Starting with the 1.7 releases of Chronograf and InfluxDB, Flux is now completely integrated within 7 | the [InfluxData Sandbox](https://github.com/influxdata/sandbox). 8 | 9 | There will be ongoing updates and refinements made to the language and the implementation. So, you can always grab the 10 | latest by starting the Sandbox with the `-nightly` flag to pull the nightly builds of InfluxDB and 11 | Chronograf. 12 | 13 | ```bash 14 | ./sandbox up -nightly 15 | ``` 16 | 17 | ### Flux via CLI 18 | Integrated within the InfluxCLI is a Flux Read-Eval-Print-Loop (REPL) Command Line Interface. 
19 | 20 | To access the Flux REPL: 21 | ``` 22 | // Enter the docker container containing the InfluxCLI from the command prompt 23 | $ ./sandbox enter influxdb 24 | 25 | // Once inside the container, start the InfluxCLI tool using the -type flux option 26 | # influx -type flux 27 | 28 | //The following should appear 29 | Connected to http://localhost:8086 version 1.7.0 30 | InfluxDB shell version: 1.7.0 31 | Enter a Flux query 32 | > 33 | ``` 34 | 35 | __Remember to use CTRL+D to exit the Flux REPL.__ 36 | 37 | ### Get started with the Flux Editor via Chronograf 38 | The Flux Editor makes working with Flux a visual process. It consists of 3 panes: 39 | 40 | 1. **[Schema Explorer](#schema-explorer)** Allows you to explore the actual structure of your data as you're building Flux scripts. 41 | 2. **[Script Editor](#script-editor)** Where the actual Flux code is written and displayed. 42 | 3. **[Function Explorer](#function-explorer)** An online quick reference for many Flux functions. 43 | 44 | ![Flux Editor](../images/flux-editor.png) 45 | 46 | Each pane can be minimized, maximized, or closed depending on how you want to work. 47 | 48 | ### Schema Explorer 49 | The "Explore" pane of the Flux Editor allows you to visually explore the structure of your data. 50 | This is incredibly helpful as you're building out Flux queries. 51 | 52 | ![Schema Explorer](../images/flux-editor-explore.png) 53 | 54 | ### Script Editor 55 | Flux queries are written in the "Script" pane of the Flux Editor. 56 | ![Script Editor](../images/flux-editor-script.png) 57 | 58 | You can use the "explorer" tools around the Script Editor to help you rapidly build out queries. 59 | There is also a Script Wizard as part of the Script Editor which allows you to quickly build out the first few 60 | lines of your Flux Script. 61 | 62 | ![Script Wizard](../images/flux-script-wizard.png) 63 | 64 | After you've written your query you can hit the "Run Script" button in the Script Editor to execute the query. 
65 | 66 | You may want to toggle the "View Raw Data" button above the query visualization panel to switch between your 67 | selected visualization and the data returned by the query you've constructed. The default visualization type is 68 | a line graph. But, you can select the "Visualization" at the top middle of the screen to alter how the query 69 | results are displayed. 70 | 71 | ### Function Explorer 72 | The Function Explorer lists the various Flux Functions and in-line documentation describing the function's description, 73 | arguments, and an example for how to use it. Scroll through the functions to explore the power of Flux. 74 | 75 | ![Flux Builder](../images/flux-function-explorer.png) 76 | 77 | ## Learn the basics of the Flux language 78 | Flux draws inspiration from programming languages such as Lisp, Elixir, Elm, 79 | Javascript and others, but is specifically designed for analyzing and acting on data. 80 | For an introduction into the Flux syntax, view the 81 | [Flux Getting Started](https://docs.influxdata.com/flux/latest/introduction/getting-started/) 82 | section of the documentation. 
83 | 84 | 85 | ## Additional Information 86 | [Flux Documentation](https://docs.influxdata.com/flux/latest/) 87 | [Flux Specification](https://github.com/influxdata/flux/blob/master/docs/SPEC.md) 88 | [Flux Introduction Slides](https://speakerdeck.com/pauldix/flux-number-fluxlang-a-new-time-series-data-scripting-language) 89 | -------------------------------------------------------------------------------- /documentation/static/tutorials/index.md: -------------------------------------------------------------------------------- 1 | ## Getting started 2 | 3 | + [Understanding the Sandbox](/tutorials/understanding-sandbox) 4 | + [Getting started with Flux](/tutorials/flux-getting-started) 5 | + [Learning Flux](https://docs.influxdata.com/flux) 6 | 7 | ## Tutorials 8 | 9 | + [Enabling Auth in InfluxDB](/tutorials/enable-auth) 10 | + [Create Alert](/tutorials/create-alert) 11 | + [Using the Telegraf socket_listener Plugin](/tutorials/telegraf-socket-listener) 12 | -------------------------------------------------------------------------------- /documentation/static/tutorials/telegraf-socket-listener.md: -------------------------------------------------------------------------------- 1 | # Using the Telegraf socket_listener Plugin 2 | 3 | The Telegraf [socket_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener) plugin allows you to send arbitrary measurements to Telegraf using UDP or TCP. It is often the fastest way to get your metrics into InfluxDB, as it will parse various [inputs](https://docs.influxdata.com/telegraf/v1.5/concepts/data_formats_input/) and take care of things like automatic retries for you. 4 | 5 | In order to use the Telegraf socket_listener Plugin with the InfluxData Sandbox, we will need to expose a port for the Telegraf container to receive data. We can do that by adding a `ports` section under `telegraf` in `./docker-compose.yml`. 
6 | 7 | It should look like this: 8 | 9 | ``` 10 | telegraf: 11 | # Full tag list: https://hub.docker.com/r/library/telegraf/tags/ 12 | image: telegraf:1.5.3 13 | environment: 14 | HOSTNAME: "telegraf-getting-started" 15 | # Telegraf requires network access to InfluxDB 16 | links: 17 | - influxdb 18 | volumes: 19 | # Mount for telegraf configuration 20 | - ./telegraf/:/etc/telegraf/ 21 | # Mount for Docker API access 22 | - /var/run/docker.sock:/var/run/docker.sock 23 | ports: 24 | # The socket_listener plugin listens on port 8094 25 | - "8094:8094/udp" 26 | depends_on: 27 | - influxdb 28 | ``` 29 | 30 | We'll also need to configure the Telegraf plugin to receive UDP packets. Open `./telegraf/telegraf.conf` in your editor and add the following lines at the end of the file: 31 | 32 | ``` 33 | [[inputs.socket_listener]] 34 | service_address = "udp://:8094" 35 | data_format = "influx" 36 | ``` 37 | 38 | Once you've made those changes you can apply them by restarting the Sandbox: 39 | 40 | ``` 41 | $ ./sandbox restart 42 | Stopping all processes... 43 | Starting all processes... 44 | Services available! 45 | $ 46 | ``` 47 | 48 | Now let's test that Telegraf can receive data via UDP. We'll send a metric using the InfluxDB [Line Protocol](https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_tutorial/), which Telegraf understands. To send the data, we'll use two common command line utilities, `echo` and `nc`, or netcat. 49 | 50 | Both of these utilities are included on macOS and most Linux distributions; if you're using Windows, you can [install Ubuntu from the Windows Store](https://www.microsoft.com/store/productId/9NBLGGH4MSV6) and use these tools from within the Windows Subsystem for Linux (WSL). 
51 | 52 | On macOS, enter the following commands: 53 | 54 | ``` 55 | $ echo "my_measurement,my_tag_key=my_tag_value value=1" | nc -u -4 -w 1 localhost 8094 56 | $ echo "my_measurement,my_tag_key=my_tag_value value=2" | nc -u -4 -w 1 localhost 8094 57 | $ echo "my_measurement,my_tag_key=my_tag_value value=3" | nc -u -4 -w 1 localhost 8094 58 | $ echo "my_measurement,my_tag_key=my_tag_value value=4" | nc -u -4 -w 1 localhost 8094 59 | $ echo "my_measurement,my_tag_key=my_tag_value value=5" | nc -u -4 -w 1 localhost 8094 60 | ``` 61 | 62 | The `echo` command prints the text within quotes, which in this case is data encoded using the InfluxDB line protocol. Next, we use the `|` character to "pipe" the text into the next command, `nc`. We provide several arguments to `nc`: `-u`, `-4`, and `-w 1`. The first argument tells `nc` to send data using UDP; the second argument says that we should use IPv4, and the third argument tells `nc` to wait one second before terminating the connection. 63 | 64 | On Linux or WSL you can substitute `-q` for `-w 1`, which tells `nc` to quit once the data has been sent. 65 | 66 | Telegraf has the ability to aggregate and process data it receives before sending it on using an output plugin. In the Sandbox, Telegraf is configured with a five second "flush" interval, which means that Telegraf will collect measurements for five seconds, aggregate them, and send the resulting value to InfluxDB. Since we have not specified the type of aggregation we'd like, Telegraf will default to computing the mean of the measurements received during the interval. 67 | 68 | Now that we've sent some measurements to Telegraf, let's verify that they made their way into the InfluxDB database. Open Chronograf at [http://localhost:8888](http://localhost:8888) and navigate to the data explorer. 69 | 70 | Select the `telegraf.autogen` database, then the `my_measurement` field. You should see a graph with some values! 
71 | 72 | Because of the aggregation, the values on the graph likely will not match the values you entered at the command line unless you waited more than five seconds between each command. Play around with sending different values in different intervals to Telegraf to better understand how the aggregates are computed! 73 | -------------------------------------------------------------------------------- /documentation/static/tutorials/understanding-sandbox.md: -------------------------------------------------------------------------------- 1 | # Understanding the Sandbox 2 | 3 | The InfluxData Sandbox runs a complete InfluxData setup and provides a convenient way to learn to use all of the products in concert through the Chronograf management user interface. It also contains a learning portal with links to relevant pages. The Sandbox is built with [Docker](https://www.docker.com/). 4 | 5 | ### Configuration changes 6 | 7 | To change the configuration for any of the products, just change the config file in the respective directory and restart the Sandbox: 8 | 9 | ```bash 10 | $ vi ./influxdb/config/influxdb.conf 11 | # Make some changes... 12 | $ ./sandbox restart 13 | # terminal output... 14 | ``` 15 | 16 | ### Data 17 | 18 | After initial startup you will see the data directories for `influxdb`, `chronograf` and `kapacitor` created. This is where the data for `sandbox` is persisted. 19 | 20 | ```bash 21 | . 22 | ├── chronograf 23 | │   └── data 24 | ├── influxdb 25 | │   ├── config 26 | │   └── data 27 | ├── kapacitor 28 | │   ├── config 29 | │   └── data 30 | └── telegraf 31 |    └── telegraf.conf 32 | ``` 33 | 34 | ### InfluxDB 35 | 36 | In the `sandbox` InfluxDB is collecting data that is created by `telegraf`, forwarding it to `kapacitor` for alerting, and serving dashboard queries from `chronograf`. 
The API is available at `http://localhost:8086` 37 | 38 | If you have an existing installation of InfluxDB on your computer, you can use the `influx` cli tool to run commands against the `sandbox` instances without additional configuration. If you do not have the tools then use the Sandbox to attach to the CLI: 39 | 40 | ```bash 41 | $ ./sandbox influxdb 42 | Entering the influx cli... 43 | Connected to http://localhost:8086 version 1.2.2 44 | InfluxDB shell version: 1.2.2 45 | > SHOW DATABASES 46 | name: databases 47 | name 48 | ---- 49 | telegraf 50 | _internal 51 | ``` 52 | 53 | ### Kapacitor 54 | 55 | In the `sandbox` Kapacitor catches and processes data coming in from InfluxDB and can have tasks created on it by Chronograf. The API is available at `http://localhost:9092` 56 | 57 | While a downloaded copy of the `kapacitor` cli will work to run commands against the Sandbox instance, you will need to use the cli in the container if you do not have it locally: 58 | 59 | ```bash 60 | $ ./sandbox enter kapacitor 61 | Entering /bin/bash session in the kapacitor container... 62 | root@d5d99840dbc7:/# kapacitor stats ingress 63 | Database Retention Policy Measurement Points Received 64 | _internal monitor cq 146 65 | _internal monitor database 292 66 | _internal monitor httpd 146 67 | _internal monitor queryExecutor 146 68 | .... 69 | ``` 70 | 71 | ### Chronograf 72 | 73 | In the `sandbox` Chronograf acts as the control hub for the different products. It can query data from and perform management function for InfluxDB and create tasks on Kapacitor. The Chronograf web interface is available at `http://localhost:8888`. 74 | 75 | ### Telegraf 76 | 77 | In the `sandbox` Telegraf is collecting data. 
It has the following plugins configured by default: 78 | 79 | * [`outputs.influxdb`](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb) 80 | * [`inputs.docker`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker) 81 | * [`inputs.cpu`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system) 82 | * [`inputs.system`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system) 83 | * [`inputs.influxdb`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) 84 | -------------------------------------------------------------------------------- /images/chronograf/latest/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG CHRONOGRAF_TAG 2 | FROM chronograf:$CHRONOGRAF_TAG 3 | ADD ./sandbox.src ./usr/share/chronograf/resources/ 4 | ADD ./sandbox-kapa.kap ./usr/share/chronograf/resources/ 5 | -------------------------------------------------------------------------------- /images/chronograf/nightly/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG CHRONOGRAF_TAG 2 | FROM quay.io/influxdb/chronograf:$CHRONOGRAF_TAG 3 | ADD ./sandbox.src ./usr/share/chronograf/resources/ 4 | ADD ./sandbox-kapa.kap ./usr/share/chronograf/resources/ 5 | -------------------------------------------------------------------------------- /images/chronograf/sandbox-kapa.kap: -------------------------------------------------------------------------------- 1 | { 2 | "id": "10001", 3 | "srcID": "10000", 4 | "name": "My Kapacitor", 5 | "url": "http://kapacitor:9092", 6 | "active": true, 7 | "organization": "sandbox_org" 8 | } 9 | -------------------------------------------------------------------------------- /images/chronograf/sandbox.src: -------------------------------------------------------------------------------- 1 | { 2 | "id": "10000", 3 | "name": "My InfluxDB", 4 | "username": "", 5 | "password": 
"", 6 | "url": "http://influxdb:8086", 7 | "type": "influx", 8 | "insecureSkipVerify": false, 9 | "default": true, 10 | "telegraf": "telegraf", 11 | "organization": "sandbox_org" 12 | } 13 | -------------------------------------------------------------------------------- /images/influxdb/latest/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG INFLUXDB_TAG 2 | FROM influxdb:$INFLUXDB_TAG 3 | 4 | -------------------------------------------------------------------------------- /images/influxdb/nightly/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG INFLUXDB_TAG 2 | FROM quay.io/influxdb/influxdb:$INFLUXDB_TAG 3 | -------------------------------------------------------------------------------- /images/kapacitor/latest/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG KAPACITOR_TAG 2 | FROM kapacitor:$KAPACITOR_TAG 3 | -------------------------------------------------------------------------------- /images/kapacitor/nightly/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG KAPACITOR_TAG 2 | FROM kapacitor:$KAPACITOR_TAG 3 | -------------------------------------------------------------------------------- /images/telegraf/latest/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG TELEGRAF_TAG 2 | FROM telegraf:$TELEGRAF_TAG 3 | 4 | -------------------------------------------------------------------------------- /images/telegraf/nightly/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG TELEGRAF_TAG 2 | FROM telegraf:$TELEGRAF_TAG 3 | 4 | -------------------------------------------------------------------------------- /influxdb/config/influxdb.conf: -------------------------------------------------------------------------------- 1 | reporting-disabled = false 2 | 
bind-address = ":8088" 3 | 4 | [meta] 5 | dir = "/var/lib/influxdb/meta" 6 | retention-autocreate = true 7 | logging-enabled = true 8 | 9 | [data] 10 | dir = "/var/lib/influxdb/data" 11 | wal-dir = "/var/lib/influxdb/wal" 12 | query-log-enabled = true 13 | cache-max-memory-size = 1073741824 14 | cache-snapshot-memory-size = 26214400 15 | cache-snapshot-write-cold-duration = "10m0s" 16 | compact-full-write-cold-duration = "4h0m0s" 17 | max-series-per-database = 1000000 18 | max-values-per-tag = 100000 19 | index-version = "tsi1" 20 | trace-logging-enabled = false 21 | 22 | [coordinator] 23 | write-timeout = "10s" 24 | max-concurrent-queries = 0 25 | query-timeout = "0s" 26 | log-queries-after = "0s" 27 | max-select-point = 0 28 | max-select-series = 0 29 | max-select-buckets = 0 30 | 31 | [retention] 32 | enabled = true 33 | check-interval = "30m0s" 34 | 35 | [shard-precreation] 36 | enabled = true 37 | check-interval = "10m0s" 38 | advance-period = "30m0s" 39 | 40 | [monitor] 41 | store-enabled = true 42 | store-database = "_internal" 43 | store-interval = "10s" 44 | 45 | [subscriber] 46 | enabled = true 47 | http-timeout = "30s" 48 | insecure-skip-verify = false 49 | ca-certs = "" 50 | write-concurrency = 40 51 | write-buffer-size = 1000 52 | 53 | [http] 54 | enabled = true 55 | flux-enabled = true 56 | bind-address = ":8086" 57 | auth-enabled = false 58 | log-enabled = true 59 | write-tracing = false 60 | pprof-enabled = true 61 | https-enabled = false 62 | https-certificate = "/etc/ssl/influxdb.pem" 63 | https-private-key = "" 64 | max-row-limit = 0 65 | max-connection-limit = 0 66 | shared-secret = "" 67 | realm = "InfluxDB" 68 | unix-socket-enabled = false 69 | bind-socket = "/var/run/influxdb.sock" 70 | 71 | [[graphite]] 72 | enabled = false 73 | bind-address = ":2003" 74 | database = "graphite" 75 | retention-policy = "" 76 | protocol = "tcp" 77 | batch-size = 5000 78 | batch-pending = 10 79 | batch-timeout = "1s" 80 | consistency-level = "one" 81 | 
separator = "." 82 | udp-read-buffer = 0 83 | 84 | [[collectd]] 85 | enabled = false 86 | bind-address = ":25826" 87 | database = "collectd" 88 | retention-policy = "" 89 | batch-size = 5000 90 | batch-pending = 10 91 | batch-timeout = "10s" 92 | read-buffer = 0 93 | typesdb = "/usr/share/collectd/types.db" 94 | security-level = "none" 95 | auth-file = "/etc/collectd/auth_file" 96 | 97 | [[opentsdb]] 98 | enabled = false 99 | bind-address = ":4242" 100 | database = "opentsdb" 101 | retention-policy = "" 102 | consistency-level = "one" 103 | tls-enabled = false 104 | certificate = "/etc/ssl/influxdb.pem" 105 | batch-size = 1000 106 | batch-pending = 5 107 | batch-timeout = "1s" 108 | log-point-errors = true 109 | 110 | [[udp]] 111 | enabled = true 112 | bind-address = ":8089" 113 | database = "udp" 114 | retention-policy = "" 115 | batch-size = 5000 116 | batch-pending = 10 117 | read-buffer = 0 118 | batch-timeout = "1s" 119 | precision = "" 120 | 121 | [continuous_queries] 122 | log-enabled = true 123 | enabled = true 124 | run-interval = "1s" 125 | 126 | -------------------------------------------------------------------------------- /kapacitor/config/kapacitor.conf: -------------------------------------------------------------------------------- 1 | hostname = "localhost" 2 | data_dir = "/var/lib/kapacitor" 3 | skip-config-overrides = false 4 | default-retention-policy = "" 5 | 6 | [http] 7 | bind-address = ":9092" 8 | auth-enabled = false 9 | log-enabled = true 10 | write-tracing = false 11 | pprof-enabled = false 12 | https-enabled = false 13 | https-certificate = "/etc/ssl/kapacitor.pem" 14 | shutdown-timeout = "10s" 15 | shared-secret = "" 16 | 17 | [replay] 18 | dir = "/var/lib/kapacitor/replay" 19 | 20 | [storage] 21 | boltdb = "/var/lib/kapacitor/kapacitor.db" 22 | 23 | [task] 24 | dir = "/var/lib/kapacitor/tasks" 25 | snapshot-interval = "1m0s" 26 | 27 | [[influxdb]] 28 | enabled = true 29 | name = "default" 30 | default = false 31 | urls = 
["http://influxdb:8086"] 32 | username = "" 33 | password = "" 34 | ssl-ca = "" 35 | ssl-cert = "" 36 | ssl-key = "" 37 | insecure-skip-verify = false 38 | timeout = "0s" 39 | disable-subscriptions = false 40 | subscription-protocol = "http" 41 | kapacitor-hostname = "" 42 | http-port = 0 43 | udp-bind = "" 44 | udp-buffer = 1000 45 | udp-read-buffer = 0 46 | startup-timeout = "5m0s" 47 | subscriptions-sync-interval = "1m0s" 48 | [influxdb.excluded-subscriptions] 49 | _kapacitor = ["autogen"] 50 | 51 | [logging] 52 | file = "STDERR" 53 | level = "INFO" 54 | 55 | [config-override] 56 | enabled = true 57 | 58 | [collectd] 59 | enabled = false 60 | bind-address = ":25826" 61 | database = "collectd" 62 | retention-policy = "" 63 | batch-size = 5000 64 | batch-pending = 10 65 | batch-timeout = "10s" 66 | read-buffer = 0 67 | typesdb = "/usr/share/collectd/types.db" 68 | 69 | [opentsdb] 70 | enabled = false 71 | bind-address = ":4242" 72 | database = "opentsdb" 73 | retention-policy = "" 74 | consistency-level = "one" 75 | tls-enabled = false 76 | certificate = "/etc/ssl/influxdb.pem" 77 | batch-size = 1000 78 | batch-pending = 5 79 | batch-timeout = "1s" 80 | log-point-errors = true 81 | 82 | [alerta] 83 | enabled = false 84 | url = "" 85 | token = "" 86 | environment = "" 87 | origin = "" 88 | 89 | [hipchat] 90 | enabled = false 91 | url = "" 92 | token = "" 93 | room = "" 94 | global = false 95 | state-changes-only = false 96 | 97 | [opsgenie] 98 | enabled = false 99 | api-key = "" 100 | url = "https://api.opsgenie.com/v1/json/alert" 101 | recovery_url = "https://api.opsgenie.com/v1/json/alert/note" 102 | global = false 103 | 104 | [pagerduty] 105 | enabled = false 106 | url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" 107 | service-key = "" 108 | global = false 109 | 110 | [smtp] 111 | enabled = false 112 | host = "localhost" 113 | port = 25 114 | username = "" 115 | password = "" 116 | no-verify = false 117 | global = false 118 | 
state-changes-only = false 119 | from = "" 120 | idle-timeout = "30s" 121 | 122 | [sensu] 123 | enabled = false 124 | addr = "" 125 | source = "Kapacitor" 126 | 127 | [slack] 128 | enabled = false 129 | url = "" 130 | channel = "" 131 | username = "kapacitor" 132 | icon-emoji = "" 133 | global = false 134 | state-changes-only = false 135 | 136 | [talk] 137 | enabled = false 138 | url = "" 139 | author_name = "" 140 | 141 | [telegram] 142 | enabled = false 143 | url = "https://api.telegram.org/bot" 144 | token = "" 145 | chat-id = "" 146 | parse-mode = "" 147 | disable-web-page-preview = false 148 | disable-notification = false 149 | global = false 150 | state-changes-only = false 151 | 152 | [victorops] 153 | enabled = false 154 | api-key = "" 155 | routing-key = "" 156 | url = "https://alert.victorops.com/integrations/generic/20131114/alert" 157 | global = false 158 | 159 | [kubernetes] 160 | enabled = false 161 | in-cluster = false 162 | token = "" 163 | ca-path = "" 164 | namespace = "" 165 | 166 | [reporting] 167 | enabled = true 168 | url = "https://usage.influxdata.com" 169 | 170 | [stats] 171 | enabled = true 172 | stats-interval = "10s" 173 | database = "_kapacitor" 174 | retention-policy = "autogen" 175 | timing-sample-rate = 0.1 176 | timing-movavg-size = 1000 177 | 178 | [udf] 179 | 180 | [deadman] 181 | interval = "10s" 182 | threshold = 0.0 183 | id = "{{ .Group }}:NODE_NAME for task '{{ .TaskName }}'" 184 | message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"emitted\" | printf \"%0.3f\" }} points/INTERVAL." 185 | global = false 186 | 187 | -------------------------------------------------------------------------------- /sandbox: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | IFS=$'\n\t' 4 | 5 | if ! [ -x "$(command -v docker)" ]; then 6 | echo 'Error: docker is not installed.' >&2 7 | exit 1 8 | fi 9 | 10 | if ! 
[ -x "$(command -v docker-compose)" ]; then 11 | echo 'Error: docker-compose is not installed.' >&2 12 | exit 1 13 | fi 14 | 15 | sandbox () { 16 | if [ "$2" == "-nightly" ]; then 17 | source .env-nightlies 18 | echo "Using nightlies...removing old ones." 19 | # If nightly images already exist, containers are stopped, destroyed, 20 | # and rebuilt using newly pulled images 21 | if [ $(docker images | grep nightly | tr -s ' ' | cut -d ' ' -f 3 | wc -l) -gt 0 ]; then 22 | docker-compose down 23 | docker-compose rm -f 24 | docker-compose build --pull 25 | fi 26 | else 27 | source .env-latest 28 | echo "Using latest, stable releases" 29 | fi 30 | # Enter attaches users to a shell in the desired container 31 | enter () { 32 | case $2 in 33 | influxdb) 34 | echo "Entering /bin/bash session in the influxdb container..." 35 | docker-compose exec influxdb /bin/bash 36 | ;; 37 | chronograf) 38 | echo "Entering /bin/sh session in the chronograf container..." 39 | docker-compose exec chronograf /bin/sh 40 | ;; 41 | kapacitor) 42 | echo "Entering /bin/bash session in the kapacitor container..." 43 | docker-compose exec kapacitor /bin/bash 44 | ;; 45 | telegraf) 46 | echo "Entering /bin/bash session in the telegraf container..." 47 | docker-compose exec telegraf /bin/bash 48 | ;; 49 | *) 50 | echo "sandbox enter (influxdb||chronograf||kapacitor||telegraf)" 51 | ;; 52 | esac 53 | } 54 | 55 | # Logs streams the logs from the container to the shell 56 | logs () { 57 | case $2 in 58 | influxdb) 59 | echo "Following the logs from the influxdb container..." 60 | docker-compose logs -f influxdb 61 | ;; 62 | chronograf) 63 | echo "Following the logs from the chronograf container..." 64 | docker-compose logs -f chronograf 65 | ;; 66 | kapacitor) 67 | echo "Following the logs from the kapacitor container..." 68 | docker-compose logs -f kapacitor 69 | ;; 70 | telegraf) 71 | echo "Following the logs from the telegraf container..." 
72 | docker-compose logs -f telegraf 73 | ;; 74 | *) 75 | echo "sandbox logs (influxdb||chronograf||kapacitor||telegraf)" 76 | ;; 77 | esac 78 | } 79 | 80 | case $1 in 81 | up) 82 | echo "Spinning up Docker Images..." 83 | echo "If this is your first time starting sandbox this might take a minute..." 84 | docker-compose up -d --build 85 | echo "Opening tabs in browser..." 86 | sleep 3 87 | if [ $(uname) == "Darwin" ]; then 88 | open http://localhost:3010 89 | open http://localhost:8888 90 | elif [ $(uname) == "Linux" ]; then 91 | xdg-open http://localhost:8888 92 | xdg-open http://localhost:3010 93 | else 94 | echo "no browser detected..." 95 | fi 96 | ;; 97 | down) 98 | echo "Stopping sandbox containers..." 99 | docker-compose down 100 | ;; 101 | restart) 102 | echo "Stopping all sandbox processes..." 103 | docker-compose down > /dev/null 2>&1 104 | echo "Starting all sandbox processes..." 105 | docker-compose up -d --build > /dev/null 2>&1 106 | echo "Services available!" 107 | ;; 108 | delete-data) 109 | echo "deleting all influxdb, kapacitor and chronograf data..." 110 | rm -rf kapacitor/data influxdb/data chronograf/data 111 | ;; 112 | docker-clean) 113 | echo "Stopping and removing running sandbox containers..." 114 | docker-compose down 115 | echo "Removing TICK images..." 116 | docker rmi sandbox_documentation influxdb:latest telegraf:latest kapacitor:latest chronograf:latest chrono_config:latest quay.io/influxdb/influxdb:nightly quay.io/influxdb/chronograf:nightly> /dev/null 2>&1 117 | docker rmi $(docker images -f "dangling=true" -q) 118 | ;; 119 | influxdb) 120 | echo "Entering the influx cli..." 121 | docker-compose exec influxdb /usr/bin/influx 122 | ;; 123 | flux) 124 | echo "Entering the flux repl..." 125 | docker-compose exec influxdb /usr/bin/influx -type flux 126 | ;; 127 | rebuild-docs) 128 | echo "Rebuilding documentation container..." 129 | docker build -t sandbox_documentation documentation/ > /dev/null 2>&1 130 | echo "Restarting..." 
131 | docker-compose down > /dev/null 2>&1 132 | docker-compose up -d --build > /dev/null 2>&1 133 | ;; 134 | enter) 135 | enter $@ 136 | ;; 137 | logs) 138 | logs $@ 139 | ;; 140 | *) 141 | cat <<-EOF 142 | sandbox commands: 143 | up (-nightly) -> spin up the sandbox environment (latest or nightlies specified in the companion file) 144 | down -> tear down the sandbox environment (latest or nightlies specified in the companion file) 145 | restart (-nightly) -> restart the sandbox 146 | influxdb -> attach to the influx cli 147 | flux -> attach to the flux REPL 148 | 149 | enter (influxdb||kapacitor||chronograf||telegraf) -> enter the specified container 150 | logs (influxdb||kapacitor||chronograf||telegraf) -> stream logs for the specified container 151 | 152 | delete-data -> delete all data created by the TICK Stack 153 | docker-clean -> stop and remove all running docker containers and images 154 | rebuild-docs -> rebuild the documentation image 155 | EOF 156 | ;; 157 | esac 158 | } 159 | 160 | pushd `dirname $0` > /dev/null 161 | sandbox $@ 162 | popd > /dev/null 163 | -------------------------------------------------------------------------------- /sandbox.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | TITLE sandbox.bat - TICK Sandbox 3 | 4 | SET interactive=1 5 | SET COMPOSE_CONVERT_WINDOWS_PATHS=1 6 | 7 | SET TYPE=latest 8 | SET TELEGRAF_TAG=latest 9 | SET INFLUXDB_TAG=latest 10 | SET CHRONOGRAF_TAG=latest 11 | SET KAPACITOR_TAG=latest 12 | 13 | ECHO %cmdcmdline% | FIND /i "/c" 14 | IF %ERRORLEVEL% == 0 SET interactive=0 15 | 16 | REM Enter attaches users to a shell in the desired container 17 | IF "%1"=="enter" ( 18 | IF "%2"=="" ( 19 | ECHO sandbox enter ^(influxdb^|^|chronograf^|^|kapacitor^|^|telegraf^) 20 | GOTO End 21 | ) 22 | IF "%2"=="influxdb" ( 23 | ECHO Entering ^/bin^/bash session in the influxdb container... 
24 | docker-compose exec influxdb /bin/bash 25 | GOTO End 26 | ) 27 | IF "%2"=="chronograf" ( 28 | ECHO Entering ^/bin^/bash session in the chronograf container... 29 | docker-compose exec chronograf /bin/bash 30 | GOTO End 31 | ) 32 | IF "%2"=="kapacitor" ( 33 | ECHO Entering ^/bin^/bash session in the kapacitor container... 34 | docker-compose exec kapacitor /bin/bash 35 | GOTO End 36 | ) 37 | IF "%2"=="telegraf" ( 38 | ECHO Entering ^/bin^/bash session in the telegraf container... 39 | docker-compose exec telegraf /bin/bash 40 | GOTO End 41 | ) 42 | ) 43 | 44 | REM Logs streams the logs from the container to the shell 45 | IF "%1"=="logs" ( 46 | IF "%2"=="" ( 47 | ECHO sandbox logs ^(influxdb^|^|chronograf^|^|kapacitor^|^|telegraf^) 48 | GOTO End 49 | ) 50 | IF "%2"=="influxdb" ( 51 | ECHO Following the logs from the influxdb container... 52 | docker-compose logs -f influxdb 53 | GOTO End 54 | ) 55 | IF "%2"=="chronograf" ( 56 | ECHO Following the logs from the chronograf container... 57 | docker-compose logs -f chronograf 58 | GOTO End 59 | ) 60 | IF "%2"=="kapacitor" ( 61 | ECHO Following the logs from the kapacitor container... 62 | docker-compose logs -f kapacitor 63 | GOTO End 64 | ) 65 | IF "%2"=="telegraf" ( 66 | ECHO Following the logs from the telegraf container... 67 | docker-compose logs -f telegraf 68 | GOTO End 69 | ) 70 | ) 71 | 72 | 73 | IF "%1"=="up" ( 74 | IF "%2"=="-nightly" ( 75 | ECHO Spinning up nightly Docker Images... 76 | ECHO If this is your first time starting sandbox this might take a minute... 77 | SET TYPE=nightly 78 | SET INFLUXDB_TAG=nightly 79 | SET CHRONOGRAF_TAG=nightly 80 | docker-compose up -d --build 81 | ECHO Opening tabs in browser... 82 | timeout /t 3 /nobreak > NUL 83 | START "" http://localhost:3010 84 | START "" http://localhost:8888 85 | GOTO End 86 | ) ELSE ( 87 | ECHO Spinning up latest, stable Docker Images... 88 | ECHO If this is your first time starting sandbox this might take a minute... 
89 | docker-compose up -d --build 90 | ECHO Opening tabs in browser... 91 | timeout /t 3 /nobreak > NUL 92 | START "" http://localhost:3010 93 | START "" http://localhost:8888 94 | GOTO End 95 | ) 96 | ) 97 | 98 | IF "%1"=="down" ( 99 | ECHO Stopping and removing running sandbox containers... 100 | docker-compose down 101 | GOTO End 102 | ) 103 | 104 | IF "%1"=="restart" ( 105 | ECHO Stopping all sandbox processes... 106 | docker-compose down >NUL 2>NUL 107 | ECHO Starting all sandbox processes... 108 | docker-compose up -d --build >NUL 2>NUL 109 | ECHO Services available! 110 | GOTO End 111 | ) 112 | 113 | IF "%1"=="delete-data" ( 114 | ECHO Deleting all influxdb, kapacitor and chronograf data... 115 | rmdir /S /Q kapacitor\data influxdb\data chronograf\data 116 | GOTO End 117 | ) 118 | 119 | IF "%1"=="docker-clean" ( 120 | ECHO Stopping all running sandbox containers... 121 | docker-compose down 122 | echo Removing TICK images... 123 | docker-compose down --rmi=all 124 | GOTO End 125 | ) 126 | 127 | IF "%1"=="influxdb" ( 128 | ECHO Entering the influx cli... 129 | docker-compose exec influxdb /usr/bin/influx 130 | GOTO End 131 | ) 132 | 133 | IF "%1"=="flux" ( 134 | ECHO Entering the flux cli... 135 | docker-compose exec influxdb /usr/bin/influx -type flux 136 | GOTO End 137 | ) 138 | 139 | IF "%1"=="rebuild-docs" ( 140 | echo Rebuilding documentation container... 141 | docker build -t sandbox_documentation documentation\ >NUL 2>NUL 142 | echo "Restarting..." 143 | docker-compose down >NUL 2>NUL 144 | docker-compose up -d --build >NUL 2>NUL 145 | GOTO End 146 | ) 147 | 148 | ECHO sandbox commands: 149 | ECHO up -^> spin up the sandbox environment 150 | ECHO down -^> tear down the sandbox environment 151 | ECHO restart -^> restart the sandbox 152 | ECHO influxdb -^> attach to the influx cli 153 | ECHO flux -^> attach to the flux REPL 154 | ECHO. 
155 | ECHO enter ^(influxdb^|^|kapacitor^|^|chronograf^|^|telegraf^) -^> enter the specified container 156 | ECHO logs ^(influxdb^|^|kapacitor^|^|chronograf^|^|telegraf^) -^> stream logs for the specified container 157 | ECHO. 158 | ECHO delete-data -^> delete all data created by the TICK Stack 159 | ECHO docker-clean -^> stop and remove all running docker containers and images 160 | ECHO rebuild-docs -^> rebuild the documentation image 161 | 162 | :End 163 | IF "%interactive%"=="0" PAUSE 164 | EXIT /B 0 165 | -------------------------------------------------------------------------------- /telegraf/telegraf.conf: -------------------------------------------------------------------------------- 1 | [agent] 2 | interval = "5s" 3 | round_interval = true 4 | metric_batch_size = 1000 5 | metric_buffer_limit = 10000 6 | collection_jitter = "0s" 7 | flush_interval = "5s" 8 | flush_jitter = "0s" 9 | precision = "" 10 | debug = false 11 | quiet = false 12 | logfile = "" 13 | hostname = "$HOSTNAME" 14 | omit_hostname = false 15 | 16 | [[outputs.influxdb]] 17 | urls = ["http://influxdb:8086"] 18 | database = "telegraf" 19 | username = "" 20 | password = "" 21 | retention_policy = "" 22 | write_consistency = "any" 23 | timeout = "5s" 24 | 25 | [[inputs.docker]] 26 | endpoint = "unix:///var/run/docker.sock" 27 | container_names = [] 28 | timeout = "5s" 29 | perdevice = true 30 | total = false 31 | 32 | [[inputs.cpu]] 33 | [[inputs.system]] 34 | [[inputs.influxdb]] 35 | urls = ["http://influxdb:8086/debug/vars"] 36 | [[inputs.syslog]] 37 | # ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 38 | # ## Protocol, address and port to host the syslog receiver. 39 | # ## If no host is specified, then localhost is used. 40 | # ## If no port is specified, 6514 is used (RFC5425#section-4.1). 41 | server = "tcp://localhost:6514" 42 | --------------------------------------------------------------------------------