├── .auth.env ├── .env ├── .gitignore ├── CONTRIBUTING.md ├── Jenkinsfile ├── Jenkinsfile.blackduck ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── ansible ├── Dockerfile ├── datastore.json ├── library │ └── grafana_dashboard_cleanup.py ├── main.yml ├── requirements.txt └── tasks │ ├── dashboard_import.yml │ ├── grafana.yml │ ├── grafana_backup.yml │ └── plugin_tasks.yml ├── blackduck └── Dockerfile ├── build ├── alpine │ ├── Dockerfile │ ├── repositories │ └── repositories.internal └── python │ ├── Dockerfile │ ├── pip.conf │ └── pip.conf.internal ├── docker-compose.yml ├── grafana ├── Dockerfile └── grafana.ini ├── influxdb ├── Dockerfile └── influxdb.conf ├── plugins ├── eseries_monitoring │ ├── alpine_base │ │ ├── Dockerfile │ │ ├── repositories │ │ └── repositories.internal │ ├── build_info.txt │ ├── collector │ │ ├── Dockerfile │ │ ├── collector-graphite.py │ │ ├── collector.py │ │ ├── config.json │ │ ├── config.sample.json │ │ ├── docker-entrypoint.sh │ │ ├── requirements.txt │ │ └── tests │ │ │ ├── initiate_testing.sh │ │ │ ├── requirements.txt │ │ │ ├── runtests.sh │ │ │ └── test_collector.py │ ├── dashboards │ │ ├── Disk View Dashboard.json │ │ ├── Interface View Dashboard.json │ │ ├── System View Dashboard.json │ │ └── Volume View Dashboard.json │ ├── docker-compose.yml │ ├── python_base │ │ ├── Dockerfile │ │ ├── pip.conf │ │ └── pip.conf.internal │ └── webservices │ │ ├── Dockerfile │ │ ├── users.properties │ │ ├── users.template │ │ └── wsconfig.xml └── influxdb_internal_monitoring │ ├── ansible_tasks │ ├── datasource.yml │ └── datastore.json │ └── dashboards │ ├── InfluxDB Overview.json │ └── InfluxDB Overview.json~ └── scripts ├── check_docker_version.sh ├── docker_exists.sh ├── images.sh ├── plugin_build_info.sh ├── plugin_compose_info.sh ├── plugin_configure.sh ├── plugin_dashboard_info.sh ├── plugin_image_info.sh ├── plugin_remove_info.sh └── plugin_task_info.sh /.auth.env: 
-------------------------------------------------------------------------------- 1 | # NOTE: If this password is changed here then you need to change the password 2 | # used for the collector in plugins/eseries_monitoring/collector/config.json 3 | PROXY_PASSWORD=admin 4 | -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | TAG=3.0 2 | PROJ_NAME=ntap-grafana 3 | RETENTION_PERIOD=52w 4 | CONTAINER_ALPINE_TAG=3.14.3 5 | CONTAINER_GRAFANA_TAG=8.3.6 6 | CONTAINER_INFLUXDB_TAG=1.8-alpine 7 | CONTAINER_PYTHON_TAG=3.10-alpine3.14 8 | CONTAINER_WEBSERVICES_TAG=5.10 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ignore the graphite database 2 | graphite-database/* 3 | .idea/* 4 | # ignore all array files except the example 5 | !ansible/arrays/example_array.json 6 | ansible/arrays/* 7 | # ignore the influxdb database 8 | influx-database/* 9 | # ignore the web services working directory 10 | plugins/eseries_monitoring/webservices_working/* 11 | # ignore python test cache 12 | plugins/eseries_monitoring/collector/tests/__pycache__/* 13 | plugins/eseries_monitoring/collector/tests/collector.py 14 | # ignore emacs backup files 15 | *~ -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thank you for your interest in contributing to the E-Series Performance Analyzer project! 🎉 4 | 5 | We appreciate that you want to take the time to contribute! Please follow these steps before submitting your PR. 6 | 7 | ## Creating a Pull Request 8 | 9 | 1.
Please search [existing issues](https://github.com/NetApp/eseries-perf-analyzer/issues) to determine if an issue already exists for what you intend to contribute. 10 | 2. If the issue does not exist, [create a new one](https://github.com/NetApp/eseries-perf-analyzer/issues/new) that explains the bug or feature request. 11 | * Let us know in the issue that you plan on creating a pull request for it. This helps us to keep track of the pull request and make sure there isn't duplicate effort. 12 | 3. Before creating a pull request, write up a brief proposal in the issue describing what your change would be and how it would work so that others can comment. 13 | * It's better to wait for feedback from someone on NetApp's E-Series Performance Analyzer development team before writing code. We don't have an SLA for our feedback, but we will do our best to respond in a timely manner (at a minimum, to give you an idea if you're on the right track and that you should proceed, or not). 14 | 4. Sign and submit [NetApp's Corporate Contributor License Agreement (CCLA)](https://netapp.tap.thinksmart.com/prod/Portal/ShowWorkFlow/AnonymousEmbed/3d2f3aa5-9161-4970-997d-e482b0b033fa). 15 | * From the **Project Name** dropdown select `E-Series Performance Analyzer`. 16 | * For the **Project Website** specify `https://github.com/NetApp/eseries-perf-analyzer` 17 | 5. If you've made it this far, have written the code that solves your issue, and addressed the review comments, then feel free to create your pull request. 18 | 19 | Important: **NetApp will NOT look at the PR or any of the code submitted in the PR if the CCLA is not on file with NetApp Legal.** 20 | 21 | ## E-Series Performance Analyzer Team's Commitment 22 | 23 | While we truly appreciate your efforts on pull requests, we **cannot** commit to including your PR in the E-Series Performance Analyzer project. 
Here are a few reasons why: 24 | 25 | * There are many factors involved in integrating new code into this project, including things like: 26 | * support for a wide variety of NetApp backends 27 | * proper adherence to our existing and/or upcoming architecture 28 | * sufficient functional and/or scenario tests across all backends 29 | * etc. 30 | 31 | In other words, while your bug fix or feature may be perfect as a standalone patch, we have to ensure that the changes work in all use cases, configurations, backends and across our support matrix. 32 | 33 | * The E-Series Performance Analyzer team must plan our resources to integrate your code into our code base and CI platform, and depending on the complexity of your PR, we may or may not have the resources available to make it happen in a timely fashion. We'll do our best. 34 | 35 | * Sometimes a PR doesn't fit into our future plans or conflicts with other items on the roadmap. It's possible that a PR you submit doesn't align with our upcoming plans, thus we won't be able to use it. It's not personal. 36 | 37 | Thank you for considering contributing to the E-Series Performance Analyzer project!
38 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { label 'linux-docker' } 3 | options { 4 | timeout(time: 1, unit: 'HOURS') 5 | disableConcurrentBuilds() 6 | buildDiscarder(logRotator(artifactNumToKeepStr: '5', numToKeepStr: '20')) 7 | } 8 | environment { 9 | TAG = "${BRANCH_NAME}-${BUILD_NUMBER}" 10 | PROJECT_NAME = "esg-grafana" 11 | VERSION = "3.0" 12 | QUIET = "yes" 13 | } 14 | stages { 15 | stage('Run docker builds') { 16 | steps { 17 | sh''' 18 | # Overwrite the default environment options 19 | sed --in-place \ 20 | -e "s/^TAG=.*/TAG=${TAG}/" \ 21 | -e "s/^PROJ_NAME=.*/PROJ_NAME=${PROJECT_NAME}/" \ 22 | .env 23 | cat .env 24 | make build 25 | ''' 26 | sh 'echo ${GIT_COMMIT}' 27 | } 28 | } 29 | stage('Run python unit tests') { 30 | steps { 31 | sh''' 32 | # Overwrite the default environment options 33 | sed --in-place \ 34 | -e "s/^TAG=.*/TAG=${TAG}/" \ 35 | -e "s/^PROJ_NAME=.*/PROJ_NAME=${PROJECT_NAME}/" \ 36 | .env 37 | ./plugins/eseries_monitoring/collector/tests/initiate_testing.sh ${PROJECT_NAME} ${TAG} 38 | ''' 39 | } 40 | } 41 | } 42 | post { 43 | always { 44 | sh''' 45 | make clean || true 46 | ''' 47 | cleanWs deleteDirs: true 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /Jenkinsfile.blackduck: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { label 'linux-docker' } 3 | options { 4 | timeout(time: 1, unit: 'HOURS') 5 | disableConcurrentBuilds() 6 | buildDiscarder(logRotator(artifactNumToKeepStr: '5', numToKeepStr: '20')) 7 | } 8 | parameters { 9 | booleanParam name: "DO_BLACKDUCK_SCAN", description: "Select true to run a blackduck scan. This is only available on trunk and release/* branches." 
10 | } 11 | environment { 12 | TAG = "${BRANCH_NAME}-${BUILD_NUMBER}" 13 | PROJECT_NAME = "esg-grafana" 14 | VERSION = "3.0" 15 | QUIET = "yes" 16 | } 17 | stages { 18 | stage('Security Scan'){ 19 | when { 20 | expression { return params.DO_BLACKDUCK_SCAN } 21 | } 22 | steps { 23 | script { 24 | docker.build("perf-analyzer-blackduck", "./blackduck").inside("-u 0") { 25 | sh "cd ./plugins/eseries_monitoring/collector && pip --default-timeout=5 --retries 15 install -r requirements.txt" 26 | sh "cd ./ansible && pip --default-timeout=5 --retries 15 install -r requirements.txt" 27 | 28 | // Scan the source code of the project 29 | synopsys_detect detectProperties: """ 30 | --detect.python.python3=true 31 | --detect.pip.project.name=${PROJECT_NAME} 32 | --detect.pip.project.version.name=${VERSION} 33 | --detect.project.name=${PROJECT_NAME} 34 | --detect.project.version.name=${VERSION} 35 | --detect.cleanup=false 36 | --detect.output.path=/tmp/scanTempDir 37 | --detect.project.code.location.unmap=true 38 | --detect.detector.search.depth=25 39 | --detect.code.location.name=${PROJECT_NAME}_${VERSION}_code 40 | --detect.bom.aggregate.name=${PROJECT_NAME}_${VERSION}_bom 41 | --detect.detector.search.exclusion.paths=scanTempDir 42 | --detect.blackduck.signature.scanner.exclusion.patterns=scanTempDir 43 | """ 44 | // This error occurs when using a non-root user within the container: 45 | // Error creating directory /synopsys-detect/download. 46 | // The curl response was 000, which is not successful - please check your configuration and environment. 47 | // So after the scan do a chmod on the files so that the workspace can be cleaned. 48 | sh "chmod -R 777 ." 49 | } 50 | 51 | // The container images are not published by NetApp and therefore do not need to be scanned. 52 | // The Blackduck project will have manual entries added for these component versions. 53 | // Keeping this code around just in case scans are needed in the future. 
54 | // 55 | // def images = [ 56 | // "${PROJECT_NAME}/ansible:${TAG}", 57 | // "${PROJECT_NAME}/influxdb:${TAG}", 58 | // "${PROJECT_NAME}/grafana:${TAG}", 59 | // "${PROJECT_NAME}-plugin/eseries_monitoring/collector:latest", 60 | // "${PROJECT_NAME}-plugin/eseries_monitoring/webservices:latest" 61 | // ] 62 | // // For each image, perform the blackduck scan. 63 | // images.each() { 64 | // def scanImage = it.substring(it.lastIndexOf("/") + 1, it.lastIndexOf(":")) 65 | // synopsys_detect detectProperties: """ 66 | // --detect.project.name=${PROJECT_NAME} \ 67 | // --detect.project.version.name=${VERSION} \ 68 | // --detect.cleanup=false \ 69 | // --detect.output.path=scanTempDir \ 70 | // --detect.detector.search.exclusion.paths=scanTempDir/ \ 71 | // --detect.detector.search.depth=25 \ 72 | // --detect.tools=DOCKER \ 73 | // --detect.tools=SIGNATURE_SCAN 74 | // --detect.code.location.name=${PROJECT_NAME}_${VERSION}_container_${scanImage}_code \ 75 | // --detect.bom.aggregate.name=${PROJECT_NAME}_${VERSION}_container_${scanImage}_bom \ 76 | // --detect.docker.image=${it} \ 77 | // """ 78 | // } 79 | } 80 | } 81 | } 82 | } 83 | post { 84 | always { 85 | cleanWs deleteDirs: true 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 NetApp, Inc. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # import config. 
2 | # You can change the default config with `make cnf="config_special.env" build` 3 | cnf ?= .env 4 | include $(cnf) 5 | export $(shell sed 's/=.*//' $(cnf)) 6 | 7 | configuration ?= "" 8 | 9 | TAG ?= 1.0 10 | 11 | # external repos 12 | PIP_CONF ?= pip.conf 13 | ALPINE_REPO_FILE ?= repositories 14 | 15 | configuration := .$(configuration) 16 | 17 | 18 | ## 19 | # plugin targets 20 | ## 21 | configure-plugins: ## Perform plugin configuration 22 | @scripts/plugin_configure.sh 23 | 24 | run-plugins: ## Run all plugins 25 | @$(shell ./scripts/plugin_compose_info.sh "up -d") 26 | 27 | build-plugins: configure-plugins ## Build all plugins 28 | @$(shell PROJ_NAME=$(PROJ_NAME) ./scripts/plugin_build_info.sh) 29 | 30 | stop-plugins: ## Stop all plugins 31 | @$(shell ./scripts/plugin_compose_info.sh "stop") 32 | 33 | down-plugins: ## Run docker-compose down on all plugins 34 | @$(shell ./scripts/plugin_compose_info.sh "down") 35 | 36 | clean-plugins: ## Remove all images built by plugins 37 | @$(shell PROJ_NAME=$(PROJ_NAME) ./scripts/plugin_remove_info.sh) 38 | 39 | export-plugins: ## Export all plugin images 40 | @$(shell PROJ_NAME=$(PROJ_NAME) ./scripts/plugin_image_info.sh) 41 | 42 | # HELP 43 | # This will output the help for each task 44 | # thanks to https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html 45 | .PHONY: help warn 46 | 47 | help: ## This help. 
48 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 49 | 50 | .DEFAULT_GOAL := help 51 | 52 | # DOCKER TASKS 53 | # Build the container 54 | build: __docker-find __docker-version warn ## Build the container 55 | # Prepare dashboards for import 56 | $(shell mkdir -p ansible/dashboards) 57 | @chmod +x scripts/* 58 | @scripts/plugin_dashboard_info.sh 59 | 60 | # Prepare plugin tasks 61 | $(shell mkdir -p ansible/tasks/plugin_tasks) 62 | @chmod +x scripts/* 63 | @scripts/plugin_task_info.sh 64 | 65 | # Create our docker network 66 | docker network inspect eseries_perf_analyzer >/dev/null 2>&1 || docker network create eseries_perf_analyzer 67 | 68 | # Build core services 69 | docker build --build-arg REPO_FILE=$(ALPINE_REPO_FILE) --build-arg CONTAINER_ALPINE_TAG --build-arg TAG=$(TAG) -t $(PROJ_NAME)/alpine-base:${TAG} build/alpine 70 | docker build --build-arg PIP_CONF=$(PIP_CONF) --build-arg CONTAINER_PYTHON_TAG --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/python-base:${TAG} build/python 71 | docker build --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/ansible:${TAG} ansible 72 | docker build --build-arg CONTAINER_INFLUXDB_TAG --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/influxdb:$(TAG) influxdb 73 | docker build --build-arg CONTAINER_GRAFANA_TAG --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/grafana:$(TAG) grafana 74 | docker-compose build 75 | 76 | # Build plugins 77 | @$(MAKE) --no-print-directory build-plugins 78 | 79 | build-nc: __docker-find __docker-version warn ## Build the container without caching 80 | # Prepare dashboards for import 81 | $(shell mkdir -p ansible/dashboards) 82 | @chmod +x scripts/* 83 | @scripts/plugin_dashboard_info.sh 84 | 85 | # Create our docker network 86 | docker network inspect eseries_perf_analyzer >/dev/null 2>&1 || docker network create 
eseries_perf_analyzer 87 | 88 | # Prepare plugin tasks 89 | $(shell mkdir -p ansible/tasks/plugin_tasks) 90 | @chmod +x scripts/* 91 | @scripts/plugin_task_info.sh 92 | 93 | docker build --no-cache --build-arg REPO_FILE=$(ALPINE_REPO_FILE) --build-arg CONTAINER_ALPINE_TAG --build-arg TAG=$(TAG) -t $(PROJ_NAME)/alpine-base:${TAG} build/alpine 94 | docker build --no-cache --build-arg PIP_CONF=$(PIP_CONF) --build-arg CONTAINER_PYTHON_TAG --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/python-base:${TAG} build/python 95 | docker build --no-cache --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/ansible:${TAG} ansible 96 | docker build --no-cache --build-arg CONTAINER_INFLUXDB_TAG --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/influxdb:$(TAG) influxdb 97 | docker build --no-cache --build-arg CONTAINER_GRAFANA_TAG --build-arg TAG=$(TAG) --build-arg PROJ_NAME=$(PROJ_NAME) -t $(PROJ_NAME)/grafana:$(TAG) grafana 98 | docker-compose build --pull --no-cache 99 | 100 | # Build plugins 101 | @$(MAKE) --no-print-directory build-plugins 102 | 103 | run: build ## Build and run 104 | # Start core services using our compose file and run in the background 105 | docker-compose up -d 106 | 107 | # Run plugins 108 | @$(MAKE) --no-print-directory run-plugins 109 | 110 | # Start an instance of our Ansible image to perform setup on the running instance 111 | docker run --rm --network=container:grafana $(PROJ_NAME)/ansible:${TAG} 112 | @$(shell rm -rf ansible/dashboards/*) 113 | @$(shell rm -rf ansible/tasks/plugin_tasks/*) 114 | docker ps 115 | 116 | run-nc: build-nc ## Build and run 117 | # Start core services using our compose file and run in the background 118 | docker-compose up -d 119 | 120 | # Run plugins 121 | @$(MAKE) --no-print-directory run-plugins 122 | 123 | # Start an instance of our Ansible image to perform setup on the running instance 124 | docker run --rm --network=container:grafana 
$(PROJ_NAME)/ansible:${TAG} 125 | @$(shell rm -rf ansible/dashboards/*) 126 | @$(shell rm -rf ansible/tasks/plugin_tasks/*) 127 | docker ps 128 | 129 | export-nc: build-nc ## Build the images and export them 130 | mkdir -p images 131 | docker save $(PROJ_NAME)/ansible:${TAG} > images/ansible.tar 132 | docker save $(PROJ_NAME)/influxdb:${TAG} > images/influxdb.tar 133 | docker save $(PROJ_NAME)/grafana:${TAG} > images/grafana.tar 134 | 135 | # Export core plugin images 136 | docker save $(PROJ_NAME)-plugin/eseries_monitoring/collector:latest > images/eseries_monitoring_collector.tar 137 | docker save $(PROJ_NAME)-plugin/eseries_monitoring/webservices:latest > images/eseries_monitoring_webservices.tar 138 | 139 | # Including this will scan and export any plugin images we find that have been built. 140 | # This includes images that are only built as a base 141 | #@$(MAKE) --no-print-directory export-plugins 142 | 143 | export: build ## Build the images and export them 144 | mkdir -p images 145 | docker save $(PROJ_NAME)/ansible:${TAG} > images/ansible.tar 146 | docker save $(PROJ_NAME)/influxdb:${TAG} > images/influxdb.tar 147 | docker save $(PROJ_NAME)/grafana:${TAG} > images/grafana.tar 148 | 149 | # Export core plugin images 150 | docker save $(PROJ_NAME)-plugin/eseries_monitoring/collector:latest > images/eseries_monitoring_collector.tar 151 | docker save $(PROJ_NAME)-plugin/eseries_monitoring/webservices:latest > images/eseries_monitoring_webservices.tar 152 | 153 | # Including this will scan and export any plugin images we find that have been built. 
154 | # This includes images that are only built as a base 155 | #@$(MAKE) --no-print-directory export-plugins 156 | 157 | stop: __docker-find __docker-version ## Stop all of our running services 158 | # Stop running plugins 159 | @$(MAKE) --no-print-directory stop-plugins 160 | 161 | # Stop core components 162 | docker-compose stop 163 | 164 | restart: stop run ## 'stop' followed by 'run' 165 | 166 | rm: __docker-find __docker-version ## Remove all existing containers defined by the project 167 | docker-compose rm -s -f 168 | $(shell ./scripts/plugin_compose_info.sh "rm -s -f") 169 | 170 | clean: stop rm ## Remove all images and containers built by the project 171 | rm -rf images 172 | docker rmi $(PROJ_NAME)/ansible:${TAG} 173 | docker rmi $(PROJ_NAME)/influxdb:${TAG} 174 | docker rmi $(PROJ_NAME)/grafana:${TAG} 175 | docker rmi -f $(shell docker images -q -f "label=autodelete=true") 176 | docker rmi -f $(shell docker images -q --filter "reference=$(PROJ_NAME)/*:${TAG}") 177 | 178 | # Clean plugins 179 | @$(MAKE) --no-print-directory clean-plugins 180 | 181 | # Remove our created docker network 182 | docker network rm eseries_perf_analyzer 183 | 184 | warn: ## 185 | ifndef QUIET 186 | @chmod +x scripts/* 187 | @scripts/images.sh 188 | endif 189 | 190 | __docker-find: ## 191 | @chmod +x scripts/* 192 | @scripts/docker_exists.sh 193 | 194 | __docker-version: ## 195 | @chmod +x scripts/* 196 | @scripts/check_docker_version.sh 197 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NetApp E-Series Performance Analyzer 2 | This project provides an automated installation and deployment of the NetApp E-Series Performance Analyzer, a collection of software and scripts for monitoring the performance of NetApp E-Series storage systems.
3 | 4 | This project is intended to allow you to quickly and simply deploy an instance of our performance analyzer for monitoring your E-Series storage systems. We incorporate various open source components and tools in order to do so. While it is primarily intended to serve as a reference implementation for using Grafana to visualize the performance of your E-Series systems, it is also intended to be customizable and extensible based on your individual needs via a developer-friendly plugin architecture. This README is primarily focused on the E-Series performance analysis components. For more information on plugin development please find the "Plugin Architecture" section of this README. 5 | 6 | ## Quickstart Guide 7 | You'll need to have [Docker (v1.13.0+)](https://docs.docker.com/install/) and [Docker-Compose](https://docs.docker.com/compose/install/) installed in order to get started. We also utilize [Make](https://www.gnu.org/software/make/) for starting/stopping the components so make sure you have a version of that installed. 8 | 9 | The storage systems to be monitored must be defined in the *"/plugins/eseries_monitoring/collector/config.json"* file. There is an example file located at *"/plugins/eseries_monitoring/collector/config.sample.json"* for your reference. You may also choose to add the systems to Web Services manually, as detailed below. 10 | 11 | Once Docker is installed and the storage systems are configured, run the command _"make run"_ in the project's root folder. You will then be prompted for confirmation to download the necessary container images. If you wish to update to a newer image tag, you can cancel out and do so now. Within a few minutes, all dependencies should be retrieved and installed, and the performance analyzer should be running. 12 | 13 | Open **http://:3000/d/ZOshR4NZk/system-view-dashboard** to reach the Grafana login page and the E-Series System View dashboard. 
Use the default login credentials of _admin/admin_ for first-time login. 14 | 15 | ## Overview 16 | The Web Services Proxy will periodically poll your storage system(s) for performance data at a regular interval. Using a simple Python script, this data is collected and pushed into an [InfluxDB](https://www.influxdata.com/) time-series database. [Grafana](https://grafana.com/), a data visualization engine for time-series data, is then utilized along with several customized dashboards to present the data graphically. All of these components are integrated together using [Docker](https://www.docker.com/) and [Ansible](https://www.ansible.com/). 17 | 18 | The only real requirements to utilize this project are a Linux OS and a Docker installation with Docker Compose. ~95% of the installation and configuration process is automated using Ansible and Docker. 19 | 20 | Our descriptions below of the various components will in no way fully do them justice. It is recommended that you visit the project/home pages for each in order to gain a full understanding of what they can provide and how they can be fully utilized. We will attempt to provide the high-level information that you absolutely need to know, but probably little beyond that. 21 | 22 | ## Components 23 | ### NetApp SANtricity Web Services Proxy 24 | The Web Services Proxy provides a RESTful interface for managing/monitoring E-Series storage systems. Our newest hardware models provide a RESTful API out-of-the-box, but the Web Services Proxy will support the newest systems as well as the legacy storage systems that do not. It is highly scalable and can support upwards of 500 E-Series systems while using < 2 GB of memory. 25 | 26 | The Web Services Proxy is provided with the default configuration and settings. It can be accessed at **http://:8080**. 
If you do not wish for the API to be externally accessible, you may remove the port mapping in the *"/plugins/eseries_monitoring/docker-compose.yml"* file: 27 | ~~~~ 28 | netapp_web_services: 29 | ... 30 | ports: 31 | - 8080:8080 32 | - 8443:8443 33 | ~~~~ 34 | 35 | The Web Services Proxy installation includes a GUI component that can be used to manage the newest E-Series systems (those running firmware levels 11.40 and above), which may or may not work for your environment. 36 | 37 | #### Managing Web Services Proxy Credentials 38 | By default the credentials _admin/admin_ will be used for accessing the Web Services Proxy. If you wish to modify the password used then follow this procedure. 39 | 40 | ##### Update the password for the web services proxy service 41 | Edit the file `.auth.env` in the root of the project. Change the value of the `PROXY_PASSWORD` variable to the password that you wish to use and save the file. This 42 | will be used to configure the password for the proxy service. 43 | 44 | ##### Update the password used by the collector plugin 45 | Edit the file `/plugins/eseries_monitoring/collector/config.json` and change the value of the `password` key. This is the top level `password` key and not the password for an individual storage system which is a different password. 46 | 47 | Once both the `.auth.env` and `config.json` files have been updated then the images will need to be rebuilt if the project is running. This can be done with a simple `make restart` command. If the project has not been built or started yet then the changes will take effect when you do build and start the project. 48 | 49 | 50 | ### InfluxDB 51 | [InfluxDB](https://www.influxdata.com/) is our persistent store for preserving metrics data. Grafana supports [many different backends](https://grafana.com/plugins?type=datasource), but we chose InfluxDB due to its speed and scalability as well as the power and simplicity of its query language. 
52 | 53 | While we do have a Python script predefined for use with InfluxDB and the Web Services Proxy, which collects E-Series performance metrics, we also provide some additional collector example scripts at the root of the project. One of these is written in Python, and the other in Bash. If you would like to provide additional metrics for collection, you may use these scripts as an example. 54 | ### Grafana 55 | [Grafana](https://grafana.com/) is an open-source tool designed to help you visualize time-series data. It has the capability to accept plugins for additional functionality, but its core provides a lot of power with no addons. 56 | 57 | Data from a configured datasource is displayed in Grafana via user-defined dashboards. Grafana dashboards are built/generated in the GUI, but are stored/represented in JSON format on disk. While we provide several pre-built dashboards, it is entirely possible (and encouraged) for you to [create your own](http://docs.grafana.org/guides/getting_started/). The source for our dashboards can be found in *"/plugins/eseries_monitoring/dashboards/"* 58 | 59 | ## Supporting Tools 60 | Installing each of these components and configuring them properly on an arbitrary OS version can be difficult. Rather than requiring a complex installation and configuration step, we utilize a couple of different tools to facilitate this type of deployment. 61 | ### Ansible 62 | We use [Ansible](https://www.ansible.com/) in order to define and apply consistent configurations for the different components listed above. A simple Ansible playbook can save thousands of lines worth of shell scripting. 63 | 64 | Primarily, we utilize Ansible to configure Grafana and import/export dashboards as required. 65 | ### Docker 66 | [Docker](https://www.docker.com/) allows you to define an environment to run a particular application in code, including the OS, dependencies, and any required configuration/customization. 
It is similar to creating a custom virtual machine image for each component, but much easier, more dynamic, and lighter weight resource-wise. Such a configuration is known as a Docker image. Each component of our solution has an official, unofficial, or custom-built Docker image that defines its environment and configuration such that only an installation of Docker is required to use it. 67 | 68 | We use version 2 of the Compose file format, with features that require at least Docker version 1.13.0+. 69 | 70 | [Docker Compose](https://docs.docker.com/compose/) allows multiple Docker images to be orchestrated together to solve a larger problem. A common example is a web server that also requires a database. 71 | 72 | In our case, we have several components that must be run together for everything to work correctly. There are startup dependencies, and certain components require communication with other components. Docker-Compose allows us to define the various services we require, how they should behave, where they should store their data, and which should be externally accessible. This is all done via Docker Compose. 73 | 74 | ## Getting Started 75 | ### Dependencies 76 | You'll need to install [Docker](https://docs.docker.com/install/) and [Docker Compose](https://docs.docker.com/compose/install/). All other dependencies are provided through use of our Docker images. You also need access to [Make](https://www.gnu.org/software/make/). 77 | ### Configuration 78 | #### Storage Systems 79 | Arrays to be monitored should be added to the *"/plugins/eseries_monitoring/collector/config.json"* file. A sample configuration file is provided at *"/plugins/eseries_monitoring/collector/config.sample.json"* for reference. For most systems, you will also need to provide a valid password to log in to the target storage system. If you do not, or you provide an incorrect password, it's possible that we won't be able to pull performance data for that system. 
80 | 81 | It is also possible to manually add storage systems using the Web Services Proxy interactive API documentation found at **http://:8080/devmgr/docs/#/Storage-Systems/new_StorageSystem**. 82 | 83 | Once everything is started, arrays can also be managed through the SANtricity® Unified Manager as described below. Note that although they will still be monitored, legacy arrays added through the API/config files will not appear in this manager. Use of this manager is briefly described below. 84 | #### Disk Usage, Data Retention, and Downsampling 85 | With our data collection we use ~260 KB per drive/volume per day. Based on this, you can expect to consume 250-300 GB of storage space for 100 systems for one year. 86 | 87 | By default, we retain performance metrics for one week before they are downsampled. Those downsampled metrics are then retained for one year by default. This retention period is modifiable and we utilize an environment variable *"RETENTION_PERIOD"* for this purpose. The best place to set this is within the *"/.env"* file. For example, setting a retention period of 4 weeks would look like this: 88 | ~~~~ 89 | ... 90 | RETENTION_PERIOD=4w 91 | ... 92 | ~~~~ 93 | A list of possible durations and valid duration formats can be found [here](https://docs.influxdata.com/influxdb/v1.7/query_language/spec/#durations). Note that the minimum possible retention duration is 1 hour. Setting this variable to a value of **INF** will result in performance metrics that are retained indefinitely. 94 | 95 | **Note:** A change in the retention period requires a restart of the services before it will take effect. 96 | #### InfluxDB 97 | InfluxDB is configurable through the config file located at *"/influxdb/influxdb.conf"*. Information about configuration options can be found [here](https://docs.influxdata.com/influxdb/v1.7/administration/config/). 
98 | #### Dashboards 99 | The included E-Series dashboards are located in *"/plugins/eseries_monitoring/ansible/dashboards/"* and will be imported into Grafana when started. Dashboards can also be imported from within the Grafana interface by navigating to **Dashboards->Home**, clicking on the drop-down at the top of the page, and selecting **Import Dashboard**. 100 | 101 | Dashboards are imported/exported using JSON and that documentation can be found [here](http://docs.grafana.org/reference/dashboard/). You may use the provided pre-configured dashboards as a reference for creating your own. We have provided a make target for automatically exporting new/user-modified dashboards to disk for backup. This pulls current dashboards from the service and stores them locally in the *"/backups/"* directory in JSON format. To execute this simply run the command _"make backup-dashboards"_ in the root folder of the project. The Grafana instance must be running when you execute this command. 102 | ### Starting It Up 103 | It's pretty simple: run the command _"make run"_ from within the project root directory. This will begin the process of building, setting up, and running everything. When you want to stop it, run the command _"make stop"_. If you're trying to monitor the status of any of these tools, you can do so using standard Docker commands. To remove any current container instances, run the command _"make clean"_. A list of all possible make targets can be viewed using the _"make help"_ command. 104 | 105 | _"make run"_ will prompt you for confirmation on whether or not you wish to continue and allow the downloading of default container images. If you wish to update to a newer tag image, you can cancel out and do so now. At this time core services will start, followed thereafter by any plugins, including the E-Series performance monitoring services. 
106 | 107 | We've done our best to ensure that the configured Docker images not only build and work in most environments, but that they are also well-used and tested by the community, and don't have security holes. New security issues are found all of the time, however, and we may not be able to update the image tags immediately. You may choose to change the image tags to a newer or different version, just be aware that we haven't tested that variation and you might run into problems with the build or during runtime. 108 | 109 | Once everything is started, you have access to several pages to control and configure. 110 | 111 | ## Once It's Started 112 | ### Accessing the Web Services Proxy 113 | The Web Services Proxy can be accessed at **http://:8080**. From here you can access the SANtricity® Unified Manager using default credentials of _admin/admin_. This is a UI frontend for managing storage arrays. There are also links to the Web Services API reference as well as the NetApp support site. Through this manager, it is also possible to create and organize your arrays into folders. This allows you to arrange arrays into logical groups. These groups are exposed in the dashboards, and graphs can be filtered by these groups. These folders are updated at start, and then periodically every 10 minutes. If you would like to see your folder changes reflected in dashboards immediately, simply restart services using _"make restart"_. 114 | ### Accessing the Grafana Interface and Dashboards 115 | The dashboards are available at **http://:3000** using default credentials _admin/admin_. Grafana should be pre-configured for immediate access to your data. Documentation for additional configuration and navigation can be found [here](http://docs.grafana.org/guides/getting_started/). 116 | ## Troubleshooting 117 | ### I don't have access to Docker 118 | At this time (and this is unlikely to change), Docker is a hard requirement for using this project. 
119 | ### I don't have network access on this machine 120 | Your only option at this point is to save/export the Docker images on a machine that does have general internet access and then copy them and import them to the target machine. This is an advanced workflow that we do not currently cover in this guide, but is not overly difficult to achieve. 121 | ### I can't pull the Docker images 122 | Check your access to DockerHub. If you are running this on a machine with network segregation, you may need to update your Docker binary to utilize a local DockerHub mirror or repository to get this to work. 123 | ### A Docker image failed to build 124 | We pin our Docker images to a known good tag at the time that we commit changes. The downside to pinning to a major/minor version rather than a specific image hash is that while you do get the benefit of new patches (security updates, etc.), the possibility of breakage does exist. If an image fails to build, try to determine where the failure occurred and if it's an environment issue or an issue with an update to the Docker image tag. It's quite likely that you'll be able to get things to function correctly by rolling back to an older version. 125 | ### I don't see any data in the charts 126 | Did you remember to add any storage systems to the Web Services Proxy instance, either through the Ansible helper scripts or manually? If not, you didn't give us anything to push metrics on yet. 127 | 128 | Assuming that you did, verify that the collector container is running and that it is successfully collecting metrics. You can do this by checking the container logs by running the command _"docker logs -f collector"_. 129 | 130 | If you have added your own metrics that aren't showing up, verify that you're sending the data to the correct server address and port. 131 | ### I made some changes to and now everything is broken! 
132 | While we do encourage variations, improvements, and additions, these are definitely something we can't support. While you may enter an issue and/or ask for help, we can't guarantee that we can, or will try to fix your deployment and may ask you to revert to a known configuration. 133 | ### I get prompted each time I perform "make run" 134 | You may add _"QUIET=1"_ to the *"/.env"* file. This will automatically choose "yes" when prompted by the build/run process. 135 | 136 | ## Plugin architecture 137 | As of version 2.1, the Performance Analyzer project has been restructured to support extensions via plugins. Core services of the Performance Analyzer that are not considered plugins include: Grafana, InfluxDB, and Ansible. Plugins can make use of these services to extend functionality to suit a particular user's situation. Plugins can be found in the *"/plugins"* directory, and each gets their own folder within. When services are started (or restarted) via the **make** commands, and once core services have started, this plugins folder is scanned and any plugins found are then built (if necessary) and started as well. 138 | 139 | When services are stopped, plugins are stopped first, followed by core services. 140 | 141 | ### Plugin development 142 | Plugins can consist of as much or as little as needed for their functionality. This can be as simple as just including some extra dashboards, or as complex as spinning up their own Docker containers. For a complete example of this plugin architecture, the E-Series monitoring components are now featured in the *eseries_monitoring* plugin found in the plugins directory. We intend this plugin to not only be the primary purpose of this project, but to also serve as a reference for plugin development. 
This plugin showcases all of the major components a plugin might want to incorporate: It spins up Docker containers via its own **docker-compose.yml** and **build_info.txt** files, it uses Ansible to create a new data source for Grafana, and it includes multiple of its own dashboards that are imported when services are started. 143 | 144 | ### Plugin structure 145 | #### Docker containers 146 | In order for plugins to spin up their own Docker containers, you must include a **docker-compose.yml** file in the root directory of your plugin. This Compose file does not require any special formatting. However, we do recommend you format any image names like so: 147 | ~~~~ 148 | image: ${PROJ_NAME}-plugin/PLUGIN_NAME/COMPONENT_NAME 149 | ~~~~ 150 | Where *${PROJ_NAME}* will be automatically replaced. *PLUGIN_NAME* is the name of your plugin's directory, and *COMPONENT_NAME* is the name of the specific component image. We recommend this for organization purposes, and to match how our included plugins are set up. This will make your plugin consistent with the included NetApp plugins and will make it easier to distinguish when listing Docker containers. 151 | 152 | Another reason for this naming convention is that any of your plugin's component images that need to be built are defined in the **build_info.txt** file, which you should place in your plugin's root directory, and those images will have their names formatted this way automatically. Component images are built using standard Dockerfile conventions. The **build_info.txt** included for the *eseries_monitoring* plugin is commented to explain how this file is formatted and used: 153 | ~~~~ 154 | # This file defines the order in which components for this plugin are built. 155 | # Components are built from top to bottom. 
156 | # Per-line: first is the folder containing the Dockerfile, and second is the output image tag 157 | # NOTE: The output image tag will be prefixed with "ntap-grafana-plugin/*plugin_directory_name*/" 158 | # The output image tag is optional, if omitted it will match the Dockerfile directory 159 | 160 | # ex. The alpine image here will be built from the folder "plugins/eseries_monitoring/alpine_base" 161 | # and will be tagged "ntap-grafana-plugin/eseries_monitoring/alpine-base" 162 | # 163 | # The webservices image here will be built from the folder "plugins/eseries_monitoring/webservices" 164 | # and will be tagged "ntap-grafana-plugin/eseries_monitoring/webservices" 165 | 166 | alpine_base alpine-base 167 | python_base python-base 168 | webservices 169 | collector 170 | ~~~~ 171 | 172 | Plugin containers are managed automatically when services are started/stopped/cleaned. They are intended to be plug-and-play, so if you would like to disable a plugin from being part of your services, simply remove its folder from the *plugins* directory and restart the project. 173 | 174 | ##### Docker networking 175 | All containers in the core service offering are part of a Docker network we create at start. This network is named **eseries_perf_analyzer**. If you would like your plugin to interface with these core services, you can connect them to this Docker network. In your plugin's **docker-compose.yml** file, any service that would like access to this network must include: 176 | ~~~~ 177 | networks: 178 | - eseries_perf_analyzer 179 | ~~~~ 180 | And at the bottom of your **docker-compose.yml** file, you must declare this network as external like so: 181 | ~~~~ 182 | networks: 183 | eseries_perf_analyzer: 184 | external: true 185 | ~~~~ 186 | 187 | For an example of this, please look at the *eseries_monitoring* plugin's **docker-compose.yml** file. 
188 | 189 | #### Dashboards 190 | If you would like your plugin to include custom Grafana dashboards, simply place their **JSON** files into a *dashboards* folder in your plugin's root directory. For example: */plugins/my_plugin/dashboards/my_custom_dashboard.json* 191 | 192 | This folder is scanned when services start and any dashboards found are imported into Grafana automatically. 193 | 194 | #### Ansible tasks 195 | We provide the ability for plugins to run their own Ansible tasks using the Ansible container we spin up as part of the core services. Create an *ansible_tasks* folder in your plugin's root directory, and place any valid **.yml** files into it. When services are started, this folder is scanned and any valid Ansible tasks will be run after tasks in the core playbook. 196 | 197 | As an example of this, please take a look at the *influxdb_internal_monitoring* plugin, which runs a task to add a new data source to Grafana. 198 | -------------------------------------------------------------------------------- /ansible/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG TAG=latest 2 | ARG PROJ_NAME=ntap-grafana 3 | ARG IMAGE=${PROJ_NAME}/python-base:${TAG} 4 | # Installing Ansible requires gcc and other dependencies to build packages from source. We'll do this in 2 stages so we don't 5 | # have to have all of the build-time dependencies in the final image.
6 | FROM ${IMAGE} as builder 7 | 8 | # Signifies this is a temporary image that can be purged 9 | LABEL autodelete="true" 10 | RUN apk add --update gcc musl-dev libffi-dev make openssl-dev 11 | RUN python -m pip install --upgrade pip 12 | RUN pip --default-timeout=5 --retries 15 install --upgrade --prefix=/install -r requirements.txt 13 | 14 | FROM ${IMAGE} 15 | COPY --from=builder /install /usr/local 16 | ADD *.yml *.json ./ 17 | ADD dashboards/ ./dashboards 18 | ADD tasks/ ./tasks 19 | RUN mkdir -p /etc/ansible && touch /etc/ansible/hosts && mkdir -p /home/dashboards/backup 20 | ENTRYPOINT ["ansible-playbook", "-v"] 21 | CMD ["main.yml"] 22 | -------------------------------------------------------------------------------- /ansible/datastore.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"WSP", 3 | "label": "WSP", 4 | "type": "influxdb", 5 | "url":"http://influxdb:8086", 6 | "access":"proxy", 7 | "basicAuth": false, 8 | "isDefault": true, 9 | "database":"eseries" 10 | } 11 | -------------------------------------------------------------------------------- /ansible/library/grafana_dashboard_cleanup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from ansible.module_utils.basic import * 4 | 5 | ANSIBLE_METADATA = { 6 | 'metadata_version': '1.1', 7 | 'status': ['preview'], 8 | 'supported_by': 'community' 9 | } 10 | 11 | DOCUMENTATION = ''' 12 | --- 13 | module: grafana_dashboard_cleanup 14 | 15 | short_description: Cleanup exported Grafana Dashboards so they can be re-imported. 16 | 17 | version_added: "2.4" 18 | 19 | description: 20 | - "Cleanup exported Grafana Dashboards so they can be re-imported." 
21 | 22 | options: 23 | dashboard: 24 | description: 25 | - A json String representing the dashboard 26 | required: true 27 | 28 | author: 29 | - Michael Price (@lmprice) 30 | ''' 31 | 32 | fields = { 33 | "dashboard": {"required": True, "type": "str"}, 34 | } 35 | 36 | import json 37 | 38 | 39 | def main(): 40 | module = AnsibleModule(argument_spec=fields) 41 | data = module.params['dashboard'] 42 | data = json.loads(data) 43 | data['dashboard']['id'] = None 44 | data['dashboard']['refresh'] = "5s" 45 | data['dashboard']['time'] = { 46 | "from": "now-5m", 47 | "to": "now" 48 | } 49 | module.exit_json(changed=True, json=data) 50 | 51 | 52 | if __name__ == '__main__': 53 | main() 54 | -------------------------------------------------------------------------------- /ansible/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | vars: 5 | grafana_username: admin 6 | grafana_password: admin 7 | tasks: 8 | - include_tasks: tasks/grafana.yml 9 | - include_tasks: tasks/dashboard_import.yml 10 | - include_tasks: tasks/plugin_tasks.yml -------------------------------------------------------------------------------- /ansible/requirements.txt: -------------------------------------------------------------------------------- 1 | ansible>=2.9.2 2 | influxdb 3 | -------------------------------------------------------------------------------- /ansible/tasks/dashboard_import.yml: -------------------------------------------------------------------------------- 1 | - name: Find dashboards 2 | find: 3 | file_type: file 4 | paths: ./dashboards # NOTE: Plugin dashboards are copied to subdirectories at this path at the start of the build process 5 | patterns: "*.json" 6 | recurse: yes # Each plugin gets its own subfolder for dashboards, and we maintain their folder structure 7 | register: dashboards 8 | 9 | - name: Create Grafana dashboards 10 | ignore_errors: yes 11 | uri: 12 | url: 
"http://grafana:3000/api/dashboards/import" 13 | method: POST 14 | headers: 15 | Content-Type: "application/json" 16 | user: "{{ grafana_username }}" 17 | password: "{{ grafana_password }}" 18 | body: "{{ lookup('file', item.path) | from_json }}" 19 | status_code: 200 20 | body_format: json 21 | force_basic_auth: yes 22 | with_items: "{{ dashboards.files }}" -------------------------------------------------------------------------------- /ansible/tasks/grafana.yml: -------------------------------------------------------------------------------- 1 | - name: Wait for InfluxDB to startup completely 2 | wait_for: 3 | host: "influxdb" 4 | port: 8086 5 | timeout: 10 6 | 7 | - name: Get influxdb datasource 8 | uri: 9 | url: "http://grafana:3000/api/datasources/id/WSP" 10 | method: GET 11 | user: "{{ grafana_username }}" 12 | password: "{{ grafana_password }}" 13 | force_basic_auth: yes 14 | status_code: 404,200 15 | headers: 16 | Accept: "application/json" 17 | register: resp 18 | 19 | - name: Define influxdb datasource 20 | uri: 21 | url: "http://grafana:3000/api/datasources" 22 | method: POST 23 | user: "{{ grafana_username }}" 24 | password: "{{ grafana_password }}" 25 | body: "{{ lookup('file', 'datastore.json') | from_json }}" 26 | status_code: 200 27 | body_format: json 28 | force_basic_auth: yes 29 | headers: 30 | Content-Type: "application/json" 31 | when: resp.status == 404 32 | -------------------------------------------------------------------------------- /ansible/tasks/grafana_backup.yml: -------------------------------------------------------------------------------- 1 | - name: Get existing dashboards 2 | uri: 3 | url: "http://localhost:3000/api/search/" 4 | method: GET 5 | user: "{{ grafana_username }}" 6 | password: "{{ grafana_password }}" 7 | force_basic_auth: yes 8 | headers: 9 | Accept: "application/json" 10 | register: resp 11 | 12 | - name: Retrieve each individual dashboard from the list 13 | uri: 14 | url: "http://localhost:3000/api/dashboards/{{ 
item.uri }}" 15 | method: GET 16 | user: "{{ grafana_username }}" 17 | password: "{{ grafana_password }}" 18 | force_basic_auth: yes 19 | headers: 20 | Accept: "application/json" 21 | with_items: "{{ resp.json }}" 22 | register: dashboards 23 | 24 | - name: Save the retrieved dashboards to disk 25 | copy: 26 | force: True 27 | content: "{{ item.json | to_nice_json}}" 28 | dest: "/home/dashboards/backup/{{item.json.meta.slug}}.json" 29 | with_items: "{{ dashboards.results }}" 30 | loop_control: 31 | label: "{{ item.json.meta.slug }}" 32 | -------------------------------------------------------------------------------- /ansible/tasks/plugin_tasks.yml: -------------------------------------------------------------------------------- 1 | - name: Find tasks defined by plugins 2 | find: 3 | file_type: file 4 | paths: ./tasks/plugin_tasks # NOTE: Plugin tasks are copied to subdirectories at this path at the start of the build process 5 | patterns: "*.yml" 6 | recurse: yes # Each plugin gets its own subfolder for tasks, and we maintain their folder structure 7 | register: plugin_tasks 8 | 9 | - name: Run plugin tasks 10 | include_tasks: 11 | file: "{{ item.path }}" 12 | with_items: "{{ plugin_tasks.files }}" -------------------------------------------------------------------------------- /blackduck/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10 2 | RUN apt-get update && apt-get install -y openjdk-11-jre-headless 3 | RUN python -m pip install --upgrade pip 4 | -------------------------------------------------------------------------------- /build/alpine/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG CONTAINER_ALPINE_TAG=3.14.3 2 | ARG TAG=latest 3 | FROM alpine:${CONTAINER_ALPINE_TAG} 4 | LABEL VERSION=${TAG} 5 | ARG REPO_FILE=repositories 6 | ADD $REPO_FILE /etc/apk/repositories 7 | ONBUILD RUN apk update && apk upgrade && rm -rf /var/cache/apk/* 8 | 
-------------------------------------------------------------------------------- /build/alpine/repositories: -------------------------------------------------------------------------------- 1 | http://dl-cdn.alpinelinux.org/alpine/v3.14/main 2 | http://dl-cdn.alpinelinux.org/alpine/v3.14/community 3 | -------------------------------------------------------------------------------- /build/alpine/repositories.internal: -------------------------------------------------------------------------------- 1 | http://repomirror-ict.eng.netapp.com/alpine-linux/v3.14/main 2 | http://repomirror-ict.eng.netapp.com/alpine-linux/v3.14/community 3 | -------------------------------------------------------------------------------- /build/python/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG CONTAINER_PYTHON_TAG=3.10-alpine3.14 2 | ARG TAG=latest 3 | ARG PROJ_NAME=ntap-grafana 4 | FROM ${PROJ_NAME}/alpine-base:${TAG} as builder 5 | # Signifies this is a temporary image that can be purged 6 | LABEL autodelete="true" 7 | 8 | FROM python:${CONTAINER_PYTHON_TAG} 9 | ARG PIP_CONF=pip.conf 10 | ADD $PIP_CONF /etc/pip.conf 11 | COPY --from=builder /etc/apk/repositories /etc/apk/repositories 12 | ONBUILD WORKDIR /home 13 | ONBUILD COPY requirements.txt . 
14 | ONBUILD RUN apk update && apk upgrade && rm -rf /var/cache/apk/* 15 | -------------------------------------------------------------------------------- /build/python/pip.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | timeout = 10 3 | -------------------------------------------------------------------------------- /build/python/pip.conf.internal: -------------------------------------------------------------------------------- 1 | [global] 2 | timeout = 10 3 | index-url = http://esgweb.eng.netapp.com/~lorenp/python-mirror 4 | trusted-host = esgweb.eng.netapp.com 5 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | 4 | influxdb: 5 | image: ${PROJ_NAME}/influxdb:${TAG} 6 | container_name: influxdb 7 | mem_limit: 2G 8 | restart: unless-stopped 9 | # Internal ports to be exposed to other linked services 10 | # They will not be exposed to the host machine 11 | # expose: 12 | # - 8086 13 | # - 8083 14 | # - 2003 15 | logging: 16 | driver: "json-file" 17 | options: 18 | max-file: "5" 19 | max-size: 10m 20 | networks: 21 | - eseries_perf_analyzer 22 | volumes: 23 | - ./influx-database:/var/lib/influxdb 24 | 25 | grafana: 26 | image: ${PROJ_NAME}/grafana:${TAG} 27 | container_name: grafana 28 | restart: unless-stopped 29 | depends_on: 30 | - influxdb 31 | ports: 32 | - 3000:3000 33 | volumes: 34 | - ./grafana/grafana.ini:/etc/grafana/grafana.ini:ro 35 | - grafana_data:/var/lib/grafana 36 | logging: 37 | driver: "json-file" 38 | options: 39 | max-file: "5" 40 | max-size: 10m 41 | networks: 42 | - eseries_perf_analyzer 43 | 44 | networks: 45 | eseries_perf_analyzer: 46 | external: true 47 | 48 | volumes: 49 | grafana_data: 50 | -------------------------------------------------------------------------------- /grafana/Dockerfile: 
-------------------------------------------------------------------------------- 1 | ARG CONTAINER_GRAFANA_TAG=8.3.6 2 | ARG TAG=latest 3 | ARG PROJ_NAME=ntap-grafana 4 | FROM grafana/grafana:${CONTAINER_GRAFANA_TAG} 5 | -------------------------------------------------------------------------------- /grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | ##################### Grafana Configuration Example ##################### 2 | # 3 | # Everything has defaults so you only need to uncomment things you want to 4 | # change 5 | 6 | # possible values : production, development 7 | ;app_mode = production 8 | 9 | # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty 10 | ;instance_name = ${HOSTNAME} 11 | 12 | #################################### Paths #################################### 13 | [paths] 14 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) 15 | ;data = /var/lib/grafana 16 | 17 | # Temporary files in `data` directory older than given duration will be removed 18 | ;temp_data_lifetime = 24h 19 | 20 | # Directory where grafana can store logs 21 | ;logs = /var/log/grafana 22 | 23 | # Directory where grafana will automatically scan and look for plugins 24 | ;plugins = /var/lib/grafana/plugins 25 | 26 | # folder that contains provisioning config files that grafana will apply on startup and while running. 
27 | ;provisioning = conf/provisioning 28 | 29 | #################################### Server #################################### 30 | [server] 31 | # Protocol (http, https, socket) 32 | ;protocol = http 33 | 34 | # The ip address to bind to, empty will bind to all interfaces 35 | ;http_addr = 36 | 37 | # The http port to use 38 | ;http_port = 3000 39 | 40 | # The public facing domain name used to access grafana from a browser 41 | ;domain = localhost 42 | 43 | # Redirect to correct domain if host header does not match domain 44 | # Prevents DNS rebinding attacks 45 | ;enforce_domain = false 46 | 47 | # The full public facing url you use in browser, used for redirects and emails 48 | # If you use reverse proxy and sub path specify full url (with sub path) 49 | ;root_url = http://localhost:3000 50 | 51 | # Log web requests 52 | ;router_logging = false 53 | 54 | # the path relative working path 55 | ;static_root_path = public 56 | 57 | # enable gzip 58 | ;enable_gzip = false 59 | 60 | # https certs & key file 61 | ;cert_file = 62 | ;cert_key = 63 | 64 | # Unix socket path 65 | ;socket = 66 | 67 | #################################### Database #################################### 68 | [database] 69 | # You can configure the database connection by specifying type, host, name, user and password 70 | # as separate properties or as on string using the url properties. 71 | 72 | # Either "mysql", "postgres" or "sqlite3", it's your choice 73 | ;type = sqlite3 74 | ;host = 127.0.0.1:3306 75 | ;name = grafana 76 | ;user = root 77 | # If the password contains # or ; you have to wrap it with triple quotes. 
Ex """#password;""" 78 | ;password = 79 | 80 | # Use either URL or the previous fields to configure the database 81 | # Example: mysql://user:secret@host:port/database 82 | ;url = 83 | 84 | # For "postgres" only, either "disable", "require" or "verify-full" 85 | ;ssl_mode = disable 86 | 87 | # For "sqlite3" only, path relative to data_path setting 88 | ;path = grafana.db 89 | 90 | # Max idle conn setting default is 2 91 | ;max_idle_conn = 2 92 | 93 | # Max conn setting default is 0 (mean not set) 94 | ;max_open_conn = 95 | 96 | # Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours) 97 | ;conn_max_lifetime = 14400 98 | 99 | # Set to true to log the sql calls and execution times. 100 | log_queries = 101 | 102 | #################################### Session #################################### 103 | [session] 104 | # Either "memory", "file", "redis", "mysql", "postgres", default is "file" 105 | ;provider = file 106 | 107 | # Provider config options 108 | # memory: not have any config yet 109 | # file: session dir path, is relative to grafana data_path 110 | # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana` 111 | # mysql: go-sql-driver/mysql dsn config string, e.g. 
`user:password@tcp(127.0.0.1:3306)/database_name` 112 | # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable 113 | ;provider_config = sessions 114 | 115 | # Session cookie name 116 | ;cookie_name = grafana_sess 117 | 118 | # If you use session in https only, default is false 119 | ;cookie_secure = false 120 | 121 | # Session life time, default is 86400 122 | ;session_life_time = 86400 123 | 124 | #################################### Data proxy ########################### 125 | [dataproxy] 126 | 127 | # This enables data proxy logging, default is false 128 | ;logging = false 129 | 130 | #################################### Analytics #################################### 131 | [analytics] 132 | # Server reporting, sends usage counters to stats.grafana.org every 24 hours. 133 | # No ip addresses are being tracked, only simple counters to track 134 | # running instances, dashboard and error counts. It is very helpful to us. 135 | # Change this option to false to disable reporting. 
136 | ;reporting_enabled = true 137 | 138 | # Set to false to disable all checks to https://grafana.net 139 | # for new versions (grafana itself and plugins), check is used 140 | # in some UI views to notify that grafana or plugin update exists 141 | # This option does not cause any auto updates, nor send any information 142 | # only a GET request to http://grafana.com to get latest versions 143 | ;check_for_updates = true 144 | 145 | # Google Analytics universal tracking code, only enabled if you specify an id here 146 | ;google_analytics_ua_id = 147 | 148 | #################################### Security #################################### 149 | [security] 150 | # default admin user, created on startup 151 | ;admin_user = admin 152 | 153 | # default admin password, can be changed before first start of grafana, or in profile settings 154 | ;admin_password = admin 155 | 156 | # used for signing 157 | ;secret_key = SW2YcwTIb9zpOOhoPsMm 158 | 159 | # Auto-login remember days 160 | ;login_remember_days = 7 161 | ;cookie_username = grafana_user 162 | ;cookie_remember_name = grafana_remember 163 | 164 | # disable gravatar profile images 165 | ;disable_gravatar = false 166 | 167 | # data source proxy whitelist (ip_or_domain:port separated by spaces) 168 | ;data_source_proxy_whitelist = 169 | 170 | # disable protection against brute force login attempts 171 | ;disable_brute_force_login_protection = false 172 | 173 | #################################### Snapshots ########################### 174 | [snapshots] 175 | # snapshot sharing options 176 | ;external_enabled = true 177 | ;external_snapshot_url = https://snapshots-origin.raintank.io 178 | ;external_snapshot_name = Publish to snapshot.raintank.io 179 | 180 | # remove expired snapshot 181 | ;snapshot_remove_expired = true 182 | 183 | #################################### Dashboards History ################## 184 | [dashboards] 185 | # Number dashboard versions to keep (per dashboard).
Default: 20, Minimum: 1 186 | ;versions_to_keep = 20 187 | 188 | #################################### Users ############################### 189 | [users] 190 | # disable user signup / registration 191 | allow_sign_up = true 192 | 193 | # Allow non admin users to create organizations 194 | ;allow_org_create = true 195 | 196 | # Set to true to automatically assign new users to the default organization (id 1) 197 | auto_assign_org = true 198 | 199 | # Default role new users will be automatically assigned (if disabled above is set to true) 200 | auto_assign_org_role = Viewer 201 | 202 | # Background text for the user field on the login page 203 | ;login_hint = email or username 204 | 205 | # Default UI theme ("dark" or "light") 206 | ;default_theme = dark 207 | 208 | # External user management, these options affect the organization users view 209 | ;external_manage_link_url = 210 | ;external_manage_link_name = 211 | ;external_manage_info = 212 | 213 | # Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard. 214 | ;viewers_can_edit = false 215 | 216 | [auth] 217 | # Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false 218 | ;disable_login_form = true 219 | 220 | # Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false 221 | ;disable_signout_menu = false 222 | 223 | # URL to redirect the user to after sign out 224 | ;signout_redirect_url = 225 | 226 | # Set to true to attempt login with OAuth automatically, skipping the login screen. 227 | # This setting is ignored if multiple OAuth providers are configured. 228 | ;oauth_auto_login = false 229 | 230 | #################################### Anonymous Auth ########################## 231 | [auth.anonymous] 232 | # enable anonymous access 233 | enabled = true 234 | 235 | # specify organization name that should be used for unauthenticated users 236 | org_name = Main Org. 
237 | 238 | # specify role for unauthenticated users 239 | org_role = Viewer 240 | 241 | #################################### Github Auth ########################## 242 | [auth.github] 243 | ;enabled = false 244 | ;allow_sign_up = true 245 | ;client_id = some_id 246 | ;client_secret = some_secret 247 | ;scopes = user:email,read:org 248 | ;auth_url = https://github.com/login/oauth/authorize 249 | ;token_url = https://github.com/login/oauth/access_token 250 | ;api_url = https://api.github.com/user 251 | ;team_ids = 252 | ;allowed_organizations = 253 | 254 | #################################### Google Auth ########################## 255 | [auth.google] 256 | ;enabled = false 257 | ;allow_sign_up = true 258 | ;client_id = some_client_id 259 | ;client_secret = some_client_secret 260 | ;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email 261 | ;auth_url = https://accounts.google.com/o/oauth2/auth 262 | ;token_url = https://accounts.google.com/o/oauth2/token 263 | ;api_url = https://www.googleapis.com/oauth2/v1/userinfo 264 | ;allowed_domains = 265 | 266 | #################################### Generic OAuth ########################## 267 | [auth.generic_oauth] 268 | ;enabled = false 269 | ;name = OAuth 270 | ;allow_sign_up = true 271 | ;client_id = some_id 272 | ;client_secret = some_secret 273 | ;scopes = user:email,read:org 274 | ;auth_url = https://foo.bar/login/oauth/authorize 275 | ;token_url = https://foo.bar/login/oauth/access_token 276 | ;api_url = https://foo.bar/user 277 | ;team_ids = 278 | ;allowed_organizations = 279 | ;tls_skip_verify_insecure = false 280 | ;tls_client_cert = 281 | ;tls_client_key = 282 | ;tls_client_ca = 283 | 284 | #################################### Grafana.com Auth #################### 285 | [auth.grafana_com] 286 | ;enabled = false 287 | ;allow_sign_up = true 288 | ;client_id = some_id 289 | ;client_secret = some_secret 290 | ;scopes = user:email 291 | ;allowed_organizations = 292 | 
293 | #################################### Auth Proxy ########################## 294 | [auth.proxy] 295 | ;enabled = false 296 | ;header_name = X-WEBAUTH-USER 297 | ;header_property = username 298 | ;auto_sign_up = true 299 | ;ldap_sync_ttl = 60 300 | ;whitelist = 192.168.1.1, 192.168.2.1 301 | ;headers = Email:X-User-Email, Name:X-User-Name 302 | 303 | #################################### Basic Auth ########################## 304 | [auth.basic] 305 | ;enabled = true 306 | 307 | #################################### Auth LDAP ########################## 308 | [auth.ldap] 309 | ;enabled = false 310 | ;config_file = /etc/grafana/ldap.toml 311 | ;allow_sign_up = true 312 | 313 | #################################### SMTP / Emailing ########################## 314 | [smtp] 315 | ;enabled = false 316 | ;host = localhost:25 317 | ;user = 318 | # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;""" 319 | ;password = 320 | ;cert_file = 321 | ;key_file = 322 | ;skip_verify = false 323 | ;from_address = admin@grafana.localhost 324 | ;from_name = Grafana 325 | # EHLO identity in SMTP dialog (defaults to instance_name) 326 | ;ehlo_identity = dashboard.example.com 327 | 328 | [emails] 329 | ;welcome_email_on_sign_up = false 330 | 331 | #################################### Logging ########################## 332 | [log] 333 | # Either "console", "file", "syslog". Default is console and file 334 | # Use space to separate multiple modes, e.g. "console file" 335 | ;mode = console file 336 | 337 | # Either "debug", "info", "warn", "error", "critical", default is "info" 338 | ;level = info 339 | 340 | # optional settings to set different levels for specific loggers. 
Ex filters = sqlstore:debug 341 | ;filters = 342 | 343 | # For "console" mode only 344 | [log.console] 345 | ;level = 346 | 347 | # log line format, valid options are text, console and json 348 | ;format = console 349 | 350 | # For "file" mode only 351 | [log.file] 352 | ;level = 353 | 354 | # log line format, valid options are text, console and json 355 | ;format = text 356 | 357 | # This enables automated log rotate(switch of following options), default is true 358 | ;log_rotate = true 359 | 360 | # Max line number of single file, default is 1000000 361 | ;max_lines = 1000000 362 | 363 | # Max size shift of single file, default is 28 means 1 << 28, 256MB 364 | ;max_size_shift = 28 365 | 366 | # Segment log daily, default is true 367 | ;daily_rotate = true 368 | 369 | # Expired days of log file(delete after max days), default is 7 370 | ;max_days = 7 371 | 372 | [log.syslog] 373 | ;level = 374 | 375 | # log line format, valid options are text, console and json 376 | ;format = text 377 | 378 | # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. 379 | ;network = 380 | ;address = 381 | 382 | # Syslog facility. user, daemon and local0 through local7 are valid. 383 | ;facility = 384 | 385 | # Syslog tag. By default, the process' argv[0] is used. 386 | ;tag = 387 | 388 | #################################### Alerting ############################ 389 | [alerting] 390 | # Disable alerting engine & UI features 391 | ;enabled = true 392 | # Makes it possible to turn off alert rule execution but alerting UI is visible 393 | ;execute_alerts = true 394 | 395 | # Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state) 396 | ;error_or_timeout = alerting 397 | 398 | # Default setting for how Grafana handles nodata or null values in alerting. 
(alerting, no_data, keep_state, ok) 399 | ;nodata_or_nullvalues = no_data 400 | 401 | # Alert notifications can include images, but rendering many images at the same time can overload the server 402 | # This limit will protect the server from render overloading and make sure notifications are sent out quickly 403 | ;concurrent_render_limit = 5 404 | 405 | #################################### Explore ############################# 406 | [explore] 407 | # Enable the Explore section 408 | ;enabled = false 409 | 410 | #################################### Internal Grafana Metrics ########################## 411 | # Metrics available at HTTP API Url /metrics 412 | [metrics] 413 | # Disable / Enable internal metrics 414 | ;enabled = true 415 | 416 | # Publish interval 417 | ;interval_seconds = 10 418 | 419 | # Send internal metrics to Graphite 420 | [metrics.graphite] 421 | # Enable by setting the address setting (ex localhost:2003) 422 | ;address = 423 | ;prefix = prod.grafana.%(instance_name)s. 424 | 425 | #################################### Distributed tracing ############ 426 | [tracing.jaeger] 427 | # Enable by setting the address sending traces to jaeger (ex localhost:6831) 428 | ;address = localhost:6831 429 | # Tag that will always be included when creating new spans. 
ex (tag1:value1,tag2:value2) 430 | ;always_included_tag = tag1:value1 431 | # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote 432 | ;sampler_type = const 433 | # jaeger samplerconfig param 434 | # for "const" sampler, 0 or 1 for always false/true respectively 435 | # for "probabilistic" sampler, a probability between 0 and 1 436 | # for "rateLimiting" sampler, the number of spans per second 437 | # for "remote" sampler, param is the same as for "probabilistic" 438 | # and indicates the initial sampling rate before the actual one 439 | # is received from the mothership 440 | ;sampler_param = 1 441 | 442 | #################################### Grafana.com integration ########################## 443 | # Url used to import dashboards directly from Grafana.com 444 | [grafana_com] 445 | ;url = https://grafana.com 446 | 447 | #################################### External image storage ########################## 448 | [external_image_storage] 449 | # Used for uploading images to public servers so they can be included in slack/email messages. 
450 | # you can choose between (s3, webdav, gcs, azure_blob, local) 451 | ;provider = 452 | 453 | [external_image_storage.s3] 454 | ;bucket = 455 | ;region = 456 | ;path = 457 | ;access_key = 458 | ;secret_key = 459 | 460 | [external_image_storage.webdav] 461 | ;url = 462 | ;public_url = 463 | ;username = 464 | ;password = 465 | 466 | [external_image_storage.gcs] 467 | ;key_file = 468 | ;bucket = 469 | ;path = 470 | 471 | [external_image_storage.azure_blob] 472 | ;account_name = 473 | ;account_key = 474 | ;container_name = 475 | 476 | [external_image_storage.local] 477 | # does not require any configuration 478 | 479 | [rendering] 480 | # Options to configure external image rendering server like https://github.com/grafana/grafana-image-renderer 481 | ;server_url = 482 | ;callback_url = 483 | 484 | [enterprise] 485 | # Path to a valid Grafana Enterprise license.jwt file 486 | ;license_path = 487 | -------------------------------------------------------------------------------- /influxdb/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG CONTAINER_INFLUXDB_TAG=1.8-alpine 2 | ARG TAG=latest 3 | ARG PROJ_NAME=ntap-grafana 4 | FROM ${PROJ_NAME}/alpine-base:${TAG} as builder 5 | LABEL autodelete="true" 6 | 7 | FROM influxdb:${CONTAINER_INFLUXDB_TAG} 8 | ADD influxdb.conf /etc/influxdb 9 | COPY --from=builder /etc/apk/repositories /etc/apk/repositories 10 | RUN apk update && apk upgrade && rm -rf /var/cache/apk/* 11 | 12 | EXPOSE 8086 13 | 14 | CMD ["influxd"] 15 | -------------------------------------------------------------------------------- /influxdb/influxdb.conf: -------------------------------------------------------------------------------- 1 | [meta] 2 | dir = "/var/lib/influxdb/meta" 3 | retention-autocreate = true 4 | logging-enabled = true 5 | 6 | [data] 7 | dir = "/var/lib/influxdb/data" 8 | index-version = "tsi1" 9 | wal-dir = "/var/lib/influxdb/wal" 10 | wal-fsync-delay = "0s" 11 | validate-keys = false 12 | 
query-log-enabled = true 13 | cache-max-memory-size = 1073741824 14 | cache-snapshot-memory-size = 26214400 15 | cache-snapshot-write-cold-duration = "10m0s" 16 | compact-full-write-cold-duration = "4h0m0s" 17 | compact-throughput = 50331648 18 | compact-throughput-burst = 50331648 19 | max-series-per-database = 1000000 20 | max-values-per-tag = 100000 21 | max-concurrent-compactions = 0 22 | max-index-log-file-size = 1048576 23 | series-id-set-cache-size = 100 24 | trace-logging-enabled = false 25 | tsm-use-madv-willneed = false 26 | 27 | [coordinator] 28 | write-timeout = "10s" 29 | max-concurrent-queries = 0 30 | query-timeout = "0s" 31 | log-queries-after = "0s" 32 | max-select-point = 0 33 | max-select-series = 0 34 | max-select-buckets = 0 35 | 36 | [retention] 37 | enabled = true 38 | check-interval = "30m0s" 39 | 40 | [shard-precreation] 41 | enabled = true 42 | check-interval = "10m0s" 43 | advance-period = "30m0s" 44 | 45 | [monitor] 46 | store-enabled = true 47 | store-database = "_internal" 48 | store-interval = "10s" 49 | 50 | [subscriber] 51 | enabled = true 52 | http-timeout = "30s" 53 | insecure-skip-verify = false 54 | ca-certs = "" 55 | write-concurrency = 40 56 | write-buffer-size = 1000 57 | 58 | [http] 59 | enabled = true 60 | bind-address = ":8086" 61 | auth-enabled = false 62 | log-enabled = true 63 | suppress-write-log = false 64 | write-tracing = false 65 | flux-enabled = false 66 | flux-log-enabled = false 67 | pprof-enabled = true 68 | debug-pprof-enabled = false 69 | https-enabled = false 70 | https-certificate = "/etc/ssl/influxdb.pem" 71 | https-private-key = "" 72 | max-row-limit = 0 73 | max-connection-limit = 0 74 | shared-secret = "" 75 | realm = "InfluxDB" 76 | unix-socket-enabled = false 77 | unix-socket-permissions = "0777" 78 | bind-socket = "/var/run/influxdb.sock" 79 | max-body-size = 25000000 80 | access-log-path = "" 81 | max-concurrent-write-limit = 0 82 | max-enqueued-write-limit = 0 83 | enqueued-write-timeout = 
30000000000 84 | 85 | [logging] 86 | format = "auto" 87 | level = "info" 88 | suppress-logo = false 89 | 90 | [[graphite]] 91 | enabled = false 92 | bind-address = ":2003" 93 | database = "graphite" 94 | retention-policy = "" 95 | protocol = "tcp" 96 | batch-size = 5000 97 | batch-pending = 10 98 | batch-timeout = "1s" 99 | consistency-level = "one" 100 | separator = "." 101 | udp-read-buffer = 0 102 | 103 | [[collectd]] 104 | enabled = false 105 | bind-address = ":25826" 106 | database = "collectd" 107 | retention-policy = "" 108 | batch-size = 5000 109 | batch-pending = 10 110 | batch-timeout = "10s" 111 | read-buffer = 0 112 | typesdb = "/usr/share/collectd/types.db" 113 | security-level = "none" 114 | auth-file = "/etc/collectd/auth_file" 115 | parse-multivalue-plugin = "split" 116 | 117 | [[opentsdb]] 118 | enabled = false 119 | bind-address = ":4242" 120 | database = "opentsdb" 121 | retention-policy = "" 122 | consistency-level = "one" 123 | tls-enabled = false 124 | certificate = "/etc/ssl/influxdb.pem" 125 | batch-size = 1000 126 | batch-pending = 5 127 | batch-timeout = "1s" 128 | log-point-errors = true 129 | 130 | [[udp]] 131 | enabled = false 132 | bind-address = ":8089" 133 | database = "udp" 134 | retention-policy = "" 135 | batch-size = 5000 136 | batch-pending = 10 137 | read-buffer = 0 138 | batch-timeout = "1s" 139 | precision = "" 140 | 141 | [continuous_queries] 142 | log-enabled = true 143 | enabled = true 144 | query-stats-enabled = false 145 | run-interval = "1s" 146 | 147 | [tls] 148 | min-version = "" 149 | max-version = "" 150 | 151 | -------------------------------------------------------------------------------- /plugins/eseries_monitoring/alpine_base/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG CONTAINER_ALPINE_TAG=3.14.3 2 | ARG TAG=latest 3 | FROM alpine:${CONTAINER_ALPINE_TAG} 4 | LABEL VERSION=${TAG} 5 | ARG ALPINE_REPO_FILE=repositories 6 | ADD $ALPINE_REPO_FILE 
/etc/apk/repositories 7 | ONBUILD RUN apk update && apk upgrade && rm -rf /var/cache/apk/* 8 | -------------------------------------------------------------------------------- /plugins/eseries_monitoring/alpine_base/repositories: -------------------------------------------------------------------------------- 1 | http://dl-cdn.alpinelinux.org/alpine/edge/main 2 | http://dl-cdn.alpinelinux.org/alpine/edge/community -------------------------------------------------------------------------------- /plugins/eseries_monitoring/alpine_base/repositories.internal: -------------------------------------------------------------------------------- 1 | http://repomirror-ict.eng.netapp.com/alpine-linux/latest-stable/main 2 | http://repomirror-ict.eng.netapp.com/alpine-linux/latest-stable/community 3 | -------------------------------------------------------------------------------- /plugins/eseries_monitoring/build_info.txt: -------------------------------------------------------------------------------- 1 | # This file defines the order in which components for this plugin are built. 2 | # Components are built from top to bottom. 3 | # Per-line: first is the folder containing the Dockerfile, and second is the output image tag 4 | # NOTE: The output image tag will be prefixed with "ntap-grafana-plugin/*plugin_directory_name*/" 5 | # The output image tag is optional, if omitted it will match the Dockerfile directory 6 | 7 | # ex. 
The alpine image here will be built from the folder "plugins/eseries_monitoring/alpine_base" 8 | # and will be tagged "ntap-grafana-plugin/eseries_monitoring/alpine-base" 9 | # 10 | # The webservices image here will be built from the folder "plugins/eseries_monitoring/webservices" 11 | # and will be tagged "ntap-grafana-plugin/eseries_monitoring/webservices" 12 | 13 | alpine_base alpine-base 14 | python_base python-base 15 | webservices 16 | collector 17 | -------------------------------------------------------------------------------- /plugins/eseries_monitoring/collector/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG TAG=latest 2 | ARG PROJ_NAME=ntap-grafana 3 | FROM ${PROJ_NAME}-plugin/eseries_monitoring/python-base 4 | ENV COLLECTION_INTERVAL=30 5 | ENV RETENTION_PERIOD=52w 6 | ENV PROXY_ADDRESS=webservices:8080 7 | RUN python -m pip install --upgrade pip 8 | RUN pip --default-timeout=5 --retries 15 install --upgrade -r requirements.txt && rm -rf /root/.cache 9 | 10 | ADD docker-entrypoint.sh config.json *.py ./ 11 | RUN chmod +x *.sh *.py 12 | 13 | ENTRYPOINT ["./docker-entrypoint.sh"] 14 | -------------------------------------------------------------------------------- /plugins/eseries_monitoring/collector/collector-graphite.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | Retrieves and collects data from the the NetApp E-series web server 4 | and sends the data to a graphite server 5 | """ 6 | import struct 7 | import time 8 | import logging 9 | import socket 10 | import argparse 11 | import concurrent.futures 12 | import requests 13 | import json 14 | 15 | try: 16 | import cPickle as pickle 17 | except ImportError: 18 | import pickle 19 | 20 | __author__ = 'kevin5' 21 | __version__ = '1.0' 22 | 23 | DEFAULT_USERNAME = 'admin' 24 | DEFAULT_PASSWORD = 'admin' 25 | 26 | DEFAULT_SYSTEM_NAME = 'Unnamed' 27 | 28 | ####################### 29 
| # LIST OF METRICS###### 30 | ####################### 31 | 32 | VOLUME_PARAMETERS = [ 33 | 'averageReadOpSize', 34 | 'averageWriteOpSize', 35 | 'combinedIOps', 36 | 'combinedResponseTime', 37 | 'combinedThroughput', 38 | 'flashCacheHitPct', 39 | 'flashCacheReadHitBytes', 40 | 'flashCacheReadHitOps', 41 | 'flashCacheReadResponseTime', 42 | 'flashCacheReadThroughput', 43 | 'otherIOps', 44 | 'queueDepthMax', 45 | 'queueDepthTotal', 46 | 'readCacheUtilization', 47 | 'readHitBytes', 48 | 'readHitOps', 49 | 'readIOps', 50 | 'readOps', 51 | 'readPhysicalIOps', 52 | 'readResponseTime', 53 | 'readThroughput', 54 | 'writeCacheUtilization', 55 | 'writeHitBytes', 56 | 'writeHitOps', 57 | 'writeIOps', 58 | 'writeOps', 59 | 'writePhysicalIOps', 60 | 'writeResponseTime', 61 | 'writeThroughput' 62 | ] 63 | 64 | DRIVE_PARAMETERS = [ 65 | 'averageReadOpSize', 66 | 'averageWriteOpSize', 67 | 'combinedIOps', 68 | 'combinedResponseTime', 69 | 'combinedThroughput', 70 | 'otherIOps', 71 | 'readIOps', 72 | 'readOps', 73 | 'readPhysicalIOps', 74 | 'readResponseTime', 75 | 'readThroughput', 76 | 'writeIOps', 77 | 'writeOps', 78 | 'writePhysicalIOps', 79 | 'writeResponseTime', 80 | 'writeThroughput' 81 | ] 82 | 83 | 84 | ####################### 85 | # PARAMETERS########### 86 | ####################### 87 | 88 | NUMBER_OF_THREADS = 10 89 | 90 | # LOGGING 91 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 92 | requests.packages.urllib3.disable_warnings() 93 | LOG = logging.getLogger("collector") 94 | 95 | # Disables reset connection warning message if the connection time is too long 96 | logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING) 97 | 98 | 99 | ####################### 100 | # ARGUMENT PARSER###### 101 | ####################### 102 | 103 | PARSER = argparse.ArgumentParser() 104 | 105 | PARSER.add_argument('-u', '--username', default='', 106 | help='Provide the username used to connect to the 
Web Services Proxy. ' 107 | 'If not specified, will check for the \'/collector/config.json\' file. ' 108 | 'Otherwise, it will default to \'' + DEFAULT_USERNAME + '\'') 109 | PARSER.add_argument('-p', '--password', default='', 110 | help='Provide the password for this user to connect to the Web Services Proxy. ' 111 | 'If not specified, will check for the \'/collector/config.json\' file. ' 112 | 'Otherwise, it will default to \'' + DEFAULT_PASSWORD + '\'') 113 | PARSER.add_argument('-t', '--intervalTime', type=int, default=5, 114 | help='Provide the time (seconds) in which the script polls and sends data ' 115 | 'from the SANtricity webServer to the Graphite backend. ' 116 | 'If not specified, will use the default time of 60 seconds.