├── config └── README.md ├── flasharray_collector ├── __init__.py ├── flasharray_metrics │ ├── __init__.py │ ├── host_volume_metrics.py │ ├── array_info_metrics.py │ ├── network_interface_metrics.py │ ├── array_events_metrics.py │ ├── host_space_metrics.py │ ├── pod_space_metrics.py │ ├── host_performance_metrics.py │ ├── pod_performance_metrics.py │ ├── pod_status_metrics.py │ ├── volume_performance_metrics.py │ ├── volume_space_metrics.py │ ├── mappings.py │ ├── array_space_metrics.py │ ├── array_performance_metrics.py │ ├── array_hardware_metrics.py │ └── flasharray.py └── flasharray_collector.py ├── flashblade_collector ├── __init__.py ├── flashblade_metrics │ ├── __init__.py │ ├── array_info_metrics.py │ ├── usage_users_metrics.py │ ├── usage_groups_metrics.py │ ├── array_events_metrics.py │ ├── filesystems_replica_metrics.py │ ├── buckets_replica_metrics.py │ ├── filesystems_space_metrics.py │ ├── array_space_metrics.py │ ├── buckets_performance_metrics.py │ ├── buckets_space_metrics.py │ ├── array_specific_performance_metrics.py │ ├── clients_performance_metrics.py │ ├── filesystems_performance_metrics.py │ ├── array_specific_performance_mapping.py │ ├── array_performance_metrics.py │ ├── flashblade.py │ └── array_hardware_metrics.py └── flashblade_collector.py ├── extra ├── pure-helper │ ├── requirements.txt │ ├── .dockerignore │ ├── Makefile │ ├── Dockerfile │ ├── README.md │ └── pure_helper.py ├── monitoring-stack │ ├── README.md │ ├── prometheus │ │ ├── pure.rules │ │ ├── alert.rules │ │ └── prometheus.yml │ ├── docker-compose.yml │ ├── alertmanager │ │ └── alertmanager.yml │ └── grafana │ │ └── grafana.ini └── grafana │ └── pure-storage-flashblade-landscape.json ├── requirements.fa.txt ├── requirements.fb.txt ├── requirements.txt ├── .dockerignore ├── .gitignore ├── Dockerfile.fa ├── Dockerfile.fb ├── Dockerfile ├── Makefile.mk ├── Makefile.fa ├── Makefile.fb ├── pure_fa_exporter.py ├── pure_fb_exporter.py ├── pure_exporter.py ├── README.md └── 
LICENSE.md /config/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /flasharray_collector/__init__.py: -------------------------------------------------------------------------------- 1 | from .flasharray_collector import FlasharrayCollector 2 | -------------------------------------------------------------------------------- /flashblade_collector/__init__.py: -------------------------------------------------------------------------------- 1 | from .flashblade_collector import FlashbladeCollector 2 | -------------------------------------------------------------------------------- /extra/pure-helper/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask>=1.0.2 2 | purestorage>=1.16.0 3 | urllib3>=1.26.5 4 | -------------------------------------------------------------------------------- /requirements.fa.txt: -------------------------------------------------------------------------------- 1 | Flask>=1.1.2 2 | Flask-HTTPAuth>=4.2.0 3 | prometheus-client>=0.7.1 4 | purestorage>=1.19 5 | urllib3>=1.25.10 6 | gunicorn>=20.1.0 7 | -------------------------------------------------------------------------------- /requirements.fb.txt: -------------------------------------------------------------------------------- 1 | Flask>=1.1.2 2 | Flask-HTTPAuth>=4.2.0 3 | prometheus-client>=0.7.1 4 | purity-fb>=1.10.0 5 | urllib3>=1.25.10 6 | gunicorn>=20.1.0 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Flask>=1.1.2 2 | Flask-HTTPAuth>=4.2.0 3 | prometheus-client>=0.7.1 4 | purestorage>=1.19.0 5 | purity-fb>=1.10.0 6 | urllib3>=1.25.10 7 | gunicorn>=20.1.0 8 | -------------------------------------------------------------------------------- /.dockerignore: 
-------------------------------------------------------------------------------- 1 | # Git 2 | .git/ 3 | .gitignore 4 | 5 | # Docker 6 | Dockerfile 7 | .dockerignore 8 | 9 | # Documents 10 | README.md 11 | LICENSE.md 12 | 13 | # Makefile 14 | Makefile 15 | 16 | -------------------------------------------------------------------------------- /extra/pure-helper/.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git/ 3 | .gitignore 4 | 5 | # Docker 6 | Dockerfile 7 | .dockerignore 8 | 9 | # Documents 10 | README.md 11 | LICENSE.md 12 | 13 | # Makefile 14 | Makefile 15 | 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # python bytecode cache 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # environments 6 | .env 7 | .venv 8 | env/ 9 | venv/ 10 | ENV/ 11 | env.bak/ 12 | venv.bak/ 13 | 14 | # mypy 15 | .mypy_cache/ 16 | .dmypy.json 17 | dmypy.json 18 | 19 | # IDEs 20 | .vscode/ 21 | -------------------------------------------------------------------------------- /flasharray_collector/flasharray_metrics/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ["flasharray", 2 | "mappings", 3 | "array_info_metrics", 4 | "array_hardware_metrics", 5 | "array_events_metrics", 6 | "array_space_metrics", 7 | "array_performance_metrics", 8 | "volume_space_metrics", 9 | "volume_performance_metrics", 10 | "host_space_metrics", 11 | "host_performance_metrics", 12 | "host_volume_metrics", 13 | "pod_status_metrics", 14 | "pod_space_metrics", 15 | "pod_performance_metrics" 16 | "network_interface_performance_metrics"] 17 | -------------------------------------------------------------------------------- /extra/monitoring-stack/README.md: -------------------------------------------------------------------------------- 1 | # Stack template for a 
monitoring system tool. 2 | 3 | Components: 4 | - Prometheus 5 | - Grafana 6 | - AlertManager 7 | - Pure Storage metrics exporter for Prometheus 8 | - Pure Storage FlashArray helper for Grafana 9 | 10 | You are required to provide the persistent storage for Prometheus and Grafana and modify the docker-compose.yaml file appropriately 11 | 12 | ### Usage example 13 | 14 | To compose and launch the stack, use the following command: 15 | ```bash 16 | sudo docker-compose -p up -d 17 | ### License 18 | 19 | This project is licensed under the Apache 2.0 License - see the [LICENSE.md](LICENSE.md) file for details 20 | -------------------------------------------------------------------------------- /extra/pure-helper/Makefile: -------------------------------------------------------------------------------- 1 | IMAGE_NAMESPACE ?= genegatpure 2 | IMAGE_NAME ?= pure-helper 3 | IMAGE_TAG ?= 1.0 4 | 5 | RUN_PORT = 9000:9000 6 | 7 | default: build 8 | 9 | .PHONY: all 10 | all: build test 11 | 12 | .PHONY: build 13 | build: Dockerfile requirements.txt .dockerignore $(wildcard *.py) 14 | docker build . 
-f Dockerfile -t $(IMAGE_NAMESPACE)/$(IMAGE_NAME):$(IMAGE_TAG) 15 | 16 | .PHONY: test 17 | test: 18 | docker run --rm -p $(RUN_PORT) $(IMAGE_NAMESPACE)/$(IMAGE_NAME):$(IMAGE_TAG) 19 | 20 | .PHONY: release 21 | release: 22 | @[ "$(VERSION)" ] || ( echo "$@ needs VERSION variable"; exit 1 ) 23 | 24 | git tag "v$(VERSION)" 25 | git tag "$(IMAGE_TAG)" --force 26 | git push --force --tags 27 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ["flashblade_metrics", 2 | "array_events_metrics", 3 | "array_performance_metrics", 4 | "buckets_performance_metrics", 5 | "filesystems_performance_metrics", 6 | "array_hardware_metrics", 7 | "array_space_metrics", 8 | "buckets_space_metrics", 9 | "filesystems_space_metrics", 10 | "array_info_metrics", 11 | "array_specific_performance_metrics", 12 | "clients_performance_metrics", 13 | "buckets_replica_metrics", 14 | "filesystems_replica_metrics", 15 | "usage_users_metrics", 16 | "usage_groups_metrics"] 17 | 18 | -------------------------------------------------------------------------------- /flasharray_collector/flasharray_metrics/host_volume_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class HostVolumeMetrics(): 5 | """ 6 | Base class for mapping FlashArray hosts to connected volumes 7 | """ 8 | 9 | def __init__(self, fa): 10 | self.fa = fa 11 | self.map_host_vol = GaugeMetricFamily('purefa_host_volumes_info', 12 | 'FlashArray host volumes connections', 13 | labels=['host', 'naaid']) 14 | 15 | def _map_host_vol(self): 16 | for hv in self.fa.get_host_volumes(): 17 | self.map_host_vol.add_metric([hv['host'], hv['naaid']], 1) 18 | 19 | 20 | 21 | def get_metrics(self): 22 | self._map_host_vol() 23 | yield self.map_host_vol 24 | 
-------------------------------------------------------------------------------- /extra/pure-helper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | 3 | # Application directory 4 | WORKDIR /app 5 | COPY . /app 6 | 7 | # Install dependencies and WSGI server 8 | RUN pip install --upgrade pip && \ 9 | pip install --no-cache-dir -r requirements.txt && \ 10 | pip install --no-cache-dir gunicorn 11 | 12 | # Run as non-root user 13 | RUN addgroup -S app && adduser -S -G app app 14 | USER app 15 | 16 | # Configure the image properties 17 | # gunicorn settings: bind any, 2 threads, log to 18 | # stdout/stderr (docker/k8s handles logs), anonymize request URL 19 | # end of log shows request time in seconds and size in bytes 20 | ENV GUNICORN_CMD_ARGS="--bind=0.0.0.0:9000 \ 21 | --workers=2 \ 22 | --access-logfile=- \ 23 | --error-logfile=- \ 24 | --access-logformat=\"%(t)s %(h)s %(U)s %(l)s %(T)s %(B)s\"" 25 | EXPOSE 9000 26 | ENTRYPOINT ["gunicorn", "pure_helper:app"] 27 | -------------------------------------------------------------------------------- /flasharray_collector/flasharray_metrics/array_info_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import InfoMetricFamily 2 | 3 | 4 | class ArrayInfoMetrics(): 5 | """ 6 | Base class for FlashArray Prometheus array info 7 | """ 8 | def __init__(self, fa): 9 | self.fa = fa 10 | self.info = None 11 | 12 | def _info(self): 13 | """Assemble a simple information metric defining the scraped system.""" 14 | array = self.fa.get_array() 15 | v = {'array_name': array['array_name'], 16 | 'system_id': array['id'], 17 | 'version': array['version']} 18 | if 'hostname' in array: 19 | v['hostname'] = array['hostname'] 20 | self.info = InfoMetricFamily('purefa', 'FlashArray system information', 21 | value = v) 22 | 23 | def get_metrics(self): 24 | self._info() 25 | yield self.info 26 | 
-------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/array_info_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import InfoMetricFamily 2 | 3 | 4 | class ArrayInfoMetrics(): 5 | """ 6 | Base class for FlashBlade Prometheus array info 7 | """ 8 | def __init__(self, fb): 9 | self.fb = fb 10 | self.info = None 11 | 12 | def _info(self): 13 | """Assemble a simple information metric defining the scraped system.""" 14 | info = self.fb.get_array_info() 15 | 16 | self.info = InfoMetricFamily('purefb', 'FlashBlade system information', 17 | value={'array_name': info.name, 18 | 'system_id': info.id, 19 | 'os': info.os, 20 | 'version': info.version 21 | }) 22 | 23 | def get_metrics(self): 24 | self._info() 25 | yield self.info 26 | -------------------------------------------------------------------------------- /Dockerfile.fa: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | # Application directory 4 | WORKDIR /app 5 | COPY pure_fa_exporter.py requirements.fa.txt /app/ 6 | COPY flasharray_collector /app/flasharray_collector 7 | 8 | # Install dependencies and WSGI server 9 | RUN pip install --upgrade pip && \ 10 | pip install --no-cache-dir --upgrade requests && \ 11 | pip install --no-cache-dir -r requirements.fa.txt 12 | 13 | # Run as non-root user 14 | RUN addgroup -S app && adduser -S -G app app 15 | USER app 16 | 17 | # Configure the image properties 18 | # gunicorn settings: bind any, 2 threads, log to 19 | # stdout/stderr (docker/k8s handles logs), anonymize request URL 20 | # end of log shows request time in seconds and size in bytes 21 | ENV GUNICORN_CMD_ARGS="--bind=0.0.0.0:9491 \ 22 | --workers=2 \ 23 | --access-logfile=- \ 24 | --error-logfile=- \ 25 | --access-logformat=\"%(t)s %(h)s %(U)s %(l)s %(T)s %(B)s\"" 26 | EXPOSE 9491 27 | ENTRYPOINT ["gunicorn", 
"pure_fa_exporter:app"] 28 | -------------------------------------------------------------------------------- /Dockerfile.fb: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | # Application directory 4 | WORKDIR /app 5 | COPY pure_fb_exporter.py requirements.fb.txt /app/ 6 | COPY flashblade_collector /app/flashblade_collector 7 | 8 | # Install dependencies and WSGI server 9 | RUN pip install --upgrade pip && \ 10 | pip install --no-cache-dir --upgrade requests && \ 11 | pip install --no-cache-dir -r requirements.fb.txt 12 | 13 | # Run as non-root user 14 | RUN addgroup -S app && adduser -S -G app app 15 | USER app 16 | 17 | # Configure the image properties 18 | # gunicorn settings: bind any, 2 threads, log to 19 | # stdout/stderr (docker/k8s handles logs), anonymize request URL 20 | # end of log shows request time in seconds and size in bytes 21 | ENV GUNICORN_CMD_ARGS="--bind=0.0.0.0:9491 \ 22 | --workers=2 \ 23 | --access-logfile=- \ 24 | --error-logfile=- \ 25 | --access-logformat=\"%(t)s %(h)s %(U)s %(l)s %(T)s %(B)s\"" 26 | EXPOSE 9491 27 | ENTRYPOINT ["gunicorn", "pure_fb_exporter:app"] 28 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | # Application directory 4 | WORKDIR /app 5 | COPY pure_exporter.py requirements.txt /app/ 6 | COPY flasharray_collector /app/flasharray_collector 7 | COPY flashblade_collector /app/flashblade_collector 8 | 9 | # Install dependencies and WSGI server 10 | RUN pip install --upgrade pip && \ 11 | pip install --no-cache-dir --upgrade requests && \ 12 | pip install --no-cache-dir -r requirements.txt 13 | 14 | # Run as non-root user 15 | RUN addgroup -S app && adduser -S -G app app 16 | USER app 17 | 18 | # Configure the image properties 19 | # gunicorn settings: bind any, 2 threads, log to 20 | # 
stdout/stderr (docker/k8s handles logs), anonymize request URL 21 | # end of log shows request time in seconds and size in bytes 22 | ENV GUNICORN_CMD_ARGS="--bind=0.0.0.0:9491 \ 23 | --workers=2 \ 24 | --access-logfile=- \ 25 | --error-logfile=- \ 26 | --access-logformat=\"%(t)s %(h)s %(U)s %(l)s %(T)s %(B)s\"" 27 | EXPOSE 9491 28 | ENTRYPOINT ["gunicorn", "pure_exporter:app"] 29 | -------------------------------------------------------------------------------- /extra/monitoring-stack/prometheus/pure.rules: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: example 3 | rules: 4 | - alert: HighRequestLatency 5 | expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 6 | for: 10m 7 | labels: 8 | severity: page 9 | annotations: 10 | summary: High request latency 11 | 12 | - alert: PrometheusConfigurationReload 13 | expr: prometheus_config_last_reload_successful != 1 14 | for: 5m 15 | labels: 16 | severity: error 17 | annotations: 18 | summary: "Prometheus configuration reload (instance {{ $labels.instance }})" 19 | description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 20 | 21 | - alert: AlertmanagerConfigurationReload 22 | expr: alertmanager_config_last_reload_successful != 1 23 | for: 5m 24 | labels: 25 | severity: error 26 | annotations: 27 | summary: "AlertManager configuration reload (instance {{ $labels.instance }})" 28 | description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" -------------------------------------------------------------------------------- /extra/monitoring-stack/prometheus/alert.rules: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: example 3 | rules: 4 | - alert: HighRequestLatency 5 | expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 6 | for: 10m 7 | labels: 8 | severity: page 9 | annotations: 10 | summary: High request latency 
11 | 12 | - alert: PrometheusConfigurationReload 13 | expr: prometheus_config_last_reload_successful != 1 14 | for: 5m 15 | labels: 16 | severity: error 17 | annotations: 18 | summary: "Prometheus configuration reload (instance {{ $labels.instance }})" 19 | description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 20 | 21 | - alert: AlertmanagerConfigurationReload 22 | expr: alertmanager_config_last_reload_successful != 1 23 | for: 5m 24 | labels: 25 | severity: error 26 | annotations: 27 | summary: "AlertManager configuration reload (instance {{ $labels.instance }})" 28 | description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" -------------------------------------------------------------------------------- /Makefile.mk: -------------------------------------------------------------------------------- 1 | IMAGE_NAMESPACE ?= quay.io/purestorage 2 | IMAGE_NAME ?= pure-exporter 3 | IMAGE_TAG ?= latest 4 | EXPORTER ?= pure_exporter 5 | REQUIREMENTS ?= requirements.txt 6 | DOCKERFILE ?= Dockerfile 7 | RUN_PORT ?= 9491 8 | TEST_PORT ?= 8123 9 | TIMEO ?= 30 10 | WORKERS ?= 2 11 | 12 | default: build 13 | 14 | .PHONY: all 15 | all: build test 16 | 17 | .PHONY: build 18 | build: $(DOCKERFILE) $(REQUIREMENTS) .dockerignore $(wildcard *.py) 19 | docker build . 
-f $(DOCKERFILE) -t $(IMAGE_NAMESPACE)/$(IMAGE_NAME):$(IMAGE_TAG) 20 | 21 | .PHONY: test 22 | test: 23 | (GUNICORN_CMD_ARGS="--bind=0.0.0.0:$(TEST_PORT) --workers=2 --access-logfile=- \ 24 | --timeout $(TIMEO) --workers $(WORKERS) \ 25 | --error-logfile=- --access-logformat=\"%(t)s %(h)s %(U)s %(l)s %(T)s %(B)s\"" \ 26 | gunicorn $(EXPORTER):app) 27 | 28 | .PHONY: test-docker 29 | test-docker: 30 | (GUNICORN_CMD_ARGS="--bind=0.0.0.0:$(RUN_PORT) --workers=2 --access-logfile=- \ 31 | --timeout $(TIMEO) --workers $(WORKERS) \ 32 | --error-logfile=- --access-logformat=\"%(t)s %(h)s %(U)s %(l)s %(T)s %(B)s\"" \ 33 | docker run --rm -p $(TEST_PORT):$(RUN_PORT) $(IMAGE_NAMESPACE)/$(IMAGE_NAME):$(IMAGE_TAG)) 34 | -------------------------------------------------------------------------------- /Makefile.fa: -------------------------------------------------------------------------------- 1 | IMAGE_NAMESPACE = quay.io/purestorage 2 | IMAGE_NAME = pure-fa-exporter 3 | IMAGE_TAG ?= latest 4 | EXPORTER ?= pure_fa_exporter 5 | REQUIREMENTS = requirements.fa.txt 6 | DOCKERFILE ?= Dockerfile.fa 7 | RUN_PORT ?= 9491 8 | TEST_PORT ?= 8123 9 | TIMEO ?= 30 10 | WORKERS ?= 2 11 | 12 | default: build 13 | 14 | .PHONY: build 15 | build: 16 | make IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) IMAGE_NAME=$(IMAGE_NAME) \ 17 | IMAGE_TAG=$(IMAGE_TAG) REQUIREMENTS=$(REQUIREMENTS) DOCKERFILE=$(DOCKERFILE) \ 18 | RUN_PORT=$(RUN_PORT) TIMEO=$(TIMEO) WORKERS=$(WORKERS) EXPORTER=$(EXPORTER) -f Makefile.mk 19 | 20 | .PHONY: test 21 | test: 22 | make IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) IMAGE_NAME=$(IMAGE_NAME) \ 23 | IMAGE_TAG=$(IMAGE_TAG) REQUIREMENTS=$(REQUIREMENTS) DOCKERFILE=$(DOCKERFILE) \ 24 | RUN_PORT=$(RUN_PORT) TIMEO=$(TIMEO) WORKERS=$(WORKERS) EXPORTER=$(EXPORTER) -f Makefile.mk test 25 | 26 | .PHONY: test-docker 27 | test-docker: build 28 | make IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) IMAGE_NAME=$(IMAGE_NAME) \ 29 | IMAGE_TAG=$(IMAGE_TAG) REQUIREMENTS=$(REQUIREMENTS) DOCKERFILE=$(DOCKERFILE) \ 30 | 
RUN_PORT=$(RUN_PORT) TIMEO=$(TIMEO) WORKERS=$(WORKERS) -f Makefile.mk test-docker 31 | -------------------------------------------------------------------------------- /Makefile.fb: -------------------------------------------------------------------------------- 1 | IMAGE_NAMESPACE = quay.io/purestorage 2 | IMAGE_NAME = pure-fb-exporter 3 | IMAGE_TAG ?= latest 4 | EXPORTER ?= pure_fb_exporter 5 | REQUIREMENTS ?= requirements.fb.txt 6 | DOCKERFILE ?= Dockerfile.fb 7 | RUN_PORT ?= 9491 8 | TEST_PORT ?= 8123 9 | TIMEO ?= 30 10 | WORKERS ?= 2 11 | 12 | default: build 13 | 14 | .PHONY: build 15 | build: 16 | make IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) IMAGE_NAME=$(IMAGE_NAME) \ 17 | IMAGE_TAG=$(IMAGE_TAG) REQUIREMENTS=$(REQUIREMENTS) DOCKERFILE=$(DOCKERFILE) \ 18 | RUN_PORT=$(RUN_PORT) TIMEO=$(TIMEO) WORKERS=$(WORKERS) EXPORTER=$(EXPORTER) -f Makefile.mk 19 | 20 | .PHONY: test 21 | test: 22 | make IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) IMAGE_NAME=$(IMAGE_NAME) \ 23 | IMAGE_TAG=$(IMAGE_TAG) REQUIREMENTS=$(REQUIREMENTS) DOCKERFILE=$(DOCKERFILE) \ 24 | RUN_PORT=$(RUN_PORT) TIMEO=$(TIMEO) WORKERS=$(WORKERS) EXPORTER=$(EXPORTER) -f Makefile.mk test 25 | 26 | .PHONY: test-docker 27 | test-docker: build 28 | make IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) IMAGE_NAME=$(IMAGE_NAME) \ 29 | IMAGE_TAG=$(IMAGE_TAG) REQUIREMENTS=$(REQUIREMENTS) DOCKERFILE=$(DOCKERFILE) \ 30 | RUN_PORT=$(RUN_PORT) TIMEO=$(TIMEO) WORKERS=$(WORKERS) -f Makefile.mk test-docker 31 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/usage_users_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class UsageUsersMetrics(): 5 | """ 6 | Base class for FlashBlade Prometheus users quota metrics 7 | """ 8 | def __init__(self, fb): 9 | self.fb = fb 10 | self.usage = None 11 | 12 | def _usage(self): 13 | """ 14 | Create metrics of gauge type for users 
usage indicators. 15 | """ 16 | self.usage = GaugeMetricFamily('purefb_filesystem_user_usage_bytes', 17 | 'FlashBlade filesystem users usage', 18 | labels=['name', 'user_name', 'uid', 19 | 'dimension']) 20 | for uu in self.fb.get_users_usage(): 21 | uname = uu.user.name if uu.user.name is not None else '' 22 | uid = str(uu.user.id) 23 | self.usage.add_metric( 24 | [uu.file_system.name, uname, uid, 'quota'], uu.quota if uu.quota is not None else 0) 25 | self.usage.add_metric( 26 | [uu.file_system.name, uname, uid, 'usage'], uu.usage if uu.usage is not None else 0) 27 | 28 | def get_metrics(self): 29 | self._usage() 30 | yield self.usage 31 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/usage_groups_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class UsageGroupsMetrics(): 5 | """ 6 | Base class for FlashBlade Prometheus groups usage metrics 7 | """ 8 | def __init__(self, fb): 9 | self.fb = fb 10 | self.usage = None 11 | 12 | def _usage(self): 13 | """ 14 | Create metrics of gauge type for groups usage indicators. 
15 | """ 16 | self.usage = GaugeMetricFamily('purefb_filesystem_group_usage_bytes', 17 | 'FlashBlade filesystem groups usage', 18 | labels=['name', 'group_name', 'gid', 19 | 'dimension']) 20 | for gu in self.fb.get_groups_usage(): 21 | grpname = gu.group.name if gu.group.name is not None else '' 22 | gid = str(gu.group.id) 23 | self.usage.add_metric( 24 | [gu.file_system.name, grpname, gid, 'quota'], gu.quota if gu.quota is not None else 0) 25 | self.usage.add_metric( 26 | [gu.file_system.name, grpname, gid, 'usage'], gu.usage if gu.usage is not None else 0) 27 | 28 | def get_metrics(self): 29 | self._usage() 30 | yield self.usage 31 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/array_events_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class ArrayEventsMetrics(): 5 | """ 6 | Base class for FlashBlade Prometheus events metrics 7 | """ 8 | def __init__(self, fb): 9 | self.fb = fb 10 | self.events = GaugeMetricFamily('purefb_open_events_total', 11 | 'FlashBlade number of open events', 12 | labels=['severity']) 13 | self.fb_events = fb.get_open_alerts() 14 | 15 | def _open_events(self): 16 | """ 17 | Create a metric of gauge type for the number of open alerts: 18 | critical, warning and info, with the severity as label. 
# Increment each counter for each type of event
30 | """ 31 | self._mk_metric(self.performance, 32 | self.fa.get_network_interfaces(), 33 | mappings.array_network_interface_mapping) 34 | 35 | def get_metrics(self): 36 | self._performance() 37 | yield self.performance 38 | -------------------------------------------------------------------------------- /flasharray_collector/flasharray_metrics/array_events_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class ArrayEventsMetrics(): 5 | """ 6 | Base class for FlashArray Prometheus events metrics 7 | """ 8 | def __init__(self, fa): 9 | self.fa = fa 10 | self.open_events = None 11 | 12 | def _open_events(self): 13 | """ 14 | Create a metric of gauge type for the number of open alerts: 15 | critical, warning and info, with the severity as label. 16 | Metrics values can be iterated over. 17 | """ 18 | self.open_events = GaugeMetricFamily('purefa_alerts_total', 19 | 'Number of alert events', 20 | labels=['severity']) 21 | 22 | # Inrement each counter for each type of event 23 | c_crit, c_warn, c_info = 0, 0, 0 24 | for alert in self.fa.get_open_alerts(): 25 | if alert['current_severity'] == 'critical': 26 | c_crit += 1 27 | elif alert['current_severity'] == 'warning': 28 | c_warn += 1 29 | elif alert['current_severity'] == 'info': 30 | c_info += 1 31 | 32 | self.open_events.add_metric(['critical'], c_crit) 33 | self.open_events.add_metric(['warning'], c_warn) 34 | self.open_events.add_metric(['info'], c_info) 35 | 36 | def get_metrics(self): 37 | self._open_events() 38 | yield self.open_events 39 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/filesystems_replica_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class FilesystemsReplicaMetrics(): 5 | """ 6 | Base class for 
FlashBlade Prometheus filesystem replica link metrics 7 | """ 8 | def __init__(self, fb): 9 | self.fb = fb 10 | self.replica_links_lag = GaugeMetricFamily('purefb_filesystems_links_lag_msec', 11 | 'FlashBlade filesystem links lag', 12 | labels=['name', 'direction', 'remote_name', 13 | 'remote_filesystem_name', 'status']) 14 | 15 | def _replica_links_lag(self): 16 | """ 17 | Create metrics of gauge type for filesystem replica link lag, with the 18 | local filesystem name, replication direction, remote array name, 19 | remote filesystem name and replication status as labels. 20 | """ 21 | for f in self.fb.get_filesystem_replica_links(): 22 | self.replica_links_lag.add_metric([f.local_file_system.name, 23 | f.direction, 24 | f.remote.name, 25 | f.remote_file_system.name, 26 | f.status], -1 if f.lag is None else f.lag) 27 | 28 | def get_metrics(self): 29 | self._replica_links_lag() 30 | yield self.replica_links_lag 31 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/buckets_replica_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class BucketsReplicaMetrics(): 5 | """ 6 | Base class for FlashBlade Prometheus buckets replication metrics 7 | """ 8 | def __init__(self, fb): 9 | self.fb = fb 10 | self.replica_links = GaugeMetricFamily( 11 | 'purefb_bucket_replica_links_lag_msec', 12 | 'FlashBlade bucket replica links lag', 13 | labels=['name', 14 | 'direction', 15 | 'remote_name', 16 | 'remote_bucket_name', 17 | 'remote_account', 18 | 'status']) 19 | self.r_links = fb.get_bucket_replica_links() 20 | 21 | def _replica_links(self): 22 | """ 23 | Create metrics of gauge type for bucket indicators, with the 24 | account name and the bucket name as labels. 
25 | """ 26 | for l in self.r_links: 27 | self.replica_links.add_metric([l.local_bucket.name, 28 | l.direction, 29 | l.remote.name, 30 | l.remote_bucket.name, 31 | l.remote_credentials.name, 32 | l.status], -1 if l.lag is None else l.lag) 33 | 34 | def get_metrics(self): 35 | self._replica_links() 36 | yield self.replica_links 37 | -------------------------------------------------------------------------------- /extra/monitoring-stack/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | prometheus: 5 | container_name: prometheus 6 | image: prom/prometheus:latest 7 | restart: always 8 | ports: 9 | - 9090:9090 10 | volumes: 11 | - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml 12 | - ./prometheus/pure.rules:/etc/prometheus/pure.rules 13 | - prometheus_data:/prometheus 14 | command: 15 | - --config.file=/etc/prometheus/prometheus.yml 16 | - --storage.tsdb.path=/prometheus 17 | - --storage.tsdb.retention.time=5d 18 | 19 | prometheus-alertmanager: 20 | container_name: prometheus-alertmanager 21 | image: prom/alertmanager:latest 22 | restart: always 23 | ports: 24 | - 9093:9093 25 | volumes: 26 | - ./alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml 27 | - alertmanager_data:/alertmanager 28 | command: 29 | - --config.file=/etc/alertmanager/alertmanager.yml 30 | 31 | pure-exporter: 32 | container_name: pure-exporter 33 | image: genegatpure/pure-exporter 34 | restart: always 35 | ports: 36 | - 9491:9491 37 | 38 | pure-helper: 39 | container_name: pure-helper 40 | image: purestorage/pure-helper 41 | restart: always 42 | ports: 43 | - 9000:9000 44 | 45 | grafana: 46 | container_name: grafana 47 | image: grafana/grafana:latest 48 | restart: always 49 | depends_on: 50 | - prometheus 51 | ports: 52 | - 3000:3000 53 | volumes: 54 | - ./grafana/plugins:/var/lib/grafana/plugins 55 | - ./grafana/grafana.ini:/etc/grafana/grafana.ini 56 | - grafana_data:/var/lib/grafana 
from prometheus_client.core import GaugeMetricFamily


class FilesystemsSpaceMetrics():
    """
    Base class for FlashBlade Prometheus filesystem space metrics.
    """
    def __init__(self, fb):
        self.fb = fb
        self.data_reduction = GaugeMetricFamily('purefb_filesystems_data_reduction',
                                                'FlashBlade filesystems data reduction',
                                                labels=['name'])
        self.space = GaugeMetricFamily('purefb_filesystems_space_bytes',
                                       'FlashBlade filesystems space',
                                       labels=['name', 'dimension'])
        # Retrieved once; shared by both metric builders below.
        self.filesystems = fb.get_filesystems()

    def _data_reduction(self):
        """
        Create gauge metrics for the filesystems data reduction ratio,
        with filesystem name as label. A missing ratio is reported as 0.
        """
        for f in self.filesystems:
            self.data_reduction.add_metric([f.name], f.space.data_reduction if f.space.data_reduction is not None else 0)

    def _space(self):
        """
        Create gauge metrics for filesystems space indicators,
        with filesystem name and space dimension as labels.
        """
        # NOTE(review): unlike data_reduction, these values are not guarded
        # against None — presumably the API always reports them; confirm.
        for f in self.filesystems:
            self.space.add_metric([f.name, 'provisioned'], f.provisioned)
            self.space.add_metric([f.name, 'snapshots'], f.space.snapshots)
            self.space.add_metric([f.name, 'total_physical'], f.space.total_physical)
            self.space.add_metric([f.name, 'virtual'], f.space.virtual)
            self.space.add_metric([f.name, 'unique'], f.space.unique)

    def get_metrics(self):
        """Yield the data-reduction and space metric families."""
        self._data_reduction()
        self._space()
        yield self.data_reduction
        yield self.space
h['snapshots'] if h['snapshots'] is not None else 0) 35 | self.allocated.add_metric([h['name'], 'total'], h['total'] if h['total'] is not None else 0) 36 | 37 | def get_metrics(self): 38 | self._data_reduction() 39 | self._size() 40 | self._allocated() 41 | yield self.data_reduction 42 | yield self.size 43 | yield self.allocated 44 | -------------------------------------------------------------------------------- /extra/monitoring-stack/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 10s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 4 | evaluation_interval: 15m # Evaluate rules every 15 seconds. The default is every 1 minute. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Alertmanager configuration 8 | alerting: 9 | alertmanagers: 10 | - static_configs: 11 | - targets: 12 | - alertmanager:9093 13 | 14 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 15 | #rule_files: 16 | # - 'alert.rules' 17 | 18 | # A scrape configuration containing exactly one endpoint to scrape: 19 | # Here it's Prometheus itself. 
from prometheus_client.core import GaugeMetricFamily


class PodSpaceMetrics():
    """
    Base class for FlashArray Prometheus pod space metrics.
    """

    def __init__(self, fa):
        self.fa = fa
        self.data_reduction = GaugeMetricFamily(
            'purefa_pod_space_datareduction_ratio',
            'FlashArray pod data reduction ratio',
            labels=['pod'],
            unit='ratio')
        self.size = GaugeMetricFamily('purefa_pod_space_size_bytes',
                                      'FlashArray pod size',
                                      labels=['pod'])
        self.allocated = GaugeMetricFamily('purefa_pod_space_bytes',
                                           'FlashArray pod allocated space',
                                           labels=['pod', 'dimension'])

    @staticmethod
    def _zero_if_none(value):
        """Map a missing (None) value to 0 so gauges always get a number."""
        return 0 if value is None else value

    def _data_reduction(self):
        """
        Create gauge metrics for pod data reduction, with pod name as label.
        """
        for pod in self.fa.get_pods():
            self.data_reduction.add_metric(
                [pod['name']], self._zero_if_none(pod['data_reduction']))

    def _size(self):
        """
        Create gauge metrics for pod size, with pod name as label.
        """
        for pod in self.fa.get_pods():
            self.size.add_metric([pod['name']], self._zero_if_none(pod['size']))

    def _allocated(self):
        """
        Create gauge metrics for pod allocated space, with pod name and
        space dimension as labels.
        """
        for pod in self.fa.get_pods():
            for dimension in ('volumes', 'snapshots', 'total'):
                self.allocated.add_metric(
                    [pod['name'], dimension], self._zero_if_none(pod[dimension]))

    def get_metrics(self):
        """Yield the data-reduction, size and allocated metric families."""
        self._data_reduction()
        self._size()
        self._allocated()
        yield self.data_reduction
        yield self.size
        yield self.allocated
from prometheus_client.core import GaugeMetricFamily


class BucketsPerformanceMetrics():
    """
    Base class for FlashBlade Prometheus buckets performance metrics.
    """
    def __init__(self, fb):
        self.fb = fb
        self.latency = GaugeMetricFamily('purefb_bucket_performance_latency_usec',
                                         'FlashBlade bucket latency',
                                         labels=['name', 'dimension'])
        # NOTE(review): the samples added below are operation rates (per
        # second), although the metric name says bytes — confirm intent.
        self.throughput = GaugeMetricFamily('purefb_bucket_performance_throughput_bytes',
                                            'FlashBlade bucket throughput',
                                            labels=['name', 'dimension'])
        # Retrieved once; shared by _latency() and _throughput().
        self.buckets_performance = self.fb.get_buckets_performance()

    def _latency(self):
        """
        Create gauge metrics for bucket operation latency, with the bucket
        name and the operation kind (dimension) as labels.
        """
        for b in self.buckets_performance:
            self.latency.add_metric([b.name, 'read_buckets'], b.usec_per_read_bucket_op)
            self.latency.add_metric([b.name, 'read_objects'], b.usec_per_read_object_op)
            self.latency.add_metric([b.name, 'write_buckets'], b.usec_per_write_bucket_op)
            self.latency.add_metric([b.name, 'write_objects'], b.usec_per_write_object_op)
            self.latency.add_metric([b.name, 'other'], b.usec_per_other_op)

    def _throughput(self):
        """
        Create gauge metrics for bucket operation rates, with the bucket
        name and the operation kind (dimension) as labels.
        """
        for b in self.buckets_performance:
            self.throughput.add_metric([b.name, 'read_buckets'], b.read_buckets_per_sec)
            self.throughput.add_metric([b.name, 'read_objects'], b.read_objects_per_sec)
            self.throughput.add_metric([b.name, 'write_buckets'], b.write_buckets_per_sec)
            self.throughput.add_metric([b.name, 'write_objects'], b.write_objects_per_sec)
            self.throughput.add_metric([b.name, 'other'], b.others_per_sec)

    def get_metrics(self):
        """Yield the latency and throughput metric families."""
        self._latency()
        self._throughput()
        yield self.latency
        yield self.throughput
from prometheus_client.core import GaugeMetricFamily
from . import mappings


class HostPerformanceMetrics():
    """
    Base class for FlashArray Prometheus host performance metrics.
    """

    def __init__(self, fa):
        self.fa = fa
        self.latency = GaugeMetricFamily('purefa_host_performance_latency_usec',
                                         'FlashArray host IO latency',
                                         labels=['host', 'dimension'])
        self.bandwidth = GaugeMetricFamily('purefa_host_performance_bandwidth_bytes',
                                           'FlashArray host bandwidth',
                                           labels=['host', 'dimension'])
        self.iops = GaugeMetricFamily('purefa_host_performance_iops',
                                      'FlashArray host IOPS',
                                      labels=['host', 'dimension'])

    def _mk_metric(self, metric, entity_list, mapping):
        """
        Populate *metric* from every entity in *entity_list*: one sample per
        mapping key present on the entity, labeled with the entity name and
        the mapped dimension.
        """
        for entity in entity_list:
            name = entity['name']
            for key, dimension in mapping.items():
                if key in entity:
                    metric.add_metric([name, dimension], entity[key])

    def _latency(self):
        """Create host latency gauges."""
        self._mk_metric(self.latency,
                        self.fa.get_hosts(),
                        mappings.host_latency_mapping)

    def _bandwidth(self):
        """Create host bandwidth gauges."""
        self._mk_metric(self.bandwidth,
                        self.fa.get_hosts(),
                        mappings.host_bandwidth_mapping)

    def _iops(self):
        """Create host IOPS gauges."""
        self._mk_metric(self.iops,
                        self.fa.get_hosts(),
                        mappings.host_iops_mapping)

    def get_metrics(self):
        """Yield the latency, bandwidth and IOPS metric families."""
        self._latency()
        self._bandwidth()
        self._iops()
        yield self.latency
        yield self.bandwidth
        yield self.iops
import mappings 3 | 4 | 5 | class PodPerformanceMetrics(): 6 | """ 7 | Base class for FlashArray Prometheus pod performance metrics 8 | """ 9 | 10 | def __init__(self, fa): 11 | self.fa = fa 12 | self.latency = GaugeMetricFamily('purefa_pod_performance_latency_usec', 13 | 'FlashArray pod IO latency', 14 | labels=['pod', 'dimension']) 15 | self.bandwidth = GaugeMetricFamily('purefa_pod_performance_bandwidth_bytes', 16 | 'FlashArray pod bandwidth', 17 | labels=['pod', 'dimension']) 18 | self.iops = GaugeMetricFamily('purefa_pod_performance_iops', 19 | 'FlashArray pod IOPS', 20 | labels=['pod', 'dimension']) 21 | 22 | def _mk_metric(self, metric, entity_list, mapping): 23 | """ 24 | Create metrics of gauge type, with given name 'name' and 25 | dimension as label. 26 | Metrics values can be iterated over. 27 | """ 28 | for e in entity_list: 29 | for k in mapping: 30 | if k in e: 31 | metric.add_metric([e['name'], mapping[k]], e[k]) 32 | 33 | def _latency(self): 34 | """ 35 | Create pods latency metrics of gauge type, with pod name and 36 | dimension as label. 37 | """ 38 | self._mk_metric(self.latency, 39 | self.fa.get_pods(), 40 | mappings.pod_latency_mapping) 41 | 42 | def _bandwidth(self): 43 | """ 44 | Create pods bandwidth metrics of gauge type, with pod name and 45 | dimension as label. 46 | """ 47 | self._mk_metric(self.bandwidth, 48 | self.fa.get_pods(), 49 | mappings.pod_bandwidth_mapping) 50 | 51 | 52 | def _iops(self): 53 | """ 54 | Create IOPS bandwidth metrics of gauge type, with pod name and 55 | dimension as label. 
from prometheus_client.core import GaugeMetricFamily


class PodStatusMetrics():
    """
    Base class for FlashArray Prometheus pod status metrics.
    """

    def __init__(self, fa):
        self.fa = fa
        # Metric families are (re)built by _status() on each collection.
        self.status = None

    def _status(self):
        """
        Create pods status metrics of gauge type, with pod name, array id and
        array name as label.
        Metrics values can be iterated over.
        """
        self.status = GaugeMetricFamily('purefa_pod_status',
                                        'FlashArray pod status',
                                        labels=['pod', 'array_id', 'array_name'])
        self.mediator_status = GaugeMetricFamily('purefa_pod_mediator_status',
                                                 'FlashArray pod mediatorstatus',
                                                 labels=['pod', 'array_id', 'array_name'])
        self.progress = GaugeMetricFamily('purefa_pod_progress_percent',
                                          'FlashArray pod synchronization status percentage',
                                          labels=['pod', 'array_id', 'array_name'])
        for p in self.fa.get_pods():
            # The original duplicated this body for arrays[0] and arrays[1];
            # iterating covers single-array pods and any member count.
            for a in p['arrays']:
                labels = [p['name'], a['array_id'], a['name']]
                self.status.add_metric(labels, 1 if a['status'] == 'online' else 0)
                self.mediator_status.add_metric(labels, 1 if a['mediator_status'] == 'online' else 0)
                if 'progress' in a:
                    # 101 is the sentinel for "progress present but unknown".
                    self.progress.add_metric(labels, a['progress'] if a['progress'] is not None else 101)

    def get_metrics(self):
        """Yield the status, mediator status and progress metric families."""
        self._status()
        yield self.status
        yield self.mediator_status
        yield self.progress
25 | """ 26 | for b in self.buckets: 27 | self.data_reduction.add_metric([b.account.name, b.name], 28 | b.space.data_reduction if b.space.data_reduction is not None else 0) 29 | 30 | def _objects(self): 31 | """ 32 | Create metrics of gauge type for buckets space indicators, with the 33 | account name and the bucket name as labels. 34 | """ 35 | for b in self.buckets: 36 | self.objects.add_metric([b.account.name, b.name], b.object_count) 37 | 38 | def _space(self): 39 | """ 40 | Create metrics of gauge type for buckets space indicators, with the 41 | account name and the bucket name as labels. 42 | """ 43 | for b in self.buckets: 44 | self.space.add_metric([b.account.name, b.name, 'snapshots'], b.space.snapshots) 45 | self.space.add_metric([b.account.name, b.name, 'total_physical'], b.space.total_physical) 46 | self.space.add_metric([b.account.name, b.name, 'virtual'], b.space.virtual) 47 | self.space.add_metric([b.account.name, b.name, 'unique'], b.space.unique) 48 | 49 | def get_metrics(self): 50 | self._data_reduction() 51 | self._objects() 52 | self._space() 53 | yield self.data_reduction 54 | yield self.objects 55 | yield self.space 56 | -------------------------------------------------------------------------------- /flasharray_collector/flasharray_metrics/volume_performance_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | from . 
from prometheus_client.core import GaugeMetricFamily
from . import mappings
import re


class VolumePerformanceMetrics():
    """
    Base class for FlashArray Prometheus volume performance metrics.
    """

    def __init__(self, fa):
        self.fa = fa
        self.latency = GaugeMetricFamily('purefa_volume_performance_latency_usec',
                                         'FlashArray volume IO latency',
                                         labels=['volume', 'naaid', 'pod', 'vgroup', 'dimension'])
        self.bandwidth = GaugeMetricFamily('purefa_volume_performance_throughput_bytes',
                                           'FlashArray volume throughput',
                                           labels=['volume', 'naaid', 'pod', 'vgroup', 'dimension'])
        self.iops = GaugeMetricFamily('purefa_volume_performance_iops',
                                      'FlashArray volume IOPS',
                                      labels=['volume', 'naaid', 'pod', 'vgroup', 'dimension'])

    @staticmethod
    def _split_vname(vname):
        """
        Split a volume name on '::' into [pod, volume]; plain (non-pod)
        names get '/' as their pod placeholder. Mirrors the helper used by
        the volume space collector, replacing a regex recompiled per call.
        """
        parts = vname.split('::')
        if len(parts) == 1:
            parts = ['/'] + parts
        return parts

    def _mk_metric(self, metric, entity_list, mapping):
        """
        Populate *metric* with one sample per mapping key present on each
        entity, labeled with volume name, naaid, pod, vgroup and dimension.
        """
        for e in entity_list:
            # Hoisted out of the key loop: the name split is per entity.
            parts = self._split_vname(e['name'])
            for k in mapping:
                if k in e:
                    metric.add_metric([parts[1], e['naaid'], parts[0], e['vgroup'], mapping[k]], e[k])

    def _latency(self):
        """
        Create volumes latency metrics of gauge type.
        """
        self._mk_metric(self.latency,
                        self.fa.get_volumes(),
                        mappings.volume_latency_mapping)

    def _bandwidth(self):
        """
        Create volumes bandwidth metrics of gauge type.
        """
        self._mk_metric(self.bandwidth,
                        self.fa.get_volumes(),
                        mappings.volume_bandwidth_mapping)

    def _iops(self):
        """
        Create volumes IOPS metrics of gauge type.
        """
        self._mk_metric(self.iops,
                        self.fa.get_volumes(),
                        mappings.volume_iops_mapping)

    def get_metrics(self):
        """Yield the latency, bandwidth and IOPS metric families."""
        self._latency()
        self._bandwidth()
        self._iops()
        yield self.latency
        yield self.bandwidth
        yield self.iops
34 | """ 35 | for v in self.fa.get_volumes(): 36 | v_name = self.__split_vname(v['name']) 37 | self.data_reduction.add_metric([v_name[1], v['naaid'], v_name[0], v['vgroup']], v['data_reduction'] if v['data_reduction'] is not None else 0) 38 | 39 | 40 | def _size(self): 41 | for v in self.fa.get_volumes(): 42 | v_name = self.__split_vname(v['name']) 43 | self.size.add_metric([v_name[1], v['naaid'], v_name[0], v['vgroup']], v['size'] if v['size'] is not None else 0) 44 | 45 | def _allocated(self): 46 | for v in self.fa.get_volumes(): 47 | v_name = self.__split_vname(v['name']) 48 | self.allocated.add_metric([v_name[1], v['naaid'], v_name[0], v['vgroup'], 'volumes'], v['volumes'] if v['volumes'] is not None else 0) 49 | self.allocated.add_metric([v_name[1], v['naaid'], v_name[0], v['vgroup'], 'snapshots'], v['snapshots'] if v['snapshots'] is not None else 0) 50 | self.allocated.add_metric([v_name[1], v['naaid'], v_name[0], v['vgroup'], 'total'], v['total'] if v['total'] is not None else 0) 51 | 52 | def get_metrics(self): 53 | self._data_reduction() 54 | self._size() 55 | self._allocated() 56 | yield self.data_reduction 57 | yield self.size 58 | yield self.allocated 59 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/array_specific_performance_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | from . 
from prometheus_client.core import GaugeMetricFamily
from . import array_specific_performance_mapping


class ArraySpecificPerformanceMetrics():
    """
    Base class for FlashBlade Prometheus array specific performance metrics.
    """

    def __init__(self, fb):
        self.fb = fb
        self.latency = GaugeMetricFamily(
            'purefb_array_specific_performance_latency_usec',
            'FlashBlade array specific latency',
            labels=['protocol', 'dimension'])
        self.iops = GaugeMetricFamily('purefb_array_specific_performance_iops',
                                      'FlashBlade array specific IOPS',
                                      labels=['protocol', 'dimension'])

    def _collect(self, metric, mapping_by_proto):
        """
        Populate *metric* with one sample per mapped attribute of each
        protocol's performance object, labeled with protocol and dimension.
        Replaces the identical inner helper duplicated in _latency/_iops.
        Protocols with no data (None) are skipped.
        """
        for proto, mapping in mapping_by_proto.items():
            m = self.fb.get_array_specific_performance(proto)
            if m is None:
                continue
            # SDK object attributes carry a leading underscore; strip it
            # before looking up the metric dimension in the mapping.
            for _k in getattr(m, '__dict__'):
                k = _k[1:]
                v = getattr(m, _k)
                if k in mapping and v is not None:
                    metric.add_metric([proto, mapping[k]], v)

    def _latency(self):
        """
        Create array specific latency performance metrics of gauge type.
        """
        self._collect(self.latency, {
            'nfs': array_specific_performance_mapping.nfs_array_specific_latency,
            'http': array_specific_performance_mapping.http_array_specific_latency,
            's3': array_specific_performance_mapping.s3_array_specific_latency,
        })

    def _iops(self):
        """
        Create array specific iops performance metrics of gauge type.
        """
        self._collect(self.iops, {
            'nfs': array_specific_performance_mapping.nfs_array_specific_iops,
            'http': array_specific_performance_mapping.http_array_specific_iops,
            's3': array_specific_performance_mapping.s3_array_specific_iops,
        })

    def get_metrics(self):
        """Yield the latency and IOPS metric families."""
        self._latency()
        self._iops()
        yield self.latency
        yield self.iops
'total_errors_per_sec': 'errors', 21 | 'transmitted_bytes_per_sec': 'tx_bytes', 22 | 'transmitted_carrier_errors_per_sec': 'tx_carrier_errors', 23 | 'transmitted_dropped_errors_per_sec': 'tx_dropped_errors', 24 | 'transmitted_packets_per_sec': 'tx_packets'} 25 | 26 | array_bandwidth_mapping = {'output_per_sec': 'read', 27 | 'input_per_sec': 'write', 28 | 'mirrored_input_per_sec': 'mirrored_write'} 29 | 30 | array_iops_mapping = {'reads_per_sec': 'read', 31 | 'writes_per_sec': 'write', 32 | 'mirrored_writes_per_sec': 'mirrored_write'} 33 | 34 | array_bsize_mapping = {'bytes_per_read': 'read', 35 | 'bytes_per_write': 'write', 36 | 'bytes_per_mirrored_write': 'mirrored_write'} 37 | 38 | array_qdepth_mapping = {'queue_depth': ''} 39 | 40 | array_used_mapping = {'shared_space': 'shared', 41 | 'system': 'system', 42 | 'volumes': 'volumes', 43 | 'snapshots': 'snapshots', 44 | 'replication': 'replication'} 45 | 46 | array_drr_mapping = {'data_reduction': ''} 47 | 48 | array_capacity_mapping = {'capacity': ''} 49 | 50 | array_provisioned_mapping = {'provisioned': ''} 51 | 52 | volume_latency_mapping = array_latency_mapping 53 | volume_bandwidth_mapping = array_bandwidth_mapping 54 | volume_iops_mapping = array_iops_mapping 55 | 56 | host_latency_mapping = array_latency_mapping 57 | host_bandwidth_mapping = array_bandwidth_mapping 58 | host_iops_mapping = array_iops_mapping 59 | 60 | pod_latency_mapping = array_latency_mapping 61 | pod_bandwidth_mapping = array_bandwidth_mapping 62 | pod_iops_mapping = array_iops_mapping 63 | -------------------------------------------------------------------------------- /extra/pure-helper/README.md: -------------------------------------------------------------------------------- 1 | # Pure Storage helper 2 | Basic volumes and hosts information retriever for Pure Storage FlashArrays. 
3 | 4 | 5 | ### Overview 6 | 7 | This applications is meant to provide an ancillary method to correlate hosts to volumes and vice-versa from a Pure Storage FlashArray, in order to help visualizing the information for tools like Grafana. 8 | In the usual utilization scenario, this application works in conjunction with the Pure Prometheus exporter, Prometheus and Grafana for monitoring Pure Storage FlashArray. 9 | 10 | ### Building and Deploying 11 | 12 | To build and deploy the application via Docker, your local linux user should be added to the `docker` group in order to be able to communicate with the Docker daemon. (If this is not possible, you can still use sudo) 13 | 14 | This can be done with this command in the context of your user: 15 | ```bash 16 | # add user to group 17 | sudo usermod -aG docker $(whoami) 18 | # apply the new group (no logout required) 19 | newgrp docker 20 | ``` 21 | 22 | An included Makefile takes care of the necessary build steps: 23 | ```bash 24 | make 25 | ``` 26 | 27 | To run a simple instance of the helper, run: 28 | ```bash 29 | make test 30 | ``` 31 | 32 | The Makefile currently features these targets: 33 | - **build** - builds the docker image with preconfigured tags. 34 | - **test** - spins up a new docker container with all required parameters. 35 | - **all** - runs _build_ and then _test_ 36 | 37 | 38 | ### Local development 39 | 40 | The application is usually not run by itself, but rather with the gunicorn WSGI server. If you want to contribute to the development, you can run the helper locally without a WSGI server, by executing the application directly. 41 | 42 | The following commands are required for a development setup: 43 | ```bash 44 | # it is recommended to use virtual python environments! 
45 | python -m venv env 46 | source ./env/bin/activate 47 | 48 | # install dependencies 49 | python -m pip install -r requirements.txt 50 | 51 | # run the application in debug mode 52 | python pure_helper.py 53 | ``` 54 | 55 | 56 | ### Quering endpoints 57 | 58 | The helper application uses a RESTful API schema to provide Prometheus scraping endpoints. 59 | 60 | Type | URL | required GET parameters 61 | ---|---|--- 62 | host volumes | http://\:\/flasharray/host/{host}/volume | endpoint, apitoken 63 | volume hosts | http://\:\/flasharray/volume/{volume}/host | endpoint, apitoken 64 | 65 | ### Usage example 66 | 67 | In a typical production scenario, it is recommended to use this helper in combination with a visual frontend for your metrics, such as [Grafana](https://github.com/grafana/grafana). Grafana allows you to use your Prometheus instance as a datasource, and create Graphs and other visualizations from PromQL queries. Grafana and Prometheus, are all easy to run as docker containers. 68 | 69 | To spin up the containers, use the following commands: 70 | ```bash 71 | docker run -d -p 9000:9000 --name pure-helper purestorage/pure-helper:latest 72 | ``` 73 | 74 | ### Authors 75 | 76 | * **Eugenio Grosso** 77 | 78 | ### License 79 | 80 | This project is licensed under the Apache 2.0 License - see the [LICENSE.md](../../LICENSE.md) file for details 81 | -------------------------------------------------------------------------------- /flasharray_collector/flasharray_metrics/array_space_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | from . 
class ArraySpaceMetrics():
    """
    Base class for FlashArray Prometheus array space metrics.

    Exposes the array-wide data reduction ratio, capacity, provisioned
    and used space as gauge metric families, each with a 'dimension'
    label taken from the corresponding mapping in `mappings`.

    :param fa: FlashArray wrapper providing get_array_elem().
    """
    def __init__(self, fa):
        self.fa = fa
        self.data_reduction = GaugeMetricFamily('purefa_array_space_datareduction_ratio',
                                                'FlashArray overall data reduction',
                                                labels=['dimension'],
                                                unit='ratio')
        self.capacity = GaugeMetricFamily('purefa_array_space_capacity_bytes',
                                          'FlashArray overall space capacity',
                                          labels=['dimension'])
        self.provisioned = GaugeMetricFamily('purefa_array_space_provisioned_bytes',
                                             'FlashArray overall provisioned space',
                                             labels=['dimension'])
        self.used = GaugeMetricFamily('purefa_array_space_used_bytes',
                                      'FlashArray overall used space',
                                      labels=['dimension'])

    def _add_mapped(self, metric, mapping):
        """
        Add one gauge sample per mapping entry, defaulting missing
        (None) values to 0.

        Fetches each value exactly once: the original code called
        self.fa.get_array_elem(k) twice per key (once in the condition,
        once for the value), doubling the lookups.
        """
        for key, dimension in mapping.items():
            value = self.fa.get_array_elem(key)
            metric.add_metric([dimension], value if value is not None else 0)

    def _data_reduction(self):
        """Create gauge metrics for array data reduction."""
        self._add_mapped(self.data_reduction, mappings.array_drr_mapping)

    def _capacity(self):
        """Create gauge metrics for array capacity indicators."""
        self._add_mapped(self.capacity, mappings.array_capacity_mapping)

    def _provisioned(self):
        """Create gauge metrics for array provisioned space indicators."""
        self._add_mapped(self.provisioned, mappings.array_provisioned_mapping)

    def _used(self):
        """Create gauge metrics for array used space indicators."""
        self._add_mapped(self.used, mappings.array_used_mapping)

    def get_metrics(self):
        """Populate all space metric families and yield them in order."""
        self._data_reduction()
        self._capacity()
        self._provisioned()
        self._used()
        yield self.data_reduction
        yield self.capacity
        yield self.provisioned
        yield self.used
from .flasharray_metrics.flasharray import FlashArray
from .flasharray_metrics.array_info_metrics import ArrayInfoMetrics
from .flasharray_metrics.array_hardware_metrics import ArrayHardwareMetrics
from .flasharray_metrics.array_events_metrics import ArrayEventsMetrics
from .flasharray_metrics.array_space_metrics import ArraySpaceMetrics
from .flasharray_metrics.array_performance_metrics import ArrayPerformanceMetrics
from .flasharray_metrics.volume_space_metrics import VolumeSpaceMetrics
from .flasharray_metrics.volume_performance_metrics import VolumePerformanceMetrics
from .flasharray_metrics.host_space_metrics import HostSpaceMetrics
from .flasharray_metrics.host_performance_metrics import HostPerformanceMetrics
from .flasharray_metrics.host_volume_metrics import HostVolumeMetrics
from .flasharray_metrics.pod_status_metrics import PodStatusMetrics
from .flasharray_metrics.pod_space_metrics import PodSpaceMetrics
from .flasharray_metrics.pod_performance_metrics import PodPerformanceMetrics
from .flasharray_metrics.network_interface_metrics import NetworkInterfacePerformanceMetrics


class FlasharrayCollector():
    """
    Instantiates the collector's methods and properties to retrieve status,
    space occupancy and performance metrics from a Pure Storage FlashArray.
    Also provides a 'collect' method to allow a Prometheus client registry
    to work properly.

    :param endpoint: IP address or domain name of the target array's
        management interface.
    :type endpoint: str
    :param api_token: API token of the user with which to log in.
    :type api_token: str
    :param request: metric group to collect: 'all', 'array', 'volumes',
        'hosts' or 'pods'.
    :type request: str
    """
    def __init__(self, endpoint, api_token, request='all'):
        self.fa = None
        try:
            self.fa = FlashArray(endpoint, api_token)
        except Exception as e:
            # Chain the original exception so the root cause is preserved.
            raise Exception('Connection for FlashArray {} not initialized. Check array name/address and api-token'.format(endpoint)) from e
        self.request = request

    def collect(self):
        """Global collector method for all the collected array metrics."""
        if self.request in ['all', 'array']:
            yield from ArrayInfoMetrics(self.fa).get_metrics()
            yield from ArrayHardwareMetrics(self.fa).get_metrics()
            yield from ArrayEventsMetrics(self.fa).get_metrics()
            yield from ArraySpaceMetrics(self.fa).get_metrics()
            yield from ArrayPerformanceMetrics(self.fa).get_metrics()
            yield from NetworkInterfacePerformanceMetrics(self.fa).get_metrics()
        if self.request in ['all', 'volumes']:
            yield from VolumeSpaceMetrics(self.fa).get_metrics()
            yield from VolumePerformanceMetrics(self.fa).get_metrics()
        if self.request in ['all', 'hosts']:
            yield from HostSpaceMetrics(self.fa).get_metrics()
            yield from HostPerformanceMetrics(self.fa).get_metrics()
        if self.request in ['all', 'pods']:
            yield from PodStatusMetrics(self.fa).get_metrics()
            yield from PodSpaceMetrics(self.fa).get_metrics()
            yield from PodPerformanceMetrics(self.fa).get_metrics()
        if self.request in ['all']:
            # Host/volume correlation metrics are only produced on a full scrape.
            yield from HostVolumeMetrics(self.fa).get_metrics()
class ClientsPerformanceMetrics():
    """
    Base class for FlashBlade Prometheus clients performance metrics.

    Builds gauge metric families for per-client latency, IOPS, average
    operation size and throughput, one sample per (client, port,
    dimension) triple.

    :param fb: FlashBlade wrapper providing get_clients_performance().
    """
    def __init__(self, fb):
        self.fb = fb
        self.latency = GaugeMetricFamily('purefb_client_performance_latency_usec',
                                         'FlashBlade latency',
                                         labels=['name', 'port', 'dimension'])
        self.iops = GaugeMetricFamily('purefb_client_performance_iops',
                                      'FlashBlade IOPS',
                                      labels=['name', 'port', 'dimension'])
        self.ops_size = GaugeMetricFamily('purefb_client_performance_opns_bytes',
                                          'FlashBlade client average bytes per operations',
                                          labels=['name', 'port', 'dimension'])
        self.throughput = GaugeMetricFamily('purefb_client_performance_throughput_bytes',
                                            'FlashBlade client_throughput',
                                            labels=['name', 'port', 'dimension'])
        self.clients_performance = fb.get_clients_performance()

    @staticmethod
    def _split_client_port(name):
        """
        Split a 'client:port' identifier into (client, port).

        Splits on the *last* colon so client addresses that themselves
        contain colons (IPv6) are handled; the previous split(':')
        raised ValueError ('too many values to unpack') for such names.
        """
        client, port = name.rsplit(':', 1)
        return client, port

    def _latency(self):
        """Create gauge metrics for client latency."""
        for cperf in self.clients_performance:
            client, port = self._split_client_port(cperf.name)
            self.latency.add_metric([client, port, 'read'], cperf.usec_per_read_op)
            self.latency.add_metric([client, port, 'write'], cperf.usec_per_write_op)
            self.latency.add_metric([client, port, 'other'], cperf.usec_per_other_op)

    def _iops(self):
        """Create gauge metrics for client IOPS."""
        for cperf in self.clients_performance:
            client, port = self._split_client_port(cperf.name)
            self.iops.add_metric([client, port, 'read'], cperf.reads_per_sec)
            self.iops.add_metric([client, port, 'write'], cperf.writes_per_sec)
            self.iops.add_metric([client, port, 'other'], cperf.others_per_sec)

    def _ops_size(self):
        """Create gauge metrics for client average operation size."""
        for cperf in self.clients_performance:
            client, port = self._split_client_port(cperf.name)
            self.ops_size.add_metric([client, port, 'per_op'], cperf.bytes_per_op)
            self.ops_size.add_metric([client, port, 'read'], cperf.bytes_per_read)
            self.ops_size.add_metric([client, port, 'write'], cperf.bytes_per_write)

    def _throughput(self):
        """Create gauge metrics for client throughput."""
        for cperf in self.clients_performance:
            client, port = self._split_client_port(cperf.name)
            self.throughput.add_metric([client, port, 'read'], cperf.read_bytes_per_sec)
            self.throughput.add_metric([client, port, 'write'], cperf.write_bytes_per_sec)

    def get_metrics(self):
        """Populate all client metric families and yield them in order."""
        self._latency()
        self._iops()
        self._ops_size()
        self._throughput()
        yield self.latency
        yield self.iops
        yield self.ops_size
        yield self.throughput
47 | """ 48 | for cperf in self.clients_performance: 49 | client, port = cperf.name.split(':') 50 | self.ops_size.add_metric([client, port, 'per_op'], cperf.bytes_per_op) 51 | self.ops_size.add_metric([client, port, 'read'], cperf.bytes_per_read) 52 | self.ops_size.add_metric([client, port, 'write'], cperf.bytes_per_write) 53 | 54 | def _throughput(self): 55 | """ 56 | Create metrics of gauge type for client throughput metrics. 57 | """ 58 | for cperf in self.clients_performance: 59 | client, port = cperf.name.split(':') 60 | self.throughput.add_metric([client, port, 'read'], cperf.read_bytes_per_sec) 61 | self.throughput.add_metric([client, port, 'write'], cperf.write_bytes_per_sec) 62 | 63 | def get_metrics(self): 64 | self._latency() 65 | self._iops() 66 | self._ops_size() 67 | self._throughput() 68 | yield self.latency 69 | yield self.iops 70 | yield self.ops_size 71 | yield self.throughput 72 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_metrics/filesystems_performance_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | 3 | 4 | class FilesystemsPerformanceMetrics(): 5 | """ 6 | Base class for FlashBlade Prometheus filesystem performance metrics 7 | """ 8 | def __init__(self, fb): 9 | self.fb = fb 10 | self.latency = GaugeMetricFamily('purefb_filesystem_performance_latency_usec', 11 | 'FlashBlade filesystem latency', 12 | labels=['protocol', 'name', 'dimension']) 13 | self.iops = GaugeMetricFamily('purefb_filesystem_performance_iops', 14 | 'FlashBlade filesystem IOPS', 15 | labels=['protocol', 'name', 'dimension']) 16 | self.ops_size = GaugeMetricFamily('purefb_filesystem_performance_opns_bytes', 17 | 'FlashBlade filesystem average bytes per operations', 18 | labels=['protocol', 'name', 'dimension']) 19 | self.throughput = GaugeMetricFamily('purefb_filesystem_performance_throughput_bytes', 20 
| 'FlashBlade filesystem throughput', 21 | labels=['protocol', 'name', 'dimension']) 22 | self.nfs_filesystems_performance = fb.get_nfs_filesystems_performance() 23 | 24 | 25 | 26 | def _latency(self): 27 | """ 28 | Create metrics of gauge type for filesystems latency, 29 | with filesystem name as label. 30 | """ 31 | for f in self.nfs_filesystems_performance: 32 | self.latency.add_metric(['nfs', f.name, 'read'], f.usec_per_read_op) 33 | self.latency.add_metric(['nfs', f.name, 'write'], f.usec_per_write_op) 34 | self.latency.add_metric(['nfs', f.name, 'other'], f.usec_per_other_op) 35 | 36 | def _iops(self): 37 | """ 38 | Create metrics of gauge type for filesystems iops, 39 | with filesystem name as label. 40 | """ 41 | for f in self.nfs_filesystems_performance: 42 | self.iops.add_metric(['nfs', f.name, 'read'], f.reads_per_sec) 43 | self.iops.add_metric(['nfs', f.name, 'write'], f.writes_per_sec) 44 | self.iops.add_metric(['nfs', f.name, 'other'], f.others_per_sec) 45 | 46 | def _ops_size(self): 47 | """ 48 | Create metrics of gauge type for filesystems average operations size, 49 | with filesystem name as label. 50 | """ 51 | 52 | for f in self.nfs_filesystems_performance: 53 | self.ops_size.add_metric(['nfs', f.name, 'per_op'], f.bytes_per_op) 54 | self.ops_size.add_metric(['nfs', f.name, 'read'], f.bytes_per_read) 55 | self.ops_size.add_metric(['nfs', f.name, 'write'], f.bytes_per_write) 56 | 57 | def _throughput(self): 58 | """ 59 | Create metrics of gauge type for filesystems throughput, 60 | with filesystem name as label. 
61 | """ 62 | for f in self.nfs_filesystems_performance: 63 | self.throughput.add_metric(['nfs', f.name, 'read'], f.read_bytes_per_sec) 64 | self.throughput.add_metric(['nfs', f.name, 'write'], f.write_bytes_per_sec) 65 | 66 | def get_metrics(self): 67 | self._latency() 68 | self._iops() 69 | self._ops_size() 70 | self._throughput() 71 | yield self.latency 72 | yield self.iops 73 | yield self.ops_size 74 | yield self.throughput 75 | -------------------------------------------------------------------------------- /flashblade_collector/flashblade_collector.py: -------------------------------------------------------------------------------- 1 | from .flashblade_metrics.flashblade import FlashBlade 2 | from .flashblade_metrics.array_info_metrics import ArrayInfoMetrics 3 | from .flashblade_metrics.array_hardware_metrics import ArrayHardwareMetrics 4 | from .flashblade_metrics.array_events_metrics import ArrayEventsMetrics 5 | from .flashblade_metrics.array_space_metrics import ArraySpaceMetrics 6 | from .flashblade_metrics.array_performance_metrics import ArrayPerformanceMetrics 7 | from .flashblade_metrics.array_specific_performance_metrics import ArraySpecificPerformanceMetrics 8 | from .flashblade_metrics.filesystems_space_metrics import FilesystemsSpaceMetrics 9 | from .flashblade_metrics.filesystems_performance_metrics import FilesystemsPerformanceMetrics 10 | from .flashblade_metrics.buckets_space_metrics import BucketsSpaceMetrics 11 | from .flashblade_metrics.buckets_performance_metrics import BucketsPerformanceMetrics 12 | from .flashblade_metrics.clients_performance_metrics import ClientsPerformanceMetrics 13 | from .flashblade_metrics.buckets_replica_metrics import BucketsReplicaMetrics 14 | from .flashblade_metrics.filesystems_replica_metrics import FilesystemsReplicaMetrics 15 | from .flashblade_metrics.usage_users_metrics import UsageUsersMetrics 16 | from .flashblade_metrics.usage_groups_metrics import UsageGroupsMetrics 17 | 18 | 19 | class 
FlashbladeCollector(): 20 | """ 21 | Instantiates the collector's methods and properties to retrieve status, 22 | space occupancy and performance metrics from Puretorage FlasBlade. 23 | Provides also a 'collect' method to allow Prometheus client registry 24 | to work properly. 25 | :param target: IP address or domain name of the target array's management 26 | interface. 27 | :type target: str 28 | :param api_token: API token of the user with which to log in. 29 | :type api_token: str 30 | """ 31 | def __init__(self, endpoint, api_token, request='all'): 32 | self.fb = None 33 | try: 34 | self.fb = FlashBlade(endpoint, api_token) 35 | except Exception as e: 36 | raise Exception('Connection with FlashBlade {} not initialized. Check array name/address and api-token'.format(endpoint)) 37 | self.request = request 38 | 39 | def collect(self): 40 | """Global collector method for all the collected array metrics.""" 41 | if self.request in ['all', 'array']: 42 | yield from ArrayInfoMetrics(self.fb).get_metrics() 43 | yield from ArrayHardwareMetrics(self.fb).get_metrics() 44 | yield from ArrayEventsMetrics(self.fb).get_metrics() 45 | yield from ArrayPerformanceMetrics(self.fb).get_metrics() 46 | yield from ArraySpecificPerformanceMetrics(self.fb).get_metrics() 47 | yield from ArraySpaceMetrics(self.fb).get_metrics() 48 | yield from FilesystemsSpaceMetrics(self.fb).get_metrics() 49 | yield from BucketsSpaceMetrics(self.fb).get_metrics() 50 | yield from FilesystemsPerformanceMetrics(self.fb).get_metrics() 51 | yield from BucketsPerformanceMetrics(self.fb).get_metrics() 52 | yield from BucketsReplicaMetrics(self.fb).get_metrics() 53 | yield from FilesystemsReplicaMetrics(self.fb).get_metrics() 54 | if self.request in ['all', 'usage']: 55 | yield from UsageUsersMetrics(self.fb).get_metrics() 56 | yield from UsageGroupsMetrics(self.fb).get_metrics() 57 | if self.request in ['all', 'clients']: 58 | yield from ClientsPerformanceMetrics(self.fb).get_metrics() 59 | 
# Mappings from FlashBlade protocol-specific performance attribute names
# (as returned by the purity_fb client) to the Prometheus 'dimension'
# label values used by ArraySpecificPerformanceMetrics.

nfs_array_specific_latency = {
    'aggregate_usec_per_file_metadata_create_op': 'file_metadata_create',
    'aggregate_usec_per_file_metadata_modify_op': 'file_metadata_modify',
    'aggregate_usec_per_file_metadata_read_op': 'file_metadata_read',
    'aggregate_usec_per_share_metadata_read_op': 'share_metadata_read',
    'usec_per_access_op': 'access',  # fixed label typo: was 'acces'
    'usec_per_create_op': 'create',
    'usec_per_fsinfo_op': 'fsinfo',
    'usec_per_fsstat_op': 'fsstat',
    'usec_per_getattr_op': 'getattr',
    'usec_per_link_op': 'link',
    'usec_per_lookup_op': 'lookup',
    'usec_per_mkdir_op': 'mkdir',
    'usec_per_pathconf_op': 'pathconf',
    'usec_per_read_op': 'read',
    'usec_per_readdir_op': 'readdir',
    'usec_per_readdirplus_op': 'readdirplus',
    'usec_per_readlink_op': 'readlink',
    'usec_per_remove_op': 'remove',
    'usec_per_rename_op': 'rename',
    'usec_per_rmdir_op': 'rmdir',
    'usec_per_setattr_op': 'setattr',
    'usec_per_symlink_op': 'symlink',
    'usec_per_write_op': 'write'
}
http_array_specific_latency = {
    'usec_per_read_dir_op': 'read_dir',
    'usec_per_write_dir_op': 'write_dir',
    'usec_per_read_file_op': 'read_file',
    'usec_per_write_file_op': 'write_file',
    'usec_per_other_op': 'other'
}
s3_array_specific_latency = {
    'usec_per_other_op': 'other',
    'usec_per_read_bucket_op': 'read_bucket',
    'usec_per_read_object_op': 'read_object',
    'usec_per_write_bucket_op': 'write_bucket',
    'usec_per_write_object_op': 'write_object'
}


nfs_array_specific_iops = {
    'aggregate_file_metadata_creates_per_sec': 'file_metadata_creates',
    'aggregate_file_metadata_modifies_per_sec': 'file_metadata_modifies',
    'aggregate_file_metadata_reads_per_sec': 'file_metadata_reads',
    'aggregate_share_metadata_reads_per_sec': 'share_metadata_reads',
    'accesses_per_sec': 'accesses',
    'creates_per_sec': 'creates',
    'fsinfos_per_sec': 'fsinfos',
    'fsstats_per_sec': 'fsstats',
    'getattrs_per_sec': 'getattrs',
    'links_per_sec': 'links',
    'lookups_per_sec': 'lookups',
    'mkdirs_per_sec': 'mkdirs',
    'pathconfs_per_sec': 'pathconfs',
    'readdirpluses_per_sec': 'readdirpluses',
    'readdirs_per_sec': 'readdirs',
    'readlinks_per_sec': 'readlinks',
    'reads_per_sec': 'reads',
    'removes_per_sec': 'removes',
    'renames_per_sec': 'renames',
    'rmdirs_per_sec': 'rmdirs',
    'setattrs_per_sec': 'setattrs',
    'symlinks_per_sec': 'symlinks',
    'writes_per_sec': 'writes'
}
http_array_specific_iops = {
    'others_per_sec': 'others',
    'read_dirs_per_sec': 'read_dirs',
    'read_files_per_sec': 'read_files',
    'write_dirs_per_sec': 'write_dirs',
    'write_files_per_sec': 'write_files'
}
s3_array_specific_iops = {
    'others_per_sec': 'others',
    'read_buckets_per_sec': 'read_buckets',
    'read_objects_per_sec': 'read_objects',
    'write_buckets_per_sec': 'write_buckets',
    'write_objects_per_sec': 'write_objects'
}
class ArrayPerformanceMetrics():
    """
    Base class for FlashBlade Prometheus array performance metrics.

    Exposes per-protocol latency, IOPS, average operation size and
    throughput gauges, labelled by protocol and dimension.

    :param fb: FlashBlade wrapper providing get_array_performance().
    """

    def __init__(self, fb):
        self.fb = fb
        self.protocols = ['http', 'nfs', 's3', 'smb']
        # Memoized per-protocol performance snapshots: each protocol is
        # queried once per scrape instead of once per metric family
        # (previously 4 calls per protocol), which also guarantees all
        # families sample the same snapshot.
        self._performance = {}
        self.latency = GaugeMetricFamily('purefb_array_performance_latency_usec',
                                         'FlashBlade array latency',
                                         labels=['protocol', 'dimension'])
        self.iops = GaugeMetricFamily('purefb_array_performance_iops',
                                      'FlashBlade array IOPS',
                                      labels=['protocol', 'dimension'])
        self.ops_size = GaugeMetricFamily('purefb_array_performance_opns_bytes',
                                          'FlashBlade array average bytes per operations',
                                          labels=['protocol', 'dimension'])
        self.throughput = GaugeMetricFamily('purefb_array_performance_throughput_bytes',
                                            'FlashBlade array throughput',
                                            labels=['protocol', 'dimension'])

    def _get_performance(self, protocol):
        """Return the performance snapshot for one protocol, fetching it at most once."""
        if protocol not in self._performance:
            self._performance[protocol] = self.fb.get_array_performance(protocol)
        return self._performance[protocol]

    def _latency(self):
        """Create array latency performance metrics of gauge type."""
        for p in self.protocols:
            m = self._get_performance(p)
            if m is None:
                continue
            self.latency.add_metric([p, 'read'], m.usec_per_read_op)
            self.latency.add_metric([p, 'write'], m.usec_per_write_op)
            self.latency.add_metric([p, 'other'], m.usec_per_other_op)

    def _iops(self):
        """Create array iops performance metrics of gauge type."""
        for p in self.protocols:
            m = self._get_performance(p)
            if m is None:
                continue
            self.iops.add_metric([p, 'read'], m.reads_per_sec)
            self.iops.add_metric([p, 'write'], m.writes_per_sec)
            self.iops.add_metric([p, 'other'], m.others_per_sec)

    def _ops_size(self):
        """Create array operation size performance metrics of gauge type."""
        for p in self.protocols:
            m = self._get_performance(p)
            if m is None:
                continue
            self.ops_size.add_metric([p, 'per_op'], m.bytes_per_op)
            self.ops_size.add_metric([p, 'read'], m.bytes_per_read)
            self.ops_size.add_metric([p, 'write'], m.bytes_per_write)

    def _throughput(self):
        """Create array throughput performance metrics of gauge type."""
        for p in self.protocols:
            m = self._get_performance(p)
            if m is None:
                continue
            self.throughput.add_metric([p, 'read'], m.read_bytes_per_sec)
            self.throughput.add_metric([p, 'write'], m.write_bytes_per_sec)

    def get_metrics(self):
        """Populate all array performance metric families and yield them."""
        self._latency()
        self._iops()
        self._ops_size()
        self._throughput()
        yield self.latency
        yield self.iops
        yield self.ops_size
        yield self.throughput
66 | """ 67 | for p in self.protocols: 68 | m = self.fb.get_array_performance(p) 69 | if m is None: 70 | continue 71 | self.throughput.add_metric([p, 'read'], m.read_bytes_per_sec) 72 | self.throughput.add_metric([p, 'write'], m.write_bytes_per_sec) 73 | 74 | def get_metrics(self): 75 | self._latency() 76 | self._iops() 77 | self._ops_size() 78 | self._throughput() 79 | yield self.latency 80 | yield self.iops 81 | yield self.ops_size 82 | yield self.throughput 83 | -------------------------------------------------------------------------------- /flasharray_collector/flasharray_metrics/array_performance_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client.core import GaugeMetricFamily 2 | from . import mappings 3 | 4 | 5 | class ArrayPerformanceMetrics(): 6 | """ 7 | Base class for FlashArray Prometheus array performance metrics 8 | """ 9 | 10 | def __init__(self, fa): 11 | self.fa = fa 12 | 13 | self.latency = GaugeMetricFamily('purefa_array_performance_latency_usec', 14 | 'FlashArray latency', 15 | labels=['dimension']) 16 | self.bandwidth = GaugeMetricFamily('purefa_array_performance_bandwidth_bytes', 17 | 'FlashArray bandwidth', 18 | labels=['dimension']) 19 | self.iops = GaugeMetricFamily('purefa_array_performance_iops', 20 | 'FlashArray IOPS', 21 | labels=['dimension']) 22 | self.avg_bsz = GaugeMetricFamily('purefa_array_performance_avg_block_bytes', 23 | 'FlashArray avg block size', 24 | labels=['dimension']) 25 | self.qdepth = GaugeMetricFamily('purefa_array_performance_qdepth', 26 | 'FlashArray queue depth', 27 | labels=['dimension']) 28 | 29 | def _mk_metric(self, metric, entity_list, mapping): 30 | """ 31 | Create metrics of gauge type, with dimension as label. 32 | Metrics values can be iterated over. 
33 | """ 34 | for k in mapping: 35 | if k in entity_list: 36 | metric.add_metric([mapping[k]], entity_list[k] if entity_list[k] is not None else 0) 37 | 38 | def _latency(self): 39 | """ 40 | Create array latency performance metrics of gauge type. 41 | Metrics values can be iterated over. 42 | """ 43 | self._mk_metric(self.latency, 44 | self.fa.get_array(), 45 | mappings.array_latency_mapping) 46 | 47 | def _bandwidth(self): 48 | """ 49 | Create array bandwidth performance metrics of gauge type. 50 | Metrics values can be iterated over. 51 | """ 52 | self._mk_metric(self.bandwidth, 53 | self.fa.get_array(), 54 | mappings.array_bandwidth_mapping) 55 | 56 | def _iops(self): 57 | """ 58 | Create array iops performance metrics of gauge type. 59 | Metrics values can be iterated over. 60 | """ 61 | self._mk_metric(self.iops, 62 | self.fa.get_array(), 63 | mappings.array_iops_mapping) 64 | 65 | def _avg_block_size(self): 66 | """ 67 | Create array average block size performance metrics of gauge type. 68 | Metrics values can be iterated over. 69 | """ 70 | self._mk_metric(self.avg_bsz, 71 | self.fa.get_array(), 72 | mappings.array_bsize_mapping) 73 | 74 | def _qdepth(self): 75 | """ 76 | Create array queue depth performance metric of gauge type. 77 | Metrics values can be iterated over. 78 | """ 79 | self._mk_metric(self.qdepth, 80 | self.fa.get_array(), 81 | mappings.array_qdepth_mapping) 82 | 83 | def get_metrics(self): 84 | self._latency() 85 | self._bandwidth() 86 | self._iops() 87 | self._avg_block_size() 88 | self._qdepth() 89 | yield self.latency 90 | yield self.bandwidth 91 | yield self.iops 92 | yield self.avg_bsz 93 | yield self.qdepth 94 | -------------------------------------------------------------------------------- /extra/monitoring-stack/alertmanager/alertmanager.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # The smarthost and SMTP sender used for mail notifications. 
3 | smtp_smarthost: 'localhost:25' 4 | smtp_from: 'alertmanager@example.org' 5 | 6 | # The root route on which each incoming alert enters. 7 | route: 8 | # The root route must not have any matchers as it is the entry point for 9 | # all alerts. It needs to have a receiver configured so alerts that do not 10 | # match any of the sub-routes are sent to someone. 11 | receiver: 'team-X-mails' 12 | 13 | # The labels by which incoming alerts are grouped together. For example, 14 | # multiple alerts coming in for cluster=A and alertname=LatencyHigh would 15 | # be batched into a single group. 16 | # 17 | # To aggregate by all possible labels use '...' as the sole label name. 18 | # This effectively disables aggregation entirely, passing through all 19 | # alerts as-is. This is unlikely to be what you want, unless you have 20 | # a very low alert volume or your upstream notification system performs 21 | # its own grouping. Example: group_by: [...] 22 | group_by: ['alertname', 'cluster'] 23 | 24 | # When a new group of alerts is created by an incoming alert, wait at 25 | # least 'group_wait' to send the initial notification. 26 | # This way ensures that you get multiple alerts for the same group that start 27 | # firing shortly after another are batched together on the first 28 | # notification. 29 | group_wait: 30s 30 | 31 | # When the first notification was sent, wait 'group_interval' to send a batch 32 | # of new alerts that started firing for that group. 33 | group_interval: 5m 34 | 35 | # If an alert has successfully been sent, wait 'repeat_interval' to 36 | # resend them. 37 | repeat_interval: 3h 38 | 39 | # All the above attributes are inherited by all child routes and can 40 | # overwritten on each. 41 | 42 | # The child route trees. 43 | routes: 44 | # This routes performs a regular expression match on alert labels to 45 | # catch alerts that are related to a list of services. 
46 | - match_re: 47 | service: ^(foo1|foo2|baz)$ 48 | receiver: team-X-mails 49 | 50 | # The service has a sub-route for critical alerts, any alerts 51 | # that do not match, i.e. severity != critical, fall-back to the 52 | # parent node and are sent to 'team-X-mails' 53 | routes: 54 | - match: 55 | severity: critical 56 | receiver: team-X-pager 57 | 58 | - match: 59 | service: files 60 | receiver: team-Y-mails 61 | 62 | routes: 63 | - match: 64 | severity: critical 65 | receiver: team-Y-pager 66 | 67 | # This route handles all alerts coming from a database service. If there's 68 | # no team to handle it, it defaults to the DB team. 69 | - match: 70 | service: database 71 | 72 | receiver: team-DB-pager 73 | # Also group alerts by affected database. 74 | group_by: [alertname, cluster, database] 75 | 76 | routes: 77 | - match: 78 | owner: team-X 79 | receiver: team-X-pager 80 | 81 | - match: 82 | owner: team-Y 83 | receiver: team-Y-pager 84 | 85 | 86 | # Inhibition rules allow to mute a set of alerts given that another alert is 87 | # firing. 88 | # We use this to mute any warning-level notifications if the same alert is 89 | # already critical. 90 | inhibit_rules: 91 | - source_match: 92 | severity: 'critical' 93 | target_match: 94 | severity: 'warning' 95 | # Apply inhibition if the alertname is the same. 
class ArrayHardwareMetrics:
    """
    FlashArray hardware health and sensor metrics.

    Parses the flat component list returned by get_hardware_status()
    into chassis/controller health gauges, per-component health,
    temperature sensor and power-supply voltage metrics.
    """

    def __init__(self, fa):
        self.fa = fa
        self.chassis_health = None
        self.controller_health = None
        self.component_health = None
        self.temperature = None
        # Was a duplicated 'self.temperature = None'; 'power' was never
        # initialized, so it is declared here for consistency.
        self.power = None

    def _array_hardware_status(self):
        """Collect information about all system sensors."""
        data = self.fa.get_hardware_status()

        self.chassis_health = GaugeMetricFamily(
            'purefa_hardware_chassis_health',
            'FlashArray hardware chassis health status')
        self.controller_health = GaugeMetricFamily(
            'purefa_hardware_controller_health',
            'FlashArray hardware controller health status',
            labels=['controller'])
        self.component_health = GaugeMetricFamily(
            'purefa_hardware_component_health',
            'FlashArray hardware component health status',
            labels=['chassis', 'controller', 'component',
                    'index'])
        self.temperature = GaugeMetricFamily(
            'purefa_hardware_temperature_celsius',
            'FlashArray hardware temperature sensors',
            labels=['chassis', 'controller',
                    'sensor'])
        self.power = GaugeMetricFamily(
            'purefa_hardware_power_volts',
            'FlashArray hardware power supply voltage',
            labels=['chassis', 'power_supply'])

        re_chassis = re.compile(r"^CH(\d+)$")
        re_controller = re.compile(r"^CT(\d+)$")
        re_component = re.compile(r"^(CH|CT)(\d+)\.([A-Z]+)([0-9]+)$")

        for comp in data:
            if comp['status'] == 'not_installed':
                continue
            component_name = comp['name']
            component_state = 1 if comp['status'] == 'ok' else 0

            # Match each compiled pattern once; the original ran an
            # uncompiled re.match() pre-check and then repeated the same
            # match with the compiled pattern.
            detail = re_chassis.match(component_name)
            if detail:
                # Chassis health
                self.chassis_health.add_metric([detail.group(1)], component_state)
                continue
            detail = re_controller.match(component_name)
            if detail:
                # Controller health
                self.controller_health.add_metric([detail.group(1)], component_state)
                continue
            detail = re_component.match(component_name)
            if detail:
                c_base = detail.group(1)
                c_base_index = detail.group(2)
                c_type = detail.group(3)
                c_index = detail.group(4)

                if c_base == 'CH':
                    # Chassis-based component
                    labelset = [c_base_index, '', c_type, c_index]
                else:
                    # Controller-based component
                    labelset = ['', c_base_index, c_type, c_index]

                # Component health status
                self.component_health.add_metric(
                    labels=labelset, value=component_state)

                if c_type.lower() == 'tmp':
                    # Additional metric for temperature sensors
                    if c_base == 'CH':
                        self.temperature.add_metric(
                            [c_base_index, '', c_index], float(comp['temperature']))
                    else:
                        self.temperature.add_metric(
                            ['', c_base_index, c_index], float(comp['temperature']))
                elif c_type.lower() == 'pwr':
                    # Additional metric for power supply voltage level
                    if comp['voltage'] is not None:
                        self.power.add_metric([c_base_index, c_index],
                                              float(comp['voltage']))

    def get_metrics(self):
        """Populate all hardware metric families and yield them in order."""
        self._array_hardware_status()
        yield self.chassis_health
        yield self.controller_health
        yield self.component_health
        yield self.temperature
        yield self.power
#!/usr/bin/env python

from flask import Flask, request, abort, make_response
from flask_httpauth import HTTPTokenAuth
from urllib.parse import parse_qs
import re
from prometheus_client import generate_latest, CollectorRegistry, CONTENT_TYPE_LATEST
from flasharray_collector import FlasharrayCollector

import logging


class InterceptRequestMiddleware:
    """WSGI middleware that promotes an ``apitoken`` query parameter to a
    Bearer Authorization header, so clients may authenticate either way."""

    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        params = parse_qs(environ['QUERY_STRING'])
        # Only the first apitoken value is considered.
        token_from_query = params.get('apitoken', [''])[0]
        if 'HTTP_AUTHORIZATION' not in environ:
            environ['HTTP_AUTHORIZATION'] = 'Bearer ' + token_from_query
        return self.wsgi_app(environ, start_response)


app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app.wsgi_app = InterceptRequestMiddleware(app.wsgi_app)
auth = HTTPTokenAuth(scheme='Bearer')


@auth.verify_token
def verify_token(token):
    """Accept tokens shaped like a FlashArray API token (UUID-style hex)."""
    token_shape = re.compile(
        r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")
    if token_shape.search(token) is None:
        return False
    return token

Pure Storage Prometheus Exporter

41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 |
TypeEndpointGET parameters
Full metrics/metricsendpoint, apitoken (optional, required only if authentication token is not provided)
Volume metrics/metrics/volumesendpoint, apitoken (optional, required only if authentication token is not provided)Retrieves only volume related metrics
Host metrics/metrics/hostsendpoint, apitoken (optional, required only if authentication token is not provided)Retrieves only host related metrics
Pod metrics/metrics/podsendpoint, apitoken (optional, required only if authentication token is not provided)Retrieves only pod related metrics
# NOTE(review): the extraction stripped the Flask URL converter from the
# route string; restored as '/metrics/<m_type>' to match the handler's
# signature — confirm against the upstream file.
@app.route('/metrics/<m_type>', methods=['GET'])
@auth.login_required
def route_flasharray(m_type: str):
    """Produce FlashArray metrics.

    Unknown metric types fall back to 'all'. Returns 500 if the collector
    cannot be registered (e.g. unreachable endpoint or bad token).
    """
    # Idiom fix: `m_type not in ...` instead of `not m_type in ...`.
    if m_type not in ['array', 'volumes', 'hosts', 'pods']:
        m_type = 'all'
    collector = FlasharrayCollector
    registry = CollectorRegistry()
    try:
        endpoint = request.args.get('endpoint', None)
        token = auth.current_user()
        registry.register(collector(endpoint, token, m_type))
    except Exception as e:
        # Fix: Logger.warn is deprecated in favor of Logger.warning.
        app.logger.warning('%s: %s', collector.__name__, str(e))
        abort(500)

    resp = make_response(generate_latest(registry), 200)
    resp.headers['Content-type'] = CONTENT_TYPE_LATEST
    return resp


@app.route('/metrics', methods=['GET'])
def route_flasharray_all():
    """Convenience route: all metric families."""
    return route_flasharray('all')


@app.errorhandler(400)
def route_error_400(error):
    """Handle invalid request errors."""
    return 'Invalid request parameters', 400


@app.errorhandler(404)
def route_error_404(error):
    """Handle 404 (HTTP Not Found) errors."""
    return 'Not found', 404


@app.errorhandler(500)
def route_error_500(error):
    """Handle server-side errors."""
    return 'Internal server error', 500


# Run in debug mode when not called by WSGI
if __name__ == "__main__":
    app.logger.setLevel(logging.DEBUG)
    app.logger.debug('running in debug mode...')
    app.run(host="0.0.0.0", port=8080, debug=True)
#!/usr/bin/env python

from flask import Flask, request, abort, make_response
from flask_httpauth import HTTPTokenAuth
from urllib.parse import parse_qs
import re
from prometheus_client import generate_latest, CollectorRegistry, CONTENT_TYPE_LATEST
from flashblade_collector import FlashbladeCollector

import logging


class InterceptRequestMiddleware:
    """WSGI middleware that promotes an ``apitoken`` query parameter to a
    Bearer Authorization header, so clients may authenticate either way."""

    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        d = parse_qs(environ['QUERY_STRING'])
        api_token = d.get('apitoken', [''])[0]  # Returns the first api-token value
        if 'HTTP_AUTHORIZATION' not in environ:
            environ['HTTP_AUTHORIZATION'] = 'Bearer ' + api_token
        return self.wsgi_app(environ, start_response)


app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app.wsgi_app = InterceptRequestMiddleware(app.wsgi_app)
auth = HTTPTokenAuth(scheme='Bearer')


@auth.verify_token
def verify_token(token):
    """Accept tokens shaped like a FlashBlade API token (T- prefixed hex).

    Consistency fix: use a raw string for the regex pattern, matching the
    FlashArray exporter's style.
    """
    pattern_str = r"^T-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
    regx = re.compile(pattern_str)
    match = regx.search(token)
    return token if match is not None else False

Pure Storage Prometheus Exporter

41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 |
TypeEndpointGET parameters
Full metrics/metricsendpoint, apitoken (optional, required only if authentication token is not provided)
Array metrics/metrics/arrayendpoint, apitoken (optional, required only if authentication token is not provided)Provides only array related metrics.
Client metrics/metrics/clientsendpoint, apitoken (optional, required only if authentication token is not provided)Provides only client related metrics. This is the most time expensive query
Quota metrics/metrics/quotasendpoint, apitoken (optional, required only if authentication token is not provided)Provides only quota related metrics.
# NOTE(review): the extraction stripped the Flask URL converter from the
# route string; restored as '/metrics/<m_type>' to match the handler's
# signature — confirm against the upstream file.
@app.route('/metrics/<m_type>', methods=['GET'])
@auth.login_required
def route_flashblade(m_type: str):
    """Produce FlashBlade metrics.

    Unknown metric types fall back to 'all'. Returns 500 if the collector
    cannot be registered (e.g. unreachable endpoint or bad token).
    """
    # Fix: the original assigned `collector = FlashbladeCollector` twice
    # (before and after the m_type check); one assignment suffices.
    if m_type not in ['array', 'clients', 'usage']:
        m_type = 'all'
    collector = FlashbladeCollector
    registry = CollectorRegistry()
    try:
        endpoint = request.args.get('endpoint', None)
        token = auth.current_user()
        registry.register(collector(endpoint, token, m_type))
    except Exception as e:
        # Fix: Logger.warn is deprecated in favor of Logger.warning.
        app.logger.warning('%s: %s', collector.__name__, str(e))
        abort(500)

    resp = make_response(generate_latest(registry), 200)
    resp.headers['Content-type'] = CONTENT_TYPE_LATEST
    return resp


@app.route('/metrics', methods=['GET'])
def route_flashblade_all():
    """Convenience route: all metric families."""
    return route_flashblade('all')


@app.errorhandler(400)
def route_error_400(error):
    """Handle invalid request errors."""
    return 'Invalid request parameters', 400


@app.errorhandler(404)
def route_error_404(error):
    """Handle 404 (HTTP Not Found) errors."""
    return 'Not found', 404


@app.errorhandler(500)
def route_error_500(error):
    """Handle server-side errors."""
    return 'Internal server error', 500


# Run in debug mode when not called by WSGI
if __name__ == "__main__":
    app.logger.setLevel(logging.DEBUG)
    app.logger.debug('running in debug mode...')
    app.run(host="0.0.0.0", port=8080, debug=True)
| 14 | 15 | @app.route('/') 16 | def route_index(): 17 | """Display an overview of the helper capabilities.""" 18 | return ''' 19 |

PureStorage Grafana helper

20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | host/:host/volume 38 | 39 | 40 | 41 |
TypeEndpointRequired GET parameters
volume-hosts/flasharray/volume/{volume}/hostendpoint
host-volumes/flasharray/host/{host}/volumeendpoint
# NOTE(review): the extraction stripped the Flask URL converters from the
# route strings; restored to match the handler signatures — confirm
# against the upstream file.
@app.route('/flasharray/volume/<volume>/host', methods=['GET'])
def route_volume(volume):
    """Produce a list of information for the volume."""
    try:
        endpoint = request.args.get('endpoint', None)
        token = request.args.get('apitoken', None)
        resp = jsonify(list_volume_connections(endpoint, token, volume))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp
    except Exception as e:
        # Fix: Logger.warn is deprecated in favor of Logger.warning.
        app.logger.warning('%s: %s', 'pure_helper', str(e))
        abort(500)


@app.route('/flasharray/volume/<vgroup>/<volume>/host', methods=['GET'])
def route_vgvolume(vgroup, volume):
    """Produce a list of information for the volume of the given volume group."""
    vol = vgroup + '/' + volume
    # Idiom fix: `return` is a statement, not a function call.
    return route_volume(vol)


@app.route('/flasharray/host/<host>/volume', methods=['GET'])
def route_host(host):
    """Produce a list of information for the host."""
    try:
        endpoint = request.args.get('endpoint', None)
        token = request.args.get('apitoken', None)
        resp = jsonify(list_host_connections(endpoint, token, host))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp
    except Exception as e:
        app.logger.warning('%s: %s', 'pure_helper', str(e))
        abort(500)


@app.errorhandler(400)
def route_error_400(error):
    """Handle invalid request errors."""
    resp = jsonify(error='Invalid request parameters')
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp, 400


@app.errorhandler(404)
def route_error_404(error):
    """Handle 404 (HTTP Not Found) errors."""
    resp = jsonify(error='Not found')
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp, 404


@app.errorhandler(500)
def route_error_500(error):
    """Handle server-side errors."""
    resp = jsonify(error='Internal server error')
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp, 500
def _flasharray_connect(target, api_token):
    """Open a FlashArray REST session (certificate warnings disabled).

    Extracted helper: both public functions below previously duplicated
    this connection boilerplate.
    """
    # Disable certificate warnings: management endpoints commonly use
    # self-signed certificates.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    return purestorage.FlashArray(
        target,
        api_token=api_token,
        user_agent='Purity_FA_Prometheus_exporter/1.0')


def list_volume_connections(target, api_token, volume):
    """Return the volume's serial, array name, and connected hosts.

    Shared (host-group) connections keep their hgroup name; private
    connections get an empty hgroup.
    """
    conn = _flasharray_connect(target, api_token)
    array = conn.get()
    p_hosts = conn.list_volume_private_connections(volume)
    s_hosts = conn.list_volume_shared_connections(volume)
    v_info = conn.get_volume(volume)

    hosts = []
    for h in s_hosts:
        hosts.append({'host': h['host'], 'lun': h['lun'], 'hgroup': h['hgroup']})
    for h in p_hosts:
        hosts.append({'host': h['host'], 'lun': h['lun'], 'hgroup': ''})

    vol = {}
    vol['serial'] = v_info['serial']
    vol['hosts'] = hosts
    vol['array_name'] = array['array_name']
    return vol


def list_host_connections(target, api_token, host):
    """Return host info extended with its connected volumes and array name.

    Volumes whose details cannot be fetched are reported with an empty
    serial rather than omitted.
    """
    conn = _flasharray_connect(target, api_token)
    array = conn.get()
    v_list = conn.list_host_connections(host)
    vols = []
    for v in v_list:
        v_info = conn.get_volume(v['vol'])
        serial = v_info['serial'] if v_info else ''
        vols.append({'volume': v['vol'], 'lun': v['lun'], 'serial': serial})
    h_info = conn.get_host(host)
    h_info['volumes'] = vols
    h_info['array_name'] = array['array_name']
    return h_info


# Run in debug mode when not called by WSGI
if __name__ == "__main__":
    app.logger.setLevel(logging.DEBUG)
    app.logger.debug('running in debug mode...')
    app.run(host="0.0.0.0", port=9000, debug=True)
import re
import urllib3
import purestorage


# Disable certificate warnings: FlashArray management endpoints commonly
# use self-signed certificates.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# NAA prefix shared by all Pure volumes; a volume's serial is appended to
# form its SCSI identifier.
PURE_NAA = 'naa.624a9370'

# Parameter sets passed to the REST listing calls; each set fetches one
# family of KPIs which is then merged into the base object dict.
base_kpi_params = [{'action': 'monitor'},
                   {'action': 'monitor', 'mirrored': True},
                   {'action': 'monitor', 'latency': True},
                   {'action': 'monitor', 'latency': True, 'mirrored': True},
                   {'action': 'monitor', 'size': True},
                   {'action': 'monitor', 'size': True, 'mirrored': True}]

array_kpi_params = base_kpi_params + [{'space': True}]
host_kpi_params = array_kpi_params

volume_kpi_params = base_kpi_params + [{'space': True, 'pending': True}]
pod_kpi_params = volume_kpi_params

nic_kpi_params = base_kpi_params + [{'error': True}]


class FlashArray:
    """
    Base class for FlashArray Prometheus array info.

    Wraps the purestorage REST client and caches each listing after the
    first fetch, so repeated metric collections reuse one REST round trip.
    """

    def __init__(self, endpoint, api_token):
        self.flasharray = None
        try:
            self.flasharray = purestorage.FlashArray(
                endpoint,
                api_token=api_token,
                user_agent='Purity_FA_Prometheus_exporter/1.0')
        except purestorage.PureError:
            # Leave self.flasharray as None; callers will fail on use.
            pass

        # Per-listing caches, populated lazily by the getters.
        self.array = None
        self.hosts = None
        self.volumes = None
        self.vgroups = None
        self.pods = None
        self.host_volumes = None
        self.network_interfaces = None

    def __del__(self):
        # Invalidate the REST session cookie when the wrapper goes away.
        if self.flasharray:
            self.flasharray.invalidate_cookie()

    def get_array(self):
        """Return array-level info with all KPI families merged in; cached."""
        if self.array is not None:
            return self.array
        self.array = self.flasharray.get()

        for params in array_kpi_params:
            try:
                extra = self.flasharray.get(**params)[0]
            except purestorage.PureError:
                continue
            self.array.update(extra)
        return self.array

    def get_array_elem(self, elem):
        """Return one field of the array info, or None if absent."""
        return self.get_array().get(elem)
elem not in array: 70 | return None 71 | return array[elem] 72 | 73 | def get_open_alerts(self): 74 | return self.flasharray.list_messages(open=True) 75 | 76 | def get_hardware_status(self): 77 | return self.flasharray.list_hardware() 78 | 79 | def get_volumes(self): 80 | if self.volumes is not None: 81 | return list(self.volumes.values()) 82 | vdict = {} 83 | if self.vgroups is None: 84 | self.vgroups = self.flasharray.list_vgroups() 85 | for v in self.flasharray.list_volumes(pending='true'): 86 | v['naaid'] = PURE_NAA + v['serial'] 87 | v['vgroup'] = '' 88 | for vg in self.vgroups: 89 | if v['name'] in vg['volumes']: 90 | v['vgroup'] = vg['name'] 91 | vdict[v['name']] = v 92 | 93 | try: 94 | for v in self.flasharray.list_volumes(protocol_endpoint=True): 95 | # PE do not have these metrics, so it is necessasy to poulate with fake values 96 | v['naaid'] = PURE_NAA + v['serial'] 97 | v['size'] = 0 98 | v['volumes'] = 0 99 | v['snapshots'] = 0 100 | v['total'] = 0 101 | v['data_reduction'] = 0 102 | v['vgroup'] = '' 103 | vdict[v['name']] = v 104 | except purestorage.PureError: 105 | pass 106 | 107 | for params in volume_kpi_params: 108 | try: 109 | for v in self.flasharray.list_volumes(**params): 110 | vdict[v['name']].update(v) 111 | except purestorage.PureError: 112 | pass 113 | # vdict = {key:val for key, val in vdict.items() if val['time_remaining'] is None} 114 | self.volumes = vdict 115 | return list(self.volumes.values()) 116 | 117 | def get_hosts(self): 118 | if self.hosts is not None: 119 | return list(self.hosts.values()) 120 | hdict = {} 121 | try: 122 | for h in self.flasharray.list_hosts(): 123 | hdict[h['name']] = h 124 | except purestorage.PureError: 125 | pass 126 | 127 | for params in host_kpi_params: 128 | try: 129 | for h in self.flasharray.list_hosts(**params): 130 | hdict[h['name']].update(h) 131 | except purestorage.PureError: 132 | pass 133 | self.hosts = hdict 134 | return list(self.hosts.values()) 135 | 136 | def get_host_volumes(self): 137 
| if self.host_volumes is not None: 138 | return list(self.host_volumes.values()) 139 | hvdict = {} 140 | 141 | try: 142 | for h in self.get_hosts(): 143 | for c in self.flasharray.list_host_connections(h['name']): 144 | hvdict[h['name']] = {'host': h['name'], 'naaid': self.volumes[c['vol']]['naaid']} 145 | except purestorage.PureError: 146 | pass 147 | 148 | self.host_volumes = hvdict 149 | return list(self.host_volumes.values()) 150 | 151 | def get_pods(self): 152 | if self.pods is not None: 153 | return list(self.pods.values()) 154 | pdict = {} 155 | try: 156 | for p in self.flasharray.list_pods(pending='true'): 157 | pdict[p['name']] = p 158 | except purestorage.PureError: 159 | pass 160 | 161 | for params in pod_kpi_params: 162 | try: 163 | for p in self.flasharray.list_pods(**params): 164 | pdict[p['name']].update(p) 165 | except purestorage.PureError: 166 | pass 167 | # pdict = {key:val for key, val in pdict.items() if val['time_remaining'] is None} 168 | self.pods = pdict 169 | return list(self.pods.values()) 170 | 171 | def get_network_interfaces(self): 172 | if self.network_interfaces is not None: 173 | return list(self.network_interfaces.values()) 174 | nicdict = {} 175 | try: 176 | for n in self.flasharray.list_network_interfaces(): 177 | nicdict[n['name']] = n 178 | except purestorage.PureError: 179 | pass 180 | 181 | for params in nic_kpi_params: 182 | try: 183 | for n in self.flasharray.list_network_interfaces(**params): 184 | nicdict[n['name']].update(n) 185 | except purestorage.PureError: 186 | pass 187 | self.network_interfaces = nicdict 188 | return list(self.network_interfaces.values()) -------------------------------------------------------------------------------- /pure_exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from flask import Flask, request, abort, make_response 4 | from flask_httpauth import HTTPTokenAuth 5 | from urllib.parse import parse_qs 6 | import 
#!/usr/bin/env python

from flask import Flask, request, abort, make_response
from flask_httpauth import HTTPTokenAuth
from urllib.parse import parse_qs
import re
from prometheus_client import generate_latest, CollectorRegistry, CONTENT_TYPE_LATEST
from flasharray_collector import FlasharrayCollector
from flashblade_collector import FlashbladeCollector

import logging


class InterceptRequestMiddleware:
    """WSGI middleware that promotes an ``apitoken`` query parameter to a
    Bearer Authorization header, so clients may authenticate either way."""

    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        params = parse_qs(environ['QUERY_STRING'])
        # Only the first apitoken value is considered.
        token_from_query = params.get('apitoken', [''])[0]
        if 'HTTP_AUTHORIZATION' not in environ:
            environ['HTTP_AUTHORIZATION'] = 'Bearer ' + token_from_query
        return self.wsgi_app(environ, start_response)


app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app.wsgi_app = InterceptRequestMiddleware(app.wsgi_app)
auth = HTTPTokenAuth(scheme='Bearer')


@auth.verify_token
def verify_token(token):
    """Accept FlashArray (UUID-style) or FlashBlade (T- prefixed) tokens."""
    token_shape = re.compile(
        r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
        "|^T-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")
    if token_shape.search(token) is None:
        return False
    return token

Pure Storage Prometheus Exporter

42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | ` 97 | 98 |
TypeEndpointGET parameters
FlashArray/metrics/flasharrayendpoint, apitoken (optional, required only if authentication token is not provided)
FlashArray hosts/metrics/flasharrayendpoint, apitoken (optional, required only if authentication token is not provided)Retrieves only host related metrics
FlashArray volumes/metrics/flasharrayendpoint, apitoken (optional, required only if authentication token is not provided)Retrieves only volume related metrics
FlashArray pods/metrics/flasharrayendpoint, apitoken (optional, required only if authentication token is not provided)Retrieves only pod related metrics
FlashBlade/metrics/flashbladeendpoint, apitoken (optional, required only if authentication token is not provided)
FlashBlade array/metrics/flashbladeendpoint, apitoken (optional, required only if authentication token is not provided)Provides only array related metrics.
FlashBlade clients/metrics/flashbladeendpoint, apitoken (optional, required only if authentication token is not provided)Provides only client related metrics. This is the most time expensive query
FlashBlade quotas/metrics/flashbladeendpoint, apitoken (optional, required only if authentication token is not provided)Provides only quota related metrics.
@auth.login_required
def route_array(array_type, m_type):
    """Produce FlashArray and FlashBlade metrics.

    Dispatches to the collector matching `array_type`; unknown metric
    types fall back to 'all', unknown array types yield 404.
    """
    collector = None
    if array_type == 'flasharray':
        # Idiom fix: `m_type not in ...` instead of `not m_type in ...`.
        if m_type not in ['array', 'volumes', 'hosts', 'pods']:
            m_type = 'all'
        collector = FlasharrayCollector
    elif array_type == 'flashblade':
        if m_type not in ['array', 'clients', 'usage']:
            m_type = 'all'
        collector = FlashbladeCollector
    else:
        abort(404)

    registry = CollectorRegistry()
    try:
        endpoint = request.args.get('endpoint', None)
        token = auth.current_user()
        registry.register(collector(endpoint, token, m_type))
    except Exception as e:
        # Fix: Logger.warn is deprecated in favor of Logger.warning.
        app.logger.warning('%s: %s', collector.__name__, str(e))
        abort(500)

    resp = make_response(generate_latest(registry), 200)
    resp.headers['Content-type'] = CONTENT_TYPE_LATEST
    return resp


# NOTE(review): the extraction stripped the Flask URL converters from the
# route strings; restored as '<m_type>' to match the handler signatures —
# confirm against the upstream file.
@app.route('/metrics/flasharray/<m_type>', methods=['GET'])
def route_flasharray(m_type: str):
    return route_array('flasharray', m_type)


@app.route('/metrics/flasharray', methods=['GET'])
def route_flasharray_all():
    return route_flasharray('all')


@app.route('/metrics/flashblade/<m_type>', methods=['GET'])
def route_flashblade(m_type: str):
    return route_array('flashblade', m_type)


@app.route('/metrics/flashblade', methods=['GET'])
def route_flashblade_all():
    return route_flashblade('all')


@app.errorhandler(400)
def route_error_400(error):
    """Handle invalid request errors."""
    return 'Invalid request parameters', 400


@app.errorhandler(404)
def route_error_404(error):
    """Handle 404 (HTTP Not Found) errors."""
    return 'Not found', 404


@app.errorhandler(500)
def route_error_500(error):
    """Handle server-side errors."""
    return 'Internal server error', 500


# Run in debug mode when not called by WSGI
if __name__ == "__main__":
    app.logger.setLevel(logging.DEBUG)
    app.logger.debug('running in debug mode...')
    app.run(host="0.0.0.0", port=8080, debug=True)
import urllib3
import six
from purity_fb import PurityFb, rest

# Disable certificate warnings: FlashBlade management endpoints commonly
# use self-signed certificates.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class FlashBlade():
    """
    Base class for FlashBlade Prometheus array info.

    Wraps the purity_fb REST client and caches each listing after the
    first fetch, so repeated metric collections reuse one REST round trip.
    """

    def __init__(self, endpoint, api_token):
        self.flashblade = None
        try:
            self.flashblade = PurityFb(host=endpoint)
            self.flashblade.disable_verify_ssl()
            self.flashblade._api_client.user_agent = 'Purity_FB_Prometheus_exporter/1.0'
            self.flashblade.request_timeout = urllib3.Timeout(connect=2.0, read=60.0)
            self.flashblade.login(api_token)
        except Exception:
            # Bug fix: this clause was `except Exceprion` (typo), which
            # raised NameError whenever login/setup actually failed.
            pass
        # Per-listing caches, populated lazily by the getters.
        self.filesystems = []
        self.buckets = []
        self.array_performance = {'nfs': None, 'http': None,
                                  's3': None, 'smb': None}
        self.array_specific_perf = {'nfs': None, 'http': None, 's3': None}
        self.array_space = None
        self.nfs_filesystems_performance = []
        self.buckets_performance = []
        self.buckets_replica_links = []
        self.filesystems_replica_links = []
        self.users_usage = []
        self.groups_usage = []
        self.clients_performance = []

    def __del__(self):
        # End the REST session when the wrapper goes away.
        if self.flashblade is not None:
            self.flashblade.logout()

    def get_array_info(self):
        """Return the array-level info object."""
        return self.flashblade.arrays.list_arrays().items[0]
self.flashblade.arrays.list_arrays().items[0] 49 | 50 | def get_open_alerts(self): 51 | return self.flashblade.alerts.list_alerts(filter="state='open'").items 52 | 53 | def get_hardware_status(self): 54 | return self.flashblade.hardware.list_hardware().items 55 | 56 | def get_array_performance(self, proto): 57 | if self.array_performance[proto] is None: 58 | try: 59 | self.array_performance[proto] = self.flashblade.arrays.list_arrays_performance(protocol=proto).items[0] 60 | except Exception: 61 | pass 62 | return self.array_performance[proto] 63 | 64 | def get_array_specific_performance(self, proto): 65 | if proto == 'http': 66 | if self.array_specific_perf['http'] is None: 67 | try: 68 | self.array_specific_perf['http'] = self.flashblade.arrays.list_arrays_http_specific_performance().items[0] 69 | except Exception: 70 | pass 71 | return self.array_specific_perf['http'] 72 | if proto == 'nfs': 73 | if self.array_specific_perf['nfs'] is None: 74 | try: 75 | self.array_specific_perf['nfs'] = self.flashblade.arrays.list_arrays_nfs_specific_performance().items[0] 76 | except Exception: 77 | pass 78 | return self.array_specific_perf['nfs'] 79 | if proto == 's3': 80 | if self.array_specific_perf['s3'] is None: 81 | try: 82 | self.array_specific_perf['s3'] = self.flashblade.arrays.list_arrays_s3_specific_performance().items[0] 83 | except Exception: 84 | pass 85 | return self.array_specific_perf['s3'] 86 | 87 | def get_filesystems(self): 88 | if not self.filesystems: 89 | try: 90 | self.filesystems = self.flashblade.file_systems.list_file_systems().items 91 | except Exception: 92 | pass 93 | return self.filesystems 94 | 95 | def get_array_space(self): 96 | if self.array_space is None: 97 | try: 98 | self.array_space = self.flashblade.arrays.list_arrays_space().items[0] 99 | except Exception: 100 | pass 101 | return self.array_space 102 | 103 | def get_buckets(self): 104 | if not self.buckets: 105 | try: 106 | self.buckets = self.flashblade.buckets.list_buckets().items 
107 | except Exception: 108 | pass 109 | return self.buckets 110 | 111 | def get_nfs_filesystems_performance(self): 112 | if not self.nfs_filesystems_performance: 113 | for f in self.get_filesystems(): 114 | try: 115 | self.nfs_filesystems_performance.append( 116 | self.flashblade.file_systems.list_file_systems_performance(protocol='nfs',names=[f.name]).items[0]) 117 | except Exception: 118 | pass 119 | return self.nfs_filesystems_performance 120 | 121 | def get_buckets_performance(self): 122 | if not self.buckets_performance: 123 | for b in self.get_buckets(): 124 | try: 125 | self.buckets_performance.append( 126 | self.flashblade.buckets.list_buckets_s3_specific_performance(names=[b.name]).items[0]) 127 | except Exception: 128 | pass 129 | return self.buckets_performance 130 | 131 | def get_bucket_replica_links(self): 132 | if not self.buckets_replica_links: 133 | try: 134 | self.buckets_replica_links = self.flashblade.bucket_replica_links.list_bucket_replica_links().items 135 | except Exception: 136 | pass 137 | return self.buckets_replica_links 138 | 139 | def get_filesystem_replica_links(self): 140 | if not self.filesystems_replica_links: 141 | try: 142 | self.filesystems_replica_links = self.flashblade.file_system_replica_links.list_file_system_replica_links().items 143 | except Exception: 144 | pass 145 | return self.filesystems_replica_links 146 | 147 | def get_users_usage(self): 148 | if not self.users_usage: 149 | for f in self.get_filesystems(): 150 | try: 151 | uu = self.flashblade.usage_users.list_user_usage(file_system_names=[f.name]).items 152 | if len(uu) == 0: 153 | continue 154 | self.users_usage = self.users_usage + uu 155 | except Exception: 156 | pass 157 | return self.users_usage 158 | 159 | def get_groups_usage(self): 160 | if not self.groups_usage: 161 | for f in self.get_filesystems(): 162 | try: 163 | gu = self.flashblade.usage_groups.list_group_usage(file_system_names=[f.name]).items 164 | if len(gu) == 0: 165 | continue 166 | 
import re
from prometheus_client.core import GaugeMetricFamily


class ArrayHardwareMetrics():
    """
    Base class for FlashBlade Prometheus hardware metrics
    """
    def __init__(self, fb):
        self.fb = fb
        # Fix: initialize the attributes that _array_hardware_status()
        # actually populates and get_metrics() yields (the original
        # initialized unused names `fb_health`/`fm_health` instead).
        self.hardware_status = None
        self.chassis_health = None
        self.flashblade_health = None
        self.fabricmodule_health = None
        self.fmcomponent_health = None
        self.power_health = None

        ### NOT YET IMPLEMENTED
        # self.xfm_health = None
        # self.xfmcomponent_health = None
        # self.temperature = None
        # self.power = None

    @staticmethod
    def _slot_label(slot):
        """Return the slot as a label value, empty when not reported.

        Bug fix: the original used `str(comp.slot) or ''`, but str(None)
        is the truthy string 'None', so the fallback never applied and
        slotless components got the literal label 'None'.
        """
        return '' if slot is None else str(slot)

    def _array_hardware_status(self):
        """
        Create metric of gauge types for components status.

        WARNING: I do not have an External Fabric Module(xfm) to test against.
        On my test system all components have a temperature field, but
        they all report None. Those sections are commented out.
        """
        data = self.fb.get_hardware_status()

        self.hardware_status = GaugeMetricFamily(
            'purefb_hw_status',
            'Hardware components status',
            labels=['model', 'name', 'serial', 'type'])

        self.chassis_health = GaugeMetricFamily(
            'purefb_hardware_chassis_health',
            'FlashBlade hardware chassis health status',
            labels=['index', 'name', 'serial'])
        self.flashblade_health = GaugeMetricFamily(
            'purefb_hardware_flashblade_health',
            'FlashBlade hardware flashblade health status',
            labels=['chassis', 'model', 'name', 'serial', 'slot'])
        self.fabricmodule_health = GaugeMetricFamily(
            'purefb_hardware_fabricmodule_health',
            'FlashBlade hardware fabric module health status',
            labels=['chassis', 'model', 'name', 'serial', 'slot'])
        self.fmcomponent_health = GaugeMetricFamily(
            'purefb_hardware_fmcomponent_health',
            'FlashBlade hardware fabric module component health status',
            labels=['chassis', 'fabricmodule', 'model', 'name', 'serial',
                    'slot', 'type'])
        self.power_health = GaugeMetricFamily(
            'purefb_hardware_power_health',
            'FlashBlade hardware power health status',
            labels=['chassis', 'model', 'name', 'serial', 'slot'])

        ### NOT YET IMPLEMENTED: external fabric modules, temperature and
        ### voltage metrics — the REST listing does not report usable data
        ### on the tested system.

        # Component names look like CH0.FB3, CH0.FM1, CH0.FM1.ETH2, CH0.PWR0.
        re_fb = re.compile(r"^(CH\d+)\.(FB[0-9]+)$")
        re_fm = re.compile(r"^(CH\d+)\.(FM[0-9]+)$")
        re_fmcomponent = re.compile(r"^(CH\d+)\.(FM\d+)\.([A-Z]+)([0-9]+)$")
        re_pwr = re.compile(r"^(CH\d+)\.(PWR[0-9]+)$")

        for comp in data:
            if comp.status in ['unused', 'not_installed']:
                continue
            component_name = comp.name
            component_state = 1 if comp.status in ['healthy'] else 0
            component_type = comp.type

            # Simple component health metric
            self.hardware_status.add_metric(
                [comp.model or '', comp.name, comp.serial or '', comp.type],
                component_state)

            # Types are per https://purity-fb.readthedocs.io/en/latest/Hardware/
            # Chassis
            if component_type == 'ch':
                self.chassis_health.add_metric(
                    [str(comp.index), comp.name, comp.serial or ''],
                    component_state)
                continue

            # Flash Blade
            elif component_type == 'fb':
                detail = re_fb.match(component_name)
                chassis = detail.group(1)
                self.flashblade_health.add_metric(
                    [chassis, comp.model or '', comp.name, comp.serial or '',
                     self._slot_label(comp.slot)],
                    component_state)
                continue

            # Fabric Module
            elif component_type == 'fm':
                detail = re_fm.match(component_name)
                chassis = detail.group(1)
                self.fabricmodule_health.add_metric(
                    [chassis, comp.model or '', comp.name, comp.serial or '',
                     self._slot_label(comp.slot)],
                    component_state)
                continue

            # FM sub-components
            elif re.match(r"^CH\d+\.FM\d+\.[A-Z]+[0-9]+$", component_name):
                detail = re_fmcomponent.match(component_name)
                chassis = detail.group(1)
                fabricmodule = detail.group(1) + '.' + detail.group(2)

                # Component health status
                self.fmcomponent_health.add_metric(
                    [chassis, fabricmodule, comp.model or '', comp.name,
                     comp.serial or '', self._slot_label(comp.slot),
                     comp.type],
                    component_state)

            # Power
            elif component_type == 'pwr':
                detail = re_pwr.match(component_name)
                chassis = detail.group(1)
                self.power_health.add_metric(
                    [chassis, comp.model or '', comp.name, comp.serial or '',
                     self._slot_label(comp.slot)],
                    component_state)
                continue

    def get_metrics(self):
        """Collect and yield all hardware gauge families."""
        self._array_hardware_status()
        yield self.hardware_status
        yield self.chassis_health
        yield self.flashblade_health
        yield self.fabricmodule_health
        yield self.fmcomponent_health
        yield self.power_health
This exporter is deprecated in favor of the individual OpenMetrics exporters for [FlashArray](https://github.com/PureStorage-OpenConnect/pure-fa-openmetrics-exporter) and [FlashBlade](https://github.com/PureStorage-OpenConnect/pure-fb-openmetrics-exporter) 4 | 5 | # Pure Storage Prometheus exporter 6 | Prometheus exporter for Pure Storage FlashArrays and FlashBlades. 7 | 8 | ## Support Statement 9 | These exporters are provided under Best Efforts support by the Pure Portfolio Solutions Group, Open Source Integrations team. 10 | For feature requests and bugs please use GitHub Issues. 11 | We will address these as soon as we can, but there are no specific SLAs. 12 | ## 13 | 14 | ### Overview 15 | 16 | This application aims to help monitor Pure Storage FlashArrays and FlashBlades by providing an "exporter", which means it extracts data from the Purity API and converts it to a format which is easily readable by Prometheus. 17 | 18 | The stateless design of the exporter allows for easy configuration management as well as scalability for a whole fleet of Pure Storage systems. Each time Prometheus scrapes metrics for a specific system, it should provide the hostname via GET parameter and the API token as Authorization token to this exporter. 19 | 20 | --- 21 | 22 | **Note**: The previous method to provide the Pure API token via a GET parameter is now deprecated and will be removed in the next major version. 23 | 24 | --- 25 | 26 | To monitor your Pure Storage appliances, you will need to create a new dedicated user on your array, and assign read-only permissions to it. Afterwards, you also have to create a new API key. 27 | The exporter is provided as three different options: 28 | 29 | - pure-exporter. Full exporter for both FlashArray and FlashBlade in a single bundle 30 | - pure-fa-exporter. FlashArray exporter 31 | - pure-fb-exporter. FlashBlade exporter 32 | 33 | 34 | ### Building and Deploying 35 | 36 | The exporter is preferably built and launched via Docker.
You can also scale the exporter deployment to multiple containers on Kubernetes thanks to the stateless nature of the application. 37 | 38 | --- 39 | 40 | #### The official docker images are available at Quay.io 41 | 42 | ```shell 43 | docker pull quay.io/purestorage/pure-exporter:1.2.5-a 44 | ``` 45 | 46 | or for the FlashArray exporter 47 | 48 | ```shell 49 | docker pull quay.io/purestorage/pure-fa-exporter:1.2.5-a 50 | ``` 51 | or for the FlashBlade exporter 52 | 53 | ```shell 54 | docker pull quay.io/purestorage/pure-fb-exporter:1.2.5-a 55 | ``` 56 | --- 57 | 58 | To build and deploy the application via Docker, your local linux user should be added to the `docker` group in order to be able to communicate with the Docker daemon. (If this is not possible, you can still use sudo) 59 | 60 | The detailed description on how to do that can be found on the [Docker](https://docs.docker.com/engine/install/) official documentation for your operating system. 61 | To run a simple instance of the exporter, run: 62 | ```bash 63 | make -f Makefile.fa test 64 | make -f Makefile.fb test 65 | make -f Makefile.mk test 66 | ``` 67 | 68 | The Makefile currently features these targets: 69 | - **build** - builds the docker image with preconfigured tags. 70 | - **test** - spins up a new docker container with all required parameters. 71 | 72 | 73 | ### Local development 74 | 75 | The application is usually not run by itself, but rather with the gunicorn WSGI server. If you want to contribute to the development, you can run the exporter locally without a WSGI server, by executing the application directly. 76 | 77 | The following commands are required for a development setup: 78 | ```bash 79 | # it is recommended to use virtual python environments!
80 | python -m venv env 81 | source ./env/bin/activate 82 | 83 | # install dependencies 84 | python -m pip install -r requirements.txt 85 | 86 | # run the application in debug mode 87 | python pure_exporter.py 88 | ``` 89 | Use the same approach to modify the FlashArray and/or the FlashBlade exporter, by simply using the related requirements file. 90 | 91 | ### Scraping endpoints 92 | 93 | The exporter uses a RESTful API schema to provide Prometheus scraping endpoints. 94 | 95 | **Authentication** 96 | 97 | Authentication is used by the exporter as the mechanism to cross authenticate to the scraped appliance, therefore for each array it is required to provide the REST API token for an account that has a 'readonly' role. The api-token must be provided in the http request using the HTTP Authorization header of type 'Bearer'. This is achieved by specifying the api-token value as the authorization parameter of the specific job in the Prometheus configuration file. As an alternative, it is possible to provide the api-token as a request argument, using the *apitoken* key. *Note* this option is deprecated and will be removed from the next releases.
98 | 99 | 100 | The full exporter understands the following requests: 101 | 102 | System | URL | GET parameters | description 103 | ---|---|---|--- 104 | FlashArray | http://\:\/metrics/flasharray | endpoint| Full array metrics 105 | FlashArray | http://\:\/metrics/flasharray/array | endpoint | Array only metrics 106 | FlashArray | http://\:\/metrics/flasharray/volumes | endpoint | Volumes only metrics 107 | FlashArray | http://\:\/metrics/flasharray/hosts | endpoint | Hosts only metrics 108 | FlashArray | http://\:\/metrics/flasharray/pods | endpoint| Pods only metrics 109 | FlashBlade | http://\:\/metrics/flashblade | endpoint | Full array metrics 110 | FlashBlade | http://\:\/metrics/flashblade/array | endpoint | Array only metrics 111 | FlashBlade | http://\:\/metrics/flashblade/clients | endpoint | Clients only metrics 112 | FlashBlade | http://\:\/metrics/flashblade/quotas | endpoint | Quotas only metrics 113 | 114 | 115 | The FlashArray-only and FlashBlade only exporters use a slightly different schema, which consists of the removal of the flasharray|flashblade string from the path. 
116 | 117 | **FlashArray** 118 | 119 | URL | GET parameters | description 120 | ---|---|--- 121 | http://\:\/metrics | endpoint | Full array metrics 122 | http://\:\/metrics/array | endpoint | Array only metrics 123 | http://\:\/metrics/volumes | endpoint | Volumes only metrics 124 | http://\:\/metrics/hosts | endpoint | Hosts only metrics 125 | http://\:\/metrics/pods | endpoint | Pods only metrics 126 | 127 | **FlashBlade** 128 | 129 | URL | GET parameters | description 130 | ---|---|--- 131 | http://\:\/metrics | endpoint | Full array metrics 132 | http://\:\/metrics/array | endpoint | Array only metrics 133 | http://\:\/metrics/clients | endpoint | Clients only metrics 134 | http://\:\/metrics/quotas | endpoint | Quotas only metrics 135 | 136 | 137 | Depending on the target array, scraping for the whole set of metrics could result into timeout issues, in which case it is suggested either to increase the scraping timeout or to scrape each single endpoint instead. 138 | 139 | 140 | ### Prometheus configuration examples 141 | 142 | The [config](config) directory provides a couple of Prometheus configuration examples that can be used as the starting point to build your own solution. 143 | 144 | ### Usage example 145 | 146 | In a typical production scenario, it is recommended to use a visual frontend for your metrics, such as [Grafana](https://github.com/grafana/grafana). Grafana allows you to use your Prometheus instance as a datasource, and create Graphs and other visualizations from PromQL queries. Grafana, Prometheus, are all easy to run as docker containers. 147 | 148 | To spin up a very basic set of those containers, use the following commands: 149 | ```bash 150 | # Pure exporter 151 | docker run -d -p 9491:9491 --name pure-exporter quay.io/purestorage/pure-exporter: 152 | 153 | # Prometheus with config via bind-volume (create config first!) 
154 | docker run -d -p 9090:9090 --name=prometheus -v /tmp/prometheus-pure.yml:/etc/prometheus/prometheus.yml -v /tmp/prometheus-data:/prometheus prom/prometheus:latest 155 | 156 | # Grafana 157 | docker run -d -p 3000:3000 --name=grafana -v /tmp/grafana-data:/var/lib/grafana grafana/grafana 158 | ``` 159 | Please have a look at the documentation of each image/application for adequate configuration examples. 160 | 161 | 162 | ### Bugs and Limitations 163 | 164 | * Pure FlashBlade REST APIs are not designed for efficiently reporting on full clients and objects quota KPIs, therefore it is suggested to scrape the "array" metrics preferably and use the "clients" and "quotas" metrics individually and with a lower frequency than the others. In any case, as a general rule, it is advisable not to lower the scraping interval down to less than 30 sec. In case you experience timeout issues, you may want to increase the internal Gunicorn timeout by specifically setting the `--timeout` variable and appropriately reduce the scraping interval as well. 165 | 166 | * By default the number of workers spawned by Gunicorn is set to 2 and this is not optimal when monitoring a relatively large amount of arrays. The suggested approach is therefore to run the exporter with a number of workers that approximately matches the number of arrays to be scraped. 167 | 168 | 169 | ### License 170 | 171 | This project is licensed under the Apache 2.0 License - see the [LICENSE.md](LICENSE.md) file for details 172 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions.
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /extra/monitoring-stack/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | ##################### Grafana Configuration Defaults ##################### 2 | # 3 | # Do not modify this file in grafana installs 4 | # 5 | 6 | # possible values : production, development 7 | app_mode = production 8 | 9 | # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty 10 | instance_name = ${HOSTNAME} 11 | 12 | #################################### Paths ############################### 13 | [paths] 14 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) 15 | data = data 16 | 17 | # Temporary files in `data` directory older than given duration will be removed 18 | temp_data_lifetime = 24h 19 | 20 | # Directory where grafana can store logs 21 | logs = data/log 22 | 23 | # Directory where grafana will automatically scan and look for plugins 24 | plugins = data/plugins 25 | 26 | # folder that contains provisioning config files 
that grafana will apply on startup and while running. 27 | provisioning = conf/provisioning 28 | 29 | #################################### Server ############################## 30 | [server] 31 | # Protocol (http, https, h2, socket) 32 | protocol = http 33 | 34 | # The ip address to bind to, empty will bind to all interfaces 35 | http_addr = 36 | 37 | # The http port to use 38 | http_port = 3000 39 | 40 | # The public facing domain name used to access grafana from a browser 41 | domain = localhost 42 | 43 | # Redirect to correct domain if host header does not match domain 44 | # Prevents DNS rebinding attacks 45 | enforce_domain = false 46 | 47 | # The full public facing url 48 | root_url = %(protocol)s://%(domain)s:%(http_port)s/ 49 | 50 | # Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons. 51 | serve_from_sub_path = false 52 | 53 | # Log web requests 54 | router_logging = false 55 | 56 | # the path relative working path 57 | static_root_path = public 58 | 59 | # enable gzip 60 | enable_gzip = false 61 | 62 | # https certs & key file 63 | cert_file = 64 | cert_key = 65 | 66 | # Unix socket path 67 | socket = /tmp/grafana.sock 68 | 69 | #################################### Database ############################ 70 | [database] 71 | # You can configure the database connection by specifying type, host, name, user and password 72 | # as separate properties or as on string using the url property. 73 | 74 | # Either "mysql", "postgres" or "sqlite3", it's your choice 75 | type = sqlite3 76 | host = 127.0.0.1:3306 77 | name = grafana 78 | user = root 79 | # If the password contains # or ; you have to wrap it with triple quotes. 
Ex """#password;""" 80 | password = 81 | # Use either URL or the previous fields to configure the database 82 | # Example: mysql://user:secret@host:port/database 83 | url = 84 | 85 | # Max idle conn setting default is 2 86 | max_idle_conn = 2 87 | 88 | # Max conn setting default is 0 (mean not set) 89 | max_open_conn = 90 | 91 | # Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours) 92 | conn_max_lifetime = 14400 93 | 94 | # Set to true to log the sql calls and execution times. 95 | log_queries = 96 | 97 | # For "postgres", use either "disable", "require" or "verify-full" 98 | # For "mysql", use either "true", "false", or "skip-verify". 99 | ssl_mode = disable 100 | 101 | ca_cert_path = 102 | client_key_path = 103 | client_cert_path = 104 | server_cert_name = 105 | 106 | # For "sqlite3" only, path relative to data_path setting 107 | path = grafana.db 108 | 109 | # For "sqlite3" only. cache mode setting used for connecting to the database 110 | cache_mode = private 111 | 112 | #################################### Cache server ############################# 113 | [remote_cache] 114 | # Either "redis", "memcached" or "database" default is "database" 115 | type = database 116 | 117 | # cache connectionstring options 118 | # database: will use Grafana primary database. 119 | # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'. 120 | # memcache: 127.0.0.1:11211 121 | connstr = 122 | 123 | #################################### Data proxy ########################### 124 | [dataproxy] 125 | 126 | # This enables data proxy logging, default is false 127 | logging = false 128 | 129 | # How long the data proxy should wait before timing out default is 30 (seconds) 130 | timeout = 30 131 | 132 | # If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false. 
133 | send_user_header = false 134 | 135 | #################################### Analytics ########################### 136 | [analytics] 137 | # Server reporting, sends usage counters to stats.grafana.org every 24 hours. 138 | # No ip addresses are being tracked, only simple counters to track 139 | # running instances, dashboard and error counts. It is very helpful to us. 140 | # Change this option to false to disable reporting. 141 | reporting_enabled = true 142 | 143 | # Set to false to disable all checks to https://grafana.com 144 | # for new versions (grafana itself and plugins), check is used 145 | # in some UI views to notify that grafana or plugin update exists 146 | # This option does not cause any auto updates, nor send any information 147 | # only a GET request to https://grafana.com to get latest versions 148 | check_for_updates = true 149 | 150 | # Google Analytics universal tracking code, only enabled if you specify an id here 151 | google_analytics_ua_id = 152 | 153 | # Google Tag Manager ID, only enabled if you specify an id here 154 | google_tag_manager_id = 155 | 156 | #################################### Security ############################ 157 | [security] 158 | # default admin user, created on startup 159 | admin_user = admin 160 | 161 | # default admin password, can be changed before first start of grafana, or in profile settings 162 | admin_password = admin 163 | 164 | # used for signing 165 | secret_key = SW2YcwTIb9zpOOhoPsMm 166 | 167 | # disable gravatar profile images 168 | disable_gravatar = false 169 | 170 | # data source proxy whitelist (ip_or_domain:port separated by spaces) 171 | data_source_proxy_whitelist = 172 | 173 | # disable protection against brute force login attempts 174 | disable_brute_force_login_protection = false 175 | 176 | # set to true if you host Grafana behind HTTPS. default is false. 177 | cookie_secure = false 178 | 179 | # set cookie SameSite attribute. defaults to `lax`. 
can be set to "lax", "strict" and "none" 180 | cookie_samesite = lax 181 | 182 | # set to true if you want to allow browsers to render Grafana in a ,