├── .github
├── CODEOWNERS
├── ISSUE_TEMPLATE
│ ├── major-release.md
│ ├── minor-release.md
│ ├── patch-release.md
│ ├── redeliver_container_image.md
│ ├── bug_report.md
│ └── feature_request.md
├── FUNDING.yml
└── workflows
│ ├── python-publish.yml
│ ├── python-package.yml
│ └── codeql-analysis.yml
├── prometheus_api_client
├── py.typed
├── exceptions.py
├── __init__.py
├── utils.py
├── metrics_list.py
├── metric_plot.py
├── metric_range_df.py
├── metric_snapshot_df.py
├── metric.py
└── prometheus_connect.py
├── tests
├── __init__.py
├── test_with_metrics.py
├── mocked_network.py
├── test_metrics_list.py
├── test_lazy_imports.py
├── test_metric_snapshot_df.py
├── test_metric_range_df.py
├── test_metric.py
└── test_prometheus_connect.py
├── requirements-core.txt
├── MANIFEST.in
├── requirements.txt
├── .stickler.yml
├── docs
├── source
│ ├── modules.rst
│ └── prometheus_api_client.rst
├── index.rst
├── Makefile
├── make.bat
└── conf.py
├── OWNERS
├── Pipfile
├── pyproject.toml
├── .coafile
├── examples
└── amazon-prometheus.py
├── LICENSE
├── .pre-commit-config.yaml
├── .gitignore
├── setup.py
├── README.md
└── CHANGELOG.md
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | @4n4nd
2 |
--------------------------------------------------------------------------------
/prometheus_api_client/py.typed:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """Init unit tests."""
2 |
--------------------------------------------------------------------------------
/requirements-core.txt:
--------------------------------------------------------------------------------
1 | requests
2 | dateparser
3 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements.txt
2 | include requirements-core.txt
3 | prune tests
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | dateparser
3 | pandas>=1.4.0
4 | numpy
5 | matplotlib
6 | httmock
7 |
--------------------------------------------------------------------------------
/.stickler.yml:
--------------------------------------------------------------------------------
1 | ---
2 | linters:
3 | black:
4 | config: ./pyproject.toml
5 | fixer: true
6 | fixers:
7 | enable: true
8 |
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | prometheus_api_client
2 | =====================
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | prometheus_api_client
8 |
--------------------------------------------------------------------------------
/OWNERS:
--------------------------------------------------------------------------------
1 | # See the OWNERS docs at https://go.k8s.io/owners
2 |
3 | approvers:
4 | - 4n4nd
5 | - hemajv
6 | - sesheta
7 | reviewers:
8 | - chauhankaranraj
9 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/major-release.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Major release
3 | about: Create a new major release
4 | title: New major release
5 | assignees: sesheta
6 | labels: bot
7 | ---
8 |
9 | Hey, Kebechet!
10 |
11 | Create a new major release, please.
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/minor-release.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Minor release
3 | about: Create a new minor release
4 | title: New minor release
5 | assignees: sesheta
6 | labels: bot
7 | ---
8 |
9 | Hey, Kebechet!
10 |
11 | Create a new minor release, please.
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/patch-release.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Patch release
3 | about: Create a new patch release
4 | title: New patch release
5 | assignees: sesheta
6 | labels: bot
7 | ---
8 |
9 | Hey, Kebechet!
10 |
11 | Create a new patch release, please.
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/redeliver_container_image.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Deliver Container Image
about: Build a git tag and push it as a container image to Quay
4 | title: Deliver Container Image
5 | assignees: sesheta
6 | labels: bot
7 | ---
8 |
9 | Hey, AICoE-CI!
10 |
11 | Please build and deliver the following git tag:
12 |
13 | Tag: x.y.z
14 |
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | name = "pypi"
3 | url = "https://pypi.org/simple"
4 | verify_ssl = true
5 |
6 | [dev-packages]
7 | httmock = "*"
8 | pytest = "*"
9 | sphinx = "*"
10 | sphinx-rtd-theme = "*"
11 |
12 | [packages]
13 | requests = "*"
14 | dateparser = "*"
15 | pandas = ">=1.4.0"
16 | matplotlib = "*"
17 | numpy = "*"
18 |
19 | [requires]
20 | python_version = "3.13"
21 |
--------------------------------------------------------------------------------
/prometheus_api_client/exceptions.py:
--------------------------------------------------------------------------------
1 | """Project wide exception classes."""
2 |
3 |
class PrometheusApiClientException(Exception):
    """Raised when the Prometheus HTTP API answers with a response status code != 200."""
8 |
9 |
class MetricValueConversionError(Exception):
    """Raised when a string-typed metric value cannot be converted to a float."""
14 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 100
3 | target_version = ['py37']
4 | include = '\.pyi?$'
5 | exclude = '''
6 |
7 | (
8 | /(
9 | \.eggs # exclude a few common directories in the
10 | | \.git # root of the project
11 | | \.hg
12 | | \.mypy_cache
13 | | \.tox
14 | | \.venv
15 | | _build
16 | | buck-out
17 | | build
18 | | dist
19 | | docs
20 | )/
21 | )
22 | '''
23 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. Prometheus Client API Python documentation master file, created by
2 | sphinx-quickstart on Thu Mar 21 15:35:17 2019.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to Prometheus Client API Python's documentation!
7 | ========================================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 |
14 |
15 | Indices and tables
16 | ==================
17 |
18 | * :ref:`genindex`
19 | * :ref:`modindex`
20 | * :ref:`search`
21 |
--------------------------------------------------------------------------------
/.coafile:
--------------------------------------------------------------------------------
1 | [all]
2 | bears = LineCountBear, FilenameBear
3 | files = **.py, **.yml, **.rst, **.md
4 | ignore = **/__pycache__/**, **/__pycache__, __pycache__, __pycache__/**, **/*.pyc, *.pyc
5 | max_lines_per_file = 1000
6 | max_line_length = 120
7 |
8 | [all.python]
9 | bears = PycodestyleBear, PyDocStyleBear
10 | files = **.py
11 | language = Python
12 | editor = vim
13 | ignore = setup.py, docs/*, tests/*
14 |
15 | [all.yaml]
16 | bears = YAMLLintBear
17 | files = **.yaml, **.yml
18 | ignore = .zuul.yaml
19 | max_line_length = 120
20 |
21 | [zuul.yaml]
22 | bears = YAMLLintBear
23 | files = .zuul.yaml
24 | max_line_length = 180
25 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | labels: bug
5 | ---
6 |
7 | **Describe the bug**
8 | A clear and concise description of what the bug is.
9 |
10 | **To Reproduce**
11 | Steps to reproduce the behavior:
12 | 1. Go to '...'
13 | 2. Click on '....'
14 | 3. Scroll down to '....'
15 | 4. See error
16 |
17 | **Expected behavior**
18 | A clear and concise description of what you expected to happen.
19 |
20 | **Screenshots**
21 | If applicable, add screenshots to help explain your problem.
22 |
23 | **Additional context**
24 | Add any other context about the problem here.
25 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = _build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
20 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | labels: enhancement
5 | ---
6 |
7 | **Is your feature request related to a problem? Please describe.**
8 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
9 |
10 | **Describe the solution you'd like**
11 | A clear and concise description of what you want to happen.
12 |
13 | **Describe alternatives you've considered**
14 | A clear and concise description of any alternative solutions or features you've considered.
15 |
16 | **Additional context**
17 | Add any other context or screenshots about the feature request here.
18 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: 4n4nd # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
14 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/prometheus_api_client/__init__.py:
--------------------------------------------------------------------------------
1 | """A collection of tools to collect and manipulate prometheus metrics."""
2 |
3 | __title__ = "prometheus-connect"
4 | __version__ = "0.7.0"
5 |
6 | from .exceptions import PrometheusApiClientException, MetricValueConversionError
7 | def __getattr__(name):
8 | if name == "PrometheusConnect":
9 | from .prometheus_connect import PrometheusConnect
10 | return PrometheusConnect
11 | elif name == "Metric":
12 | from .metric import Metric
13 | return Metric
14 | elif name == "MetricsList":
15 | from .metrics_list import MetricsList
16 | return MetricsList
17 | elif name == "MetricSnapshotDataFrame":
18 | from .metric_snapshot_df import MetricSnapshotDataFrame
19 | return MetricSnapshotDataFrame
20 | elif name == "MetricRangeDataFrame":
21 | from .metric_range_df import MetricRangeDataFrame
22 | return MetricRangeDataFrame
23 | raise AttributeError(f"module {__name__} has no attribute {name}")
24 |
--------------------------------------------------------------------------------
/prometheus_api_client/utils.py:
--------------------------------------------------------------------------------
1 | """Some helpful functions used in the API."""
2 | import json
3 | import dateparser
4 |
5 |
def parse_datetime(date_string: str, settings: dict = None):
    """Wrap dateparser.parse; default settings are {"DATE_ORDER": "YMD"}."""
    # NOTE: a falsy settings value (None or {}) falls back to the default,
    # matching the original `settings or {...}` semantics.
    effective_settings = settings or {"DATE_ORDER": "YMD"}
    return dateparser.parse(str(date_string), settings=effective_settings)
10 |
11 |
def parse_timedelta(time_a: str = "now", time_b: str = "1d"):
    """Return timedelta for time_a - time_b."""
    end = parse_datetime(time_a)
    start = parse_datetime(time_b)
    return end - start
15 |
16 |
def pretty_print_metric(metric_data):
    """
    Pretty print the metric data downloaded using class PrometheusConnect.

    :param metric_data: (list) This is the metric data list returned from methods
        get_metric_range_data and get_current_metric_value
    """
    for metric_entry in metric_data:
        print(json.dumps(metric_entry, sort_keys=True, indent=4))
27 |
--------------------------------------------------------------------------------
/examples/amazon-prometheus.py:
--------------------------------------------------------------------------------
"""Example: query an Amazon Managed Service for Prometheus workspace using SigV4 auth."""
from prometheus_api_client import PrometheusConnect
from prometheus_api_client.utils import parse_datetime
from datetime import timedelta

from requests_auth_aws_sigv4 import AWSSigV4 # pip install requests-auth-aws-sigv4
# Optionally install boto3 if you'd like to use AWS CLI/SDK credentials

# Placeholders -- replace with your own AWS region and AMP workspace id.
region = 'us-east-1'
workspace_id = 'ws-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'

# Sign every request for the 'aps' service with SigV4.
auth = AWSSigV4('aps', region=region)

# disable_ssl=False keeps TLS certificate verification enabled.
prom = PrometheusConnect(url=f"https://aps-workspaces.{region}.amazonaws.com/workspaces/{workspace_id}", disable_ssl=False, auth=auth)

# Print all metrics
metrics = prom.all_metrics()
print(metrics)

# Get 'prometheus_ready' metric for the last 15 minutes
start_time = parse_datetime("15m")
end_time = parse_datetime("now")
chunk_size = timedelta(minutes=15)

# Fetch the range between start_time and end_time in 15-minute chunks.
metric_data = prom.get_metric_range_data(
    metric_name='prometheus_ready',
    start_time=start_time,
    end_time=end_time,
    chunk_size=chunk_size,
)

# Samples ('values') from the first returned series.
print(metric_data[0]['values'])
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 4n4nd
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v3
25 | - name: Set up Python
26 | uses: actions/setup-python@v3
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build
35 | - name: Publish package
36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
37 | with:
38 | user: __token__
39 | password: ${{ secrets.PYPI_API_TOKEN }}
40 |
--------------------------------------------------------------------------------
/tests/test_with_metrics.py:
--------------------------------------------------------------------------------
1 | """Common functions used for Metrics Class."""
2 |
3 | import json
4 | import os
5 |
6 |
class TestWithMetrics:  # noqa D101
    class Common(object):  # noqa D106
        def load_metrics(self):
            """Read metrics stored as jsons in './tests/metrics'."""
            self.raw_metrics_list = []
            self.raw_metrics_labels = []

            file_paths = []
            for dir_path, _, file_names in os.walk("./tests/metrics"):
                file_paths.extend(os.path.join(dir_path, name) for name in file_names)

            # Files with metrics need to be loaded in order by timestamp in their names.
            # Several tests depend on order of these files.
            for file_path in sorted(file_paths):
                with open(file_path) as json_fd:
                    metric_jsons = json.load(json_fd)
                self.raw_metrics_list.append(metric_jsons)

                # save label configs
                label_names = set()
                for metric_json in metric_jsons:
                    label_names.update(metric_json["metric"].keys())
                self.raw_metrics_labels.append(label_names)

        def test_setup(self):
            """Check if setup was done correctly."""
            self.assertEqual(8, len(self.raw_metrics_list), "incorrect number json files read")
34 |
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [ "master" ]
9 | pull_request:
10 | branches: [ "master" ]
11 |
12 | jobs:
13 | build:
14 |
15 | runs-on: ubuntu-latest
16 | strategy:
17 | fail-fast: false
18 | matrix:
19 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
20 |
21 | steps:
22 | - uses: actions/checkout@v3
23 | - name: Set up Python ${{ matrix.python-version }}
24 | uses: actions/setup-python@v3
25 | with:
26 | python-version: ${{ matrix.python-version }}
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | python -m pip install flake8 pytest
31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
32 | - name: Lint with flake8
33 | run: |
34 | # stop the build if there are Python syntax errors or undefined names
35 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
36 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
37 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
38 | - name: Test with pytest
39 | run: |
40 | PROM_URL="https://demo.promlabs.com/" pytest
41 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | repos:
3 | - repo: https://github.com/Lucas-C/pre-commit-hooks
4 | rev: v1.1.1
5 | hooks:
6 | - id: remove-tabs
7 |
8 | - repo: https://github.com/pre-commit/pre-commit-hooks
9 | rev: v2.0.0
10 | hooks:
11 | - id: trailing-whitespace
12 | - id: check-merge-conflict
13 | - id: end-of-file-fixer
14 | - id: check-added-large-files
15 | - id: check-byte-order-marker
16 | - id: check-case-conflict
17 | - id: check-docstring-first
18 | - id: check-json
19 | - id: check-symlinks
20 | - id: detect-private-key
21 | - id: check-ast
22 | - id: debug-statements
23 |
24 | - repo: https://github.com/pycqa/pydocstyle.git
25 | rev: 4.0.1
26 | hooks:
27 | - id: pydocstyle
28 |
29 | - repo: https://github.com/pre-commit/pre-commit-hooks
30 | rev: v2.3.0
31 | hooks:
32 | - id: check-toml
33 | - id: check-yaml
34 | - id: end-of-file-fixer
35 | - id: trailing-whitespace
36 |
37 | - repo: https://github.com/pre-commit/mirrors-mypy
38 | rev: v0.770
39 | hooks:
40 | - id: mypy
41 | exclude: '^(docs|tasks|tests)|setup\.py'
42 | args: [--ignore-missing-imports]
43 |
44 | - repo: https://github.com/psf/black
45 | rev: 19.10b0
46 | hooks:
47 | - id: black
48 |
49 | - repo: https://gitlab.com/PyCQA/flake8
50 | rev: '3.7.8'
51 | hooks:
52 | - id: flake8
53 | additional_dependencies: ['pep8-naming']
54 | # Ignore all format-related checks as Black takes care of those.
55 | args: ['--ignore', 'E2,W5', '--select', 'E,W,F,N', '--max-line-length=120']
56 |
--------------------------------------------------------------------------------
/docs/source/prometheus_api_client.rst:
--------------------------------------------------------------------------------
1 | prometheus\_api\_client package
2 | ===============================
3 |
4 | Submodules
5 | ----------
6 |
7 | prometheus\_api\_client.metric module
8 | -------------------------------------
9 |
10 | .. automodule:: prometheus_api_client.metric
11 | :members: Metric
12 | :special-members: __add__, __eq__, __str__
13 | :undoc-members:
14 | :show-inheritance:
15 |
16 | prometheus\_api\_client.metrics\_list module
17 | --------------------------------------------
18 |
19 | .. automodule:: prometheus_api_client.metrics_list
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
24 | prometheus\_api\_client.prometheus\_connect module
25 | --------------------------------------------------
26 |
27 | .. automodule:: prometheus_api_client.prometheus_connect
28 | :members:
29 | :undoc-members:
30 | :show-inheritance:
31 |
32 | prometheus\_api\_client.exceptions module
33 | --------------------------------------------------
34 |
35 | .. automodule:: prometheus_api_client.exceptions
36 | :members:
37 | :undoc-members:
38 | :show-inheritance:
39 |
40 | prometheus\_api\_client.metric\_snapshot\_df module
41 | ---------------------------------------------------
42 |
43 | .. automodule:: prometheus_api_client.metric_snapshot_df
44 | :members:
45 | :undoc-members:
46 | :show-inheritance:
47 |
48 | prometheus\_api\_client.metric\_range\_df module
49 | ---------------------------------------------------
50 |
51 | .. automodule:: prometheus_api_client.metric_range_df
52 | :members:
53 | :undoc-members:
54 | :show-inheritance:
55 |
56 | Module contents
57 | ---------------
58 |
59 | .. automodule:: prometheus_api_client
60 | :members:
61 | :undoc-members:
62 | :show-inheritance:
63 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | # pycharm
107 | .idea/
108 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
"""Setup for Prometheus Api Client module."""
import os
import setuptools

# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
7 |
8 |
def get_install_requires():
    """Get core requirements from requirements-core.txt.

    Returns the bare requirement name from each non-blank line.  Lines are
    stripped first: the previous implementation left the trailing newline
    attached to every entry and let blank lines (which read as "\n", a
    truthy string) through the filter.
    """
    with open("requirements-core.txt", "r") as requirements_file:
        return [
            line.strip().split(" ", maxsplit=1)[0]
            for line in requirements_file
            if line.strip()
        ]
14 |
15 |
def get_version():
    """Get package version from prometheus_api_client/__init__.py.

    :raises ValueError: when no __version__ assignment line is found.
    """
    with open(os.path.join("prometheus_api_client", "__init__.py")) as f:
        for line in f:
            if line.startswith("__version__ ="):
                # Strip whitespace and quotes instead of slicing fixed
                # offsets; the old `[1:-2]` chopped the last version
                # character whenever the file lacked a trailing newline.
                return line.split("=", 1)[1].strip().strip("\"'")
    raise ValueError("No version identifier found")
26 |
27 |
# Resolve the version once; the metadata below reuses it.
VERSION = get_version()
setuptools.setup(
    name="prometheus-api-client",
    version=VERSION,
    author="Anand Sanmukhani",
    author_email="asanmukhani@microsoft.com",
    description="A small python api to collect data from prometheus",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/4n4nd/prometheus-api-client-python",
    install_requires=get_install_requires(),
    extras_require={
        "dataframe": ["pandas>=1.4.0"],
        "numpy": ["numpy"],
        "plot": ["matplotlib"],
        "analytics": ["numpy"],
        "all": ["pandas>=1.4.0", "numpy", "matplotlib"],
    },
    packages=setuptools.find_packages(),
    # BUG FIX: package_data keys must be importable package names.  The old
    # key "prometheus-api-client" (hyphens) matched nothing, so py.typed was
    # never packaged and the PEP 561 typing marker was lost in distributions.
    package_data={"prometheus_api_client": ["py.typed"]},
    tests_require=["httmock"],
    classifiers=[
        "Programming Language :: Python :: 3.13",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
55 |
--------------------------------------------------------------------------------
/prometheus_api_client/metrics_list.py:
--------------------------------------------------------------------------------
1 | """A list of Metric objects."""
2 |
3 | from .metric import Metric
4 |
5 |
class MetricsList(list):
    """A Class to initialize a list of Metric objects at once.

    :param metric_data_list: (list|json) This is an individual metric or list of metrics received
        from prometheus as a result of a promql query.

    Example Usage:
      .. code-block:: python

          prom = PrometheusConnect()
          my_label_config = {'cluster': 'my_cluster_id', 'label_2': 'label_2_value'}
          metric_data = prom.get_metric_range_data(metric_name='up', label_config=my_label_config)

          metric_object_list = MetricsList(metric_data) # metric_object_list will be initialized as
                                                        # a list of Metric objects for all the
                                                        # metrics downloaded using get_metric query

    """

    def __init__(self, metric_data_list):
        """Class MetricsList constructor."""
        if not isinstance(metric_data_list, list):
            metric_data_list = [metric_data_list]

        # Flatten one level: an entry may itself be a list, e.g. when the
        # raw data was read from multiple json files.
        flattened = []
        for entry in metric_data_list:
            flattened.extend(entry if isinstance(entry, list) else [entry])

        # De-duplicate: metrics that compare equal are merged with `+=` so
        # each unique metric appears exactly once in the final list.
        unique_metrics = []
        for raw_metric in flattened:
            metric_object = Metric(raw_metric)
            try:
                position = unique_metrics.index(metric_object)
            except ValueError:
                unique_metrics.append(metric_object)
            else:
                unique_metrics[position] += metric_object

        super().__init__(unique_metrics)
48 |
--------------------------------------------------------------------------------
/tests/mocked_network.py:
--------------------------------------------------------------------------------
1 | """Test module for Base Mock Network."""
2 |
3 | from urllib.parse import urlparse
4 |
5 | from unittest import TestCase
6 |
7 | from httmock import HTTMock, all_requests, response, urlmatch
8 |
9 |
def mock_response(
    content,
    url=None,
    path="",
    headers=None,
    response_url=None,
    status_code=200,
    cookies=None,
    func=None,
):
    """Build an httmock handler, matching by url, by path, or on all requests."""
    if func is not None:
        handler = func
    else:
        # Default handler: answer every intercepted request with the
        # canned content / status / headers / cookies supplied above.
        def handler(url, request):
            resp = response(
                status_code=status_code, content=content, request=request, headers=headers
            )
            if cookies:
                resp.cookies = cookies
            resp.url = response_url if response_url else url
            return resp

    if url:
        parts = urlparse(url)
        return urlmatch(netloc=parts.netloc, path=parts.path)(func=handler)
    if path:
        return urlmatch(path=path)(func=handler)
    return all_requests(func=handler)
42 |
43 |
class ResponseMock(HTTMock):  # noqa D101
    # Class-level defaults; overwritten per instance once a request is seen.
    called = False
    call_count = 0

    def __init__(self, *args, **kwargs):  # noqa D102
        # `log_requests` is our own flag and must not reach mock_response().
        self.log_requests = kwargs.pop("log_requests", True)
        self.requests = []
        super().__init__(mock_response(*args, **kwargs))

    def intercept(self, request, **kwargs):  # noqa D102
        result = super(ResponseMock, self).intercept(request, **kwargs)
        # Record the request only when this mock actually handled it.
        if result is not None and self.log_requests:
            self.called = True
            self.call_count += 1
            self.requests.append(request)
        return result
62 |
63 |
class BaseMockedNetworkTestcase(TestCase):  # noqa D101
    def run(self, result=None):  # noqa D102
        # Every HTTP request made while a test runs gets a canned 403 reply,
        # so no test in a subclass can hit the real network.
        blanket_mock = ResponseMock("BOOM!", status_code=403)
        with blanket_mock:
            return super().run(result)

    @property
    def mock_response(self):  # noqa D102
        return ResponseMock
72 |
--------------------------------------------------------------------------------
/prometheus_api_client/metric_plot.py:
--------------------------------------------------------------------------------
1 | """plot code for metric class."""
2 |
3 | # This only gets called if there's a plot() call
4 | # This speeds up load time for all the non plot() users
5 | #
6 | # Only loads matplotlib etc during __init__
7 | #
8 |
class MetricPlot:
    r"""
    A Class for `MetricPlot` object.

    Wraps a single matplotlib figure/axis pair so that ``Metric.plot()`` can
    draw one or more time-series on the same graph.

    Internal use only at present

    """

    def __init__(self, *args, **kwargs):
        """Functions as a Constructor for the Metric object.

        Imports matplotlib lazily so non-plotting users never pay the import
        cost; extra args/kwargs are forwarded to ``pyplot.subplots``.

        :raises ImportError: if matplotlib is not installed
        """
        try:
            import matplotlib.pyplot as plt
            from pandas.plotting import register_matplotlib_converters

            register_matplotlib_converters()
        except ImportError as exce:
            # Chain the original exception so the underlying missing-package
            # details are kept in the traceback instead of being discarded.
            raise ImportError("matplotlib was not found") from exce

        # One graph with potentially N lines - if plot() is called twice
        self._plt = plt
        self._fig, self._axis = self._plt.subplots(*args, **kwargs)

    def plot_date(self, metric):
        """Plot a very simple line graph for the metric time-series.

        :param metric: object exposing ``metric_values.ds`` (datetimes, x axis),
            ``metric_values.y`` (values, y axis) and ``metric_name``
        """
        # If we made it here, then we know matplotlib is installed and available
        self._axis.plot_date(metric.metric_values.ds, metric.metric_values.y,
                             linestyle="solid",
                             label=str(metric.metric_name),
                             )
        self._fig.autofmt_xdate()
        # These are provided for documentation reasons only - it's presumptuous for this code to call them
        # self._axis.set_xlabel('Date/Time')
        # self._axis.set_ylabel('Metric')
        # self._axis.set_title('Prometheus')
        if len(self._axis.lines) > 1:
            # We show a legend (or update the legend) if there's more than one line on the plot
            self._axis.legend()

    def show(self, block=None):
        """Convenience show() call, forwarded to pyplot.show()."""
        self._plt.show(block=block)

    @property
    def plt(self):
        """Return the pyplot module used for the present plotting."""
        return self._plt

    @property
    def axis(self):
        """Return the axis value for the present plotting."""
        return self._axis

    @property
    def fig(self):
        """Return the figure value for the present plotting."""
        return self._fig
66 |
--------------------------------------------------------------------------------
/tests/test_metrics_list.py:
--------------------------------------------------------------------------------
1 | """Unit Tests for MetricsList."""
2 | import unittest
3 | import datetime
4 | from prometheus_api_client import MetricsList
5 | from .test_with_metrics import TestWithMetrics
6 |
7 |
class TestMetricsList(unittest.TestCase, TestWithMetrics.Common):
    """unit tests for MetricsList Class."""

    def setUp(self):
        """Load metrics stored as jsons."""
        self.load_metrics()

    def test_init(self):
        """Test if metrics initialized in the list are correct."""
        metrics = MetricsList(self.raw_metrics_list)
        # manually check the number of unique metric time-series
        self.assertEqual(9, len(metrics), "incorrect number of unique metric timeseries")

    def test_init_single_metric(self):  # noqa D102
        single_raw_metric = self.raw_metrics_list[0][0]
        self.assertEqual(
            1,
            len(MetricsList(single_raw_metric)),
            "incorrect number of Metric objects initialized for a raw metric not in a list",
        )
        self.assertEqual(
            1,
            len(MetricsList([single_raw_metric])),
            "incorrect number of Metric objects initialized for a single metric list",
        )

    def test_unique_metric_combination(self):  # noqa D102
        start_time = datetime.datetime(2019, 7, 28, 10, 0)
        start_time_plus_1m = datetime.datetime(2019, 7, 28, 10, 1)
        end_time = datetime.datetime(2019, 7, 30, 10, 0)
        end_time_minus_1m = datetime.datetime(2019, 7, 30, 9, 59)

        # the first entry merges several raw series, so its time span should
        # cover the full range of the merged data
        combined = MetricsList(self.raw_metrics_list)[0]
        self.assertTrue(
            combined.start_time > start_time, "Combined metric start time incorrect"
        )
        self.assertTrue(
            combined.start_time < start_time_plus_1m, "Combined metric start time incorrect"
        )
        self.assertTrue(combined.end_time < end_time, "Combined metric end time incorrect")
        self.assertTrue(
            combined.end_time > end_time_minus_1m, "Combined metric end time incorrect"
        )
57 |
58 |
if __name__ == "__main__":
    # Allow running this test module directly (python tests/test_metrics_list.py).
    unittest.main()
61 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ "master", v0.0.1, v0.2.1, v0.4.1, v0.4.2, v0.5.0 ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ "master" ]
20 | schedule:
21 | - cron: '36 4 * * 4'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 | permissions:
28 | actions: read
29 | contents: read
30 | security-events: write
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | language: [ 'python' ]
36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
37 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
38 |
39 | steps:
40 | - name: Checkout repository
41 | uses: actions/checkout@v3
42 |
43 | # Initializes the CodeQL tools for scanning.
44 | - name: Initialize CodeQL
45 | uses: github/codeql-action/init@v2
46 | with:
47 | languages: ${{ matrix.language }}
48 | # If you wish to specify custom queries, you can do so here or in a config file.
49 | # By default, queries listed here will override any specified in a config file.
50 | # Prefix the list here with "+" to use these queries and those in the config file.
51 |
52 | # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
53 | # queries: security-extended,security-and-quality
54 |
55 |
56 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
57 | # If this step fails, then you should remove it and run the build manually (see below)
58 | - name: Autobuild
59 | uses: github/codeql-action/autobuild@v2
60 |
61 | # ℹ️ Command-line programs to run using the OS shell.
62 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
63 |
64 | # If the Autobuild fails above, remove it and uncomment the following three lines.
    # modify them (or add more) to build your code if your project uses a compiled language; please refer to the EXAMPLE below for guidance.
66 |
67 | # - run: |
68 | # echo "Run, Build Application using script"
69 | # ./location_of_script_within_repo/buildscript.sh
70 |
71 | - name: Perform CodeQL Analysis
72 | uses: github/codeql-action/analyze@v2
73 |
--------------------------------------------------------------------------------
/tests/test_lazy_imports.py:
--------------------------------------------------------------------------------
1 | """Test lazy imports to ensure pandas/matplotlib are not loaded unnecessarily."""
2 | import unittest
3 | import sys
4 | import subprocess
5 |
6 |
class TestLazyImports(unittest.TestCase):
    """Test that PrometheusConnect can be imported without loading heavy dependencies."""

    def _run_in_subprocess(self, code, fail_map):
        """Run code in a subprocess and check exit codes against fail_map.

        Args:
            code: Python code to execute in subprocess
            fail_map: Dictionary mapping exit codes to error messages

        Raises:
            AssertionError: If subprocess exits with a code in fail_map or any non-zero code
        """
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
        )

        if result.returncode in fail_map:
            self.fail(fail_map[result.returncode])
        if result.returncode == 0:
            return
        # Include both stdout and stderr for better debugging
        output = []
        if result.stdout:
            output.append(f"stdout: {result.stdout}")
        if result.stderr:
            output.append(f"stderr: {result.stderr}")
        output_str = "\n".join(output) if output else "no output"
        self.fail(f"Subprocess failed with code {result.returncode}: {output_str}")

    def test_prometheus_connect_import_without_pandas_matplotlib_numpy(self):
        """Test that importing PrometheusConnect doesn't load pandas, matplotlib, or numpy."""
        # Run in a subprocess to avoid affecting other tests
        fail_map = {
            1: "pandas should not be loaded when importing PrometheusConnect",
            2: "matplotlib should not be loaded when importing PrometheusConnect",
            3: "numpy should not be loaded when importing PrometheusConnect",
        }
        code = """
import sys
from prometheus_api_client import PrometheusConnect

# Check that pandas, matplotlib, and numpy are not loaded
pandas_loaded = any(m == 'pandas' or m.startswith('pandas.') for m in sys.modules.keys())
matplotlib_loaded = any(m == 'matplotlib' or m.startswith('matplotlib.') for m in sys.modules.keys())
numpy_loaded = any(m == 'numpy' or m.startswith('numpy.') for m in sys.modules.keys())

if pandas_loaded:
    sys.exit(1)
if matplotlib_loaded:
    sys.exit(2)
if numpy_loaded:
    sys.exit(3)
sys.exit(0)
"""
        self._run_in_subprocess(code, fail_map)

    def test_prometheus_connect_instantiation_without_numpy(self):
        """Test that PrometheusConnect can be instantiated without loading numpy."""
        # Run in a subprocess to avoid affecting other tests
        fail_map = {
            1: "numpy should not be loaded when instantiating PrometheusConnect",
            2: "PrometheusConnect should be instantiated successfully",
        }
        code = """
import sys
from prometheus_api_client import PrometheusConnect

pc = PrometheusConnect(url='http://test.local:9090')

# Check that numpy is still not loaded after instantiation
numpy_loaded = any(m == 'numpy' or m.startswith('numpy.') for m in sys.modules.keys())

if numpy_loaded:
    sys.exit(1)
if pc is None:
    sys.exit(2)
sys.exit(0)
"""
        self._run_in_subprocess(code, fail_map)

    def test_metric_import_loads_pandas(self):
        """Test that importing Metric does load pandas (expected behavior)."""
        # This test doesn't remove modules, so it won't cause reload issues
        from prometheus_api_client import Metric

        # Check that pandas is loaded (this is expected for Metric)
        loaded_modules = sys.modules.keys()
        pandas_loaded = any(m == 'pandas' or m.startswith('pandas.') for m in loaded_modules)
        self.assertTrue(pandas_loaded, "pandas should be loaded when importing Metric")
97 |
98 |
if __name__ == '__main__':
    # Allow running this test module directly (python tests/test_lazy_imports.py).
    unittest.main()
101 |
--------------------------------------------------------------------------------
/prometheus_api_client/metric_range_df.py:
--------------------------------------------------------------------------------
1 | """A pandas.DataFrame subclass for Prometheus range vector responses."""
2 | try:
3 | from pandas import DataFrame, to_datetime
4 | from pandas._typing import Axes, Dtype
5 | except ImportError as e:
6 | raise ImportError(
7 | "Pandas is required for MetricRangeDataFrame class. "
8 | "Please install it with: pip install prometheus-api-client[dataframe] "
9 | "or pip install prometheus-api-client[all]"
10 | ) from e
11 |
12 | from typing import Optional, Sequence
13 |
14 | from prometheus_api_client.exceptions import MetricValueConversionError
15 |
16 |
class MetricRangeDataFrame(DataFrame):
    """Subclass to format and represent Prometheus query response as pandas.DataFrame.

    Assumes response is either a json or sequence of jsons.

    This class should be used specifically to instantiate a query response,
    where the query response has several timestamp values per series.
    That is, a range vector is expected.
    If the data is an instant vector, use MetricSnapshotDataFrame instead.

    Some argument descriptions in this docstring were copied from pandas.core.frame.DataFrame.

    :param data: (list|json) A single metric (json with keys "metric" and "values"/"value")
        or list of such metrics received from Prometheus as a response to query
    :param index: (pandas.Index|array-like) Index to use for resulting dataframe. Will default to
        pandas.RangeIndex if no indexing information part of input data and no index provided.
    :param columns: (pandas.Index|array-like) Column labels to use for resulting dataframe. Will
        default to list of labels + "timestamp" + "value" if not provided.
    :param dtype: (dtype) default None. Data type to force. Only a single dtype is allowed. If None, infer.
    :param copy: (bool) default False. Copy data from inputs. Only affects DataFrame / 2d ndarray input.
    :param ts_as_datetime: (bool) default True. Convert the timestamps returned by prometheus
        from float64 (unix time) to pandas datetime objects. This results in a pd.DatetimeIndex
        as the dtype of the index of the returned dataframe, instead of pd.Float64Index

    :raises TypeError: if the data is an instant vector instead of a range vector
    :raises MetricValueConversionError: if a string metric value cannot be cast to float

    Example Usage:
      .. code-block:: python

          prom = PrometheusConnect()
          metric_data = prom.get_metric_range_data(metric_name='up', label_config=my_label_config)
          metric_df = MetricRangeDataFrame(metric_data)
          metric_df.head()
          '''
          +------------+------------+-----------------+--------------------+-------+
          |            |  __name__  | cluster         | label_2            | value |
          +------------+------------+-----------------+--------------------+-------+
          | timestamp  |            |                 |                    |       |
          +============+============+=================+====================+=======+
          | 1577836800 | __up__     | cluster_id_0    | label_2_value_2    | 0     |
          +------------+------------+-----------------+--------------------+-------+
          | 1577836801 | __up__     | cluster_id_1    | label_2_value_3    | 1     |
          +------------+------------+-----------------+--------------------+-------+
          '''
    """

    def __init__(
        self,
        data=None,
        index: Optional[Axes] = None,
        columns: Optional[Axes] = None,
        dtype: Optional[Dtype] = None,
        copy: bool = False,
        ts_as_datetime: bool = True,
    ):
        """Functions as a constructor for MetricRangeDataFrame class."""
        row_data = None
        if data is not None:
            # if just a single json instead of list/set/other sequence of jsons,
            # treat as list with single entry
            if not isinstance(data, Sequence):
                data = [data]

            row_data = []
            for v in data:
                # an instant vector carries "value" instead of "values"
                if "value" in v:
                    raise TypeError(
                        "data must be a range vector. Expected range vector, got instant vector"
                    )
                for t in v["values"]:
                    metric_value = t[1]
                    if isinstance(metric_value, str):
                        try:
                            metric_value = float(metric_value)
                        except (TypeError, ValueError) as exc:
                            # chain the original error so the offending value
                            # stays visible in the traceback
                            raise MetricValueConversionError(
                                "Converting string metric value to float failed."
                            ) from exc
                    row_data.append({**v["metric"], "timestamp": t[0], "value": metric_value})

        # always initialize the underlying DataFrame (even when no data was
        # given) so the object is a valid, empty pandas.DataFrame instead of
        # an uninitialized subclass instance
        super(MetricRangeDataFrame, self).__init__(
            data=row_data, index=index, columns=columns, dtype=dtype, copy=copy
        )

        if data is not None:
            # convert to DateTime type instead of Float64
            if ts_as_datetime:
                self["timestamp"] = to_datetime(self["timestamp"], unit="s")

            self.set_index(["timestamp"], inplace=True)
104 |
--------------------------------------------------------------------------------
/tests/test_metric_snapshot_df.py:
--------------------------------------------------------------------------------
1 | """Unit Tests for MetricSnapshotDataFrame."""
2 | import unittest
3 | import json
4 | import os
5 |
6 | import pytest
7 |
8 | from prometheus_api_client import MetricSnapshotDataFrame
9 | from prometheus_api_client.exceptions import MetricValueConversionError
10 | from pandas.api.types import is_datetime64_any_dtype as is_dtype_datetime
11 |
12 |
class TestMetricSnapshotDataFrame(unittest.TestCase):  # noqa D101
    def setUp(self):
        """Read metrics stored as jsons in './tests/metrics'."""
        self.raw_metrics_list = []
        self.raw_metrics_labels = []
        for dir_path, _, file_names in os.walk("./tests/metrics"):
            for file_name in file_names:
                with open(os.path.join(dir_path, file_name), "rb") as metric_file:
                    metric_jsons = json.load(metric_file)

                # save json list
                self.raw_metrics_list.append(metric_jsons)

                # save label configs
                label_set = set()
                for metric in metric_jsons:
                    label_set.update(metric["metric"].keys())
                self.raw_metrics_labels.append(label_set)

    def test_setup(self):
        """Check if setup was done correctly."""
        self.assertEqual(
            8, len(self.raw_metrics_list), "incorrect number json files read (incorrect test setup)"
        )

    def test_init_shape(self):
        """Test if dataframe initialized is of correct shape."""
        # check shape
        # each json file contains 9 entries, 4 labels
        for metric_list in self.raw_metrics_list:
            # shape[1] = 4xlabels + timestamp + value
            self.assertEqual(
                (9, 6),
                MetricSnapshotDataFrame(metric_list).shape,
                "incorrect dataframe shape",
            )

    def test_init_columns(self):
        """Test if dataframe initialized has correct columns."""
        for labels, metric_list in zip(self.raw_metrics_labels, self.raw_metrics_list):
            self.assertEqual(
                labels.union({"timestamp", "value"}),
                set(MetricSnapshotDataFrame(metric_list).columns),
                "incorrect dataframe columns",
            )

    def test_timestamp_dtype_conversion(self):
        """Test if the timestamp in the dataframe initialized has correct dtype."""
        for metric_list in self.raw_metrics_list:
            # timestamp column should be datetime type by default
            self.assertTrue(
                is_dtype_datetime(MetricSnapshotDataFrame(metric_list)["timestamp"]),
                "incorrect dtype for timestamp column (expected datetime dtype)",
            )

            # if explicitly set to false, conversion to dt shouldnt take place
            self.assertFalse(
                is_dtype_datetime(
                    MetricSnapshotDataFrame(metric_list, ts_as_datetime=False)["timestamp"]
                ),
                "incorrect dtype for timestamp column (expected non-datetime dtype)",
            )

    def test_init_single_metric(self):
        """
        Test if dataframe initialized is of correct shape.

        1. json object is passed as data
        2. list with single json object is passed as data
        """
        single_metric = self.raw_metrics_list[0][0]
        # check shape when single json passed
        self.assertEqual(
            (1, 6),
            MetricSnapshotDataFrame(single_metric).shape,
            "incorrect dataframe shape when initialized with json",
        )
        # check shape when list with single json passed
        self.assertEqual(
            (1, 6),
            MetricSnapshotDataFrame([single_metric]).shape,
            "incorrect dataframe shape when initialized with single json list",
        )

    def test_init_multiple_metrics(self):
        """Ensures metric values provided as strings are properly cast to a numeric value (in this case, a float)."""
        raw_data = [
            {"metric": {"fake": "data",}, "value": [1627485628.789, "26.82068965517243"],},
            {"metric": {"fake": "data",}, "value": [1627485628.789, "26.82068965517243"],},
        ]

        snapshot_df = MetricSnapshotDataFrame(data=raw_data)

        self.assertTrue(isinstance(snapshot_df["value"][0], float))

    def test_init_invalid_float_error(self):
        """Ensures metric values provided as strings are properly cast to a numeric value (in this case, a float)."""
        raw_data = [
            {
                "metric": {"fake": "data",},
                "value": [1627485628.789, "26.8206896551724326.82068965517243"],
            },
        ]

        with pytest.raises(MetricValueConversionError):
            MetricSnapshotDataFrame(data=raw_data)
119 |
120 |
if __name__ == "__main__":
    # Allow running this test module directly (python tests/test_metric_snapshot_df.py).
    unittest.main()
123 |
--------------------------------------------------------------------------------
/tests/test_metric_range_df.py:
--------------------------------------------------------------------------------
1 | """Unit Tests for MetricRangeDataFrame."""
2 | import unittest
3 | import pandas as pd
4 | import pytest
5 |
6 | from prometheus_api_client import MetricRangeDataFrame
7 | from prometheus_api_client.exceptions import MetricValueConversionError
8 | from .test_with_metrics import TestWithMetrics
9 |
10 |
class TestMetricRangeDataFrame(unittest.TestCase, TestWithMetrics.Common):  # noqa D101
    def setUp(self):
        """Load metrics stored as jsons."""
        self.load_metrics()

    def test_setup(self):
        """Check if setup was done correctly."""
        self.assertEqual(
            8, len(self.raw_metrics_list), "incorrect number json files read (incorrect test setup)"
        )

    def test_init_shape(self):
        """Test if dataframe initialized is of correct shape."""
        # check shape
        # each metric json contains number of timestamps equal to number entries * number of timestamps in each series
        # we're assuming each series has the same number of timestamps
        # 3 labels
        for metric_list in self.raw_metrics_list:
            frame = MetricRangeDataFrame(metric_list)
            expected_rows = sum(len(series["values"]) for series in metric_list)
            # shape[1] = 4xlabels + value
            self.assertEqual(
                (len(frame.index.values), frame.shape[1]),
                (expected_rows, 5),
                "incorrect dataframe shape",
            )

    def test_init_timestamps(self):
        """Test if dataframe contains the correct timestamp indices."""
        # check that the timestamp indices in each series are the same
        for metric_list in self.raw_metrics_list:
            frame = MetricRangeDataFrame(metric_list, ts_as_datetime=False)
            expected_ts = {pair[0] for series in metric_list for pair in series["values"]}
            self.assertEqual(set(frame.index.values), expected_ts)

    def test_init_columns(self):
        """Test if dataframe initialized has correct columns."""
        for labels, metric_list in zip(self.raw_metrics_labels, self.raw_metrics_list):
            self.assertEqual(
                labels.union({"value"}),
                set(MetricRangeDataFrame(metric_list).columns),
                "incorrect dataframe columns",
            )

    def test_timestamp_dtype_conversion(self):
        """Test if the timestamp in the dataframe initialized has correct dtype."""
        for metric_list in self.raw_metrics_list:
            # index (prometheus metric timestamps) should be datetime type by default
            self.assertTrue(
                isinstance(MetricRangeDataFrame(metric_list).index, pd.DatetimeIndex),
                "incorrect dtype for timestamp column (expected datetime dtype)",
            )

            # if explicitly set to false, conversion to dt shouldnt take place
            self.assertFalse(
                isinstance(
                    MetricRangeDataFrame(metric_list, ts_as_datetime=False).index,
                    pd.DatetimeIndex,
                ),
                "incorrect dtype for timestamp column (expected non-datetime dtype)",
            )

    def test_init_single_metric(self):
        """
        Test if dataframe initialized is of correct shape.

        1. json object is passed as data
        2. list with single json object is passed as data
        """
        single_metric = self.raw_metrics_list[0][0]
        num_vals = len(single_metric["values"])
        # check shape when single json passed
        self.assertEqual(
            (num_vals, 5),
            MetricRangeDataFrame(single_metric).shape,
            "incorrect dataframe shape when initialized with json",
        )
        # check shape when list with single json passed
        self.assertEqual(
            (num_vals, 5),
            MetricRangeDataFrame([single_metric]).shape,
            "incorrect dataframe shape when initialized with single json list",
        )

    def test_init_invalid_string_value(self):
        """Ensures metric values provided as concatenated strings are caught with a meaningful exception."""
        bad_series = {
            "metric": {"__name__": "test_metric", "fake": "data",},
            "values": [[1627485628.789, "26.8206896551724326.82068965517243"]],
        }
        with pytest.raises(MetricValueConversionError):
            MetricRangeDataFrame(bad_series)

    def test_init_valid_string_value(self):
        """Ensures metric values provided as a string but are valid floats are processed properly."""
        good_series = {
            "metric": {"__name__": "test_metric", "fake": "data",},
            "values": [[1627485628.789, "26.82068965517243"]],
        }
        self.assertEqual((1, 3), MetricRangeDataFrame(good_series).shape)
116 |
117 |
if __name__ == "__main__":
    # Allow running this test module directly (python tests/test_metric_range_df.py).
    unittest.main()
120 |
--------------------------------------------------------------------------------
/prometheus_api_client/metric_snapshot_df.py:
--------------------------------------------------------------------------------
1 | """A pandas.DataFrame subclass for Prometheus query response."""
2 | try:
3 | from pandas import DataFrame, to_datetime
4 | from pandas._typing import Axes, Dtype
5 | except ImportError as e:
6 | raise ImportError(
7 | "Pandas is required for MetricSnapshotDataFrame class. "
8 | "Please install it with: pip install prometheus-api-client[dataframe] "
9 | "or pip install prometheus-api-client[all]"
10 | ) from e
11 |
12 | from typing import Optional, Sequence
13 |
14 | from prometheus_api_client.exceptions import MetricValueConversionError
15 |
16 |
class MetricSnapshotDataFrame(DataFrame):
    """Subclass to format and represent Prometheus query response as pandas.DataFrame.

    Assumes response is either a json or sequence of jsons.

    This is different than passing raw list of jsons to pandas.DataFrame in that it
    unpacks metric label values, extracts (first or last) timestamp-value pair (if
    multiple pairs are retuned), and concats them before passing to the pandas
    DataFrame constructor.

    Some argument descriptions in this docstring were copied from pandas.core.frame.DataFrame.

    :param data: (list|json) A single metric (json with keys "metric" and "values"/"value")
        or list of such metrics received from Prometheus as a response to query
    :param index: (pandas.Index|array-like) Index to use for resulting dataframe. Will default to
        pandas.RangeIndex if no indexing information part of input data and no index provided.
    :param columns: (pandas.Index|array-like) Column labels to use for resulting dataframe. Will
        default to list of labels + "timestamp" + "value" if not provided.
    :param dtype: (dtype) default None. Data type to force. Only a single dtype is allowed. If None, infer.
    :param copy: (bool) default False. Copy data from inputs. Only affects DataFrame / 2d ndarray input.
    :param ts_values_keep: (str) If several timestamp-value tuples are returned for a given
        metric + label config, determine which one to keep. Currently only supports 'first', 'last'.
    :param ts_as_datetime: (bool) default True. Convert the timestamps returned by prometheus
        from float64 (unix time) to pandas datetime objects. This results in the timestamp column
        of the returned dataframe to be of dtype datetime64[ns] instead float64

    :raises ValueError: if ts_values_keep is neither 'first' nor 'last'
    :raises MetricValueConversionError: if a string metric value cannot be cast to float

    Example Usage:
      .. code-block:: python

          prom = PrometheusConnect()
          metric_data = prom.get_current_metric_value(metric_name='up', label_config=my_label_config)
          metric_df = MetricSnapshotDataFrame(metric_data)
          metric_df.head()
          '''
          +----------+--------------+-----------------+------------+-------+
          | __name__ | cluster      | label_2         | timestamp  | value |
          +==========+==============+=================+============+=======+
          | up       | cluster_id_0 | label_2_value_2 | 1577836800 | 0     |
          +----------+--------------+-----------------+------------+-------+
          | up       | cluster_id_1 | label_2_value_3 | 1577836800 | 1     |
          +----------+--------------+-----------------+------------+-------+
          '''

    """

    def __init__(
        self,
        data=None,
        index: Optional[Axes] = None,
        columns: Optional[Axes] = None,
        dtype: Optional[Dtype] = None,
        copy: bool = False,
        ts_values_keep: str = "last",
        ts_as_datetime: bool = True,
    ):
        """Functions as a constructor for MetricSnapshotDataFrame class."""
        if data is not None:
            # if just a single json instead of list/set/other sequence of jsons,
            # treat as list with single entry
            if not isinstance(data, Sequence):
                data = [data]

            if ts_values_keep not in ("first", "last"):
                raise ValueError("ts_values_keep must be one of 'first' and 'last'")

            # index corresponding to which ts-value pair to extract
            n = -1 if ts_values_keep == "last" else 0

            # unpack metric, extract and unpack ts-value pair
            data = [
                {**i["metric"], **MetricSnapshotDataFrame._get_nth_ts_value_pair(i, n)}
                for i in data
            ]

        # always initialize the underlying DataFrame (even when no data was
        # given) so the object is a valid, empty pandas.DataFrame instead of
        # an uninitialized subclass instance
        super(MetricSnapshotDataFrame, self).__init__(
            data=data, index=index, columns=columns, dtype=dtype, copy=copy
        )

        # convert to DateTime type instead of Float64
        if data is not None and ts_as_datetime:
            self["timestamp"] = to_datetime(self["timestamp"], unit="s")

    @staticmethod
    def _get_nth_ts_value_pair(i: dict, n: int):
        """Extract the n-th timestamp-value pair from a raw metric json.

        Handles both range vectors ("values") and instant vectors ("value");
        string metric values are cast to float.
        """
        val = i["values"][n] if "values" in i else i["value"]
        value = val[1]
        if isinstance(value, str):
            try:
                value = float(value)
            except (TypeError, ValueError) as exc:
                # chain the original error so the offending value stays
                # visible in the traceback
                raise MetricValueConversionError(
                    "Converting string metric value to float failed."
                ) from exc
        return {"timestamp": val[0], "value": value}
111 |
--------------------------------------------------------------------------------
/tests/test_metric.py:
--------------------------------------------------------------------------------
1 | """Unit tests for Metrics Class."""
2 | import unittest
3 | import datetime
4 |
5 | import pytest
6 |
7 | from prometheus_api_client import Metric
8 | from prometheus_api_client.exceptions import MetricValueConversionError
9 | from .test_with_metrics import TestWithMetrics
10 |
11 |
class TestMetric(unittest.TestCase, TestWithMetrics.Common):
    """Unit tests for the Metric class."""

    def setUp(self):
        """Load metrics stored as jsons."""
        self.load_metrics()

    def test_init(self):  # noqa D102
        test_metric_object = Metric(self.raw_metrics_list[0][0])
        self.assertEqual("up", test_metric_object.metric_name, "incorrect metric name")

    def test_metric_start_time(self):  # noqa D102
        start_time = datetime.datetime(2019, 7, 28, 10, 0)
        start_time_plus_1m = datetime.datetime(2019, 7, 28, 10, 1)

        test_metric_object = Metric(self.raw_metrics_list[0][0])
        # assertGreater/assertLess report both operands on failure, unlike
        # assertTrue(a > b) which only reports "False is not true".
        self.assertGreater(
            test_metric_object.start_time, start_time, "incorrect metric start time"
        )
        self.assertLess(
            test_metric_object.start_time, start_time_plus_1m, "incorrect metric start time"
        )

    def test_metric_end_time(self):  # noqa D102
        end_time = datetime.datetime(2019, 7, 28, 16, 00)
        end_time_minus_1m = datetime.datetime(2019, 7, 28, 15, 59)

        test_metric_object = Metric(self.raw_metrics_list[0][0])
        self.assertGreater(
            test_metric_object.end_time, end_time_minus_1m, "incorrect metric end time"
        )
        self.assertLess(test_metric_object.end_time, end_time, "incorrect metric end time")

    def test_metric_equality(self):  # noqa D102
        # same time-series (same name + labels) compares equal ...
        self.assertEqual(
            Metric(self.raw_metrics_list[0][0]),
            Metric(self.raw_metrics_list[1][0]),
            "incorrect inequality",
        )
        # ... different label sets compare unequal
        self.assertNotEqual(
            Metric(self.raw_metrics_list[0][0]),
            Metric(self.raw_metrics_list[0][1]),
            "incorrect equality",
        )

    def test_metric_addition(self):  # noqa D102
        # adding metrics from different time-series must fail
        with self.assertRaises(TypeError, msg="incorrect addition of two metrics"):
            _ = Metric(self.raw_metrics_list[0][0]) + Metric(self.raw_metrics_list[0][1])

        sum_metric = Metric(self.raw_metrics_list[0][0]) + Metric(self.raw_metrics_list[1][0])
        self.assertIsInstance(sum_metric, Metric, msg="The sum is not a Metric")
        self.assertEqual(
            sum_metric.start_time,
            Metric(self.raw_metrics_list[0][0]).start_time,
            "Incorrect Start time after addition",
        )
        self.assertEqual(
            sum_metric.end_time,
            Metric(self.raw_metrics_list[1][0]).end_time,
            "Incorrect End time after addition",
        )

    def test_oldest_data_datetime_with_datetime(self):  # noqa D102
        with self.assertRaises(TypeError, msg="incorrect parameter type accepted"):
            _ = Metric(self.raw_metrics_list[0][0], oldest_data_datetime="2d")

        # pruning should drop everything older than the 5th sample
        expected_start_time = Metric(self.raw_metrics_list[0][0]).metric_values.iloc[4, 0]
        new_metric = Metric(
            self.raw_metrics_list[0][0], oldest_data_datetime=expected_start_time
        ) + Metric(self.raw_metrics_list[1][0])

        self.assertEqual(
            expected_start_time, new_metric.start_time, "Incorrect Start time after addition"
        )
        self.assertEqual(
            expected_start_time,
            new_metric.metric_values.iloc[0, 0],
            "Incorrect Start time after addition (in df)",
        )

    def test_oldest_data_datetime_with_timedelta(self):  # noqa D102
        expected_start_time = Metric(self.raw_metrics_list[0][0]).metric_values.iloc[4, 0]
        # window sized so that, measured back from the newest sample, it lands
        # exactly on the expected start sample
        time_delta = (
            Metric(self.raw_metrics_list[1][0]).metric_values.iloc[-1, 0]
            - Metric(self.raw_metrics_list[0][0]).metric_values.iloc[4, 0]
        )
        new_metric = Metric(self.raw_metrics_list[0][0], oldest_data_datetime=time_delta) + Metric(
            self.raw_metrics_list[1][0]
        )
        self.assertEqual(
            expected_start_time, new_metric.start_time, "Incorrect Start time after addition"
        )

    def test_init_valid_string_metric_value(self):
        """Ensure metric values provided as strings are cast to a numeric value (a float)."""
        test_metric = Metric(
            metric={
                "metric": {"__name__": "test_metric", "fake": "data"},
                "value": [1627485628.789, "26.82068965517243"],
            }
        )

        self.assertIsInstance(test_metric, Metric)

    def test_init_invalid_string_metric_value(self):
        """Ensure a string metric value that cannot be parsed as a float raises MetricValueConversionError."""
        with pytest.raises(MetricValueConversionError):
            Metric(
                metric={
                    "metric": {"__name__": "test_metric", "fake": "data"},
                    "value": [1627485628.789, "26.8206896551724326.82068965517243"],
                }
            )
123 |
124 |
if __name__ == "__main__":
    # Allow running this test module directly (e.g. `python test_metric.py`).
    unittest.main()
127 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
"""Configuration file for the Sphinx documentation builder."""
import os
import sys

sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))


# -- Project information -----------------------------------------------------

project = "Prometheus Client API Python"
copyright = "2019, Anand Sanmukhani"
author = "Anand Sanmukhani"

# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = "0.0.1"


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE: Sphinx >= 5 requires a string here; ``None`` is no longer accepted.
# "en" is the documented default.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "PrometheusClientAPIPythondoc"


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "PrometheusClientAPIPython.tex",
        "Prometheus Client API Python Documentation",
        "Anand Sanmukhani",
        "manual",
    )
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        "prometheusclientapipython",
        "Prometheus Client API Python Documentation",
        [author],
        1,
    )
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "PrometheusClientAPIPython",
        "Prometheus Client API Python Documentation",
        author,
        "PrometheusClientAPIPython",
        "One line description of project.",
        "Miscellaneous",
    )
]


# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]


# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE: the un-named ``{url: None}`` mapping form is deprecated and was removed
# in Sphinx 6; each entry must be ``name: (target, inventory)``.
intersphinx_mapping = {"python": ("https://docs.python.org/", None)}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
--------------------------------------------------------------------------------
/prometheus_api_client/metric.py:
--------------------------------------------------------------------------------
1 | """A Class for metric object."""
2 | from copy import deepcopy
3 | import datetime
4 |
5 | try:
6 | import pandas
7 | except ImportError as e:
8 | raise ImportError(
9 | "Pandas is required for Metric class. "
10 | "Please install it with: pip install prometheus-api-client[dataframe] "
11 | "or pip install prometheus-api-client[all]"
12 | ) from e
13 |
14 | from prometheus_api_client.exceptions import MetricValueConversionError
15 |
class Metric:
    r"""
    A Class for `Metric` object.

    :param metric: (dict) A metric item from the list of metrics received from prometheus
    :param oldest_data_datetime: (datetime|timedelta) Any metric values in the dataframe that are
        older than this value will be deleted when new data is added to the dataframe
        using the __add__("+") operator.

        * `oldest_data_datetime=datetime.timedelta(days=2)`, will delete the
          metric data that is 2 days older than the latest metric.
          The dataframe is pruned only when new data is added to it.
        * `oldest_data_datetime=datetime.datetime(2019,5,23,12,0)`, will delete
          any data that is older than "23 May 2019 12:00:00"
        * `oldest_data_datetime=datetime.datetime.fromtimestamp(1561475156)`
          can also be set using the unix timestamp

    Example Usage:
      .. code-block:: python

          prom = PrometheusConnect()

          my_label_config = {'cluster': 'my_cluster_id', 'label_2': 'label_2_value'}

          metric_data = prom.get_metric_range_data(metric_name='up', label_config=my_label_config)
          # Here metric_data is a list of metrics received from prometheus

          # only for the first item in the list
          my_metric_object = Metric(metric_data[0], datetime.timedelta(days=10))

    """

    # Plot state is shared by all Metric instances and created lazily in plot().
    _metric_plot = None

    def __init__(self, metric, oldest_data_datetime=None):
        """Functions as a Constructor for the Metric object.

        :raises TypeError: if oldest_data_datetime is not a datetime.datetime,
            datetime.timedelta or None
        :raises MetricValueConversionError: if a single-value metric carries a
            string value that cannot be converted to float
        """
        if not isinstance(
            oldest_data_datetime, (datetime.datetime, datetime.timedelta, type(None))
        ):
            # if it is neither a datetime object nor a timedelta object raise exception
            raise TypeError(
                "oldest_data_datetime can only be datetime.datetime/ datetime.timedelta or None"
            )

        self.oldest_data_datetime = oldest_data_datetime

        if isinstance(metric, Metric):
            # if metric is a Metric object, just copy the object and update its parameters
            # NOTE(review): the dataframe is shared by reference with the source
            # object here, not copied — confirm that is intended
            self.metric_name = metric.metric_name
            self.label_config = metric.label_config
            self.metric_values = metric.metric_values
        else:
            self.metric_name = metric["metric"].get("__name__", None)
            self.label_config = deepcopy(metric["metric"])
            if "__name__" in self.label_config:
                del self.label_config["__name__"]

            # if it is a single value metric, normalize it to the "values" layout
            if "value" in metric:
                datestamp = metric["value"][0]
                metric_value = metric["value"][1]
                if isinstance(metric_value, str):
                    try:
                        metric_value = float(metric_value)
                    except (TypeError, ValueError) as exc:
                        # chain the original error so the root cause stays visible
                        raise MetricValueConversionError(
                            "Converting string metric value to float failed."
                        ) from exc
                metric["values"] = [[datestamp, metric_value]]

            self.metric_values = pandas.DataFrame(metric["values"], columns=["ds", "y"]).apply(
                pandas.to_numeric, errors="raise"
            )
            self.metric_values["ds"] = pandas.to_datetime(self.metric_values["ds"], unit="s")

        # Set the metric start time and the metric end time
        self.start_time = self.metric_values.iloc[0, 0]
        self.end_time = self.metric_values.iloc[-1, 0]

    def __eq__(self, other):
        """
        Overloading operator ``=``.

        Check whether two metrics are the same (are the same time-series regardless of their data)

        Example Usage:
          .. code-block:: python

            metric_1 = Metric(metric_data_1)

            metric_2 = Metric(metric_data_2)

            print(metric_1 == metric_2) # will print True if they belong to the same time-series

        :return: (bool) If two Metric objects belong to the same time-series,
            i.e. same name and label config, it will return True, else False
        """
        if not isinstance(other, Metric):
            # defer to the other operand / Python's default instead of raising
            # AttributeError when compared against an arbitrary object
            return NotImplemented
        return bool(
            (self.metric_name == other.metric_name) and (self.label_config == other.label_config)
        )

    def __str__(self):
        """
        Make it print in a cleaner way when print function is used on a Metric object.

        Example Usage:
          .. code-block:: python

            metric_1 = Metric(metric_data_1)

            print(metric_1) # will print the name, labels and the head of the dataframe

        """
        name = "metric_name: " + repr(self.metric_name) + "\n"
        labels = "label_config: " + repr(self.label_config) + "\n"
        values = "metric_values: " + repr(self.metric_values)

        return "{" + "\n" + name + labels + values + "\n" + "}"

    def __add__(self, other):
        r"""
        Overloading operator ``+``.

        Add two metric objects for the same time-series

        Example Usage:
          .. code-block:: python

            metric_1 = Metric(metric_data_1)
            metric_2 = Metric(metric_data_2)
            metric_12 = metric_1 + metric_2 # will add the data in ``metric_2`` to ``metric_1``
                                            # so if any other parameters are set in ``metric_1``
                                            # will also be set in ``metric_12``
                                            # (like ``oldest_data_datetime``)

        :return: (`Metric`) Returns a `Metric` object with the combined metric data
            of the two added metrics

        :raises: (TypeError) Raises an exception when two metrics being added are
            from different metric time-series
        """
        if self == other:
            new_metric = deepcopy(self)
            # append the other metric's rows, then de-duplicate and re-sort on "ds"
            new_metric.metric_values = pandas.concat(
                [new_metric.metric_values, other.metric_values], ignore_index=True, axis=0
            )
            new_metric.metric_values = new_metric.metric_values.dropna()
            new_metric.metric_values = (
                new_metric.metric_values.drop_duplicates("ds")
                .sort_values(by=["ds"])
                .reset_index(drop=True)
            )
            # if oldest_data_datetime is set, trim the dataframe and only keep the newer data
            if new_metric.oldest_data_datetime:
                if isinstance(new_metric.oldest_data_datetime, datetime.timedelta):
                    # timedelta: keep the window ending at the newest sample
                    mask = new_metric.metric_values["ds"] >= (
                        new_metric.metric_values.iloc[-1, 0] - abs(new_metric.oldest_data_datetime)
                    )
                else:
                    # datetime: keep samples at or after the cutoff
                    mask = new_metric.metric_values["ds"] >= new_metric.oldest_data_datetime
                # truncate the df within the mask
                new_metric.metric_values = new_metric.metric_values.loc[mask]

            # Update the metric start time and the metric end time for the new Metric
            new_metric.start_time = new_metric.metric_values.iloc[0, 0]
            new_metric.end_time = new_metric.metric_values.iloc[-1, 0]

            return new_metric

        if self.metric_name != other.metric_name:
            error_string = "Different metric names"
        else:
            error_string = "Different metric labels"
        raise TypeError("Cannot Add different metric types. " + error_string)

    def plot(self, *args, **kwargs):
        """Plot a very simple line graph for the metric time-series."""
        if not Metric._metric_plot:
            # lazy import so matplotlib is only required when plotting is used
            from prometheus_api_client.metric_plot import MetricPlot
            Metric._metric_plot = MetricPlot(*args, **kwargs)
        Metric._metric_plot.plot_date(self)

    def show(self, block=None):
        """Display the plot created by a previous call to plot().

        :raises TypeError: if called before plot() has created the plot state
        """
        if not Metric._metric_plot:
            # bug fix: the TypeError was previously constructed but never raised,
            # so callers got an opaque AttributeError on None instead
            raise TypeError("Invalid operation: Can't show() before plot()")
        Metric._metric_plot.show(block)
208 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # prometheus-api-client
2 |
3 | [](https://badge.fury.io/py/prometheus-api-client) [](https://pypi.python.org/pypi/prometheus-api-client/) 
4 |
5 |
6 | A Python wrapper for the Prometheus http api and some tools for metrics processing.
7 |
8 | ## Installation
9 |
10 | To install the latest release:
11 |
12 | `pip install prometheus-api-client`
13 |
14 | To install with all optional dependencies (pandas, numpy, matplotlib):
15 |
16 | `pip install prometheus-api-client[all]`
17 |
18 | **Note:** Starting from version 0.7.0, pandas, numpy, and matplotlib are optional dependencies.
19 | If you only need `PrometheusConnect` without DataFrame support or plotting capabilities, you can install the minimal version which significantly reduces memory footprint and installation time, especially on Alpine-based Docker images.
20 |
21 | To install only specific extras:
22 | - For DataFrame support: `pip install prometheus-api-client[dataframe]`
23 | - For analytics/aggregation operations: `pip install prometheus-api-client[analytics]`
24 | - For plotting support: `pip install prometheus-api-client[plot]`
25 |
26 | To install directly from this branch:
27 |
28 | `pip install https://github.com/4n4nd/prometheus-api-client-python/zipball/master`
29 |
30 | ## Links
31 |
32 | - [Slack](https://join.slack.com/share/zt-kw3v8t1e-hbcVH7X7bXORiQuQtsNZ4A)
33 | - [Google Chat](https://chat.google.com/room/AAAAzFPwq5s)
34 | - [Documentation](https://prometheus-api-client-python.readthedocs.io/en/master/source/prometheus_api_client.html)
35 |
36 | ## Getting Started
37 |
38 | ### Usage
39 | [Prometheus](https://prometheus.io/), a Cloud Native Computing Foundation project, is a systems and service monitoring system. It collects metrics (time series data) from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. The raw time series data obtained from a Prometheus host can sometimes be hard to interpret. To help better understand these metrics we have created a Python wrapper for the Prometheus http api for easier metrics processing and analysis.
40 |
41 | The `prometheus-api-client` library consists of multiple modules which assist in connecting to a Prometheus host, fetching the required metrics and performing various aggregation operations on the time series data.
42 |
43 | #### Connecting and Collecting Metrics from a Prometheus host
44 | The `PrometheusConnect` module of the library can be used to connect to a Prometheus host. This module is essentially a class created for the collection of metrics from a Prometheus host. It stores the following connection parameters:
45 |
46 | - **url** - (str) url for the prometheus host
47 | - **headers** – (dict) A dictionary of http headers to be used to communicate with the host. Example: {“Authorization”: “bearer my_oauth_token_to_the_host”}
48 | - **disable_ssl** – (bool) If set to True, will disable ssl certificate verification for the http requests made to the prometheus host
49 |
50 | ```python
51 | from prometheus_api_client import PrometheusConnect
52 | prom = PrometheusConnect(url ="", disable_ssl=True)
53 |
54 | # Get the list of all the metrics that the Prometheus host scrapes
55 | prom.all_metrics()
56 | ```
57 |
58 | You can also fetch the time series data for a specific metric using custom queries as follows:
59 |
60 | ```python
61 | prom = PrometheusConnect()
62 | my_label_config = {'cluster': 'my_cluster_id', 'label_2': 'label_2_value'}
63 | prom.get_current_metric_value(metric_name='up', label_config=my_label_config)
64 |
65 | # Here, we are fetching the values of a particular metric name
66 | prom.custom_query(query="prometheus_http_requests_total")
67 |
68 | # Now, lets try to fetch the `sum` of the metrics
69 | prom.custom_query(query="sum(prometheus_http_requests_total)")
70 | ```
71 |
72 | We can also use custom queries for fetching the metric data in a specific time interval. For example, let's try to fetch the past 2 days of data for a particular metric in chunks of 1 day:
73 |
74 | ```python
75 | # Import the required datetime functions
76 | from prometheus_api_client.utils import parse_datetime
77 | from datetime import timedelta
78 |
79 | start_time = parse_datetime("2d")
80 | end_time = parse_datetime("now")
81 | chunk_size = timedelta(days=1)
82 |
83 | metric_data = prom.get_metric_range_data(
84 | "up{cluster='my_cluster_id'}", # this is the metric name and label config
85 | start_time=start_time,
86 | end_time=end_time,
87 | chunk_size=chunk_size,
88 | )
89 | ```
90 |
91 | For more functions included in the `PrometheusConnect` module, refer to this [documentation.](https://prometheus-api-client-python.readthedocs.io/en/master/source/prometheus_api_client.html#module-prometheus_api_client.prometheus_connect)
92 |
93 | #### Understanding the Metrics Data Fetched
94 | The `MetricsList` module initializes a list of Metric objects for the metrics fetched from a Prometheus host as a result of a promql query.
95 |
96 | ```python
97 | # Import the MetricsList and Metric modules
98 | from prometheus_api_client import PrometheusConnect, MetricsList, Metric
99 |
100 | prom = PrometheusConnect()
101 | my_label_config = {'cluster': 'my_cluster_id', 'label_2': 'label_2_value'}
102 | metric_data = prom.get_metric_range_data(metric_name='up', label_config=my_label_config)
103 |
104 | metric_object_list = MetricsList(metric_data) # metric_object_list will be initialized as
105 | # a list of Metric objects for all the
106 | # metrics downloaded using get_metric query
107 |
108 | # We can see what each of the metric objects look like
109 | for item in metric_object_list:
110 | print(item.metric_name, item.label_config, "\n")
111 | ```
112 |
113 | Each of the items in the `metric_object_list` are initialized as a `Metric` class object. Let's look at one of the metrics from the `metric_object_list` to learn more about the `Metric` class:
114 |
115 | ```python
116 | my_metric_object = metric_object_list[1] # one of the metrics from the list
117 | print(my_metric_object)
118 | ```
119 |
120 | For more functions included in the `MetricsList` and `Metrics` module, refer to this [documentation.](https://prometheus-api-client-python.readthedocs.io/en/master/source/prometheus_api_client.html#module-prometheus_api_client.metric)
121 |
122 | #### Additional Metric Functions
123 | The `Metric` class also supports multiple functions such as adding, equating and plotting various metric objects.
124 |
125 | ##### Adding Metrics
You can add two metric objects for the same time-series as follows:
127 |
128 | ```python
129 | metric_1 = Metric(metric_data_1)
130 | metric_2 = Metric(metric_data_2)
131 | metric_12 = metric_1 + metric_2 # will add the data in ``metric_2`` to ``metric_1``
132 | # so if any other parameters are set in ``metric_1``
133 | # will also be set in ``metric_12``
134 | # (like ``oldest_data_datetime``)
135 | ```
136 |
137 | ##### Equating Metrics
138 | Overloading operator =, to check whether two metrics are the same (are the same time-series regardless of their data)
139 | ```python
140 | metric_1 = Metric(metric_data_1)
141 | metric_2 = Metric(metric_data_2)
142 | print(metric_1 == metric_2) # will print True if they belong to the same time-series
143 | ```
144 |
145 | ##### Plotting Metric Objects
146 | Plot a very simple line graph for the metric time series:
147 |
148 | ```python
149 | from prometheus_api_client import PrometheusConnect, MetricsList, Metric
150 |
151 | prom = PrometheusConnect()
152 | my_label_config = {'cluster': 'my_cluster_id', 'label_2': 'label_2_value'}
153 | metric_data = prom.get_metric_range_data(metric_name='up', label_config=my_label_config)
154 |
155 | metric_object_list = MetricsList(metric_data)
156 | my_metric_object = metric_object_list[1] # one of the metrics from the list
157 | my_metric_object.plot()
158 | ```
159 |
160 | #### Getting Metrics Data as pandas DataFrames
161 | To perform data analysis and manipulation, it is often helpful to have the data represented using a [pandas DataFrame](https://pandas.pydata.org/docs/user_guide/dsintro.html#dataframe). There are two modules in this library that can be used to process the raw metrics fetched into a DataFrame.
162 |
163 | The `MetricSnapshotDataFrame` module converts "current metric value" data to a DataFrame representation, and the `MetricRangeDataFrame` converts "metric range values" data to a DataFrame representation. Example usage of these classes can be seen below:
164 |
165 | ```python
166 | import datetime as dt
167 | from prometheus_api_client import PrometheusConnect, MetricSnapshotDataFrame, MetricRangeDataFrame
168 |
169 | prom = PrometheusConnect()
170 | my_label_config = {'cluster': 'my_cluster_id', 'label_2': 'label_2_value'}
171 |
172 | # metric current values
173 | metric_data = prom.get_current_metric_value(
174 | metric_name='up',
175 | label_config=my_label_config,
176 | )
177 | metric_df = MetricSnapshotDataFrame(metric_data)
178 | metric_df.head()
179 | """ Output:
180 | +-------------------------+-----------------+------------+-------+
181 | | __name__ | cluster | label_2 | timestamp | value |
182 | +==========+==============+=================+============+=======+
183 | | up | cluster_id_0 | label_2_value_2 | 1577836800 | 0 |
184 | +-------------------------+-----------------+------------+-------+
185 | | up | cluster_id_1 | label_2_value_3 | 1577836800 | 1 |
186 | +-------------------------+-----------------+------------+-------+
187 | """
188 |
189 | # metric values for a range of timestamps
190 | metric_data = prom.get_metric_range_data(
191 | metric_name='up',
192 | label_config=my_label_config,
193 | start_time=(dt.datetime.now() - dt.timedelta(minutes=30)),
194 | end_time=dt.datetime.now(),
195 | )
196 | metric_df = MetricRangeDataFrame(metric_data)
197 | metric_df.head()
198 | """ Output:
199 | +------------+------------+-----------------+--------------------+-------+
200 | | | __name__ | cluster | label_2 | value |
201 | +-------------------------+-----------------+--------------------+-------+
202 | | timestamp | | | | |
203 | +============+============+=================+====================+=======+
204 | | 1577836800 | up | cluster_id_0 | label_2_value_2 | 0 |
205 | +-------------------------+-----------------+--------------------+-------+
206 | | 1577836801 | up | cluster_id_1 | label_2_value_3 | 1 |
207 | +-------------------------+-----------------+------------=-------+-------+
208 | """
209 | ```
210 |
211 |
212 | For more functions included in the `prometheus-api-client` library, please refer to this [documentation.](https://prometheus-api-client-python.readthedocs.io/en/master/source/prometheus_api_client.html)
213 |
214 | ## Running tests
215 |
216 | `PROM_URL="https://demo.promlabs.com/" pytest`
217 |
218 | ## Code Styling and Linting
219 |
The Prometheus API client uses the [pre-commit](https://pre-commit.com) framework to maintain code linting and Python code styling.
The AICoE-CI runs the pre-commit check on each pull request.
We encourage our contributors to follow the same pattern while contributing to the code.
We would like to keep the same standard and maintain the code for better quality and readability.
224 |
225 | The pre-commit configuration file is present in the repository `.pre-commit-config.yaml`
226 | It contains the different code styling and linting guide which we use for the application.
227 |
228 | We just need to run [pre-commit](https://pre-commit.com/#install) before raising a Pull Request.
229 | Following command can be used to run the pre-commit:
230 | `pre-commit run --all-files`
231 |
232 | If pre-commit is not installed in your system, it can be installed with: `pip install pre-commit`
233 |
--------------------------------------------------------------------------------
/tests/test_prometheus_connect.py:
--------------------------------------------------------------------------------
1 | """Test module for class PrometheusConnect."""
2 | import unittest
3 | import os
4 | from datetime import datetime, timedelta
5 |
6 | import requests
7 | from requests.packages.urllib3.util.retry import Retry
8 |
9 | from prometheus_api_client import MetricsList, PrometheusConnect, PrometheusApiClientException
10 |
11 | from .mocked_network import BaseMockedNetworkTestcase
12 |
13 |
class TestPrometheusConnect(unittest.TestCase):
    """Test module for class PrometheusConnect.

    These are integration tests: they talk to the live Prometheus instance
    pointed at by the ``PROM_URL`` environment variable.
    """

    def setUp(self):
        """Set up connection settings for prometheus."""
        self.prometheus_host = os.getenv("PROM_URL")
        self.pc = PrometheusConnect(url=self.prometheus_host, disable_ssl=False)

    def test_metrics_list(self):
        """Check if setup was done correctly."""
        metrics_list = self.pc.all_metrics()
        self.assertTrue(len(metrics_list) > 0, "no metrics received from prometheus")
        # Checking that the results of all_metrics() and get_label_values("__name__") are the same.
        self.assertEqual(metrics_list, self.pc.get_label_values("__name__"))
        # Check for the "job" label.
        label_values = self.pc.get_label_values("job")
        self.assertTrue(len(label_values) > 0, "no metrics received from prometheus")

    def test_get_metric_range_data(self):
        """Fetch a 10-minute range of 'up' and sanity-check the metric time bounds."""
        start_time = datetime.now() - timedelta(minutes=10)
        end_time = datetime.now()
        metric_data = self.pc.get_metric_range_data(
            metric_name="up", start_time=start_time, end_time=end_time
        )

        metric_objects_list = MetricsList(metric_data)

        self.assertTrue(len(metric_objects_list) > 0, "no metrics received from prometheus")
        # The first sample must fall within one minute after the requested start time.
        self.assertTrue(
            start_time.timestamp() < metric_objects_list[0].start_time.timestamp(),
            "invalid metric start time",
        )
        self.assertTrue(
            (start_time + timedelta(minutes=1)).timestamp()
            > metric_objects_list[0].start_time.timestamp(),
            "invalid metric start time",
        )
        # The last sample must fall within one minute before the requested end time.
        self.assertTrue(
            end_time.timestamp() > metric_objects_list[0].end_time.timestamp(),
            "invalid metric end time",
        )
        self.assertTrue(
            (end_time - timedelta(minutes=1)).timestamp()
            < metric_objects_list[0].end_time.timestamp(),
            "invalid metric end time",
        )

    def test_get_metric_range_data_with_chunk_size(self):
        """Fetch a range in 7-minute chunks and check the combined time bounds."""
        start_time = datetime.now() - timedelta(minutes=65)
        chunk_size = timedelta(minutes=7)
        end_time = datetime.now() - timedelta(minutes=5)
        metric_data = self.pc.get_metric_range_data(
            metric_name="up", start_time=start_time, end_time=end_time, chunk_size=chunk_size
        )

        metric_objects_list = MetricsList(metric_data)

        self.assertTrue(len(metric_objects_list) > 0, "no metrics received from prometheus")
        self.assertTrue(
            start_time.timestamp() < metric_objects_list[0].start_time.timestamp(),
            "invalid metric start time (with given chunk_size)",
        )
        self.assertTrue(
            (start_time + timedelta(minutes=1)).timestamp()
            > metric_objects_list[0].start_time.timestamp(),
            "invalid metric start time (with given chunk_size)",
        )
        self.assertTrue(
            end_time.timestamp() > metric_objects_list[0].end_time.timestamp(),
            "invalid metric end time (with given chunk_size)",
        )
        self.assertTrue(
            (end_time - timedelta(minutes=1)).timestamp()
            < metric_objects_list[0].end_time.timestamp(),
            "invalid metric end time (with given chunk_size)",
        )

    def test_get_metric_range_data_with_incorrect_input_types(self):
        """Invalid time / chunk_size arguments must raise ValueError or TypeError."""
        start_time = datetime.now() - timedelta(minutes=20)
        chunk_size = timedelta(minutes=7)
        end_time = datetime.now() - timedelta(minutes=10)

        with self.assertRaises(ValueError, msg="specified chunk_size is too big"):
            _ = self.pc.get_metric_range_data(
                metric_name="up",
                start_time=start_time,
                end_time=end_time,
                chunk_size=timedelta(minutes=30),
            )
        with self.assertRaises(TypeError, msg="start_time accepted invalid value type"):
            _ = self.pc.get_metric_range_data(
                metric_name="up", start_time="20m", end_time=end_time, chunk_size=chunk_size
            )
        with self.assertRaises(TypeError, msg="end_time accepted invalid value type"):
            _ = self.pc.get_metric_range_data(
                metric_name="up", start_time=start_time, end_time="10m", chunk_size=chunk_size
            )
        with self.assertRaises(TypeError, msg="chunk_size accepted invalid value type"):
            _ = self.pc.get_metric_range_data(
                metric_name="up", start_time=start_time, end_time=end_time, chunk_size="10m"
            )

    def test_get_metric_aggregation(self):
        """Aggregating 'up' over a range must return a value for every operation."""
        operations = ["sum", "max", "min", "variance", "percentile_50", "deviation", "average"]
        start_time = datetime.now() - timedelta(minutes=10)
        end_time = datetime.now()
        step = "15"
        aggregated_values = self.pc.get_metric_aggregation(
            query="up", operations=operations, start_time=start_time, end_time=end_time, step=step
        )

        self.assertTrue(len(aggregated_values) > 0, "no values received after aggregating")

    def test_get_metric_aggregation_with_incorrect_input_types(self):
        """A non-list 'operations' argument must raise TypeError."""
        with self.assertRaises(TypeError, msg="operations accepted invalid value type"):
            _ = self.pc.get_metric_aggregation(query="up", operations="sum")

    def test_retry_on_error(self):
        """A Retry policy with status_forcelist must raise RetryError on repeated 400s."""
        retry = Retry(total=3, backoff_factor=0.1, status_forcelist=[400])
        pc = PrometheusConnect(url=self.prometheus_host, disable_ssl=False, retry=retry)

        with self.assertRaises(requests.exceptions.RetryError, msg="too many 400 error responses"):
            pc.custom_query("BOOM.BOOM!#$%")

    def test_get_label_names_method(self):
        """The 'up' series must expose exactly __name__, instance and job labels."""
        labels = self.pc.get_label_names(params={"match[]": "up"})
        self.assertEqual(len(labels), 3)
        self.assertEqual(labels, ["__name__", "instance", "job"])

    def test_get_series(self):
        """get_series must return a non-empty list of label dicts."""
        start_time = datetime.now() - timedelta(hours=1)
        end_time = datetime.now()
        series = self.pc.get_series(start=start_time, end=end_time, params={"match[]": "up"})
        self.assertIsInstance(series, list)
        self.assertTrue(len(series) > 0, "no series data received from prometheus")
        # Verify that each series entry is a dict with labels
        for series_entry in series:
            self.assertIsInstance(series_entry, dict)
            self.assertIn("__name__", series_entry)

    def test_get_scrape_pools(self):  # PR #295
        """get_scrape_pools must return a non-empty list of pool names."""
        scrape_pools = self.pc.get_scrape_pools()
        self.assertIsInstance(scrape_pools, list)
        self.assertTrue(len(scrape_pools) > 0, "no scrape pools found")
        self.assertIsInstance(scrape_pools[0], str)

    def test_get_targets(self):  # PR #295
        """get_targets must return a dict with an 'activeTargets' list; filters must work."""
        targets = self.pc.get_targets()
        self.assertIsInstance(targets, dict)
        self.assertIn('activeTargets', targets)
        self.assertIsInstance(targets['activeTargets'], list)

        # Test with state filter
        active_targets = self.pc.get_targets(state='active')
        self.assertIsInstance(active_targets, dict)
        self.assertIn('activeTargets', active_targets)

        # Test with scrape_pool filter
        if len(scrape_pools := self.pc.get_scrape_pools()) > 0:
            pool_targets = self.pc.get_targets(scrape_pool=scrape_pools[0])
            self.assertIsInstance(pool_targets, dict)

    def test_get_target_metadata(self):  # PR #295
        """get_target_metadata must return a list, optionally filtered by metric."""
        # Get a target to test with
        targets = self.pc.get_targets()
        if len(targets['activeTargets']) > 0:
            target = {
                'job': targets['activeTargets'][0]['labels']['job']
            }
            metadata = self.pc.get_target_metadata(target)
            self.assertIsInstance(metadata, list)

            # Test with metric filter
            if len(metadata) > 0:
                metric_name = metadata[0]['metric']
                filtered_metadata = self.pc.get_target_metadata(
                    target, metric=metric_name)
                self.assertIsInstance(filtered_metadata, list)
                self.assertTrue(
                    all(item['target']['job'] == target['job'] for item in filtered_metadata))

    def test_get_metric_metadata(self):  # PR #295
        """get_metric_metadata must return structured entries; filters and limits must work."""
        metadata = self.pc.get_metric_metadata(metric=None)
        self.assertIsInstance(metadata, list)
        self.assertTrue(len(metadata) > 0, "no metric metadata found")

        # Check structure of metadata
        self.assertIn('metric_name', metadata[0])
        self.assertIn('type', metadata[0])
        self.assertIn('help', metadata[0])
        self.assertIn('unit', metadata[0])

        # The assertion above guarantees metadata is non-empty, so no guard is needed.
        metric_name = metadata[0]['metric_name']
        filtered_metadata = self.pc.get_metric_metadata(metric=metric_name)
        self.assertIsInstance(filtered_metadata, list)
        self.assertTrue(
            all(item['metric_name'] == metric_name for item in filtered_metadata))

        # Test with limit
        limited_metadata = self.pc.get_metric_metadata(metric_name, limit=1)
        self.assertLessEqual(len(limited_metadata), 1)

        # Test with limit_per_metric
        limited_per_metric = self.pc.get_metric_metadata(metric_name, limit_per_metric=1)
        self.assertIsInstance(limited_per_metric, list)

    def test_method_argument_accepts_get_and_post(self):
        """Test that PrometheusConnect accepts GET and POST for method argument, and raises on invalid values."""
        # Default should be GET
        pc_default = PrometheusConnect(url=self.prometheus_host, disable_ssl=False)
        self.assertEqual(pc_default._method, "GET")
        # Explicit GET
        pc_get = PrometheusConnect(url=self.prometheus_host, disable_ssl=False, method="GET")
        self.assertEqual(pc_get._method, "GET")
        # Explicit POST
        pc_post = PrometheusConnect(url=self.prometheus_host, disable_ssl=False, method="POST")
        self.assertEqual(pc_post._method, "POST")
        # Invalid type
        with self.assertRaises(TypeError):
            PrometheusConnect(url=self.prometheus_host, disable_ssl=False, method=123)
        # Invalid value
        with self.assertRaises(ValueError):
            PrometheusConnect(url=self.prometheus_host, disable_ssl=False, method="PUT")

    def test_post_method_for_supported_functions(self):
        """Test that PrometheusConnect uses POST for supported endpoints when method='POST', and returns a value."""
        pc = PrometheusConnect(url=self.prometheus_host, disable_ssl=False, method="POST")
        start_time = datetime.now() - timedelta(minutes=10)
        end_time = datetime.now()

        # custom_query should use POST and return something (or raise)
        try:
            result = pc.custom_query("up")
            self.assertTrue(result is not None and result != [], "no metrics received from prometheus")
        except Exception as e:
            self.fail(f"custom_query('up') raised an unexpected exception: {e}")

        # custom_query_range should use POST and return something (or raise)
        try:
            result = pc.custom_query_range("up", start_time=start_time, end_time=end_time, step="15")
            self.assertTrue(result is not None and result != [], "no metrics received from prometheus")
        except Exception as e:
            self.fail(f"custom_query_range('up', ...) raised an unexpected exception: {e}")

        # get_label_names should use POST and return something (or raise)
        try:
            result = pc.get_label_names()
            self.assertTrue(result is not None and result != [], "no metrics received from prometheus")
        except Exception as e:
            self.fail(f"get_label_names() raised an unexpected exception: {e}")

        # get_current_metric_value should use POST and return something (or raise)
        try:
            result = pc.get_current_metric_value("up")
            self.assertTrue(result is not None and result != [], "no metrics received from prometheus")
        except Exception as e:
            self.fail(f"get_current_metric_value('up') raised an unexpected exception: {e}")

        # get_metric_range_data should use POST and return something (or raise)
        try:
            result = pc.get_metric_range_data("up", start_time=start_time, end_time=end_time)
            self.assertTrue(result is not None and result != [], "no metrics received from prometheus")
        except Exception as e:
            self.fail(f"get_metric_range_data('up', ...) raised an unexpected exception: {e}")
280 |
281 |
class TestPrometheusConnectWithMockedNetwork(BaseMockedNetworkTestcase):
    """Network is blocked in this testcase, see base class."""

    def setUp(self):
        """Create a client against a dummy URL; all HTTP traffic is mocked."""
        self.pc = PrometheusConnect(url="http://doesnt_matter.xyz", disable_ssl=True)

    def test_network_is_blocked(self):
        """Any real HTTP request must be intercepted by the blocking mock."""
        resp = requests.get("https://google.com")
        self.assertEqual(resp.status_code, 403)
        self.assertEqual(resp.text, "BOOM!")

    def test_how_mock_prop_works(self):
        """mock_response must record each request and serve the canned response."""
        with self.mock_response("kekekeke", status_code=500) as handler:
            self.assertEqual(len(handler.requests), 0)
            resp = requests.get("https://redhat.com")
            self.assertEqual(resp.status_code, 500)
            self.assertEqual(resp.text, "kekekeke")

            self.assertEqual(len(handler.requests), 1)
            request = handler.requests[0]
            self.assertEqual(request.url, "https://redhat.com/")

    def test_unauthorized(self):
        """A 403 response must surface as PrometheusApiClientException."""
        with self.mock_response("Unauthorized", status_code=403):
            with self.assertRaises(PrometheusApiClientException) as exc:
                self.pc.all_metrics()
            self.assertEqual("HTTP Status Code 403 (b'Unauthorized')", str(exc.exception))

        with self.mock_response("Unauthorized", status_code=403):
            with self.assertRaises(PrometheusApiClientException) as exc:
                self.pc.get_label_values("label_name")
            self.assertEqual("HTTP Status Code 403 (b'Unauthorized')", str(exc.exception))

    def test_broken_responses(self):
        """Every public query method must raise PrometheusApiClientException on the blocked network."""
        with self.assertRaises(PrometheusApiClientException) as exc:
            self.pc.all_metrics()
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

        with self.assertRaises(PrometheusApiClientException) as exc:
            self.pc.get_label_values("label_name")
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

        with self.assertRaises(PrometheusApiClientException) as exc:
            self.pc.get_series(start=datetime.now() - timedelta(hours=1), end=datetime.now())
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

        with self.assertRaises(PrometheusApiClientException) as exc:
            self.pc.get_current_metric_value("metric")
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

        with self.assertRaises(PrometheusApiClientException) as exc:
            self.pc.get_metric_range_data("metric")
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

        with self.assertRaises(PrometheusApiClientException) as exc:
            self.pc.custom_query_range("query", datetime.now(), datetime.now(), "1")
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

        with self.assertRaises(PrometheusApiClientException) as exc:
            self.pc.custom_query("query")
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

        with self.assertRaises(PrometheusApiClientException) as exc:
            start_time = datetime.now() - timedelta(minutes=10)
            end_time = datetime.now()
            self.pc.get_metric_aggregation(
                "query", ["sum", "deviation"], start_time, end_time, "15"
            )
        self.assertEqual("HTTP Status Code 403 (b'BOOM!')", str(exc.exception))

    def test_all_metrics_method(self):
        """all_metrics must hit the __name__ label-values endpoint exactly once."""
        all_metrics_payload = {"status": "success", "data": ["up", "alerts"]}

        with self.mock_response(all_metrics_payload) as handler:
            self.assertTrue(len(self.pc.all_metrics()))
            self.assertEqual(handler.call_count, 1)
            request = handler.requests[0]
            self.assertEqual(request.path_url, "/api/v1/label/__name__/values")

    def test_get_series_method(self):
        """get_series must hit /api/v1/series with start and end parameters."""
        series_payload = {"status": "success", "data": [
            {"__name__": "up", "job": "prometheus", "instance": "localhost:9090"},
            {"__name__": "up", "job": "node", "instance": "localhost:9100"}
        ]}

        with self.mock_response(series_payload) as handler:
            start_time = datetime.now() - timedelta(hours=1)
            end_time = datetime.now()
            result = self.pc.get_series(start=start_time, end=end_time)
            self.assertTrue(len(result) > 0)
            self.assertEqual(handler.call_count, 1)
            request = handler.requests[0]
            self.assertTrue(request.path_url.startswith("/api/v1/series"))
            # Verify that start and end parameters are included
            self.assertIn("start", request.url)
            self.assertIn("end", request.url)

    def test_get_label_names_method(self):
        """get_label_names must hit the /api/v1/labels endpoint exactly once."""
        all_metrics_payload = {"status": "success", "data": ["value1", "value2"]}

        with self.mock_response(all_metrics_payload) as handler:
            self.assertTrue(len(self.pc.get_label_names()))
            self.assertEqual(handler.call_count, 1)
            request = handler.requests[0]
            self.assertEqual(request.path_url, "/api/v1/labels")

    def test_get_label_values_method(self):
        """get_label_values must hit the label-specific values endpoint exactly once."""
        all_metrics_payload = {"status": "success", "data": ["value1", "value2"]}

        with self.mock_response(all_metrics_payload) as handler:
            self.assertTrue(len(self.pc.get_label_values("label_name")))
            self.assertEqual(handler.call_count, 1)
            request = handler.requests[0]
            self.assertEqual(request.path_url, "/api/v1/label/label_name/values")
397 |
# Allow running this test module directly (python test_prometheus_connect.py)
# in addition to discovery via pytest.
if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## Release 0.5.0 (2021-12-14T17:53:42)
4 | * update deps and change python version in pipfile to 3.8
5 | * Add check to ensure valid input for timestamps in metric range fnc.
6 | * Update .pre-commit-config.yaml
7 | * Fix docs formatting
8 | * README.md example codes fix (#213)
9 | * Update OWNERS file (#212)
10 | * update pytest prom url since operatefirst url is now behind a proxy (#210)
11 | * :hatching_chick: fix of the naming which is causing dependency update failure
12 | * Update test prometheus url to operate-first prometheus (#194)
13 | * :arrow_up: Bump pillow from 8.0.1 to 8.1.1
14 | * Added slack and google chat link (#189)
15 | * fix documentation formatting (#187)
16 | * Release of version 0.4.2 (#186)
17 | * Add method to check connection to Prometheus (#181)
18 | * feat: replaced exit with ValueError (#182)
19 | * Add MetricRangeDataFrame to RTD. Add sphinx to Pipfile. (#177)
20 | * :pushpin: Automatic update of dependency httmock from 1.3.0 to 1.4.0 (#172)
21 | * :pushpin: Automatic update of dependency numpy from 1.19.2 to 1.19.4 (#171)
22 | * :pushpin: Automatic update of dependency matplotlib from 3.3.2 to 3.3.3 (#170)
23 | * :pushpin: Automatic update of dependency dateparser from 0.7.6 to 1.0.0 (#168)
24 | * :pushpin: Automatic update of dependency requests from 2.24.0 to 2.25.0 (#167)
25 | * Update example notebook (#166)
26 | * Add description of MetricSnapshotDataFrame,MetricRangeDataFrame to README
27 | * :pushpin: Automatic update of dependency numpy from 1.19.1 to 1.19.2 (#162)
28 | * :pushpin: Automatic update of dependency numpy from 1.19.1 to 1.19.2 (#161)
29 | * :pushpin: Automatic update of dependency matplotlib from 3.3.1 to 3.3.2 (#160)
30 | * :pushpin: Automatic update of dependency numpy from 1.19.1 to 1.19.2 (#155)
31 | * :pushpin: Automatic update of dependency matplotlib from 3.3.1 to 3.3.2 (#158)
32 | * :pushpin: Automatic update of dependency pandas from 1.1.1 to 1.1.2 (#154)
33 | * :sparkles: fixes to make pre-commit happy
34 | * :sparkles: now with an OWNERS file, so that Thoth bots can help you even more
35 | * Updating the readme
36 | * Release of version 0.4.1 (#151)
37 | * :pushpin: Automatic update of dependency matplotlib from 3.3.0 to 3.3.1 (#148)
38 | * :pushpin: Automatic update of dependency matplotlib from 3.3.0 to 3.3.1 (#147)
39 | * :pushpin: Automatic update of dependency pandas from 1.1.0 to 1.1.1 (#146)
40 | * Updated the get_metric_aggregations to return global aggregations for both range query and current time query
41 | * :hatching_chick: follow pre-commit compliance for the application
42 | * Make tests pass: ensure ordering of fixtures. (#140)
43 | * :truck: include aicoe-ci configuration file with pytest env vars
44 | * Deduplicate creation of MetricsList.
45 | * add metricrangedf and tests (#137)
46 | * :pushpin: Automatic update of dependency pandas from 1.0.5 to 1.1.0
47 | * Release of version 0.4.0
48 | * Document retry
49 | * :pushpin: Automatic update of dependency numpy from 1.19.0 to 1.19.1
50 | * :pushpin: Automatic update of dependency matplotlib from 3.2.2 to 3.3.0
51 | * Retry the proper way
52 | * Updated the pipfile and the requirement.txt file
53 | * :pushpin: Automatic update of dependency matplotlib from 3.2.1 to 3.2.2
54 | * :pushpin: Automatic update of dependency pandas from 1.0.4 to 1.0.5
55 | * :pushpin: Automatic update of dependency requests from 2.23.0 to 2.24.0
56 | * :pushpin: Automatic update of dependency dateparser from 0.7.5 to 0.7.6
57 | * Update README.md
58 | * Update README.md
59 | * Update README.md
60 | * Release of version 0.3.1
61 | * Update setup.py setup.py should get version info directly from __init__.py
62 | * :pushpin: Automatic update of dependency dateparser from 0.7.4 to 0.7.5
63 | * Release of version 0.3.0
64 | * Update .coafile
65 | * Template for issue creation
66 | * :pushpin: Automatic update of dependency pandas from 1.0.3 to 1.0.4
67 | * added numpy to requirements.txt
68 | * added params argument
69 | * added tests for metric_aggregation
70 | * removed metric_aggregation class
71 | * fix linter errors
72 | * fixed import issues
73 | * added doc strings
74 | * fixed doc string
75 | * code cleaning
76 | * code cleaning and adding adding doc strings
77 | * fixed data processing
78 | * added aggregation class
79 | * :pushpin: Automatic update of dependency matplotlib from 3.2.0 to 3.2.1
80 | * :pushpin: Automatic update of dependency pandas from 1.0.2 to 1.0.3
81 | * Add MetricSnapshotDataFrame module to generate docs config. Update docstring. Addresses #93
82 | * Add Coala Linter
83 | * :pushpin: Automatic update of dependency pandas from 1.0.1 to 1.0.2
84 | * :pushpin: Automatic update of dependency dateparser from 0.7.2 to 0.7.4
85 | * Fixed lint error - missing period in summary line
86 | * Fixed lint errors. Replaced assert for better code quality
87 | * Added some tests for MetricSnapshotDataFrame
88 | * Added initial implementation of MetricSnapshotDataFrame. Addresses #86
89 | * :pushpin: Automatic update of dependency matplotlib from 3.1.3 to 3.2.0
90 | * :pushpin: Automatic update of dependency matplotlib from 3.1.3 to 3.2.0
91 | * Fix Lint Errors
92 | * :pushpin: Automatic update of dependency requests from 2.22.0 to 2.23.0
93 | * Update .thoth.yaml
94 | * Update .thoth.yaml
95 | * :pushpin: Automatic update of dependency pandas from 1.0.0 to 1.0.1
96 | * try to make codacy happy
97 | * Base testcase for network mocking, test for PrometheusConnect. solves #38
98 | * :pushpin: Automatic update of dependency matplotlib from 3.1.2 to 3.1.3
99 | * :pushpin: Automatic update of dependency pandas from 0.25.3 to 1.0.0
100 | * add exception module to docs build
101 | * Upd missed Exception rising, upd docstrings
102 | * Replace Exception to internal exception class
103 | * method to access prometheus query_range HTTP API
104 | * :pushpin: Automatic update of dependency matplotlib from 3.1.1 to 3.1.2
105 | * :pushpin: Automatic update of dependency pandas from 0.25.2 to 0.25.3
106 | * :pushpin: Automatic update of dependency pandas from 0.25.1 to 0.25.2
107 | * Release of version 0.2.1
108 | * Using env var set in zuul config
109 | * Revert "[WIP]Use .zuul.yaml for pytest env vars instead of .env"
110 | * Use .zuul.yaml for pytest env vars instead of .env
111 | * Update Version number to create a new release
112 | * Initial dependency lock
113 | * Add Pipfile for dependency management
114 | * Add .env file for prometheus url to run pytest
115 | * Update README.md
116 | * Add a .coafile And fix coala errors
117 | * py linting and coala fixes
118 | * Added kebechet support
119 | * Delete .stickler.yml
120 | * added Thoth's Zuul and Coala config (#44)
121 | * Remove matplotlib warning
122 | * bump version number to 0.0.2b4 for a new pre-release
123 | * Update example notebook
124 | * Remove dateparser as a dependency and use datetime objects in PrometheusConnect Use datetime objects for metric start_time and end_time. Use timedelta objects for chunk_size. Add tests for class PrometheusConnect Move pretty_print_metric function to utils.py
125 | * Update README.md
126 | * Update .stickler.yml
127 | * Update .stickler.yml
128 | * Create pyproject.toml
129 | * Update .stickler.yml
130 | * Format using black No code changes
131 | * No strings for datetime input for Metric class constructor For `oldest_data_datetime` parameter, the only accepted input types are `datetime.datetime`/`datetime.timedelta` or `NoneType`
132 | * Create .zuul.yaml
133 | * Remove duplicate stored metrics from repo root
134 | * dateparser unexpected behaviour fix, now use the timestamp to convert numpy.datetime64 to datetime.datetime (#23)
135 | * Update MetricsList constructor
136 | * Add unit tests for class `Metric` and `MetricsList`
137 | * Update metric.py
138 | * Add properties `start_time` and `end_time` (datetime objects) to the `Metric` class
139 | * Added optional argument for GET params to all query functions + style fixes
140 | * minor: style fixes
141 | * Added option to specify GET params in custom_query()
142 | * init a Metric object from an existing Metric object
143 | * Update version number for v0.0.2b1 release
144 | * Update Sphinx doc V0.0.2 (#15)
145 | * Update documentation (#14)
146 | * Add example notebook for Metric and MetricsList classes
147 | * Add a Metric Class to make metric data processing easier. Also create a MetricsList class which directly takes the metric data received from prometheus and makes processing it easier
148 | * Update .stickler.yml
149 | * Update .stickler.yml
150 | * Update README.md
151 | * Adding .stickler.yml
152 | * Add Sphinx Documentation configuration
153 | * Change Package name to `prometheus-api-client`
154 | * Update setup.py. Update __init__.py
155 | * Add codacy code quality badge to the README.md
156 | * Add a method in class PrometheusConnect for making custom queries to Prometheus. Fix some documentation
157 | * Add documentation for the class and its methods.
158 | * Add example usage in app.py
159 | * Add function to store metrics locally Add function to print metric data Add requirements.txt
160 | * Fix request query for `get_current_metric_value`
161 | * Add basic methods to request data from prometheus
162 |
163 | ## Release 0.2.1 (2019-10-18T12:38:12)
164 |
165 | - Using env var set in zuul config
166 | - Revert "[WIP]Use .zuul.yaml for pytest env vars instead of .env"
167 | - Use .zuul.yaml for pytest env vars instead of .env
168 | - Update Version number to create a new release
169 | - Initial dependency lock
170 | - Add Pipfile for dependency management
171 | - Add .env file for prometheus url to run pytest
172 | - Update README.md
173 | - Add a .coafile And fix coala errors
174 | - py linting and coala fixes
175 | - Added kebechet support
176 | - Delete .stickler.yml
177 | - added Thoth's Zuul and Coala config (#44)
178 | - Remove matplotlib warning
179 | - bump version number to 0.0.2b4 for a new pre-release
180 | - Update example notebook
181 | - Remove dateparser as a dependency and use datetime objects in PrometheusConnect Use datetime objects for metric start_time and end_time. Use timedelta objects for chunk_size. Add tests for class PrometheusConnect Move pretty_print_metric function to utils.py
182 | - Update README.md
183 | - Update .stickler.yml
184 | - Update .stickler.yml
185 | - Create pyproject.toml
186 | - Update .stickler.yml
187 | - Format using black No code changes
188 | - No strings for datetime input for Metric class constructor For `oldest_data_datetime` parameter, the only accepted input types are `datetime.datetime`/`datetime.timedelta` or `NoneType`
189 | - Create .zuul.yaml
190 | - Remove duplicate stored metrics from repo root
191 | - dateparser unexpected behaviour fix, now use the timestamp to convert numpy.datetime64 to datetime.datetime (#23)
192 | - Update MetricsList constructor
193 | - Add unit tests for class `Metric` and `MetricsList`
194 | - Update metric.py
195 | - Add properties `start_time` and `end_time` (datetime objects) to the `Metric` class
196 | - Added optional argument for GET params to all query functions + style fixes
197 | - minor: style fixes
198 | - Added option to specify GET params in custom_query()
199 | - init a Metric object from an existing Metric object
200 | - Update version number for v0.0.2b1 release
201 | - Update Sphinx doc V0.0.2 (#15)
202 | - Update documentation (#14)
203 | - Add example notebook for Metric and MetricsList classes
204 | - Add a Metric Class to make metric data processing easier. Also create a MetricsList class which directly takes the metric data received from prometheus and makes processing it easier
205 | - Update .stickler.yml
206 | - Update .stickler.yml
207 | - Update README.md
208 | - Adding .stickler.yml
209 | - Add Sphinx Documentation configuration
210 | - Change Package name to `prometheus-api-client`
211 | - Update setup.py. Update `__init__.py`
212 | - Add codacy code quality badge to the README.md
213 | - Add a method in class PrometheusConnect for making custom queries to Prometheus. Fix some documentation
214 | - Add documentation for the class and its methods.
215 | - Add example usage in app.py
216 | - Add function to store metrics locally Add function to print metric data Add requirements.txt
217 | - Fix request query for `get_current_metric_value`
218 | - Add basic methods to request data from prometheus
219 |
220 | ## Release 0.3.0 (2020-06-11T15:21:52)
221 | * Template for issue creation
222 | * :pushpin: Automatic update of dependency pandas from 1.0.3 to 1.0.4
223 | * added numpy to requirements.txt
224 | * added params argument
225 | * added tests for metric_aggregation
226 | * removed metric_aggregation class
227 | * fix linter errors
228 | * fixed import issues
229 | * added doc strings
230 | * fixed doc string
231 | * code cleaning
232 | * code cleaning and adding adding doc strings
233 | * fixed data processing
234 | * added aggregation class
235 | * :pushpin: Automatic update of dependency matplotlib from 3.2.0 to 3.2.1
236 | * :pushpin: Automatic update of dependency pandas from 1.0.2 to 1.0.3
237 | * Add MetricSnapshotDataFrame module to generate docs config. Update docstring. Addresses #93
238 | * Add Coala Linter
239 | * :pushpin: Automatic update of dependency pandas from 1.0.1 to 1.0.2
240 | * :pushpin: Automatic update of dependency dateparser from 0.7.2 to 0.7.4
241 | * Fixed lint error - missing period in summary line
242 | * Fixed lint errors. Replaced assert for better code quality
243 | * Added some tests for MetricSnapshotDataFrame
244 | * Added initial implementation of MetricSnapshotDataFrame. Addresses #86
245 | * :pushpin: Automatic update of dependency matplotlib from 3.1.3 to 3.2.0
246 | * :pushpin: Automatic update of dependency matplotlib from 3.1.3 to 3.2.0
247 | * Fix Lint Errors
248 | * :pushpin: Automatic update of dependency requests from 2.22.0 to 2.23.0
249 | * Update .thoth.yaml
250 | * Update .thoth.yaml
251 | * :pushpin: Automatic update of dependency pandas from 1.0.0 to 1.0.1
252 | * try to make codacy happy
253 | * Base testcase for network mocking, test for PrometheusConnect. solves #38
254 | * :pushpin: Automatic update of dependency matplotlib from 3.1.2 to 3.1.3
255 | * :pushpin: Automatic update of dependency pandas from 0.25.3 to 1.0.0
256 | * add exception module to docs build
257 | * Upd missed Exception rising, upd docstrings
258 | * Replace Exception to internal exception class
259 | * method to access prometheus query_range HTTP API
260 | * :pushpin: Automatic update of dependency matplotlib from 3.1.1 to 3.1.2
261 | * :pushpin: Automatic update of dependency pandas from 0.25.2 to 0.25.3
262 | * :pushpin: Automatic update of dependency pandas from 0.25.1 to 0.25.2
263 |
264 | ## Release 0.3.1 (2020-06-11T16:13:10)
265 | * Update setup.py setup.py should get version info directly from __init__.py
266 | * :pushpin: Automatic update of dependency dateparser from 0.7.4 to 0.7.5
267 | * Release of version 0.3.0
268 | * Update .coafile
269 | * Template for issue creation
270 | * :pushpin: Automatic update of dependency pandas from 1.0.3 to 1.0.4
271 | * added numpy to requirements.txt
272 | * added params argument
273 | * added tests for metric_aggregation
274 | * removed metric_aggregation class
275 | * fix linter errors
276 | * fixed import issues
277 | * added doc strings
278 | * fixed doc string
279 | * code cleaning
280 | * code cleaning and adding doc strings
281 | * fixed data processing
282 | * added aggregation class
283 | * :pushpin: Automatic update of dependency matplotlib from 3.2.0 to 3.2.1
284 | * :pushpin: Automatic update of dependency pandas from 1.0.2 to 1.0.3
285 | * Add MetricSnapshotDataFrame module to generate docs config. Update docstring. Addresses #93
286 | * Add Coala Linter
287 | * :pushpin: Automatic update of dependency pandas from 1.0.1 to 1.0.2
288 | * :pushpin: Automatic update of dependency dateparser from 0.7.2 to 0.7.4
289 | * Fixed lint error - missing period in summary line
290 | * Fixed lint errors. Replaced assert for better code quality
291 | * Added some tests for MetricSnapshotDataFrame
292 | * Added initial implementation of MetricSnapshotDataFrame. Addresses #86
293 | * :pushpin: Automatic update of dependency matplotlib from 3.1.3 to 3.2.0
294 | * :pushpin: Automatic update of dependency matplotlib from 3.1.3 to 3.2.0
295 | * Fix Lint Errors
296 | * :pushpin: Automatic update of dependency requests from 2.22.0 to 2.23.0
297 | * Update .thoth.yaml
298 | * Update .thoth.yaml
299 | * :pushpin: Automatic update of dependency pandas from 1.0.0 to 1.0.1
300 | * try to make codacy happy
301 | * Base testcase for network mocking, test for PrometheusConnect. solves #38
302 | * :pushpin: Automatic update of dependency matplotlib from 3.1.2 to 3.1.3
303 | * :pushpin: Automatic update of dependency pandas from 0.25.3 to 1.0.0
304 | * add exception module to docs build
305 | * Upd missed Exception rising, upd docstrings
306 | * Replace Exception to internal exception class
307 | * method to access prometheus query_range HTTP API
308 | * :pushpin: Automatic update of dependency matplotlib from 3.1.1 to 3.1.2
309 | * :pushpin: Automatic update of dependency pandas from 0.25.2 to 0.25.3
310 | * :pushpin: Automatic update of dependency pandas from 0.25.1 to 0.25.2
311 | * Release of version 0.2.1
312 | * Using env var set in zuul config
313 | * Revert "[WIP]Use .zuul.yaml for pytest env vars instead of .env"
314 | * Use .zuul.yaml for pytest env vars instead of .env
315 | * Update Version number to create a new release
316 | * Initial dependency lock
317 | * Add Pipfile for dependency management
318 | * Add .env file for prometheus url to run pytest
319 | * Update README.md
320 | * Add a .coafile And fix coala errors
321 | * py linting and coala fixes
322 | * Added kebechet support
323 | * Delete .stickler.yml
324 | * added Thoth's Zuul and Coala config (#44)
325 | * Remove matplotlib warning
326 | * bump version number to 0.0.2b4 for a new pre-release
327 | * Update example notebook
328 | * Remove dateparser as a dependency and use datetime objects in PrometheusConnect Use datetime objects for metric start_time and end_time. Use timedelta objects for chunk_size. Add tests for class PrometheusConnect Move pretty_print_metric function to utils.py
329 | * Update README.md
330 | * Update .stickler.yml
331 | * Update .stickler.yml
332 | * Create pyproject.toml
333 | * Update .stickler.yml
334 | * Format using black No code changes
335 | * No strings for datetime input for Metric class constructor For `oldest_data_datetime` parameter, the only accepted input types are `datetime.datetime`/`datetime.timedelta` or `NoneType`
336 | * Create .zuul.yaml
337 | * Remove duplicate stored metrics from repo root
338 | * dateparser unexpected behaviour fix, now use the timestamp to convert numpy.datetime64 to datetime.datetime (#23)
339 | * Update MetricsList constructor
340 | * Add unit tests for class `Metric` and `MetricsList`
341 | * Update metric.py
342 | * Add properties `start_time` and `end_time` (datetime objects) to the `Metric` class
343 | * Added optional argument for GET params to all query functions + style fixes
344 | * minor: style fixes
345 | * Added option to specify GET params in custom_query()
346 | * init a Metric object from an existing Metric object
347 | * Update version number for v0.0.2b1 release
348 | * Update Sphinx doc V0.0.2 (#15)
349 | * Update documentation (#14)
350 | * Add example notebook for Metric and MetricsList classes
351 | * Add a Metric Class to make metric data processing easier. Also create a MetricsList class which directly takes the metric data received from prometheus and makes processing it easier
352 | * Update .stickler.yml
353 | * Update .stickler.yml
354 | * Update README.md
355 | * Adding .stickler.yml
356 | * Add Sphinx Documentation configuration
357 | * Change Package name to `prometheus-api-client`
358 | * Update setup.py. Update __init__.py
359 | * Add codacy code quality badge to the README.md
360 | * Add a method in class PrometheusConnect for making custom queries to Prometheus. Fix some documentation
361 | * Add documentation for the class and its methods.
362 | * Add example usage in app.py
363 | * Add function to store metrics locally Add function to print metric data Add requirements.txt
364 | * Fix request query for `get_current_metric_value`
365 | * Add basic methods to request data from prometheus
366 |
367 | ## Release 0.4.0 (2020-07-28T11:21:26)
368 | * Document retry
369 | * :pushpin: Automatic update of dependency numpy from 1.19.0 to 1.19.1
370 | * :pushpin: Automatic update of dependency matplotlib from 3.2.2 to 3.3.0
371 | * Retry the proper way
372 | * Updated the pipfile and the requirement.txt file
373 | * :pushpin: Automatic update of dependency matplotlib from 3.2.1 to 3.2.2
374 | * :pushpin: Automatic update of dependency pandas from 1.0.4 to 1.0.5
375 | * :pushpin: Automatic update of dependency requests from 2.23.0 to 2.24.0
376 | * :pushpin: Automatic update of dependency dateparser from 0.7.5 to 0.7.6
377 | * Update README.md
378 | * Update README.md
379 | * Update README.md
380 |
381 | ## Release 0.4.1 (2020-09-02T12:18:03)
382 | ### Features
383 | * Updated the get_metric_aggregations to return global aggregations for both range query and current time query
384 | * :hatching_chick: follow pre-commit compliance for the application
385 | * :truck: include aicoe-ci configuration file with pytest env vars
386 | * add metricrangedf and tests (#137)
387 | ### Improvements
388 | * Make tests pass: ensure ordering of fixtures. (#140)
389 | * Deduplicate creation of MetricsList.
390 | ### Automatic Updates
391 | * :pushpin: Automatic update of dependency matplotlib from 3.3.0 to 3.3.1 (#148)
392 | * :pushpin: Automatic update of dependency matplotlib from 3.3.0 to 3.3.1 (#147)
393 | * :pushpin: Automatic update of dependency pandas from 1.1.0 to 1.1.1 (#146)
394 | * :pushpin: Automatic update of dependency pandas from 1.0.5 to 1.1.0
395 |
396 | ## Release 0.4.2 (2020-12-03T16:47:55)
397 | ### Features
398 | * Add method to check connection to Prometheus (#181)
399 | * feat: replaced exit with ValueError (#182)
400 | * Add MetricRangeDataFrame to RTD. Add sphinx to Pipfile. (#177)
401 | * Update example notebook (#166)
402 | * Add description of MetricSnapshotDataFrame,MetricRangeDataFrame to README
403 | * :sparkles: now with an OWNERS file, so that Thoth bots can help you even more
404 | * Updating the readme
405 | ### Bug Fixes
406 | * :sparkles: fixes to make pre-commit happy
407 | ### Automatic Updates
408 | * :pushpin: Automatic update of dependency httmock from 1.3.0 to 1.4.0 (#172)
409 | * :pushpin: Automatic update of dependency numpy from 1.19.2 to 1.19.4 (#171)
410 | * :pushpin: Automatic update of dependency matplotlib from 3.3.2 to 3.3.3 (#170)
411 | * :pushpin: Automatic update of dependency dateparser from 0.7.6 to 1.0.0 (#168)
412 | * :pushpin: Automatic update of dependency requests from 2.24.0 to 2.25.0 (#167)
413 | * :pushpin: Automatic update of dependency numpy from 1.19.1 to 1.19.2 (#162)
414 | * :pushpin: Automatic update of dependency numpy from 1.19.1 to 1.19.2 (#161)
415 | * :pushpin: Automatic update of dependency matplotlib from 3.3.1 to 3.3.2 (#160)
416 | * :pushpin: Automatic update of dependency numpy from 1.19.1 to 1.19.2 (#155)
417 | * :pushpin: Automatic update of dependency matplotlib from 3.3.1 to 3.3.2 (#158)
418 | * :pushpin: Automatic update of dependency pandas from 1.1.1 to 1.1.2 (#154)
419 |
--------------------------------------------------------------------------------
/prometheus_api_client/prometheus_connect.py:
--------------------------------------------------------------------------------
1 | """A Class for collection of metrics from a Prometheus Host."""
2 | from urllib.parse import urlparse
3 | import bz2
4 | import os
5 | import json
6 | import logging
7 | from datetime import datetime, timedelta
8 | import requests
9 | from requests.adapters import HTTPAdapter
10 | from requests.packages.urllib3.util.retry import Retry
11 | from requests import Session
12 |
13 | from .exceptions import PrometheusApiClientException
14 |
15 | # set up logging
16 |
17 | _LOGGER = logging.getLogger(__name__)
18 |
19 | # In case of a connection failure try 2 more times
20 | MAX_REQUEST_RETRIES = 3
21 | # wait 1 second before retrying in case of an error
22 | RETRY_BACKOFF_FACTOR = 1
23 | # retry only on these status
24 | RETRY_ON_STATUS = [408, 429, 500, 502, 503, 504]
25 |
26 |
27 | class PrometheusConnect:
28 | """
29 | A Class for collection of metrics from a Prometheus Host.
30 |
31 | :param url: (str) url for the prometheus host
32 | :param headers: (dict) A dictionary of http headers to be used to communicate with
33 | the host. Example: {"Authorization": "bearer my_oauth_token_to_the_host"}
34 | :param disable_ssl: (bool) If set to True, will disable ssl certificate verification
35 | for the http requests made to the prometheus host
36 | :param retry: (Retry) Retry adapter to retry on HTTP errors
37 | :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. See python
38 | requests library auth parameter for further explanation.
39 | :param proxy: (Optional) Proxies dictionary to enable connection through proxy.
40 | Example: {"http_proxy": "", "https_proxy": ""}
41 |     :param session: (Optional) Custom requests.Session to enable complex HTTP configuration
42 | :param timeout: (Optional) A timeout (in seconds) applied to all requests
43 | :param method: (Optional) (str) HTTP Method (GET or POST) to use for Query APIs that allow POST
44 | (/query, /query_range and /labels). Use POST for large and complex queries. Default is GET.
45 | """
46 |
47 | def __init__(
48 | self,
49 | url: str = "http://127.0.0.1:9090",
50 | headers: dict = None,
51 | disable_ssl: bool = False,
52 | retry: Retry = None,
53 | auth: tuple = None,
54 | proxy: dict = None,
55 | session: Session = None,
56 | timeout: int = None,
57 | method: str = "GET"
58 | ):
59 | """Functions as a Constructor for the class PrometheusConnect."""
60 | if url is None:
61 | raise TypeError("missing url")
62 |
63 | self.headers = headers
64 | self.url = url
65 | self.prometheus_host = urlparse(self.url).netloc
66 | self._all_metrics = None
67 | self._timeout = timeout
68 |
69 | if not isinstance(method, str):
70 | raise TypeError("Method must be a string")
71 |
72 | method = method.upper()
73 | if method not in {"GET", "POST"}:
74 | raise ValueError("Method can only be GET or POST")
75 |
76 | self._method = method
77 |
78 | if retry is None:
79 | retry = Retry(
80 | total=MAX_REQUEST_RETRIES,
81 | backoff_factor=RETRY_BACKOFF_FACTOR,
82 | status_forcelist=RETRY_ON_STATUS,
83 | )
84 |
85 | self.auth = auth
86 |
87 | if session is not None:
88 | self._session = session
89 | else:
90 | self._session = requests.Session()
91 | self._session.verify = not disable_ssl
92 |
93 | if proxy is not None:
94 | self._session.proxies = proxy
95 | self._session.mount(self.url, HTTPAdapter(max_retries=retry))
96 |
97 | def check_prometheus_connection(self, params: dict = None) -> bool:
98 | """
99 | Check Promethus connection.
100 |
101 | :param params: (dict) Optional dictionary containing parameters to be
102 | sent along with the API request.
103 | :returns: (bool) True if the endpoint can be reached, False if cannot be reached.
104 | """
105 | response = self._session.request(
106 | method="GET",
107 | url="{0}/".format(self.url),
108 | verify=self._session.verify,
109 | headers=self.headers,
110 | params=params,
111 | auth=self.auth,
112 | cert=self._session.cert,
113 | timeout=self._timeout,
114 | )
115 | return response.ok
116 |
117 | def all_metrics(self, params: dict = None):
118 | """
119 | Get the list of all the metrics that the prometheus host scrapes.
120 |
121 | :param params: (dict) Optional dictionary containing GET parameters to be
122 | sent along with the API request, such as "time"
123 | :returns: (list) A list of names of all the metrics available from the
124 | specified prometheus host
125 | :raises:
126 | (RequestException) Raises an exception in case of a connection error
127 | (PrometheusApiClientException) Raises in case of non 200 response status code
128 | """
129 | self._all_metrics = self.get_label_values(label_name="__name__", params=params)
130 | return self._all_metrics
131 |
132 |
133 | def get_series(self, start: datetime, end: datetime, params: dict = None):
134 | """
135 | Get a list series happening between start and end times.
136 |
137 | :param start: (int) Start time unix ts
138 | :param end: (int) End time unix ts
139 | :param params: (dict) Optional dictionary containing GET parameters to be
140 | sent along with the API request, such as "start", "end" or "match[]".
141 | :returns: (list) A list of labels from the specified prometheus host
142 | :raises:
143 | (RequestException) Raises an exception in case of a connection error
144 | (PrometheusApiClientException) Raises in case of non 200 response status code
145 | """
146 | params = params or {}
147 | params["start"] = start.timestamp()
148 | params["end"] = end.timestamp()
149 | response = self._session.get(
150 | "{0}/api/v1/series".format(self.url),
151 | verify=self._session.verify,
152 | headers=self.headers,
153 | params=params,
154 | auth=self.auth,
155 | cert=self._session.cert,
156 | timeout=self._timeout,
157 | )
158 |
159 | if response.status_code == 200:
160 | labels = response.json()["data"]
161 | else:
162 | raise PrometheusApiClientException(
163 | "HTTP Status Code {} ({!r})".format(response.status_code, response.content)
164 | )
165 | return labels
166 |
167 |
168 | def get_label_names(self, params: dict = None):
169 | """
170 | Get a list of all labels.
171 |
172 | :param params: (dict) Optional dictionary containing GET parameters to be
173 | sent along with the API request, such as "start", "end" or "match[]".
174 | :returns: (list) A list of labels from the specified prometheus host
175 | :raises:
176 | (RequestException) Raises an exception in case of a connection error
177 | (PrometheusApiClientException) Raises in case of non 200 response status code
178 | """
179 | params = params or {}
180 | response = self._session.request(
181 | method=self._method,
182 | url="{0}/api/v1/labels".format(self.url),
183 | verify=self._session.verify,
184 | headers=self.headers,
185 | params=params,
186 | auth=self.auth,
187 | cert=self._session.cert,
188 | timeout=self._timeout,
189 | )
190 |
191 | if response.status_code == 200:
192 | labels = response.json()["data"]
193 | else:
194 | raise PrometheusApiClientException(
195 | "HTTP Status Code {} ({!r})".format(response.status_code, response.content)
196 | )
197 | return labels
198 |
199 | def get_label_values(self, label_name: str, params: dict = None):
200 | """
201 | Get a list of all values for the label.
202 |
203 | :param label_name: (str) The name of the label for which you want to get all the values.
204 | :param params: (dict) Optional dictionary containing GET parameters to be
205 | sent along with the API request, such as "time"
206 | :returns: (list) A list of names for the label from the specified prometheus host
207 | :raises:
208 | (RequestException) Raises an exception in case of a connection error
209 | (PrometheusApiClientException) Raises in case of non 200 response status code
210 | """
211 | params = params or {}
212 | response = self._session.request(
213 | method="GET",
214 | url="{0}/api/v1/label/{1}/values".format(self.url, label_name),
215 | verify=self._session.verify,
216 | headers=self.headers,
217 | params=params,
218 | auth=self.auth,
219 | cert=self._session.cert,
220 | timeout=self._timeout,
221 | )
222 |
223 | if response.status_code == 200:
224 | labels = response.json()["data"]
225 | else:
226 | raise PrometheusApiClientException(
227 | "HTTP Status Code {} ({!r})".format(response.status_code, response.content)
228 | )
229 | return labels
230 |
231 | def get_current_metric_value(
232 | self, metric_name: str, label_config: dict = None, params: dict = None
233 | ):
234 | r"""
235 | Get the current metric value for the specified metric and label configuration.
236 |
237 | :param metric_name: (str) The name of the metric
238 | :param label_config: (dict) A dictionary that specifies metric labels and their
239 | values
240 | :param params: (dict) Optional dictionary containing GET parameters to be sent
241 | along with the API request, such as "time"
242 | :returns: (list) A list of current metric values for the specified metric
243 | :raises:
244 | (RequestException) Raises an exception in case of a connection error
245 | (PrometheusApiClientException) Raises in case of non 200 response status code
246 |
247 | Example Usage:
248 | .. code-block:: python
249 |
250 | prom = PrometheusConnect()
251 |
252 | my_label_config = {'cluster': 'my_cluster_id', 'label_2': 'label_2_value'}
253 |
254 | prom.get_current_metric_value(metric_name='up', label_config=my_label_config)
255 | """
256 | params = params or {}
257 | data = []
258 | if label_config:
259 | label_list = [str(key + "=" + "'" + label_config[key] + "'") for key in label_config]
260 | query = metric_name + "{" + ",".join(label_list) + "}"
261 | else:
262 | query = metric_name
263 |
264 | # using the query API to get raw data
265 | response = self._session.request(
266 | method=self._method,
267 | url="{0}/api/v1/query".format(self.url),
268 | params={**{"query": query}, **params},
269 | verify=self._session.verify,
270 | headers=self.headers,
271 | auth=self.auth,
272 | cert=self._session.cert,
273 | timeout=self._timeout,
274 | )
275 |
276 | if response.status_code == 200:
277 | data += response.json()["data"]["result"]
278 | else:
279 | raise PrometheusApiClientException(
280 | "HTTP Status Code {} ({!r})".format(response.status_code, response.content)
281 | )
282 | return data
283 |
284 | def get_metric_range_data(
285 | self,
286 | metric_name: str,
287 | label_config: dict = None,
288 | start_time: datetime = (datetime.now() - timedelta(minutes=10)),
289 | end_time: datetime = datetime.now(),
290 | chunk_size: timedelta = None,
291 | store_locally: bool = False,
292 | params: dict = None,
293 | ):
294 | r"""
295 | Get the current metric value for the specified metric and label configuration.
296 |
297 | :param metric_name: (str) The name of the metric.
298 | :param label_config: (dict) A dictionary specifying metric labels and their
299 | values.
300 | :param start_time: (datetime) A datetime object that specifies the metric range start time.
301 | :param end_time: (datetime) A datetime object that specifies the metric range end time.
302 | :param chunk_size: (timedelta) Duration of metric data downloaded in one request. For
303 | example, setting it to timedelta(hours=3) will download 3 hours worth of data in each
304 | request made to the prometheus host
305 | :param store_locally: (bool) If set to True, will store data locally at,
306 | `"./metrics/hostname/metric_date/name_time.json.bz2"`
307 | :param params: (dict) Optional dictionary containing GET parameters to be
308 | sent along with the API request, such as "time"
309 | :return: (list) A list of metric data for the specified metric in the given time
310 | range
311 | :raises:
312 | (RequestException) Raises an exception in case of a connection error
313 | (PrometheusApiClientException) Raises in case of non 200 response status code
314 |
315 | """
316 | params = params or {}
317 | data = []
318 |
319 | _LOGGER.debug("start_time: %s", start_time)
320 | _LOGGER.debug("end_time: %s", end_time)
321 | _LOGGER.debug("chunk_size: %s", chunk_size)
322 |
323 | if not (isinstance(start_time, datetime) and isinstance(end_time, datetime)):
324 | raise TypeError("start_time and end_time can only be of type datetime.datetime")
325 |
326 | if not chunk_size:
327 | chunk_size = end_time - start_time
328 | if not isinstance(chunk_size, timedelta):
329 | raise TypeError("chunk_size can only be of type datetime.timedelta")
330 |
331 | start = round(start_time.timestamp())
332 | end = round(end_time.timestamp())
333 |
334 | if end_time < start_time:
335 | raise ValueError("end_time must not be before start_time")
336 |
337 | if (end_time - start_time).total_seconds() < chunk_size.total_seconds():
338 | raise ValueError("specified chunk_size is too big")
339 | chunk_seconds = round(chunk_size.total_seconds())
340 |
341 | if label_config:
342 | label_list = [str(key + "=" + "'" + label_config[key] + "'") for key in label_config]
343 | query = metric_name + "{" + ",".join(label_list) + "}"
344 | else:
345 | query = metric_name
346 | _LOGGER.debug("Prometheus Query: %s", query)
347 |
348 | while start < end:
349 | if start + chunk_seconds > end:
350 | chunk_seconds = end - start
351 |
352 | # using the query API to get raw data
353 | response = self._session.request(
354 | method=self._method,
355 | url="{0}/api/v1/query".format(self.url),
356 | params={
357 | **{
358 | "query": query + "[" + str(chunk_seconds) + "s" + "]",
359 | "time": start + chunk_seconds,
360 | },
361 | **params,
362 | },
363 | verify=self._session.verify,
364 | headers=self.headers,
365 | auth=self.auth,
366 | cert=self._session.cert,
367 | timeout=self._timeout,
368 | )
369 | if response.status_code == 200:
370 | data += response.json()["data"]["result"]
371 | else:
372 | raise PrometheusApiClientException(
373 | "HTTP Status Code {} ({!r})".format(response.status_code, response.content)
374 | )
375 | if store_locally:
376 | # store it locally
377 | self._store_metric_values_local(
378 | metric_name,
379 | json.dumps(response.json()["data"]["result"]),
380 | start + chunk_seconds,
381 | )
382 |
383 | start += chunk_seconds
384 | return data
385 |
386 | def _store_metric_values_local(self, metric_name, values, end_timestamp, compressed=False):
387 | r"""
388 | Store metrics on the local filesystem, optionally with bz2 compression.
389 |
390 | :param metric_name: (str) the name of the metric being saved
391 | :param values: (str) metric data in JSON string format
392 | :param end_timestamp: (int) timestamp in any format understood by \
393 | datetime.datetime.fromtimestamp()
394 | :param compressed: (bool) whether or not to apply bz2 compression
395 | :returns: (str) path to the saved metric file
396 | """
397 | if not values:
398 | _LOGGER.debug("No values for %s", metric_name)
399 | return None
400 |
401 | file_path = self._metric_filename(metric_name, end_timestamp)
402 |
403 | if compressed:
404 | payload = bz2.compress(str(values).encode("utf-8"))
405 | file_path = file_path + ".bz2"
406 | else:
407 | payload = str(values).encode("utf-8")
408 |
409 | os.makedirs(os.path.dirname(file_path), exist_ok=True)
410 | with open(file_path, "wb") as file:
411 | file.write(payload)
412 |
413 | return file_path
414 |
415 | def _metric_filename(self, metric_name: str, end_timestamp: int):
416 | r"""
417 | Add a timestamp to the filename before it is stored.
418 |
419 | :param metric_name: (str) the name of the metric being saved
420 | :param end_timestamp: (int) timestamp in any format understood by \
421 | datetime.datetime.fromtimestamp()
422 | :returns: (str) the generated path
423 | """
424 | end_time_stamp = datetime.fromtimestamp(end_timestamp)
425 | directory_name = end_time_stamp.strftime("%Y%m%d")
426 | timestamp = end_time_stamp.strftime("%Y%m%d%H%M")
427 | object_path = (
428 | "./metrics/"
429 | + self.prometheus_host
430 | + "/"
431 | + metric_name
432 | + "/"
433 | + directory_name
434 | + "/"
435 | + timestamp
436 | + ".json"
437 | )
438 | return object_path
439 |
440 | def custom_query(self, query: str, params: dict = None, timeout: int = None):
441 | """
442 | Send a custom query to a Prometheus Host.
443 |
444 | This method takes as input a string which will be sent as a query to
445 | the specified Prometheus Host. This query is a PromQL query.
446 |
447 | :param query: (str) This is a PromQL query, a few examples can be found
448 | at https://prometheus.io/docs/prometheus/latest/querying/examples/
449 | :param params: (dict) Optional dictionary containing GET parameters to be
450 | sent along with the API request, such as "time"
451 | :param timeout: (Optional) A timeout (in seconds) applied to the request
452 | :returns: (list) A list of metric data received in response of the query sent
453 | :raises:
454 | (RequestException) Raises an exception in case of a connection error
455 | (PrometheusApiClientException) Raises in case of non 200 response status code
456 | """
457 | params = params or {}
458 | data = None
459 | query = str(query)
460 | timeout = self._timeout if timeout is None else timeout
461 | # using the query API to get raw data
462 | response = self._session.request(
463 | method=self._method,
464 | url="{0}/api/v1/query".format(self.url),
465 | params={**{"query": query}, **params},
466 | verify=self._session.verify,
467 | headers=self.headers,
468 | auth=self.auth,
469 | cert=self._session.cert,
470 | timeout=timeout,
471 | )
472 | if response.status_code == 200:
473 | data = response.json()["data"]["result"]
474 | else:
475 | raise PrometheusApiClientException(
476 | "HTTP Status Code {} ({!r})".format(response.status_code, response.content)
477 | )
478 |
479 | return data
480 |
481 | def custom_query_range(
482 | self, query: str, start_time: datetime, end_time: datetime, step: str, params: dict = None, timeout: int = None
483 | ):
484 | """
485 | Send a query_range to a Prometheus Host.
486 |
487 | This method takes as input a string which will be sent as a query to
488 | the specified Prometheus Host. This query is a PromQL query.
489 |
490 | :param query: (str) This is a PromQL query, a few examples can be found
491 | at https://prometheus.io/docs/prometheus/latest/querying/examples/
492 | :param start_time: (datetime) A datetime object that specifies the query range start time.
493 | :param end_time: (datetime) A datetime object that specifies the query range end time.
494 | :param step: (str) Query resolution step width in duration format or float number of seconds
495 | :param params: (dict) Optional dictionary containing GET parameters to be
496 | sent along with the API request, such as "timeout"
497 | :param timeout: (Optional) A timeout (in seconds) applied to the request
498 | :returns: (dict) A dict of metric data received in response of the query sent
499 | :raises:
500 | (RequestException) Raises an exception in case of a connection error
501 | (PrometheusApiClientException) Raises in case of non 200 response status code
502 | """
503 | start = round(start_time.timestamp())
504 | end = round(end_time.timestamp())
505 | params = params or {}
506 | data = None
507 | query = str(query)
508 | timeout = self._timeout if timeout is None else timeout
509 | # using the query_range API to get raw data
510 | response = self._session.request(
511 | method=self._method,
512 | url="{0}/api/v1/query_range".format(self.url),
513 | params={**{"query": query, "start": start, "end": end, "step": step}, **params},
514 | verify=self._session.verify,
515 | headers=self.headers,
516 | auth=self.auth,
517 | cert=self._session.cert,
518 | timeout=timeout,
519 | )
520 | if response.status_code == 200:
521 | data = response.json()["data"]["result"]
522 | else:
523 | raise PrometheusApiClientException(
524 | "HTTP Status Code {} ({!r})".format(response.status_code, response.content)
525 | )
526 | return data
527 |
528 | def get_metric_aggregation(
529 | self,
530 | query: str,
531 | operations: list,
532 | start_time: datetime = None,
533 | end_time: datetime = None,
534 | step: str = "15",
535 | params: dict = None,
536 | ):
537 | """
538 | Get aggregations on metric values received from PromQL query.
539 |
540 | This method takes as input a string which will be sent as a query to
541 | the specified Prometheus Host. This query is a PromQL query. And, a
542 | list of operations to perform such as- sum, max, min, deviation, etc.
543 | with start_time, end_time and step.
544 |
545 | The received query is passed to the custom_query_range method which returns
546 | the result of the query and the values are extracted from the result.
547 |
548 | :param query: (str) This is a PromQL query, a few examples can be found
549 | at https://prometheus.io/docs/prometheus/latest/querying/examples/
550 | :param operations: (list) A list of operations to perform on the values.
551 | Operations are specified in string type.
552 | :param start_time: (datetime) A datetime object that specifies the query range start time.
553 | :param end_time: (datetime) A datetime object that specifies the query range end time.
554 | :param step: (str) Query resolution step width in duration format or float number of seconds
555 | :param params: (dict) Optional dictionary containing GET parameters to be
556 | sent along with the API request, such as "timeout"
557 | Available operations - sum, max, min, variance, nth percentile, deviation
558 | and average.
559 |
560 | :returns: (dict) A dict of aggregated values received in response to the operations
561 | performed on the values for the query sent.
562 |
563 | Example output:
564 | .. code-block:: python
565 |
566 | {
567 | 'sum': 18.05674,
568 | 'max': 6.009373
569 | }
570 | """
571 | try:
572 | import numpy
573 | except ImportError as e:
574 | raise ImportError(
575 | "NumPy is required for metric aggregation operations. "
576 | "Please install it with: pip install prometheus-api-client[analytics] "
577 | "or pip install prometheus-api-client[all]"
578 | ) from e
579 |
580 | if not isinstance(operations, list):
581 | raise TypeError("Operations can be only of type list")
582 | if len(operations) == 0:
583 | _LOGGER.debug("No operations found to perform")
584 | return None
585 | aggregated_values = {}
586 | query_values = []
587 | if start_time is not None and end_time is not None:
588 | data = self.custom_query_range(
589 | query=query, params=params, start_time=start_time, end_time=end_time, step=step
590 | )
591 | for result in data:
592 | values = result["values"]
593 | for val in values:
594 | query_values.append(float(val[1]))
595 | else:
596 | data = self.custom_query(query, params)
597 | for result in data:
598 | val = float(result["value"][1])
599 | query_values.append(val)
600 |
601 | if len(query_values) == 0:
602 | _LOGGER.debug("No values found for given query.")
603 | return None
604 |
605 | np_array = numpy.array(query_values)
606 | for operation in operations:
607 | if operation == "sum":
608 | aggregated_values["sum"] = numpy.sum(np_array)
609 | elif operation == "max":
610 | aggregated_values["max"] = numpy.max(np_array)
611 | elif operation == "min":
612 | aggregated_values["min"] = numpy.min(np_array)
613 | elif operation == "average":
614 | aggregated_values["average"] = numpy.average(np_array)
615 | elif operation.startswith("percentile"):
616 | percentile = float(operation.split("_")[1])
617 | aggregated_values["percentile_" + str(percentile)] = numpy.percentile(
618 | query_values, percentile
619 | )
620 | elif operation == "deviation":
621 | aggregated_values["deviation"] = numpy.std(np_array)
622 | elif operation == "variance":
623 | aggregated_values["variance"] = numpy.var(np_array)
624 | else:
625 | raise TypeError("Invalid operation: " + operation)
626 | return aggregated_values
627 |
628 |
629 | def get_scrape_pools(self) -> list[str]:
630 | """
631 | Get a list of all scrape pools in activeTargets.
632 | """
633 | scrape_pools = []
634 | for target in self.get_targets()['activeTargets']:
635 | scrape_pools.append(target['scrapePool'])
636 | return list(set(scrape_pools))
637 |
638 | def get_targets(self, state: str = None, scrape_pool: str = None):
639 | """
640 | Get a list of all targets from Prometheus.
641 |
642 | :param state: (str) Optional filter for target state ('active', 'dropped', 'any').
643 | If None, returns both active and dropped targets.
644 | :param scrape_pool: (str) Optional filter by scrape pool name
645 | :returns: (dict) A dictionary containing active and dropped targets
646 | :raises:
647 | (RequestException) Raises an exception in case of a connection error
648 | (PrometheusApiClientException) Raises in case of non 200 response status code
649 | """
650 | params = {}
651 | if state:
652 | params['state'] = state
653 | if scrape_pool:
654 | params['scrapePool'] = scrape_pool
655 |
656 | response = self._session.request(
657 | method="GET",
658 | url="{0}/api/v1/targets".format(self.url),
659 | verify=self._session.verify,
660 | headers=self.headers,
661 | params=params,
662 | auth=self.auth,
663 | cert=self._session.cert,
664 | timeout=self._timeout,
665 | )
666 |
667 | if response.status_code == 200:
668 | return response.json()["data"]
669 | else:
670 | raise PrometheusApiClientException(
671 | "HTTP Status Code {} ({!r})".format(
672 | response.status_code, response.content)
673 | )
674 |
675 | def get_target_metadata(self, target: dict[str, str], metric: str = None):
676 | """
677 | Get metadata about metrics from a specific target.
678 |
679 | :param target: (dict) A dictionary containing target labels to match against (e.g. {'job': 'prometheus'})
680 | :param metric: (str) Optional metric name to filter metadata
681 | :returns: (list) A list of metadata entries for matching targets
682 | :raises:
683 | (RequestException) Raises an exception in case of a connection error
684 | (PrometheusApiClientException) Raises in case of non 200 response status code
685 | """
686 | params = {}
687 |
688 | # Convert target dict to label selector string
689 | if metric:
690 | params['metric'] = metric
691 |
692 | if target:
693 | match_target = "{" + \
694 | ",".join(f'{k}="{v}"' for k, v in target.items()) + "}"
695 | params['match_target'] = match_target
696 |
697 | response = self._session.request(
698 | method="GET",
699 | url="{0}/api/v1/targets/metadata".format(self.url),
700 | verify=self._session.verify,
701 | headers=self.headers,
702 | params=params,
703 | auth=self.auth,
704 | cert=self._session.cert,
705 | timeout=self._timeout,
706 | )
707 |
708 | if response.status_code == 200:
709 | return response.json()["data"]
710 | else:
711 | raise PrometheusApiClientException(
712 | "HTTP Status Code {} ({!r})".format(
713 | response.status_code, response.content)
714 | )
715 |
716 | def get_metric_metadata(self, metric: str, limit: int = None, limit_per_metric: int = None):
717 | """
718 | Get metadata about metrics.
719 |
720 | :param metric: (str) Optional metric name to filter metadata
721 | :param limit: (int) Optional maximum number of metrics to return
722 | :param limit_per_metric: (int) Optional maximum number of metadata entries per metric
723 | :returns: (dict) A dictionary mapping metric names to lists of metadata entries in format:
724 | {'metric_name': [{'type': str, 'help': str, 'unit': str}, ...]}
725 | :raises:
726 | (RequestException) Raises an exception in case of a connection error
727 | (PrometheusApiClientException) Raises in case of non 200 response status code
728 | """
729 | params = {}
730 |
731 | if metric:
732 | params['metric'] = metric
733 |
734 | if limit:
735 | params['limit'] = limit
736 |
737 | if limit_per_metric:
738 | params['limit_per_metric'] = limit_per_metric
739 |
740 | response = self._session.request(
741 | method="GET",
742 | url="{0}/api/v1/metadata".format(self.url),
743 | verify=self._session.verify,
744 | headers=self.headers,
745 | params=params,
746 | auth=self.auth,
747 | cert=self._session.cert,
748 | timeout=self._timeout,
749 | )
750 |
751 | if response.status_code == 200:
752 | data = response.json()["data"]
753 | formatted_data = []
754 | for k, v in data.items():
755 | for v_ in v:
756 | formatted_data.append({
757 | "metric_name": k,
758 | "type": v_.get('type', 'unknown'),
759 | "help": v_.get('help', ''),
760 | "unit": v_.get('unit', '')
761 | })
762 | return formatted_data
763 | else:
764 | raise PrometheusApiClientException(
765 | "HTTP Status Code {} ({!r})".format(
766 | response.status_code, response.content)
767 | )
768 |
--------------------------------------------------------------------------------