├── django_prometheus ├── cache │ ├── __init__.py │ ├── backends │ │ ├── __init__.py │ │ ├── locmem.py │ │ ├── filebased.py │ │ ├── django_memcached_consul.py │ │ ├── memcached.py │ │ └── redis.py │ └── metrics.py ├── tests │ ├── __init__.py │ ├── end2end │ │ ├── testapp │ │ │ ├── __init__.py │ │ │ ├── templates │ │ │ │ ├── index.html │ │ │ │ ├── lawn.html │ │ │ │ ├── sql.html │ │ │ │ ├── slow.html │ │ │ │ └── help.html │ │ │ ├── wsgi.py │ │ │ ├── models.py │ │ │ ├── urls.py │ │ │ ├── helpers.py │ │ │ ├── test_migrations.py │ │ │ ├── views.py │ │ │ ├── test_models.py │ │ │ ├── test_caches.py │ │ │ ├── test_middleware_custom_labels.py │ │ │ ├── settings.py │ │ │ ├── test_middleware.py │ │ │ └── test_db.py │ │ └── manage.py │ ├── test_django_prometheus.py │ ├── test_exports.py │ └── test_testutils.py ├── db │ ├── backends │ │ ├── __init__.py │ │ ├── mysql │ │ │ ├── __init__.py │ │ │ └── base.py │ │ ├── postgis │ │ │ ├── __init__.py │ │ │ └── base.py │ │ ├── postgresql │ │ │ ├── __init__.py │ │ │ └── base.py │ │ ├── spatialite │ │ │ ├── __init__.py │ │ │ └── base.py │ │ ├── sqlite3 │ │ │ ├── __init__.py │ │ │ └── base.py │ │ └── README.md │ ├── __init__.py │ ├── metrics.py │ └── common.py ├── urls.py ├── conf │ └── __init__.py ├── __init__.py ├── utils.py ├── apps.py ├── models.py ├── migrations.py ├── exports.py ├── testutils.py └── middleware.py ├── setup.cfg ├── MANIFEST.in ├── examples ├── django-promdash.png └── prometheus │ ├── prometheus.yml │ ├── README.md │ ├── consoles │ └── django.html │ └── django.rules ├── pyproject.toml ├── requirements.txt ├── .pre-commit-config.yaml ├── ruff.toml ├── tox.ini ├── .github └── workflows │ ├── pre-release.yml │ ├── release.yml │ └── ci.yml ├── .gitignore ├── CONTRIBUTING.md ├── CHANGELOG.md ├── setup.py ├── update_version_from_git.py ├── documentation └── exports.md ├── CODE_OF_CONDUCT.md ├── README.md └── LICENSE /django_prometheus/cache/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /django_prometheus/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | -------------------------------------------------------------------------------- /django_prometheus/cache/backends/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/mysql/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/postgis/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /django_prometheus/db/backends/postgresql/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/spatialite/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/sqlite3/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/templates/index.html: -------------------------------------------------------------------------------- 1 | This is the index. 2 | -------------------------------------------------------------------------------- /examples/django-promdash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/django-commons/django-prometheus/HEAD/examples/django-promdash.png -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/templates/lawn.html: -------------------------------------------------------------------------------- 1 |

Aaah, {{ lawn.location }}, the best place on Earth, probably.

2 | -------------------------------------------------------------------------------- /django_prometheus/urls.py: -------------------------------------------------------------------------------- 1 | from django.urls import path 2 | 3 | from django_prometheus import exports 4 | 5 | urlpatterns = [path("metrics", exports.ExportToDjangoView, name="prometheus-django-metrics")] 6 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools >= 67.7.2, < 72.0.0", "wheel >= 0.40.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.pytest.ini_options] 6 | addopts = " --ignore django_prometheus/tests/end2end" 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | django-redis>=4.12.1 2 | prometheus-client>=0.12.0 3 | pip-prometheus>=1.2.1 4 | mysqlclient 5 | psycopg 6 | pytest==7.4.3 7 | pytest-django 8 | pylibmc 9 | pymemcache 10 | python-memcached 11 | setuptools<72.0.0 12 | wheel 13 | -------------------------------------------------------------------------------- /examples/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 10s 3 | evaluation_interval: 10s 4 | external_labels: 5 | monitor: django-monitor 6 | rule_files: 7 | - "django.rules" 8 | scrape_configs: 9 | - job_name: "django" 10 | static_configs: 11 | - targets: ["localhost:8000"] 12 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | 5 | if __name__ == "__main__": 6 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings") 7 | 8 | from django.core.management import execute_from_command_line 9 | 10 | execute_from_command_line(sys.argv) 11 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/sqlite3/base.py: -------------------------------------------------------------------------------- 1 | from django.db.backends.sqlite3 import base 2 | 3 | from django_prometheus.db.common import DatabaseWrapperMixin 4 | 5 | 6 | class DatabaseFeatures(base.DatabaseFeatures): 7 | """Our database has the exact same features as the base one.""" 8 | 9 | 10 | class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper): 11 | CURSOR_CLASS = base.SQLiteCursorWrapper 12 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/wsgi.py: -------------------------------------------------------------------------------- 1 | """WSGI config for testapp project. 2 | 3 | It exposes the WSGI callable as a module-level variable named ``application``. 
4 | 5 | For more information on this file, see 6 | https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ 7 | """ 8 | 9 | import os 10 | 11 | from django.core.wsgi import get_wsgi_application 12 | 13 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings") 14 | 15 | application = get_wsgi_application() 16 | -------------------------------------------------------------------------------- /django_prometheus/tests/test_django_prometheus.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from django_prometheus.utils import PowersOf 3 | 4 | 5 | class TestDjangoPrometheus: 6 | def testPowersOf(self): 7 | """Tests utils.PowersOf.""" 8 | assert PowersOf(2, 4) == [0, 1, 2, 4, 8] 9 | assert PowersOf(3, 5, lower=1) == [0, 3, 9, 27, 81, 243] 10 | assert PowersOf(2, 4, include_zero=False) == [1, 2, 4, 8] 11 | assert PowersOf(2, 6, lower=2, include_zero=False) == [4, 8, 16, 32, 64, 128] 12 | -------------------------------------------------------------------------------- /django_prometheus/db/__init__.py: -------------------------------------------------------------------------------- 1 | # Import all metrics 2 | from django_prometheus.db.metrics import ( 3 | Counter, 4 | connection_errors_total, 5 | connections_total, 6 | errors_total, 7 | execute_many_total, 8 | execute_total, 9 | query_duration_seconds, 10 | ) 11 | 12 | __all__ = [ 13 | "Counter", 14 | "connection_errors_total", 15 | "connections_total", 16 | "errors_total", 17 | "execute_many_total", 18 | "execute_total", 19 | "query_duration_seconds", 20 | ] 21 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/spatialite/base.py: -------------------------------------------------------------------------------- 1 | from django.contrib.gis.db.backends.spatialite import base, features 2 | from django.db.backends.sqlite3 import base as sqlite_base 3 | 4 | from django_prometheus.db.common import DatabaseWrapperMixin 5 | 6 | 7 | class DatabaseFeatures(features.DatabaseFeatures): 8 | """Our database has the exact same features as the base one.""" 9 | 10 | 11 | class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper): 12 | CURSOR_CLASS = sqlite_base.SQLiteCursorWrapper 13 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/models.py: -------------------------------------------------------------------------------- 1 | from django.db.models import CharField, Model, PositiveIntegerField 2 | 3 | from django_prometheus.models import ExportModelOperationsMixin 4 | 5 | 6 | class Dog(ExportModelOperationsMixin("dog"), Model): 7 | name = CharField(max_length=100, unique=True) 8 | breed = CharField(max_length=100, blank=True, null=True) 9 | age = PositiveIntegerField(blank=True, null=True) 10 | 11 | 12 | class Lawn(ExportModelOperationsMixin("lawn"), Model): 13 | location = CharField(max_length=100) 14 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/urls.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | from django.urls import include, path 3 | 4 | from testapp import views 5 | 6 | urlpatterns = [ 7 | path("", views.index), 8 | path("help", views.help), 9 | path("slow", views.slow, name="slow"), 10 | path("objection", views.objection), 11 | path("sql", views.sql), 12 | path("newlawn/", 
views.newlawn), 13 | path("file", views.file), 14 | path("", include("django_prometheus.urls")), 15 | path("admin/", admin.site.urls), 16 | ] 17 | -------------------------------------------------------------------------------- /django_prometheus/conf/__init__.py: -------------------------------------------------------------------------------- 1 | from django.conf import settings 2 | 3 | NAMESPACE = "" 4 | 5 | PROMETHEUS_LATENCY_BUCKETS = ( 6 | 0.01, 7 | 0.025, 8 | 0.05, 9 | 0.075, 10 | 0.1, 11 | 0.25, 12 | 0.5, 13 | 0.75, 14 | 1.0, 15 | 2.5, 16 | 5.0, 17 | 7.5, 18 | 10.0, 19 | 25.0, 20 | 50.0, 21 | 75.0, 22 | float("inf"), 23 | ) 24 | 25 | if settings.configured: 26 | NAMESPACE = getattr(settings, "PROMETHEUS_METRIC_NAMESPACE", NAMESPACE) 27 | PROMETHEUS_LATENCY_BUCKETS = getattr(settings, "PROMETHEUS_LATENCY_BUCKETS", PROMETHEUS_LATENCY_BUCKETS) 28 | -------------------------------------------------------------------------------- /django_prometheus/__init__.py: -------------------------------------------------------------------------------- 1 | """Django-Prometheus 2 | 3 | https://github.com/korfuri/django-prometheus 4 | """ 5 | 6 | # Import all files that define metrics. This has the effect that 7 | # `import django_prometheus` will always instantiate all metric 8 | # objects right away. 9 | from django_prometheus import middleware, models 10 | 11 | __all__ = ["middleware", "models", "pip_prometheus"] 12 | 13 | __version__ = "2.5.0.dev0" 14 | 15 | # Import pip_prometheus to export the pip metrics automatically. 16 | try: 17 | import pip_prometheus 18 | except ImportError: 19 | # If people don't have pip, don't export anything. 20 | pass 21 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/mysql/base.py: -------------------------------------------------------------------------------- 1 | from django.db.backends.mysql import base 2 | 3 | from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper 4 | 5 | 6 | class DatabaseFeatures(base.DatabaseFeatures): 7 | """Our database has the exact same features as the base one.""" 8 | 9 | 10 | class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper): 11 | CURSOR_CLASS = base.CursorWrapper 12 | 13 | def create_cursor(self, name=None): 14 | cursor = self.connection.cursor() 15 | CursorWrapper = ExportingCursorWrapper(self.CURSOR_CLASS, self.alias, self.vendor) 16 | return CursorWrapper(cursor) 17 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/helpers.py: -------------------------------------------------------------------------------- 1 | DJANGO_MIDDLEWARES = [ 2 | "django.contrib.sessions.middleware.SessionMiddleware", 3 | "django.middleware.common.CommonMiddleware", 4 | "django.middleware.csrf.CsrfViewMiddleware", 5 | "django.contrib.auth.middleware.AuthenticationMiddleware", 6 | "django.contrib.messages.middleware.MessageMiddleware", 7 | "django.middleware.clickjacking.XFrameOptionsMiddleware", 8 | "django.middleware.security.SecurityMiddleware", 9 | ] 10 | 11 | 12 | def get_middleware(before, after): 13 | middleware = [before] 14 | middleware.extend(DJANGO_MIDDLEWARES) 15 | middleware.append(after) 16 | return middleware 17 | -------------------------------------------------------------------------------- /examples/prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Running a demo Prometheus 2 | 3 | To run a demo 
Prometheus, you'll need to follow these steps: 4 | 5 | * Have a Django application running and exporting its stats. The 6 | provided `prometheus.yml` assumes the stats are exported at 7 | `http://127.0.0.1:8000/metrics`. 8 | * Install Prometheus by cloning it somewhere, see the [installation 9 | instructions](http://prometheus.io/docs/introduction/install/). 10 | Let's assume you cloned it to `~/prometheus`. 11 | * Run prometheus like this: 12 | 13 | ```shell 14 | ~/prometheus/prometheus \ 15 | --config.file=prometheus.yml \ 16 | --web.console.templates consoles/ \ 17 | --web.console.libraries ~/prometheus/console_libraries/ 18 | ``` 19 | 20 | * Navigate to `http://localhost:9090`. 21 | -------------------------------------------------------------------------------- /django_prometheus/cache/backends/locmem.py: -------------------------------------------------------------------------------- 1 | from django.core.cache.backends import locmem 2 | 3 | from django_prometheus.cache.metrics import ( 4 | django_cache_get_total, 5 | django_cache_hits_total, 6 | django_cache_misses_total, 7 | ) 8 | 9 | 10 | class LocMemCache(locmem.LocMemCache): 11 | """Inherit filebased cache to add metrics about hit/miss ratio""" 12 | 13 | def get(self, key, default=None, version=None): 14 | django_cache_get_total.labels(backend="locmem").inc() 15 | cached = super().get(key, default=None, version=version) 16 | if cached is not None: 17 | django_cache_hits_total.labels(backend="locmem").inc() 18 | else: 19 | django_cache_misses_total.labels(backend="locmem").inc() 20 | return cached or default 21 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/templates/sql.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |

Execute some SQL here, for fun and profit!

4 |

Note that this is a very bad vulnerability: it gives anyone direct 5 | access to your whole database. This only exists to test that 6 | django_prometheus is working.

7 |
8 | 14 |
15 | {% if query %} 16 |

Your query was:

17 |

{{ query }}
18 |

Your results were:

19 |

{{ rows }}

20 | {% endif %} 21 | 22 | 23 | -------------------------------------------------------------------------------- /django_prometheus/cache/metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client import Counter 2 | 3 | from django_prometheus.conf import NAMESPACE 4 | 5 | django_cache_get_total = Counter( 6 | "django_cache_get_total", 7 | "Total get requests on cache", 8 | ["backend"], 9 | namespace=NAMESPACE, 10 | ) 11 | django_cache_hits_total = Counter( 12 | "django_cache_get_hits_total", 13 | "Total hits on cache", 14 | ["backend"], 15 | namespace=NAMESPACE, 16 | ) 17 | django_cache_misses_total = Counter( 18 | "django_cache_get_misses_total", 19 | "Total misses on cache", 20 | ["backend"], 21 | namespace=NAMESPACE, 22 | ) 23 | django_cache_get_fail_total = Counter( 24 | "django_cache_get_fail_total", 25 | "Total get request failures by cache", 26 | ["backend"], 27 | namespace=NAMESPACE, 28 | ) 29 | -------------------------------------------------------------------------------- /django_prometheus/cache/backends/filebased.py: -------------------------------------------------------------------------------- 1 | from django.core.cache.backends import filebased 2 | 3 | from django_prometheus.cache.metrics import ( 4 | django_cache_get_total, 5 | django_cache_hits_total, 6 | django_cache_misses_total, 7 | ) 8 | 9 | 10 | class FileBasedCache(filebased.FileBasedCache): 11 | """Inherit filebased cache to add metrics about hit/miss ratio""" 12 | 13 | def get(self, key, default=None, version=None): 14 | django_cache_get_total.labels(backend="filebased").inc() 15 | cached = super().get(key, default=None, version=version) 16 | if cached is not None: 17 | django_cache_hits_total.labels(backend="filebased").inc() 18 | else: 19 | django_cache_misses_total.labels(backend="filebased").inc() 20 | return cached or default 21 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/postgresql/base.py: -------------------------------------------------------------------------------- 1 | from django.db.backends.postgresql import base 2 | from django.db.backends.postgresql.base import Cursor 3 | 4 | from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper 5 | 6 | 7 | class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper): 8 | def get_new_connection(self, *args, **kwargs): 9 | conn = super().get_new_connection(*args, **kwargs) 10 | conn.cursor_factory = ExportingCursorWrapper( 11 | conn.cursor_factory or Cursor(), 12 | self.alias, 13 | self.vendor, 14 | ) 15 | 16 | return conn 17 | 18 | def create_cursor(self, name=None): 19 | # cursor_factory is a kwarg to connect() so restore create_cursor()'s 20 | # default behavior 21 | return base.DatabaseWrapper.create_cursor(self, name=name) 22 | -------------------------------------------------------------------------------- /django_prometheus/cache/backends/django_memcached_consul.py: -------------------------------------------------------------------------------- 1 | from django_memcached_consul import memcached 2 | 3 | from django_prometheus.cache.metrics import ( 4 | django_cache_get_total, 5 | django_cache_hits_total, 6 | django_cache_misses_total, 7 | ) 8 | 9 | 10 | class MemcachedCache(memcached.MemcachedCache): 11 | """Inherit django_memcached_consul to add metrics about hit/miss ratio""" 12 | 13 | def get(self, key, default=None, version=None): 14 | 
django_cache_get_total.labels(backend="django_memcached_consul").inc() 15 | cached = super().get(key, default=None, version=version) 16 | if cached is not None: 17 | django_cache_hits_total.labels(backend="django_memcached_consul").inc() 18 | else: 19 | django_cache_misses_total.labels(backend="django_memcached_consul").inc() 20 | return cached or default 21 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/postgis/base.py: -------------------------------------------------------------------------------- 1 | from django.contrib.gis.db.backends.postgis import base 2 | from django.db.backends.postgresql.base import Cursor 3 | 4 | from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper 5 | 6 | 7 | class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper): 8 | def get_new_connection(self, *args, **kwargs): 9 | conn = super().get_new_connection(*args, **kwargs) 10 | conn.cursor_factory = ExportingCursorWrapper( 11 | conn.cursor_factory or Cursor(), 12 | "postgis", 13 | self.vendor, 14 | ) 15 | 16 | return conn 17 | 18 | def create_cursor(self, name=None): 19 | # cursor_factory is a kwarg to connect() so restore create_cursor()'s 20 | # default behavior 21 | return base.DatabaseWrapper.create_cursor(self, name=name) 22 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: check-case-conflict 6 | - id: check-merge-conflict 7 | - id: check-yaml 8 | - id: end-of-file-fixer 9 | - id: trailing-whitespace 10 | - repo: https://github.com/adamchainz/django-upgrade 11 | rev: '1.25.0' 12 | hooks: 13 | - id: django-upgrade 14 | args: [--target-version, '4.2'] 15 | - repo: https://github.com/astral-sh/ruff-pre-commit 16 | rev: v0.11.13 17 | hooks: 18 | - id: ruff-check 19 | name: ruff check 20 | args: ['--fix'] 21 | - id: ruff-format 22 | name: ruff format 23 | - repo: https://github.com/asottile/pyupgrade 24 | rev: v3.20.0 25 | hooks: 26 | - id: pyupgrade 27 | - repo: https://github.com/google/yamlfmt 28 | rev: v0.17.0 29 | hooks: 30 | - id: yamlfmt 31 | -------------------------------------------------------------------------------- /django_prometheus/utils.py: -------------------------------------------------------------------------------- 1 | from timeit import default_timer 2 | 3 | 4 | def Time(): 5 | """Returns some representation of the current time. 6 | 7 | This wrapper is meant to take advantage of a higher time 8 | resolution when available. Thus, its return value should be 9 | treated as an opaque object. It can be compared to the current 10 | time with TimeSince(). 11 | """ 12 | return default_timer() 13 | 14 | 15 | def TimeSince(t): 16 | """Compares a value returned by Time() to the current time. 17 | 18 | Returns: 19 | the time since t, in fractional seconds. 
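        Typical use: ``start = Time()`` ... ``elapsed = TimeSince(start)``.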
20 | 21 | """ 22 | return default_timer() - t 23 | 24 | 25 | def PowersOf(logbase, count, lower=0, include_zero=True): 26 | """Returns a list of count powers of logbase (from logbase**lower).""" 27 | if not include_zero: 28 | return [logbase**i for i in range(lower, count + lower)] 29 | return [0] + [logbase**i for i in range(lower, count + lower)] 30 | -------------------------------------------------------------------------------- /django_prometheus/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | from django.conf import settings 3 | 4 | import django_prometheus 5 | from django_prometheus.exports import SetupPrometheusExportsFromConfig 6 | from django_prometheus.migrations import ExportMigrations 7 | 8 | 9 | class DjangoPrometheusConfig(AppConfig): 10 | name = django_prometheus.__name__ 11 | verbose_name = "Django-Prometheus" 12 | 13 | def ready(self): 14 | """Initializes the Prometheus exports if they are enabled in the config. 15 | 16 | Note that this is called even for other management commands 17 | than `runserver`. As such, it is possible to scrape the 18 | metrics of a running `manage.py test` or of another command, 19 | which shouldn't be done for real monitoring (since these jobs 20 | are usually short-lived), but can be useful for debugging. 21 | """ 22 | SetupPrometheusExportsFromConfig() 23 | if getattr(settings, "PROMETHEUS_EXPORT_MIGRATIONS", False): 24 | ExportMigrations() 25 | -------------------------------------------------------------------------------- /django_prometheus/cache/backends/memcached.py: -------------------------------------------------------------------------------- 1 | from django.core.cache.backends import memcached 2 | 3 | from django_prometheus.cache.metrics import ( 4 | django_cache_get_total, 5 | django_cache_hits_total, 6 | django_cache_misses_total, 7 | ) 8 | 9 | 10 | class MemcachedPrometheusCacheMixin: 11 | def get(self, key, default=None, version=None): 12 | django_cache_get_total.labels(backend="memcached").inc() 13 | cached = super().get(key, default=None, version=version) 14 | if cached is not None: 15 | django_cache_hits_total.labels(backend="memcached").inc() 16 | return cached 17 | 18 | django_cache_misses_total.labels(backend="memcached").inc() 19 | return default 20 | 21 | 22 | class PyLibMCCache(MemcachedPrometheusCacheMixin, memcached.PyLibMCCache): 23 | """Inherit memcached to add metrics about hit/miss ratio""" 24 | 25 | 26 | class PyMemcacheCache(MemcachedPrometheusCacheMixin, memcached.PyMemcacheCache): 27 | """Inherit memcached to add metrics about hit/miss ratio""" 28 | -------------------------------------------------------------------------------- /ruff.toml: -------------------------------------------------------------------------------- 1 | line-length = 120 2 | target-version = "py39" 3 | 4 | [lint] 5 | select = [ 6 | # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b 7 | # https://github.com/PyCQA/flake8-bugbear 8 | "B", 9 | # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 10 | # https://github.com/adamchainz/flake8-comprehensions 11 | "C4", 12 | # https://docs.astral.sh/ruff/rules/#error-e 13 | # https://github.com/PyCQA/pycodestyle 14 | "E", 15 | # https://docs.astral.sh/ruff/rules/#pyflakes-f 16 | # https://github.com/PyCQA/pyflakes 17 | "F", 18 | # https://docs.astral.sh/ruff/rules/#isort-i 19 | # https://pycqa.github.io/isort/ 20 | "I", 21 | # https://docs.astral.sh/ruff/rules/#flake8-debugger-t10 22 | # 
https://github.com/jbkahn/flake8-debugger 23 | "T10", 24 | # https://docs.astral.sh/ruff/rules/#pyupgrade-up 25 | # https://github.com/asottile/pyupgrade 26 | "UP", 27 | # https://docs.astral.sh/ruff/rules/#flake8-2020-ytt 28 | # https://github.com/asottile-archive/flake8-2020 29 | "YTT", 30 | # https://docs.astral.sh/ruff/rules/#warning-w 31 | # https://github.com/PyCQA/pycodestyle 32 | "W", 33 | ] 34 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | min_version = 4.4 3 | envlist = 4 | {py39,py310,py311,py312}-django420-{end2end,unittests} 5 | {py310,py311,py312,py313}-django{510,520}-{end2end,unittests} 6 | {py314,py314t}-django520-{end2end,unittests} 7 | py39-lint 8 | skip_missing_interpreters = True 9 | 10 | [gh-actions] 11 | python = 12 | 3.9: py39, py39-lint 13 | 3.10: py310 14 | 3.11: py311 15 | 3.12: py312 16 | 3.13: py313 17 | 3.14: py314,py314t 18 | 19 | [testenv] 20 | deps = 21 | django420: Django>=4.2,<4.3 22 | django510: Django>=5.1,<5.2 23 | django520: Django>=5.2,<5.3 24 | coverage 25 | -rrequirements.txt 26 | skip_missing_interpreters=true 27 | 28 | changedir = 29 | end2end: {toxinidir}/django_prometheus/tests/end2end 30 | setenv = 31 | end2end: PYTHONPATH = {toxinidir} 32 | end2end: DJANGO_SETTINGS_MODULE=testapp.settings 33 | commands = 34 | end2end: coverage run --source=django_prometheus -m pytest testapp/ 35 | unittests: coverage run --source=django_prometheus setup.py test 36 | unittests: python setup.py sdist bdist_wheel 37 | 38 | [testenv:py39-lint] 39 | deps = 40 | pre-commit==4.2.0 41 | commands = 42 | pre-commit run --all-files 43 | -------------------------------------------------------------------------------- /django_prometheus/tests/test_exports.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import socket 3 | from unittest.mock import ANY, MagicMock, call, patch 4 | 5 | from django_prometheus.exports import SetupPrometheusEndpointOnPortRange 6 | 7 | 8 | @patch("django_prometheus.exports.HTTPServer") 9 | def test_port_range_available(httpserver_mock): 10 | """Test port range setup with an available port.""" 11 | httpserver_mock.side_effect = [socket.error, MagicMock()] 12 | port_range = [8000, 8001] 13 | port_chosen = SetupPrometheusEndpointOnPortRange(port_range) 14 | assert port_chosen in port_range 15 | 16 | expected_calls = [call(("", 8000), ANY), call(("", 8001), ANY)] 17 | assert httpserver_mock.mock_calls == expected_calls 18 | 19 | 20 | @patch("django_prometheus.exports.HTTPServer") 21 | def test_port_range_unavailable(httpserver_mock): 22 | """Test port range setup with no available ports.""" 23 | httpserver_mock.side_effect = [socket.error, socket.error] 24 | port_range = [8000, 8001] 25 | port_chosen = SetupPrometheusEndpointOnPortRange(port_range) 26 | 27 | expected_calls = [call(("", 8000), ANY), call(("", 8001), ANY)] 28 | assert httpserver_mock.mock_calls == expected_calls 29 | assert port_chosen is None 30 | -------------------------------------------------------------------------------- /.github/workflows/pre-release.yml: -------------------------------------------------------------------------------- 1 | name: Pre-Release 2 | on: 3 | push: 4 | branches: 5 | - "master" 6 | jobs: 7 | pre-release-django-prometheus-job: 8 | runs-on: ubuntu-latest 9 | name: pre-release django-prometheus 10 | if: github.repository_owner == 'django-commons' 11 | 
permissions: 12 | id-token: write 13 | steps: 14 | - uses: actions/checkout@v5 15 | with: 16 | fetch-depth: 0 17 | - name: Check if version is dev 18 | run: | 19 | if ! grep -q "dev" django_prometheus/__init__.py; then 20 | echo "Version does not contain 'dev', skipping pre-release" 21 | exit 1 22 | else 23 | echo "Version contains 'dev', proceeding with pre-release" 24 | fi 25 | - name: Set up Python 26 | uses: actions/setup-python@v6 27 | with: 28 | python-version: 3.10 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install wheel setuptools packaging twine build --upgrade 33 | - name: Set version number 34 | run: python update_version_from_git.py 35 | - name: Build 36 | run: python -m build 37 | - name: Publish to PyPI 38 | uses: pypa/gh-action-pypi-publish@v1.13.0 39 | with: 40 | skip-existing: true 41 | verbose: true 42 | print-hash: true 43 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/test_migrations.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock 2 | 3 | import pytest 4 | 5 | from django_prometheus.migrations import ExportMigrationsForDatabase 6 | from django_prometheus.testutils import assert_metric_equal 7 | 8 | 9 | def M(metric_name): 10 | """Make a full metric name from a short metric name. 11 | 12 | This is just intended to help keep the lines shorter in test 13 | cases. 14 | """ 15 | return f"django_migrations_{metric_name}" 16 | 17 | 18 | @pytest.mark.django_db 19 | class TestMigrations: 20 | """Test migration counters.""" 21 | 22 | def test_counters(self): 23 | executor = MagicMock() 24 | executor.migration_plan = MagicMock() 25 | executor.migration_plan.return_value = set() 26 | executor.loader.applied_migrations = {"a", "b", "c"} 27 | ExportMigrationsForDatabase("fakedb1", executor) 28 | assert executor.migration_plan.call_count == 1 29 | executor.migration_plan = MagicMock() 30 | executor.migration_plan.return_value = {"a"} 31 | executor.loader.applied_migrations = {"b", "c"} 32 | ExportMigrationsForDatabase("fakedb2", executor) 33 | 34 | assert_metric_equal(3, M("applied_total"), connection="fakedb1") 35 | assert_metric_equal(0, M("unapplied_total"), connection="fakedb1") 36 | assert_metric_equal(2, M("applied_total"), connection="fakedb2") 37 | assert_metric_equal(1, M("unapplied_total"), connection="fakedb2") 38 | -------------------------------------------------------------------------------- /django_prometheus/db/metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client import Counter, Histogram 2 | 3 | from django_prometheus.conf import NAMESPACE, PROMETHEUS_LATENCY_BUCKETS 4 | 5 | connections_total = Counter( 6 | "django_db_new_connections_total", 7 | "Counter of created connections by database and by vendor.", 8 | ["alias", "vendor"], 9 | namespace=NAMESPACE, 10 | ) 11 | 12 | connection_errors_total = Counter( 13 | "django_db_new_connection_errors_total", 14 | "Counter of connection failures by database and by vendor.", 15 | ["alias", "vendor"], 16 | namespace=NAMESPACE, 17 | ) 18 | 19 | execute_total = Counter( 20 | "django_db_execute_total", 21 | ("Counter of executed statements by database and by vendor, including bulk executions."), 22 | ["alias", "vendor"], 23 | namespace=NAMESPACE, 24 | ) 25 | 26 | 27 | execute_many_total = Counter( 28 | "django_db_execute_many_total", 29 | ("Counter of executed 
statements in bulk operations by database and by vendor."), 30 | ["alias", "vendor"], 31 | namespace=NAMESPACE, 32 | ) 33 | 34 | 35 | errors_total = Counter( 36 | "django_db_errors_total", 37 | ("Counter of execution errors by database, vendor and exception type."), 38 | ["alias", "vendor", "type"], 39 | namespace=NAMESPACE, 40 | ) 41 | 42 | query_duration_seconds = Histogram( 43 | "django_db_query_duration_seconds", 44 | ("Histogram of query duration by database and vendor."), 45 | ["alias", "vendor"], 46 | buckets=PROMETHEUS_LATENCY_BUCKETS, 47 | namespace=NAMESPACE, 48 | ) 49 | -------------------------------------------------------------------------------- /django_prometheus/db/backends/README.md: -------------------------------------------------------------------------------- 1 | # Adding new database wrapper types 2 | 3 | Unfortunately, I don't have the resources to create wrappers for all 4 | database vendors. Doing so should be straightforward, but testing that 5 | it works and maintaining it is a lot of busywork, or is impossible for 6 | me for commercial databases. 7 | 8 | This document should be enough for people who wish to implement a new 9 | database wrapper. 10 | 11 | ## Structure 12 | 13 | A database engine in Django requires 3 classes (it really requires 2, 14 | but the 3rd one is required for our purposes): 15 | 16 | * A DatabaseFeatures class, which describes what features the database 17 | supports. For our usage, we can simply extend the existing 18 | DatabaseFeatures class without any changes. 19 | * A DatabaseWrapper class, which abstracts the interface to the 20 | database. 21 | * A CursorWrapper class, which abstracts the interface to a cursor. A 22 | cursor is the object that can execute SQL statements via an open 23 | connection. 24 | 25 | An easy example can be found in the sqlite3 module. Here are a few tips: 26 | 27 | * The `self.alias` and `self.vendor` properties are present in all 28 | DatabaseWrappers. 29 | * The CursorWrapper doesn't have access to the alias and vendor, so we 30 | generate the class in a function that accepts them as arguments. 31 | * Most methods you overload should just increment a counter, forward 32 | all arguments to the original method and return the 33 | result. `execute` and `execute_many` should also wrap the call to 34 | the parent method in a `try...except` block to increment the 35 | `errors_total` counter as appropriate. 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env*/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | 46 | # Translations 47 | *.mo 48 | *.pot 49 | 50 | # Django stuff: 51 | *.log 52 | *.sqlite3 53 | 54 | # Sphinx documentation 55 | docs/_build/ 56 | 57 | # PyBuilder 58 | target/ 59 | 60 | # VSCode 61 | .vscode/ 62 | 63 | ### Emacs ### 64 | # -*- mode: gitignore; -*- 65 | *~ 66 | \#*\# 67 | /.emacs.desktop 68 | /.emacs.desktop.lock 69 | *.elc 70 | auto-save-list 71 | tramp 72 | .\#* 73 | 74 | # Org-mode 75 | .org-id-locations 76 | *_archive 77 | 78 | # flymake-mode 79 | *_flymake.* 80 | 81 | # eshell files 82 | /eshell/history 83 | /eshell/lastdir 84 | 85 | # elpa packages 86 | /elpa/ 87 | 88 | # reftex files 89 | *.rel 90 | 91 | # AUCTeX auto folder 92 | /auto/ 93 | 94 | # cask packages 95 | .cask/ 96 | 97 | # venv 98 | venv/ 99 | 100 | ### Prometheus ### 101 | examples/prometheus/data 102 | -------------------------------------------------------------------------------- /django_prometheus/models.py: -------------------------------------------------------------------------------- 1 | from prometheus_client import Counter 2 | 3 | from django_prometheus.conf import NAMESPACE 4 | 5 | model_inserts = Counter( 6 | "django_model_inserts_total", 7 | "Number of insert operations by model.", 8 | ["model"], 9 | namespace=NAMESPACE, 10 | ) 11 | 12 | model_updates = Counter( 13 | "django_model_updates_total", 14 | "Number of update operations by model.", 15 | ["model"], 16 | namespace=NAMESPACE, 17 | ) 18 | 19 | model_deletes = Counter( 20 | "django_model_deletes_total", 21 | "Number of delete operations by model.", 22 | ["model"], 23 | namespace=NAMESPACE, 24 | ) 25 | 26 | 27 | def ExportModelOperationsMixin(model_name): 28 | """Returns a mixin for models to export counters for lifecycle operations. 29 | 30 | Usage: 31 | class User(ExportModelOperationsMixin('user'), Model): 32 | ... 33 | """ 34 | # Force create the labels for this model in the counters. This 35 | # is not necessary but it avoids gaps in the aggregated data. 
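    # (prometheus_client creates labeled children lazily, so touching .labels()
    # here makes each per-model series exist and export as 0 from startup,
    # rather than appearing only after the first insert/update/delete.)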
36 | model_inserts.labels(model_name) 37 | model_updates.labels(model_name) 38 | model_deletes.labels(model_name) 39 | 40 | class Mixin: 41 | def _do_insert(self, *args, **kwargs): 42 | model_inserts.labels(model_name).inc() 43 | return super()._do_insert(*args, **kwargs) 44 | 45 | def _do_update(self, *args, **kwargs): 46 | model_updates.labels(model_name).inc() 47 | return super()._do_update(*args, **kwargs) 48 | 49 | def delete(self, *args, **kwargs): 50 | model_deletes.labels(model_name).inc() 51 | return super().delete(*args, **kwargs) 52 | 53 | Mixin.__qualname__ = f"ExportModelOperationsMixin('{model_name}')" 54 | return Mixin 55 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/views.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | 4 | from django.db import connections 5 | from django.http import FileResponse 6 | from django.shortcuts import render 7 | from django.template.response import TemplateResponse 8 | 9 | from testapp.models import Lawn 10 | 11 | 12 | def index(request): 13 | return TemplateResponse(request, "index.html", {}) 14 | 15 | 16 | def help(request): 17 | # render does not instantiate a TemplateResponse, so it does not 18 | # increment the "by_templatename" counters. 19 | return render(request, "help.html", {}) 20 | 21 | 22 | def slow(request): 23 | """This view takes .1s to load, on purpose.""" 24 | time.sleep(0.1) 25 | return TemplateResponse(request, "slow.html", {}) 26 | 27 | 28 | def newlawn(request, location): 29 | """This view creates a new Lawn instance in the database.""" 30 | lawn = Lawn() 31 | lawn.location = location 32 | lawn.save() 33 | return TemplateResponse(request, "lawn.html", {"lawn": lawn}) 34 | 35 | 36 | class ObjectionException(Exception): 37 | pass 38 | 39 | 40 | def objection(request): 41 | raise ObjectionException("Objection!") 42 | 43 | 44 | def sql(request): 45 | databases = connections.databases.keys() 46 | query = request.GET.get("query") 47 | db = request.GET.get("database") 48 | if query and db: 49 | cursor = connections[db].cursor() 50 | cursor.execute(query, []) 51 | results = cursor.fetchall() 52 | return TemplateResponse( 53 | request, 54 | "sql.html", 55 | {"query": query, "rows": results, "databases": databases}, 56 | ) 57 | return TemplateResponse(request, "sql.html", {"query": None, "rows": None, "databases": databases}) 58 | 59 | 60 | def file(request): 61 | return FileResponse(open(os.devnull, "rb")) 62 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/test_models.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from django_prometheus.testutils import assert_metric_diff, save_registry 4 | from testapp.models import Dog, Lawn 5 | 6 | 7 | def M(metric_name): 8 | """Make a full metric name from a short metric name. 9 | 10 | This is just intended to help keep the lines shorter in test 11 | cases. 
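    For example, M("inserts_total") expands to "django_model_inserts_total".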
12 | """ 13 | return f"django_model_{metric_name}" 14 | 15 | 16 | @pytest.mark.django_db 17 | class TestModelMetrics: 18 | """Test django_prometheus.models.""" 19 | 20 | def test_counters(self): 21 | registry = save_registry() 22 | cool = Dog() 23 | cool.name = "Cool" 24 | cool.save() 25 | assert_metric_diff(registry, 1, M("inserts_total"), model="dog") 26 | 27 | elysees = Lawn() 28 | elysees.location = "Champs Elysees, Paris" 29 | elysees.save() 30 | assert_metric_diff(registry, 1, M("inserts_total"), model="lawn") 31 | assert_metric_diff(registry, 1, M("inserts_total"), model="dog") 32 | 33 | galli = Dog() 34 | galli.name = "Galli" 35 | galli.save() 36 | assert_metric_diff(registry, 2, M("inserts_total"), model="dog") 37 | 38 | cool.breed = "Wolfhound" 39 | assert_metric_diff(registry, 2, M("inserts_total"), model="dog") 40 | 41 | cool.save() 42 | assert_metric_diff(registry, 2, M("inserts_total"), model="dog") 43 | assert_metric_diff(registry, 1, M("updates_total"), model="dog") 44 | 45 | cool.age = 9 46 | cool.save() 47 | assert_metric_diff(registry, 2, M("updates_total"), model="dog") 48 | 49 | cool.delete() # :( 50 | assert_metric_diff(registry, 2, M("inserts_total"), model="dog") 51 | assert_metric_diff(registry, 2, M("updates_total"), model="dog") 52 | assert_metric_diff(registry, 1, M("deletes_total"), model="dog") 53 | -------------------------------------------------------------------------------- /django_prometheus/cache/backends/redis.py: -------------------------------------------------------------------------------- 1 | from django.core.cache.backends.redis import RedisCache as DjangoRedisCache 2 | from django_redis import cache, exceptions 3 | 4 | from django_prometheus.cache.metrics import ( 5 | django_cache_get_fail_total, 6 | django_cache_get_total, 7 | django_cache_hits_total, 8 | django_cache_misses_total, 9 | ) 10 | 11 | 12 | class RedisCache(cache.RedisCache): 13 | """Inherit redis to add metrics about hit/miss/interruption ratio""" 14 | 15 | @cache.omit_exception 16 | def get(self, key, default=None, version=None, client=None): 17 | try: 18 | django_cache_get_total.labels(backend="redis").inc() 19 | cached = self.client.get(key, default=None, version=version, client=client) 20 | except exceptions.ConnectionInterrupted as e: 21 | django_cache_get_fail_total.labels(backend="redis").inc() 22 | if self._ignore_exceptions: 23 | if self._log_ignored_exceptions: 24 | self.logger.error(str(e)) 25 | return default 26 | raise 27 | else: 28 | if cached is not None: 29 | django_cache_hits_total.labels(backend="redis").inc() 30 | return cached 31 | django_cache_misses_total.labels(backend="redis").inc() 32 | return default 33 | 34 | 35 | class NativeRedisCache(DjangoRedisCache): 36 | def get(self, key, default=None, version=None): 37 | django_cache_get_total.labels(backend="native_redis").inc() 38 | try: 39 | result = super().get(key, default=None, version=version) 40 | except Exception: 41 | django_cache_get_fail_total.labels(backend="native_redis").inc() 42 | raise 43 | if result is not None: 44 | django_cache_hits_total.labels(backend="native_redis").inc() 45 | return result 46 | django_cache_misses_total.labels(backend="native_redis").inc() 47 | return default 48 | -------------------------------------------------------------------------------- /django_prometheus/migrations.py: -------------------------------------------------------------------------------- 1 | from django.db import connections 2 | from django.db.backends.dummy.base import DatabaseWrapper 3 | from 
prometheus_client import Gauge 4 | 5 | from django_prometheus.conf import NAMESPACE 6 | 7 | unapplied_migrations = Gauge( 8 | "django_migrations_unapplied_total", 9 | "Count of unapplied migrations by database connection", 10 | ["connection"], 11 | namespace=NAMESPACE, 12 | ) 13 | 14 | applied_migrations = Gauge( 15 | "django_migrations_applied_total", 16 | "Count of applied migrations by database connection", 17 | ["connection"], 18 | namespace=NAMESPACE, 19 | ) 20 | 21 | 22 | def ExportMigrationsForDatabase(alias, executor): 23 | plan = executor.migration_plan(executor.loader.graph.leaf_nodes()) 24 | unapplied_migrations.labels(alias).set(len(plan)) 25 | applied_migrations.labels(alias).set(len(executor.loader.applied_migrations)) 26 | 27 | 28 | def ExportMigrations(): 29 | """Exports counts of unapplied migrations. 30 | 31 | This is meant to be called during app startup, ideally by 32 | django_prometheus.apps.AppConfig. 33 | """ 34 | # Import MigrationExecutor lazily. MigrationExecutor checks at 35 | # import time that the apps are ready, and they are not when 36 | # django_prometheus is imported. ExportMigrations() should be 37 | # called in AppConfig.ready(), which signals that all apps are 38 | # ready. 39 | from django.db.migrations.executor import MigrationExecutor 40 | 41 | if "default" in connections and (isinstance(connections["default"], DatabaseWrapper)): 42 | # This is the case where DATABASES = {} in the configuration, 43 | # i.e. the user is not using any databases. Django "helpfully" 44 | # adds a dummy database and then throws when you try to 45 | # actually use it. So we don't do anything, because trying to 46 | # export stats would crash the app on startup. 47 | return 48 | for alias in connections.databases: 49 | executor = MigrationExecutor(connections[alias]) 50 | ExportMigrationsForDatabase(alias, executor) 51 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release To PyPI 2 | on: 3 | push: 4 | tags: 5 | - v[0-9]+.[0-9]+.[0-9]+ 6 | jobs: 7 | org-check: 8 | name: Check GitHub Organization 9 | if: ${{ github.repository_owner == 'django-commons' }} 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Noop 13 | run: "true" 14 | determine-tag: 15 | name: Determine the release tag to operate against. 16 | needs: org-check 17 | runs-on: ubuntu-latest 18 | outputs: 19 | release-tag: ${{ steps.determine-tag.outputs.release-tag }} 20 | release-version: ${{ steps.determine-tag.outputs.release-version }} 21 | steps: 22 | - name: Determine Tag 23 | id: determine-tag 24 | run: | 25 | RELEASE_TAG=${GITHUB_REF#refs/tags/} 26 | echo "Release tag: ${RELEASE_TAG}" 27 | if [[ "${RELEASE_TAG}" =~ ^v[0-9]+.[0-9]+.[0-9]+$ ]]; then 28 | echo "release-tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT 29 | echo "release-version=${RELEASE_TAG#v}" >> $GITHUB_OUTPUT 30 | else 31 | echo "::error::Release tag '${RELEASE_TAG}' must match 'v\d+.\d+.\d+'." 
32 | exit 1 33 | fi 34 | release-django-prometheus-job: 35 | runs-on: ubuntu-latest 36 | name: Release Django-Promethues 37 | needs: determine-tag 38 | permissions: 39 | id-token: write 40 | steps: 41 | - uses: actions/checkout@v4 42 | with: 43 | ref: ${{ needs.determine-tag.outputs.release-tag }} 44 | fetch-depth: 0 45 | - name: Set up Python 3.9 46 | uses: actions/setup-python@v5 47 | with: 48 | python-version: 3.9 49 | - name: Install dependencies 50 | run: | 51 | python -m pip install --upgrade pip 52 | pip install wheel setuptools packaging twine build --upgrade 53 | - name: Build 54 | run: python -m build 55 | - name: Publish to PyPI 56 | uses: pypa/gh-action-pypi-publish@v1.13.0 57 | with: 58 | skip-existing: true 59 | verbose: true 60 | print-hash: true 61 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Git 4 | 5 | Feel free to send pull requests, even for the tiniest things. Watch 6 | for Travis' opinion on them ([![Build 7 | Status](https://travis-ci.org/korfuri/django-prometheus.svg?branch=master)](https://travis-ci.org/korfuri/django-prometheus)). 8 | 9 | Travis will also make sure your code is pep8 compliant, and it's a 10 | good idea to run flake8 as well (on django_prometheus/ and on 11 | tests/). The code contains "unused" imports on purpose so flake8 isn't 12 | run automatically. 13 | 14 | ## Tests 15 | 16 | Please write unit tests for your change. There are two kinds of tests: 17 | 18 | * Regular unit tests that test the code directly, without loading 19 | Django. This is limited to pieces of the code that don't depend on 20 | Django, since a lot of the Django code will require a full Django 21 | environment (anything that interacts with models, for instance, 22 | needs a full database configuration). 23 | * End-to-end tests are Django unit tests in a test application. The 24 | test application doubles as an easy way to interactively test your 25 | changes. It uses most of the basic Django features and a few 26 | advanced features, so you can test things for yourself. 27 | 28 | ### Running all tests 29 | 30 | ```shell 31 | python setup.py test 32 | cd tests/end2end/ && PYTHONPATH=../.. ./manage.py test 33 | ``` 34 | 35 | The former runs the regular unit tests, the latter runs the Django 36 | unit test. 37 | 38 | To avoid setting PYTHONPATH every time, you can also run `python 39 | setup.py install`. 40 | 41 | ### Running the test Django app 42 | 43 | ```shell 44 | cd tests/end2end/ && PYTHONPATH=../.. ./manage.py runserver 45 | ``` 46 | 47 | By default, this will start serving on http://localhost:8000/. Metrics 48 | are available at `/metrics`. 49 | 50 | ## Running Prometheus 51 | 52 | See for instructions on installing 53 | Prometheus. Once you have Prometheus installed, you can use the 54 | example rules and dashboard in `examples/prometheus/`. See 55 | `examples/prometheus/README.md` to run Prometheus and view the example 56 | dashboard. 57 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/templates/slow.html: -------------------------------------------------------------------------------- 1 |
 2 |                                   _.---"'"""""'`--.._
 3 |                              _,.-'                   `-._
 4 |                          _,."                            -.
 5 |                      .-""   ___...---------.._             `.
 6 |                      `---'""                  `-.            `.
 7 |                                                  `.            \
 8 |                                                    `.           \
 9 |                                                      \           \
10 |                                                       .           \
11 |                                                       |            .
12 |                                                       |            |
13 |                                 _________             |            |
14 |                           _,.-'"         `"'-.._      :            |
15 |                       _,-'                      `-._.'             |
16 |                    _.'                              `.             '
17 |         _.-.    _,+......__                           `.          .
18 |       .'    `-"'           `"-.,-""--._                 \        /
19 |      /    ,'                  |    __  \                 \      /
20 |     `   ..                       +"  )  \                 \    /
21 |      `.'  \          ,-"`-..    |       |                  \  /
22 |       / " |        .'       \   '.    _.'                   .'
23 |      |,.."--"""--..|    "    |    `""`.                     |
24 |    ,"               `-._     |        |                     |
25 |  .'                     `-._+         |                     |
26 | /                           `.                        /     |
27 | |    `     '                  |                      /      |
28 | `-.....--.__                  |              |      /       |
29 |    `./ "| / `-.........--.-   '              |    ,'        '
30 |      /| ||        `.'  ,'   .'               |_,-+         /
31 |     / ' '.`.        _,'   ,'     `.          |   '   _,.. /
32 |    /   `.  `"'"'""'"   _,^--------"`.        |    `.'_  _/
33 |   /... _.`:.________,.'              `._,.-..|        "'
34 |  `.__.'                                 `._  /
35 |                                            "' mh
36 | 
37 | Art by Maija Haavisto 38 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v2.5.0 - UNRELEASED 4 | 5 | * Drop support for Django 5.0 (EOL) 6 | * Add support for Python 3.13, 3.14, and free threaded 3.14t. 7 | 8 | ## v2.4.1 - June 25th, 2025 9 | 10 | * Add Django version to install requirements. 11 | 12 | ## v2.4.0 - June 18th, 2025 13 | 14 | * Add support for Django 5.0 and Python 3.12. 15 | * Replace black, flake8 and isort with Ruff 16 | * Drop support for Django 3.2 (Python 3.7), 4.0 and 4.1 17 | * Project moved to the [Django Commons](https://github.com/django-commons) GitHub organization. 18 | * Add pyupgrade and yamlfmt pre-commit hooks 19 | 20 | ## v2.3.1 - May 2nd, 2023 21 | 22 | * Fix postgresql provider import, Thanks [@wilsonehusin](https://github.com/korfuri/django-prometheus/pull/402) 23 | 24 | ## v2.3.0 - May 2nd, 2023 25 | 26 | * Remove support for Python 3.6, Django versions older than 3.2 27 | * Fix two latency metrics not using PROMETHEUS_LATENCY_BUCKETS setting, Thanks [@AleksaC](https://github.com/korfuri/django-prometheus/pull/343) 28 | * Support new cache backend names in newer Django versions, Thanks [@tneuct](https://github.com/korfuri/django-prometheus/pull/329) 29 | * Make export of migrations False by default, Thanks [@kaypee90](https://github.com/korfuri/django-prometheus/pull/313) 30 | * Add support for Django 4.1, Python 3.11 31 | * Add support for Django 4.2 and Psycopg 3 32 | 33 | ## v2.2.0 - December 19, 2021 34 | 35 | * Switch to Github Actions CI, remove travis-ci. 36 | * Add support for Django 3.2 & 4.0 and Python 3.9 & 3.10 37 | 38 | ## v2.1.0 - August 22, 2020 39 | 40 | * Remove support for older django and python versions 41 | * Add support for Django 3.0 and Django 3.1 42 | * Add support for [PostGIS](https://github.com/korfuri/django-prometheus/pull/221), Thanks [@EverWinter23](https://github.com/EverWinter23) 43 | 44 | ## v2.0.0 - Jan 20, 2020 45 | 46 | * Added support for newer Django and Python versions 47 | * Added an extensibility that applications to add their own labels to middleware (request/response) metrics 48 | * Allow overriding and setting custom bucket values for request/response latency histogram metric 49 | * Internal improvements: 50 | * use tox 51 | * Use pytest 52 | * use Black 53 | * Automate pre-releases on every commit to master 54 | * Fix flaky tests. 55 | 56 | ## v1.1.0 - Sep 28, 2019 57 | 58 | * maintenance release that updates this library to support recent and supported version of python & Django 59 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/templates/help.html: -------------------------------------------------------------------------------- 1 |

Can't Help Falling in Love

2 |

Remembering Helps Me to Forget

3 |

Helplessly, Hopelessly

4 |

Love Helps Those

5 |

I Need a Little Help

6 |

For a while We Helped Each Other Out

7 |

Give Me a Helping Hand

8 |

I Can't Help You, I'm Falling Too

9 |

How Can I Help You Say Goodbye?

10 |

Time Hasn't Helped

11 |

Jukebox, Help Me Find My Baby

12 |

I Just Can't Help Myself

13 |

Help Me, Girl

14 |

I Can't Help it

15 |

Help Somebody

16 |

Help, Help

17 |

I Can't Help How I Feel

18 |

No Help From Me

19 |

I Can Help

20 |

Somebody Help Me

21 |

Please Help Me I'm Falling in Love With You

22 |

Help Yourself

23 |

Outside Help

24 |

Helping Hand

25 |

Help Me, Rhonda

26 |

Can't Help Feeling So Blue

27 |

We All Agreed to Help

28 |

Help Pour Out the Rain (Lacey's Song)

29 |

Sleep Won't Help Me

30 |

I Can't Help Myself (Sugarpie, Honeybunch)

31 |

Cry for Help

32 |

She's Helping Me Get Over You

33 |

Mama Help Me

34 |

Help Yourself to Me

35 |

Can't Help But Wonder

36 |

Heaven Help the Working Girl

37 |

Help Me Pick Up the Pieces

38 |

Crying Won't Help Now

39 |

I Couldn't Help Myself

40 |

So Help Me, Girl

41 |

Heaven Help the Fool

42 |

Help Wanted

43 |

Help Me Get Over You

44 |

Helpless

45 |

Help

46 |

Can't Help it

47 |

Can't Help Calling Your Name

48 |

If She Just Helps Me Get Over You

49 |

Helpless Heart

50 |

No Help Wanted

51 |

It Didn't Help Much

52 |

Help Me Make it Through the Night

53 |

Help Me Understand

54 |

I Just Can't Help Believing

55 |

Can't Help Thinking About Me

56 |

How Could I Help But Love You?

57 |

Heaven Help My Heart

58 |

I Can't Help Remembering You

59 |

Help Me Hold on

60 |

Helping Me Get Over You

61 |

I Can't Help it if I'm Still in Love with You

62 |

Girl Can't Help it, The

63 |

I Can't Help it, I'm Falling in Love

64 |

With a Little Help from My Friends

65 |

Heaven Help the Child

66 |

Help Me

67 |

Can't Help But Love You

68 |

Help is on the Way

69 |

I Got Some Help I Don't Need

70 |

Heaven Help Us All

71 |

Heaven Help Me

72 |

Helplessly Hoping

73 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | branches: 5 | - "*" 6 | pull_request: 7 | branches: 8 | - master 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} 11 | cancel-in-progress: true 12 | jobs: 13 | test: 14 | timeout-minutes: 30 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14", "3.14t"] 19 | os: [ubuntu-24.04] 20 | runs-on: ${{ matrix.os }} 21 | name: "${{ matrix.os }} Python: ${{ matrix.python-version }}" 22 | services: 23 | redis: 24 | image: redis:8.0-alpine 25 | ports: 26 | - 6379:6379 27 | memcached: 28 | image: memcached:1.6-alpine 29 | ports: 30 | - 11211:11211 31 | mysql: 32 | image: mysql:9.3.0 33 | env: 34 | MYSQL_ALLOW_EMPTY_PASSWORD: yes 35 | ports: 36 | - 3306:3306 37 | postgresql: 38 | image: postgis/postgis:17-3.5-alpine 39 | env: 40 | POSTGRES_HOST_AUTH_METHOD: trust 41 | ports: 42 | - 5432:5432 43 | steps: 44 | - name: Install OS Packages 45 | run: | 46 | sudo apt-get update 47 | sudo apt-get install binutils libproj-dev gdal-bin libmemcached-dev libsqlite3-mod-spatialite 48 | - uses: actions/checkout@v5 49 | with: 50 | fetch-depth: 0 51 | - name: Set up Python ${{ matrix.python-version }} 52 | uses: actions/setup-python@v6 53 | with: 54 | python-version: ${{ matrix.python-version }} 55 | - name: Install dependencies 56 | run: | 57 | pip install --upgrade "pip>=23.1.1" 58 | pip install --upgrade "tox-gh-actions==3.3.0" coverage 59 | - name: Log versions 60 | run: | 61 | python --version 62 | pip --version 63 | psql -V 64 | mysql -V 65 | - name: prep DB 66 | env: 67 | MYSQL_TCP_PORT: 3306 68 | MYSQL_HOST: localhost 69 | PGHOST: localhost 70 | PGPORT: 5432 71 | run: | 72 | psql --user postgres -c 'CREATE DATABASE postgis' 73 | psql --user postgres postgis -c 'CREATE EXTENSION IF NOT EXISTS postgis;' 74 | mysql --protocol=TCP --user=root -e 'create database django_prometheus_1;' 75 | - name: Run test and linters via Tox 76 | run: tox 77 | - name: Process code coverage 78 | run: | 79 | coverage combine .coverage django_prometheus/tests/end2end/.coverage 80 | coverage xml 81 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from setuptools import find_packages, setup 4 | 5 | with open("README.md") as fl: 6 | LONG_DESCRIPTION = fl.read() 7 | 8 | 9 | def get_version(): 10 | version_file = open("django_prometheus/__init__.py").read() 11 | version_match = re.search( 12 | r'^__version__ = [\'"]([^\'"]*)[\'"]', 13 | version_file, 14 | re.MULTILINE, 15 | ) 16 | if version_match: 17 | return version_match.group(1) 18 | raise RuntimeError("Unable to find version string.") 19 | 20 | 21 | setup( 22 | name="django-prometheus", 23 | version=get_version(), 24 | author="Uriel Corfa", 25 | author_email="uriel@corfa.fr", 26 | description=("Django middlewares to monitor your application with Prometheus.io."), 27 | license="Apache", 28 | keywords="django monitoring prometheus", 29 | url="http://github.com/korfuri/django-prometheus", 30 | project_urls={ 31 | "Changelog": "https://github.com/korfuri/django-prometheus/blob/master/CHANGELOG.md", 32 | "Documentation": "https://github.com/korfuri/django-prometheus/blob/master/README.md", 33 | 
"Source": "https://github.com/korfuri/django-prometheus", 34 | "Tracker": "https://github.com/korfuri/django-prometheus/issues", 35 | }, 36 | packages=find_packages( 37 | exclude=[ 38 | "tests", 39 | ], 40 | ), 41 | test_suite="django_prometheus.tests", 42 | long_description=LONG_DESCRIPTION, 43 | long_description_content_type="text/markdown", 44 | tests_require=["pytest", "pytest-django"], 45 | setup_requires=["pytest-runner"], 46 | options={"bdist_wheel": {"universal": "1"}}, 47 | install_requires=[ 48 | "Django>=4.2,<5.3,!=5.0.*", 49 | "prometheus-client>=0.7", 50 | ], 51 | classifiers=[ 52 | "Development Status :: 5 - Production/Stable", 53 | "Intended Audience :: Developers", 54 | "Intended Audience :: Information Technology", 55 | "Intended Audience :: System Administrators", 56 | "Programming Language :: Python :: 3", 57 | "Programming Language :: Python :: 3.9", 58 | "Programming Language :: Python :: 3.10", 59 | "Programming Language :: Python :: 3.11", 60 | "Programming Language :: Python :: 3.12", 61 | "Programming Language :: Python :: 3.13", 62 | "Programming Language :: Python :: 3.14", 63 | "Programming Language :: Python :: Free Threading", 64 | "Framework :: Django :: 4.2", 65 | "Framework :: Django :: 5.1", 66 | "Framework :: Django :: 5.2", 67 | "Topic :: System :: Monitoring", 68 | "License :: OSI Approved :: Apache Software License", 69 | ], 70 | ) 71 | -------------------------------------------------------------------------------- /django_prometheus/db/common.py: -------------------------------------------------------------------------------- 1 | from django_prometheus.db import ( 2 | connection_errors_total, 3 | connections_total, 4 | errors_total, 5 | execute_many_total, 6 | execute_total, 7 | query_duration_seconds, 8 | ) 9 | 10 | 11 | class ExceptionCounterByType: 12 | """A context manager that counts exceptions by type. 13 | 14 | Exceptions increment the provided counter, whose last label's name 15 | must match the `type_label` argument. 16 | 17 | In other words: 18 | 19 | c = Counter('http_request_exceptions_total', 'Counter of exceptions', 20 | ['method', 'type']) 21 | with ExceptionCounterByType(c, extra_labels={'method': 'GET'}): 22 | handle_get_request() 23 | """ 24 | 25 | def __init__(self, counter, type_label="type", extra_labels=None): 26 | self._counter = counter 27 | self._type_label = type_label 28 | self._labels = dict(extra_labels) # Copy labels since we modify them. 29 | 30 | def __enter__(self): 31 | pass 32 | 33 | def __exit__(self, typ, value, traceback): 34 | if typ is not None: 35 | self._labels.update({self._type_label: typ.__name__}) 36 | self._counter.labels(**self._labels).inc() 37 | 38 | 39 | class DatabaseWrapperMixin: 40 | """Extends the DatabaseWrapper to count connections and cursors.""" 41 | 42 | def get_new_connection(self, *args, **kwargs): 43 | connections_total.labels(self.alias, self.vendor).inc() 44 | try: 45 | return super().get_new_connection(*args, **kwargs) 46 | except Exception: 47 | connection_errors_total.labels(self.alias, self.vendor).inc() 48 | raise 49 | 50 | def create_cursor(self, name=None): 51 | return self.connection.cursor(factory=ExportingCursorWrapper(self.CURSOR_CLASS, self.alias, self.vendor)) 52 | 53 | 54 | def ExportingCursorWrapper(cursor_class, alias, vendor): 55 | """Returns a CursorWrapper class that knows its database's alias and 56 | vendor name. 
57 | """ 58 | labels = {"alias": alias, "vendor": vendor} 59 | 60 | class CursorWrapper(cursor_class): 61 | """Extends the base CursorWrapper to count events.""" 62 | 63 | def execute(self, *args, **kwargs): 64 | execute_total.labels(alias, vendor).inc() 65 | with ( 66 | query_duration_seconds.labels(**labels).time(), 67 | ExceptionCounterByType(errors_total, extra_labels=labels), 68 | ): 69 | return super().execute(*args, **kwargs) 70 | 71 | def executemany(self, query, param_list, *args, **kwargs): 72 | execute_total.labels(alias, vendor).inc(len(param_list)) 73 | execute_many_total.labels(alias, vendor).inc(len(param_list)) 74 | with ( 75 | query_duration_seconds.labels(**labels).time(), 76 | ExceptionCounterByType(errors_total, extra_labels=labels), 77 | ): 78 | return super().executemany(query, param_list, *args, **kwargs) 79 | 80 | return CursorWrapper 81 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/test_caches.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from django.core.cache import caches 3 | from redis import RedisError 4 | 5 | from django_prometheus.testutils import assert_metric_equal, get_metric 6 | 7 | _SUPPORTED_CACHES = [ 8 | "memcached.PyLibMCCache", 9 | "memcached.PyMemcacheCache", 10 | "filebased", 11 | "locmem", 12 | "native_redis", 13 | "redis", 14 | ] 15 | 16 | 17 | class TestCachesMetrics: 18 | """Test django_prometheus.caches metrics.""" 19 | 20 | @pytest.mark.parametrize("supported_cache", _SUPPORTED_CACHES) 21 | def test_counters(self, supported_cache): 22 | # Note: those tests require a memcached server running 23 | tested_cache = caches[supported_cache] 24 | backend = supported_cache.split(".")[0] 25 | total_before = get_metric("django_cache_get_total", backend=backend) or 0 26 | hit_before = get_metric("django_cache_get_hits_total", backend=backend) or 0 27 | miss_before = get_metric("django_cache_get_misses_total", backend=backend) or 0 28 | tested_cache.set("foo1", "bar") 29 | tested_cache.get("foo1") 30 | tested_cache.get("foo1") 31 | tested_cache.get("foofoo") 32 | result = tested_cache.get("foofoo", default="default") 33 | assert result == "default" 34 | assert_metric_equal(total_before + 4, "django_cache_get_total", backend=backend) 35 | assert_metric_equal(hit_before + 2, "django_cache_get_hits_total", backend=backend) 36 | assert_metric_equal( 37 | miss_before + 2, 38 | "django_cache_get_misses_total", 39 | backend=backend, 40 | ) 41 | 42 | def test_redis_cache_fail(self): 43 | # Note: test use fake service config (like if server was stopped) 44 | supported_cache = "redis" 45 | total_before = get_metric("django_cache_get_total", backend=supported_cache) or 0 46 | fail_before = get_metric("django_cache_get_fail_total", backend=supported_cache) or 0 47 | hit_before = get_metric("django_cache_get_hits_total", backend=supported_cache) or 0 48 | miss_before = get_metric("django_cache_get_misses_total", backend=supported_cache) or 0 49 | 50 | tested_cache = caches["stopped_redis_ignore_exception"] 51 | tested_cache.get("foo1") 52 | 53 | assert_metric_equal(hit_before, "django_cache_get_hits_total", backend=supported_cache) 54 | assert_metric_equal(miss_before, "django_cache_get_misses_total", backend=supported_cache) 55 | assert_metric_equal(total_before + 1, "django_cache_get_total", backend=supported_cache) 56 | assert_metric_equal(fail_before + 1, "django_cache_get_fail_total", backend=supported_cache) 57 | 58 | 
tested_cache = caches["stopped_redis"] 59 | with pytest.raises(RedisError): 60 | tested_cache.get("foo1") 61 | 62 | assert_metric_equal(hit_before, "django_cache_get_hits_total", backend=supported_cache) 63 | assert_metric_equal(miss_before, "django_cache_get_misses_total", backend=supported_cache) 64 | assert_metric_equal(total_before + 2, "django_cache_get_total", backend=supported_cache) 65 | assert_metric_equal(fail_before + 2, "django_cache_get_fail_total", backend=supported_cache) 66 | 67 | @pytest.mark.parametrize("supported_cache", _SUPPORTED_CACHES) 68 | def test_cache_version_support(self, supported_cache): 69 | # Note: those tests require a memcached server running 70 | tested_cache = caches[supported_cache] 71 | tested_cache.set("foo1", "bar v.1", version=1) 72 | tested_cache.set("foo1", "bar v.2", version=2) 73 | assert tested_cache.get("foo1", version=1) == "bar v.1" 74 | assert tested_cache.get("foo1", version=2) == "bar v.2" 75 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/test_middleware_custom_labels.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from prometheus_client import REGISTRY 3 | from prometheus_client.metrics import MetricWrapperBase 4 | 5 | from django_prometheus.middleware import ( 6 | Metrics, 7 | PrometheusAfterMiddleware, 8 | PrometheusBeforeMiddleware, 9 | ) 10 | from django_prometheus.testutils import assert_metric_diff, save_registry 11 | from testapp.helpers import get_middleware 12 | from testapp.test_middleware import M, T 13 | 14 | EXTENDED_METRICS = [ 15 | M("requests_latency_seconds_by_view_method"), 16 | M("responses_total_by_status_view_method"), 17 | ] 18 | 19 | 20 | class CustomMetrics(Metrics): 21 | def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs): 22 | if name in EXTENDED_METRICS: 23 | labelnames.extend(("view_type", "user_agent_type")) 24 | return super().register_metric(metric_cls, name, documentation, labelnames=labelnames, **kwargs) 25 | 26 | 27 | class AppMetricsBeforeMiddleware(PrometheusBeforeMiddleware): 28 | metrics_cls = CustomMetrics 29 | 30 | 31 | class AppMetricsAfterMiddleware(PrometheusAfterMiddleware): 32 | metrics_cls = CustomMetrics 33 | 34 | def label_metric(self, metric, request, response=None, **labels): 35 | new_labels = labels 36 | if metric._name in EXTENDED_METRICS: 37 | new_labels = {"view_type": "foo", "user_agent_type": "browser"} 38 | new_labels.update(labels) 39 | return super().label_metric(metric, request, response=response, **new_labels) 40 | 41 | 42 | class TestMiddlewareMetricsWithCustomLabels: 43 | @pytest.fixture(autouse=True) 44 | def _setup(self, settings): 45 | settings.MIDDLEWARE = get_middleware( 46 | "testapp.test_middleware_custom_labels.AppMetricsBeforeMiddleware", 47 | "testapp.test_middleware_custom_labels.AppMetricsAfterMiddleware", 48 | ) 49 | # Allow CustomMetrics to be used 50 | for metric in Metrics._instance.__dict__.values(): 51 | if isinstance(metric, MetricWrapperBase): 52 | REGISTRY.unregister(metric) 53 | Metrics._instance = None 54 | 55 | def test_request_counters(self, client): 56 | registry = save_registry() 57 | client.get("/") 58 | client.get("/") 59 | client.get("/help") 60 | client.post("/", {"test": "data"}) 61 | 62 | assert_metric_diff(registry, 4, M("requests_before_middlewares_total")) 63 | assert_metric_diff(registry, 4, M("responses_before_middlewares_total")) 64 | assert_metric_diff(registry, 3, 
T("requests_total_by_method"), method="GET") 65 | assert_metric_diff(registry, 1, T("requests_total_by_method"), method="POST") 66 | assert_metric_diff(registry, 4, T("requests_total_by_transport"), transport="http") 67 | assert_metric_diff( 68 | registry, 69 | 2, 70 | T("requests_total_by_view_transport_method"), 71 | view="testapp.views.index", 72 | transport="http", 73 | method="GET", 74 | ) 75 | assert_metric_diff( 76 | registry, 77 | 1, 78 | T("requests_total_by_view_transport_method"), 79 | view="testapp.views.help", 80 | transport="http", 81 | method="GET", 82 | ) 83 | assert_metric_diff( 84 | registry, 85 | 1, 86 | T("requests_total_by_view_transport_method"), 87 | view="testapp.views.index", 88 | transport="http", 89 | method="POST", 90 | ) 91 | assert_metric_diff( 92 | registry, 93 | 2.0, 94 | T("responses_total_by_status_view_method"), 95 | status="200", 96 | view="testapp.views.index", 97 | method="GET", 98 | view_type="foo", 99 | user_agent_type="browser", 100 | ) 101 | assert_metric_diff( 102 | registry, 103 | 1.0, 104 | T("responses_total_by_status_view_method"), 105 | status="200", 106 | view="testapp.views.help", 107 | method="GET", 108 | view_type="foo", 109 | user_agent_type="browser", 110 | ) 111 | -------------------------------------------------------------------------------- /update_version_from_git.py: -------------------------------------------------------------------------------- 1 | """Adapted from https://github.com/pygame/pygameweb/blob/master/pygameweb/builds/update_version_from_git.py 2 | 3 | For updating the version from git. 4 | __init__.py contains a __version__ field. 5 | Update that. 6 | If the user supplies "patch" as a CLi argument, we want to bump the existing patch version 7 | If the user supplied the full version as a CLI argument, we want to use that version. 8 | Otherwise, 9 | If we are on master, we want to update the version as a pre-release. 10 | git describe --tags 11 | With these: 12 | __init__.py 13 | __version__= '0.0.2' 14 | git describe --tags 15 | 0.0.1-22-g729a5ae 16 | We want this: 17 | __init__.py 18 | __version__= '0.0.2.dev22.g729a5ae' 19 | Get the branch/tag name with this. 
20 | git symbolic-ref -q --short HEAD || git describe --tags --exact-match 21 | """ 22 | 23 | import re 24 | import subprocess 25 | import sys 26 | from pathlib import Path 27 | 28 | from packaging.version import Version 29 | 30 | _INIT_FILE = Path("django_prometheus/__init__.py") 31 | 32 | 33 | def migrate_source_attribute(attr, to_this, target_file): 34 | """Updates __magic__ attributes in the source file""" 35 | new_file = [] 36 | found = False 37 | lines = target_file.read_text().splitlines() 38 | 39 | for line in lines: 40 | if line.startswith(attr): 41 | found = True 42 | line = to_this 43 | new_file.append(line) 44 | 45 | if found: 46 | target_file.write_text("\n".join(new_file)) 47 | 48 | 49 | def migrate_version(new_version): 50 | """Updates __version__ in the init file""" 51 | print(f"migrate to version: {new_version}") 52 | migrate_source_attribute("__version__", to_this=f'__version__ = "{new_version}"\n', target_file=_INIT_FILE) 53 | 54 | 55 | def is_master_branch(): 56 | cmd = "git rev-parse --abbrev-ref HEAD" 57 | tag_branch = subprocess.check_output(cmd, shell=True) 58 | return tag_branch in [b"master\n"] 59 | 60 | 61 | def get_git_version_info(): 62 | cmd = "git describe --tags" 63 | ver_str = subprocess.check_output(cmd, shell=True) 64 | ver, commits_since, githash = ver_str.decode().strip().split("-") 65 | return Version(ver), int(commits_since), githash 66 | 67 | 68 | def prerelease_version(): 69 | """Return what the prerelease version should be. 70 | https://packaging.python.org/tutorials/distributing-packages/#pre-release-versioning 71 | 0.0.2.dev22 72 | """ 73 | ver, commits_since, githash = get_git_version_info() 74 | initpy_ver = get_version() 75 | 76 | assert initpy_ver > ver, "the django_prometheus/__init__.py version should be newer than the last tagged release." 77 | return f"{initpy_ver.major}.{initpy_ver.minor}.{initpy_ver.micro}.dev{commits_since}" 78 | 79 | 80 | def get_version(): 81 | """Returns version from django_prometheus/__init__.py""" 82 | version_file = _INIT_FILE.read_text() 83 | version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.MULTILINE) 84 | if not version_match: 85 | raise RuntimeError("Unable to find version string.") 86 | initpy_ver = version_match.group(1) 87 | assert len(initpy_ver.split(".")) in [3, 4], "django_prometheus/__init__.py version should be like 0.0.2.dev" 88 | return Version(initpy_ver) 89 | 90 | 91 | def increase_patch_version(old_version): 92 | """:param old_version: 2.0.1 93 | :return: 2.0.2.dev 94 | """ 95 | return f"{old_version.major}.{old_version.minor}.{old_version.micro + 1}.dev" 96 | 97 | 98 | def release_version_correct(): 99 | """Makes sure the: 100 | - prerelease version for master is correct. 101 | - release version is correct for tags. 
102 | """ 103 | print("update for a pre release version") 104 | assert is_master_branch(), "No non-master deployments yet" 105 | new_version = prerelease_version() 106 | print(f"updating version in __init__.py to {new_version}") 107 | assert len(new_version.split(".")) >= 4, "django_prometheus/__init__.py version should be like 0.0.2.dev" 108 | migrate_version(new_version) 109 | 110 | 111 | if __name__ == "__main__": 112 | new_version = None 113 | if len(sys.argv) == 1: 114 | release_version_correct() 115 | elif len(sys.argv) == 2: 116 | for _, arg in enumerate(sys.argv): 117 | new_version = arg 118 | if new_version == "patch": 119 | new_version = increase_patch_version(get_version()) 120 | 121 | migrate_version(new_version) 122 | else: 123 | print( 124 | "Invalid usage. Supply 0 or 1 arguments. " 125 | "Argument can be either a version '1.2.3' or 'patch' " 126 | "if you want to increase the patch-version (1.2.3 -> 1.2.4.dev)", 127 | ) 128 | -------------------------------------------------------------------------------- /django_prometheus/exports.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import threading 4 | 5 | import prometheus_client 6 | from django.conf import settings 7 | from django.http import HttpResponse 8 | from prometheus_client import multiprocess 9 | 10 | try: 11 | # Python 2 12 | from BaseHTTPServer import HTTPServer 13 | except ImportError: 14 | # Python 3 15 | from http.server import HTTPServer 16 | 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | def SetupPrometheusEndpointOnPort(port, addr=""): 22 | """Exports Prometheus metrics on an HTTPServer running in its own thread. 23 | 24 | The server runs on the given port and is by default listening on 25 | all interfaces. This HTTPServer is fully independent of Django and 26 | its stack. This offers the advantage that even if Django becomes 27 | unable to respond, the HTTPServer will continue to function and 28 | export metrics. However, this also means that the features 29 | offered by Django (like middlewares or WSGI) can't be used. 30 | 31 | Now here's the really weird part. When Django runs with the 32 | auto-reloader enabled (which is the default, you can disable it 33 | with `manage.py runserver --noreload`), it forks and executes 34 | manage.py twice. That's wasteful but usually OK. It starts being a 35 | problem when you try to open a port, like we do. We can detect 36 | that we're running under an autoreloader through the presence of 37 | the RUN_MAIN environment variable, so we abort if we're trying to 38 | export under an autoreloader and trying to open a port. 39 | """ 40 | assert os.environ.get("RUN_MAIN") != "true", ( 41 | "The thread-based exporter can't be safely used when django's " 42 | "autoreloader is active. Use the URL exporter, or start django " 43 | "with --noreload. See documentation/exports.md." 44 | ) 45 | prometheus_client.start_http_server(port, addr=addr) 46 | 47 | 48 | class PrometheusEndpointServer(threading.Thread): 49 | """A thread class that holds an http and makes it serve_forever().""" 50 | 51 | def __init__(self, httpd, *args, **kwargs): 52 | self.httpd = httpd 53 | super().__init__(*args, **kwargs) 54 | 55 | def run(self): 56 | self.httpd.serve_forever() 57 | 58 | 59 | def SetupPrometheusEndpointOnPortRange(port_range, addr=""): 60 | """Like SetupPrometheusEndpointOnPort, but tries several ports. 
61 | 62 | This is useful when you're running Django as a WSGI application 63 | with multiple processes and you want Prometheus to discover all 64 | workers. Each worker will grab a port and you can use Prometheus 65 | to aggregate across workers. 66 | 67 | port_range may be any iterable object that contains a list of 68 | ports. Typically this would be a `range` of contiguous ports. 69 | 70 | As soon as one port is found that can serve, use this one and stop 71 | trying. 72 | 73 | Returns the port chosen (an `int`), or `None` if no port in the 74 | supplied range was available. 75 | 76 | The same caveats regarding autoreload apply. Do not use this when 77 | Django's autoreloader is active. 78 | """ 79 | assert os.environ.get("RUN_MAIN") != "true", ( 80 | "The thread-based exporter can't be safely used when django's " 81 | "autoreloader is active. Use the URL exporter, or start django " 82 | "with --noreload. See documentation/exports.md." 83 | ) 84 | for port in port_range: 85 | try: 86 | httpd = HTTPServer((addr, port), prometheus_client.MetricsHandler) 87 | except OSError: 88 | # Python 2 raises socket.error, in Python 3 socket.error is an 89 | # alias for OSError 90 | continue # Try next port 91 | thread = PrometheusEndpointServer(httpd) 92 | thread.daemon = True 93 | thread.start() 94 | logger.info(f"Exporting Prometheus /metrics/ on port {port}") 95 | return port # Stop trying ports at this point 96 | logger.warning("Cannot export Prometheus /metrics/ - no available ports in supplied range") 97 | return None 98 | 99 | 100 | def SetupPrometheusExportsFromConfig(): 101 | """Exports metrics so Prometheus can collect them.""" 102 | port = getattr(settings, "PROMETHEUS_METRICS_EXPORT_PORT", None) 103 | port_range = getattr(settings, "PROMETHEUS_METRICS_EXPORT_PORT_RANGE", None) 104 | addr = getattr(settings, "PROMETHEUS_METRICS_EXPORT_ADDRESS", "") 105 | if port_range: 106 | SetupPrometheusEndpointOnPortRange(port_range, addr) 107 | elif port: 108 | SetupPrometheusEndpointOnPort(port, addr) 109 | 110 | 111 | def ExportToDjangoView(request): 112 | """Exports /metrics as a Django view. 113 | 114 | You can use django_prometheus.urls to map /metrics to this view. 115 | """ 116 | if "PROMETHEUS_MULTIPROC_DIR" in os.environ or "prometheus_multiproc_dir" in os.environ: 117 | registry = prometheus_client.CollectorRegistry() 118 | multiprocess.MultiProcessCollector(registry) 119 | else: 120 | registry = prometheus_client.REGISTRY 121 | metrics_page = prometheus_client.generate_latest(registry) 122 | return HttpResponse(metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST) 123 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | from testapp.helpers import get_middleware 5 | 6 | # SECURITY WARNING: keep the secret key used in production secret! 
7 | SECRET_KEY = ")0-t%mc5y1^fn8e7i**^^v166@5iu(&-2%9#kxud0&4ap#k!_k" 8 | DEBUG = True 9 | ALLOWED_HOSTS = [] 10 | 11 | 12 | # Application definition 13 | INSTALLED_APPS = ( 14 | "django.contrib.admin", 15 | "django.contrib.auth", 16 | "django.contrib.contenttypes", 17 | "django.contrib.sessions", 18 | "django.contrib.messages", 19 | "django.contrib.staticfiles", 20 | "django_prometheus", 21 | "testapp", 22 | ) 23 | 24 | 25 | MIDDLEWARE = get_middleware( 26 | "django_prometheus.middleware.PrometheusBeforeMiddleware", 27 | "django_prometheus.middleware.PrometheusAfterMiddleware", 28 | ) 29 | 30 | ROOT_URLCONF = "testapp.urls" 31 | 32 | TEMPLATES = [ 33 | { 34 | "BACKEND": "django.template.backends.django.DjangoTemplates", 35 | "DIRS": [], 36 | "APP_DIRS": True, 37 | "OPTIONS": { 38 | "context_processors": [ 39 | "django.template.context_processors.debug", 40 | "django.template.context_processors.request", 41 | "django.contrib.auth.context_processors.auth", 42 | "django.contrib.messages.context_processors.messages", 43 | ], 44 | }, 45 | }, 46 | ] 47 | 48 | WSGI_APPLICATION = "testapp.wsgi.application" 49 | 50 | 51 | DATABASES = { 52 | "default": { 53 | "ENGINE": "django_prometheus.db.backends.sqlite3", 54 | "NAME": "db.sqlite3", 55 | }, 56 | # Comment this to not test django_prometheus.db.backends.postgres. 57 | "postgresql": { 58 | "ENGINE": "django_prometheus.db.backends.postgresql", 59 | "NAME": "postgres", 60 | "USER": "postgres", 61 | "PASSWORD": "", 62 | "HOST": "localhost", 63 | "PORT": "5432", 64 | }, 65 | # Comment this to not test django_prometheus.db.backends.postgis. 66 | "postgis": { 67 | "ENGINE": "django_prometheus.db.backends.postgis", 68 | "NAME": "postgis", 69 | "USER": "postgres", 70 | "PASSWORD": "", 71 | "HOST": "localhost", 72 | "PORT": "5432", 73 | }, 74 | # Comment this to not test django_prometheus.db.backends.mysql. 
75 | "mysql": { 76 | "ENGINE": "django_prometheus.db.backends.mysql", 77 | "NAME": "django_prometheus_1", 78 | "USER": "root", 79 | "PASSWORD": "", 80 | "HOST": "127.0.0.1", 81 | "PORT": "3306", 82 | }, 83 | "spatialite": { 84 | "ENGINE": "django_prometheus.db.backends.spatialite", 85 | "NAME": "db_spatialite.sqlite3", 86 | }, 87 | # The following databases are used by test_db.py only 88 | "test_db_1": { 89 | "ENGINE": "django_prometheus.db.backends.sqlite3", 90 | "NAME": "test_db_1.sqlite3", 91 | }, 92 | "test_db_2": { 93 | "ENGINE": "django_prometheus.db.backends.sqlite3", 94 | "NAME": "test_db_2.sqlite3", 95 | }, 96 | } 97 | 98 | # Caches 99 | _tmp_cache_dir = tempfile.mkdtemp() 100 | 101 | CACHES = { 102 | "default": { 103 | "BACKEND": "django_prometheus.cache.backends.memcached.PyLibMCCache", 104 | "LOCATION": "localhost:11211", 105 | }, 106 | "memcached.PyLibMCCache": { 107 | "BACKEND": "django_prometheus.cache.backends.memcached.PyLibMCCache", 108 | "LOCATION": "localhost:11211", 109 | }, 110 | "memcached.PyMemcacheCache": { 111 | "BACKEND": "django_prometheus.cache.backends.memcached.PyMemcacheCache", 112 | "LOCATION": "localhost:11211", 113 | }, 114 | "filebased": { 115 | "BACKEND": "django_prometheus.cache.backends.filebased.FileBasedCache", 116 | "LOCATION": os.path.join(_tmp_cache_dir, "django_cache"), 117 | }, 118 | "locmem": { 119 | "BACKEND": "django_prometheus.cache.backends.locmem.LocMemCache", 120 | "LOCATION": os.path.join(_tmp_cache_dir, "locmem_cache"), 121 | }, 122 | "native_redis": { 123 | "BACKEND": "django_prometheus.cache.backends.redis.NativeRedisCache", 124 | "LOCATION": "redis://127.0.0.1:6379/0", 125 | }, 126 | "redis": { 127 | "BACKEND": "django_prometheus.cache.backends.redis.RedisCache", 128 | "LOCATION": "redis://127.0.0.1:6379/1", 129 | }, 130 | # Fake redis config emulated stopped service 131 | "stopped_redis": { 132 | "BACKEND": "django_prometheus.cache.backends.redis.RedisCache", 133 | "LOCATION": "redis://127.0.0.1:6666/1", 134 | }, 135 | "stopped_redis_ignore_exception": { 136 | "BACKEND": "django_prometheus.cache.backends.redis.RedisCache", 137 | "LOCATION": "redis://127.0.0.1:6666/1", 138 | "OPTIONS": {"IGNORE_EXCEPTIONS": True}, 139 | }, 140 | } 141 | 142 | 143 | # Internationalization 144 | LANGUAGE_CODE = "en-us" 145 | TIME_ZONE = "UTC" 146 | USE_I18N = True 147 | USE_TZ = False 148 | 149 | 150 | # Static files (CSS, JavaScript, Images) 151 | STATIC_URL = "/static/" 152 | 153 | LOGGING = { 154 | "version": 1, 155 | "disable_existing_loggers": False, 156 | "handlers": {"console": {"class": "logging.StreamHandler"}}, 157 | "root": {"handlers": ["console"], "level": "INFO"}, 158 | "loggers": {"django": {"handlers": ["console"], "level": "INFO"}}, 159 | } 160 | -------------------------------------------------------------------------------- /django_prometheus/tests/test_testutils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from operator import itemgetter 3 | 4 | import prometheus_client 5 | import pytest 6 | 7 | from django_prometheus.testutils import ( 8 | assert_metric_diff, 9 | assert_metric_equal, 10 | assert_metric_no_diff, 11 | assert_metric_not_equal, 12 | get_metric, 13 | get_metric_from_frozen_registry, 14 | get_metrics_vector, 15 | save_registry, 16 | ) 17 | 18 | 19 | class TestPrometheusTestCaseMixin: 20 | @pytest.fixture 21 | def registry(self): 22 | return prometheus_client.CollectorRegistry() 23 | 24 | @pytest.fixture(autouse=True) 25 | def some_gauge(self, 
registry): 26 | some_gauge = prometheus_client.Gauge("some_gauge", "Some gauge.", registry=registry) 27 | some_gauge.set(42) 28 | return some_gauge 29 | 30 | @pytest.fixture(autouse=True) 31 | def some_labelled_gauge(self, registry): 32 | some_labelled_gauge = prometheus_client.Gauge( 33 | "some_labelled_gauge", 34 | "Some labelled gauge.", 35 | ["labelred", "labelblue"], 36 | registry=registry, 37 | ) 38 | some_labelled_gauge.labels("pink", "indigo").set(1) 39 | some_labelled_gauge.labels("pink", "royal").set(2) 40 | some_labelled_gauge.labels("carmin", "indigo").set(3) 41 | some_labelled_gauge.labels("carmin", "royal").set(4) 42 | return some_labelled_gauge 43 | 44 | def test_get_metric(self, registry): 45 | """Tests get_metric.""" 46 | assert get_metric("some_gauge", registry=registry) == 42 47 | assert ( 48 | get_metric( 49 | "some_labelled_gauge", 50 | registry=registry, 51 | labelred="pink", 52 | labelblue="indigo", 53 | ) 54 | == 1 55 | ) 56 | 57 | def test_get_metrics_vector(self, registry): 58 | """Tests get_metrics_vector.""" 59 | vector = get_metrics_vector("some_nonexistent_gauge", registry=registry) 60 | assert vector == [] 61 | vector = get_metrics_vector("some_gauge", registry=registry) 62 | assert vector == [({}, 42)] 63 | vector = get_metrics_vector("some_labelled_gauge", registry=registry) 64 | assert sorted( 65 | [ 66 | ({"labelred": "pink", "labelblue": "indigo"}, 1), 67 | ({"labelred": "pink", "labelblue": "royal"}, 2), 68 | ({"labelred": "carmin", "labelblue": "indigo"}, 3), 69 | ({"labelred": "carmin", "labelblue": "royal"}, 4), 70 | ], 71 | key=itemgetter(1), 72 | ) == sorted(vector, key=itemgetter(1)) 73 | 74 | def test_assert_metric_equal(self, registry): 75 | """Tests assert_metric_equal.""" 76 | # First we test that a scalar metric can be tested. 77 | assert_metric_equal(42, "some_gauge", registry=registry) 78 | 79 | assert_metric_not_equal(43, "some_gauge", registry=registry) 80 | 81 | # Here we test that assert_metric_equal fails on nonexistent gauges. 82 | assert_metric_not_equal(42, "some_nonexistent_gauge", registry=registry) 83 | 84 | # Here we test that labelled metrics can be tested. 85 | assert_metric_equal( 86 | 1, 87 | "some_labelled_gauge", 88 | registry=registry, 89 | labelred="pink", 90 | labelblue="indigo", 91 | ) 92 | 93 | assert_metric_not_equal( 94 | 1, 95 | "some_labelled_gauge", 96 | registry=registry, 97 | labelred="tomato", 98 | labelblue="sky", 99 | ) 100 | 101 | def test_registry_saving(self, registry, some_gauge, some_labelled_gauge): 102 | """Tests save_registry and frozen registries operations.""" 103 | frozen_registry = save_registry(registry=registry) 104 | # Test that we can manipulate a frozen scalar metric. 105 | assert get_metric_from_frozen_registry("some_gauge", frozen_registry) == 42 106 | some_gauge.set(99) 107 | assert get_metric_from_frozen_registry("some_gauge", frozen_registry) == 42 108 | assert_metric_diff(frozen_registry, 99 - 42, "some_gauge", registry=registry) 109 | assert_metric_no_diff(frozen_registry, 1, "some_gauge", registry=registry) 110 | # Now test the same thing with a labelled metric. 
111 | assert ( 112 | get_metric_from_frozen_registry( 113 | "some_labelled_gauge", 114 | frozen_registry, 115 | labelred="pink", 116 | labelblue="indigo", 117 | ) 118 | == 1 119 | ) 120 | some_labelled_gauge.labels("pink", "indigo").set(5) 121 | assert ( 122 | get_metric_from_frozen_registry( 123 | "some_labelled_gauge", 124 | frozen_registry, 125 | labelred="pink", 126 | labelblue="indigo", 127 | ) 128 | == 1 129 | ) 130 | assert_metric_diff( 131 | frozen_registry, 132 | 5 - 1, 133 | "some_labelled_gauge", 134 | registry=registry, 135 | labelred="pink", 136 | labelblue="indigo", 137 | ) 138 | 139 | assert_metric_no_diff( 140 | frozen_registry, 141 | 1, 142 | "some_labelled_gauge", 143 | registry=registry, 144 | labelred="pink", 145 | labelblue="indigo", 146 | ) 147 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/test_middleware.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from django_prometheus.testutils import ( 4 | assert_metric_diff, 5 | assert_metric_equal, 6 | save_registry, 7 | ) 8 | from testapp.views import ObjectionException 9 | 10 | 11 | def M(metric_name): 12 | """Makes a full metric name from a short metric name. 13 | 14 | This is just intended to help keep the lines shorter in test 15 | cases. 16 | """ 17 | return f"django_http_{metric_name}" 18 | 19 | 20 | def T(metric_name): 21 | """Makes a full metric name from a short metric name like M(metric_name) 22 | 23 | This method adds a '_total' postfix for metrics. 24 | """ 25 | return f"{M(metric_name)}_total" 26 | 27 | 28 | class TestMiddlewareMetrics: 29 | """Test django_prometheus.middleware. 30 | 31 | Note that counters related to exceptions can't be tested as 32 | Django's test Client only simulates requests and the exception 33 | handling flow is very different in that simulation. 34 | """ 35 | 36 | @pytest.fixture(autouse=True) 37 | def _setup(self, settings): 38 | settings.PROMETHEUS_LATENCY_BUCKETS = (0.05, 1.0, 2.0, 4.0, 5.0, 10.0, float("inf")) 39 | 40 | def test_request_counters(self, client): 41 | registry = save_registry() 42 | client.get("/") 43 | client.get("/") 44 | client.get("/help") 45 | client.post("/", {"test": "data"}) 46 | 47 | assert_metric_diff(registry, 4, M("requests_before_middlewares_total")) 48 | assert_metric_diff(registry, 4, M("responses_before_middlewares_total")) 49 | assert_metric_diff(registry, 3, T("requests_total_by_method"), method="GET") 50 | assert_metric_diff(registry, 1, T("requests_total_by_method"), method="POST") 51 | assert_metric_diff(registry, 4, T("requests_total_by_transport"), transport="http") 52 | assert_metric_diff( 53 | registry, 54 | 2, 55 | T("requests_total_by_view_transport_method"), 56 | view="testapp.views.index", 57 | transport="http", 58 | method="GET", 59 | ) 60 | assert_metric_diff( 61 | registry, 62 | 1, 63 | T("requests_total_by_view_transport_method"), 64 | view="testapp.views.help", 65 | transport="http", 66 | method="GET", 67 | ) 68 | assert_metric_diff( 69 | registry, 70 | 1, 71 | T("requests_total_by_view_transport_method"), 72 | view="testapp.views.index", 73 | transport="http", 74 | method="POST", 75 | ) 76 | # We have 3 requests with no post body, and one with a few 77 | # bytes, but buckets are cumulative so that is 4 requests with 78 | # <=128 bytes bodies. 
79 | assert_metric_diff(registry, 3, M("requests_body_total_bytes_bucket"), le="0.0") 80 | assert_metric_diff(registry, 4, M("requests_body_total_bytes_bucket"), le="128.0") 81 | assert_metric_equal(None, M("responses_total_by_templatename"), templatename="help.html") 82 | assert_metric_diff(registry, 3, T("responses_total_by_templatename"), templatename="index.html") 83 | assert_metric_diff(registry, 4, T("responses_total_by_status"), status="200") 84 | assert_metric_diff(registry, 0, M("responses_body_total_bytes_bucket"), le="0.0") 85 | assert_metric_diff(registry, 3, M("responses_body_total_bytes_bucket"), le="128.0") 86 | assert_metric_diff(registry, 4, M("responses_body_total_bytes_bucket"), le="8192.0") 87 | assert_metric_diff(registry, 4, T("responses_total_by_charset"), charset="utf-8") 88 | assert_metric_diff(registry, 0, M("responses_streaming_total")) 89 | 90 | def test_latency_histograms(self, client): 91 | # Caution: this test is timing-based. This is not ideal. It 92 | # runs slowly (each request to /slow takes at least .1 seconds 93 | # to complete), to eliminate flakiness we adjust the buckets used 94 | # in the test suite. 95 | 96 | registry = save_registry() 97 | 98 | # This always takes more than .1 second, so checking the lower 99 | # buckets is fine. 100 | client.get("/slow") 101 | assert_metric_diff( 102 | registry, 103 | 0, 104 | M("requests_latency_seconds_by_view_method_bucket"), 105 | le="0.05", 106 | view="slow", 107 | method="GET", 108 | ) 109 | assert_metric_diff( 110 | registry, 111 | 1, 112 | M("requests_latency_seconds_by_view_method_bucket"), 113 | le="5.0", 114 | view="slow", 115 | method="GET", 116 | ) 117 | 118 | def test_exception_latency_histograms(self, client): 119 | registry = save_registry() 120 | 121 | try: 122 | client.get("/objection") 123 | except ObjectionException: 124 | pass 125 | assert_metric_diff( 126 | registry, 127 | 2, 128 | M("requests_latency_seconds_by_view_method_bucket"), 129 | le="2.5", 130 | view="testapp.views.objection", 131 | method="GET", 132 | ) 133 | 134 | def test_streaming_responses(self, client): 135 | registry = save_registry() 136 | client.get("/") 137 | client.get("/file") 138 | assert_metric_diff(registry, 1, M("responses_streaming_total")) 139 | assert_metric_diff(registry, 1, M("responses_body_total_bytes_bucket"), le="+Inf") 140 | -------------------------------------------------------------------------------- /documentation/exports.md: -------------------------------------------------------------------------------- 1 | # Exports 2 | 3 | ## Default: exporting /metrics as a Django view 4 | 5 | /metrics can be exported as a Django view very easily. Simply 6 | include('django_prometheus.urls') with no prefix like so: 7 | 8 | ```python 9 | urlpatterns = [ 10 | ... 11 | path('', include('django_prometheus.urls')), 12 | ] 13 | ``` 14 | 15 | This will reserve the /metrics path on your server. This may be a 16 | problem for you, so you can use a prefix. For instance, the following 17 | will export the metrics at `/monitoring/metrics` instead. You will 18 | need to configure Prometheus to use that path instead of the default. 19 | 20 | ```python 21 | urlpatterns = [ 22 | ... 23 | path('monitoring/', include('django_prometheus.urls')), 24 | ] 25 | ``` 26 | 27 | ## Exporting /metrics in a dedicated thread 28 | 29 | To ensure that issues in your Django app do not affect the monitoring, 30 | it is recommended to export /metrics in an HTTPServer running in a 31 | daemon thread. 
This ensures that problems such as thread 32 | starvation or low-level bugs in Django do not affect the export of 33 | your metrics, which may be needed more than ever when such problems 34 | occur. 35 | 36 | It can be enabled by adding the following lines to your `settings.py`: 37 | 38 | ```python 39 | PROMETHEUS_METRICS_EXPORT_PORT = 8001 40 | PROMETHEUS_METRICS_EXPORT_ADDRESS = '' # all addresses 41 | ``` 42 | 43 | However, by default this mechanism is disabled, because it is not 44 | compatible with Django's autoreloader. The autoreloader is the feature 45 | that allows you to edit your code and see the changes 46 | immediately. This works by forking multiple processes of Django, which 47 | would compete for the port. As such, this code will assert-fail if the 48 | autoreloader is active. 49 | 50 | You can run Django without the autoreloader by passing `--noreload` to 51 | `manage.py`. If you decide to enable the thread-based exporter in 52 | production, you may wish to modify your manage.py to ensure that this 53 | option is always active: 54 | 55 | ```python 56 | execute_from_command_line(sys.argv + ['--noreload']) 57 | ``` 58 | 59 | ## Exporting /metrics in a WSGI application with multiple processes per process 60 | 61 | If you're using WSGI (e.g. with uwsgi or gunicorn) and multiple 62 | Django processes, neither option above works: requests 63 | to the Django view would reach an inconsistent backend each 64 | time, and exporting on a single port doesn't work either. 65 | 66 | The following setting can be used instead: 67 | 68 | ```python 69 | PROMETHEUS_METRICS_EXPORT_PORT_RANGE = range(8001, 8050) 70 | ``` 71 | 72 | This will make Django-Prometheus try to export /metrics on port 73 | 8001. If this fails (i.e. the port is in use), it will try 8002, then 74 | 8003, etc. 75 | 76 | You can then configure Prometheus to collect metrics on as many 77 | targets as you have workers, using each port separately. 78 | 79 | This approach requires the application to be loaded into each child process. 80 | uWSGI and Gunicorn typically load the application into the master process before forking the child processes. 81 | Set the [lazy-apps option](https://uwsgi-docs.readthedocs.io/en/latest/Options.html#lazy-apps) to `true` (uWSGI) 82 | or the [preload-app option](https://docs.gunicorn.org/en/stable/settings.html#preload-app) to `false` (Gunicorn) 83 | to change this behaviour. 84 | 85 | 86 | ## Exporting /metrics in a WSGI application with multiple processes globally 87 | 88 | In some WSGI applications, workers are short-lived (less than a minute), so some 89 | are never scraped by Prometheus by default. The Prometheus client already provides 90 | a system to aggregate them via the environment variable `PROMETHEUS_MULTIPROC_DIR`, 91 | which configures the directory where metrics will be stored as files per process. 92 | 93 | Configuration in uwsgi would look like: 94 | 95 | ```ini 96 | env = PROMETHEUS_MULTIPROC_DIR=/path/to/django_metrics 97 | ``` 98 | 99 | You can also set this environment variable elsewhere, such as in a Kubernetes manifest. 100 | 101 | Setting this will create four files (one for counters, one for summaries, etc.) 102 | for each pid used. In uwsgi, the number of different pids used can be quite large 103 | (the pid changes every time a worker respawns). To prevent having thousands of files 104 | created, it's possible to create files using worker ids rather than pids.
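In addition to the worker-id approach described next, you can keep the multiprocess directory from growing without bound by telling the Prometheus client when a worker has died. The sketch below is not part of django-prometheus itself; it assumes you run under Gunicorn (the `gunicorn.conf.py` file and the `child_exit` server hook are Gunicorn conventions) and that `PROMETHEUS_MULTIPROC_DIR` is already set in the environment:

```python
# gunicorn.conf.py: a minimal sketch, assuming Gunicorn and PROMETHEUS_MULTIPROC_DIR.
from prometheus_client import multiprocess


def child_exit(server, worker):
    # Called by Gunicorn in the master process after a worker exits.
    # mark_process_dead() removes the dead worker's "live" gauge files so the
    # multiprocess directory does not accumulate entries for every pid ever used.
    multiprocess.mark_process_dead(worker.pid)
```

Counter, histogram and summary files for old pids are still kept (their values must outlive the worker), so wiping the directory on application restart remains advisable.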
105 | 106 | You can change the function used for identifying the process to use the uwsgi worker_id. 107 | Modify this in settings before any metrics are created: 108 | 109 | ```python 110 | try: 111 | import prometheus_client 112 | import uwsgi 113 | prometheus_client.values.ValueClass = prometheus_client.values.MultiProcessValue( 114 | process_identifier=uwsgi.worker_id) 115 | except ImportError: 116 | pass # not running in uwsgi 117 | ``` 118 | 119 | Note that this code uses internal interfaces of prometheus_client. 120 | The underlying implementation may change. 121 | 122 | The number of resulting files will be: 123 | number of processes * 4 (counter, histogram, gauge, summary) 124 | 125 | Be aware that by default this will generate a large amount of file descriptors: 126 | Each worker will keep 3 file descriptors for each files it created. 127 | 128 | Since these files will be written often, you should consider mounting this directory 129 | as a `tmpfs` or using a subdir of an existing one such as `/run/` or `/var/run/`. 130 | 131 | If uwsgi is not using lazy-apps (lazy-apps = true), there will be a 132 | file descriptors leak (tens to hundreds of fds on a single file) due 133 | to the way uwsgi forks processes to create workers. 134 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 2 | # Django Commons Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | We as members, contributors, and leaders pledge to make participation in our 7 | community a harassment-free experience for everyone, regardless of age, body 8 | size, visible or invisible disability, ethnicity, sex characteristics, gender 9 | identity and expression, level of experience, education, socio-economic status, 10 | nationality, personal appearance, race, caste, color, religion, or sexual 11 | identity and orientation. 12 | 13 | We pledge to act and interact in ways that contribute to an open, welcoming, 14 | diverse, inclusive, and healthy community. 15 | 16 | ## Our Standards 17 | 18 | Examples of behavior that contributes to a positive environment for our 19 | community include: 20 | 21 | * Demonstrating empathy and kindness toward other people 22 | * Being respectful of differing opinions, viewpoints, and experiences 23 | * Giving and gracefully accepting constructive feedback 24 | * Accepting responsibility and apologizing to those affected by our mistakes, 25 | and learning from the experience 26 | * Focusing on what is best not just for us as individuals, but for the overall 27 | community 28 | 29 | Examples of unacceptable behavior include: 30 | 31 | * The use of sexualized language or imagery, and sexual attention or advances of 32 | any kind 33 | * Trolling, insulting or derogatory comments, and personal or political attacks 34 | * Public or private harassment 35 | * Publishing others' private information, such as a physical or email address, 36 | without their explicit permission 37 | * Other conduct which could reasonably be considered inappropriate in a 38 | professional setting 39 | 40 | ## Enforcement Responsibilities 41 | 42 | Community leaders are responsible for clarifying and enforcing our standards of 43 | acceptable behavior and will take appropriate and fair corrective action in 44 | response to any behavior that they deem inappropriate, threatening, offensive, 45 | or harmful. 
46 | 47 | Community leaders have the right and responsibility to remove, edit, or reject 48 | comments, commits, code, wiki edits, issues, and other contributions that are 49 | not aligned to this Code of Conduct, and will communicate reasons for moderation 50 | decisions when appropriate. 51 | 52 | ## Scope 53 | 54 | This Code of Conduct applies within all community spaces, and also applies when 55 | an individual is officially representing the community in public spaces. 56 | Examples of representing our community include using an official email address, 57 | posting via an official social media account, or acting as an appointed 58 | representative at an online or offline event. 59 | 60 | ## Enforcement 61 | 62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 63 | reported to the community leaders responsible for enforcement at 64 | [django-commons-coc@googlegroups.com](mailto:django-commons-coc@googlegroups.com). 65 | All complaints will be reviewed and investigated promptly and fairly. 66 | 67 | All community leaders are obligated to respect the privacy and security of the 68 | reporter of any incident. 69 | 70 | ## Enforcement Guidelines 71 | 72 | Community leaders will follow these Community Impact Guidelines in determining 73 | the consequences for any action they deem in violation of this Code of Conduct: 74 | 75 | ### 1. Warning 76 | 77 | **Community Impact**: Use of inappropriate language or other behavior deemed 78 | unprofessional or unwelcome in the community. 79 | 80 | **Consequence**: A warning with consequences for continued behavior. No 81 | interaction with the people involved, including unsolicited interaction with 82 | those enforcing the Code of Conduct, for a specified period of time. This 83 | includes avoiding interactions in community spaces as well as external channels 84 | like social media. Violating these terms may lead to a temporary or permanent 85 | ban. 86 | 87 | ### 2. Temporary Ban 88 | 89 | **Community Impact**: A serious violation of community standards, including 90 | sustained inappropriate behavior. 91 | 92 | **Consequence**: A temporary ban from any sort of interaction or public 93 | communication with the community for a specified period of time. No public or 94 | private interaction with the people involved, including unsolicited interaction 95 | with those enforcing the Code of Conduct, is allowed during this period. 96 | Violating these terms may lead to a permanent ban. 97 | 98 | ### 3. Permanent Ban 99 | 100 | **Community Impact**: Demonstrating a pattern of violation of community 101 | standards, including sustained inappropriate behavior, harassment of an 102 | individual, or aggression toward or disparagement of classes of individuals. 103 | 104 | **Consequence**: A permanent ban from any sort of public interaction within the 105 | community. 106 | 107 | ## Attribution 108 | 109 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 110 | version 2.1, available at 111 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 112 | 113 | Community Impact Guidelines were inspired by 114 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 115 | 116 | For answers to common questions about this code of conduct, see the FAQ at 117 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 118 | [https://www.contributor-covenant.org/translations][translations]. 
119 | 120 | [homepage]: https://www.contributor-covenant.org 121 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 122 | [Mozilla CoC]: https://github.com/mozilla/diversity 123 | [FAQ]: https://www.contributor-covenant.org/faq 124 | [translations]: https://www.contributor-covenant.org/translations 125 | -------------------------------------------------------------------------------- /examples/prometheus/consoles/django.html: -------------------------------------------------------------------------------- 1 | {{template "head" .}} 2 | 3 | {{template "prom_right_table_head"}} 4 | 5 | Django 6 | {{ template "prom_query_drilldown" (args "sum(up{job='django'})") }} 7 | / {{ template "prom_query_drilldown" (args "count(up{job='django'})") }} 8 | 9 | 10 | 11 | avg CPU 12 | {{ template "prom_query_drilldown" (args "avg by(job)(rate(process_cpu_seconds_total{job='django'}[5m]))" "s/s" "humanizeNoSmallPrefix") }} 13 | 14 | 15 | 16 | avg Memory 17 | {{ template "prom_query_drilldown" (args "avg by(job)(process_resident_memory_bytes{job='django'})" "B" "humanize1024") }} 18 | 19 | 20 | {{template "prom_right_table_tail"}} 21 | 22 | 23 | {{template "prom_content_head" .}} 24 |

Django

25 | 26 |

Requests

27 |

Total

28 |
29 | 41 | 42 |

By view

43 |
44 | 57 | 58 |

Latency (median)

59 |
60 | 72 | 73 |

Latency (99.9th percentile)

74 |
75 | 87 | 88 |

Models

89 |

Insertions/s

90 |
91 | 102 | 103 |

Updates/s

104 |
105 | 116 | 117 |

Deletions/s

118 |
119 | 130 | 131 |

Database

132 |

Connections/s

133 |
134 | 146 | 147 |

Connections errors/s

148 |
149 | 161 | 162 |

Queries/s

163 |
164 | 176 | 177 |

Errors/s

178 |
179 | 191 | 192 | {{template "prom_content_tail" .}} 193 | 194 | {{template "tail"}} 195 | -------------------------------------------------------------------------------- /examples/prometheus/django.rules: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: django.rules 3 | rules: 4 | - record: job:django_http_requests_before_middlewares_total:sum_rate30s 5 | expr: sum(rate(django_http_requests_before_middlewares_total[30s])) BY (job) 6 | - record: job:django_http_requests_unknown_latency_total:sum_rate30s 7 | expr: sum(rate(django_http_requests_unknown_latency_total[30s])) BY (job) 8 | - record: job:django_http_ajax_requests_total:sum_rate30s 9 | expr: sum(rate(django_http_ajax_requests_total[30s])) BY (job) 10 | - record: job:django_http_responses_before_middlewares_total:sum_rate30s 11 | expr: sum(rate(django_http_responses_before_middlewares_total[30s])) BY (job) 12 | - record: job:django_http_requests_unknown_latency_including_middlewares_total:sum_rate30s 13 | expr: sum(rate(django_http_requests_unknown_latency_including_middlewares_total[30s])) 14 | BY (job) 15 | - record: job:django_http_requests_body_total_bytes:sum_rate30s 16 | expr: sum(rate(django_http_requests_body_total_bytes[30s])) BY (job) 17 | - record: job:django_http_responses_streaming_total:sum_rate30s 18 | expr: sum(rate(django_http_responses_streaming_total[30s])) BY (job) 19 | - record: job:django_http_responses_body_total_bytes:sum_rate30s 20 | expr: sum(rate(django_http_responses_body_total_bytes[30s])) BY (job) 21 | - record: job:django_http_requests_total:sum_rate30s 22 | expr: sum(rate(django_http_requests_total_by_method[30s])) BY (job) 23 | - record: job:django_http_requests_total_by_method:sum_rate30s 24 | expr: sum(rate(django_http_requests_total_by_method[30s])) BY (job, method) 25 | - record: job:django_http_requests_total_by_transport:sum_rate30s 26 | expr: sum(rate(django_http_requests_total_by_transport[30s])) BY (job, transport) 27 | - record: job:django_http_requests_total_by_view:sum_rate30s 28 | expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) BY (job, 29 | view) 30 | - record: job:django_http_requests_total_by_view_transport_method:sum_rate30s 31 | expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) BY (job, 32 | view, transport, method) 33 | - record: job:django_http_responses_total_by_templatename:sum_rate30s 34 | expr: sum(rate(django_http_responses_total_by_templatename[30s])) BY (job, templatename) 35 | - record: job:django_http_responses_total_by_status:sum_rate30s 36 | expr: sum(rate(django_http_responses_total_by_status[30s])) BY (job, status) 37 | - record: job:django_http_responses_total_by_status_name_method:sum_rate30s 38 | expr: sum(rate(django_http_responses_total_by_status_name_method[30s])) BY (job, 39 | status, name, method) 40 | - record: job:django_http_responses_total_by_charset:sum_rate30s 41 | expr: sum(rate(django_http_responses_total_by_charset[30s])) BY (job, charset) 42 | - record: job:django_http_exceptions_total_by_type:sum_rate30s 43 | expr: sum(rate(django_http_exceptions_total_by_type[30s])) BY (job, type) 44 | - record: job:django_http_exceptions_total_by_view:sum_rate30s 45 | expr: sum(rate(django_http_exceptions_total_by_view[30s])) BY (job, view) 46 | - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s 47 | expr: histogram_quantile(0.5, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) 48 | BY 
(job, le)) 49 | labels: 50 | quantile: "50" 51 | - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s 52 | expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) 53 | BY (job, le)) 54 | labels: 55 | quantile: "95" 56 | - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s 57 | expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) 58 | BY (job, le)) 59 | labels: 60 | quantile: "99" 61 | - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s 62 | expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) 63 | BY (job, le)) 64 | labels: 65 | quantile: "99.9" 66 | - record: job:django_http_requests_latency_seconds:quantile_rate30s 67 | expr: histogram_quantile(0.5, sum(rate(django_http_requests_latency_seconds_bucket[30s])) 68 | BY (job, le)) 69 | labels: 70 | quantile: "50" 71 | - record: job:django_http_requests_latency_seconds:quantile_rate30s 72 | expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_seconds_bucket[30s])) 73 | BY (job, le)) 74 | labels: 75 | quantile: "95" 76 | - record: job:django_http_requests_latency_seconds:quantile_rate30s 77 | expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_seconds_bucket[30s])) 78 | BY (job, le)) 79 | labels: 80 | quantile: "99" 81 | - record: job:django_http_requests_latency_seconds:quantile_rate30s 82 | expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_seconds_bucket[30s])) 83 | BY (job, le)) 84 | labels: 85 | quantile: "99.9" 86 | - record: job:django_model_inserts_total:sum_rate1m 87 | expr: sum(rate(django_model_inserts_total[1m])) BY (job, model) 88 | - record: job:django_model_updates_total:sum_rate1m 89 | expr: sum(rate(django_model_updates_total[1m])) BY (job, model) 90 | - record: job:django_model_deletes_total:sum_rate1m 91 | expr: sum(rate(django_model_deletes_total[1m])) BY (job, model) 92 | - record: job:django_db_new_connections_total:sum_rate30s 93 | expr: sum(rate(django_db_new_connections_total[30s])) BY (alias, vendor) 94 | - record: job:django_db_new_connection_errors_total:sum_rate30s 95 | expr: sum(rate(django_db_new_connection_errors_total[30s])) BY (alias, vendor) 96 | - record: job:django_db_execute_total:sum_rate30s 97 | expr: sum(rate(django_db_execute_total[30s])) BY (alias, vendor) 98 | - record: job:django_db_execute_many_total:sum_rate30s 99 | expr: sum(rate(django_db_execute_many_total[30s])) BY (alias, vendor) 100 | - record: job:django_db_errors_total:sum_rate30s 101 | expr: sum(rate(django_db_errors_total[30s])) BY (alias, vendor, type) 102 | - record: job:django_migrations_applied_total:max 103 | expr: max(django_migrations_applied_total) BY (job, connection) 104 | - record: job:django_migrations_unapplied_total:max 105 | expr: max(django_migrations_unapplied_total) BY (job, connection) 106 | -------------------------------------------------------------------------------- /django_prometheus/testutils.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | from prometheus_client import REGISTRY 4 | 5 | METRIC_EQUALS_ERR_EXPLANATION = """ 6 | %s%s = %s, expected %s. 7 | The values for %s are: 8 | %s""" 9 | 10 | METRIC_DIFF_ERR_EXPLANATION = """ 11 | %s%s changed by %f, expected %f. 
12 | Value before: %s 13 | Value after: %s 14 | """ 15 | 16 | METRIC_COMPARE_ERR_EXPLANATION = """ 17 | The change in value of %s%s didn't match the predicate. 18 | Value before: %s 19 | Value after: %s 20 | """ 21 | 22 | METRIC_DIFF_ERR_NONE_EXPLANATION = """ 23 | %s%s was None after. 24 | Value before: %s 25 | Value after: %s 26 | """ 27 | 28 | 29 | """A collection of utilities that make it easier to write test cases 30 | that interact with metrics. 31 | """ 32 | 33 | 34 | def assert_metric_equal(expected_value, metric_name, registry=REGISTRY, **labels): 35 | """Asserts that metric_name{**labels} == expected_value.""" 36 | value = get_metric(metric_name, registry=registry, **labels) 37 | assert_err = METRIC_EQUALS_ERR_EXPLANATION % ( 38 | metric_name, 39 | format_labels(labels), 40 | value, 41 | expected_value, 42 | metric_name, 43 | format_vector(get_metrics_vector(metric_name)), 44 | ) 45 | assert expected_value == value, assert_err 46 | 47 | 48 | def assert_metric_diff(frozen_registry, expected_diff, metric_name, registry=REGISTRY, **labels): 49 | """Asserts that metric_name{**labels} changed by expected_diff between 50 | the frozen registry and now. A frozen registry can be obtained 51 | by calling save_registry, typically at the beginning of a test 52 | case. 53 | """ 54 | saved_value = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels) 55 | current_value = get_metric(metric_name, registry=registry, **labels) 56 | assert current_value is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % ( 57 | metric_name, 58 | format_labels(labels), 59 | saved_value, 60 | current_value, 61 | ) 62 | diff = current_value - (saved_value or 0.0) 63 | assert_err = METRIC_DIFF_ERR_EXPLANATION % ( 64 | metric_name, 65 | format_labels(labels), 66 | diff, 67 | expected_diff, 68 | saved_value, 69 | current_value, 70 | ) 71 | assert expected_diff == diff, assert_err 72 | 73 | 74 | def assert_metric_no_diff(frozen_registry, expected_diff, metric_name, registry=REGISTRY, **labels): 75 | """Asserts that metric_name{**labels} isn't changed by expected_diff between 76 | the frozen registry and now. A frozen registry can be obtained 77 | by calling save_registry, typically at the beginning of a test 78 | case. 
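    A typical use (doStuff() stands in for whatever the test exercises, and 'some_metric_total' is an illustrative metric name): registry = save_registry() doStuff() assert_metric_no_diff(registry, 1, 'some_metric_total')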
79 | """ 80 | saved_value = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels) 81 | current_value = get_metric(metric_name, registry=registry, **labels) 82 | assert current_value is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % ( 83 | metric_name, 84 | format_labels(labels), 85 | saved_value, 86 | current_value, 87 | ) 88 | diff = current_value - (saved_value or 0.0) 89 | assert_err = METRIC_DIFF_ERR_EXPLANATION % ( 90 | metric_name, 91 | format_labels(labels), 92 | diff, 93 | expected_diff, 94 | saved_value, 95 | current_value, 96 | ) 97 | assert expected_diff != diff, assert_err 98 | 99 | 100 | def assert_metric_not_equal(expected_value, metric_name, registry=REGISTRY, **labels): 101 | """Asserts that metric_name{**labels} != expected_value.""" 102 | value = get_metric(metric_name, registry=registry, **labels) 103 | assert_err = METRIC_EQUALS_ERR_EXPLANATION % ( 104 | metric_name, 105 | format_labels(labels), 106 | value, 107 | expected_value, 108 | metric_name, 109 | format_vector(get_metrics_vector(metric_name)), 110 | ) 111 | assert expected_value != value, assert_err 112 | 113 | 114 | def assert_metric_compare(frozen_registry, predicate, metric_name, registry=REGISTRY, **labels): 115 | """Asserts that metric_name{**labels} changed according to a provided 116 | predicate function between the frozen registry and now. A 117 | frozen registry can be obtained by calling save_registry, 118 | typically at the beginning of a test case. 119 | """ 120 | saved_value = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels) 121 | current_value = get_metric(metric_name, registry=registry, **labels) 122 | assert current_value is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % ( 123 | metric_name, 124 | format_labels(labels), 125 | saved_value, 126 | current_value, 127 | ) 128 | assert predicate(saved_value, current_value) is True, METRIC_COMPARE_ERR_EXPLANATION % ( 129 | metric_name, 130 | format_labels(labels), 131 | saved_value, 132 | current_value, 133 | ) 134 | 135 | 136 | def save_registry(registry=REGISTRY): 137 | """Freezes a registry. This lets a user test changes to a metric 138 | instead of testing the absolute value. A typical use case looks like: 139 | 140 | registry = save_registry() 141 | doStuff() 142 | assert_metric_diff(registry, 1, 'stuff_done_total') 143 | """ 144 | return copy.deepcopy(list(registry.collect())) 145 | 146 | 147 | def get_metric(metric_name, registry=REGISTRY, **labels): 148 | """Gets a single metric.""" 149 | return get_metric_from_frozen_registry(metric_name, registry.collect(), **labels) 150 | 151 | 152 | def get_metrics_vector(metric_name, registry=REGISTRY): 153 | """Returns the values for all labels of a given metric. 154 | 155 | The result is returned as a list of (labels, value) tuples, 156 | where `labels` is a dict. 157 | 158 | This is quite a hack since it relies on the internal 159 | representation of the prometheus_client, and it should 160 | probably be provided as a function there instead. 
161 | """ 162 | return get_metric_vector_from_frozen_registry(metric_name, registry.collect()) 163 | 164 | 165 | def get_metric_vector_from_frozen_registry(metric_name, frozen_registry): 166 | """Like get_metrics_vector, but from a frozen registry.""" 167 | output = [] 168 | for metric in frozen_registry: 169 | for sample in metric.samples: 170 | if sample[0] == metric_name: 171 | output.append((sample[1], sample[2])) 172 | return output 173 | 174 | 175 | def get_metric_from_frozen_registry(metric_name, frozen_registry, **labels): 176 | """Gets a single metric from a frozen registry.""" 177 | for metric in frozen_registry: 178 | for sample in metric.samples: 179 | if sample[0] == metric_name and sample[1] == labels: 180 | return sample[2] 181 | 182 | 183 | def format_labels(labels): 184 | """Format a set of labels to Prometheus representation. 185 | 186 | In: 187 | {'method': 'GET', 'port': '80'} 188 | 189 | Out: 190 | '{method="GET",port="80"}' 191 | """ 192 | return "{{{}}}".format(",".join([f'{k}="{v}"' for k, v in labels.items()])) 193 | 194 | 195 | def format_vector(vector): 196 | """Formats a list of (labels, value) where labels is a dict into a 197 | human-readable representation. 198 | """ 199 | return "\n".join([f"{format_labels(labels)} = {value}" for labels, value in vector]) 200 | -------------------------------------------------------------------------------- /django_prometheus/tests/end2end/testapp/test_db.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from django.conf import settings 3 | from django.db import connections 4 | 5 | from django_prometheus.testutils import ( 6 | assert_metric_compare, 7 | assert_metric_diff, 8 | assert_metric_equal, 9 | get_metric, 10 | save_registry, 11 | ) 12 | 13 | # @pytest.fixture(autouse=True) 14 | # def enable_db_access_for_all_tests(db): 15 | # pass 16 | 17 | 18 | @pytest.mark.django_db(databases=list(settings.DATABASES.keys())) 19 | class BaseDBTest: 20 | pass 21 | 22 | 23 | @pytest.mark.skipif(connections["test_db_1"].vendor != "sqlite", reason="Skipped unless test_db_1 uses sqlite") 24 | class TestDbMetrics(BaseDBTest): 25 | """Test django_prometheus.db metrics. 26 | 27 | Note regarding the values of metrics: many tests interact with the 28 | database, and the test runner itself does. As such, tests that 29 | require that a metric has a specific value are at best very 30 | fragile. Consider asserting that the value exceeds a certain 31 | threshold, or check by how much it increased during the test. 32 | """ 33 | 34 | def test_config_has_expected_databases(self): 35 | """Not a real unit test: ensures that testapp.settings contains the 36 | databases this test expects. 
37 | """ 38 | assert "default" in connections.databases.keys() 39 | assert "test_db_1" in connections.databases.keys() 40 | assert "test_db_2" in connections.databases.keys() 41 | 42 | def test_counters(self): 43 | cursor_db1 = connections["test_db_1"].cursor() 44 | cursor_db2 = connections["test_db_2"].cursor() 45 | cursor_db1.execute("SELECT 1") 46 | for _ in range(200): 47 | cursor_db2.execute("SELECT 2") 48 | cursor_db1.execute("SELECT 3") 49 | try: 50 | cursor_db1.execute("this is clearly not valid SQL") 51 | except Exception: 52 | pass 53 | 54 | assert_metric_equal( 55 | 1, 56 | "django_db_errors_total", 57 | alias="test_db_1", 58 | vendor="sqlite", 59 | type="OperationalError", 60 | ) 61 | assert get_metric("django_db_execute_total", alias="test_db_1", vendor="sqlite") > 0 62 | assert get_metric("django_db_execute_total", alias="test_db_2", vendor="sqlite") >= 200 63 | 64 | def test_histograms(self): 65 | cursor_db1 = connections["test_db_1"].cursor() 66 | cursor_db2 = connections["test_db_2"].cursor() 67 | cursor_db1.execute("SELECT 1") 68 | for _ in range(200): 69 | cursor_db2.execute("SELECT 2") 70 | assert ( 71 | get_metric( 72 | "django_db_query_duration_seconds_count", 73 | alias="test_db_1", 74 | vendor="sqlite", 75 | ) 76 | > 0 77 | ) 78 | assert ( 79 | get_metric( 80 | "django_db_query_duration_seconds_count", 81 | alias="test_db_2", 82 | vendor="sqlite", 83 | ) 84 | >= 200 85 | ) 86 | 87 | def test_execute_many(self): 88 | registry = save_registry() 89 | cursor_db1 = connections["test_db_1"].cursor() 90 | cursor_db1.executemany( 91 | "INSERT INTO testapp_lawn(location) VALUES (?)", 92 | [("Paris",), ("New York",), ("Berlin",), ("San Francisco",)], 93 | ) 94 | assert_metric_diff( 95 | registry, 96 | 4, 97 | "django_db_execute_many_total", 98 | alias="test_db_1", 99 | vendor="sqlite", 100 | ) 101 | 102 | 103 | @pytest.mark.skipif("postgresql" not in connections, reason="Skipped unless postgresql database is enabled") 104 | class TestPostgresDbMetrics(BaseDBTest): 105 | """Test django_prometheus.db metrics for postgres backend. 106 | 107 | Note regarding the values of metrics: many tests interact with the 108 | database, and the test runner itself does. As such, tests that 109 | require that a metric has a specific value are at best very 110 | fragile. Consider asserting that the value exceeds a certain 111 | threshold, or check by how much it increased during the test. 112 | """ 113 | 114 | def test_counters(self): 115 | registry = save_registry() 116 | cursor = connections["postgresql"].cursor() 117 | 118 | for _ in range(20): 119 | cursor.execute("SELECT 1") 120 | 121 | assert_metric_compare( 122 | registry, 123 | lambda a, b: a + 20 <= b < a + 25, 124 | "django_db_execute_total", 125 | alias="postgresql", 126 | vendor="postgresql", 127 | ) 128 | 129 | 130 | @pytest.mark.skipif("mysql" not in connections, reason="Skipped unless mysql database is enabled") 131 | class TestMysDbMetrics(BaseDBTest): 132 | """Test django_prometheus.db metrics for mys backend. 133 | 134 | Note regarding the values of metrics: many tests interact with the 135 | database, and the test runner itself does. As such, tests that 136 | require that a metric has a specific value are at best very 137 | fragile. Consider asserting that the value exceeds a certain 138 | threshold, or check by how much it increased during the test. 
139 | """ 140 | 141 | def test_counters(self): 142 | registry = save_registry() 143 | cursor = connections["mysql"].cursor() 144 | 145 | for _ in range(20): 146 | cursor.execute("SELECT 1") 147 | 148 | assert_metric_compare( 149 | registry, 150 | lambda a, b: a + 20 <= b < a + 25, 151 | "django_db_execute_total", 152 | alias="mysql", 153 | vendor="mysql", 154 | ) 155 | 156 | 157 | @pytest.mark.skipif("postgis" not in connections, reason="Skipped unless postgis database is enabled") 158 | class TestPostgisDbMetrics(BaseDBTest): 159 | """Test django_prometheus.db metrics for postgis backend. 160 | 161 | Note regarding the values of metrics: many tests interact with the 162 | database, and the test runner itself does. As such, tests that 163 | require that a metric has a specific value are at best very 164 | fragile. Consider asserting that the value exceeds a certain 165 | threshold, or check by how much it increased during the test. 166 | """ 167 | 168 | def test_counters(self): 169 | r = save_registry() 170 | cursor = connections["postgis"].cursor() 171 | 172 | for _ in range(20): 173 | cursor.execute("SELECT 1") 174 | 175 | assert_metric_compare( 176 | r, 177 | lambda a, b: a + 20 <= b < a + 25, 178 | "django_db_execute_total", 179 | alias="postgis", 180 | vendor="postgresql", 181 | ) 182 | 183 | 184 | @pytest.mark.skipif("spatialite" not in connections, reason="Skipped unless spatialite database is enabled") 185 | class TestSpatialiteDbMetrics(BaseDBTest): 186 | """Test django_prometheus.db metrics for spatialite backend. 187 | 188 | Note regarding the values of metrics: many tests interact with the 189 | database, and the test runner itself does. As such, tests that 190 | require that a metric has a specific value are at best very 191 | fragile. Consider asserting that the value exceeds a certain 192 | threshold, or check by how much it increased during the test. 
193 | """ 194 | 195 | def test_counters(self): 196 | r = save_registry() 197 | connection = connections["spatialite"] 198 | 199 | # Make sure the extension is loaded and geospatial tables are created 200 | connection.prepare_database() 201 | 202 | cursor = connection.cursor() 203 | 204 | for _ in range(20): 205 | cursor.execute("SELECT 1") 206 | 207 | assert_metric_compare( 208 | r, 209 | lambda a, b: a + 20 <= b < a + 25, 210 | "django_db_execute_total", 211 | alias="spatialite", 212 | vendor="sqlite", 213 | ) 214 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # django-prometheus 2 | 3 | Export Django monitoring metrics for Prometheus.io 4 | 5 | [![Join the chat at https://gitter.im/django-prometheus/community](https://badges.gitter.im/django-prometheus/community.svg)](https://gitter.im/django-prometheus/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 6 | 7 | [![PyPI version](https://badge.fury.io/py/django-prometheus.svg)](http://badge.fury.io/py/django-prometheus) 8 | [![Build Status](https://github.com/korfuri/django-prometheus/actions/workflows/ci.yml/badge.svg)](https://github.com/korfuri/django-prometheus/actions/workflows/ci.yml) 9 | [![Coverage Status](https://coveralls.io/repos/github/korfuri/django-prometheus/badge.svg?branch=master)](https://coveralls.io/github/korfuri/django-prometheus?branch=master) 10 | [![PyPi page link -- Python versions](https://img.shields.io/pypi/pyversions/django-prometheus.svg)](https://pypi.python.org/pypi/django-prometheus) 11 | 12 | 13 | ## Features 14 | 15 | This library provides Prometheus metrics for Django related operations: 16 | 17 | * Requests & Responses 18 | * Database access done via [Django ORM](https://docs.djangoproject.com/en/3.2/topics/db/) 19 | * Cache access done via [Django Cache framework](https://docs.djangoproject.com/en/3.2/topics/cache/) 20 | 21 | ## Usage 22 | 23 | ### Requirements 24 | 25 | * Django >= 4.2 26 | * Python 3.9 and above. 27 | 28 | ### Installation 29 | 30 | Install with: 31 | 32 | ```shell 33 | pip install django-prometheus 34 | ``` 35 | 36 | Or, if you're using a development version cloned from this repository: 37 | 38 | ```shell 39 | python path-to-where-you-cloned-django-prometheus/setup.py install 40 | ``` 41 | 42 | This will install [prometheus_client](https://github.com/prometheus/client_python) as a dependency. 43 | 44 | ### Quickstart 45 | 46 | In your settings.py: 47 | 48 | ```python 49 | INSTALLED_APPS = [ 50 | ... 51 | 'django_prometheus', 52 | ... 53 | ] 54 | 55 | MIDDLEWARE = [ 56 | 'django_prometheus.middleware.PrometheusBeforeMiddleware', 57 | # All your other middlewares go here, including the default 58 | # middlewares like SessionMiddleware, CommonMiddleware, 59 | # CsrfViewmiddleware, SecurityMiddleware, etc. 60 | 'django_prometheus.middleware.PrometheusAfterMiddleware', 61 | ] 62 | ``` 63 | 64 | In your urls.py: 65 | 66 | ```python 67 | urlpatterns = [ 68 | ... 69 | path('', include('django_prometheus.urls')), 70 | ] 71 | ``` 72 | 73 | ### Configuration 74 | 75 | Prometheus uses Histogram based grouping for monitoring latencies. 
The default 76 | buckets are: 77 | 78 | ```python 79 | PROMETHEUS_LATENCY_BUCKETS = (0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 25.0, 50.0, 75.0, float("inf"),) 80 | ``` 81 | 82 | You can define custom buckets for latency; adding more buckets decreases performance but 83 | increases accuracy: 84 | 85 | ```python 86 | PROMETHEUS_LATENCY_BUCKETS = (.1, .2, .5, .6, .8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.5, 9.0, 12.0, 15.0, 20.0, 30.0, float("inf")) 87 | ``` 88 | 89 | --- 90 | 91 | You can have a custom namespace for your metrics: 92 | 93 | ```python 94 | PROMETHEUS_METRIC_NAMESPACE = "project" 95 | ``` 96 | 97 | This will prefix all metrics with `project_`, like this: 98 | 99 | ```text 100 | project_django_http_requests_total_by_method_total{method="GET"} 1.0 101 | ``` 102 | 103 | ### Monitoring your databases 104 | 105 | SQLite, MySQL, and PostgreSQL databases can be monitored. Just 106 | update the `ENGINE` property of your database, replacing 107 | `django.db.backends` with `django_prometheus.db.backends`. 108 | 109 | ```python 110 | DATABASES = { 111 | 'default': { 112 | 'ENGINE': 'django_prometheus.db.backends.sqlite3', 113 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 114 | }, 115 | } 116 | ``` 117 | 118 | ### Monitoring your caches 119 | 120 | Filebased, memcached and redis caches can be monitored. Just swap 121 | the cache backend for the one provided by django_prometheus, replacing 122 | `django.core.cache.backends` with `django_prometheus.cache.backends`. 123 | 124 | ```python 125 | CACHES = { 126 | 'default': { 127 | 'BACKEND': 'django_prometheus.cache.backends.filebased.FileBasedCache', 128 | 'LOCATION': '/var/tmp/django_cache', 129 | } 130 | } 131 | ``` 132 | 133 | ### Monitoring your models 134 | 135 | You may want to monitor the creation/deletion/update rate for your 136 | models. This can be done by adding a mixin to them. This is safe to do 137 | on existing models (it does not require a migration). 138 | 139 | If your model is: 140 | 141 | ```python 142 | class Dog(models.Model): 143 | name = models.CharField(max_length=100, unique=True) 144 | breed = models.CharField(max_length=100, blank=True, null=True) 145 | age = models.PositiveIntegerField(blank=True, null=True) 146 | ``` 147 | 148 | Just add the `ExportModelOperationsMixin` as such: 149 | 150 | ```python 151 | from django_prometheus.models import ExportModelOperationsMixin 152 | 153 | class Dog(ExportModelOperationsMixin('dog'), models.Model): 154 | name = models.CharField(max_length=100, unique=True) 155 | breed = models.CharField(max_length=100, blank=True, null=True) 156 | age = models.PositiveIntegerField(blank=True, null=True) 157 | ``` 158 | 159 | This will export 3 metrics, `django_model_inserts_total{model="dog"}`, 160 | `django_model_updates_total{model="dog"}` and 161 | `django_model_deletes_total{model="dog"}`. 162 | 163 | Note that the exported metrics are counters of creations, 164 | modifications and deletions done in the current process. They are not 165 | gauges of the number of objects in the model. 166 | 167 | Starting with Django 1.7, migrations are also monitored. Two gauges 168 | are exported, `django_migrations_applied_total` and 169 | `django_migrations_unapplied_total` (labelled by connection). You may want to alert if 170 | there are unapplied migrations. 171 | 172 | If you want to disable the Django migration metrics, set the 173 | `PROMETHEUS_EXPORT_MIGRATIONS` setting to False. 
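For example, a minimal `settings.py` override (illustrative only) that turns the migration gauges off:

```python
# settings.py
# Disable the django_migrations_* gauges for this process.
PROMETHEUS_EXPORT_MIGRATIONS = False
```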
174 | 175 | ### Monitoring and aggregating the metrics 176 | 177 | Prometheus is quite easy to set up. An example prometheus.conf to 178 | scrape `127.0.0.1:8001` can be found in `examples/prometheus`. 179 | 180 | Here's an example of a PromDash displaying some of the metrics 181 | collected by django-prometheus: 182 | 183 | ![Example dashboard](https://raw.githubusercontent.com/korfuri/django-prometheus/master/examples/django-promdash.png) 184 | 185 | ## Adding your own metrics 186 | 187 | You can add application-level metrics in your code by using 188 | [prometheus_client](https://github.com/prometheus/client_python) 189 | directly. The exporter is global and will pick up your metrics. 190 | 191 | To add metrics to the Django internals, the easiest way is to extend 192 | django-prometheus' classes. Please consider contributing your metrics; 193 | pull requests are welcome. Make sure to read the Prometheus best 194 | practices on 195 | [instrumentation](http://prometheus.io/docs/practices/instrumentation/) 196 | and [naming](http://prometheus.io/docs/practices/naming/). 197 | 198 | ## Importing Django Prometheus using only local settings 199 | 200 | If you wish to use Django Prometheus but are not able to change 201 | the code base, it's possible to have all the default metrics by 202 | modifying only the settings. 203 | 204 | The first step is to inject the prometheus middlewares and to add 205 | django_prometheus to INSTALLED_APPS: 206 | 207 | ```python 208 | MIDDLEWARE = \ 209 | ['django_prometheus.middleware.PrometheusBeforeMiddleware'] + \ 210 | MIDDLEWARE + \ 211 | ['django_prometheus.middleware.PrometheusAfterMiddleware'] 212 | 213 | INSTALLED_APPS += ['django_prometheus'] 214 | ``` 215 | 216 | The second step is to create the /metrics endpoint. For that, we need 217 | another file (called urls_prometheus_wrapper.py in this example) that 218 | wraps the app's URLs and adds one on top: 219 | 220 | ```python 221 | from django.urls import include, path 222 | 223 | 224 | urlpatterns = [] 225 | 226 | urlpatterns.append(path('prometheus/', include('django_prometheus.urls'))) 227 | urlpatterns.append(path('', include('myapp.urls'))) 228 | ``` 229 | 230 | This file adds a "/prometheus/metrics" endpoint to Django's URLs 231 | that will export the metrics (replace myapp with your project name). 232 | 233 | Then we inject the wrapper in settings: 234 | 235 | ```python 236 | ROOT_URLCONF = "graphite.urls_prometheus_wrapper" 237 | ``` 238 | 239 | ## Adding custom labels to middleware (request/response) metrics 240 | 241 | You can add application-specific labels to metrics reported by the django-prometheus middleware. 242 | This involves extending the classes defined in middleware.py. 243 | 244 | * Extend the Metrics class and override the `register_metric` method to add the application-specific labels. 245 | * Extend the middleware classes, set the metrics_cls class attribute to the extended metrics class and override the label_metric method to attach the custom labels. 246 | 247 | See an implementation example in [the test app](django_prometheus/tests/end2end/testapp/test_middleware_custom_labels.py#L19-L46) 248 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /django_prometheus/middleware.py: -------------------------------------------------------------------------------- 1 | from django.utils.deprecation import MiddlewareMixin 2 | from prometheus_client import Counter, Histogram 3 | 4 | from django_prometheus.conf import NAMESPACE, PROMETHEUS_LATENCY_BUCKETS 5 | from django_prometheus.utils import PowersOf, Time, TimeSince 6 | 7 | 8 | class Metrics: 9 | _instance = None 10 | 11 | @classmethod 12 | def get_instance(cls): 13 | if not cls._instance: 14 | cls._instance = cls() 15 | return cls._instance 16 | 17 | def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs): 18 | return metric_cls(name, documentation, labelnames=labelnames, **kwargs) 19 | 20 | def __init__(self, *args, **kwargs): 21 | self.register() 22 | 23 | def register(self): 24 | self.requests_total = self.register_metric( 25 | Counter, 26 | "django_http_requests_before_middlewares_total", 27 | "Total count of requests before middlewares run.", 28 | namespace=NAMESPACE, 29 | ) 30 | self.responses_total = self.register_metric( 31 | Counter, 32 | "django_http_responses_before_middlewares_total", 33 | "Total count of responses before middlewares run.", 34 | namespace=NAMESPACE, 35 | ) 36 | self.requests_latency_before = self.register_metric( 37 | Histogram, 38 | "django_http_requests_latency_including_middlewares_seconds", 39 | ("Histogram of requests processing time (including middleware processing time)."), 40 | buckets=PROMETHEUS_LATENCY_BUCKETS, 41 | namespace=NAMESPACE, 42 | ) 43 | self.requests_unknown_latency_before = self.register_metric( 44 | Counter, 45 | "django_http_requests_unknown_latency_including_middlewares_total", 46 | ( 47 | "Count of requests for which the latency was unknown (when computing " 48 | "django_http_requests_latency_including_middlewares_seconds)." 
49 | ), 50 | namespace=NAMESPACE, 51 | ) 52 | self.requests_latency_by_view_method = self.register_metric( 53 | Histogram, 54 | "django_http_requests_latency_seconds_by_view_method", 55 | "Histogram of request processing time labelled by view.", 56 | ["view", "method"], 57 | buckets=PROMETHEUS_LATENCY_BUCKETS, 58 | namespace=NAMESPACE, 59 | ) 60 | self.requests_unknown_latency = self.register_metric( 61 | Counter, 62 | "django_http_requests_unknown_latency_total", 63 | "Count of requests for which the latency was unknown.", 64 | namespace=NAMESPACE, 65 | ) 66 | # Set in process_request 67 | self.requests_ajax = self.register_metric( 68 | Counter, 69 | "django_http_ajax_requests_total", 70 | "Count of AJAX requests.", 71 | namespace=NAMESPACE, 72 | ) 73 | self.requests_by_method = self.register_metric( 74 | Counter, 75 | "django_http_requests_total_by_method", 76 | "Count of requests by method.", 77 | ["method"], 78 | namespace=NAMESPACE, 79 | ) 80 | self.requests_by_transport = self.register_metric( 81 | Counter, 82 | "django_http_requests_total_by_transport", 83 | "Count of requests by transport.", 84 | ["transport"], 85 | namespace=NAMESPACE, 86 | ) 87 | # Set in process_view 88 | self.requests_by_view_transport_method = self.register_metric( 89 | Counter, 90 | "django_http_requests_total_by_view_transport_method", 91 | "Count of requests by view, transport, method.", 92 | ["view", "transport", "method"], 93 | namespace=NAMESPACE, 94 | ) 95 | self.requests_body_bytes = self.register_metric( 96 | Histogram, 97 | "django_http_requests_body_total_bytes", 98 | "Histogram of requests by body size.", 99 | buckets=PowersOf(2, 30), 100 | namespace=NAMESPACE, 101 | ) 102 | # Set in process_template_response 103 | self.responses_by_templatename = self.register_metric( 104 | Counter, 105 | "django_http_responses_total_by_templatename", 106 | "Count of responses by template name.", 107 | ["templatename"], 108 | namespace=NAMESPACE, 109 | ) 110 | # Set in process_response 111 | self.responses_by_status = self.register_metric( 112 | Counter, 113 | "django_http_responses_total_by_status", 114 | "Count of responses by status.", 115 | ["status"], 116 | namespace=NAMESPACE, 117 | ) 118 | self.responses_by_status_view_method = self.register_metric( 119 | Counter, 120 | "django_http_responses_total_by_status_view_method", 121 | "Count of responses by status, view, method.", 122 | ["status", "view", "method"], 123 | namespace=NAMESPACE, 124 | ) 125 | self.responses_body_bytes = self.register_metric( 126 | Histogram, 127 | "django_http_responses_body_total_bytes", 128 | "Histogram of responses by body size.", 129 | buckets=PowersOf(2, 30), 130 | namespace=NAMESPACE, 131 | ) 132 | self.responses_by_charset = self.register_metric( 133 | Counter, 134 | "django_http_responses_total_by_charset", 135 | "Count of responses by charset.", 136 | ["charset"], 137 | namespace=NAMESPACE, 138 | ) 139 | self.responses_streaming = self.register_metric( 140 | Counter, 141 | "django_http_responses_streaming_total", 142 | "Count of streaming responses.", 143 | namespace=NAMESPACE, 144 | ) 145 | # Set in process_exception 146 | self.exceptions_by_type = self.register_metric( 147 | Counter, 148 | "django_http_exceptions_total_by_type", 149 | "Count of exceptions by object type.", 150 | ["type"], 151 | namespace=NAMESPACE, 152 | ) 153 | self.exceptions_by_view = self.register_metric( 154 | Counter, 155 | "django_http_exceptions_total_by_view", 156 | "Count of exceptions by view.", 157 | ["view"], 158 | namespace=NAMESPACE, 159 | ) 
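        # Subclasses can override register_metric() above to append
        # application-specific label names; the middleware classes below pick
        # up such a subclass through their `metrics_cls` attribute (an
        # illustrative sketch is given in the comments at the end of this file).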
160 | 161 | 162 | class PrometheusBeforeMiddleware(MiddlewareMixin): 163 | """Monitoring middleware that should run before other middlewares.""" 164 | 165 | metrics_cls = Metrics 166 | 167 | def __init__(self, *args, **kwargs): 168 | super().__init__(*args, **kwargs) 169 | self.metrics = self.metrics_cls.get_instance() 170 | 171 | def process_request(self, request): 172 | self.metrics.requests_total.inc() 173 | request.prometheus_before_middleware_event = Time() 174 | 175 | def process_response(self, request, response): 176 | self.metrics.responses_total.inc() 177 | if hasattr(request, "prometheus_before_middleware_event"): 178 | self.metrics.requests_latency_before.observe(TimeSince(request.prometheus_before_middleware_event)) 179 | else: 180 | self.metrics.requests_unknown_latency_before.inc() 181 | return response 182 | 183 | 184 | class PrometheusAfterMiddleware(MiddlewareMixin): 185 | """Monitoring middleware that should run after other middlewares.""" 186 | 187 | metrics_cls = Metrics 188 | 189 | def __init__(self, *args, **kwargs): 190 | super().__init__(*args, **kwargs) 191 | self.metrics = self.metrics_cls.get_instance() 192 | 193 | def _transport(self, request): 194 | return "https" if request.is_secure() else "http" 195 | 196 | def _method(self, request): 197 | m = request.method 198 | if m not in ( 199 | "GET", 200 | "HEAD", 201 | "POST", 202 | "PUT", 203 | "DELETE", 204 | "TRACE", 205 | "OPTIONS", 206 | "CONNECT", 207 | "PATCH", 208 | ): 209 | return "" 210 | return m 211 | 212 | def label_metric(self, metric, request, response=None, **labels): 213 | return metric.labels(**labels) if labels else metric 214 | 215 | def process_request(self, request): 216 | transport = self._transport(request) 217 | method = self._method(request) 218 | self.label_metric(self.metrics.requests_by_method, request, method=method).inc() 219 | self.label_metric(self.metrics.requests_by_transport, request, transport=transport).inc() 220 | 221 | # Mimic the behaviour of the deprecated "Request.is_ajax()" method. 
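        # (jQuery and some other JavaScript libraries set this header on
        # XMLHttpRequest calls.)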
222 | if request.headers.get("x-requested-with") == "XMLHttpRequest": 223 | self.label_metric(self.metrics.requests_ajax, request).inc() 224 | 225 | content_length = int(request.headers.get("content-length") or 0) 226 | self.label_metric(self.metrics.requests_body_bytes, request).observe(content_length) 227 | request.prometheus_after_middleware_event = Time() 228 | 229 | def _get_view_name(self, request): 230 | view_name = "" 231 | if hasattr(request, "resolver_match"): 232 | if request.resolver_match is not None: 233 | if request.resolver_match.view_name is not None: 234 | view_name = request.resolver_match.view_name 235 | return view_name 236 | 237 | def process_view(self, request, view_func, *view_args, **view_kwargs): 238 | transport = self._transport(request) 239 | method = self._method(request) 240 | if hasattr(request, "resolver_match"): 241 | name = request.resolver_match.view_name or "" 242 | self.label_metric( 243 | self.metrics.requests_by_view_transport_method, 244 | request, 245 | view=name, 246 | transport=transport, 247 | method=method, 248 | ).inc() 249 | 250 | def process_template_response(self, request, response): 251 | if hasattr(response, "template_name"): 252 | self.label_metric( 253 | self.metrics.responses_by_templatename, 254 | request, 255 | response=response, 256 | templatename=str(response.template_name), 257 | ).inc() 258 | return response 259 | 260 | def process_response(self, request, response): 261 | method = self._method(request) 262 | name = self._get_view_name(request) 263 | status = str(response.status_code) 264 | self.label_metric(self.metrics.responses_by_status, request, response, status=status).inc() 265 | self.label_metric( 266 | self.metrics.responses_by_status_view_method, 267 | request, 268 | response, 269 | status=status, 270 | view=name, 271 | method=method, 272 | ).inc() 273 | if hasattr(response, "charset"): 274 | self.label_metric( 275 | self.metrics.responses_by_charset, 276 | request, 277 | response, 278 | charset=str(response.charset), 279 | ).inc() 280 | if hasattr(response, "streaming") and response.streaming: 281 | self.label_metric(self.metrics.responses_streaming, request, response).inc() 282 | if hasattr(response, "content"): 283 | self.label_metric(self.metrics.responses_body_bytes, request, response).observe(len(response.content)) 284 | if hasattr(request, "prometheus_after_middleware_event"): 285 | self.label_metric( 286 | self.metrics.requests_latency_by_view_method, 287 | request, 288 | response, 289 | view=self._get_view_name(request), 290 | method=request.method, 291 | ).observe(TimeSince(request.prometheus_after_middleware_event)) 292 | else: 293 | self.label_metric(self.metrics.requests_unknown_latency, request, response).inc() 294 | return response 295 | 296 | def process_exception(self, request, exception): 297 | self.label_metric(self.metrics.exceptions_by_type, request, type=type(exception).__name__).inc() 298 | if hasattr(request, "resolver_match"): 299 | name = request.resolver_match.view_name or "" 300 | self.label_metric(self.metrics.exceptions_by_view, request, view=name).inc() 301 | if hasattr(request, "prometheus_after_middleware_event"): 302 | self.label_metric( 303 | self.metrics.requests_latency_by_view_method, 304 | request, 305 | view=self._get_view_name(request), 306 | method=request.method, 307 | ).observe(TimeSince(request.prometheus_after_middleware_event)) 308 | else: 309 | self.label_metric(self.metrics.requests_unknown_latency, request).inc() 310 | 
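
# Illustrative sketch (not part of this module): the README section "Adding
# custom labels to middleware (request/response) metrics" describes extending
# the classes above from your own application code. A minimal, hypothetical
# example, using a made-up "tenant" label and request attribute, could look
# like this:
#
#     from django_prometheus.middleware import (
#         Metrics,
#         PrometheusAfterMiddleware,
#         PrometheusBeforeMiddleware,
#     )
#
#     class TenantMetrics(Metrics):
#         def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs):
#             # Only extend metrics that already carry labels; unlabelled
#             # metrics are incremented directly by the middlewares and must
#             # stay label-free.
#             if labelnames:
#                 labelnames = [*labelnames, "tenant"]
#             return super().register_metric(metric_cls, name, documentation, labelnames=labelnames, **kwargs)
#
#     class AppMetricsBeforeMiddleware(PrometheusBeforeMiddleware):
#         metrics_cls = TenantMetrics
#
#     class AppMetricsAfterMiddleware(PrometheusAfterMiddleware):
#         metrics_cls = TenantMetrics
#
#         def label_metric(self, metric, request, response=None, **labels):
#             if labels:
#                 labels = {"tenant": getattr(request, "tenant", "unknown"), **labels}
#             return super().label_metric(metric, request, response=response, **labels)
#
# Both subclasses should then replace the stock middlewares in
# settings.MIDDLEWARE so that they share the same TenantMetrics instance and
# the metrics are only registered once.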
--------------------------------------------------------------------------------