├── CODEOWNERS ├── docs ├── history.rst ├── contributing.rst ├── index.rst ├── Makefile ├── usage.rst ├── conf.py ├── testing.rst ├── backends.rst ├── metricsoverview.rst └── filters.rst ├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ └── main.yml ├── .gitignore ├── CONTRIBUTORS ├── .readthedocs.yaml ├── src └── markus │ ├── pytest_plugin.py │ ├── __init__.py │ ├── backends │ ├── __init__.py │ ├── cloudwatch.py │ ├── statsd.py │ ├── datadog.py │ └── logging.py │ ├── utils.py │ ├── filters.py │ ├── testing.py │ └── main.py ├── justfile ├── CONTRIBUTING.rst ├── tests ├── test_utils.py ├── test_cloudwatch.py ├── test_metrics.py ├── test_statsd.py ├── test_datadog.py ├── test_logging.py ├── test_filters.py └── test_testing.py ├── pyproject.toml ├── README.rst ├── HISTORY.rst └── LICENSE /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @willkg 2 | -------------------------------------------------------------------------------- /docs/history.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../HISTORY.rst 2 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Default owner for everything in this repository 2 | * @willkg 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .cache/ 3 | __pycache__ 4 | markus.egg-info/ 5 | docs/_build/ 6 | .tox/ 7 | build/ 8 | dist/ 9 | .eggs/ 10 | .python-version 11 | uv.lock 12 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | CONTRIBUTORS 2 | ============ 3 | 4 | Maintainer: 5 | 6 | - Will Kahn-Greene 7 | 8 | Contributors: 9 | 10 | * Javier Ruere 11 | * Peter Bengtsson 12 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "monthly" 8 | cooldown: 9 | default-days: 7 10 | rebase-strategy: "disabled" 11 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | 3 | Contents 4 | ======== 5 | 6 | .. 
toctree:: 7 | :maxdepth: 2 8 | :caption: Contents: 9 | 10 | usage 11 | metricsoverview 12 | backends 13 | filters 14 | testing 15 | history 16 | contributing 17 | 18 | 19 | Indices and tables 20 | ================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # .readthedocs.yaml 3 | # Read the Docs configuration file 4 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 5 | version: 2 6 | 7 | sphinx: 8 | configuration: docs/conf.py 9 | 10 | build: 11 | os: ubuntu-24.04 12 | tools: 13 | python: "3.10" 14 | 15 | python: 16 | install: 17 | - method: pip 18 | path: . 19 | extra_requirements: 20 | - datadog 21 | - dev 22 | - statsd 23 | -------------------------------------------------------------------------------- /src/markus/pytest_plugin.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | from markus.testing import MetricsMock 6 | 7 | import pytest 8 | 9 | 10 | pytest.register_assert_rewrite("markus.testing") 11 | 12 | 13 | @pytest.fixture 14 | def metricsmock() -> MetricsMock: 15 | with MetricsMock() as mm: 16 | yield mm 17 | -------------------------------------------------------------------------------- /src/markus/__init__.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | from importlib.metadata import ( 6 | version as importlib_version, 7 | PackageNotFoundError, 8 | ) 9 | 10 | from markus.main import configure, get_metrics # noqa 11 | 12 | try: 13 | __version__ = importlib_version("markus") 14 | except PackageNotFoundError: 15 | __version__ = "unknown" 16 | 17 | 18 | INCR = "incr" 19 | GAUGE = "gauge" 20 | TIMING = "timing" 21 | HISTOGRAM = "histogram" 22 | 23 | __all__ = ["configure", "get_metrics", "INCR", "GAUGE", "TIMING", "HISTOGRAM"] 24 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = Markus 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile view 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | 22 | view: 23 | gnome-open _build/html/index.html 24 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: CI 3 | 4 | on: 5 | push: 6 | branches: 7 | - 'main' 8 | pull_request: 9 | branches: 10 | - 'main' 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | strategy: 18 | matrix: 19 | python-version: ['3.10', '3.11', '3.12', '3.13', '3.14'] 20 | 21 | name: Python ${{ matrix.python-version}} 22 | steps: 23 | - uses: actions/checkout@v5.0.0 24 | 25 | - name: Set up Python 26 | uses: actions/setup-python@v6.1.0 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | 30 | - name: Update pip and install dev requirements 31 | run: | 32 | python -m pip install --upgrade pip 33 | pip install '.[dev]' 34 | 35 | - name: Test 36 | run: tox 37 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | # Relative to docs/ directory 2 | sphinxbuild := "../.venv/bin/sphinx-build" 3 | 4 | @_default: 5 | just --list 6 | 7 | # Build a development environment 8 | devenv: 9 | uv sync --extra dev --extra datadog --extra statsd --refresh --upgrade 10 | 11 | # Run tests and linting 12 | test *args: devenv 13 | uv run tox {{args}} 14 | 15 | # Format files 16 | format: devenv 17 | uv run tox exec -e py310-lint -- ruff format 18 | 19 | # Lint files 20 | lint: devenv 21 | uv run tox -e py310-lint 22 | 23 | # Build docs 24 | docs: devenv 25 | SPHINXBUILD={{sphinxbuild}} make -e -C docs/ clean 26 | SPHINXBUILD={{sphinxbuild}} make -e -C docs/ doctest 27 | SPHINXBUILD={{sphinxbuild}} make -e -C docs/ html 28 | 29 | # Wipe dev environment and build artifacts 30 | clean: 31 | rm -rf .venv uv.lock 32 | rm -rf build dist src/markus.egg-info .tox 33 | rm -rf docs/_build/* 34 | find src/markus/ tests/ -name __pycache__ | xargs rm -rf 35 | find src/markus/ tests/ -name '*.pyc' | xargs rm -rf 36 | 37 | # Build files for relase 38 | build: devenv 39 | rm -rf build/ dist/ 40 | uv run python -m build 41 | uv run twine check dist/* 42 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | Usage 3 | ===== 4 | 5 | Markus is used similar to Python's built-in logging library. To use Markus, you 6 | need to do three things: 7 | 8 | 1. Configure Markus' backends using :py:func:`markus.configure`. 9 | 10 | 2. For each Python module or class or however you want to organize it, you use 11 | :py:func:`markus.get_metrics` to get a 12 | :py:class:`markus.main.MetricsInterface`. 13 | 14 | 3. Use the various metrics reporting methods on the 15 | :py:class:`markus.main.MetricsInterface`. 16 | 17 | 18 | ``markus.configure`` 19 | ==================== 20 | 21 | .. autofunction:: markus.configure 22 | 23 | 24 | ``markus.get_metrics`` 25 | ====================== 26 | 27 | .. autofunction:: markus.get_metrics 28 | 29 | 30 | ``markus.main.MetricsRecord`` 31 | ============================= 32 | 33 | .. autoclass:: markus.main.MetricsRecord 34 | :members: 35 | :member-order: bysource 36 | 37 | 38 | ``markus.main.MetricsInterface`` 39 | ================================ 40 | 41 | .. 
autoclass:: markus.main.MetricsInterface 42 | :members: 43 | :member-order: bysource 44 | 45 | 46 | ``markus.utils`` 47 | ================ 48 | 49 | .. automodule:: markus.utils 50 | :members: 51 | -------------------------------------------------------------------------------- /src/markus/backends/__init__.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | 6 | class BackendBase: 7 | """Markus Backend superclass that defines API backends should follow.""" 8 | 9 | def __init__(self, options=None, filters=None): 10 | """Implement this. 11 | 12 | :arg dict options: user-specified options 13 | :arg list filters: filters to apply to this backend 14 | 15 | """ 16 | self.options = options or {} 17 | self.filters = filters or [] 18 | 19 | def _filter(self, record): 20 | for metrics_filter in self.filters: 21 | record = metrics_filter.filter(record) 22 | if record is None: 23 | return 24 | return record 25 | 26 | def emit_to_backend(self, record): 27 | """Emit record for backend handling. 28 | 29 | :arg MetricsRecord record: the record to be emitted 30 | 31 | """ 32 | record = self._filter(record) 33 | if record is not None: 34 | self.emit(record) 35 | 36 | def emit(self, record): 37 | """Emit record to backend. 38 | 39 | Implement this in your backend. 40 | 41 | :arg MetricsRecord record: the record to be published 42 | 43 | """ 44 | raise NotImplementedError 45 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | import os 7 | import sys 8 | 9 | cwd = os.getcwd() 10 | project_root = os.path.dirname(cwd) 11 | src_root = os.path.join(project_root, "src") 12 | sys.path.insert(0, project_root) 13 | 14 | import markus # noqa 15 | 16 | 17 | # -- Project information ----------------------------------------------------- 18 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 19 | 20 | project = "Markus" 21 | copyright = "2017-2024, Will Kahn-Greene" 22 | author = "Will Kahn-Greene" 23 | 24 | version = markus.__version__ 25 | release = version 26 | 27 | 28 | # -- General configuration --------------------------------------------------- 29 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 30 | 31 | extensions = ["sphinx.ext.doctest", "sphinx.ext.autodoc"] 32 | 33 | templates_path = ["_templates"] 34 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 35 | 36 | 37 | # -- autodoc configuration 38 | autoclass_content = "both" 39 | autodoc_typehints = "description" 40 | 41 | 42 | # -- Options for HTML output ------------------------------------------------- 43 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 44 | 45 | html_theme = "alabaster" 46 | html_static_path = ["_static"] 47 | 48 | html_sidebars = { 49 | "**": ["about.html", "navigation.html", "relations.html", "searchbox.html"] 50 | } 51 | -------------------------------------------------------------------------------- /docs/testing.rst: -------------------------------------------------------------------------------- 1 | =================== 2 | Testing with Markus 3 | =================== 4 | 5 | .. contents:: 6 | :local: 7 | 8 | 9 | Asserting things about emitted metrics 10 | ====================================== 11 | 12 | Markus comes with a :py:class:`markus.testing.MetricsMock` that makes it easier 13 | to write tests and assert things about generated metrics. 14 | 15 | When activating the :py:class:`markus.testing.MetricsMock` context, it becomes 16 | a backend for all emitted metrics. It'll capture metrics emitted. You can 17 | assert things about the metrics collected. 18 | 19 | There are a set of ``assert_*`` helper methods for simplifying that code, but 20 | you can also use :py:meth:`markus.testing.MetricsMock.filter_records` 21 | directly. 22 | 23 | 24 | pytest plugin 25 | ============= 26 | 27 | Markus includes a pytest plugin with a ``metricsmock`` pytest fixture. It's 28 | implemented like this: 29 | 30 | .. code-block:: 31 | 32 | from markus.testing import MetricsMock 33 | 34 | import pytest 35 | 36 | @pytest.fixture 37 | def metricsmock() -> MetricsMock: 38 | with MetricsMock() as mm: 39 | yield mm 40 | 41 | 42 | Testing against tag values 43 | ========================== 44 | 45 | You can assert against tags ignoring the actual values using 46 | :py:class:`markus.testing.AnyTagValue`. 47 | 48 | .. code-block:: python 49 | 50 | from markus.testing import MetricsMock 51 | 52 | def test_something(): 53 | with MetricsMock() as mm: 54 | # Do things that might record metrics here 55 | 56 | # Assert something about the metrics recorded 57 | mm.assert_incr(stat="some.random.key", value=1, tags=[AnyTagValue("host")]) 58 | 59 | 60 | Test module API 61 | =============== 62 | 63 | .. 
automodule:: markus.testing 64 | :members: 65 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributing 3 | ============ 4 | 5 | How to help 6 | =========== 7 | 8 | Markus is used in production in a few places, but I'd love to know how 9 | you use Markus and how Markus' API works for you. Is it easy? Are there 10 | thing that are hard? 11 | 12 | If you encounter any bugs, please write up an issue in the issue tracker. 13 | 14 | 15 | Writing up issues 16 | ================= 17 | 18 | Please write up issues in the 19 | `issue tracker `__. 20 | 21 | If the issue is about a bug in Markus, please specify: 22 | 23 | 1. the version of Markus that you're using 24 | 2. the version of Python that you're using 25 | 3. the traceback and error message if applicable 26 | 27 | These things will help me figure out the problem faster. 28 | 29 | 30 | Install for hacking 31 | =================== 32 | 33 | Requirements: 34 | 35 | * `uv `__ 36 | * `just `__ 37 | 38 | Run:: 39 | 40 | # Clone the repository 41 | $ git clone https://github.com/mozilla-services/markus 42 | 43 | # Create virtual environment and install Markus and dev requirements 44 | $ just devenv 45 | 46 | # View project commands 47 | $ just 48 | 49 | 50 | Documentation 51 | ============= 52 | 53 | Documentation is written in reStructuredText and is in the ``docs/`` 54 | directory. We use `Sphinx `__ 55 | to build documentation. 56 | 57 | 58 | Tests 59 | ===== 60 | 61 | Test environments are defined with 62 | `tox `_. This will run all tests across 63 | all supported Python versions. 64 | 65 | Tests are implemented with `pytest `__. 66 | 67 | Tests are located in the ``tests/`` directory. 68 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
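"""Tests for :py:func:`markus.utils.generate_tag`.

Illustrative sketch of the sanitization behavior these tests exercise
(expected results mirror the parametrized cases below)::

    from markus.utils import generate_tag

    generate_tag("ABC", "DEF")   # "abc:def"  -- lowercased, joined with ":"
    generate_tag("a&b")          # "a_b"      -- bad characters become "_"
    generate_tag("host")         # "host_"    -- reserved words get "_" appended
"""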
4 | 5 | import pytest 6 | 7 | from markus.utils import generate_tag 8 | 9 | 10 | @pytest.mark.parametrize( 11 | "key, value, expected", 12 | [ 13 | ("", None, ""), 14 | ("", "", ""), 15 | ("a", None, "a"), 16 | # Key and value are concatenated using : 17 | ("a", "b", "a:b"), 18 | # First character must be a letter 19 | ("1", None, "a1"), 20 | # Test good characters 21 | ( 22 | "abcdefghijklmnopqrstuvwxyz0123456789-_/.", 23 | None, 24 | "abcdefghijklmnopqrstuvwxyz0123456789-_/.", 25 | ), 26 | # Ok with unicode 27 | ("joe\u018ajoe", None, "joe_joe"), 28 | # Test bad characters get converted to _ 29 | ("a&b", None, "a_b"), 30 | ("email", "foo@example.com", "email:foo_example.com"), 31 | # Tags are lowercased 32 | ("ABC", "DEF", "abc:def"), 33 | # Long tags are truncated to 200 characters 34 | ("a" * 201, None, "a" * 200), 35 | # device host, and source all get "_" appended to the end 36 | ("device", None, "device_"), 37 | ("host", None, "host_"), 38 | ("source", None, "source_"), 39 | ], 40 | ) 41 | def test_generate_tag(key, value, expected): 42 | assert generate_tag(key, value) == expected 43 | 44 | 45 | def test_generate_tag_bad_data(): 46 | with pytest.raises(ValueError) as exc_info: 47 | generate_tag(42) 48 | 49 | assert str(exc_info.value) == "key must be a string type, but got 42 instead" 50 | 51 | with pytest.raises(ValueError) as exc_info: 52 | generate_tag("key", 42) 53 | 54 | assert ( 55 | str(exc_info.value) == "value must be None or a string type, but got 42 instead" 56 | ) 57 | -------------------------------------------------------------------------------- /src/markus/backends/cloudwatch.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | import time 6 | 7 | from markus.backends import BackendBase 8 | 9 | 10 | class CloudwatchMetrics(BackendBase): 11 | """Publish metrics to stdout for Cloudwatch. 12 | 13 | This prints to stdout in this format:: 14 | 15 | MONITORING|unix_epoch_timestamp|value|metric_type|my.metric.name|#tag1:value,tag2 16 | 17 | It lets you generate metrics for reading/consuming in Cloudwatch. 18 | 19 | For example, Datadog can consume metrics formatted this way from Cloudwatch 20 | allowing you to generate metrics in AWS Lambda functions and have them show 21 | up in Datadog. 22 | 23 | To use, add this to your backends list:: 24 | 25 | { 26 | "class": "markus.backends.cloudwatch.CloudwatchMetrics", 27 | } 28 | 29 | This backend doesn"t take any options. 30 | 31 | .. Note:: 32 | 33 | Datadog's Cloudwatch through Lambda logs supports four metrics types: 34 | count, gauge, histogram, and check. Thus all timing metrics are treated 35 | as histogram metrics. 36 | 37 | .. 
seealso:: 38 | 39 | https://docs.datadoghq.com/integrations/amazon_lambda/ 40 | 41 | https://docs.datadoghq.com/developers/metrics/#metric-names 42 | 43 | """ 44 | 45 | def emit(self, record): 46 | stat_type_to_kind = { 47 | "incr": "count", 48 | "gauge": "gauge", 49 | "timing": "histogram", 50 | "histogram": "histogram", 51 | } 52 | print( 53 | "MONITORING|%(timestamp)s|%(value)s|%(kind)s|%(stat)s|%(tags)s" 54 | % { 55 | "timestamp": int(time.time()), 56 | "kind": stat_type_to_kind[record.stat_type], 57 | "stat": record.key, 58 | "value": record.value, 59 | "tags": ("#%s" % ",".join(record.tags)) if record.tags else "", 60 | } 61 | ) 62 | -------------------------------------------------------------------------------- /docs/backends.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Backends 3 | ======== 4 | 5 | Markus comes with several backends. You can also write your own. 6 | 7 | .. contents:: 8 | :local: 9 | 10 | 11 | Logging metrics 12 | =============== 13 | 14 | .. autoclass:: markus.backends.logging.LoggingMetrics 15 | :members: 16 | :special-members: 17 | 18 | 19 | .. autoclass:: markus.backends.logging.LoggingRollupMetrics 20 | :members: 21 | :special-members: 22 | 23 | 24 | Statsd metrics 25 | =============== 26 | 27 | .. autoclass:: markus.backends.statsd.StatsdMetrics 28 | :members: 29 | :special-members: 30 | 31 | 32 | Datadog metrics 33 | =============== 34 | 35 | .. autoclass:: markus.backends.datadog.DatadogMetrics 36 | :members: 37 | :special-members: 38 | 39 | 40 | Cloudwatch metrics 41 | ================== 42 | 43 | .. autoclass:: markus.backends.cloudwatch.CloudwatchMetrics 44 | :members: 45 | :special-members: 46 | 47 | 48 | Writing your own 49 | ================ 50 | 51 | 1. Subclass ``markus.backends.BackendBase``. 52 | 53 | 2. Implement ``__init__``. It takes a single "options" dict with stuff the 54 | user configured. 55 | 56 | 3. Implement ``emit`` and have it do whatever is appropriate in the context of 57 | your backend. 58 | 59 | 60 | .. autoclass:: markus.backends.BackendBase 61 | :members: __init__, emit 62 | 63 | 64 | The records that get emitted are :py:class:`markus.main.MetricsRecord` instances. 65 | 66 | 67 | Here's an example backend that prints metrics to stdout: 68 | 69 | .. doctest:: 70 | 71 | >>> import markus 72 | >>> from markus.backends import BackendBase 73 | >>> from markus.main import MetricsRecord 74 | 75 | >>> class StdoutMetrics(BackendBase): 76 | ... def __init__(self, options=None, filters=None): 77 | ... options = options or {} 78 | ... self.filters = filters or [] 79 | ... self.prefix = options.get('prefix', '') 80 | ... 81 | ... def emit(self, record): 82 | ... print('%s %s %s %s tags=%s' % ( 83 | ... record.stat_type, 84 | ... self.prefix, 85 | ... record.key, 86 | ... record.value, 87 | ... record.tags 88 | ... )) 89 | ... 90 | >>> markus.configure([{'class': StdoutMetrics, 'options': {'prefix': 'foo'}}], raise_errors=True) 91 | 92 | >>> metrics = markus.get_metrics('test') 93 | >>> metrics.incr('key1') 94 | incr foo test.key1 1 tags=[] 95 | 96 | 97 | .. testcleanup:: * 98 | 99 | import markus 100 | markus.configure([]) 101 | -------------------------------------------------------------------------------- /tests/test_cloudwatch.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. 
If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | import datetime 6 | 7 | import pytest 8 | 9 | from markus.backends.cloudwatch import CloudwatchMetrics 10 | from markus.main import MetricsFilter, MetricsRecord 11 | 12 | 13 | class TestCloudwatch: 14 | @pytest.fixture(autouse=True) 15 | def set_time(self, time_machine): 16 | time_machine.move_to( 17 | datetime.datetime(2017, 3, 6, 16, 30, 0, tzinfo=datetime.timezone.utc), 18 | tick=False, 19 | ) 20 | 21 | def test_incr(self, capsys): 22 | rec = MetricsRecord("incr", key="foo", value=10, tags=["key1:val", "key2:val"]) 23 | ddcm = CloudwatchMetrics() 24 | ddcm.emit_to_backend(rec) 25 | out, err = capsys.readouterr() 26 | assert out == "MONITORING|1488817800|10|count|foo|#key1:val,key2:val\n" 27 | assert err == "" 28 | 29 | def test_gauge(self, capsys): 30 | rec = MetricsRecord( 31 | "gauge", key="foo", value=100, tags=["key1:val", "key2:val"] 32 | ) 33 | ddcm = CloudwatchMetrics() 34 | ddcm.emit_to_backend(rec) 35 | out, err = capsys.readouterr() 36 | assert out == "MONITORING|1488817800|100|gauge|foo|#key1:val,key2:val\n" 37 | assert err == "" 38 | 39 | def test_timing(self, capsys): 40 | # .timing is a histogram 41 | rec = MetricsRecord( 42 | "timing", key="foo", value=100, tags=["key1:val", "key2:val"] 43 | ) 44 | ddcm = CloudwatchMetrics() 45 | ddcm.emit_to_backend(rec) 46 | out, err = capsys.readouterr() 47 | assert out == "MONITORING|1488817800|100|histogram|foo|#key1:val,key2:val\n" 48 | assert err == "" 49 | 50 | def test_histogram(self, capsys): 51 | rec = MetricsRecord( 52 | "histogram", key="foo", value=100, tags=["key1:val", "key2:val"] 53 | ) 54 | ddcm = CloudwatchMetrics() 55 | ddcm.emit_to_backend(rec) 56 | out, err = capsys.readouterr() 57 | assert out == "MONITORING|1488817800|100|histogram|foo|#key1:val,key2:val\n" 58 | assert err == "" 59 | 60 | def test_filters(self, capsys): 61 | class BlueFilter(MetricsFilter): 62 | def filter(self, record): 63 | if "blue" not in record.key: 64 | return 65 | return record 66 | 67 | ddcm = CloudwatchMetrics(filters=[BlueFilter()]) 68 | ddcm.emit_to_backend(MetricsRecord("incr", key="foo", value=1, tags=[])) 69 | ddcm.emit_to_backend(MetricsRecord("incr", key="foo.blue", value=2, tags=[])) 70 | out, err = capsys.readouterr() 71 | assert out == "MONITORING|1488817800|2|count|foo.blue|\n" 72 | assert err == "" 73 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "markus" 3 | description = "Metrics system for generating statistics about your app" 4 | version = "5.2.0" 5 | readme = "README.rst" 6 | keywords = ["metrics", "datadog", "statsd"] 7 | authors = [{name = "Will Kahn-Greene"}] 8 | license = "MPL-2.0" 9 | requires-python = ">=3.10" 10 | dependencies = [] 11 | classifiers = [ 12 | "Development Status :: 5 - Production/Stable", 13 | "Intended Audience :: Developers", 14 | "Natural Language :: English", 15 | "Programming Language :: Python :: 3 :: Only", 16 | "Programming Language :: Python :: 3.10", 17 | "Programming Language :: Python :: 3.11", 18 | "Programming Language :: Python :: 3.12", 19 | "Programming Language :: Python :: 3.13", 20 | "Programming Language :: Python :: 3.14", 21 | ] 22 | urls.Homepage = "https://github.com/mozilla-services/markus" 23 | urls.Documentation = "https://markus.readthedocs.io/" 24 | urls.Source = 
"https://github.com/mozilla-services/markus/" 25 | urls.Issues = "https://github.com/mozilla-services/markus/issues" 26 | 27 | [project.entry-points.pytest11] 28 | markus = "markus.pytest_plugin" 29 | 30 | [project.optional-dependencies] 31 | datadog = ["datadog>=0.45.0"] 32 | statsd = ["statsd"] 33 | dev = [ 34 | "build", 35 | "pytest", 36 | "ruff", 37 | "Sphinx", 38 | "sphinx_rtd_theme", 39 | "time-machine", 40 | "tomli>=1.1.0; python_version < '3.11'", 41 | "tox", 42 | "tox-gh-actions", 43 | "tox-uv", 44 | "twine", 45 | "wheel", 46 | ] 47 | 48 | 49 | [build-system] 50 | requires = ["setuptools>=80", "setuptools-scm[simple]>=8"] 51 | 52 | 53 | [tool.ruff] 54 | line-length = 88 55 | target-version = "py310" 56 | src = ["src"] 57 | 58 | [tool.ruff.lint] 59 | # Enable pycodestyle (E), pyflakes (F), and bugbear (B) rules 60 | select = ["E", "F", "B"] 61 | ignore = ["E501"] 62 | 63 | [tool.ruff.lint.flake8-quotes] 64 | docstring-quotes = "double" 65 | 66 | 67 | [tool.pytest.ini_options] 68 | filterwarnings = [ 69 | "error", 70 | # datadog kicks up a DeprecationWarning for collections use. 71 | "ignore:.*ABCs:DeprecationWarning", 72 | ] 73 | 74 | 75 | [tool.tox] 76 | legacy_tox_ini = """ 77 | [tox] 78 | envlist = 79 | py310 80 | py310-lint 81 | py311 82 | py312 83 | py313 84 | py314 85 | uv_python_preferences = only-managed 86 | 87 | [gh-actions] 88 | python = 89 | 3.10: py310 90 | 3.11: py311 91 | 3.12: py312 92 | 3.13: py313 93 | 3.13: py314 94 | 95 | [testenv] 96 | extras = dev,datadog,statsd 97 | commands = 98 | pytest {posargs} tests/ 99 | pytest --doctest-modules --pyargs markus 100 | 101 | [testenv:py310-lint] 102 | allowlist_externals = ruff 103 | basepython = python3.10 104 | changedir = {toxinidir} 105 | commands = 106 | ruff format --check src tests 107 | ruff check src tests 108 | """ 109 | -------------------------------------------------------------------------------- /src/markus/utils.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | import re 6 | 7 | 8 | NONE_TYPE = type(None) 9 | 10 | # regexp that matches characters that can't be in tags 11 | BAD_TAG_CHAR_REGEXP = re.compile(r"[^0-9a-zA-Z\._\-/]") 12 | 13 | 14 | def generate_tag(key, value=None): 15 | """Generate a tag for use with the tag backends. 16 | 17 | The key and value (if there is one) are sanitized according to the 18 | following rules: 19 | 20 | 1. after the first character, all characters must be alphanumeric, 21 | underscore, minus, period, or slash--invalid characters are converted 22 | to "_" 23 | 2. lowercase 24 | 25 | If a value is provided, the final tag is `key:value`. 26 | 27 | The final tag must start with a letter. If it doesn't, an "a" is prepended. 28 | 29 | The final tag is truncated to 200 characters. 30 | 31 | If the final tag is "device", "host", or "source", then a "_" will be 32 | appended the end. 
33 | 34 | :arg str key: the key to use 35 | :arg str value: the value (if any) 36 | 37 | :returns: the final tag 38 | 39 | Examples: 40 | 41 | >>> from markus.utils import generate_tag 42 | >>> generate_tag("yellow") 43 | 'yellow' 44 | >>> generate_tag("rule", "is_yellow") 45 | 'rule:is_yellow' 46 | 47 | Some examples of sanitizing: 48 | 49 | >>> from markus.utils import generate_tag 50 | >>> generate_tag("rule", "THIS$#$%^!@IS[]{$}GROSS!") 51 | 'rule:this_______is_____gross_' 52 | >>> generate_tag("host") 53 | 'host_' 54 | 55 | Example using it with :py:meth:`markus.main.MetricsInterface.incr`: 56 | 57 | >>> import markus 58 | >>> from markus.utils import generate_tag 59 | >>> mymetrics = markus.get_metrics(__name__) 60 | >>> mymetrics.incr( 61 | ... "somekey", 62 | ... value=1, 63 | ... tags=[generate_tag("rule", "is_yellow")] 64 | ... ) 65 | 66 | """ 67 | # Verify the types 68 | if not isinstance(key, str): 69 | raise ValueError("key must be a string type, but got %r instead" % key) 70 | 71 | if not isinstance(value, (str, NONE_TYPE)): 72 | raise ValueError( 73 | "value must be None or a string type, but got %r instead" % value 74 | ) 75 | 76 | # Sanitize the key 77 | key = BAD_TAG_CHAR_REGEXP.sub("_", key).strip() 78 | 79 | # Build the tag 80 | if value is None or not value.strip(): 81 | tag = key 82 | else: 83 | value = BAD_TAG_CHAR_REGEXP.sub("_", value).strip() 84 | tag = "%s:%s" % (key, value) 85 | 86 | if tag and not tag[0].isalpha(): 87 | tag = "a" + tag 88 | 89 | # Lowercase and truncate 90 | tag = tag.lower()[:200] 91 | 92 | # Add _ if it's a reserved word 93 | if tag in ["device", "host", "source"]: 94 | tag = tag + "_" 95 | 96 | return tag 97 | -------------------------------------------------------------------------------- /src/markus/backends/statsd.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | 6 | import logging 7 | 8 | from statsd import StatsClient 9 | from markus.backends import BackendBase 10 | 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class StatsdMetrics(BackendBase): 16 | """Use pystatsd client for statsd pings. 17 | 18 | This requires the pystatsd module and requirements to be installed. 19 | To install those bits, do:: 20 | 21 | $ pip install markus[statsd] 22 | 23 | 24 | To use, add this to your backends list:: 25 | 26 | { 27 | "class": "markus.backends.statsd.StatsdMetrics", 28 | "options": { 29 | "statsd_host": "statsd.example.com", 30 | "statsd_port": 8125, 31 | "statsd_prefix": None, 32 | "statsd_maxudpsize": 512, 33 | } 34 | } 35 | 36 | 37 | Options: 38 | 39 | * statsd_host: the hostname for the statsd daemon to connect to 40 | 41 | Defaults to ``"localhost"``. 42 | 43 | * statsd_port: the port for the statsd daemon to connect to 44 | 45 | Defaults to ``8125``. 46 | 47 | * statsd_prefix: the prefix to use for statsd data 48 | 49 | Defaults to ``None``. 50 | 51 | * statsd_maxudpsize: the maximum data to send per packet 52 | 53 | Defaults to ``512``. 54 | 55 | .. Note:: 56 | 57 | The StatsdMetrics backend does not support tags. All tags will be 58 | dropped when metrics are emitted by this backend. 59 | 60 | .. Note:: 61 | 62 | statsd doesn't support histograms so histogram metrics are reported as 63 | timing metrics. 64 | 65 | .. 
seealso:: 66 | 67 | https://statsd.readthedocs.io/en/latest/configure.html 68 | 69 | """ 70 | 71 | def __init__(self, options=None, filters=None): 72 | options = options or {} 73 | self.host = options.get("statsd_host", "localhost") 74 | self.port = options.get("statsd_port", 8125) 75 | self.prefix = options.get("statsd_prefix") 76 | self.maxudpsize = options.get("statsd_maxudpsize", 512) 77 | 78 | self.filters = filters or [] 79 | 80 | self.client = self._get_client( 81 | self.host, self.port, self.prefix, self.maxudpsize 82 | ) 83 | logger.debug( 84 | "%s configured: %s:%s %s", 85 | self.__class__.__name__, 86 | self.host, 87 | self.port, 88 | self.prefix, 89 | ) 90 | 91 | def _get_client(self, host, port, prefix, maxudpsize): 92 | return StatsClient(host=host, port=port, prefix=prefix, maxudpsize=maxudpsize) 93 | 94 | def emit(self, record): 95 | stat_type = record.stat_type 96 | if stat_type == "incr": 97 | self.client.incr(stat=record.key, count=record.value) 98 | elif stat_type == "gauge": 99 | self.client.gauge(stat=record.key, value=record.value) 100 | elif stat_type in ("timing", "histogram"): 101 | self.client.timing(stat=record.key, delta=record.value) 102 | -------------------------------------------------------------------------------- /src/markus/backends/datadog.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | 6 | import logging 7 | 8 | from datadog.dogstatsd import DogStatsd 9 | from markus.backends import BackendBase 10 | 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class DatadogMetrics(BackendBase): 16 | """Use the Datadog DogStatsd client for statsd pings. 17 | 18 | This requires the Datadog backend and requirements be installed. 19 | To install those bits, do:: 20 | 21 | $ pip install markus[datadog] 22 | 23 | 24 | To use, add this to your backends list:: 25 | 26 | { 27 | "class": "markus.backends.datadog.DatadogMetrics", 28 | "options": { 29 | "statsd_host": "localhost", 30 | "statsd_port": 8125, 31 | "statsd_namespace": "", 32 | } 33 | } 34 | 35 | 36 | Options: 37 | 38 | * statsd_host: the hostname for the statsd daemon to connect to 39 | 40 | Defaults to ``"localhost"``. 41 | 42 | * statsd_port: the port for the statsd daemon to connect to 43 | 44 | Defaults to ``8125``. 45 | 46 | * statsd_namespace: the namespace to use for statsd data 47 | 48 | Defaults to ``""``. 49 | 50 | * origin_detection_enabled: whether or not the client should fill the 51 | container field (part of datadog protocol v1.2). 52 | 53 | Defaults to ``False``. 54 | 55 | .. 
seealso::
56 | 
57 |        https://docs.datadoghq.com/developers/metrics/
58 | 
59 |     """
60 | 
61 |     def __init__(self, options=None, filters=None):
62 |         options = options or {}
63 |         self.filters = filters or []
64 | 
65 |         self.host = options.get("statsd_host", "localhost")
66 |         self.port = options.get("statsd_port", 8125)
67 |         self.namespace = options.get("statsd_namespace", "")
68 |         self.origin_detection_enabled = options.get("origin_detection_enabled", False)
69 | 
70 |         self.client = self._get_client(
71 |             host=self.host,
72 |             port=self.port,
73 |             namespace=self.namespace,
74 |             origin_detection_enabled=self.origin_detection_enabled,
75 |         )
76 |         logger.debug(
77 |             "%s configured: %s:%s %s %s",
78 |             self.__class__.__name__,
79 |             self.host,
80 |             self.port,
81 |             self.namespace,
82 |             self.origin_detection_enabled,
83 |         )
84 | 
85 |     def _get_client(self, host, port, namespace, origin_detection_enabled):
86 |         return DogStatsd(
87 |             host=host,
88 |             port=port,
89 |             namespace=namespace,
90 |             origin_detection_enabled=origin_detection_enabled,
91 |         )
92 | 
93 |     def emit(self, record):
94 |         stat_type_to_fun = {
95 |             "incr": self.client.increment,
96 |             "gauge": self.client.gauge,
97 |             "timing": self.client.timing,
98 |             "histogram": self.client.histogram,
99 |         }
100 |         metrics_fun = stat_type_to_fun[record.stat_type]
101 |         metrics_fun(metric=record.key, value=record.value, tags=record.tags)
102 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ======
2 | Markus
3 | ======
4 | 
5 | Markus is a Python library for generating metrics.
6 | 
7 | :Code: https://github.com/mozilla-services/markus
8 | :Issues: https://github.com/mozilla-services/markus/issues
9 | :License: MPL v2
10 | :Documentation: http://markus.readthedocs.io/en/latest/
11 | 
12 | 
13 | Goals
14 | =====
15 | 
16 | Markus makes it easier to generate metrics in your program by:
17 | 
18 | * providing multiple backends (Datadog statsd, statsd, logging, logging rollup,
19 |   and so on) for sending data to different places
20 | 
21 | * sending metrics to multiple backends at the same time
22 | 
23 | * providing a testing framework for easy testing
24 | 
25 | * providing a decoupled architecture making it easier to write code to generate
26 |   metrics without having to worry about whether a metrics client has been
27 |   created and configured--similar to the Python logging module in this
28 |   way
29 | 
30 | I use it at Mozilla in the collector of our crash ingestion pipeline. Peter used
31 | it to build our symbols lookup server, too.
32 | 
33 | 
34 | Install
35 | =======
36 | 
37 | To install Markus, run::
38 | 
39 |     $ pip install markus
40 | 
41 | (Optional) To install the requirements for the
42 | ``markus.backends.statsd.StatsdMetrics`` backend::
43 | 
44 |     $ pip install 'markus[statsd]'
45 | 
46 | (Optional) To install the requirements for the
47 | ``markus.backends.datadog.DatadogMetrics`` backend::
48 | 
49 |     $ pip install 'markus[datadog]'
50 | 
51 | 
52 | Quick start
53 | ===========
54 | 
55 | Similar to using the logging library, every Python module can create a
56 | ``markus.main.MetricsInterface`` (loosely equivalent to a Python
57 | logging logger) at any time, including at module import time, and use that to
58 | generate metrics.
59 | 60 | For example:: 61 | 62 | import markus 63 | 64 | metrics = markus.get_metrics(__name__) 65 | 66 | 67 | Creating a ``markus.main.MetricsInterface`` using ``__name__`` 68 | will cause it to generate all stats keys with a prefix determined from 69 | ``__name__`` which is a dotted Python path to that module. 70 | 71 | Then you can use the ``markus.main.MetricsInterface`` anywhere in that 72 | module:: 73 | 74 | @metrics.timer_decorator("chopping_vegetables") 75 | def some_long_function(vegetable): 76 | for veg in vegetable: 77 | chop_vegetable() 78 | metrics.incr("vegetable", value=1) 79 | 80 | 81 | At application startup, configure Markus with the backends you want and any 82 | options they require to publish metrics. 83 | 84 | For example, let us configure Markus to publish metrics to the Python logging 85 | infrastructure and Datadog:: 86 | 87 | import markus 88 | 89 | markus.configure( 90 | backends=[ 91 | { 92 | # Publish metrics to the Python logging infrastructure 93 | "class": "markus.backends.logging.LoggingMetrics", 94 | }, 95 | { 96 | # Publish metrics to Datadog 97 | "class": "markus.backends.datadog.DatadogMetrics", 98 | "options": { 99 | "statsd_host": "example.com", 100 | "statsd_port": 8125, 101 | "statsd_namespace": "" 102 | } 103 | } 104 | ] 105 | ) 106 | 107 | 108 | Once you've added code that publishes metrics, you'll want to test it and make 109 | sure it's working correctly. Markus comes with a ``markus.testing.MetricsMock`` 110 | to make testing and asserting specific outcomes easier:: 111 | 112 | from markus.testing import MetricsMock 113 | 114 | 115 | def test_something(): 116 | with MetricsMock() as mm: 117 | # ... Do things that might publish metrics 118 | 119 | # Make assertions on metrics published 120 | mm.assert_incr_once("some.key", value=1) 121 | -------------------------------------------------------------------------------- /tests/test_metrics.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from markus import get_metrics 4 | from markus.filters import AddTagFilter 5 | from markus.main import MetricsRecord 6 | from markus.testing import MetricsMock 7 | 8 | 9 | @pytest.fixture 10 | def metricsmock(): 11 | return MetricsMock() 12 | 13 | 14 | @pytest.mark.parametrize( 15 | "prefix, expected", 16 | [("", ""), (".", ""), ("abc(123)", "abc.123"), ("...ab..c...", "ab.c")], 17 | ) 18 | def test_get_metrics_fix_name(prefix, expected): 19 | assert get_metrics(prefix).prefix == expected 20 | 21 | 22 | class Foo: 23 | pass 24 | 25 | 26 | @pytest.mark.parametrize( 27 | "thing, extra, expected", 28 | [ 29 | # Test base 30 | ("string", "", "string"), 31 | (Foo, "", "test_metrics.Foo"), 32 | (Foo(), "", "test_metrics.Foo"), 33 | (__name__, "", "test_metrics"), 34 | (None, "", ""), 35 | # Test extra 36 | ("foo", "namespace1", "foo.namespace1"), 37 | ], 38 | ) 39 | def test_get_metrics_prefix(thing, extra, expected): 40 | assert get_metrics(thing, extra=extra).prefix == expected 41 | 42 | 43 | def test_metricsinterface_extend_prefix(): 44 | metrics = get_metrics("a") 45 | sub_metrics = metrics.extend_prefix("b") 46 | assert sub_metrics.prefix == "a.b" 47 | assert sub_metrics.filters == [] 48 | 49 | sub_metrics = metrics.extend_prefix(".b.") 50 | assert sub_metrics.prefix == "a.b" 51 | assert sub_metrics.filters == [] 52 | 53 | tag_filter_host_foo = AddTagFilter("host:foo") 54 | 55 | metrics.filters.append(tag_filter_host_foo) 56 | sub_metrics = metrics.extend_prefix("b") 57 | assert sub_metrics.prefix == 
"a.b" 58 | assert sub_metrics.filters == [tag_filter_host_foo] 59 | 60 | tag_filter_env_prod = AddTagFilter("env:prod") 61 | # Add a second tag filter and make sure the two lists are independent 62 | metrics.filters.append(tag_filter_env_prod) 63 | assert metrics.filters == [tag_filter_host_foo, tag_filter_env_prod] 64 | assert sub_metrics.filters == [tag_filter_host_foo] 65 | 66 | 67 | def test_dunders(): 68 | record = MetricsRecord("incr", "foo", 10, []) 69 | record2 = record.__copy__() 70 | assert record is not record2 71 | assert record == record2 72 | 73 | 74 | def test_incr(metricsmock): 75 | metrics = get_metrics("thing") 76 | 77 | with metricsmock as mm: 78 | metrics.incr("foo", value=5) 79 | 80 | assert mm.get_records() == [MetricsRecord("incr", "thing.foo", 5, [])] 81 | 82 | 83 | def test_gauge(metricsmock): 84 | metrics = get_metrics("thing") 85 | 86 | with metricsmock as mm: 87 | metrics.gauge("foo", value=10) 88 | 89 | assert mm.get_records() == [MetricsRecord("gauge", "thing.foo", 10, [])] 90 | 91 | 92 | def test_timing(metricsmock): 93 | metrics = get_metrics("thing") 94 | 95 | with metricsmock as mm: 96 | metrics.timing("foo", value=1234) 97 | 98 | assert mm.get_records() == [MetricsRecord("timing", "thing.foo", 1234, [])] 99 | 100 | 101 | def test_histogram(metricsmock): 102 | metrics = get_metrics("thing") 103 | 104 | with metricsmock as mm: 105 | metrics.histogram("foo", value=4321) 106 | 107 | assert mm.get_records() == [MetricsRecord("histogram", "thing.foo", 4321, [])] 108 | 109 | 110 | def test_timer_contextmanager(metricsmock): 111 | metrics = get_metrics("thing") 112 | 113 | with metricsmock as mm: 114 | with metrics.timer("long_fun"): 115 | print("blah") 116 | 117 | assert mm.has_record(fun_name="timing", stat="thing.long_fun") 118 | 119 | 120 | def test_timer_decorator(metricsmock): 121 | metrics = get_metrics("thing") 122 | 123 | @metrics.timer_decorator("long_fun") 124 | def something(): 125 | print("blah") 126 | 127 | with metricsmock as mm: 128 | something() 129 | 130 | assert mm.has_record(fun_name="timing", stat="thing.long_fun") 131 | -------------------------------------------------------------------------------- /tests/test_statsd.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | import pytest 6 | 7 | from markus.backends import statsd 8 | from markus.main import MetricsFilter, MetricsRecord 9 | 10 | 11 | class MockStatsd: 12 | def __init__(self, *args, **kwargs): 13 | self.initargs = args 14 | self.initkwargs = kwargs 15 | self.calls = [] 16 | 17 | def incr(self, *args, **kwargs): 18 | self.calls.append(("incr", args, kwargs)) 19 | 20 | def gauge(self, *args, **kwargs): 21 | self.calls.append(("gauge", args, kwargs)) 22 | 23 | def timing(self, *args, **kwargs): 24 | self.calls.append(("timing", args, kwargs)) 25 | 26 | 27 | @pytest.fixture 28 | def mockstatsd(): 29 | """Mocks Statsd class to capture method call data""" 30 | _old_statsd = statsd.StatsClient 31 | mock = MockStatsd 32 | statsd.StatsClient = mock 33 | yield 34 | statsd.StatsClient = _old_statsd 35 | 36 | 37 | def test_default_options(mockstatsd): 38 | ddm = statsd.StatsdMetrics() 39 | 40 | assert ddm.host == "localhost" 41 | assert ddm.port == 8125 42 | assert ddm.prefix is None 43 | assert ddm.maxudpsize == 512 44 | 45 | # NOTE: ddm.client is the mock instance 46 | assert ddm.client.initargs == () 47 | assert ddm.client.initkwargs == { 48 | "host": "localhost", 49 | "port": 8125, 50 | "prefix": None, 51 | "maxudpsize": 512, 52 | } 53 | 54 | 55 | def test_options(mockstatsd): 56 | ddm = statsd.StatsdMetrics( 57 | { 58 | "statsd_host": "example.com", 59 | "statsd_port": 5000, 60 | "statsd_prefix": "joe", 61 | "statsd_maxudpsize": 256, 62 | } 63 | ) 64 | 65 | assert ddm.host == "example.com" 66 | assert ddm.port == 5000 67 | assert ddm.prefix == "joe" 68 | assert ddm.maxudpsize == 256 69 | 70 | # NOTE: ddm.client is the mock instance 71 | assert ddm.client.initargs == () 72 | assert ddm.client.initkwargs == { 73 | "host": "example.com", 74 | "port": 5000, 75 | "prefix": "joe", 76 | "maxudpsize": 256, 77 | } 78 | 79 | 80 | def test_incr(mockstatsd): 81 | rec = MetricsRecord("incr", key="foo", value=10, tags=["key1:val"]) 82 | ddm = statsd.StatsdMetrics() 83 | ddm.emit_to_backend(rec) 84 | assert ddm.client.calls == [("incr", (), {"stat": "foo", "count": 10})] 85 | 86 | 87 | def test_gauge(mockstatsd): 88 | rec = MetricsRecord("gauge", key="foo", value=100, tags=["key1:val"]) 89 | ddm = statsd.StatsdMetrics() 90 | ddm.emit_to_backend(rec) 91 | assert ddm.client.calls == [("gauge", (), {"stat": "foo", "value": 100})] 92 | 93 | 94 | def test_timing(mockstatsd): 95 | rec = MetricsRecord("timing", key="foo", value=1234, tags=["key1:val"]) 96 | ddm = statsd.StatsdMetrics() 97 | ddm.emit_to_backend(rec) 98 | assert ddm.client.calls == [("timing", (), {"stat": "foo", "delta": 1234})] 99 | 100 | 101 | def test_histogram(mockstatsd): 102 | rec = MetricsRecord("histogram", key="foo", value=4321, tags=["key1:val"]) 103 | ddm = statsd.StatsdMetrics() 104 | ddm.emit_to_backend(rec) 105 | assert ddm.client.calls == [("timing", (), {"stat": "foo", "delta": 4321})] 106 | 107 | 108 | def test_filters(mockstatsd): 109 | class BlueFilter(MetricsFilter): 110 | def filter(self, record): 111 | if "blue" not in record.key: 112 | return 113 | return record 114 | 115 | ddm = statsd.StatsdMetrics(filters=[BlueFilter()]) 116 | ddm.emit_to_backend(MetricsRecord("incr", key="foo", value=1, tags=[])) 117 | ddm.emit_to_backend(MetricsRecord("incr", key="foo.blue", value=2, tags=[])) 118 | assert ddm.client.calls == [("incr", (), {"stat": "foo.blue", "count": 2})] 119 | -------------------------------------------------------------------------------- /docs/metricsoverview.rst: 
--------------------------------------------------------------------------------
1 | ================
2 | Metrics overview
3 | ================
4 | 
5 | This chapter covers the different types of metrics and what they're useful for.
6 | 
7 | .. contents::
8 |    :local:
9 | 
10 | 
11 | Counters (incr)
12 | ===============
13 | 
14 | Counters are for counting things. They answer questions like:
15 | 
16 | * How many requests did my app handle in the last hour?
17 | * How many times did my app rate limit a request?
18 | * How many bytes did my app upload?
19 | 
20 | Metrics analysis tools (Datadog, Graphite, Grafana, and so on) can also look
21 | at counters over periods of time, also known as a rate. With tools like that,
22 | you can answer questions like:
23 | 
24 | * How many requests does my app handle per minute?
25 | 
26 | 
27 | Reporting sizes and measures (gauge)
28 | ====================================
29 | 
30 | When you want to measure something over time, you use a gauge. Gauges answer
31 | questions like:
32 | 
33 | * How much memory does my app use?
34 | * How many items are in the crashmover queue?
35 | * How many threads are running?
36 | 
37 | Measuring these once isn't interesting. Measuring these periodically and then
38 | seeing those measures go up and down over time and possibly in response to
39 | various events (sudden high load, external service going down, and so on) is
40 | very interesting.
41 | 
42 | These numbers are useful for monitoring and alerting. For example, maybe when
43 | the crashmover queue hits a specified threshold, that means the app is in
44 | trouble. That's something you'd want to get alerted to.
45 | 
46 | 
47 | Getting statistical distributions (timing, histogram)
48 | =====================================================
49 | 
50 | When you want to measure something over time and you want statistical
51 | information about those values, use timing and histogram.
52 | 
53 | Statsd-type monitoring tools will calculate the count, average, median, 95th
54 | percentile, and max for these values over some period.
55 | 
56 | This helps you answer questions like:
57 | 
58 | * What are the median, 95%, and max HTTP request payload sizes?
59 | * What are the median and 95% durations for executing some database query?
60 | 
61 | In these cases, knowing just the measured values isn't helpful, but knowing the
62 | median and 95% is very helpful. You'd be able to see how those things generally
63 | are and what the extreme cases are like.
64 | 
65 | Some backends don't support histogram. In that case, histograms are reported as
66 | timings because the two are essentially the same where "histogram" is the more
67 | general of the two.
68 | 
69 | 
70 | Stats
71 | =====
72 | 
73 | Depending on the backend you're using, there may be rules for the metric stats
74 | names you're generating.
75 | 
76 | Generally, if you follow these rules, your stats should work across all
77 | backends:
78 | 
79 | 1. Use only ASCII letters, ASCII numbers, and periods.
80 | 2. The metric stats name should begin with a letter.
81 | 3. Keep metric stats names short.
82 | 
83 | 
84 | Using tags
85 | ==========
86 | 
87 | Tags give context to a metric. Monitoring tools can show you values, but then
88 | break down the values by tag value. This makes it easier to answer questions
89 | like:
90 | 
91 | * What's the total number of requests handled by the cluster? Broken down by
92 |   host?
93 | * What's the total number of requests to the site? Broken down by browser?
94 | * What's the total number of throttle requests? Broken down by throttle result? 95 | 96 | Tags consist of two parts: a key and a value. 97 | 98 | Depending on the backend you're using, there may be rules for the tag names and 99 | values you're using. 100 | 101 | Generally, if you follow these rules, your tags should work across all backends: 102 | 103 | 1. Use only ASCII letters, ASCII numbers, underscores, hyphens, and periods in 104 | tag names. 105 | 2. Tag names should begin with a letter. 106 | 3. Tag names should be short. 107 | 4. Tag values should be limited to a small (under 1,000) set of possible values. 108 | Be wary of using ip addresses, usernames, and other things that can have many 109 | values. 110 | 111 | To help with that, use :py:func:`markus.utils.generate_tag` which will sanitize 112 | tags and key/val tags for use with all Markus backends. 113 | -------------------------------------------------------------------------------- /tests/test_datadog.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | import pytest 6 | 7 | from markus.backends import datadog 8 | from markus.main import MetricsFilter, MetricsRecord 9 | 10 | 11 | class MockDogStatsd: 12 | def __init__(self, *args, **kwargs): 13 | self.initargs = args 14 | self.initkwargs = kwargs 15 | self.calls = [] 16 | 17 | def increment(self, *args, **kwargs): 18 | self.calls.append(("increment", args, kwargs)) 19 | 20 | def gauge(self, *args, **kwargs): 21 | self.calls.append(("gauge", args, kwargs)) 22 | 23 | def timing(self, *args, **kwargs): 24 | self.calls.append(("timing", args, kwargs)) 25 | 26 | def histogram(self, *args, **kwargs): 27 | self.calls.append(("histogram", args, kwargs)) 28 | 29 | 30 | @pytest.fixture 31 | def mockdogstatsd(): 32 | """Mocks DogStatsd class to capture method call data""" 33 | _old_datadog = datadog.DogStatsd 34 | mock = MockDogStatsd 35 | datadog.DogStatsd = mock 36 | yield 37 | datadog.DogStatsd = _old_datadog 38 | 39 | 40 | def test_default_options(mockdogstatsd): 41 | ddm = datadog.DatadogMetrics() 42 | 43 | assert ddm.host == "localhost" 44 | assert ddm.port == 8125 45 | assert ddm.namespace == "" 46 | 47 | # NOTE(willkg): ddm.client is the mock instance 48 | assert ddm.client.initargs == () 49 | assert ddm.client.initkwargs == { 50 | "host": "localhost", 51 | "port": 8125, 52 | "namespace": "", 53 | "origin_detection_enabled": False, 54 | } 55 | 56 | 57 | def test_options(mockdogstatsd): 58 | ddm = datadog.DatadogMetrics( 59 | options={ 60 | "statsd_host": "example.com", 61 | "statsd_port": 5000, 62 | "statsd_namespace": "joe", 63 | "origin_detection_enabled": True, 64 | } 65 | ) 66 | 67 | assert ddm.host == "example.com" 68 | assert ddm.port == 5000 69 | assert ddm.namespace == "joe" 70 | assert ddm.origin_detection_enabled is True 71 | 72 | # NOTE(willkg): ddm.client is the mock instance 73 | assert ddm.client.initargs == () 74 | assert ddm.client.initkwargs == { 75 | "host": "example.com", 76 | "port": 5000, 77 | "namespace": "joe", 78 | "origin_detection_enabled": True, 79 | } 80 | 81 | 82 | def test_incr(mockdogstatsd): 83 | rec = MetricsRecord("incr", key="foo", value=10, tags=["key1:val"]) 84 | ddm = datadog.DatadogMetrics() 85 | ddm.emit_to_backend(rec) 86 | assert ddm.client.calls == [ 87 | ("increment", (), {"metric": 
"foo", "value": 10, "tags": ["key1:val"]}) 88 | ] 89 | 90 | 91 | def test_gauge(mockdogstatsd): 92 | rec = MetricsRecord("gauge", key="foo", value=100, tags=["key1:val"]) 93 | ddm = datadog.DatadogMetrics() 94 | ddm.emit_to_backend(rec) 95 | assert ddm.client.calls == [ 96 | ("gauge", (), {"metric": "foo", "value": 100, "tags": ["key1:val"]}) 97 | ] 98 | 99 | 100 | def test_timing(mockdogstatsd): 101 | rec = MetricsRecord("timing", key="foo", value=1234, tags=["key1:val"]) 102 | ddm = datadog.DatadogMetrics() 103 | ddm.emit_to_backend(rec) 104 | assert ddm.client.calls == [ 105 | ("timing", (), {"metric": "foo", "value": 1234, "tags": ["key1:val"]}) 106 | ] 107 | 108 | 109 | def test_histogram(mockdogstatsd): 110 | rec = MetricsRecord("histogram", key="foo", value=4321, tags=["key1:val"]) 111 | ddm = datadog.DatadogMetrics() 112 | ddm.emit_to_backend(rec) 113 | assert ddm.client.calls == [ 114 | ("histogram", (), {"metric": "foo", "value": 4321, "tags": ["key1:val"]}) 115 | ] 116 | 117 | 118 | def test_filters(mockdogstatsd): 119 | class BlueFilter(MetricsFilter): 120 | def filter(self, record): 121 | if "blue" not in record.key: 122 | return 123 | return record 124 | 125 | ddm = datadog.DatadogMetrics(filters=[BlueFilter()]) 126 | ddm.emit_to_backend(MetricsRecord("incr", key="foo", value=1, tags=[])) 127 | ddm.emit_to_backend(MetricsRecord("incr", key="foo.blue", value=2, tags=[])) 128 | assert ddm.client.calls == [ 129 | ("increment", (), {"metric": "foo.blue", "value": 2, "tags": []}) 130 | ] 131 | -------------------------------------------------------------------------------- /docs/filters.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Filters 3 | ======= 4 | 5 | Markus lets you write and use filters to modify generated metrics. 6 | 7 | .. versionadded:: 2.0.0 8 | 9 | .. contents:: 10 | :local: 11 | 12 | 13 | Using filters 14 | ============= 15 | 16 | Filters can be configured to run on a :py:class:`markus.main.MetricsInterface` 17 | instance that you build with ``get_metrics()``. 18 | 19 | For example, say you had a Python module and all metrics published from code 20 | in that module should get the additional tag ``source:foomodule``. Then you 21 | could do something like this:: 22 | 23 | import markus 24 | from markus.filters import AddTagFilter 25 | 26 | metrics = markus.get_metrics(__name__, filters=[AddTagFilter("source:cache")]) 27 | 28 | class CacheInterface: 29 | def get(self, key, default=None): 30 | with metrics.timing("cache.get"): 31 | # do stuff 32 | 33 | 34 | The ``cache.get`` metric will have ``source:cache`` as a tag. 35 | 36 | You can also specify filters on backends when configuring markus. For example, 37 | say you want to add a ``hostid:HOSTID`` tag to all metrics being published to 38 | the Datadog backend. You could do this:: 39 | 40 | import os 41 | 42 | import markus 43 | from markus.filters import AddTagFilter 44 | 45 | HOSTID = os.environ.get("HOSTID", "none") 46 | 47 | markus.configure( 48 | backends=[ 49 | { 50 | "class": "markus.backends.datadog.DatadogMetrics", 51 | "options": { 52 | "statsd_host": "example.com", 53 | "statsd_port": 8125, 54 | "statsd_namespace": "" 55 | }, 56 | "filters": [AddTagFilter("host:%s" % HOSTID)] 57 | } 58 | ] 59 | ) 60 | 61 | All metrics generated will have the ``hostid`` tag. 62 | 63 | Filters can be used to drop metrics, too. Say you want metrics with 64 | ``debug:true`` spit out to the logging backend, but not the Datadog backend. 
65 | You could do this:: 66 | 67 | import os 68 | 69 | import markus 70 | from markus.main import MetricsFilter 71 | 72 | class DropDebugFilter(MetricsFilter): 73 | def filter(self, record): 74 | if "debug:true" in record.tags: 75 | return 76 | return record 77 | 78 | markus.configure( 79 | backends=[ 80 | { 81 | "class": "markus.backends.datadog.DatadogMetrics", 82 | "options": { 83 | "statsd_host": "example.com", 84 | "statsd_port": 8125, 85 | "statsd_namespace": "" 86 | }, 87 | "filters": [DropDebugFilter()] 88 | }, 89 | { 90 | # Log metrics to the logs 91 | 'class': 'markus.backends.logging.LoggingMetrics', 92 | }, 93 | ] 94 | ) 95 | 96 | Metrics generated with ``debug:true`` will show up in the logs, but not get 97 | sent to the Datadog backend. 98 | 99 | 100 | Writing filters 101 | =============== 102 | 103 | Filters subclass the :py:class:`markus.main.MetricsFilter` class. 104 | 105 | All filters need to implement the ``.filter()`` method. The ``filter`` method 106 | takes :py:class:`markus.main.MetricsRecord` instance and either: 107 | 108 | 1. returns the record, 109 | 2. changes the record in place and returns the record, 110 | 3. returns ``None`` signifying the record should be dropped 111 | 112 | This filter adds a host id tag to all outgoing metrics:: 113 | 114 | import os 115 | 116 | from markus.main import MetricsFilter 117 | 118 | class HostFilter(MetricsFilter): 119 | def __init__(self): 120 | self.hostid_tag = "hostid:%s" % os.environ.get("HOSTID") 121 | 122 | def filter(self, record): 123 | record.tags.append(self.hostid_tag) 124 | return record 125 | 126 | Filters can also drop metrics. This one drops any metric that has a 127 | "debug:true" tag:: 128 | 129 | from markus.main import 130 | 131 | class DebugFilter(MetricsFilter): 132 | def filter(self, record): 133 | if "debug:true" in record.tags: 134 | return 135 | return record 136 | 137 | 138 | .. autoclass:: markus.main.MetricsFilter 139 | 140 | 141 | Included filters 142 | ================ 143 | 144 | Markus includes one filter for the common case of adding tags to all metrics 145 | generated by a metrics interface. 146 | 147 | .. autoclass:: markus.filters.AddTagFilter 148 | 149 | .. autoclass:: markus.filters.RegisteredMetricsFilter 150 | -------------------------------------------------------------------------------- /src/markus/filters.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | import logging 6 | from typing import Dict 7 | 8 | from markus.main import MetricsFilter, MetricsRecord 9 | 10 | 11 | LOGGER = logging.getLogger(__name__) 12 | 13 | 14 | class MetricsException(Exception): 15 | pass 16 | 17 | 18 | class MetricsInvalidSchema(MetricsException): 19 | pass 20 | 21 | 22 | class MetricsUnknownKey(MetricsException): 23 | pass 24 | 25 | 26 | class MetricsWrongType(MetricsException): 27 | pass 28 | 29 | 30 | RegisteredMetricsType = Dict[str, Dict[str, str]] 31 | 32 | 33 | def _validate_registered_metrics(registered_metrics: RegisteredMetricsType): 34 | if not isinstance(registered_metrics, dict): 35 | raise MetricsInvalidSchema("registered_metrics is not a dict") 36 | 37 | for key, val in registered_metrics.items(): 38 | if not isinstance(key, str): 39 | raise MetricsInvalidSchema(f"key {key!r} is not a str") 40 | 41 | if not isinstance(val, dict): 42 | raise MetricsInvalidSchema(f"key {key!r} has a non-dict value") 43 | 44 | if "type" not in val.keys() or "description" not in val.keys(): 45 | raise MetricsInvalidSchema( 46 | f"key {key!r} has value missing type or description" 47 | ) 48 | 49 | if val["type"] not in ["incr", "gauge", "timing", "histogram"]: 50 | raise MetricsInvalidSchema( 51 | f"key {key!r} type is {val['type']}; " 52 | + "not one of incr, gauge, timing, histogram" 53 | ) 54 | 55 | if not isinstance(val["description"], str): 56 | raise MetricsInvalidSchema(f"key {key!r} description is not a str") 57 | 58 | 59 | class RegisteredMetricsFilter(MetricsFilter): 60 | """Contains a list of registered metrics and validator. 61 | 62 | This is a Markus Metrics filter. It'll complain if metrics are generated 63 | that it doesn't know about. 64 | 65 | Registered metrics should be a dict structured like this:: 66 | 67 | { 68 | KEY -> { 69 | "type": str, # one of "incr" | "gauge" | "timing" | "histogram" 70 | "description": str, # can use markdown 71 | }, 72 | ... 73 | } 74 | 75 | For example:: 76 | 77 | { 78 | "eliot.symbolicate_api": { 79 | "type": "timing", 80 | "description": "Timer for how long a symbolication API request takes.", 81 | }, 82 | "eliot.symbolicate.proxied": { 83 | "type": "incr", 84 | description": "Counter for symbolication requests.", 85 | }, 86 | ... 87 | } 88 | 89 | You can include additional information to suit your needs:: 90 | 91 | { 92 | "eliot.symbolicate_api": { 93 | "type": "timing", 94 | "description": "Timer for how long a symbolication API request takes.", 95 | "data_sensitivity": "technical", 96 | "bugs": [ 97 | "https://example.com/bugid=12345", 98 | ], 99 | }, 100 | ... 101 | } 102 | 103 | You can define your metrics in JSON or YAML, read them in, and pass them to 104 | ``RegisteredMetricsFilter`` for easier management of metrics. 
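    For example, a minimal sketch of loading registered metrics from a YAML
    file and building the filter (the ``metrics.yaml`` filename and the
    third-party ``yaml`` module are assumptions for illustration)::

        import yaml

        from markus.filters import RegisteredMetricsFilter

        # Load the registered metrics schema from a YAML file
        with open("metrics.yaml") as fp:
            registered_metrics = yaml.safe_load(fp)

        # Raise an error when an unknown or mistyped metric is emitted
        metrics_filter = RegisteredMetricsFilter(
            registered_metrics=registered_metrics, raise_error=True
        )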
105 | 106 | """ 107 | 108 | def __init__( 109 | self, registered_metrics: RegisteredMetricsType, raise_error: bool = False 110 | ): 111 | _validate_registered_metrics(registered_metrics) 112 | self.registered_metrics = registered_metrics 113 | self.raise_error = raise_error 114 | 115 | def __repr__(self): 116 | return f"" 117 | 118 | def filter(self, record: MetricsRecord) -> MetricsRecord: 119 | metric = self.registered_metrics.get(record.key) 120 | if metric is None: 121 | if self.raise_error: 122 | raise MetricsUnknownKey(f"metrics key {record.key!r} is unknown") 123 | LOGGER.warning("metrics key %r is unknown.", record.key) 124 | 125 | elif record.stat_type != metric["type"]: 126 | if self.raise_error: 127 | raise MetricsWrongType( 128 | f"metrics key {record.key!r} has wrong type; {record.stat_type} vs. " 129 | + f"{metric['type']}" 130 | ) 131 | 132 | LOGGER.warning( 133 | "metrics key %r has wrong type; got %s expecting %s", 134 | record.key, 135 | record.stat_type, 136 | metric["type"], 137 | ) 138 | 139 | return record 140 | 141 | 142 | class AddTagFilter(MetricsFilter): 143 | """Metrics filter that adds tags. 144 | 145 | Contrived example that adds the host for all metrics generated in this 146 | module:: 147 | 148 | import socket 149 | 150 | import markus 151 | from markus.filters import AddTagFilter 152 | 153 | metrics = markus.get_metrics( 154 | __name__, 155 | filters=[AddTagFilter(f"host:{socket.gethostname()}")] 156 | ) 157 | 158 | """ 159 | 160 | def __init__(self, tag: str): 161 | self.tag = tag 162 | 163 | def __repr__(self): 164 | return f"" 165 | 166 | def filter(self, record: MetricsRecord) -> MetricsRecord: 167 | record.tags.append(self.tag) 168 | return record 169 | -------------------------------------------------------------------------------- /tests/test_logging.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | import datetime 6 | 7 | from markus.backends.logging import LoggingMetrics, LoggingRollupMetrics 8 | from markus.main import MetricsFilter, MetricsRecord 9 | 10 | 11 | class TestLoggingMetrics: 12 | def test_incr(self, caplog): 13 | caplog.set_level("DEBUG") 14 | rec = MetricsRecord("incr", key="foo", value=10, tags=["key1:val", "key2:val"]) 15 | lm = LoggingMetrics() 16 | lm.emit_to_backend(rec) 17 | assert caplog.record_tuples == [ 18 | ("markus", 20, "METRICS|incr|foo|10|#key1:val,key2:val") 19 | ] 20 | 21 | def test_gauge(self, caplog): 22 | caplog.set_level("DEBUG") 23 | rec = MetricsRecord( 24 | "gauge", key="foo", value=100, tags=["key1:val", "key2:val"] 25 | ) 26 | lm = LoggingMetrics() 27 | lm.emit_to_backend(rec) 28 | assert caplog.record_tuples == [ 29 | ( 30 | "markus", 31 | 20, 32 | "METRICS|gauge|foo|100|#key1:val,key2:val", 33 | ) 34 | ] 35 | 36 | def test_timing(self, caplog): 37 | caplog.set_level("DEBUG") 38 | rec = MetricsRecord( 39 | "timing", key="foo", value=1234, tags=["key1:val", "key2:val"] 40 | ) 41 | lm = LoggingMetrics() 42 | lm.emit_to_backend(rec) 43 | assert caplog.record_tuples == [ 44 | ( 45 | "markus", 46 | 20, 47 | "METRICS|timing|foo|1234|#key1:val,key2:val", 48 | ) 49 | ] 50 | 51 | def test_histogram(self, caplog): 52 | caplog.set_level("DEBUG") 53 | rec = MetricsRecord( 54 | "histogram", key="foo", value=4321, tags=["key1:val", "key2:val"] 55 | ) 56 | lm = LoggingMetrics() 57 | lm.emit_to_backend(rec) 58 | assert caplog.record_tuples == [ 59 | ( 60 | "markus", 61 | 20, 62 | "METRICS|histogram|foo|4321|#key1:val,key2:val", 63 | ) 64 | ] 65 | 66 | def test_filters(self, caplog): 67 | class BlueFilter(MetricsFilter): 68 | def filter(self, record): 69 | if "blue" not in record.key: 70 | return 71 | return record 72 | 73 | caplog.set_level("DEBUG") 74 | lm = LoggingMetrics(filters=[BlueFilter()]) 75 | lm.emit_to_backend(MetricsRecord("incr", key="foo", value=1, tags=[])) 76 | lm.emit_to_backend(MetricsRecord("incr", key="foo.blue", value=2, tags=[])) 77 | assert caplog.record_tuples == [("markus", 20, "METRICS|incr|foo.blue|2|")] 78 | 79 | def test_utc_timezone_incr(self, caplog, time_machine): 80 | time_machine.move_to("2017-03-06T16:30:00+00:00", tick=False) 81 | caplog.set_level("DEBUG") 82 | rec = MetricsRecord("incr", key="foo", value=10, tags=["key1:val", "key2:val"]) 83 | lm = LoggingMetrics(options={"timestamp_mode": "utc"}) 84 | lm.emit_to_backend(rec) 85 | assert caplog.record_tuples == [ 86 | ( 87 | "markus", 88 | 20, 89 | "METRICS|2017-03-06T16:30:00+00:00|incr|foo|10|#key1:val,key2:val", 90 | ) 91 | ] 92 | 93 | def test_local_timezone_incr(self, caplog, time_machine): 94 | time_machine.move_to("2017-03-06 16:30:00", tick=False) 95 | caplog.set_level("DEBUG") 96 | rec = MetricsRecord("incr", key="foo", value=10, tags=["key1:val", "key2:val"]) 97 | lm = LoggingMetrics(options={"timestamp_mode": "local"}) 98 | lm.emit_to_backend(rec) 99 | assert caplog.record_tuples == [ 100 | ("markus", 20, "METRICS|2017-03-06T16:30:00|incr|foo|10|#key1:val,key2:val") 101 | ] 102 | 103 | 104 | class TestLoggingRollupMetrics: 105 | def test_rollup(self, caplog, time_machine): 106 | caplog.set_level("DEBUG") 107 | 108 | time_machine.move_to( 109 | datetime.datetime(2017, 4, 19, 12, 0, 0, tzinfo=datetime.timezone.utc), 110 | tick=False, 111 | ) 112 | lm = LoggingRollupMetrics() 113 | lm.emit_to_backend(MetricsRecord("incr", key="foo", value=1, tags=None)) 114 | lm.emit_to_backend(MetricsRecord("incr", key="foo", value=1, tags=None)) 115 | 
lm.emit_to_backend(MetricsRecord("gauge", key="widget", value=10, tags=None)) 116 | lm.emit_to_backend(MetricsRecord("incr", key="foo", value=1, tags=None)) 117 | lm.emit_to_backend(MetricsRecord("incr", key="bar", value=1, tags=None)) 118 | lm.emit_to_backend(MetricsRecord("gauge", key="widget", value=20, tags=None)) 119 | lm.emit_to_backend(MetricsRecord("gauge", key="widget", value=5, tags=None)) 120 | lm.emit_to_backend( 121 | MetricsRecord("histogram", key="save_time", value=50, tags=None) 122 | ) 123 | lm.emit_to_backend( 124 | MetricsRecord("histogram", key="save_time", value=60, tags=None) 125 | ) 126 | 127 | time_machine.move_to( 128 | datetime.datetime(2017, 4, 19, 12, 0, 11, tzinfo=datetime.timezone.utc), 129 | tick=False, 130 | ) 131 | lm.emit_to_backend(MetricsRecord("incr", key="bar", value=1, tags=None)) 132 | 133 | assert caplog.record_tuples == [ 134 | ("markus", 20, "ROLLUP INCR bar: count:1|rate:1/10"), 135 | ("markus", 20, "ROLLUP INCR foo: count:3|rate:3/10"), 136 | ("markus", 20, "ROLLUP GAUGE widget: count:3|current:5|min:5|max:20"), 137 | ( 138 | "markus", 139 | 20, 140 | "ROLLUP HISTOGRAM save_time: " 141 | "count:2|min:50.00|avg:55.00|median:55.00|ninety-five:60.00|max:60.00", 142 | ), 143 | ] 144 | -------------------------------------------------------------------------------- /tests/test_filters.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import pytest 3 | 4 | from markus import get_metrics 5 | from markus.filters import ( 6 | AddTagFilter, 7 | MetricsInvalidSchema, 8 | MetricsUnknownKey, 9 | MetricsWrongType, 10 | RegisteredMetricsFilter, 11 | _validate_registered_metrics, 12 | ) 13 | from markus.main import MetricsRecord 14 | 15 | 16 | logging.basicConfig() 17 | 18 | 19 | def test_tag_filter(metricsmock): 20 | metrics = get_metrics("thing", filters=[AddTagFilter("foo:bar")]) 21 | 22 | with metricsmock as mm: 23 | metrics.incr("foo", value=5) 24 | 25 | assert mm.get_records() == [ 26 | MetricsRecord("incr", "thing.foo", 5, ["foo:bar"]), 27 | ] 28 | 29 | 30 | @pytest.mark.parametrize( 31 | "schema", 32 | [ 33 | pytest.param({}, id="empty"), 34 | pytest.param({"testkey": {"type": "incr", "description": "abcde"}}, id="basic"), 35 | pytest.param( 36 | { 37 | "testkey_incr": {"type": "incr", "description": "abcde"}, 38 | "testkey_gauge": {"type": "gauge", "description": "abcde"}, 39 | "testkey_timing": {"type": "timing", "description": "abcde"}, 40 | "testkey_histogram": {"type": "histogram", "description": "abcde"}, 41 | }, 42 | id="cover_stats", 43 | ), 44 | pytest.param( 45 | { 46 | "testkey": { 47 | "type": "incr", 48 | "description": "abcde", 49 | "labels": [], 50 | "bugs": [], 51 | } 52 | }, 53 | id="addtl_info", 54 | ), 55 | ], 56 | ) 57 | def test_validate_registered_metrics(schema): 58 | _validate_registered_metrics(schema) 59 | 60 | 61 | @pytest.mark.parametrize( 62 | "schema, error_msg", 63 | [ 64 | pytest.param([], "registered_metrics is not a dict", id="not_dict"), 65 | pytest.param({1: {}}, "key 1 is not a str", id="key_not_str"), 66 | pytest.param( 67 | {"key": []}, "key 'key' has a non-dict value", id="non_dict_value" 68 | ), 69 | pytest.param( 70 | {"key": {"type": "incr"}}, 71 | "key 'key' has value missing type or description", 72 | id="missing_description", 73 | ), 74 | pytest.param( 75 | {"key": {"description": "foo"}}, 76 | "key 'key' has value missing type or description", 77 | id="missing_type", 78 | ), 79 | pytest.param( 80 | {"key": {"type": "foo", "description": "foo"}}, 81 | 
"key 'key' type is foo; not one of incr, gauge, timing, histogram", 82 | id="invalid_type", 83 | ), 84 | pytest.param( 85 | {"key": {"type": "incr", "description": 5}}, 86 | "key 'key' description is not a str", 87 | id="bad_description_type", 88 | ), 89 | ], 90 | ) 91 | def test_validate_registered_metrics_invalid(schema, error_msg): 92 | with pytest.raises(MetricsInvalidSchema) as excinfo: 93 | _validate_registered_metrics(schema) 94 | 95 | assert str(excinfo.value) == error_msg 96 | 97 | 98 | ALLOWED_METRICS = { 99 | "thing.key_incr": { 100 | "type": "incr", 101 | "description": "--", 102 | }, 103 | "thing.key_gauge": { 104 | "type": "gauge", 105 | "description": "--", 106 | }, 107 | "thing.key_timing": { 108 | "type": "timing", 109 | "description": "--", 110 | }, 111 | "thing.key_histogram": { 112 | "type": "histogram", 113 | "description": "--", 114 | }, 115 | } 116 | 117 | 118 | def test_registered_metrics_filter(caplog, metricsmock): 119 | caplog.set_level(logging.INFO) 120 | 121 | metrics = get_metrics( 122 | "thing", filters=[RegisteredMetricsFilter(ALLOWED_METRICS, raise_error=False)] 123 | ) 124 | 125 | with metricsmock as mm: 126 | # Emit allowed metrics 127 | metrics.incr("key_incr", value=1) 128 | metrics.gauge("key_gauge", value=10) 129 | metrics.timing("key_timing", value=1.0) 130 | metrics.histogram("key_histogram", value=10.0) 131 | 132 | assert mm.get_records() == [ 133 | MetricsRecord("incr", "thing.key_incr", 1, []), 134 | MetricsRecord("gauge", "thing.key_gauge", 10, []), 135 | MetricsRecord("timing", "thing.key_timing", 1.0, []), 136 | MetricsRecord("histogram", "thing.key_histogram", 10.0, []), 137 | ] 138 | 139 | assert caplog.records == [] 140 | 141 | 142 | def test_registered_metrics_filter_missing(caplog, metricsmock): 143 | caplog.set_level(logging.INFO) 144 | 145 | metrics = get_metrics( 146 | "thing", filters=[RegisteredMetricsFilter(ALLOWED_METRICS, raise_error=False)] 147 | ) 148 | 149 | with metricsmock as mm: 150 | # Emit unknown metric 151 | metrics.incr("unknown_key", value=1) 152 | 153 | assert mm.get_records() == [ 154 | MetricsRecord("incr", "thing.unknown_key", 1, []), 155 | ] 156 | 157 | assert caplog.records[0].levelname == "WARNING" 158 | assert caplog.records[0].message == "metrics key 'thing.unknown_key' is unknown." 159 | 160 | 161 | def test_registered_metrics_filter_missing_error(metricsmock): 162 | with pytest.raises(MetricsUnknownKey) as excinfo: 163 | metrics = get_metrics( 164 | "thing", 165 | filters=[RegisteredMetricsFilter(ALLOWED_METRICS, raise_error=True)], 166 | ) 167 | with metricsmock: 168 | # Emit unknown metric 169 | metrics.incr("unknown_key", value=1) 170 | 171 | assert str(excinfo.value) == "metrics key 'thing.unknown_key' is unknown" 172 | 173 | 174 | def test_registered_metrics_filter_bad_type(metricsmock): 175 | with pytest.raises(MetricsWrongType) as excinfo: 176 | metrics = get_metrics( 177 | "thing", 178 | filters=[RegisteredMetricsFilter(ALLOWED_METRICS, raise_error=True)], 179 | ) 180 | with metricsmock: 181 | # Emit unknown metric 182 | metrics.incr("key_gauge", value=1) 183 | 184 | assert ( 185 | str(excinfo.value) 186 | == "metrics key 'thing.key_gauge' has wrong type; incr vs. 
gauge" 187 | ) 188 | -------------------------------------------------------------------------------- /HISTORY.rst: -------------------------------------------------------------------------------- 1 | History 2 | ======= 3 | 4 | 5.2.0 (October 14th, 2025) 5 | -------------------------- 6 | 7 | **Features** 8 | 9 | * Added support for Python 3.14 (#181) 10 | 11 | **Backwards incompatible changes** 12 | 13 | * Dropped support for Python 3.9 (#180) 14 | 15 | * Switch to uv and just for maintenance and development (#167) 16 | 17 | 18 | 5.1.0 (October 30th, 2024) 19 | -------------------------- 20 | 21 | **Features** 22 | 23 | * Added support for Python 3.13 (#159) 24 | 25 | **Backwards incompatible changes** 26 | 27 | * Dropped support for Python 3.8 (#160) 28 | 29 | * Changed timestamp field in ``LoggingMetrics``. The ``LoggingMetrics`` now 30 | defaults to not emitting a timestamp at all. If you would like a timestamp, 31 | you can provide the ``"timestamp_mode"`` option with either ``"utc"`` for UTC 32 | timestamps or ``"local"`` for local timezone timestamps. 33 | 34 | No timestamp example:: 35 | 36 | LoggingMetrics() 37 | 38 | emits lines like:: 39 | 40 | METRICS|histogram|foo|4321|#key1:val 41 | 42 | ``"utc"`` timestamp mode example:: 43 | 44 | LoggingMetrics(options={"timestamp_mode": "utc"}) 45 | 46 | emits lines like:: 47 | 48 | METRICS|2017-03-06T11:30:00+00:00:00|histogram|foo|4321|#key1:val 49 | 50 | ``"local"`` timestamp mode example:: 51 | 52 | LoggingMetrics(options={"timestamp_mode": "local"}) 53 | 54 | emits lines like:: 55 | 56 | METRICS|2017-03-06T11:30:00|histogram|foo|4321|#key1:val 57 | 58 | If you want the original behavior, add set ``timestamp_mode`` to ``local``. 59 | (#11) 60 | 61 | 62 | 63 | 5.0.0 (June 24th, 2024) 64 | ----------------------- 65 | 66 | **Features** 67 | 68 | * Add support for Python 3.12 (#122) 69 | 70 | * Add ``AnyTagValue`` for asserting that metrics are emitted with certain tags, 71 | but ignoring the tag values in tests. (#141) 72 | 73 | Example:: 74 | 75 | def test_somekey_emitted(): 76 | with metricsmock() as mm: 77 | # emit some metrics 78 | 79 | mm.assert_incr("somekey", value=1, tags=[AnyTagValue("host")]) 80 | 81 | * Add ``MetricsInterface.extend_prefix()`` to iteratively hone a metrics 82 | interface prefix. (#142) 83 | 84 | Example:: 85 | 86 | metrics = markus.get_metrics("project") 87 | 88 | module_metrics = metrics.extend_prefix("somemodule") 89 | 90 | module_metrics.incr("key1") # prefix is project.somemodule.key1 91 | 92 | * Add ``metricsmock`` pytest fixture. (#118) 93 | 94 | * Add ``RegisteredMetricsFilter`` metrics filter making it easier to enforce 95 | metrics can only be emitted if documented. (#15) 96 | 97 | **Backwards incompatible changes** 98 | 99 | * Dropped support for Python 3.7 (#121) 100 | 101 | 102 | 4.2.0 (March 30th, 2023) 103 | ------------------------ 104 | 105 | **Bug fixes** 106 | 107 | * Add support for setting ``origin_detection_enabled`` in Datadog backend. 108 | (#108) 109 | 110 | * Switch from Flake8 to Ruff. (#109) 111 | 112 | 113 | 4.1.0 (November 7th, 2022) 114 | -------------------------- 115 | 116 | **Features** 117 | 118 | * Add support for Python 3.11 (#100) 119 | 120 | **Bug fixes** 121 | 122 | * Redo how dev environment works so it's no longer installed via an extras but 123 | is now in a separate requirements-dev.txt file. 124 | 125 | * Split flake8 tou a separate requirements-flake8.txt and tox environment to 126 | handle conflicts with installing other things. 
127 | 128 | 129 | 4.0.1 (May 10th, 2022) 130 | ---------------------- 131 | 132 | **Bug fixes** 133 | 134 | * Move pytest import to a pytest plugin so it's easier to determine when pytest 135 | is running. (#95) Thank you, John! 136 | 137 | 138 | 4.0.0 (October 22nd, 2021) 139 | -------------------------- 140 | 141 | **Features** 142 | 143 | * Added support for Python 3.10 (#88) 144 | 145 | **Backwards incompatibel changes** 146 | 147 | * Dropped support for Python 3.6 (#89) 148 | 149 | 150 | 3.0.0 (February 5th, 2021) 151 | -------------------------- 152 | 153 | **Features** 154 | 155 | * Added support for Python 3.9 (#79). Thank you, Brady! 156 | 157 | * Changed ``assert_*`` helper methods on ``markus.testing.MetricsMock`` 158 | to print the records to stdout if the assertion fails. This can save some 159 | time debugging failing tests. (#74) 160 | 161 | **Backwards incompatible changes** 162 | 163 | * Dropped support for Python 3.5 (#78). Thank you, Brady! 164 | 165 | * ``markus.testing.MetricsMock.get_records`` and 166 | ``markus.testing.MetricsMock.filter_records`` return 167 | ``markus.main.MetricsRecord`` instances now. This might require 168 | you to rewrite/update tests that use the ``MetricsMock``. 169 | 170 | 171 | 2.2.0 (April 15th, 2020) 172 | ------------------------ 173 | 174 | **Features** 175 | 176 | * Add ``assert_`` methods to ``MetricsMock`` to reduce the boilerplate for 177 | testing. Thank you, John! (#68) 178 | 179 | **Bug fixes** 180 | 181 | * Remove use of ``six`` library. (#69) 182 | 183 | 184 | 2.1.0 (October 7th, 2019) 185 | ------------------------- 186 | 187 | **Features** 188 | 189 | * Fix ``get_metrics()`` so you can call it without passing in a `thing` 190 | and it'll now create a ``MetricsInterface`` that doesn't have a key 191 | prefix. (#59) 192 | 193 | 194 | 2.0.0 (September 19th, 2019) 195 | ---------------------------- 196 | 197 | **Features** 198 | 199 | * Use ``time.perf_counter()`` if available. Thank you, Mike! (#34) 200 | * Support Python 3.7 officially. 201 | * Add filters for adjusting and dropping metrics getting emitted. 202 | See documentation for more details. (#40) 203 | 204 | **Backwards incompatible changes** 205 | 206 | * ``tags`` now defaults to ``[]`` instead of ``None`` which may affect some 207 | expected test output. 208 | * Adjust internals to run ``.emit()`` on backends. If you wrote your own 209 | backend, you may need to adjust it. 210 | * Drop support for Python 3.4. (#39) 211 | * Drop support for Python 2.7. 212 | 213 | If you're still using Python 2.7, you'll need to pin to ``<2.0.0``. (#42) 214 | 215 | **Bug fixes** 216 | 217 | * Document feature support in backends. (#47) 218 | * Fix ``MetricsMock.has_record()`` example. Thank you, John! 219 | 220 | 221 | 1.2.0 (April 27th, 2018) 222 | ------------------------ 223 | 224 | **Features** 225 | 226 | * Add ``.clear()`` to ``MetricsMock`` making it easier to build a pytest 227 | fixture with the ``MetricsMock`` context and manipulate records for easy 228 | testing. (#29) 229 | 230 | **Bug fixes** 231 | 232 | * Update Cloudwatch backend fixing ``.timing()`` and ``.histogram()`` to 233 | send ``histogram`` metrics type which Datadog now supports. (#31) 234 | 235 | 236 | 1.1.2 (April 5th, 2018) 237 | ----------------------- 238 | 239 | **Typo fixes** 240 | 241 | * Fix the date from the previous release. Ugh. 242 | 243 | 244 | 1.1.1 (April 5th, 2018) 245 | ----------------------- 246 | 247 | **Features** 248 | 249 | * Official switch to semver. 
250 | 251 | **Bug fixes** 252 | 253 | * Fix ``MetricsMock`` so it continues to work even if ``configure`` 254 | is called. (#27) 255 | 256 | 257 | 1.1 (November 13th, 2017) 258 | ------------------------- 259 | 260 | **Features** 261 | 262 | * Added ``markus.utils.generate_tag`` utility function 263 | 264 | 265 | 1.0 (October 30th, 2017) 266 | ------------------------ 267 | 268 | **Features** 269 | 270 | * Added support for Python 2.7. 271 | 272 | * Added a ``markus.backends.statsd.StatsdMetrics`` backend that uses 273 | pystatsd client for statsd pings. Thank you, Javier! 274 | 275 | **Bug fixes** 276 | 277 | * Added ``LoggingRollupMetrics`` to docs. 278 | 279 | * Mozilla has been running Markus in production for 6 months so we 280 | can mark it production-ready now. 281 | 282 | 283 | 0.2 (April 19th, 2017) 284 | ---------------------- 285 | 286 | **Features** 287 | 288 | * Added a ``markus.backends.logging.LoggingRollupMetrics`` backend that 289 | rolls up metrics and does some light math on them. Possibly helpful 290 | for light profiling for development. 291 | 292 | **Bug fixes** 293 | 294 | * Lots of documentation fixes. Thank you, Peter! 295 | 296 | 297 | 0.1 (April 10th, 2017) 298 | ---------------------- 299 | 300 | Initial writing. 301 | -------------------------------------------------------------------------------- /src/markus/backends/logging.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | 6 | import datetime 7 | import logging 8 | import statistics 9 | import time 10 | 11 | from markus.backends import BackendBase 12 | 13 | 14 | UTC = datetime.timezone.utc 15 | 16 | 17 | class LoggingMetrics(BackendBase): 18 | """Metrics backend that publishes to Python logging. 19 | 20 | To use, add this to your backends list:: 21 | 22 | { 23 | "class": "markus.backends.logging.LoggingMetrics", 24 | "options": { 25 | "logger_name": "markus", 26 | "leader": "METRICS", 27 | } 28 | } 29 | 30 | By default the :py:class:`markus.backends.logging.LoggingMetrics` backend 31 | generates logging messages in this format:: 32 | 33 | leader|metric_type|stat|value|#tags 34 | 35 | For example:: 36 | 37 | METRICS|histogram|foo|4321|#key1:val 38 | 39 | If you set the ``"timestamp_mode"`` option to ``"utc"`` for a UTC timestamp 40 | or ``"local"`` for a local timezone timestamp, then it'll have this 41 | format:: 42 | 43 | leader|timestamp|metric_type|stat|value|#tags 44 | 45 | For example:: 46 | 47 | METRICS|2017-03-06T11:30:00|histogram|foo|4321|#key1:val 48 | 49 | This will log at the ``logging.INFO`` level. 50 | 51 | Options: 52 | 53 | * ``logger_name``: the name for the logger 54 | 55 | Defaults to ``"markus"``. 56 | 57 | * ``leader``: string at the start of the metrics line 58 | 59 | This makes it easier to parse logs for metrics data--you look for the 60 | leader and everything after that is parseable data. 61 | 62 | Defaults to ``"METRICS"``. 63 | 64 | * ``timestamp_mode``: mode for timestamp 65 | 66 | * ``"utc"``: UTC timestamp 67 | * ``"local"``: local timezone timestamp 68 | * anything else: no timestamp 69 | 70 | Defaults to no timestamp. 
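    For example, a minimal sketch that configures Markus with this backend and
    UTC timestamps (it assumes the standard library ``logging`` module is
    already set up to show output from the ``markus`` logger)::

        import markus

        # Route all metrics to the logging backend with UTC timestamps
        markus.configure(
            backends=[
                {
                    "class": "markus.backends.logging.LoggingMetrics",
                    "options": {"timestamp_mode": "utc"},
                }
            ]
        )

        metrics = markus.get_metrics(__name__)
        metrics.incr("key1")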
71 | 72 | """ 73 | 74 | def __init__(self, options=None, filters=None): 75 | options = options or {} 76 | self.filters = filters or [] 77 | self.logger_name = options.get("logger_name", "markus") 78 | self.logger = logging.getLogger(self.logger_name) 79 | self.leader = options.get("leader", "METRICS") 80 | self.timestamp_mode = options.get("timestamp_mode", None) 81 | 82 | tmpl = [ 83 | "%(leader)s", 84 | "%(kind)s", 85 | "%(stat)s", 86 | "%(value)s", 87 | "%(tags)s", 88 | ] 89 | if self.timestamp_mode == "utc": 90 | tmpl.insert(1, "%(utc_timestamp)s") 91 | elif self.timestamp_mode == "local": 92 | tmpl.insert(1, "%(local_timestamp)s") 93 | 94 | self.tmpl = "|".join(tmpl) 95 | 96 | def emit(self, record): 97 | self.logger.info( 98 | self.tmpl 99 | % { 100 | "leader": self.leader, 101 | "local_timestamp": datetime.datetime.now().isoformat(), 102 | "utc_timestamp": datetime.datetime.now(tz=UTC).isoformat(), 103 | "kind": record.stat_type, 104 | "stat": record.key, 105 | "value": record.value, 106 | "tags": ("#%s" % ",".join(record.tags)) if record.tags else "", 107 | } 108 | ) 109 | 110 | 111 | class LoggingRollupMetrics(BackendBase): 112 | """Experimental logging backend for rolling up stats over a period. 113 | 114 | To use, add this to your backends list:: 115 | 116 | { 117 | "class": "markus.backends.logging.LoggingRollupMetrics", 118 | "options": { 119 | "logger_name": "markus", 120 | "leader": "ROLLUP", 121 | "flush_interval": 10 122 | } 123 | } 124 | 125 | The :py:class:`markus.backends.logging.LoggingRollupMetrics` backend 126 | generates rollups every *flush_interval* of stats generated during that 127 | period. 128 | 129 | For incr stats, it shows count and rate. 130 | 131 | For gauge stats, it shows count, current value, min value, and max value 132 | for the period. 133 | 134 | For timing and histogram stats, it shows count, min, average, median, 95%, 135 | and max for the period. 136 | 137 | This will log at the ``logging.INFO`` level. 138 | 139 | Options: 140 | 141 | * ``logger_name``: the name for the logger 142 | 143 | Defaults to ``"markus"``. 144 | 145 | * ``leader``: string at the start of the metrics line 146 | 147 | This makes it easier to parse logs for metrics data--you look for the 148 | leader and everything after that is parseable data. 149 | 150 | Defaults to ``"ROLLUP"``. 151 | 152 | * ``flush_interval``: interval to generate rollup data 153 | 154 | :py:class:`markus.backends.logging.LoggingRollupMetrics` will spit out 155 | rollup data every ``flush_interval`` seconds. 156 | 157 | Defaults to ``10`` seconds. 158 | 159 | .. Note:: 160 | 161 | This backend is experimental, probably has bugs, and may change over 162 | time. 
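    For example, the rollup lines look roughly like this (a sketch based on the
    log format strings in the code below)::

        ROLLUP INCR foo: count:3|rate:3/10
        ROLLUP GAUGE widget: count:3|current:5|min:5|max:20
        ROLLUP HISTOGRAM save_time: count:2|min:50.00|avg:55.00|median:55.00|ninety-five:60.00|max:60.00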
163 | 164 | """ 165 | 166 | def __init__(self, options=None, filters=None): 167 | options = options or {} 168 | self.filters = filters or [] 169 | 170 | self.flush_interval = options.get("flush_interval", 10) 171 | self.logger_name = options.get("logger_name", "markus") 172 | self.leader = options.get("leader", "ROLLUP") 173 | 174 | self.logger = logging.getLogger(self.logger_name) 175 | 176 | # Next time to rollup in seconds since epoch 177 | self.next_rollup = time.time() + self.flush_interval 178 | 179 | # Map of key -> values list 180 | self.incr_stats = {} 181 | self.gauge_stats = {} 182 | self.histogram_stats = {} 183 | 184 | def rollup(self): 185 | """Roll up stats and log them.""" 186 | now = time.time() 187 | if now < self.next_rollup: 188 | return 189 | 190 | self.next_rollup = now + self.flush_interval 191 | 192 | for key, values in sorted(self.incr_stats.items()): 193 | self.logger.info( 194 | "%s INCR %s: count:%d|rate:%d/%d", 195 | self.leader, 196 | key, 197 | len(values), 198 | sum(values), 199 | self.flush_interval, 200 | ) 201 | self.incr_stats[key] = [] 202 | 203 | for key, values in sorted(self.gauge_stats.items()): 204 | if values: 205 | self.logger.info( 206 | "%s GAUGE %s: count:%d|current:%s|min:%s|max:%s", 207 | self.leader, 208 | key, 209 | len(values), 210 | values[-1], 211 | min(values), 212 | max(values), 213 | ) 214 | else: 215 | self.logger.info("%s (gauge) %s: no data", self.leader, key) 216 | 217 | self.gauge_stats[key] = [] 218 | 219 | for key, values in sorted(self.histogram_stats.items()): 220 | if values: 221 | self.logger.info( 222 | ( 223 | "%s HISTOGRAM %s: " 224 | "count:%d|min:%.2f|avg:%.2f|median:%.2f|ninety-five:%.2f|max:%.2f" 225 | ), 226 | self.leader, 227 | key, 228 | len(values), 229 | min(values), 230 | statistics.mean(values), 231 | statistics.median(values), 232 | values[int(len(values) * 95 / 100)], 233 | max(values), 234 | ) 235 | else: 236 | self.logger.info("%s (histogram) %s: no data", self.leader, key) 237 | 238 | self.histogram_stats[key] = [] 239 | 240 | def emit(self, record): 241 | stat_type_to_list = { 242 | "incr": self.incr_stats, 243 | "gauge": self.gauge_stats, 244 | "timing": self.histogram_stats, 245 | "histogram": self.histogram_stats, 246 | } 247 | 248 | self.rollup() 249 | 250 | # FIXME(willkg): what to do with tags? 251 | stat_type_to_list[record.stat_type].setdefault(record.key, []).append( 252 | record.value 253 | ) 254 | -------------------------------------------------------------------------------- /tests/test_testing.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | import markus 6 | from markus.testing import AnyTagValue, MetricsMock 7 | 8 | import pytest 9 | 10 | 11 | class TestAnyTagValue: 12 | def test_equality(self): 13 | # values are equal even if instances aren't 14 | assert AnyTagValue("host") == AnyTagValue("host") 15 | 16 | # equal to "host:" with any value 17 | assert AnyTagValue("host") == "host:1234" 18 | assert AnyTagValue("host") == "host:5678" 19 | assert AnyTagValue("host") == "host" 20 | 21 | # not equal to a different tag 22 | assert AnyTagValue("host") != "env:prod" 23 | 24 | def test_sorting(self): 25 | items = [AnyTagValue("host"), "env:prod", "color:blue"] 26 | items.sort() 27 | assert list(items) == ["color:blue", "env:prod", AnyTagValue("host")] 28 | 29 | items = ["env:prod", "color:blue", AnyTagValue("host"), "host"] 30 | items.sort() 31 | assert list(items) == ["color:blue", "env:prod", AnyTagValue("host"), "host"] 32 | 33 | def test_assertions(self): 34 | with MetricsMock() as mm: 35 | mymetrics = markus.get_metrics("test") 36 | mymetrics.incr("key1", value=1, tags=["host:12345", "env:prod"]) 37 | 38 | mm.assert_incr("test.key1", value=1, tags=[AnyTagValue("host"), "env:prod"]) 39 | mm.assert_incr("test.key1", value=1, tags=["env:prod", AnyTagValue("host")]) 40 | 41 | 42 | class TestMetricsMock: 43 | """Verify the MetricsMock works as advertised""" 44 | 45 | def test_print_records(self): 46 | # NOTE(willkg): .print_records() prints to stdout and is mostly used 47 | # for debugging tests. So we're just going to run it and make sure it 48 | # doesn't throw errors. 49 | with MetricsMock() as mm: 50 | mymetrics = markus.get_metrics("test") 51 | mymetrics.incr("key1") 52 | 53 | mm.print_records() 54 | 55 | def test_clear_records(self): 56 | with MetricsMock() as mm: 57 | mymetrics = markus.get_metrics("test") 58 | mymetrics.incr("key1", value=1, tags=["env:stage"]) 59 | 60 | assert len(mm.get_records()) == 1 61 | 62 | mm.clear_records() 63 | 64 | assert len(mm.get_records()) == 0 65 | 66 | def test_filter_records_fun_name(self): 67 | with MetricsMock() as mm: 68 | mymetrics = markus.get_metrics("test") 69 | mymetrics.incr("key1", value=1, tags=["env:stage"]) 70 | 71 | key1_metrics = mm.filter_records(stat="test.key1", value=1) 72 | assert len(key1_metrics) == 1 73 | 74 | key1_metrics = mm.filter_records(fun_name="incr", stat="test.key1", value=1) 75 | assert len(key1_metrics) == 1 76 | 77 | key1_metrics = mm.filter_records( 78 | fun_name="timing", stat="test.key1", value=1 79 | ) 80 | assert len(key1_metrics) == 0 81 | 82 | def test_filter_records_key(self): 83 | with MetricsMock() as mm: 84 | mymetrics = markus.get_metrics("test") 85 | mymetrics.incr("key1", value=1, tags=["env:stage"]) 86 | 87 | key1_metrics = mm.filter_records(fun_name="incr", value=1) 88 | assert len(key1_metrics) == 1 89 | 90 | key1_metrics = mm.filter_records(fun_name="incr", stat="test.key1", value=1) 91 | assert len(key1_metrics) == 1 92 | 93 | key1_metrics = mm.filter_records(fun_name="incr", stat="test.key1", value=1) 94 | assert len(key1_metrics) == 1 95 | 96 | key1_metrics = mm.filter_records(fun_name="incr", stat="test.key2", value=1) 97 | assert len(key1_metrics) == 0 98 | 99 | def test_filter_records_value(self): 100 | with MetricsMock() as mm: 101 | mymetrics = markus.get_metrics("test") 102 | mymetrics.incr("key1", value=1, tags=["env:stage"]) 103 | 104 | key1_metrics = mm.filter_records(fun_name="incr", stat="test.key1") 105 | assert len(key1_metrics) == 1 106 | 107 | key1_metrics = mm.filter_records(fun_name="incr", stat="test.key1", 
value=1) 108 | assert len(key1_metrics) == 1 109 | 110 | key1_metrics = mm.filter_records(fun_name="incr", stat="test.key1", value=5) 111 | assert len(key1_metrics) == 0 112 | 113 | def test_filter_records_tags(self): 114 | with MetricsMock() as mm: 115 | mymetrics = markus.get_metrics("test") 116 | mymetrics.incr("key1", value=1, tags=["env:stage"]) 117 | mymetrics.incr("key2", value=3, tags=["env:prod"]) 118 | 119 | key1_metrics = mm.filter_records(tags=["env:stage"]) 120 | assert len(key1_metrics) == 1 121 | assert key1_metrics[0].key == "test.key1" 122 | 123 | key1_metrics = mm.filter_records(tags=["env:prod"]) 124 | assert len(key1_metrics) == 1 125 | assert key1_metrics[0].key == "test.key2" 126 | 127 | key1_metrics = mm.filter_records(tags=["env:dev"]) 128 | assert len(key1_metrics) == 0 129 | 130 | def test_has_record(self): 131 | # NOTE(willkg): .has_record() is implemented using .filter_records() so 132 | # we can test that aggressively and just make sure the .has_record() 133 | # wrapper works fine. 134 | # 135 | # If that ever changes, we should update this test. 136 | with MetricsMock() as mm: 137 | mymetrics = markus.get_metrics("test") 138 | mymetrics.incr("key1", value=1) 139 | 140 | assert mm.has_record(fun_name="incr", stat="test.key1", value=1) 141 | 142 | assert not mm.has_record(fun_name="incr", stat="test.key1", value=5) 143 | 144 | def test_configure_doesnt_affect_override(self): 145 | with MetricsMock() as mm: 146 | markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}]) 147 | mymetrics = markus.get_metrics("test") 148 | mymetrics.incr("key1", value=1) 149 | 150 | assert mm.has_record(fun_name="incr", stat="test.key1", value=1) 151 | 152 | assert not mm.has_record(fun_name="incr", stat="test.key1", value=5) 153 | 154 | def test_incr_helpers(self): 155 | with MetricsMock() as mm: 156 | markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}]) 157 | mymetrics = markus.get_metrics("test") 158 | mymetrics.incr("key1", value=1) 159 | mymetrics.incr("keymultiple", value=1) 160 | mymetrics.incr("keymultiple", value=1) 161 | 162 | mm.assert_incr(stat="test.key1") 163 | 164 | mm.assert_incr_once(stat="test.key1") 165 | with pytest.raises(AssertionError): 166 | mm.assert_incr_once(stat="test.keymultiple") 167 | 168 | mm.assert_not_incr(stat="test.keynot") 169 | mm.assert_not_incr(stat="test.key1", value=5) 170 | with pytest.raises(AssertionError): 171 | mm.assert_not_incr(stat="test.key1") 172 | 173 | def test_gauge_helpers(self): 174 | with MetricsMock() as mm: 175 | markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}]) 176 | mymetrics = markus.get_metrics("test") 177 | mymetrics.gauge("key1", value=5) 178 | mymetrics.gauge("keymultiple", value=5) 179 | mymetrics.gauge("keymultiple", value=5) 180 | 181 | mm.assert_gauge(stat="test.key1") 182 | 183 | mm.assert_gauge_once(stat="test.key1") 184 | with pytest.raises(AssertionError): 185 | mm.assert_gauge_once(stat="test.keymultiple") 186 | 187 | mm.assert_not_gauge(stat="test.keynot") 188 | mm.assert_not_gauge(stat="test.key1", value=10) 189 | with pytest.raises(AssertionError): 190 | mm.assert_not_gauge(stat="test.key1") 191 | 192 | def test_timing_helpers(self): 193 | with MetricsMock() as mm: 194 | markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}]) 195 | mymetrics = markus.get_metrics("test") 196 | mymetrics.timing("key1", value=1) 197 | mymetrics.timing("keymultiple", value=1) 198 | mymetrics.timing("keymultiple", value=1) 199 | 200 | 
mm.assert_timing(stat="test.key1") 201 | 202 | mm.assert_timing_once(stat="test.key1") 203 | with pytest.raises(AssertionError): 204 | mm.assert_timing_once(stat="test.keymultiple") 205 | 206 | mm.assert_not_timing(stat="test.keynot") 207 | mm.assert_not_timing(stat="test.key1", value=5) 208 | with pytest.raises(AssertionError): 209 | mm.assert_not_timing(stat="test.key1") 210 | 211 | def test_histogram_helpers(self): 212 | with MetricsMock() as mm: 213 | markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}]) 214 | mymetrics = markus.get_metrics("test") 215 | mymetrics.histogram("key1", value=1) 216 | mymetrics.histogram("keymultiple", value=1) 217 | mymetrics.histogram("keymultiple", value=1) 218 | 219 | mm.assert_histogram(stat="test.key1") 220 | 221 | mm.assert_histogram_once(stat="test.key1") 222 | with pytest.raises(AssertionError): 223 | mm.assert_histogram_once(stat="test.keymultiple") 224 | 225 | mm.assert_not_histogram(stat="test.keynot") 226 | mm.assert_not_histogram(stat="test.key1", value=5) 227 | with pytest.raises(AssertionError): 228 | mm.assert_not_histogram(stat="test.key1") 229 | 230 | def test_print_on_failure(self, capsys): 231 | with MetricsMock() as mm: 232 | markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}]) 233 | mymetrics = markus.get_metrics("test") 234 | mymetrics.histogram("keymultiple", value=1) 235 | mymetrics.histogram("keymultiple", value=1) 236 | 237 | with pytest.raises(AssertionError): 238 | mm.assert_histogram_once(stat="test.keymultiple") 239 | 240 | # On assertion error, the assert_* methods will print the metrics 241 | # records to stdout. 242 | captured = capsys.readouterr() 243 | expected = ( 244 | "\n" 245 | "\n" 246 | ) 247 | assert captured.out == expected 248 | -------------------------------------------------------------------------------- /src/markus/testing.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | from copy import copy 6 | import functools 7 | from types import TracebackType 8 | from typing import List, Optional, Type, Union 9 | 10 | from markus import INCR, GAUGE, TIMING, HISTOGRAM # noqa 11 | from markus.main import _override_metrics, MetricsRecord 12 | 13 | 14 | __all__ = ["AnyTagValue", "MetricsMock"] 15 | 16 | 17 | def print_on_failure(fun): 18 | """Decorator to print metrics records on assertion failure.""" 19 | 20 | @functools.wraps(fun) 21 | def _print_on_failure(metricsmock, *args, **kwargs): 22 | try: 23 | return fun(metricsmock, *args, **kwargs) 24 | except Exception: 25 | metricsmock.print_records() 26 | raise 27 | 28 | return _print_on_failure 29 | 30 | 31 | @functools.total_ordering 32 | class AnyTagValue: 33 | """Matches a markus metrics tag with any value 34 | 35 | Example:: 36 | 37 | import markus 38 | 39 | from markus.testing import AnyTagValue, MetricsMock 40 | 41 | 42 | with MetricsMock() as mm: 43 | metrics = get_metrics("test") 44 | 45 | metrics.incr("key1", value=1, tags=["host:12345"]) 46 | 47 | mm.assert_incr(stat="test.key1", tags=[AnyTagValue("host")]) 48 | 49 | """ 50 | 51 | def __init__(self, key: str): 52 | self.key = key 53 | 54 | def __repr__(self): 55 | return f"" 56 | 57 | def get_other_key(self, other: str) -> str: 58 | # This is comparing against a tag string 59 | if ":" in other: 60 | other_key, _ = other.split(":") 61 | else: 62 | other_key = other 63 | return other_key 64 | 65 | def __eq__(self, other: Union[str, "AnyTagValue"]) -> bool: 66 | if isinstance(other, AnyTagValue): 67 | return self.key == other.key 68 | return self.key == self.get_other_key(other) 69 | 70 | def __lt__(self, other: Union[str, "AnyTagValue"]) -> bool: 71 | if isinstance(other, AnyTagValue): 72 | return self.key < other.key 73 | return self.key < self.get_other_key(other) 74 | 75 | 76 | class MetricsMock: 77 | """Mock for recording metrics events and testing them. 78 | 79 | Mimics a metrics backend as a context manager. Keeps records of what got 80 | metricfied so that you can print them out, filter them, assert various 81 | things about them, etc. 82 | 83 | To use:: 84 | 85 | from markus.testing import MetricsMock 86 | 87 | def test_something(): 88 | with MetricsMock() as mm: 89 | # Do things that might record metrics here 90 | 91 | # Assert something about the metrics recorded 92 | mm.assert_incr_once(stat="some.random.key", value=1) 93 | 94 | When using the ``assert_*`` helper methods, if the assertion fails, it'll 95 | print the MetricsRecords that were emitted to stdout. 96 | 97 | """ 98 | 99 | def __init__(self): 100 | self.records = [] 101 | 102 | def emit_to_backend(self, record: MetricsRecord): 103 | self.emit(record) 104 | 105 | def emit(self, record: MetricsRecord): 106 | self.records.append(copy(record)) 107 | 108 | def __enter__(self) -> "MetricsMock": 109 | self.records = [] 110 | _override_metrics([self]) 111 | return self 112 | 113 | def __exit__( 114 | self, 115 | exctype: Optional[Type[BaseException]], 116 | excinst: Optional[BaseException], 117 | exctb: Optional[TracebackType], 118 | ) -> bool: 119 | _override_metrics(None) 120 | 121 | def get_records(self) -> List[MetricsRecord]: 122 | """Return list of MetricsRecord instances. 123 | 124 | This is the list of :py:class:`markus.main.MetricsRecord` instances 125 | that were emitted while this :py:class:`markus.testing.MetricsMock` was 126 | active. 
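        For example, a minimal sketch (the ``test`` prefix and ``key1`` stat
        are illustrative)::

            import markus
            from markus.testing import MetricsMock

            def test_emits_one_metric():
                with MetricsMock() as mm:
                    # Emit a metric while the mock is active
                    markus.get_metrics("test").incr("key1")

                    records = mm.get_records()
                    assert len(records) == 1
                    assert records[0].key == "test.key1"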
127 | 128 | """ 129 | return self.records 130 | 131 | def filter_records( 132 | self, 133 | fun_name: Optional[str] = None, 134 | stat: Optional[str] = None, 135 | value: Optional[Union[int, float]] = None, 136 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 137 | ) -> List[MetricsRecord]: 138 | """Filter collected metrics records for ones that match specified criteria. 139 | 140 | Filtering is done by ANDing the requirements together. For example:: 141 | 142 | with MetricsMock() as mm: 143 | # Do something that emits metrics 144 | 145 | assert mm.filter_records("incr", stat="some.key", tags=["color:blue"]) 146 | 147 | :py:meth:`markus.testing.MetricsMock.filter_records` will return 148 | :py:class:`markus.main.MetricsRecord` instances that are ``"incr"`` AND 149 | the stat is ``"some.key"`` AND the tags list is ``["color:blue"]``. 150 | 151 | :arg fun_name: "incr", "gauge", "timing", "histogram", or ``None`` 152 | :arg stat: the stat emitted 153 | :arg value: the value 154 | :arg tags: the list of tag strings or ``[]`` or ``None`` 155 | 156 | :returns: list of :py:class:`markus.main.MetricsRecord` instances 157 | 158 | """ 159 | 160 | def match_fun_name(record_fun_name: Optional[str]) -> bool: 161 | return fun_name is None or fun_name == record_fun_name 162 | 163 | def match_stat(record_stat: Optional[str]) -> bool: 164 | return stat is None or stat == record_stat 165 | 166 | def match_value(record_value: Optional[Union[int, float]]) -> bool: 167 | return value is None or value == record_value 168 | 169 | def match_tags(record_tags: Optional[List[Union[str, AnyTagValue]]]) -> bool: 170 | return tags is None or list(sorted(tags)) == list(sorted(record_tags)) 171 | 172 | return [ 173 | record 174 | for record in self.get_records() 175 | if ( 176 | match_fun_name(record.stat_type) 177 | and match_stat(record.key) 178 | and match_value(record.value) 179 | and match_tags(record.tags) 180 | ) 181 | ] 182 | 183 | def has_record( 184 | self, 185 | fun_name: Optional[str] = None, 186 | stat: Optional[str] = None, 187 | value: Optional[Union[int, float]] = None, 188 | tags=None, 189 | ) -> bool: 190 | """Return True/False regarding whether collected metrics match criteria. 
191 | 192 | :arg fun_name: "incr", "gauge", "timing", "histogram", or ``None`` 193 | :arg stat: the stat emitted 194 | :arg value: the value 195 | :arg tags: the list of tag strings or ``[]`` or ``None`` 196 | 197 | :returns: bool 198 | 199 | """ 200 | return bool( 201 | self.filter_records(fun_name=fun_name, stat=stat, value=value, tags=tags) 202 | ) 203 | 204 | def print_records(self): 205 | """Print all the collected metrics.""" 206 | for record in self.get_records(): 207 | print(f"{record!r}") 208 | 209 | def clear_records(self): 210 | """Clear the records list.""" 211 | self.records = [] 212 | 213 | @print_on_failure 214 | def assert_incr( 215 | self, 216 | stat: Optional[str], 217 | value: Optional[int] = 1, 218 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 219 | ): 220 | """Asserts an incr was emitted at least once.""" 221 | assert len(self.filter_records(INCR, stat=stat, value=value, tags=tags)) >= 1 222 | 223 | @print_on_failure 224 | def assert_incr_once( 225 | self, 226 | stat: Optional[str], 227 | value: Optional[int] = 1, 228 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 229 | ): 230 | """Asserts an incr was emitted exactly once.""" 231 | assert len(self.filter_records(INCR, stat=stat, value=value, tags=tags)) == 1 232 | 233 | @print_on_failure 234 | def assert_not_incr( 235 | self, 236 | stat: Optional[str], 237 | value: Optional[int] = 1, 238 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 239 | ): 240 | """Asserts an incr was not emitted.""" 241 | assert len(self.filter_records(INCR, stat=stat, value=value, tags=tags)) == 0 242 | 243 | @print_on_failure 244 | def assert_gauge( 245 | self, 246 | stat: Optional[str], 247 | value: Optional[int] = None, 248 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 249 | ): 250 | """Asserts a gauge was emitted at least once.""" 251 | assert len(self.filter_records(GAUGE, stat=stat, value=value, tags=tags)) >= 1 252 | 253 | @print_on_failure 254 | def assert_gauge_once( 255 | self, 256 | stat: Optional[str], 257 | value: Optional[int] = None, 258 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 259 | ): 260 | """Asserts a gauge was emitted exactly once.""" 261 | assert len(self.filter_records(GAUGE, stat=stat, value=value, tags=tags)) == 1 262 | 263 | @print_on_failure 264 | def assert_not_gauge( 265 | self, 266 | stat: Optional[str], 267 | value: Optional[int] = None, 268 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 269 | ): 270 | """Asserts a gauge was not emitted.""" 271 | assert len(self.filter_records(GAUGE, stat=stat, value=value, tags=tags)) == 0 272 | 273 | @print_on_failure 274 | def assert_timing( 275 | self, 276 | stat: Optional[str], 277 | value: Optional[float] = None, 278 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 279 | ): 280 | """Asserts a timing was emitted at least once.""" 281 | assert len(self.filter_records(TIMING, stat=stat, value=value, tags=tags)) >= 1 282 | 283 | @print_on_failure 284 | def assert_timing_once( 285 | self, 286 | stat: Optional[str], 287 | value: Optional[float] = None, 288 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 289 | ): 290 | """Asserts a timing was emitted exactly once.""" 291 | assert len(self.filter_records(TIMING, stat=stat, value=value, tags=tags)) == 1 292 | 293 | @print_on_failure 294 | def assert_not_timing( 295 | self, 296 | stat: Optional[str], 297 | value: Optional[float] = None, 298 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 299 | ): 300 | """Asserts a timing was not emitted.""" 301 | assert 
len(self.filter_records(TIMING, stat=stat, value=value, tags=tags)) == 0 302 | 303 | @print_on_failure 304 | def assert_histogram( 305 | self, 306 | stat: Optional[str], 307 | value: Optional[float] = None, 308 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 309 | ): 310 | """Asserts a histogram was emitted at least once.""" 311 | assert ( 312 | len(self.filter_records(HISTOGRAM, stat=stat, value=value, tags=tags)) >= 1 313 | ) 314 | 315 | @print_on_failure 316 | def assert_histogram_once( 317 | self, 318 | stat: Optional[str], 319 | value: Optional[float] = None, 320 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 321 | ): 322 | """Asserts a histogram was emitted exactly once.""" 323 | assert ( 324 | len(self.filter_records(HISTOGRAM, stat=stat, value=value, tags=tags)) == 1 325 | ) 326 | 327 | @print_on_failure 328 | def assert_not_histogram( 329 | self, 330 | stat: Optional[str], 331 | value: Optional[float] = None, 332 | tags: Optional[List[Union[str, AnyTagValue]]] = None, 333 | ): 334 | """Asserts a histogram was not emitted.""" 335 | assert ( 336 | len(self.filter_records(HISTOGRAM, stat=stat, value=value, tags=tags)) == 0 337 | ) 338 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License, version 2.0 2 | 3 | 1. Definitions 4 | 5 | 1.1. “Contributor” 6 | 7 | means each individual or legal entity that creates, contributes to the 8 | creation of, or owns Covered Software. 9 | 10 | 1.2. “Contributor Version” 11 | 12 | means the combination of the Contributions of others (if any) used by a 13 | Contributor and that particular Contributor’s Contribution. 14 | 15 | 1.3. “Contribution” 16 | 17 | means Covered Software of a particular Contributor. 18 | 19 | 1.4. “Covered Software” 20 | 21 | means Source Code Form to which the initial Contributor has attached the 22 | notice in Exhibit A, the Executable Form of such Source Code Form, and 23 | Modifications of such Source Code Form, in each case including portions 24 | thereof. 25 | 26 | 1.5. “Incompatible With Secondary Licenses” 27 | means 28 | 29 | a. that the initial Contributor has attached the notice described in 30 | Exhibit B to the Covered Software; or 31 | 32 | b. that the Covered Software was made available under the terms of version 33 | 1.1 or earlier of the License, but not also under the terms of a 34 | Secondary License. 35 | 36 | 1.6. “Executable Form” 37 | 38 | means any form of the work other than Source Code Form. 39 | 40 | 1.7. “Larger Work” 41 | 42 | means a work that combines Covered Software with other material, in a separate 43 | file or files, that is not Covered Software. 44 | 45 | 1.8. “License” 46 | 47 | means this document. 48 | 49 | 1.9. “Licensable” 50 | 51 | means having the right to grant, to the maximum extent possible, whether at the 52 | time of the initial grant or subsequently, any and all of the rights conveyed by 53 | this License. 54 | 55 | 1.10. “Modifications” 56 | 57 | means any of the following: 58 | 59 | a. any file in Source Code Form that results from an addition to, deletion 60 | from, or modification of the contents of Covered Software; or 61 | 62 | b. any new file in Source Code Form that contains any Covered Software. 63 | 64 | 1.11. 
“Patent Claims” of a Contributor 65 | 66 | means any patent claim(s), including without limitation, method, process, 67 | and apparatus claims, in any patent Licensable by such Contributor that 68 | would be infringed, but for the grant of the License, by the making, 69 | using, selling, offering for sale, having made, import, or transfer of 70 | either its Contributions or its Contributor Version. 71 | 72 | 1.12. “Secondary License” 73 | 74 | means either the GNU General Public License, Version 2.0, the GNU Lesser 75 | General Public License, Version 2.1, the GNU Affero General Public 76 | License, Version 3.0, or any later versions of those licenses. 77 | 78 | 1.13. “Source Code Form” 79 | 80 | means the form of the work preferred for making modifications. 81 | 82 | 1.14. “You” (or “Your”) 83 | 84 | means an individual or a legal entity exercising rights under this 85 | License. For legal entities, “You” includes any entity that controls, is 86 | controlled by, or is under common control with You. For purposes of this 87 | definition, “control” means (a) the power, direct or indirect, to cause 88 | the direction or management of such entity, whether by contract or 89 | otherwise, or (b) ownership of more than fifty percent (50%) of the 90 | outstanding shares or beneficial ownership of such entity. 91 | 92 | 93 | 2. License Grants and Conditions 94 | 95 | 2.1. Grants 96 | 97 | Each Contributor hereby grants You a world-wide, royalty-free, 98 | non-exclusive license: 99 | 100 | a. under intellectual property rights (other than patent or trademark) 101 | Licensable by such Contributor to use, reproduce, make available, 102 | modify, display, perform, distribute, and otherwise exploit its 103 | Contributions, either on an unmodified basis, with Modifications, or as 104 | part of a Larger Work; and 105 | 106 | b. under Patent Claims of such Contributor to make, use, sell, offer for 107 | sale, have made, import, and otherwise transfer either its Contributions 108 | or its Contributor Version. 109 | 110 | 2.2. Effective Date 111 | 112 | The licenses granted in Section 2.1 with respect to any Contribution become 113 | effective for each Contribution on the date the Contributor first distributes 114 | such Contribution. 115 | 116 | 2.3. Limitations on Grant Scope 117 | 118 | The licenses granted in this Section 2 are the only rights granted under this 119 | License. No additional rights or licenses will be implied from the distribution 120 | or licensing of Covered Software under this License. Notwithstanding Section 121 | 2.1(b) above, no patent license is granted by a Contributor: 122 | 123 | a. for any code that a Contributor has removed from Covered Software; or 124 | 125 | b. for infringements caused by: (i) Your and any other third party’s 126 | modifications of Covered Software, or (ii) the combination of its 127 | Contributions with other software (except as part of its Contributor 128 | Version); or 129 | 130 | c. under Patent Claims infringed by Covered Software in the absence of its 131 | Contributions. 132 | 133 | This License does not grant any rights in the trademarks, service marks, or 134 | logos of any Contributor (except as may be necessary to comply with the 135 | notice requirements in Section 3.4). 136 | 137 | 2.4. 
Subsequent Licenses 138 | 139 | No Contributor makes additional grants as a result of Your choice to 140 | distribute the Covered Software under a subsequent version of this License 141 | (see Section 10.2) or under the terms of a Secondary License (if permitted 142 | under the terms of Section 3.3). 143 | 144 | 2.5. Representation 145 | 146 | Each Contributor represents that the Contributor believes its Contributions 147 | are its original creation(s) or it has sufficient rights to grant the 148 | rights to its Contributions conveyed by this License. 149 | 150 | 2.6. Fair Use 151 | 152 | This License is not intended to limit any rights You have under applicable 153 | copyright doctrines of fair use, fair dealing, or other equivalents. 154 | 155 | 2.7. Conditions 156 | 157 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in 158 | Section 2.1. 159 | 160 | 161 | 3. Responsibilities 162 | 163 | 3.1. Distribution of Source Form 164 | 165 | All distribution of Covered Software in Source Code Form, including any 166 | Modifications that You create or to which You contribute, must be under the 167 | terms of this License. You must inform recipients that the Source Code Form 168 | of the Covered Software is governed by the terms of this License, and how 169 | they can obtain a copy of this License. You may not attempt to alter or 170 | restrict the recipients’ rights in the Source Code Form. 171 | 172 | 3.2. Distribution of Executable Form 173 | 174 | If You distribute Covered Software in Executable Form then: 175 | 176 | a. such Covered Software must also be made available in Source Code Form, 177 | as described in Section 3.1, and You must inform recipients of the 178 | Executable Form how they can obtain a copy of such Source Code Form by 179 | reasonable means in a timely manner, at a charge no more than the cost 180 | of distribution to the recipient; and 181 | 182 | b. You may distribute such Executable Form under the terms of this License, 183 | or sublicense it under different terms, provided that the license for 184 | the Executable Form does not attempt to limit or alter the recipients’ 185 | rights in the Source Code Form under this License. 186 | 187 | 3.3. Distribution of a Larger Work 188 | 189 | You may create and distribute a Larger Work under terms of Your choice, 190 | provided that You also comply with the requirements of this License for the 191 | Covered Software. If the Larger Work is a combination of Covered Software 192 | with a work governed by one or more Secondary Licenses, and the Covered 193 | Software is not Incompatible With Secondary Licenses, this License permits 194 | You to additionally distribute such Covered Software under the terms of 195 | such Secondary License(s), so that the recipient of the Larger Work may, at 196 | their option, further distribute the Covered Software under the terms of 197 | either this License or such Secondary License(s). 198 | 199 | 3.4. Notices 200 | 201 | You may not remove or alter the substance of any license notices (including 202 | copyright notices, patent notices, disclaimers of warranty, or limitations 203 | of liability) contained within the Source Code Form of the Covered 204 | Software, except that You may alter any license notices to the extent 205 | required to remedy known factual inaccuracies. 206 | 207 | 3.5. 
Application of Additional Terms 208 | 209 | You may choose to offer, and to charge a fee for, warranty, support, 210 | indemnity or liability obligations to one or more recipients of Covered 211 | Software. However, You may do so only on Your own behalf, and not on behalf 212 | of any Contributor. You must make it absolutely clear that any such 213 | warranty, support, indemnity, or liability obligation is offered by You 214 | alone, and You hereby agree to indemnify every Contributor for any 215 | liability incurred by such Contributor as a result of warranty, support, 216 | indemnity or liability terms You offer. You may include additional 217 | disclaimers of warranty and limitations of liability specific to any 218 | jurisdiction. 219 | 220 | 4. Inability to Comply Due to Statute or Regulation 221 | 222 | If it is impossible for You to comply with any of the terms of this License 223 | with respect to some or all of the Covered Software due to statute, judicial 224 | order, or regulation then You must: (a) comply with the terms of this License 225 | to the maximum extent possible; and (b) describe the limitations and the code 226 | they affect. Such description must be placed in a text file included with all 227 | distributions of the Covered Software under this License. Except to the 228 | extent prohibited by statute or regulation, such description must be 229 | sufficiently detailed for a recipient of ordinary skill to be able to 230 | understand it. 231 | 232 | 5. Termination 233 | 234 | 5.1. The rights granted under this License will terminate automatically if You 235 | fail to comply with any of its terms. However, if You become compliant, 236 | then the rights granted under this License from a particular Contributor 237 | are reinstated (a) provisionally, unless and until such Contributor 238 | explicitly and finally terminates Your grants, and (b) on an ongoing basis, 239 | if such Contributor fails to notify You of the non-compliance by some 240 | reasonable means prior to 60 days after You have come back into compliance. 241 | Moreover, Your grants from a particular Contributor are reinstated on an 242 | ongoing basis if such Contributor notifies You of the non-compliance by 243 | some reasonable means, this is the first time You have received notice of 244 | non-compliance with this License from such Contributor, and You become 245 | compliant prior to 30 days after Your receipt of the notice. 246 | 247 | 5.2. If You initiate litigation against any entity by asserting a patent 248 | infringement claim (excluding declaratory judgment actions, counter-claims, 249 | and cross-claims) alleging that a Contributor Version directly or 250 | indirectly infringes any patent, then the rights granted to You by any and 251 | all Contributors for the Covered Software under Section 2.1 of this License 252 | shall terminate. 253 | 254 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user 255 | license agreements (excluding distributors and resellers) which have been 256 | validly granted by You or Your distributors under this License prior to 257 | termination shall survive termination. 258 | 259 | 6. Disclaimer of Warranty 260 | 261 | Covered Software is provided under this License on an “as is” basis, without 262 | warranty of any kind, either expressed, implied, or statutory, including, 263 | without limitation, warranties that the Covered Software is free of defects, 264 | merchantable, fit for a particular purpose or non-infringing. 
The entire 265 | risk as to the quality and performance of the Covered Software is with You. 266 | Should any Covered Software prove defective in any respect, You (not any 267 | Contributor) assume the cost of any necessary servicing, repair, or 268 | correction. This disclaimer of warranty constitutes an essential part of this 269 | License. No use of any Covered Software is authorized under this License 270 | except under this disclaimer. 271 | 272 | 7. Limitation of Liability 273 | 274 | Under no circumstances and under no legal theory, whether tort (including 275 | negligence), contract, or otherwise, shall any Contributor, or anyone who 276 | distributes Covered Software as permitted above, be liable to You for any 277 | direct, indirect, special, incidental, or consequential damages of any 278 | character including, without limitation, damages for lost profits, loss of 279 | goodwill, work stoppage, computer failure or malfunction, or any and all 280 | other commercial damages or losses, even if such party shall have been 281 | informed of the possibility of such damages. This limitation of liability 282 | shall not apply to liability for death or personal injury resulting from such 283 | party’s negligence to the extent applicable law prohibits such limitation. 284 | Some jurisdictions do not allow the exclusion or limitation of incidental or 285 | consequential damages, so this exclusion and limitation may not apply to You. 286 | 287 | 8. Litigation 288 | 289 | Any litigation relating to this License may be brought only in the courts of 290 | a jurisdiction where the defendant maintains its principal place of business 291 | and such litigation shall be governed by laws of that jurisdiction, without 292 | reference to its conflict-of-law provisions. Nothing in this Section shall 293 | prevent a party’s ability to bring cross-claims or counter-claims. 294 | 295 | 9. Miscellaneous 296 | 297 | This License represents the complete agreement concerning the subject matter 298 | hereof. If any provision of this License is held to be unenforceable, such 299 | provision shall be reformed only to the extent necessary to make it 300 | enforceable. Any law or regulation which provides that the language of a 301 | contract shall be construed against the drafter shall not be used to construe 302 | this License against a Contributor. 303 | 304 | 305 | 10. Versions of the License 306 | 307 | 10.1. New Versions 308 | 309 | Mozilla Foundation is the license steward. Except as provided in Section 310 | 10.3, no one other than the license steward has the right to modify or 311 | publish new versions of this License. Each version will be given a 312 | distinguishing version number. 313 | 314 | 10.2. Effect of New Versions 315 | 316 | You may distribute the Covered Software under the terms of the version of 317 | the License under which You originally received the Covered Software, or 318 | under the terms of any subsequent version published by the license 319 | steward. 320 | 321 | 10.3. Modified Versions 322 | 323 | If you create software not governed by this License, and you want to 324 | create a new license for such software, you may create and use a modified 325 | version of this License if you rename the license and remove any 326 | references to the name of the license steward (except to note that such 327 | modified license differs from this License). 328 | 329 | 10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses 330 | If You choose to distribute Source Code Form that is Incompatible With 331 | Secondary Licenses under the terms of this version of the License, the 332 | notice described in Exhibit B of this License must be attached. 333 | 334 | Exhibit A - Source Code Form License Notice 335 | 336 | This Source Code Form is subject to the 337 | terms of the Mozilla Public License, v. 338 | 2.0. If a copy of the MPL was not 339 | distributed with this file, You can 340 | obtain one at 341 | http://mozilla.org/MPL/2.0/. 342 | 343 | If it is not possible or desirable to put the notice in a particular file, then 344 | You may include the notice in a location (such as a LICENSE file in a relevant 345 | directory) where a recipient would be likely to look for such a notice. 346 | 347 | You may add additional accurate notices of copyright ownership. 348 | 349 | Exhibit B - “Incompatible With Secondary Licenses” Notice 350 | 351 | This Source Code Form is “Incompatible 352 | With Secondary Licenses”, as defined by 353 | the Mozilla Public License, v. 2.0. 354 | 355 | -------------------------------------------------------------------------------- /src/markus/main.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | import contextlib 6 | from functools import wraps 7 | import logging 8 | import re 9 | import sys 10 | import time 11 | 12 | 13 | NOT_ALPHANUM_RE = re.compile(r"[^a-z0-9_\.]", re.I) 14 | CONSECUTIVE_PERIODS_RE = re.compile(r"\.+") 15 | 16 | 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | _override_backends = None 21 | _metrics_backends = [] 22 | 23 | 24 | def _override_metrics(backends): 25 | """Override backends for testing.""" 26 | global _override_backends 27 | _override_backends = backends 28 | 29 | 30 | def _change_metrics(backends): 31 | """Set new backends.""" 32 | global _metrics_backends 33 | _metrics_backends = backends 34 | 35 | 36 | def _get_metrics_backends(): 37 | return _override_backends or _metrics_backends 38 | 39 | 40 | def split_clspath(clspath): 41 | """Split clspath into module and class name. 42 | 43 | NOTE(willkg): This is really simple. Maybe we should use something more 44 | sophisticated? 45 | 46 | """ 47 | return clspath.rsplit(".", 1) 48 | 49 | 50 | def configure(backends, raise_errors=False): 51 | """Instantiate and configure backends. 52 | 53 | :arg list-of-dicts backends: the backend configuration as a list of dicts where 54 | each dict specifies a separate backend. 55 | 56 | Each backend dict consists of three things: 57 | 58 | 1. ``class`` with a value that is either a Python class or a dotted 59 | Python path to one 60 | 61 | 2. (optional) ``options`` dict with options for the backend in question 62 | to configure it 63 | 64 | 3. (optional) ``filters`` list of filters to apply to metrics emitted 65 | by this backend 66 | 67 | See the documentation for the backends you're using to know what is 68 | configurable in the options dict.
69 | 70 | :arg bool raise_errors: whether or not to raise an exception if something 71 | goes wrong during configuration; if it doesn't raise an exception, it'll log 72 | the exception 73 | 74 | For example, this sets up a default 75 | :py:class:`markus.backends.logging.LoggingMetrics` backend:: 76 | 77 | import markus 78 | 79 | markus.configure([ 80 | { 81 | "class": "markus.backends.logging.LoggingMetrics", 82 | } 83 | ]) 84 | 85 | This sets up a 86 | :py:class:`markus.backends.logging.LoggingMetrics` backend with 87 | options:: 88 | 89 | import markus 90 | 91 | markus.configure([ 92 | { 93 | "class": "markus.backends.logging.LoggingMetrics", 94 | "options": { 95 | "logger_name": "metrics" 96 | } 97 | } 98 | ]) 99 | 100 | This sets up a :py:class:`markus.backends.logging.LoggingMetrics` 101 | backend that adds a tag to every metric:: 102 | 103 | import markus 104 | from markus.filters import AddTagFilter 105 | 106 | markus.configure([ 107 | { 108 | "class": "markus.backends.logging.LoggingMetrics", 109 | "filters": [AddTagFilter("color:blue")], 110 | } 111 | ]) 112 | 113 | You can set up zero or more backends. 114 | 115 | .. Note:: 116 | 117 | During application startup, Markus should get configured before the app 118 | starts generating metrics. Any metrics generated before Markus is 119 | configured will get dropped. 120 | 121 | However, anything can call :py:func:`markus.get_metrics` and get a 122 | :py:class:`markus.main.MetricsInterface` before Markus has been 123 | configured, including at module load time. 124 | 125 | """ 126 | good_backends = [] 127 | 128 | for backend in backends: 129 | clspath = backend["class"] 130 | options = backend.get("options", {}) 131 | filters = backend.get("filters", []) 132 | 133 | if isinstance(clspath, str): 134 | modpath, clsname = split_clspath(clspath) 135 | try: 136 | __import__(modpath) 137 | module = sys.modules[modpath] 138 | cls = getattr(module, clsname) 139 | except Exception: 140 | logger.exception("Exception while importing %s", clspath) 141 | if raise_errors: 142 | raise 143 | continue 144 | else: 145 | cls = clspath 146 | 147 | try: 148 | good_backends.append(cls(options=options, filters=filters)) 149 | except Exception: 150 | logger.exception( 151 | "Exception thrown while instantiating %s, %s", clspath, options 152 | ) 153 | if raise_errors: 154 | raise 155 | 156 | _change_metrics(good_backends) 157 | 158 | 159 | class MetricsRecord: 160 | """Record for a single emitted metric. 161 | 162 | :attribute stat_type: the type of the stat ("incr", "gauge", "timing", 163 | "histogram") 164 | :attribute key: the full key for this record 165 | :attribute value: the value for this record 166 | :attribute tags: list of tag strings 167 | 168 | """ 169 | 170 | def __init__(self, stat_type, key, value, tags): 171 | self.stat_type = stat_type 172 | self.key = key 173 | self.value = value 174 | self.tags = tags or [] 175 | 176 | def __repr__(self): 177 | return ( 178 | f"<MetricsRecord " 179 | f"stat_type={self.stat_type} " 180 | f"key={self.key} " 181 | f"value={self.value} " 182 | f"tags={self.tags}>" 183 | ) 184 | 185 | def __eq__(self, obj): 186 | return ( 187 | isinstance(obj, MetricsRecord) 188 | and obj.stat_type == self.stat_type 189 | and obj.key == self.key 190 | and obj.value == self.value 191 | and obj.tags == self.tags 192 | ) 193 | 194 | def __copy__(self): 195 | # NOTE(willkg): the only attribute that's mutable is tags--the rest 196 | # we can just copy verbatim 197 | return MetricsRecord(self.stat_type, self.key, self.value, list(self.tags)) 198 | 199 | 200 | class MetricsFilter: 201 | """Filter class for augmenting metrics.
202 | 203 | Subclass MetricsFilter to build filters that augment metrics as they're 204 | published. 205 | 206 | """ 207 | 208 | def __repr__(self): 209 | return f"<{self.__class__.__name__}>" 210 | 211 | def filter(self, record): 212 | """Filter a record. 213 | 214 | You can adjust a record, return the record as-is, or return ``None`` 215 | which will drop the record from publishing. 216 | 217 | Records are :py:class:`markus.main.MetricsRecord`. 218 | 219 | """ 220 | return record 221 | 222 | 223 | class MetricsInterface: 224 | """Interface to generating metrics. 225 | 226 | When you call methods on this instance, it publishes those metrics to 227 | the configured backends. 228 | 229 | In this way, code can get a :py:class:`markus.main.MetricsInterface` at any 230 | time even before backends have been configured. Further, backends can be 231 | switched around without affecting existing 232 | :py:class:`markus.main.MetricsInterface` instances. 233 | 234 | See :py:func:`markus.get_metrics` for generating 235 | :py:class:`markus.main.MetricsInterface` instances. 236 | 237 | """ 238 | 239 | def __init__(self, prefix, filters=None): 240 | """Create a MetricsInterface. 241 | 242 | :arg str prefix: Use alphanumeric characters and underscore and period. 243 | Anything else gets converted to a period. Sequences of periods get 244 | collapsed to a single period. 245 | 246 | The prefix is prepended to all keys emitted by this metrics 247 | interface. 248 | 249 | :arg list of MetricsFilter filters: list of filters to apply to 250 | records being emitted 251 | 252 | """ 253 | # Convert all bad characters to . 254 | prefix = NOT_ALPHANUM_RE.sub(".", prefix) 255 | # Collapse sequences of . to a single . 256 | prefix = CONSECUTIVE_PERIODS_RE.sub(".", prefix) 257 | # Remove . at beginning and end 258 | self.prefix = prefix.strip(".") 259 | 260 | self.filters = filters or [] 261 | 262 | def __repr__(self): 263 | return "<MetricsInterface prefix=%s filters=%s>" % (self.prefix, repr(self.filters)) 264 | 265 | def _full_stat(self, stat): 266 | if self.prefix: 267 | return self.prefix + "." + stat 268 | else: 269 | return stat 270 | 271 | def _publish(self, record): 272 | """Publish a record to backends. 273 | 274 | If one of the filters rejects the record, then the record does not get 275 | published. 276 | 277 | """ 278 | # First run filters configured on the MetricsInterface 279 | for metrics_filter in self.filters: 280 | record = metrics_filter.filter(record) 281 | if record is None: 282 | return 283 | 284 | for backend in _get_metrics_backends(): 285 | # Copy the record so filtering in one backend doesn't affect other 286 | # backends 287 | fresh_record = record.__copy__() 288 | backend.emit_to_backend(fresh_record) 289 | 290 | def extend_prefix(self, prefix): 291 | """Return a duplicate MetricsInterface with the prefix extended. 292 | 293 | :arg prefix: the prefix to append to the end of the existing prefix 294 | 295 | :returns: a new MetricsInterface with adjusted prefix 296 | 297 | Example:: 298 | 299 | import markus 300 | 301 | metrics = markus.get_metrics("key1") 302 | metrics.incr("stat1") # key1.stat1 303 | 304 | sub_metrics = metrics.extend_prefix("key2") 305 | sub_metrics.incr("stat1") # key1.key2.stat1 306 | 307 | """ 308 | prefix = prefix.strip(".") 309 | 310 | return MetricsInterface( 311 | f"{self.prefix}.{prefix}", 312 | filters=list(self.filters), 313 | ) 314 | 315 | def incr(self, stat, value=1, tags=None): 316 | """Incr is used for counting things. 317 | 318 | :arg string stat: A period delimited alphanumeric key.
319 | 320 | :arg int value: A value to increment the count by. Usually this is 1. 321 | 322 | :arg list-of-strings tags: Each string in the tag consists of a key and 323 | a value separated by a colon. Tags can make it easier to break down 324 | metrics for analysis. 325 | 326 | For example ``["env:stage", "compressed:yes"]``. 327 | 328 | To pass no tags, either pass an empty list or ``None``. 329 | 330 | For example: 331 | 332 | >>> import markus 333 | >>> metrics = markus.get_metrics("foo") 334 | >>> def chop_vegetable(kind): 335 | ... # chop chop chop 336 | ... metrics.incr("vegetable", value=1) 337 | 338 | You can also use incr to decrement by passing a negative value. 339 | 340 | """ 341 | self._publish( 342 | MetricsRecord( 343 | stat_type="incr", key=self._full_stat(stat), value=value, tags=tags 344 | ) 345 | ) 346 | 347 | def gauge(self, stat, value, tags=None): 348 | """Gauges are used for measuring things. 349 | 350 | :arg string stat: A period delimited alphanumeric key. 351 | 352 | :arg int value: The measured value of the thing being measured. 353 | 354 | :arg list-of-strings tags: Each string in the tag consists of a key and 355 | a value separated by a colon. Tags can make it easier to break down 356 | metrics for analysis. 357 | 358 | For example ``["env:stage", "compressed:yes"]``. 359 | 360 | To pass no tags, either pass an empty list or ``None``. 361 | 362 | For example: 363 | 364 | >>> import markus 365 | >>> metrics = markus.get_metrics("foo") 366 | >>> def parse_payload(payload): 367 | ... metrics.gauge("payload_size", value=len(payload)) 368 | ... # parse parse parse 369 | 370 | """ 371 | self._publish( 372 | MetricsRecord( 373 | stat_type="gauge", key=self._full_stat(stat), value=value, tags=tags 374 | ) 375 | ) 376 | 377 | def timing(self, stat, value, tags=None): 378 | """Record a timing value. 379 | 380 | Record the length of time of something to be added to a set of values from 381 | which a statistical distribution is derived. 382 | 383 | Depending on the backend, you might end up with count, average, median, 384 | 95% and max for a set of timing values. 385 | 386 | This is useful for analyzing how long things take to occur. For 387 | example, how long it takes for a function to run, to upload files, or 388 | for a database query to execute. 389 | 390 | :arg string stat: A period delimited alphanumeric key. 391 | 392 | :arg int value: A timing in milliseconds. 393 | 394 | :arg list-of-strings tags: Each string in the tag consists of a key and 395 | a value separated by a colon. Tags can make it easier to break down 396 | metrics for analysis. 397 | 398 | For example ``["env:stage", "compressed:yes"]``. 399 | 400 | To pass no tags, either pass an empty list or ``None``. 401 | 402 | For example: 403 | 404 | >>> import time 405 | >>> import markus 406 | >>> metrics = markus.get_metrics("foo") 407 | >>> def upload_file(payload): 408 | ... start_time = time.perf_counter() # this is in seconds 409 | ... # upload the file 410 | ... timing = (time.perf_counter() - start_time) * 1000.0 # convert to ms 411 | ... metrics.timing("upload_file_time", value=timing) 412 | 413 | .. Note:: 414 | 415 | If you're timing a function or a block of code, it's probably more 416 | convenient to use :py:meth:`markus.main.MetricsInterface.timer` or 417 | :py:meth:`markus.main.MetricsInterface.timer_decorator`. 
418 | 419 | """ 420 | self._publish( 421 | MetricsRecord( 422 | stat_type="timing", key=self._full_stat(stat), value=value, tags=tags 423 | ) 424 | ) 425 | 426 | def histogram(self, stat, value, tags=None): 427 | """Record a histogram value. 428 | 429 | Record a value to be added to a set of values from which a statistical 430 | distribution is derived. 431 | 432 | Depending on the backend, you might end up with count, average, median, 433 | 95% and max for a set of values. 434 | 435 | This is useful for analyzing distributions of values. For example, 436 | what's the median and 95% upload file size? What's the most expensive 437 | thing sold? 438 | 439 | :arg string stat: A period delimited alphanumeric key. 440 | 441 | :arg int value: The value of the thing. 442 | 443 | :arg list-of-strings tags: Each string in the tag consists of a key and 444 | a value separated by a colon. Tags can make it easier to break down 445 | metrics for analysis. 446 | 447 | For example ``["env:stage", "compressed:yes"]``. 448 | 449 | To pass no tags, either pass an empty list or ``None``. 450 | 451 | For example: 452 | 453 | >>> import time 454 | >>> import markus 455 | >>> metrics = markus.get_metrics("foo") 456 | >>> def finalize_sale(cart): 457 | ... for item in cart: 458 | ... metrics.histogram("item_cost", value=item.cost) 459 | ... # finish finalizing 460 | 461 | .. Note:: 462 | 463 | For metrics backends that don't have histogram, this will do the 464 | same as timing. 465 | 466 | """ 467 | self._publish( 468 | MetricsRecord( 469 | stat_type="histogram", key=self._full_stat(stat), value=value, tags=tags 470 | ) 471 | ) 472 | 473 | @contextlib.contextmanager 474 | def timer(self, stat, tags=None): 475 | """Contextmanager for easily computing timings. 476 | 477 | :arg string stat: A period delimited alphanumeric key. 478 | 479 | :arg list-of-strings tags: Each string in the tag consists of a key and 480 | a value separated by a colon. Tags can make it easier to break down 481 | metrics for analysis. 482 | 483 | For example ``["env:stage", "compressed:yes"]``. 484 | 485 | To pass no tags, either pass an empty list or ``None``. 486 | 487 | For example: 488 | 489 | >>> mymetrics = get_metrics(__name__) 490 | 491 | >>> def long_function(): 492 | ... with mymetrics.timer('long_function'): 493 | ... # perform some thing we want to keep metrics on 494 | ... pass 495 | 496 | 497 | .. Note:: 498 | 499 | All timings generated with this are in milliseconds. 500 | 501 | """ 502 | start_time = time.perf_counter() 503 | 504 | yield 505 | 506 | end_time = time.perf_counter() 507 | 508 | delta = end_time - start_time 509 | self.timing(stat, value=delta * 1000.0, tags=tags) 510 | 511 | def timer_decorator(self, stat, tags=None): 512 | """Timer decorator for easily computing timings. 513 | 514 | :arg string stat: A period delimited alphanumeric key. 515 | 516 | :arg list-of-strings tags: Each string in the tag consists of a key and 517 | a value separated by a colon. Tags can make it easier to break down 518 | metrics for analysis. 519 | 520 | For example ``["env:stage", "compressed:yes"]``. 521 | 522 | To pass no tags, either pass an empty list or ``None``. 523 | 524 | For example: 525 | 526 | >>> mymetrics = get_metrics(__name__) 527 | >>> @mymetrics.timer_decorator("long_function") 528 | ... def long_function(): 529 | ... # perform some thing we want to keep metrics on 530 | ... pass 531 | 532 | 533 | .. Note:: 534 | 535 | All timings generated with this are in milliseconds. 
536 | 537 |         """ 538 | 539 | def _inner(fun): 540 | @wraps(fun) 541 | def _timer_decorator(*args, **kwargs): 542 | with self.timer(stat, tags): 543 | return fun(*args, **kwargs) 544 | 545 | return _timer_decorator 546 | 547 | return _inner 548 | 549 | 550 | def get_metrics(thing="", extra="", filters=None): 551 | """Return a MetricsInterface instance with the specified prefix. 552 | 553 | The prefix is prepended to all keys emitted with this 554 | :py:class:`markus.main.MetricsInterface`. 555 | 556 | The :py:class:`markus.main.MetricsInterface` is not tied to metrics 557 | backends. The list of active backends is globally configured. This allows 558 | us to create :py:class:`markus.main.MetricsInterface` instances without 559 | having to worry about bootstrapping order of the app. 560 | 561 | :arg class/instance/str thing: The prefix to use for keys generated 562 | with this :py:class:`markus.main.MetricsInterface`. 563 | 564 | If this is a string, then it uses it as a prefix. 565 | 566 | If this is a class, it uses the dotted Python path. 567 | 568 | If this is an instance, it uses the dotted Python path plus 569 | ``str(instance)``. 570 | 571 | :arg str extra: Any extra bits to add to the end of the prefix. 572 | 573 | :arg list of MetricsFilter filters: any filters to apply to all metrics 574 | generated using this MetricsInterface 575 | 576 | :returns: a ``MetricsInterface`` instance 577 | 578 | Examples: 579 | 580 | >>> from markus import get_metrics 581 | 582 | Create a MetricsInterface with the prefix "myapp" and generate a count with 583 | stat "myapp.thing1" and value 1: 584 | 585 | >>> metrics = get_metrics("myapp") 586 | >>> metrics.incr("thing1", value=1) 587 | 588 | Create a MetricsInterface with the prefix of the Python module it's being 589 | called in: 590 | 591 | >>> metrics = get_metrics(__name__) 592 | 593 | Create a MetricsInterface with the prefix as the dotted path of the class: 594 | 595 | >>> class Foo: 596 | ...     def __init__(self): 597 | ...         self.metrics = get_metrics(self) 598 | 599 | Create a prefix of the class path plus some identifying information: 600 | 601 | >>> class Foo: 602 | ...     def __init__(self, myprefix): 603 | ...         self.metrics = get_metrics(self, extra=myprefix) 604 | ... 605 | >>> foo = Foo("jim") 606 | 607 | Assume that ``Foo`` is defined in the ``myapp`` module. Then this will 608 | generate the prefix ``myapp.Foo.jim``. 609 | 610 | Create a MetricsFilter and add it to the metrics interface: 611 | 612 | >>> from markus.main import MetricsFilter 613 | >>> class BlueTagFilter(MetricsFilter): 614 | ...     def filter(self, record): 615 | ...         record.tags.append('color:blue') 616 | ...         return record 617 | ... 618 | >>> metrics = get_metrics('foo', filters=[BlueTagFilter()]) 619 | 620 | """ 621 | thing = thing or "" 622 | 623 | if not isinstance(thing, str): 624 | # If it's not a str, it's either a class or an instance. Handle 625 | # accordingly. 626 | if type(thing) is type: 627 | thing = "%s.%s" % (thing.__module__, thing.__name__) 628 | else: 629 | thing = "%s.%s" % (thing.__class__.__module__, thing.__class__.__name__) 630 | 631 | if extra: 632 | thing = "%s.%s" % (thing, extra) 633 | 634 | return MetricsInterface(thing, filters=filters) 635 | --------------------------------------------------------------------------------
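
Taken together, ``configure``, ``get_metrics``, and ``MetricsFilter`` in ``src/markus/main.py`` cover the typical lifecycle: configure backends once at application startup, create prefixed metrics interfaces anywhere, and emit metrics through them. A minimal end-to-end sketch based only on the docstrings above; the backend class path and filter mirror the documented examples, while the stat names and tags here are purely illustrative::

    import markus
    from markus.main import MetricsFilter


    class BlueTagFilter(MetricsFilter):
        def filter(self, record):
            # Add a tag to every record that passes through this filter.
            record.tags.append("color:blue")
            return record


    # Configure backends once at application startup; metrics emitted
    # before this call are dropped.
    markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}])

    # MetricsInterface instances can be created before or after configure().
    metrics = markus.get_metrics(__name__, filters=[BlueTagFilter()])

    # Illustrative stat name and tag.
    metrics.incr("requests", value=1, tags=["env:stage"])


    @metrics.timer_decorator("long_function")
    def long_function():
        # A timing in milliseconds is emitted when this function returns.
        pass


    long_function()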