├── tests ├── __init__.py ├── proc │ ├── 584 │ │ └── stat │ ├── 26231 │ │ ├── fd │ │ │ ├── 0 │ │ │ ├── 1 │ │ │ ├── 2 │ │ │ ├── 3 │ │ │ └── 4 │ │ ├── stat │ │ └── limits │ └── stat ├── test_twisted.py ├── test_graphite_bridge.py ├── test_process_collector.py ├── test_parser.py ├── test_exposition.py └── test_core.py ├── prometheus_client ├── bridge │ ├── __init__.py │ └── graphite.py ├── twisted │ ├── __init__.py │ └── _exposition.py ├── __init__.py ├── process_collector.py ├── exposition.py ├── parser.py └── core.py ├── .gitignore ├── NOTICE ├── AUTHORS.md ├── CONTRIBUTING.md ├── setup.py ├── LICENSE └── README.md /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/proc/26231/fd/0: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/proc/26231/fd/1: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/proc/26231/fd/2: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/proc/26231/fd/3: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/proc/26231/fd/4: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /prometheus_client/bridge/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | dist 3 | *.egg-info 4 | *.pyc 5 | *.swp 6 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Prometheus instrumentation library for Python applications 2 | Copyright 2015 The Prometheus Authors 3 | -------------------------------------------------------------------------------- /prometheus_client/twisted/__init__.py: -------------------------------------------------------------------------------- 1 | from ._exposition import MetricsResource 2 | 3 | __all__ = ['MetricsResource'] 4 | -------------------------------------------------------------------------------- /tests/proc/584/stat: -------------------------------------------------------------------------------- 1 | 1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 2 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | Maintainers of this repository: 2 | 3 | * Brian Brazil 4 | 5 | The following individuals have contributed code to this repository 6 | (listed in alphabetical order): 7 | 8 | * Andrea 
Fagan 9 | * Brian Brazil 10 | * Paul Logston 11 | 12 | -------------------------------------------------------------------------------- /tests/proc/26231/stat: -------------------------------------------------------------------------------- 1 | 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 2 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Prometheus uses GitHub to manage reviews of pull requests. 4 | 5 | * If you have a trivial fix or improvement, go ahead and create a pull 6 | request, addressing (with `@...`) one or more of the maintainers 7 | (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. 8 | 9 | * If you plan to do something more involved, first discuss your ideas 10 | on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). 11 | This will avoid unnecessary work and surely give you and us a good deal 12 | of inspiration. 13 | -------------------------------------------------------------------------------- /prometheus_client/twisted/_exposition.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, unicode_literals 2 | from .. import REGISTRY, generate_latest, CONTENT_TYPE_LATEST 3 | 4 | from twisted.web.resource import Resource 5 | 6 | 7 | class MetricsResource(Resource): 8 | """ 9 | Twisted ``Resource`` that serves prometheus metrics. 
10 | """ 11 | isLeaf = True 12 | 13 | def __init__(self, registry=REGISTRY): 14 | self.registry = registry 15 | 16 | def render_GET(self, request): 17 | request.setHeader(b'Content-Type', CONTENT_TYPE_LATEST.encode('ascii')) 18 | return generate_latest(self.registry) 19 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup 3 | 4 | setup( 5 | name = "prometheus_client", 6 | version = "0.0.13", 7 | author = "Brian Brazil", 8 | author_email = "brian.brazil@robustperception.io", 9 | description = ("Python client for the Prometheus monitoring system."), 10 | long_description = ("See https://github.com/prometheus/client_python/blob/master/README.md for documentation."), 11 | license = "Apache Software License 2.0", 12 | keywords = "prometheus monitoring instrumentation client", 13 | url = "https://github.com/prometheus/client_python", 14 | packages=['prometheus_client', 'prometheus_client.bridge', 'prometheus_client.twisted'], 15 | extras_requires={ 16 | 'twisted': ['twisted'], 17 | }, 18 | test_suite="tests", 19 | classifiers=[ 20 | "Development Status :: 4 - Beta", 21 | "Intended Audience :: Developers", 22 | "Intended Audience :: Information Technology", 23 | "Intended Audience :: System Administrators", 24 | "Topic :: System :: Monitoring", 25 | "License :: OSI Approved :: Apache Software License", 26 | ], 27 | ) 28 | -------------------------------------------------------------------------------- /tests/proc/26231/limits: -------------------------------------------------------------------------------- 1 | Limit Soft Limit Hard Limit Units 2 | Max cpu time unlimited unlimited seconds 3 | Max file size unlimited unlimited bytes 4 | Max data size unlimited unlimited bytes 5 | Max stack size 8388608 unlimited bytes 6 | Max core file size 0 unlimited bytes 7 | Max resident set unlimited unlimited bytes 8 | Max processes 62898 62898 processes 9 | Max open files 2048 4096 files 10 | Max locked memory 65536 65536 bytes 11 | Max address space unlimited unlimited bytes 12 | Max file locks unlimited unlimited locks 13 | Max pending signals 62898 62898 signals 14 | Max msgqueue size 819200 819200 bytes 15 | Max nice priority 0 0 16 | -------------------------------------------------------------------------------- /prometheus_client/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from . import core 4 | from . import exposition 5 | from . 
import process_collector 6 | 7 | __all__ = ['Counter', 'Gauge', 'Summary', 'Histogram'] 8 | # http://stackoverflow.com/questions/19913653/no-unicode-in-all-for-a-packages-init 9 | __all__ = [n.encode('ascii') for n in __all__] 10 | 11 | CollectorRegistry = core.CollectorRegistry 12 | REGISTRY = core.REGISTRY 13 | Metric = core.Metric 14 | Counter = core.Counter 15 | Gauge = core.Gauge 16 | Summary = core.Summary 17 | Histogram = core.Histogram 18 | 19 | CONTENT_TYPE_LATEST = exposition.CONTENT_TYPE_LATEST 20 | generate_latest = exposition.generate_latest 21 | MetricsHandler = exposition.MetricsHandler 22 | make_wsgi_app = exposition.make_wsgi_app 23 | start_http_server = exposition.start_http_server 24 | start_wsgi_server = exposition.start_wsgi_server 25 | write_to_textfile = exposition.write_to_textfile 26 | push_to_gateway = exposition.push_to_gateway 27 | pushadd_to_gateway = exposition.pushadd_to_gateway 28 | delete_from_gateway = exposition.delete_from_gateway 29 | instance_ip_grouping_key = exposition.instance_ip_grouping_key 30 | 31 | ProcessCollector = process_collector.ProcessCollector 32 | PROCESS_COLLECTOR = process_collector.PROCESS_COLLECTOR 33 | 34 | 35 | if __name__ == '__main__': 36 | c = Counter('cc', 'A counter') 37 | c.inc() 38 | 39 | g = Gauge('gg', 'A gauge') 40 | g.set(17) 41 | 42 | s = Summary('ss', 'A summary', ['a', 'b']) 43 | s.labels('c', 'd').observe(17) 44 | 45 | h = Histogram('hh', 'A histogram') 46 | h.observe(.6) 47 | 48 | start_http_server(8000) 49 | import time 50 | while True: 51 | time.sleep(1) 52 | -------------------------------------------------------------------------------- /tests/test_twisted.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, unicode_literals 2 | 3 | from unittest import skipUnless 4 | 5 | from prometheus_client import Counter 6 | from prometheus_client import CollectorRegistry, generate_latest 7 | 8 | try: 9 | from prometheus_client.twisted import MetricsResource 10 | 11 | from twisted.trial.unittest import TestCase 12 | from twisted.web.server import Site 13 | from twisted.web.resource import Resource 14 | from twisted.internet import reactor 15 | from twisted.web.client import Agent 16 | from twisted.web.client import readBody 17 | HAVE_TWISTED = True 18 | except ImportError: 19 | from unittest import TestCase 20 | HAVE_TWISTED = False 21 | 22 | 23 | class MetricsResourceTest(TestCase): 24 | @skipUnless(HAVE_TWISTED, "Don't have twisted installed.") 25 | def setUp(self): 26 | self.registry = CollectorRegistry() 27 | 28 | def test_reports_metrics(self): 29 | """ 30 | ``MetricsResource`` serves the metrics from the provided registry. 
31 | """ 32 | c = Counter('cc', 'A counter', registry=self.registry) 33 | c.inc() 34 | 35 | root = Resource() 36 | root.putChild(b'metrics', MetricsResource(registry=self.registry)) 37 | server = reactor.listenTCP(0, Site(root)) 38 | self.addCleanup(server.stopListening) 39 | 40 | agent = Agent(reactor) 41 | port = server.getHost().port 42 | url = "http://localhost:{port}/metrics".format(port=port) 43 | d = agent.request(b"GET", url.encode("ascii")) 44 | 45 | d.addCallback(readBody) 46 | d.addCallback(self.assertEqual, generate_latest(self.registry)) 47 | 48 | return d 49 | -------------------------------------------------------------------------------- /tests/proc/stat: -------------------------------------------------------------------------------- 1 | cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 2 | cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 3 | cpu1 47869 23 16474 1110787 591 0 46 0 0 0 4 | cpu2 46504 36 15916 1112321 441 0 326 0 0 0 5 | cpu3 47054 102 15683 1113230 533 0 60 0 0 0 6 | cpu4 28413 25 10776 1140321 217 0 8 0 0 0 7 | cpu5 29271 101 11586 1136270 672 0 30 0 0 0 8 | cpu6 29152 36 10276 1139721 319 0 29 0 0 0 9 | cpu7 29098 268 10164 1139282 555 0 31 0 0 0 10 | intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 | ctxt 38014093 12 | btime 1418183276 13 | processes 26442 14 | procs_running 2 15 | procs_blocked 0 16 | softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 17 | -------------------------------------------------------------------------------- /tests/test_graphite_bridge.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import threading 3 | try: 4 | import SocketServer 5 | except ImportError: 6 | import socketserver as SocketServer 7 | 8 | from prometheus_client import Counter, CollectorRegistry 9 | from prometheus_client.bridge.graphite import GraphiteBridge 10 | 11 | class FakeTime(object): 12 | def time(self): 13 | return 1434898897.5 14 | 15 | class TestGraphiteBridge(unittest.TestCase): 16 | def setUp(self): 17 | 
self.registry = CollectorRegistry() 18 | 19 | self.data = '' 20 | class TCPHandler(SocketServer.BaseRequestHandler): 21 | def handle(s): 22 | self.data = s.request.recv(1024) 23 | server = SocketServer.TCPServer(('', 0), TCPHandler) 24 | class ServingThread(threading.Thread): 25 | def run(self): 26 | server.handle_request() 27 | server.socket.close() 28 | self.t = ServingThread() 29 | self.t.start() 30 | 31 | # Explicitly use localhost as the target host, since connecting to 0.0.0.0 fails on Windows 32 | address = ('localhost', server.server_address[1]) 33 | self.gb = GraphiteBridge(address, self.registry, _time=FakeTime()) 34 | 35 | def test_nolabels(self): 36 | counter = Counter('c', 'help', registry=self.registry) 37 | counter.inc() 38 | 39 | self.gb.push() 40 | self.t.join() 41 | 42 | self.assertEqual(b'c 1.0 1434898897\n', self.data) 43 | 44 | def test_labels(self): 45 | labels = Counter('labels', 'help', ['a', 'b'], registry=self.registry) 46 | labels.labels('c', 'd').inc() 47 | 48 | self.gb.push() 49 | self.t.join() 50 | 51 | self.assertEqual(b'labels.a.c.b.d 1.0 1434898897\n', self.data) 52 | 53 | def test_prefix(self): 54 | labels = Counter('labels', 'help', ['a', 'b'], registry=self.registry) 55 | labels.labels('c', 'd').inc() 56 | 57 | self.gb.push(prefix = 'pre.fix') 58 | self.t.join() 59 | 60 | self.assertEqual(b'pre.fix.labels.a.c.b.d 1.0 1434898897\n', self.data) 61 | 62 | def test_sanitizing(self): 63 | labels = Counter('labels', 'help', ['a'], registry=self.registry) 64 | labels.labels('c.:8').inc() 65 | 66 | self.gb.push() 67 | self.t.join() 68 | 69 | self.assertEqual(b'labels.a.c__8 1.0 1434898897\n', self.data) 70 | -------------------------------------------------------------------------------- /prometheus_client/bridge/graphite.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | from __future__ import unicode_literals 3 | 4 | import logging 5 | import re 6 | import socket 7 | import time 8 | import threading 9 | 10 | from .. import core 11 | 12 | # Roughly, have to keep to what works as a file name. 13 | # We also remove periods, so labels can be distinguished. 14 | _INVALID_GRAPHITE_CHARS = re.compile(r"[^a-zA-Z0-9_-]") 15 | 16 | 17 | def _sanitize(s): 18 | return _INVALID_GRAPHITE_CHARS.sub('_', s) 19 | 20 | 21 | class _RegularPush(threading.Thread): 22 | def __init__(self, pusher, interval, prefix): 23 | super(_RegularPush, self).__init__() 24 | self._pusher = pusher 25 | self._interval = interval 26 | self._prefix = prefix 27 | 28 | def run(self): 29 | wait_until = time.time() 30 | while True: 31 | while True: 32 | now = time.time() 33 | if now >= wait_until: 34 | # May need to skip some pushes. 35 | while wait_until < now: 36 | wait_until += self._interval 37 | break 38 | # time.sleep can return early. 39 | time.sleep(wait_until - now) 40 | try: 41 | self._pusher.push(prefix=self._prefix) 42 | except IOError: 43 | logging.exception("Push failed") 44 | 45 | 46 | class GraphiteBridge(object): 47 | def __init__(self, address, registry=core.REGISTRY, timeout_seconds=30, _time=time): 48 | self._address = address 49 | self._registry = registry 50 | self._timeout = timeout_seconds 51 | self._time = _time 52 | 53 | def push(self, prefix=''): 54 | now = int(self._time.time()) 55 | output = [] 56 | 57 | prefixstr = '' 58 | if prefix: 59 | prefixstr = prefix + '.' 60 | 61 | for metric in self._registry.collect(): 62 | for name, labels, value in metric.samples: 63 | if labels: 64 | labelstr = '.' 
+ '.'.join( 65 | ['{0}.{1}'.format( 66 | _sanitize(k), _sanitize(v)) 67 | for k, v in sorted(labels.items())]) 68 | else: 69 | labelstr = '' 70 | output.append('{0}{1}{2} {3} {4}\n'.format( 71 | prefixstr, _sanitize(name), labelstr, float(value), now)) 72 | 73 | conn = socket.create_connection(self._address, self._timeout) 74 | conn.sendall(''.join(output).encode('ascii')) 75 | conn.close() 76 | 77 | def start(self, interval=60.0, prefix=''): 78 | t = _RegularPush(self, interval, prefix) 79 | t.daemon = True 80 | t.start() 81 | -------------------------------------------------------------------------------- /prometheus_client/process_collector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import unicode_literals 4 | 5 | import os 6 | import time 7 | import threading 8 | 9 | from . import core 10 | try: 11 | import resource 12 | _PAGESIZE = resource.getpagesize() 13 | except ImportError: 14 | # Not Unix 15 | _PAGESIZE = 4096 16 | 17 | 18 | class ProcessCollector(object): 19 | """Collector for Standard Exports such as cpu and memory.""" 20 | def __init__(self, namespace='', pid=lambda: 'self', proc='/proc', registry=core.REGISTRY): 21 | self._namespace = namespace 22 | self._pid = pid 23 | self._proc = proc 24 | if namespace: 25 | self._prefix = namespace + '_process_' 26 | else: 27 | self._prefix = 'process_' 28 | self._ticks = 100.0 29 | try: 30 | self._ticks = os.sysconf('SC_CLK_TCK') 31 | except (ValueError, TypeError, AttributeError): 32 | pass 33 | 34 | # This is used to test if we can access /proc. 35 | self._btime = 0 36 | try: 37 | self._btime = self._boot_time() 38 | except IOError: 39 | pass 40 | if registry: 41 | registry.register(self) 42 | 43 | def _boot_time(self): 44 | with open(os.path.join(self._proc, 'stat')) as stat: 45 | for line in stat: 46 | if line.startswith('btime '): 47 | return float(line.split()[1]) 48 | 49 | def collect(self): 50 | if not self._btime: 51 | return [] 52 | 53 | try: 54 | pid = os.path.join(self._proc, str(self._pid()).strip()) 55 | except: 56 | # File likely didn't exist, fail silently. 
57 | 58 | return [] 59 | 60 | result = [] 61 | try: 62 | with open(os.path.join(pid, 'stat')) as stat: 63 | parts = (stat.read().split(')')[-1].split()) 64 | vmem = core.GaugeMetricFamily(self._prefix + 'virtual_memory_bytes', 65 | 'Virtual memory size in bytes.', value=float(parts[20])) 66 | rss = core.GaugeMetricFamily(self._prefix + 'resident_memory_bytes', 'Resident memory size in bytes.', value=float(parts[21]) * _PAGESIZE) 67 | start_time_secs = float(parts[19]) / self._ticks 68 | start_time = core.GaugeMetricFamily(self._prefix + 'start_time_seconds', 69 | 'Start time of the process since unix epoch in seconds.', value=start_time_secs + self._btime) 70 | utime = float(parts[11]) / self._ticks 71 | stime = float(parts[12]) / self._ticks 72 | cpu = core.CounterMetricFamily(self._prefix + 'cpu_seconds_total', 73 | 'Total user and system CPU time spent in seconds.', value=utime + stime) 74 | result.extend([vmem, rss, start_time, cpu]) 75 | except IOError: 76 | pass 77 | 78 | try: 79 | with open(os.path.join(pid, 'limits')) as limits: 80 | for line in limits: 81 | if line.startswith('Max open file'): 82 | max_fds = core.GaugeMetricFamily(self._prefix + 'max_fds', 83 | 'Maximum number of open file descriptors.', value=float(line.split()[3])) 84 | break 85 | open_fds = core.GaugeMetricFamily(self._prefix + 'open_fds', 86 | 'Number of open file descriptors.', len(os.listdir(os.path.join(pid, 'fd')))) 87 | result.extend([open_fds, max_fds]) 88 | except IOError: 89 | pass 90 | 91 | return result 92 | 93 | 94 | PROCESS_COLLECTOR = ProcessCollector() 95 | """Default ProcessCollector in default Registry REGISTRY.""" 96 | -------------------------------------------------------------------------------- /tests/test_process_collector.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | import os 3 | import unittest 4 | 5 | 6 | from prometheus_client import CollectorRegistry, ProcessCollector 7 | 8 | class TestProcessCollector(unittest.TestCase): 9 | def setUp(self): 10 | self.registry = CollectorRegistry() 11 | self.test_proc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'proc') 12 | 13 | def test_working(self): 14 | collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry) 15 | collector._ticks = 100 16 | 17 | self.assertEqual(17.21, self.registry.get_sample_value('process_cpu_seconds_total')) 18 | self.assertEqual(56274944.0, self.registry.get_sample_value('process_virtual_memory_bytes')) 19 | self.assertEqual(8114176, self.registry.get_sample_value('process_resident_memory_bytes')) 20 | self.assertEqual(1418184099.75, self.registry.get_sample_value('process_start_time_seconds')) 21 | self.assertEqual(2048.0, self.registry.get_sample_value('process_max_fds')) 22 | self.assertEqual(5.0, self.registry.get_sample_value('process_open_fds')) 23 | self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace')) 24 | 25 | def test_namespace(self): 26 | collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry, namespace='n') 27 | collector._ticks = 100 28 | 29 | self.assertEqual(17.21, self.registry.get_sample_value('n_process_cpu_seconds_total')) 30 | self.assertEqual(56274944.0, self.registry.get_sample_value('n_process_virtual_memory_bytes')) 31 | self.assertEqual(8114176, self.registry.get_sample_value('n_process_resident_memory_bytes')) 32 | self.assertEqual(1418184099.75, 
self.registry.get_sample_value('n_process_start_time_seconds')) 33 | self.assertEqual(2048.0, self.registry.get_sample_value('n_process_max_fds')) 34 | self.assertEqual(5.0, self.registry.get_sample_value('n_process_open_fds')) 35 | self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total')) 36 | 37 | def test_working_584(self): 38 | collector = ProcessCollector(proc=self.test_proc, pid=lambda: "584\n", registry=self.registry) 39 | collector._ticks = 100 40 | 41 | self.assertEqual(0.0, self.registry.get_sample_value('process_cpu_seconds_total')) 42 | self.assertEqual(10395648.0, self.registry.get_sample_value('process_virtual_memory_bytes')) 43 | self.assertEqual(634880, self.registry.get_sample_value('process_resident_memory_bytes')) 44 | self.assertEqual(1418291667.75, self.registry.get_sample_value('process_start_time_seconds')) 45 | self.assertEqual(None, self.registry.get_sample_value('process_max_fds')) 46 | self.assertEqual(None, self.registry.get_sample_value('process_open_fds')) 47 | 48 | def test_working_fake_pid(self): 49 | collector = ProcessCollector(proc=self.test_proc, pid=lambda: 123, registry=self.registry) 50 | collector._ticks = 100 51 | 52 | self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total')) 53 | self.assertEqual(None, self.registry.get_sample_value('process_virtual_memory_bytes')) 54 | self.assertEqual(None, self.registry.get_sample_value('process_resident_memory_bytes')) 55 | self.assertEqual(None, self.registry.get_sample_value('process_start_time_seconds')) 56 | self.assertEqual(None, self.registry.get_sample_value('process_max_fds')) 57 | self.assertEqual(None, self.registry.get_sample_value('process_open_fds')) 58 | self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace')) 59 | 60 | 61 | if __name__ == '__main__': 62 | unittest.main() 63 | -------------------------------------------------------------------------------- /prometheus_client/exposition.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import unicode_literals 4 | 5 | import os 6 | import socket 7 | import time 8 | import threading 9 | from contextlib import closing 10 | from wsgiref.simple_server import make_server 11 | 12 | from . 
import core 13 | try: 14 | from BaseHTTPServer import BaseHTTPRequestHandler 15 | from BaseHTTPServer import HTTPServer 16 | from urllib2 import build_opener, Request, HTTPHandler 17 | from urllib import quote_plus 18 | except ImportError: 19 | # Python 3 20 | unicode = str 21 | from http.server import BaseHTTPRequestHandler 22 | from http.server import HTTPServer 23 | from urllib.request import build_opener, Request, HTTPHandler 24 | from urllib.parse import quote_plus 25 | 26 | 27 | CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8') 28 | '''Content type of the latest text format''' 29 | 30 | 31 | def make_wsgi_app(): 32 | '''Create a WSGI app which serves the metrics from the registry.''' 33 | def prometheus_app(environ, start_response): 34 | status = str('200 OK') 35 | headers = [(str('Content-type'), CONTENT_TYPE_LATEST)] 36 | start_response(status, headers) 37 | return [generate_latest(core.REGISTRY)] 38 | return prometheus_app 39 | 40 | 41 | def start_wsgi_server(port, addr=''): 42 | """Starts a WSGI server for prometheus metrics as a daemon thread.""" 43 | class PrometheusMetricsServer(threading.Thread): 44 | def run(self): 45 | httpd = make_server(addr, port, make_wsgi_app()) 46 | httpd.serve_forever() 47 | t = PrometheusMetricsServer() 48 | t.daemon = True 49 | t.start() 50 | 51 | 52 | def generate_latest(registry=core.REGISTRY): 53 | '''Returns the metrics from the registry in latest text format as a string.''' 54 | output = [] 55 | for metric in registry.collect(): 56 | output.append('# HELP {0} {1}'.format( 57 | metric.name, metric.documentation.replace('\\', r'\\').replace('\n', r'\n'))) 58 | output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type)) 59 | for name, labels, value in metric.samples: 60 | if labels: 61 | labelstr = '{{{0}}}'.format(','.join( 62 | ['{0}="{1}"'.format( 63 | k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) 64 | for k, v in sorted(labels.items())])) 65 | else: 66 | labelstr = '' 67 | output.append('{0}{1} {2}\n'.format(name, labelstr, core._floatToGoString(value))) 68 | return ''.join(output).encode('utf-8') 69 | 70 | 71 | class MetricsHandler(BaseHTTPRequestHandler): 72 | def do_GET(self): 73 | self.send_response(200) 74 | self.send_header('Content-Type', CONTENT_TYPE_LATEST) 75 | self.end_headers() 76 | self.wfile.write(generate_latest(core.REGISTRY)) 77 | 78 | def log_message(self, format, *args): 79 | return 80 | 81 | 82 | def start_http_server(port, addr=''): 83 | """Starts a HTTP server for prometheus metrics as a daemon thread.""" 84 | class PrometheusMetricsServer(threading.Thread): 85 | def run(self): 86 | httpd = HTTPServer((addr, port), MetricsHandler) 87 | httpd.serve_forever() 88 | t = PrometheusMetricsServer() 89 | t.daemon = True 90 | t.start() 91 | 92 | 93 | def write_to_textfile(path, registry): 94 | '''Write metrics to the given path. 95 | 96 | This is intended for use with the Node exporter textfile collector. 97 | The path must end in .prom for the textfile collector to process it.''' 98 | tmppath = '%s.%s.%s' % (path, os.getpid(), threading.current_thread().ident) 99 | with open(tmppath, 'wb') as f: 100 | f.write(generate_latest(registry)) 101 | # rename(2) is atomic. 102 | os.rename(tmppath, path) 103 | 104 | 105 | def push_to_gateway(gateway, job, registry, grouping_key=None, timeout=None): 106 | '''Push metrics to the given pushgateway. 107 | 108 | This overwrites all metrics with the same job and grouping_key. 
109 | This uses the PUT HTTP method.''' 110 | _use_gateway('PUT', gateway, job, registry, grouping_key, timeout) 111 | 112 | 113 | def pushadd_to_gateway(gateway, job, registry, grouping_key=None, timeout=None): 114 | '''PushAdd metrics to the given pushgateway. 115 | 116 | This replaces metrics with the same name, job and grouping_key. 117 | This uses the POST HTTP method.''' 118 | _use_gateway('POST', gateway, job, registry, grouping_key, timeout) 119 | 120 | 121 | def delete_from_gateway(gateway, job, grouping_key=None, timeout=None): 122 | '''Delete metrics from the given pushgateway. 123 | 124 | This deletes metrics with the given job and grouping_key. 125 | This uses the DELETE HTTP method.''' 126 | _use_gateway('DELETE', gateway, job, None, grouping_key, timeout) 127 | 128 | 129 | def _use_gateway(method, gateway, job, registry, grouping_key, timeout): 130 | url = 'http://{0}/metrics/job/{1}'.format(gateway, quote_plus(job)) 131 | 132 | data = b'' 133 | if method != 'DELETE': 134 | data = generate_latest(registry) 135 | 136 | if grouping_key is None: 137 | grouping_key = {} 138 | url = url + ''.join(['/{0}/{1}'.format(quote_plus(str(k)), quote_plus(str(v))) 139 | for k, v in sorted(grouping_key.items())]) 140 | 141 | request = Request(url, data=data) 142 | request.add_header('Content-Type', CONTENT_TYPE_LATEST) 143 | request.get_method = lambda: method 144 | resp = build_opener(HTTPHandler).open(request, timeout=timeout) 145 | if resp.code >= 400: 146 | raise IOError("error talking to pushgateway: {0} {1}".format( 147 | resp.code, resp.msg)) 148 | 149 | def instance_ip_grouping_key(): 150 | '''Grouping key with instance set to the IP Address of this host.''' 151 | with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s: 152 | s.connect(('localhost', 0)) 153 | return {'instance': s.getsockname()[0]} 154 | -------------------------------------------------------------------------------- /tests/test_parser.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import unittest 4 | 5 | from prometheus_client.core import * 6 | from prometheus_client.exposition import * 7 | from prometheus_client.parser import * 8 | 9 | 10 | class TestParse(unittest.TestCase): 11 | 12 | def test_simple_counter(self): 13 | families = text_string_to_metric_families("""# TYPE a counter 14 | # HELP a help 15 | a 1 16 | """) 17 | self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families)) 18 | 19 | def test_simple_gauge(self): 20 | families = text_string_to_metric_families("""# TYPE a gauge 21 | # HELP a help 22 | a 1 23 | """) 24 | self.assertEqual([GaugeMetricFamily("a", "help", value=1)], list(families)) 25 | 26 | def test_simple_summary(self): 27 | families = text_string_to_metric_families("""# TYPE a summary 28 | # HELP a help 29 | a_count 1 30 | a_sum 2 31 | """) 32 | 33 | def test_summary_quantiles(self): 34 | families = text_string_to_metric_families("""# TYPE a summary 35 | # HELP a help 36 | a_count 1 37 | a_sum 2 38 | a{quantile="0.5"} 0.7 39 | """) 40 | # The Python client doesn't support quantiles, but we 41 | # still need to be able to parse them. 
42 | metric_family = SummaryMetricFamily("a", "help", count_value=1, sum_value=2) 43 | metric_family.add_sample("a", {"quantile": "0.5"}, 0.7) 44 | self.assertEqual([metric_family], list(families)) 45 | 46 | def test_simple_histogram(self): 47 | families = text_string_to_metric_families("""# TYPE a histogram 48 | # HELP a help 49 | a_bucket{le="1"} 0 50 | a_bucket{le="+Inf"} 3 51 | a_count 3 52 | a_sum 2 53 | """) 54 | self.assertEqual([HistogramMetricFamily("a", "help", sum_value=2, buckets=[("1", 0.0), ("+Inf", 3.0)])], list(families)) 55 | 56 | def test_no_metadata(self): 57 | families = text_string_to_metric_families("""a 1 58 | """) 59 | metric_family = Metric("a", "", "untyped") 60 | metric_family.add_sample("a", {}, 1) 61 | self.assertEqual([metric_family], list(families)) 62 | 63 | def test_type_help_switched(self): 64 | families = text_string_to_metric_families("""# HELP a help 65 | # TYPE a counter 66 | a 1 67 | """) 68 | self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families)) 69 | 70 | def test_blank_lines_and_comments(self): 71 | families = text_string_to_metric_families(""" 72 | # TYPE a counter 73 | # FOO a 74 | # BAR b 75 | # HELP a help 76 | 77 | a 1 78 | """) 79 | self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families)) 80 | 81 | def test_tabs(self): 82 | families = text_string_to_metric_families("""#\tTYPE\ta\tcounter 83 | #\tHELP\ta\thelp 84 | a\t1 85 | """) 86 | self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families)) 87 | 88 | def test_empty_help(self): 89 | families = text_string_to_metric_families("""# TYPE a counter 90 | # HELP a 91 | a 1 92 | """) 93 | self.assertEqual([CounterMetricFamily("a", "", value=1)], list(families)) 94 | 95 | def test_labels_and_infinite(self): 96 | families = text_string_to_metric_families("""# TYPE a counter 97 | # HELP a help 98 | a{foo="bar"} +Inf 99 | a{foo="baz"} -Inf 100 | """) 101 | metric_family = CounterMetricFamily("a", "help", labels=["foo"]) 102 | metric_family.add_metric(["bar"], core._INF) 103 | metric_family.add_metric(["baz"], core._MINUS_INF) 104 | self.assertEqual([metric_family], list(families)) 105 | 106 | def test_spaces(self): 107 | families = text_string_to_metric_families("""# TYPE a counter 108 | # HELP a help 109 | a{ foo = "bar" } 1 110 | a\t\t{\t\tfoo\t\t=\t\t"baz"\t\t}\t\t2 111 | """) 112 | metric_family = CounterMetricFamily("a", "help", labels=["foo"]) 113 | metric_family.add_metric(["bar"], 1) 114 | metric_family.add_metric(["baz"], 2) 115 | self.assertEqual([metric_family], list(families)) 116 | 117 | def test_nan(self): 118 | families = text_string_to_metric_families("""a NaN 119 | """) 120 | # Can't use a simple comparison as nan != nan. 121 | self.assertTrue(math.isnan(list(families)[0].samples[0][2])) 122 | 123 | def test_escaping(self): 124 | families = text_string_to_metric_families("""# TYPE a counter 125 | # HELP a he\\n\\\\l\\tp 126 | a{foo="b\\"a\\nr"} 1 127 | a{foo="b\\\\a\\z"} 2 128 | """) 129 | metric_family = CounterMetricFamily("a", "he\n\\l\\tp", labels=["foo"]) 130 | metric_family.add_metric(["b\"a\nr"], 1) 131 | metric_family.add_metric(["b\\a\\z"], 2) 132 | self.assertEqual([metric_family], list(families)) 133 | 134 | def test_roundtrip(self): 135 | text = """# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
136 | # TYPE go_gc_duration_seconds summary 137 | go_gc_duration_seconds{quantile="0"} 0.013300656000000001 138 | go_gc_duration_seconds{quantile="0.25"} 0.013638736 139 | go_gc_duration_seconds{quantile="0.5"} 0.013759906 140 | go_gc_duration_seconds{quantile="0.75"} 0.013962066 141 | go_gc_duration_seconds{quantile="1"} 0.021383540000000003 142 | go_gc_duration_seconds_sum 56.12904785 143 | go_gc_duration_seconds_count 7476.0 144 | # HELP go_goroutines Number of goroutines that currently exist. 145 | # TYPE go_goroutines gauge 146 | go_goroutines 166.0 147 | # HELP prometheus_local_storage_indexing_batch_duration_milliseconds Quantiles for batch indexing duration in milliseconds. 148 | # TYPE prometheus_local_storage_indexing_batch_duration_milliseconds summary 149 | prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.5"} NaN 150 | prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.9"} NaN 151 | prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.99"} NaN 152 | prometheus_local_storage_indexing_batch_duration_milliseconds_sum 871.5665949999999 153 | prometheus_local_storage_indexing_batch_duration_milliseconds_count 229.0 154 | # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. 155 | # TYPE process_cpu_seconds_total counter 156 | process_cpu_seconds_total 29323.4 157 | # HELP process_virtual_memory_bytes Virtual memory size in bytes. 158 | # TYPE process_virtual_memory_bytes gauge 159 | process_virtual_memory_bytes 2478268416.0 160 | # HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, and branch from which Prometheus was built. 161 | # TYPE prometheus_build_info gauge 162 | prometheus_build_info{branch="HEAD",revision="ef176e5",version="0.16.0rc1"} 1.0 163 | # HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type. 164 | # TYPE prometheus_local_storage_chunk_ops_total counter 165 | prometheus_local_storage_chunk_ops_total{type="clone"} 28.0 166 | prometheus_local_storage_chunk_ops_total{type="create"} 997844.0 167 | prometheus_local_storage_chunk_ops_total{type="drop"} 1345758.0 168 | prometheus_local_storage_chunk_ops_total{type="load"} 1641.0 169 | prometheus_local_storage_chunk_ops_total{type="persist"} 981408.0 170 | prometheus_local_storage_chunk_ops_total{type="pin"} 32662.0 171 | prometheus_local_storage_chunk_ops_total{type="transcode"} 980180.0 172 | prometheus_local_storage_chunk_ops_total{type="unpin"} 32662.0 173 | """ 174 | families = list(text_string_to_metric_families(text)) 175 | 176 | class TextCollector(object): 177 | def collect(self): 178 | return families 179 | 180 | 181 | registry = CollectorRegistry() 182 | registry.register(TextCollector()) 183 | self.assertEqual(text.encode('utf-8'), generate_latest(registry)) 184 | 185 | 186 | 187 | if __name__ == '__main__': 188 | unittest.main() 189 | -------------------------------------------------------------------------------- /prometheus_client/parser.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import unicode_literals 4 | 5 | try: 6 | import StringIO 7 | except ImportError: 8 | # Python 3 9 | import io as StringIO 10 | 11 | from . import core 12 | 13 | 14 | def text_string_to_metric_families(text): 15 | """Parse Prometheus text format from a string. 16 | 17 | See text_fd_to_metric_families. 
18 | """ 19 | for metric_family in text_fd_to_metric_families(StringIO.StringIO(text)): 20 | yield metric_family 21 | 22 | 23 | def _unescape_help(text): 24 | result = [] 25 | slash = False 26 | 27 | for char in text: 28 | if slash: 29 | if char == '\\': 30 | result.append('\\') 31 | elif char == 'n': 32 | result.append('\n') 33 | else: 34 | result.append('\\' + char) 35 | slash = False 36 | else: 37 | if char == '\\': 38 | slash = True 39 | else: 40 | result.append(char) 41 | 42 | if slash: 43 | result.append('\\') 44 | 45 | return ''.join(result) 46 | 47 | 48 | def _parse_sample(text): 49 | name = [] 50 | labelname = [] 51 | labelvalue = [] 52 | value = [] 53 | labels = {} 54 | 55 | state = 'name' 56 | 57 | for char in text: 58 | if state == 'name': 59 | if char == '{': 60 | state = 'startoflabelname' 61 | elif char == ' ' or char == '\t': 62 | state = 'endofname' 63 | else: 64 | name.append(char) 65 | elif state == 'endofname': 66 | if char == ' ' or char == '\t': 67 | pass 68 | elif char == '{': 69 | state = 'startoflabelname' 70 | else: 71 | value.append(char) 72 | state = 'value' 73 | elif state == 'startoflabelname': 74 | if char == ' ' or char == '\t': 75 | pass 76 | elif char == '}': 77 | state = 'endoflabels' 78 | else: 79 | state = 'labelname' 80 | labelname.append(char) 81 | elif state == 'labelname': 82 | if char == '=': 83 | state = 'labelvaluequote' 84 | elif char == ' ' or char == '\t': 85 | state = 'labelvalueequals' 86 | else: 87 | labelname.append(char) 88 | elif state == 'labelvalueequals': 89 | if char == '=': 90 | state = 'labelvaluequote' 91 | elif char == ' ' or char == '\t': 92 | pass 93 | else: 94 | raise ValueError("Invalid line: " + text) 95 | elif state == 'labelvaluequote': 96 | if char == '"': 97 | state = 'labelvalue' 98 | elif char == ' ' or char == '\t': 99 | pass 100 | else: 101 | raise ValueError("Invalid line: " + text) 102 | elif state == 'labelvalue': 103 | if char == '\\': 104 | state = 'labelvalueslash' 105 | elif char == '"': 106 | labels[''.join(labelname)] = ''.join(labelvalue) 107 | labelname = [] 108 | labelvalue = [] 109 | state = 'nextlabel' 110 | else: 111 | labelvalue.append(char) 112 | elif state == 'labelvalueslash': 113 | state = 'labelvalue' 114 | if char == '\\': 115 | labelvalue.append('\\') 116 | elif char == 'n': 117 | labelvalue.append('\n') 118 | elif char == '"': 119 | labelvalue.append('"') 120 | else: 121 | labelvalue.append('\\' + char) 122 | elif state == 'nextlabel': 123 | if char == ',': 124 | state = 'labelname' 125 | elif char == '}': 126 | state = 'endoflabels' 127 | elif char == ' ' or char == '\t': 128 | pass 129 | else: 130 | raise ValueError("Invalid line: " + text) 131 | elif state == 'endoflabels': 132 | if char == ' ' or char == '\t': 133 | pass 134 | else: 135 | value.append(char) 136 | state = 'value' 137 | elif state == 'value': 138 | if char == ' ' or char == '\t': 139 | # Timestamps are not supported, halt 140 | break 141 | else: 142 | value.append(char) 143 | return (''.join(name), labels, float(''.join(value))) 144 | 145 | 146 | def text_fd_to_metric_families(fd): 147 | """Parse Prometheus text format from a file descriptor. 148 | 149 | This is a laxer parser than the main Go parser, 150 | so successful parsing does not imply that the parsed 151 | text meets the specification. 152 | 153 | Yields core.Metric's. 
154 | """ 155 | name = '' 156 | documentation = '' 157 | typ = 'untyped' 158 | samples = [] 159 | allowed_names = [] 160 | 161 | def build_metric(name, documentation, typ, samples): 162 | metric = core.Metric(name, documentation, typ) 163 | metric.samples = samples 164 | return metric 165 | 166 | for line in fd: 167 | line = line.strip() 168 | 169 | if line.startswith('#'): 170 | parts = line.split(None, 3) 171 | if len(parts) < 2: 172 | continue 173 | if parts[1] == 'HELP': 174 | if parts[2] != name: 175 | if name != '': 176 | yield build_metric(name, documentation, typ, samples) 177 | # New metric 178 | name = parts[2] 179 | typ = 'untyped' 180 | samples = [] 181 | allowed_names = [parts[2]] 182 | if len(parts) == 4: 183 | documentation = _unescape_help(parts[3]) 184 | else: 185 | documentation = '' 186 | elif parts[1] == 'TYPE': 187 | if parts[2] != name: 188 | if name != '': 189 | yield build_metric(name, documentation, typ, samples) 190 | # New metric 191 | name = parts[2] 192 | documentation = '' 193 | samples = [] 194 | typ = parts[3] 195 | allowed_names = { 196 | 'counter': [''], 197 | 'gauge': [''], 198 | 'summary': ['_count', '_sum', ''], 199 | 'histogram': ['_count', '_sum', '_bucket'], 200 | }.get(typ, [parts[2]]) 201 | allowed_names = [name + n for n in allowed_names] 202 | else: 203 | # Ignore other comment tokens 204 | pass 205 | elif line == '': 206 | # Ignore blank lines 207 | pass 208 | else: 209 | sample = _parse_sample(line) 210 | if sample[0] not in allowed_names: 211 | if name != '': 212 | yield build_metric(name, documentation, typ, samples) 213 | # New metric, yield immediately as untyped singleton 214 | name = '' 215 | documentation = '' 216 | typ = 'untyped' 217 | samples = [] 218 | allowed_names = [] 219 | yield build_metric(sample[0], documentation, typ, [sample]) 220 | else: 221 | samples.append(sample) 222 | 223 | if name != '': 224 | yield build_metric(name, documentation, typ, samples) 225 | -------------------------------------------------------------------------------- /tests/test_exposition.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | import os 3 | import threading 4 | import time 5 | import unittest 6 | 7 | 8 | from prometheus_client import Gauge, Counter, Summary, Histogram, Metric 9 | from prometheus_client import CollectorRegistry, generate_latest 10 | from prometheus_client import push_to_gateway, pushadd_to_gateway, delete_from_gateway 11 | from prometheus_client import CONTENT_TYPE_LATEST, instance_ip_grouping_key 12 | 13 | try: 14 | from BaseHTTPServer import BaseHTTPRequestHandler 15 | from BaseHTTPServer import HTTPServer 16 | except ImportError: 17 | # Python 3 18 | from http.server import BaseHTTPRequestHandler 19 | from http.server import HTTPServer 20 | 21 | 22 | class TestGenerateText(unittest.TestCase): 23 | def setUp(self): 24 | self.registry = CollectorRegistry() 25 | 26 | def test_counter(self): 27 | c = Counter('cc', 'A counter', registry=self.registry) 28 | c.inc() 29 | self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc 1.0\n', generate_latest(self.registry)) 30 | 31 | def test_gauge(self): 32 | g = Gauge('gg', 'A gauge', registry=self.registry) 33 | g.set(17) 34 | self.assertEqual(b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n', generate_latest(self.registry)) 35 | 36 | def test_summary(self): 37 | s = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry) 38 | s.labels('c', 'd').observe(17) 39 | self.assertEqual(b'# HELP ss A 
summary\n# TYPE ss summary\nss_count{a="c",b="d"} 1.0\nss_sum{a="c",b="d"} 17.0\n', generate_latest(self.registry)) 40 | 41 | def test_histogram(self): 42 | s = Histogram('hh', 'A histogram', registry=self.registry) 43 | s.observe(0.05) 44 | self.assertEqual(b'''# HELP hh A histogram 45 | # TYPE hh histogram 46 | hh_bucket{le="0.005"} 0.0 47 | hh_bucket{le="0.01"} 0.0 48 | hh_bucket{le="0.025"} 0.0 49 | hh_bucket{le="0.05"} 1.0 50 | hh_bucket{le="0.075"} 1.0 51 | hh_bucket{le="0.1"} 1.0 52 | hh_bucket{le="0.25"} 1.0 53 | hh_bucket{le="0.5"} 1.0 54 | hh_bucket{le="0.75"} 1.0 55 | hh_bucket{le="1.0"} 1.0 56 | hh_bucket{le="2.5"} 1.0 57 | hh_bucket{le="5.0"} 1.0 58 | hh_bucket{le="7.5"} 1.0 59 | hh_bucket{le="10.0"} 1.0 60 | hh_bucket{le="+Inf"} 1.0 61 | hh_count 1.0 62 | hh_sum 0.05 63 | ''', generate_latest(self.registry)) 64 | 65 | def test_unicode(self): 66 | c = Counter('cc', '\u4500', ['l'], registry=self.registry) 67 | c.labels('\u4500').inc() 68 | self.assertEqual(b'# HELP cc \xe4\x94\x80\n# TYPE cc counter\ncc{l="\xe4\x94\x80"} 1.0\n', generate_latest(self.registry)) 69 | 70 | def test_escaping(self): 71 | c = Counter('cc', 'A\ncount\\er', ['a'], registry=self.registry) 72 | c.labels('\\x\n"').inc(1) 73 | self.assertEqual(b'# HELP cc A\\ncount\\\\er\n# TYPE cc counter\ncc{a="\\\\x\\n\\""} 1.0\n', generate_latest(self.registry)) 74 | 75 | def test_nonnumber(self): 76 | class MyNumber(): 77 | def __repr__(self): 78 | return "MyNumber(123)" 79 | def __float__(self): 80 | return 123.0 81 | class MyCollector(): 82 | def collect(self): 83 | metric = Metric("nonnumber", "Non number", 'untyped') 84 | metric.add_sample("nonnumber", {}, MyNumber()) 85 | yield metric 86 | self.registry.register(MyCollector()) 87 | self.assertEqual(b'# HELP nonnumber Non number\n# TYPE nonnumber untyped\nnonnumber 123.0\n', generate_latest(self.registry)) 88 | 89 | 90 | class TestPushGateway(unittest.TestCase): 91 | def setUp(self): 92 | self.registry = CollectorRegistry() 93 | self.counter = Gauge('g', 'help', registry=self.registry) 94 | self.requests = requests = [] 95 | class TestHandler(BaseHTTPRequestHandler): 96 | def do_PUT(self): 97 | self.send_response(201) 98 | length = int(self.headers['content-length']) 99 | requests.append((self, self.rfile.read(length))) 100 | self.end_headers() 101 | 102 | do_POST = do_PUT 103 | do_DELETE = do_PUT 104 | 105 | httpd = HTTPServer(('localhost', 0), TestHandler) 106 | self.address = ':'.join([str(x) for x in httpd.server_address]) 107 | class TestServer(threading.Thread): 108 | def run(self): 109 | httpd.handle_request() 110 | self.server = TestServer() 111 | self.server.daemon = True 112 | self.server.start() 113 | 114 | def test_push(self): 115 | push_to_gateway(self.address, "my_job", self.registry) 116 | self.assertEqual(self.requests[0][0].command, 'PUT') 117 | self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job') 118 | self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) 119 | self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') 120 | 121 | def test_push_with_groupingkey(self): 122 | push_to_gateway(self.address, "my_job", self.registry, {'a': 9}) 123 | self.assertEqual(self.requests[0][0].command, 'PUT') 124 | self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9') 125 | self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) 126 | self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') 127 | 128 | def 
test_push_with_complex_groupingkey(self): 129 | push_to_gateway(self.address, "my_job", self.registry, {'a': 9, 'b': 'a/ z'}) 130 | self.assertEqual(self.requests[0][0].command, 'PUT') 131 | self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9/b/a%2F+z') 132 | self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) 133 | self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') 134 | 135 | def test_pushadd(self): 136 | pushadd_to_gateway(self.address, "my_job", self.registry) 137 | self.assertEqual(self.requests[0][0].command, 'POST') 138 | self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job') 139 | self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) 140 | self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') 141 | 142 | def test_pushadd_with_groupingkey(self): 143 | pushadd_to_gateway(self.address, "my_job", self.registry, {'a': 9}) 144 | self.assertEqual(self.requests[0][0].command, 'POST') 145 | self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9') 146 | self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) 147 | self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') 148 | 149 | def test_delete(self): 150 | delete_from_gateway(self.address, "my_job") 151 | self.assertEqual(self.requests[0][0].command, 'DELETE') 152 | self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job') 153 | self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) 154 | self.assertEqual(self.requests[0][1], b'') 155 | 156 | def test_delete_with_groupingkey(self): 157 | delete_from_gateway(self.address, "my_job", {'a': 9}) 158 | self.assertEqual(self.requests[0][0].command, 'DELETE') 159 | self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9') 160 | self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) 161 | self.assertEqual(self.requests[0][1], b'') 162 | 163 | def test_instance_ip_grouping_key(self): 164 | self.assertTrue('' != instance_ip_grouping_key()['instance']) 165 | 166 | 167 | if __name__ == '__main__': 168 | unittest.main() 169 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Prometheus Python Client 2 | 3 | The official Python 2 and 3 client for [Prometheus](http://prometheus.io). 4 | 5 | ## Three Step Demo 6 | 7 | **One**: Install the client: 8 | ``` 9 | pip install prometheus_client 10 | ``` 11 | 12 | **Two**: Paste the following into a Python interpreter: 13 | ```python 14 | from prometheus_client import start_http_server, Summary 15 | import random 16 | import time 17 | 18 | # Create a metric to track time spent and requests made. 19 | REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request') 20 | 21 | # Decorate function with metric. 22 | @REQUEST_TIME.time() 23 | def process_request(t): 24 | """A dummy function that takes some time.""" 25 | time.sleep(t) 26 | 27 | if __name__ == '__main__': 28 | # Start up the server to expose the metrics. 29 | start_http_server(8000) 30 | # Generate some requests. 31 | while True: 32 | process_request(random.random()) 33 | ``` 34 | 35 | **Three**: Visit [http://localhost:8000/](http://localhost:8000/) to view the metrics. 36 | 37 | From one easy to use decorator you get: 38 | * `request_processing_seconds_count`: Number of times this function was called. 39 | * `request_processing_seconds_sum`: Total amount of time spent in this function. 40 | 41 | Prometheus's `rate` function allows calculation of both requests per second, 42 | and latency over time from this data. 43 | 44 | In addition if you're on Linux the `process` metrics expose CPU, memory and 45 | other information about the process for free! 46 | 47 | ## Installation 48 | 49 | ``` 50 | pip install prometheus_client 51 | ``` 52 | 53 | This package can be found on 54 | [PyPI](https://pypi.python.org/pypi/prometheus_client). 55 | 56 | ## Instrumenting 57 | 58 | Four types of metric are offered: Counter, Gauge, Summary and Histogram. 59 | See the documentation on [metric types](http://prometheus.io/docs/concepts/metric_types/) 60 | and [instrumentation best practices](http://prometheus.io/docs/practices/instrumentation/#counter-vs.-gauge,-summary-vs.-histogram) 61 | on how to use them. 62 | 63 | ### Counter 64 | 65 | Counters go up, and reset when the process restarts. 66 | 67 | 68 | ```python 69 | from prometheus_client import Counter 70 | c = Counter('my_failures_total', 'Description of counter') 71 | c.inc() # Increment by 1 72 | c.inc(1.6) # Increment by given value 73 | ``` 74 | 75 | There are utilities to count exceptions raised: 76 | 77 | ```python 78 | @c.count_exceptions() 79 | def f(): 80 | pass 81 | 82 | with c.count_exceptions(): 83 | pass 84 | 85 | # Count only one type of exception 86 | with c.count_exceptions(ValueError): 87 | pass 88 | ``` 89 | 90 | ### Gauge 91 | 92 | Gauges can go up and down. 93 | 94 | ```python 95 | from prometheus_client import Gauge 96 | g = Gauge('my_inprogress_requests', 'Description of gauge') 97 | g.inc() # Increment by 1 98 | g.dec(10) # Decrement by given value 99 | g.set(4.2) # Set to a given value 100 | ``` 101 | 102 | There are utilities for common use cases: 103 | 104 | ```python 105 | g.set_to_current_time() # Set to current unixtime 106 | 107 | # Increment when entered, decrement when exited. 
108 | @g.track_inprogress()
109 | def f():
110 |     pass
111 | 
112 | with g.track_inprogress():
113 |     pass
114 | ```
115 | 
116 | A Gauge can also take its value from a callback:
117 | 
118 | ```python
119 | d = Gauge('data_objects', 'Number of objects')
120 | my_dict = {}
121 | d.set_function(lambda: len(my_dict))
122 | ```
123 | 
124 | ### Summary
125 | 
126 | Summaries track the size and number of events.
127 | 
128 | ```python
129 | from prometheus_client import Summary
130 | s = Summary('request_latency_seconds', 'Description of summary')
131 | s.observe(4.7) # Observe 4.7 (seconds in this case)
132 | ```
133 | 
134 | There are utilities for timing code:
135 | 
136 | ```python
137 | @s.time()
138 | def f():
139 |     pass
140 | 
141 | with s.time():
142 |     pass
143 | ```
144 | 
145 | The Python client doesn't store or expose quantile information at this time.
146 | 
147 | ### Histogram
148 | 
149 | Histograms track the size and number of events in buckets.
150 | This allows for aggregatable calculation of quantiles.
151 | 
152 | ```python
153 | from prometheus_client import Histogram
154 | h = Histogram('request_latency_seconds', 'Description of histogram')
155 | h.observe(4.7) # Observe 4.7 (seconds in this case)
156 | ```
157 | 
158 | The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds.
159 | They can be overridden by passing the `buckets` keyword argument to `Histogram`.
160 | 
161 | There are utilities for timing code:
162 | 
163 | ```python
164 | @h.time()
165 | def f():
166 |     pass
167 | 
168 | with h.time():
169 |     pass
170 | ```
171 | 
172 | ### Labels
173 | 
174 | All metrics can have labels, allowing grouping of related time series.
175 | 
176 | See the best practices on [naming](http://prometheus.io/docs/practices/naming/)
177 | and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels).
178 | 
179 | Taking a counter as an example:
180 | 
181 | ```python
182 | from prometheus_client import Counter
183 | c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
184 | c.labels('get', '/').inc()
185 | c.labels('post', '/submit').inc()
186 | ```
187 | 
188 | Labels can also be provided as a dict:
189 | 
190 | ```python
191 | from prometheus_client import Counter
192 | c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
193 | c.labels({'method': 'get', 'endpoint': '/'}).inc()
194 | c.labels({'method': 'post', 'endpoint': '/submit'}).inc()
195 | ```
196 | 
197 | ### Process Collector
198 | 
199 | The Python client automatically exports metrics about process CPU usage, RAM,
200 | file descriptors and start time. These all have the prefix `process`, and
201 | are currently only available on Linux.
202 | 
203 | The `namespace` and `pid` constructor arguments allow for exporting metrics about
204 | other processes, for example:
205 | ```
206 | ProcessCollector(namespace='mydaemon', pid=lambda: open('/var/run/daemon.pid').read())
207 | ```
208 | 
209 | ## Exporting
210 | 
211 | There are several options for exporting metrics.
212 | 
213 | ### HTTP
214 | 
215 | Metrics are usually exposed over HTTP, to be read by the Prometheus server.
216 | 
217 | The easiest way to do this is via `start_http_server`, which will start an HTTP
218 | server in a daemon thread on the given port:
219 | 
220 | ```python
221 | from prometheus_client import start_http_server
222 | 
223 | start_http_server(8000)
224 | ```
225 | 
226 | Visit [http://localhost:8000/](http://localhost:8000/) to view the metrics.
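If you need the exposition text itself, for example to hook metrics into a web framework
not covered below, `generate_latest` renders a registry in the text format. A minimal
sketch, assuming the default `REGISTRY` and that some metrics have already been registered:

```python
from prometheus_client import REGISTRY, generate_latest, CONTENT_TYPE_LATEST

# Render the default registry in the Prometheus text exposition format.
payload = generate_latest(REGISTRY)

# Serve `payload` with the Content-Type below from the framework of your choice.
print(CONTENT_TYPE_LATEST)
print(payload.decode('utf-8'))
```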
227 | 
228 | To add Prometheus exposition to an existing HTTP server, see the `MetricsHandler` class,
229 | which provides a `BaseHTTPRequestHandler`. It also serves as a simple example of how
230 | to write a custom endpoint.
231 | 
232 | #### Twisted
233 | 
234 | To use Prometheus with [Twisted](https://twistedmatrix.com/), there is `MetricsResource`, which exposes metrics as a Twisted resource.
235 | 
236 | ```python
237 | from prometheus_client.twisted import MetricsResource
238 | from twisted.web.server import Site
239 | from twisted.web.resource import Resource
240 | from twisted.internet import reactor
241 | 
242 | root = Resource()
243 | root.putChild(b'metrics', MetricsResource())
244 | 
245 | factory = Site(root)
246 | reactor.listenTCP(8000, factory)
247 | reactor.run()
248 | ```
249 | 
250 | #### WSGI
251 | 
252 | To use Prometheus with [WSGI](http://wsgi.readthedocs.org/en/latest/), there is
253 | `make_wsgi_app`, which creates a WSGI application.
254 | 
255 | ```python
256 | from prometheus_client import make_wsgi_app
257 | from wsgiref.simple_server import make_server
258 | 
259 | app = make_wsgi_app()
260 | httpd = make_server('', 8000, app)
261 | httpd.serve_forever()
262 | ```
263 | 
264 | Such an application can be useful when integrating Prometheus metrics with WSGI
265 | apps.
266 | 
267 | The method `start_wsgi_server` can be used to serve the metrics through the
268 | WSGI reference implementation in a new thread.
269 | 
270 | ```python
271 | from prometheus_client import start_wsgi_server
272 | 
273 | start_wsgi_server(8000)
274 | ```
275 | 
276 | ### Node exporter textfile collector
277 | 
278 | The [textfile collector](https://github.com/prometheus/node_exporter#textfile-collector)
279 | allows machine-level statistics to be exported via the Node exporter.
280 | 
281 | This is useful for monitoring cronjobs, or for writing cronjobs that expose metrics
282 | about the machine that the Node exporter does not support, or that would not make sense
283 | to collect at every scrape (for example, anything involving subprocesses).
284 | 
285 | ```python
286 | from prometheus_client import CollectorRegistry, Gauge, write_to_textfile
287 | 
288 | registry = CollectorRegistry()
289 | g = Gauge('raid_status', '1 if raid array is okay', registry=registry)
290 | g.set(1)
291 | write_to_textfile('/configured/textfile/path/raid.prom', registry)
292 | ```
293 | 
294 | A separate registry is used, as the default registry may contain other metrics
295 | such as those from the Process Collector.
296 | 
297 | ## Exporting to a Pushgateway
298 | 
299 | The [Pushgateway](https://github.com/prometheus/pushgateway)
300 | allows ephemeral and batch jobs to expose their metrics to Prometheus.
301 | 
302 | ```python
303 | from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
304 | 
305 | registry = CollectorRegistry()
306 | g = Gauge('job_last_success_unixtime', 'Last time a batch job successfully finished', registry=registry)
307 | g.set_to_current_time()
308 | push_to_gateway('localhost:9091', job='batchA', registry=registry)
309 | ```
310 | 
311 | A separate registry is used, as the default registry may contain other metrics
312 | such as those from the Process Collector.
313 | 
314 | Pushgateway functions take a grouping key. `push_to_gateway` replaces metrics
315 | with the same grouping key, `pushadd_to_gateway` only replaces metrics with the
316 | same name and grouping key, and `delete_from_gateway` deletes metrics with the
317 | given job and grouping key.
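As a rough sketch of the difference (assuming, as with `push_to_gateway` above, that
`pushadd_to_gateway` and `delete_from_gateway` can be imported from `prometheus_client`,
and that a Pushgateway is listening on `localhost:9091`):

```python
from prometheus_client import CollectorRegistry, Gauge, pushadd_to_gateway, delete_from_gateway

registry = CollectorRegistry()
g = Gauge('job_last_success_unixtime', 'Last time a batch job successfully finished', registry=registry)
g.set_to_current_time()

# Add or replace only the metrics in this registry under the grouping key
# job='batchA'; metrics with other names already pushed under that key are kept.
pushadd_to_gateway('localhost:9091', job='batchA', registry=registry)

# Remove everything previously pushed under the grouping key job='batchA'.
delete_from_gateway('localhost:9091', job='batchA')
```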
See the
318 | [Pushgateway documentation](https://github.com/prometheus/pushgateway/blob/master/README.md)
319 | for more information.
320 | 
321 | `instance_ip_grouping_key` returns a grouping key with the instance label set
322 | to the host's IP address.
323 | 
324 | 
325 | ## Bridges
326 | 
327 | It is also possible to expose metrics to systems other than Prometheus.
328 | This allows you to take advantage of Prometheus instrumentation even
329 | if you are not quite ready to fully transition to Prometheus yet.
330 | 
331 | ### Graphite
332 | 
333 | Metrics are pushed over TCP in the Graphite plaintext format.
334 | 
335 | ```python
336 | from prometheus_client.bridge.graphite import GraphiteBridge
337 | 
338 | gb = GraphiteBridge(('graphite.your.org', 2003))
339 | # Push once.
340 | gb.push()
341 | # Push every 10 seconds in a daemon thread.
342 | gb.start(10.0)
343 | ```
344 | 
345 | ## Custom Collectors
346 | 
347 | Sometimes it is not possible to directly instrument code, as it is not
348 | in your control. This requires you to proxy metrics from other systems.
349 | 
350 | To do so, you need to create a custom collector, for example:
351 | 
352 | ```python
353 | from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
354 | 
355 | class CustomCollector(object):
356 |     def collect(self):
357 |         yield GaugeMetricFamily('my_gauge', 'Help text', value=7)
358 |         c = CounterMetricFamily('my_counter_total', 'Help text', labels=['foo'])
359 |         c.add_metric(['bar'], 1.7)
360 |         c.add_metric(['baz'], 3.8)
361 |         yield c
362 | 
363 | REGISTRY.register(CustomCollector())
364 | ```
365 | 
366 | `SummaryMetricFamily` and `HistogramMetricFamily` work similarly.
367 | 
368 | 
369 | ## Parser
370 | 
371 | The Python client supports parsing the Prometheus text format.
372 | This is intended for advanced use cases where you have servers
373 | exposing Prometheus metrics and need to get them into some other
374 | system.
375 | 376 | ```python 377 | from prometheus_client.parser import text_string_to_metric_families 378 | for family in text_string_to_metric_families("my_gauge 1.0\n"): 379 | for sample in family.samples: 380 | print("Name: {0} Labels: {1} Value: {2}".format(*sample)) 381 | ``` 382 | 383 | -------------------------------------------------------------------------------- /tests/test_core.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | import os 3 | import threading 4 | import time 5 | import unittest 6 | 7 | from prometheus_client.core import * 8 | 9 | class TestCounter(unittest.TestCase): 10 | def setUp(self): 11 | self.registry = CollectorRegistry() 12 | self.counter = Counter('c', 'help', registry=self.registry) 13 | 14 | def test_increment(self): 15 | self.assertEqual(0, self.registry.get_sample_value('c')) 16 | self.counter.inc() 17 | self.assertEqual(1, self.registry.get_sample_value('c')) 18 | self.counter.inc(7) 19 | self.assertEqual(8, self.registry.get_sample_value('c')) 20 | 21 | def test_negative_increment_raises(self): 22 | self.assertRaises(ValueError, self.counter.inc, -1) 23 | 24 | def test_function_decorator(self): 25 | @self.counter.count_exceptions(ValueError) 26 | def f(r): 27 | if r: 28 | raise ValueError 29 | else: 30 | raise TypeError 31 | 32 | try: 33 | f(False) 34 | except TypeError: 35 | pass 36 | self.assertEqual(0, self.registry.get_sample_value('c')) 37 | 38 | try: 39 | f(True) 40 | except ValueError: 41 | raised = True 42 | self.assertEqual(1, self.registry.get_sample_value('c')) 43 | 44 | def test_block_decorator(self): 45 | with self.counter.count_exceptions(): 46 | pass 47 | self.assertEqual(0, self.registry.get_sample_value('c')) 48 | 49 | raised = False 50 | try: 51 | with self.counter.count_exceptions(): 52 | raise ValueError 53 | except: 54 | raised = True 55 | self.assertTrue(raised) 56 | self.assertEqual(1, self.registry.get_sample_value('c')) 57 | 58 | 59 | class TestGauge(unittest.TestCase): 60 | def setUp(self): 61 | self.registry = CollectorRegistry() 62 | self.gauge = Gauge('g', 'help', registry=self.registry) 63 | 64 | def test_gauge(self): 65 | self.assertEqual(0, self.registry.get_sample_value('g')) 66 | self.gauge.inc() 67 | self.assertEqual(1, self.registry.get_sample_value('g')) 68 | self.gauge.dec(3) 69 | self.assertEqual(-2, self.registry.get_sample_value('g')) 70 | self.gauge.set(9) 71 | self.assertEqual(9, self.registry.get_sample_value('g')) 72 | 73 | def test_function_decorator(self): 74 | self.assertEqual(0, self.registry.get_sample_value('g')) 75 | 76 | @self.gauge.track_inprogress() 77 | def f(): 78 | self.assertEqual(1, self.registry.get_sample_value('g')) 79 | 80 | f() 81 | self.assertEqual(0, self.registry.get_sample_value('g')) 82 | 83 | def test_block_decorator(self): 84 | self.assertEqual(0, self.registry.get_sample_value('g')) 85 | with self.gauge.track_inprogress(): 86 | self.assertEqual(1, self.registry.get_sample_value('g')) 87 | self.assertEqual(0, self.registry.get_sample_value('g')) 88 | 89 | def test_gauge_function(self): 90 | x = {} 91 | self.gauge.set_function(lambda: len(x)) 92 | self.assertEqual(0, self.registry.get_sample_value('g')) 93 | self.gauge.inc() 94 | self.assertEqual(0, self.registry.get_sample_value('g')) 95 | x['a'] = None 96 | self.assertEqual(1, self.registry.get_sample_value('g')) 97 | 98 | def test_function_decorator(self): 99 | self.assertEqual(0, self.registry.get_sample_value('g')) 100 | 101 | @self.gauge.time() 102 
| def f(): 103 | time.sleep(.001) 104 | 105 | f() 106 | self.assertNotEqual(0, self.registry.get_sample_value('g')) 107 | 108 | def test_block_decorator(self): 109 | self.assertEqual(0, self.registry.get_sample_value('g')) 110 | with self.gauge.time(): 111 | time.sleep(.001) 112 | self.assertNotEqual(0, self.registry.get_sample_value('g')) 113 | 114 | 115 | class TestSummary(unittest.TestCase): 116 | def setUp(self): 117 | self.registry = CollectorRegistry() 118 | self.summary = Summary('s', 'help', registry=self.registry) 119 | 120 | def test_summary(self): 121 | self.assertEqual(0, self.registry.get_sample_value('s_count')) 122 | self.assertEqual(0, self.registry.get_sample_value('s_sum')) 123 | self.summary.observe(10) 124 | self.assertEqual(1, self.registry.get_sample_value('s_count')) 125 | self.assertEqual(10, self.registry.get_sample_value('s_sum')) 126 | 127 | def test_function_decorator(self): 128 | self.assertEqual(0, self.registry.get_sample_value('s_count')) 129 | 130 | @self.summary.time() 131 | def f(): 132 | pass 133 | 134 | f() 135 | self.assertEqual(1, self.registry.get_sample_value('s_count')) 136 | 137 | def test_block_decorator(self): 138 | self.assertEqual(0, self.registry.get_sample_value('s_count')) 139 | with self.summary.time(): 140 | pass 141 | self.assertEqual(1, self.registry.get_sample_value('s_count')) 142 | 143 | 144 | class TestHistogram(unittest.TestCase): 145 | def setUp(self): 146 | self.registry = CollectorRegistry() 147 | self.histogram = Histogram('h', 'help', registry=self.registry) 148 | self.labels = Histogram('hl', 'help', ['l'], registry=self.registry) 149 | 150 | def test_histogram(self): 151 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) 152 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) 153 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) 154 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 155 | self.assertEqual(0, self.registry.get_sample_value('h_count')) 156 | self.assertEqual(0, self.registry.get_sample_value('h_sum')) 157 | 158 | self.histogram.observe(2) 159 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) 160 | self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) 161 | self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) 162 | self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 163 | self.assertEqual(1, self.registry.get_sample_value('h_count')) 164 | self.assertEqual(2, self.registry.get_sample_value('h_sum')) 165 | 166 | self.histogram.observe(2.5) 167 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) 168 | self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) 169 | self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) 170 | self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 171 | self.assertEqual(2, self.registry.get_sample_value('h_count')) 172 | self.assertEqual(4.5, self.registry.get_sample_value('h_sum')) 173 | 174 | self.histogram.observe(float("inf")) 175 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) 176 | self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) 177 | self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) 178 | self.assertEqual(3, self.registry.get_sample_value('h_bucket', {'le': 
'+Inf'})) 179 | self.assertEqual(3, self.registry.get_sample_value('h_count')) 180 | self.assertEqual(float("inf"), self.registry.get_sample_value('h_sum')) 181 | 182 | def test_setting_buckets(self): 183 | h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2]) 184 | self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds) 185 | 186 | h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2, float("inf")]) 187 | self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds) 188 | 189 | self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[]) 190 | self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[float("inf")]) 191 | self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[3, 1]) 192 | 193 | def test_labels(self): 194 | self.labels.labels('a').observe(2) 195 | self.assertEqual(0, self.registry.get_sample_value('hl_bucket', {'le': '1.0', 'l': 'a'})) 196 | self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '2.5', 'l': 'a'})) 197 | self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '5.0', 'l': 'a'})) 198 | self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '+Inf', 'l': 'a'})) 199 | self.assertEqual(1, self.registry.get_sample_value('hl_count', {'l': 'a'})) 200 | self.assertEqual(2, self.registry.get_sample_value('hl_sum', {'l': 'a'})) 201 | 202 | def test_function_decorator(self): 203 | self.assertEqual(0, self.registry.get_sample_value('h_count')) 204 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 205 | 206 | @self.histogram.time() 207 | def f(): 208 | pass 209 | 210 | f() 211 | self.assertEqual(1, self.registry.get_sample_value('h_count')) 212 | self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 213 | 214 | def test_block_decorator(self): 215 | self.assertEqual(0, self.registry.get_sample_value('h_count')) 216 | self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 217 | with self.histogram.time(): 218 | pass 219 | self.assertEqual(1, self.registry.get_sample_value('h_count')) 220 | self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 221 | 222 | 223 | class TestMetricWrapper(unittest.TestCase): 224 | def setUp(self): 225 | self.registry = CollectorRegistry() 226 | self.counter = Counter('c', 'help', labelnames=['l'], registry=self.registry) 227 | self.two_labels = Counter('two', 'help', labelnames=['a', 'b'], registry=self.registry) 228 | 229 | def test_child(self): 230 | self.counter.labels('x').inc() 231 | self.assertEqual(1, self.registry.get_sample_value('c', {'l': 'x'})) 232 | self.two_labels.labels('x', 'y').inc(2) 233 | self.assertEqual(2, self.registry.get_sample_value('two', {'a': 'x', 'b': 'y'})) 234 | 235 | def test_remove(self): 236 | self.counter.labels('x').inc() 237 | self.counter.labels('y').inc(2) 238 | self.assertEqual(1, self.registry.get_sample_value('c', {'l': 'x'})) 239 | self.assertEqual(2, self.registry.get_sample_value('c', {'l': 'y'})) 240 | self.counter.remove('x') 241 | self.assertEqual(None, self.registry.get_sample_value('c', {'l': 'x'})) 242 | self.assertEqual(2, self.registry.get_sample_value('c', {'l': 'y'})) 243 | 244 | def test_incorrect_label_count_raises(self): 245 | self.assertRaises(ValueError, self.counter.labels) 246 | self.assertRaises(ValueError, self.counter.labels, 'a', 'b') 247 | self.assertRaises(ValueError, self.counter.remove) 248 | self.assertRaises(ValueError, 
self.counter.remove, 'a', 'b') 249 | 250 | def test_labels_coerced_to_string(self): 251 | self.counter.labels(None).inc() 252 | self.counter.labels({'l': None}).inc() 253 | self.assertEqual(2, self.registry.get_sample_value('c', {'l': 'None'})) 254 | 255 | self.counter.remove(None) 256 | self.assertEqual(None, self.registry.get_sample_value('c', {'l': 'None'})) 257 | 258 | def test_non_string_labels_raises(self): 259 | class Test(object): 260 | __str__ = None 261 | self.assertRaises(TypeError, self.counter.labels, Test()) 262 | self.assertRaises(TypeError, self.counter.labels, {'l': Test()}) 263 | 264 | def test_namespace_subsystem_concatenated(self): 265 | c = Counter('c', 'help', namespace='a', subsystem='b', registry=self.registry) 266 | c.inc() 267 | self.assertEqual(1, self.registry.get_sample_value('a_b_c')) 268 | 269 | def test_labels_by_dict(self): 270 | self.counter.labels({'l': 'x'}).inc() 271 | self.assertEqual(1, self.registry.get_sample_value('c', {'l': 'x'})) 272 | self.assertRaises(ValueError, self.counter.labels, {'l': 'x', 'm': 'y'}) 273 | self.assertRaises(ValueError, self.counter.labels, {'m': 'y'}) 274 | self.assertRaises(ValueError, self.counter.labels, {}) 275 | self.two_labels.labels({'a': 'x', 'b': 'y'}).inc() 276 | self.assertEqual(1, self.registry.get_sample_value('two', {'a': 'x', 'b': 'y'})) 277 | self.assertRaises(ValueError, self.two_labels.labels, {'a': 'x', 'b': 'y', 'c': 'z'}) 278 | self.assertRaises(ValueError, self.two_labels.labels, {'a': 'x', 'c': 'z'}) 279 | self.assertRaises(ValueError, self.two_labels.labels, {'b': 'y', 'c': 'z'}) 280 | self.assertRaises(ValueError, self.two_labels.labels, {'c': 'z'}) 281 | self.assertRaises(ValueError, self.two_labels.labels, {}) 282 | 283 | def test_invalid_names_raise(self): 284 | self.assertRaises(ValueError, Counter, '', 'help') 285 | self.assertRaises(ValueError, Counter, '^', 'help') 286 | self.assertRaises(ValueError, Counter, '', 'help', namespace='&') 287 | self.assertRaises(ValueError, Counter, '', 'help', subsystem='(') 288 | self.assertRaises(ValueError, Counter, 'c', '', labelnames=['^']) 289 | self.assertRaises(ValueError, Counter, 'c', '', labelnames=['__reserved']) 290 | self.assertRaises(ValueError, Summary, 'c', '', labelnames=['quantile']) 291 | 292 | 293 | class TestMetricFamilies(unittest.TestCase): 294 | def setUp(self): 295 | self.registry = CollectorRegistry() 296 | 297 | def custom_collector(self, metric_family): 298 | class CustomCollector(object): 299 | def collect(self): 300 | return [metric_family] 301 | self.registry.register(CustomCollector()) 302 | 303 | def test_counter(self): 304 | self.custom_collector(CounterMetricFamily('c', 'help', value=1)) 305 | self.assertEqual(1, self.registry.get_sample_value('c', {})) 306 | 307 | def test_counter_labels(self): 308 | cmf = CounterMetricFamily('c', 'help', labels=['a', 'c']) 309 | cmf.add_metric(['b', 'd'], 2) 310 | self.custom_collector(cmf) 311 | self.assertEqual(2, self.registry.get_sample_value('c', {'a': 'b', 'c': 'd'})) 312 | 313 | def test_gauge(self): 314 | self.custom_collector(GaugeMetricFamily('g', 'help', value=1)) 315 | self.assertEqual(1, self.registry.get_sample_value('g', {})) 316 | 317 | def test_gauge_labels(self): 318 | cmf = GaugeMetricFamily('g', 'help', labels=['a']) 319 | cmf.add_metric(['b'], 2) 320 | self.custom_collector(cmf) 321 | self.assertEqual(2, self.registry.get_sample_value('g', {'a':'b'})) 322 | 323 | def test_summary(self): 324 | self.custom_collector(SummaryMetricFamily('s', 'help', count_value=1, 
sum_value=2)) 325 | self.assertEqual(1, self.registry.get_sample_value('s_count', {})) 326 | self.assertEqual(2, self.registry.get_sample_value('s_sum', {})) 327 | 328 | def test_summary_labels(self): 329 | cmf = SummaryMetricFamily('s', 'help', labels=['a']) 330 | cmf.add_metric(['b'], count_value=1, sum_value=2) 331 | self.custom_collector(cmf) 332 | self.assertEqual(1, self.registry.get_sample_value('s_count', {'a': 'b'})) 333 | self.assertEqual(2, self.registry.get_sample_value('s_sum', {'a': 'b'})) 334 | 335 | def test_histogram(self): 336 | self.custom_collector(HistogramMetricFamily('h', 'help', buckets=[('0', 1), ('+Inf', 2)], sum_value=3)) 337 | self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '0'})) 338 | self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) 339 | self.assertEqual(2, self.registry.get_sample_value('h_count', {})) 340 | self.assertEqual(3, self.registry.get_sample_value('h_sum', {})) 341 | 342 | def test_histogram_labels(self): 343 | cmf = HistogramMetricFamily('h', 'help', labels=['a']) 344 | cmf.add_metric(['b'], buckets=[('0', 1), ('+Inf', 2)], sum_value=3) 345 | self.custom_collector(cmf) 346 | self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '0'})) 347 | self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '+Inf'})) 348 | self.assertEqual(2, self.registry.get_sample_value('h_count', {'a': 'b'})) 349 | self.assertEqual(3, self.registry.get_sample_value('h_sum', {'a': 'b'})) 350 | 351 | def test_bad_constructors(self): 352 | self.assertRaises(ValueError, CounterMetricFamily, 'c', 'help', value=1, labels=[]) 353 | self.assertRaises(ValueError, CounterMetricFamily, 'c', 'help', value=1, labels=['a']) 354 | 355 | self.assertRaises(ValueError, GaugeMetricFamily, 'g', 'help', value=1, labels=[]) 356 | self.assertRaises(ValueError, GaugeMetricFamily, 'g', 'help', value=1, labels=['a']) 357 | 358 | self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', sum_value=1) 359 | self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1) 360 | self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1, labels=['a']) 361 | self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', sum_value=1, labels=['a']) 362 | self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1, sum_value=1, labels=['a']) 363 | 364 | self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', sum_value=1) 365 | self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}) 366 | self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', sum_value=1, labels=['a']) 367 | self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}, labels=['a']) 368 | self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}, sum_value=1, labels=['a']) 369 | self.assertRaises(KeyError, HistogramMetricFamily, 'h', 'help', buckets={}, sum_value=1) 370 | 371 | 372 | if __name__ == '__main__': 373 | unittest.main() 374 | -------------------------------------------------------------------------------- /prometheus_client/core.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import unicode_literals 4 | 5 | import copy 6 | import math 7 | import re 8 | import time 9 | import types 10 | 11 | try: 12 | from BaseHTTPServer import BaseHTTPRequestHandler 13 | except ImportError: 14 | # Python 3 15 | unicode = str 16 
| 17 | from functools import wraps 18 | from threading import Lock 19 | 20 | _METRIC_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$') 21 | _METRIC_LABEL_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$') 22 | _RESERVED_METRIC_LABEL_NAME_RE = re.compile(r'^__.*$') 23 | _INF = float("inf") 24 | _MINUS_INF = float("-inf") 25 | 26 | 27 | class CollectorRegistry(object): 28 | '''Metric collector registry. 29 | 30 | Collectors must have a no-argument method 'collect' that returns a list of 31 | Metric objects. The returned metrics should be consistent with the Prometheus 32 | exposition formats. 33 | ''' 34 | def __init__(self): 35 | self._collectors = set() 36 | self._lock = Lock() 37 | 38 | def register(self, collector): 39 | '''Add a collector to the registry.''' 40 | with self._lock: 41 | self._collectors.add(collector) 42 | 43 | def unregister(self, collector): 44 | '''Remove a collector from the registry.''' 45 | with self._lock: 46 | self._collectors.remove(collector) 47 | 48 | def collect(self): 49 | '''Yields metrics from the collectors in the registry.''' 50 | collectors = None 51 | with self._lock: 52 | collectors = copy.copy(self._collectors) 53 | for collector in collectors: 54 | for metric in collector.collect(): 55 | yield metric 56 | 57 | def get_sample_value(self, name, labels=None): 58 | '''Returns the sample value, or None if not found. 59 | 60 | This is inefficient, and intended only for use in unittests. 61 | ''' 62 | if labels is None: 63 | labels = {} 64 | for metric in self.collect(): 65 | for n, l, value in metric.samples: 66 | if n == name and l == labels: 67 | return value 68 | return None 69 | 70 | 71 | REGISTRY = CollectorRegistry() 72 | '''The default registry.''' 73 | 74 | _METRIC_TYPES = ('counter', 'gauge', 'summary', 'histogram', 'untyped') 75 | 76 | 77 | class Metric(object): 78 | '''A single metric family and its samples. 79 | 80 | This is intended only for internal use by the instrumentation client. 81 | 82 | Custom collectors should use GaugeMetricFamily, CounterMetricFamily 83 | and SummaryMetricFamily instead. 84 | ''' 85 | def __init__(self, name, documentation, typ): 86 | self.name = name 87 | self.documentation = documentation 88 | if typ not in _METRIC_TYPES: 89 | raise ValueError('Invalid metric type: ' + typ) 90 | self.type = typ 91 | self.samples = [] 92 | 93 | def add_sample(self, name, labels, value): 94 | '''Add a sample to the metric. 95 | 96 | Internal-only, do not use.''' 97 | self.samples.append((name, labels, value)) 98 | 99 | def __eq__(self, other): 100 | return (isinstance(other, Metric) 101 | and self.name == other.name 102 | and self.documentation == other.documentation 103 | and self.type == other.type 104 | and self.samples == other.samples) 105 | 106 | 107 | class CounterMetricFamily(Metric): 108 | '''A single counter and its samples. 109 | 110 | For use by custom collectors. 111 | ''' 112 | def __init__(self, name, documentation, value=None, labels=None): 113 | Metric.__init__(self, name, documentation, 'counter') 114 | if labels is not None and value is not None: 115 | raise ValueError('Can only specify at most one of value and labels.') 116 | if labels is None: 117 | labels = [] 118 | self._labelnames = labels 119 | if value is not None: 120 | self.add_metric([], value) 121 | 122 | def add_metric(self, labels, value): 123 | '''Add a metric to the metric family. 124 | 125 | Args: 126 | labels: A list of label values 127 | value: The value of the metric. 
128 | ''' 129 | self.samples.append((self.name, dict(zip(self._labelnames, labels)), value)) 130 | 131 | 132 | class GaugeMetricFamily(Metric): 133 | '''A single gauge and its samples. 134 | 135 | For use by custom collectors. 136 | ''' 137 | def __init__(self, name, documentation, value=None, labels=None): 138 | Metric.__init__(self, name, documentation, 'gauge') 139 | if labels is not None and value is not None: 140 | raise ValueError('Can only specify at most one of value and labels.') 141 | if labels is None: 142 | labels = [] 143 | self._labelnames = labels 144 | if value is not None: 145 | self.add_metric([], value) 146 | 147 | def add_metric(self, labels, value): 148 | '''Add a metric to the metric family. 149 | 150 | Args: 151 | labels: A list of label values 152 | value: A float 153 | ''' 154 | self.samples.append((self.name, dict(zip(self._labelnames, labels)), value)) 155 | 156 | 157 | class SummaryMetricFamily(Metric): 158 | '''A single summary and its samples. 159 | 160 | For use by custom collectors. 161 | ''' 162 | def __init__(self, name, documentation, count_value=None, sum_value=None, labels=None): 163 | Metric.__init__(self, name, documentation, 'summary') 164 | if (sum_value is None) != (count_value is None): 165 | raise ValueError('count_value and sum_value must be provided together.') 166 | if labels is not None and count_value is not None: 167 | raise ValueError('Can only specify at most one of value and labels.') 168 | if labels is None: 169 | labels = [] 170 | self._labelnames = labels 171 | if count_value is not None: 172 | self.add_metric([], count_value, sum_value) 173 | 174 | def add_metric(self, labels, count_value, sum_value): 175 | '''Add a metric to the metric family. 176 | 177 | Args: 178 | labels: A list of label values 179 | count_value: The count value of the metric. 180 | sum_value: The sum value of the metric. 181 | ''' 182 | self.samples.append((self.name + '_count', dict(zip(self._labelnames, labels)), count_value)) 183 | self.samples.append((self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value)) 184 | 185 | 186 | class HistogramMetricFamily(Metric): 187 | '''A single histogram and its samples. 188 | 189 | For use by custom collectors. 190 | ''' 191 | def __init__(self, name, documentation, buckets=None, sum_value=None, labels=None): 192 | Metric.__init__(self, name, documentation, 'histogram') 193 | if (sum_value is None) != (buckets is None): 194 | raise ValueError('buckets and sum_value must be provided together.') 195 | if labels is not None and buckets is not None: 196 | raise ValueError('Can only specify at most one of buckets and labels.') 197 | if labels is None: 198 | labels = [] 199 | self._labelnames = labels 200 | if buckets is not None: 201 | self.add_metric([], buckets, sum_value) 202 | 203 | def add_metric(self, labels, buckets, sum_value): 204 | '''Add a metric to the metric family. 205 | 206 | Args: 207 | labels: A list of label values 208 | buckets: A list of pairs of bucket names and values. 209 | The buckets must be sorted, and +Inf present. 210 | sum_value: The sum value of the metric. 211 | ''' 212 | for bucket, value in buckets: 213 | self.samples.append((self.name + '_bucket', dict(list(zip(self._labelnames, labels)) + [('le', bucket)]), value)) 214 | # +Inf is last and provides the count value. 
215 | self.samples.append((self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1])) 216 | self.samples.append((self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value)) 217 | 218 | 219 | class _MutexValue(object): 220 | '''A float protected by a mutex.''' 221 | 222 | def __init__(self, name, labelnames, labelvalues): 223 | self._value = 0.0 224 | self._lock = Lock() 225 | 226 | def inc(self, amount): 227 | with self._lock: 228 | self._value += amount 229 | 230 | def set(self, value): 231 | with self._lock: 232 | self._value = value 233 | 234 | def get(self): 235 | with self._lock: 236 | return self._value 237 | 238 | _ValueClass = _MutexValue 239 | 240 | 241 | class _LabelWrapper(object): 242 | '''Handles labels for the wrapped metric.''' 243 | def __init__(self, wrappedClass, name, labelnames, **kwargs): 244 | self._wrappedClass = wrappedClass 245 | self._type = wrappedClass._type 246 | self._name = name 247 | self._labelnames = labelnames 248 | self._kwargs = kwargs 249 | self._lock = Lock() 250 | self._metrics = {} 251 | 252 | for l in labelnames: 253 | if l.startswith('__'): 254 | raise ValueError('Invalid label metric name: ' + l) 255 | 256 | def labels(self, *labelvalues): 257 | '''Return the child for the given labelset. 258 | 259 | All metrics can have labels, allowing grouping of related time series. 260 | Taking a counter as an example: 261 | 262 | from prometheus_client import Counter 263 | 264 | c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint']) 265 | c.labels('get', '/').inc() 266 | c.labels('post', '/submit').inc() 267 | 268 | Labels can also be provided as a dict: 269 | 270 | from prometheus_client import Counter 271 | 272 | c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint']) 273 | c.labels({'method': 'get', 'endpoint': '/'}).inc() 274 | c.labels({'method': 'post', 'endpoint': '/submit'}).inc() 275 | 276 | See the best practices on [naming](http://prometheus.io/docs/practices/naming/) 277 | and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels). 
278 | ''' 279 | if len(labelvalues) == 1 and type(labelvalues[0]) == dict: 280 | if sorted(labelvalues[0].keys()) != sorted(self._labelnames): 281 | raise ValueError('Incorrect label names') 282 | labelvalues = tuple([unicode(labelvalues[0][l]) for l in self._labelnames]) 283 | else: 284 | if len(labelvalues) != len(self._labelnames): 285 | raise ValueError('Incorrect label count') 286 | labelvalues = tuple([unicode(l) for l in labelvalues]) 287 | with self._lock: 288 | if labelvalues not in self._metrics: 289 | self._metrics[labelvalues] = self._wrappedClass(self._name, self._labelnames, labelvalues, **self._kwargs) 290 | return self._metrics[labelvalues] 291 | 292 | def remove(self, *labelvalues): 293 | '''Remove the given labelset from the metric.''' 294 | if len(labelvalues) != len(self._labelnames): 295 | raise ValueError('Incorrect label count') 296 | labelvalues = tuple([unicode(l) for l in labelvalues]) 297 | with self._lock: 298 | del self._metrics[labelvalues] 299 | 300 | def _samples(self): 301 | with self._lock: 302 | metrics = self._metrics.copy() 303 | for labels, metric in metrics.items(): 304 | series_labels = list(dict(zip(self._labelnames, labels)).items()) 305 | for suffix, sample_labels, value in metric._samples(): 306 | yield (suffix, dict(series_labels + list(sample_labels.items())), value) 307 | 308 | 309 | def _MetricWrapper(cls): 310 | '''Provides common functionality for metrics.''' 311 | def init(name, documentation, labelnames=(), namespace='', subsystem='', registry=REGISTRY, **kwargs): 312 | full_name = '' 313 | if namespace: 314 | full_name += namespace + '_' 315 | if subsystem: 316 | full_name += subsystem + '_' 317 | full_name += name 318 | 319 | if labelnames: 320 | labelnames = tuple(labelnames) 321 | for l in labelnames: 322 | if not _METRIC_LABEL_NAME_RE.match(l): 323 | raise ValueError('Invalid label metric name: ' + l) 324 | if _RESERVED_METRIC_LABEL_NAME_RE.match(l): 325 | raise ValueError('Reserved label metric name: ' + l) 326 | if l in cls._reserved_labelnames: 327 | raise ValueError('Reserved label metric name: ' + l) 328 | collector = _LabelWrapper(cls, name, labelnames, **kwargs) 329 | else: 330 | collector = cls(name, labelnames, (), **kwargs) 331 | 332 | if not _METRIC_NAME_RE.match(full_name): 333 | raise ValueError('Invalid metric name: ' + full_name) 334 | 335 | def collect(): 336 | metric = Metric(full_name, documentation, cls._type) 337 | for suffix, labels, value in collector._samples(): 338 | metric.add_sample(full_name + suffix, labels, value) 339 | return [metric] 340 | collector.collect = collect 341 | 342 | if registry: 343 | registry.register(collector) 344 | return collector 345 | 346 | return init 347 | 348 | 349 | @_MetricWrapper 350 | class Counter(object): 351 | '''A Counter tracks counts of events or running totals. 352 | 353 | Example use cases for Counters: 354 | - Number of requests processed 355 | - Number of items that were inserted into a queue 356 | - Total amount of data that a system has processed 357 | 358 | Counters can only go up (and be reset when the process restarts). If your use case can go down, 359 | you should use a Gauge instead. 
360 | 361 | An example for a Counter: 362 | 363 | from prometheus_client import Counter 364 | 365 | c = Counter('my_failures_total', 'Description of counter') 366 | c.inc() # Increment by 1 367 | c.inc(1.6) # Increment by given value 368 | 369 | There are utilities to count exceptions raised: 370 | 371 | @c.count_exceptions() 372 | def f(): 373 | pass 374 | 375 | with c.count_exceptions(): 376 | pass 377 | 378 | # Count only one type of exception 379 | with c.count_exceptions(ValueError): 380 | pass 381 | ''' 382 | _type = 'counter' 383 | _reserved_labelnames = [] 384 | 385 | def __init__(self, name, labelnames, labelvalues): 386 | self._value = _ValueClass(name, labelnames, labelvalues) 387 | 388 | def inc(self, amount=1): 389 | '''Increment counter by the given amount.''' 390 | if amount < 0: 391 | raise ValueError('Counters can only be incremented by non-negative amounts.') 392 | self._value.inc(amount) 393 | 394 | def count_exceptions(self, exception=Exception): 395 | '''Count exceptions in a block of code or function. 396 | 397 | Can be used as a function decorator or context manager. 398 | Increments the counter when an exception of the given 399 | type is raised up out of the code. 400 | ''' 401 | 402 | class ExceptionCounter(object): 403 | def __init__(self, counter): 404 | self._counter = counter 405 | 406 | def __enter__(self): 407 | pass 408 | 409 | def __exit__(self, typ, value, traceback): 410 | if isinstance(value, exception): 411 | self._counter.inc() 412 | 413 | def __call__(self, f): 414 | @wraps(f) 415 | def wrapped(*args, **kwargs): 416 | with self: 417 | return f(*args, **kwargs) 418 | return wrapped 419 | 420 | return ExceptionCounter(self) 421 | 422 | def _samples(self): 423 | return (('', {}, self._value.get()), ) 424 | 425 | 426 | @_MetricWrapper 427 | class Gauge(object): 428 | '''Gauge metric, to report instantaneous values. 429 | 430 | Examples of Gauges include: 431 | - Inprogress requests 432 | - Number of items in a queue 433 | - Free memory 434 | - Total memory 435 | - Temperature 436 | 437 | Gauges can go both up and down. 438 | 439 | from prometheus_client import Gauge 440 | 441 | g = Gauge('my_inprogress_requests', 'Description of gauge') 442 | g.inc() # Increment by 1 443 | g.dec(10) # Decrement by given value 444 | g.set(4.2) # Set to a given value 445 | 446 | There are utilities for common use cases: 447 | 448 | g.set_to_current_time() # Set to current unixtime 449 | 450 | # Increment when entered, decrement when exited. 451 | @g.track_inprogress() 452 | def f(): 453 | pass 454 | 455 | with g.track_inprogress(): 456 | pass 457 | 458 | A Gauge can also take its value from a callback: 459 | 460 | d = Gauge('data_objects', 'Number of objects') 461 | my_dict = {} 462 | d.set_function(lambda: len(my_dict)) 463 | ''' 464 | _type = 'gauge' 465 | _reserved_labelnames = [] 466 | 467 | def __init__(self, name, labelnames, labelvalues): 468 | self._value = _ValueClass(name, labelnames, labelvalues) 469 | 470 | def inc(self, amount=1): 471 | '''Increment gauge by the given amount.''' 472 | self._value.inc(amount) 473 | 474 | def dec(self, amount=1): 475 | '''Decrement gauge by the given amount.''' 476 | self._value.inc(-amount) 477 | 478 | def set(self, value): 479 | '''Set gauge to the given value.''' 480 | self._value.set(float(value)) 481 | 482 | def set_to_current_time(self): 483 | '''Set gauge to the current unixtime.''' 484 | self.set(time.time()) 485 | 486 | def track_inprogress(self): 487 | '''Track inprogress blocks of code or functions. 
488 | 489 | Can be used as a function decorator or context manager. 490 | Increments the gauge when the code is entered, 491 | and decrements when it is exited. 492 | ''' 493 | 494 | class InprogressTracker(object): 495 | def __init__(self, gauge): 496 | self._gauge = gauge 497 | 498 | def __enter__(self): 499 | self._gauge.inc() 500 | 501 | def __exit__(self, typ, value, traceback): 502 | self._gauge.dec() 503 | 504 | def __call__(self, f): 505 | @wraps(f) 506 | def wrapped(*args, **kwargs): 507 | with self: 508 | return f(*args, **kwargs) 509 | return wrapped 510 | 511 | return InprogressTracker(self) 512 | 513 | def time(self): 514 | '''Time a block of code or function, and set the duration in seconds. 515 | 516 | Can be used as a function decorator or context manager. 517 | ''' 518 | 519 | class Timer(object): 520 | def __init__(self, gauge): 521 | self._gauge = gauge 522 | 523 | def __enter__(self): 524 | self._start = time.time() 525 | 526 | def __exit__(self, typ, value, traceback): 527 | # Time can go backwards. 528 | self._gauge.set(max(time.time() - self._start, 0)) 529 | 530 | def __call__(self, f): 531 | @wraps(f) 532 | def wrapped(*args, **kwargs): 533 | with self: 534 | return f(*args, **kwargs) 535 | return wrapped 536 | 537 | return Timer(self) 538 | 539 | def set_function(self, f): 540 | '''Call the provided function to return the Gauge value. 541 | 542 | The function must return a float, and may be called from 543 | multiple threads. All other methods of the Gauge become NOOPs. 544 | ''' 545 | def samples(self): 546 | return (('', {}, float(f())), ) 547 | self._samples = types.MethodType(samples, self) 548 | 549 | def _samples(self): 550 | return (('', {}, self._value.get()), ) 551 | 552 | 553 | @_MetricWrapper 554 | class Summary(object): 555 | '''A Summary tracks the size and number of events. 556 | 557 | Example use cases for Summaries: 558 | - Response latency 559 | - Request size 560 | 561 | Example for a Summary: 562 | 563 | from prometheus_client import Summary 564 | 565 | s = Summary('request_size_bytes', 'Request size (bytes)') 566 | s.observe(512) # Observe 512 (bytes) 567 | 568 | Example for a Summary using time: 569 | 570 | from prometheus_client import Summary 571 | 572 | REQUEST_TIME = Summary('response_latency_seconds', 'Response latency (seconds)') 573 | 574 | @REQUEST_TIME.time() 575 | def create_response(request): 576 | """A dummy function""" 577 | time.sleep(1) 578 | 579 | Example for using the same Summary object as a context manager: 580 | 581 | with REQUEST_TIME.time(): 582 | pass # Logic to be timed 583 | ''' 584 | _type = 'summary' 585 | _reserved_labelnames = ['quantile'] 586 | 587 | def __init__(self, name, labelnames, labelvalues): 588 | self._count = _ValueClass(name + '_count', labelnames, labelvalues) 589 | self._sum = _ValueClass(name + '_sum', labelnames, labelvalues) 590 | 591 | def observe(self, amount): 592 | '''Observe the given amount.''' 593 | self._count.inc(1) 594 | self._sum.inc(amount) 595 | 596 | def time(self): 597 | '''Time a block of code or function, and observe the duration in seconds. 598 | 599 | Can be used as a function decorator or context manager. 600 | ''' 601 | 602 | class Timer(object): 603 | def __init__(self, summary): 604 | self._summary = summary 605 | 606 | def __enter__(self): 607 | self._start = time.time() 608 | 609 | def __exit__(self, typ, value, traceback): 610 | # Time can go backwards. 
611 | self._summary.observe(max(time.time() - self._start, 0)) 612 | 613 | def __call__(self, f): 614 | @wraps(f) 615 | def wrapped(*args, **kwargs): 616 | with self: 617 | return f(*args, **kwargs) 618 | return wrapped 619 | 620 | return Timer(self) 621 | 622 | def _samples(self): 623 | return ( 624 | ('_count', {}, self._count.get()), 625 | ('_sum', {}, self._sum.get())) 626 | 627 | 628 | def _floatToGoString(d): 629 | if d == _INF: 630 | return '+Inf' 631 | elif d == _MINUS_INF: 632 | return '-Inf' 633 | elif math.isnan(d): 634 | return 'NaN' 635 | else: 636 | return repr(float(d)) 637 | 638 | 639 | @_MetricWrapper 640 | class Histogram(object): 641 | '''A Histogram tracks the size and number of events in buckets. 642 | 643 | You can use Histograms for aggregatable calculation of quantiles. 644 | 645 | Example use cases: 646 | - Response latency 647 | - Request size 648 | 649 | Example for a Histogram: 650 | 651 | from prometheus_client import Histogram 652 | 653 | h = Histogram('request_size_bytes', 'Request size (bytes)') 654 | h.observe(512) # Observe 512 (bytes) 655 | 656 | Example for a Histogram using time: 657 | 658 | from prometheus_client import Histogram 659 | 660 | REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)') 661 | 662 | @REQUEST_TIME.time() 663 | def create_response(request): 664 | """A dummy function""" 665 | time.sleep(1) 666 | 667 | Example of using the same Histogram object as a context manager: 668 | 669 | with REQUEST_TIME.time(): 670 | pass # Logic to be timed 671 | 672 | The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds. 673 | They can be overridden by passing `buckets` keyword argument to `Histogram`. 674 | 675 | **NB** The Python client doesn't store or expose quantile information at this time. 676 | ''' 677 | _type = 'histogram' 678 | _reserved_labelnames = ['histogram'] 679 | 680 | def __init__(self, name, labelnames, labelvalues, buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, _INF)): 681 | self._sum = _ValueClass(name + '_sum', labelnames, labelvalues) 682 | buckets = [float(b) for b in buckets] 683 | if buckets != sorted(buckets): 684 | # This is probably an error on the part of the user, 685 | # so raise rather than sorting for them. 686 | raise ValueError('Buckets not in sorted order') 687 | if buckets and buckets[-1] != _INF: 688 | buckets.append(_INF) 689 | if len(buckets) < 2: 690 | raise ValueError('Must have at least two buckets') 691 | self._upper_bounds = buckets 692 | self._buckets = [] 693 | bucket_labelnames = labelnames + ('le',) 694 | for b in buckets: 695 | self._buckets.append(_ValueClass(name + '_bucket', bucket_labelnames, labelvalues + (_floatToGoString(b),))) 696 | 697 | def observe(self, amount): 698 | '''Observe the given amount.''' 699 | self._sum.inc(amount) 700 | for i, bound in enumerate(self._upper_bounds): 701 | if amount <= bound: 702 | self._buckets[i].inc(1) 703 | break 704 | 705 | def time(self): 706 | '''Time a block of code or function, and observe the duration in seconds. 707 | 708 | Can be used as a function decorator or context manager. 709 | ''' 710 | 711 | class Timer(object): 712 | def __init__(self, histogram): 713 | self._histogram = histogram 714 | 715 | def __enter__(self): 716 | self._start = time.time() 717 | 718 | def __exit__(self, typ, value, traceback): 719 | # Time can go backwards. 
720 | self._histogram.observe(max(time.time() - self._start, 0)) 721 | 722 | def __call__(self, f): 723 | @wraps(f) 724 | def wrapped(*args, **kwargs): 725 | with self: 726 | return f(*args, **kwargs) 727 | return wrapped 728 | 729 | return Timer(self) 730 | 731 | def _samples(self): 732 | samples = [] 733 | acc = 0 734 | for i, bound in enumerate(self._upper_bounds): 735 | acc += self._buckets[i].get() 736 | samples.append(('_bucket', {'le': _floatToGoString(bound)}, acc)) 737 | samples.append(('_count', {}, acc)) 738 | samples.append(('_sum', {}, self._sum.get())) 739 | return tuple(samples) 740 | 741 | --------------------------------------------------------------------------------