├── tests ├── __init__.py ├── integration │ ├── __init__.py │ ├── protos │ │ ├── __init__.py │ │ └── hello_world.proto │ └── hello_world │ │ ├── __init__.py │ │ ├── hello_world_pb2.py │ │ ├── hello_world_client.py │ │ ├── hello_world_server.py │ │ └── hello_world_pb2_grpc.py ├── py_grpc_prometheus │ ├── __init__.py │ ├── utils.py │ ├── test_grpc_server_started.py │ ├── test_grpc_server_handled.py │ ├── test_grpc_server_msg_sent.py │ ├── test_grpc_server_msg_received.py │ ├── test_grpc_server_interceptor_exception.py │ ├── test_grpc_server_handling_seconds.py │ └── test_grpc_server_handled_latency_seconds.py └── conftest.py ├── py_grpc_prometheus ├── __init__.py ├── grpc_utils.py ├── server_metrics.py ├── client_metrics.py ├── prometheus_server_interceptor.py └── prometheus_client_interceptor.py ├── requirements.txt ├── test_requirements.txt ├── .travis.yaml ├── setup.py ├── .pre-commit-config.yaml ├── .github └── workflows │ └── test.yaml ├── Makefile ├── .gitignore ├── README.md ├── LICENSE └── .pylintrc /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py_grpc_prometheus/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/integration/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/integration/protos/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/py_grpc_prometheus/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /tests/integration/hello_world/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Use the dependencies from the setup.py. 2 | -e . 3 | -------------------------------------------------------------------------------- /test_requirements.txt: -------------------------------------------------------------------------------- 1 | pylint 2 | pre-commit 3 | protobuf 4 | pytest 5 | requests 6 | coverage 7 | -------------------------------------------------------------------------------- /.travis.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: 4 | - "3.6" 5 | install: 6 | - "pip install -r requirements.txt" 7 | - "pip install pylint" 8 | script: 9 | - "make pre-commit run-test" 10 | -------------------------------------------------------------------------------- /tests/py_grpc_prometheus/utils.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from prometheus_client.parser import text_string_to_metric_families 3 | 4 | 5 | def get_server_metric(metric_name): 6 | metrics = list( 7 | text_string_to_metric_families( 8 | requests.get("http://localhost:50052/metrics", timeout=5).text 9 | ) 10 | ) 11 | target_metric = list(filter(lambda x: x.name == metric_name, metrics)) 12 | assert len(target_metric) == 1 13 | return target_metric[0] 14 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from setuptools import find_packages 3 | from setuptools import setup 4 | 5 | with open("README.md", 
"r") as fh: 6 | long_description = fh.read() 7 | 8 | setup(name="py_grpc_prometheus", 9 | version="0.8.0", 10 | description="Python gRPC Prometheus Interceptors", 11 | long_description=long_description, 12 | long_description_content_type="text/markdown", 13 | author="Lin Chen", 14 | author_email="linchen04@gmail.com", 15 | install_requires=[ 16 | "setuptools>=39.0.1", 17 | "grpcio>=1.10.0", 18 | "prometheus_client>=0.3.0" 19 | ], 20 | url="https://github.com/lchenn/py-grpc-prometheus", 21 | packages=find_packages(exclude=["tests.*", "tests"]), 22 | ) 23 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/adrienverge/yamllint.git 4 | rev: v1.19.0 5 | hooks: 6 | - id: yamllint 7 | - repo: https://github.com/pre-commit/pre-commit-hooks 8 | rev: v2.4.0 9 | hooks: 10 | - id: check-case-conflict 11 | - id: check-merge-conflict 12 | - id: check-symlinks 13 | - id: end-of-file-fixer 14 | - id: trailing-whitespace 15 | - id: no-commit-to-branch 16 | args: [--branch=master] 17 | - id: check-executables-have-shebangs 18 | - id: check-json 19 | - id: check-xml 20 | - repo: https://github.com/pre-commit/mirrors-pylint 21 | rev: 'v2.4.4' 22 | hooks: 23 | - id: pylint 24 | args: [--rcfile=.pylintrc, --max-line-length=100] 25 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | jobs: 4 | test: 5 | runs-on: ubuntu-latest 6 | strategy: 7 | matrix: 8 | python-version: 9 | - '3.7' 10 | - '3.8' 11 | - '3.9' 12 | - '3.10' 13 | - '3.11' 14 | - '3.12' 15 | name: Test in python ${{ matrix.python-version }} 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Setup python 19 | uses: actions/setup-python@v5 20 | with: 21 | 
python-version: ${{ matrix.python-version }} 22 | architecture: x64 23 | - run: |- 24 | pip install -r requirements.txt 25 | pip install -r test_requirements.txt 26 | - run: |- 27 | coverage run --source=py_grpc_prometheus -m pytest 28 | coverage report -m 29 | -------------------------------------------------------------------------------- /tests/integration/protos/hello_world.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | // The greeting service definition. 4 | service Greeter { 5 | // Sends a greeting. 6 | rpc SayHello (HelloRequest) returns (HelloReply) {} 7 | 8 | // Sends one greeting, get multiple response. 9 | rpc SayHelloUnaryStream (MultipleHelloResRequest) returns (stream HelloReply) {} 10 | 11 | // Send multiple greetings, get one response. 12 | rpc SayHelloStreamUnary (stream HelloRequest) returns (HelloReply) {} 13 | 14 | // Send multiple greetings, get multiple response. 15 | rpc SayHelloBidiStream(stream MultipleHelloResRequest) returns (stream HelloReply) {} 16 | } 17 | 18 | // The request message containing the user's name. 19 | message HelloRequest { 20 | string name = 1; 21 | } 22 | 23 | message MultipleHelloResRequest { 24 | string name = 1; 25 | int32 res = 2; 26 | } 27 | 28 | // The response message containing the greetings 29 | message HelloReply { 30 | string message = 1; 31 | } 32 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: initialize-development 2 | 3 | # Initialize the project development environment. 
4 | initialize-development: 5 | @pip install --upgrade -r requirements.txt 6 | @pip install -U -r test_requirements.txt 7 | @pre-commit install 8 | 9 | .PHONY: test 10 | test: 11 | @coverage run --source=py_grpc_prometheus -m pytest 12 | @coverage report -m 13 | 14 | # Run pre-commit for all 15 | pre-commit: 16 | @pre-commit run --all-files 17 | 18 | run-test: 19 | @python -m unittest discover 20 | 21 | # Fix the import path. Use pipe for sed to avoid the difference between Mac and GNU sed 22 | compile-protos: 23 | @docker run --rm -v $(PWD):$(PWD) -w $(PWD) znly/protoc \ 24 | --python_out=tests/integration//hello_world \ 25 | -I tests/integration/protos \ 26 | tests/integration/protos/*.proto 27 | @docker run --rm -v $(PWD):$(PWD) -w $(PWD) znly/protoc \ 28 | --plugin=protoc-gen-grpc=/usr/bin/grpc_python_plugin \ 29 | --python_out=tests/integration//hello_world \ 30 | --grpc_out=tests/integration//hello_world \ 31 | -I tests/integration/protos \ 32 | tests/integration/protos/*.proto 33 | 34 | run-test-server: 35 | python -m tests.integration.hello_world.hello_world_server 36 | 37 | run-test-client: 38 | python -m tests.integration.hello_world.hello_world_client 39 | 40 | publish: 41 | # Markdown checker 42 | # pip install cmarkgfm 43 | rm -rf *.egg-info build dist 44 | python setup.py sdist bdist_wheel 45 | twine check dist/* 46 | twine upload dist/* 47 | -------------------------------------------------------------------------------- /py_grpc_prometheus/grpc_utils.py: -------------------------------------------------------------------------------- 1 | UNARY = "UNARY" 2 | SERVER_STREAMING = "SERVER_STREAMING" 3 | CLIENT_STREAMING = "CLIENT_STREAMING" 4 | BIDI_STREAMING = "BIDI_STREAMING" 5 | UNKNOWN = "UNKNOWN" 6 | 7 | 8 | def wrap_iterator_inc_counter(iterator, counter, grpc_type, grpc_service_name, grpc_method_name): 9 | """Wraps an iterator and collect metrics.""" 10 | 11 | for item in iterator: 12 | counter.labels( 13 | grpc_type=grpc_type, 14 | 
grpc_service=grpc_service_name, 15 | grpc_method=grpc_method_name).inc() 16 | yield item 17 | 18 | 19 | def get_method_type(request_streaming, response_streaming): 20 | """ 21 | Infers the method type from if the request or the response is streaming. 22 | 23 | # The Method type is coming from: 24 | # https://grpc.io/grpc-java/javadoc/io/grpc/MethodDescriptor.MethodType.html 25 | """ 26 | if request_streaming and response_streaming: 27 | return BIDI_STREAMING 28 | elif request_streaming and not response_streaming: 29 | return CLIENT_STREAMING 30 | elif not request_streaming and response_streaming: 31 | return SERVER_STREAMING 32 | return UNARY 33 | 34 | 35 | def split_method_call(handler_call_details): 36 | """ 37 | Infers the grpc service and method name from the handler_call_details. 38 | """ 39 | 40 | # e.g. /package.ServiceName/MethodName 41 | parts = handler_call_details.method.split("/") 42 | if len(parts) < 3: 43 | return "", "", False 44 | 45 | grpc_service_name, grpc_method_name = parts[1:3] 46 | return grpc_service_name, grpc_method_name, True 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | .idea 107 | *.iml 108 | -------------------------------------------------------------------------------- /tests/integration/hello_world/hello_world_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
3 | # source: hello_world.proto 4 | # Protobuf Python Version: 4.25.2 5 | """Generated protocol buffer code.""" 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | from google.protobuf.internal import builder as _builder 10 | # @@protoc_insertion_point(imports) 11 | 12 | _sym_db = _symbol_database.Default() 13 | 14 | 15 | 16 | 17 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x11hello_world.proto\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"4\n\x17MultipleHelloResRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03res\x18\x02 \x01(\x05\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2\xef\x01\n\x07Greeter\x12(\n\x08SayHello\x12\r.HelloRequest\x1a\x0b.HelloReply\"\x00\x12@\n\x13SayHelloUnaryStream\x12\x18.MultipleHelloResRequest\x1a\x0b.HelloReply\"\x00\x30\x01\x12\x35\n\x13SayHelloStreamUnary\x12\r.HelloRequest\x1a\x0b.HelloReply\"\x00(\x01\x12\x41\n\x12SayHelloBidiStream\x12\x18.MultipleHelloResRequest\x1a\x0b.HelloReply\"\x00(\x01\x30\x01\x62\x06proto3') 18 | 19 | _globals = globals() 20 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 21 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'hello_world_pb2', _globals) 22 | if _descriptor._USE_C_DESCRIPTORS == False: 23 | DESCRIPTOR._options = None 24 | _globals['_HELLOREQUEST']._serialized_start=21 25 | _globals['_HELLOREQUEST']._serialized_end=49 26 | _globals['_MULTIPLEHELLORESREQUEST']._serialized_start=51 27 | _globals['_MULTIPLEHELLORESREQUEST']._serialized_end=103 28 | _globals['_HELLOREPLY']._serialized_start=105 29 | _globals['_HELLOREPLY']._serialized_end=134 30 | _globals['_GREETER']._serialized_start=137 31 | _globals['_GREETER']._serialized_end=376 32 | # @@protoc_insertion_point(module_scope) 33 | -------------------------------------------------------------------------------- 
/tests/py_grpc_prometheus/test_grpc_server_started.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from tests.py_grpc_prometheus.utils import get_server_metric 4 | from tests.integration.hello_world import hello_world_pb2 5 | 6 | 7 | @pytest.mark.parametrize("target_count", [1, 10, 100]) 8 | def test_grpc_server_started_with_normal( 9 | target_count, grpc_server, grpc_stub 10 | ): # pylint: disable=unused-argument 11 | for i in range(target_count): 12 | grpc_stub.SayHello(hello_world_pb2.HelloRequest(name=str(i))) 13 | target_metric = get_server_metric("grpc_server_started") 14 | assert target_metric.samples[0].value == target_count 15 | 16 | 17 | @pytest.mark.parametrize("number_of_res", [1, 10, 100]) 18 | def test_grpc_server_started_with_unary_stream( 19 | number_of_res, grpc_server, grpc_stub 20 | ): # pylint: disable=unused-argument 21 | list( 22 | grpc_stub.SayHelloUnaryStream( 23 | hello_world_pb2.MultipleHelloResRequest( 24 | name="unary stream", res=number_of_res 25 | ) 26 | ) 27 | ) 28 | target_metric = get_server_metric("grpc_server_started") 29 | # Only one request sent 30 | assert target_metric.samples[0].value == 1 31 | 32 | 33 | @pytest.mark.parametrize("number_of_names", [1, 10, 100]) 34 | def test_grpc_server_started_with_stream_unary( 35 | number_of_names, grpc_server, grpc_stub, stream_request_generator 36 | ): # pylint: disable=unused-argument 37 | grpc_stub.SayHelloStreamUnary(stream_request_generator(number_of_names)) 38 | target_metric = get_server_metric("grpc_server_started") 39 | assert target_metric.samples == [] 40 | 41 | 42 | @pytest.mark.parametrize( 43 | "number_of_names, number_of_res", [(1, 10), (10, 100), (100, 100)] 44 | ) 45 | def test_grpc_server_started_with_bidi_stream( 46 | number_of_names, number_of_res, grpc_server, grpc_stub, bidi_request_generator 47 | ): # pylint: disable=unused-argument 48 | list( 49 | grpc_stub.SayHelloBidiStream( 50 | 
bidi_request_generator(number_of_names, number_of_res) 51 | ) 52 | ) 53 | target_metric = get_server_metric("grpc_server_started") 54 | assert target_metric.samples == [] 55 | -------------------------------------------------------------------------------- /tests/py_grpc_prometheus/test_grpc_server_handled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from tests.py_grpc_prometheus.utils import get_server_metric 4 | from tests.integration.hello_world import hello_world_pb2 5 | 6 | 7 | @pytest.mark.parametrize("target_count", [1, 10, 100]) 8 | def test_grpc_server_handled_with_normal( 9 | target_count, grpc_server, grpc_stub 10 | ): # pylint: disable=unused-argument 11 | for i in range(target_count): 12 | grpc_stub.SayHello(hello_world_pb2.HelloRequest(name=str(i))) 13 | target_metric = get_server_metric("grpc_server_handled") 14 | assert target_metric.samples[0].value == target_count 15 | 16 | 17 | @pytest.mark.parametrize("number_of_res", [1, 10, 100]) 18 | def test_grpc_server_handled_with_unary_stream( 19 | number_of_res, grpc_server, grpc_stub 20 | ): # pylint: disable=unused-argument 21 | list( 22 | grpc_stub.SayHelloUnaryStream( 23 | hello_world_pb2.MultipleHelloResRequest( 24 | name="unary stream", res=number_of_res 25 | ) 26 | ) 27 | ) 28 | target_metric = get_server_metric("grpc_server_handled") 29 | # No grpc_server_handled for streaming response 30 | assert target_metric.samples == [] 31 | 32 | 33 | @pytest.mark.parametrize("number_of_names", [1, 10, 100]) 34 | def test_grpc_server_handled_with_stream_unary( 35 | number_of_names, grpc_server, grpc_stub, stream_request_generator 36 | ): # pylint: disable=unused-argument 37 | grpc_stub.SayHelloStreamUnary( 38 | stream_request_generator(number_of_names) 39 | ) 40 | target_metric = get_server_metric("grpc_server_handled") 41 | assert target_metric.samples[0].value == 1 42 | 43 | 44 | @pytest.mark.parametrize( 45 | "number_of_names, 
number_of_res", [(1, 10), (10, 100), (100, 100)] 46 | ) 47 | def test_grpc_server_handled_with_bidi_stream( 48 | number_of_names, number_of_res, grpc_server, grpc_stub, bidi_request_generator 49 | ): # pylint: disable=unused-argument 50 | list( 51 | grpc_stub.SayHelloBidiStream( 52 | bidi_request_generator(number_of_names, number_of_res) 53 | ) 54 | ) 55 | target_metric = get_server_metric("grpc_server_handled") 56 | assert target_metric.samples == [] 57 | -------------------------------------------------------------------------------- /tests/py_grpc_prometheus/test_grpc_server_msg_sent.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from tests.py_grpc_prometheus.utils import get_server_metric 4 | from tests.integration.hello_world import hello_world_pb2 5 | 6 | 7 | @pytest.mark.parametrize("target_count", [1, 10, 100]) 8 | def test_grpc_server_msg_sent_with_normal( 9 | target_count, grpc_server, grpc_stub 10 | ): # pylint: disable=unused-argument 11 | for i in range(target_count): 12 | grpc_stub.SayHello(hello_world_pb2.HelloRequest(name=str(i))) 13 | target_metric = get_server_metric("grpc_server_msg_sent") 14 | # None streaming request has no this metrics 15 | assert target_metric.samples == [] 16 | 17 | 18 | @pytest.mark.parametrize("number_of_res", [1, 10, 100]) 19 | def test_grpc_server_msg_sent_with_unary_stream( 20 | number_of_res, grpc_server, grpc_stub 21 | ): # pylint: disable=unused-argument 22 | list( 23 | grpc_stub.SayHelloUnaryStream( 24 | hello_world_pb2.MultipleHelloResRequest( 25 | name="unary stream", res=number_of_res 26 | ) 27 | ) 28 | ) 29 | target_metric = get_server_metric("grpc_server_msg_sent") 30 | assert target_metric.samples[0].value == number_of_res 31 | 32 | 33 | @pytest.mark.parametrize("number_of_names", [1, 10, 100]) 34 | def test_grpc_server_msg_sent_with_stream_unary( 35 | number_of_names, grpc_server, grpc_stub, stream_request_generator 36 | ): # pylint: 
disable=unused-argument 37 | grpc_stub.SayHelloStreamUnary(stream_request_generator(number_of_names)) 38 | target_metric = get_server_metric("grpc_server_msg_sent") 39 | assert target_metric.samples == [] 40 | 41 | 42 | @pytest.mark.parametrize( 43 | "number_of_names, number_of_res", [(1, 10), (10, 100), (100, 100)] 44 | ) 45 | def test_grpc_server_msg_sent_with_bidi_stream( 46 | number_of_names, number_of_res, grpc_server, grpc_stub, bidi_request_generator 47 | ): # pylint: disable=unused-argument 48 | list( 49 | grpc_stub.SayHelloBidiStream( 50 | bidi_request_generator(number_of_names, number_of_res) 51 | ) 52 | ) 53 | target_metric = get_server_metric("grpc_server_msg_sent") 54 | assert target_metric.samples[0].value == number_of_names 55 | -------------------------------------------------------------------------------- /tests/py_grpc_prometheus/test_grpc_server_msg_received.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from tests.py_grpc_prometheus.utils import get_server_metric 4 | from tests.integration.hello_world import hello_world_pb2 5 | 6 | 7 | @pytest.mark.parametrize("target_count", [1, 10, 100]) 8 | def test_grpc_server_msg_received_with_normal( 9 | target_count, grpc_server, grpc_stub 10 | ): # pylint: disable=unused-argument 11 | for i in range(target_count): 12 | grpc_stub.SayHello(hello_world_pb2.HelloRequest(name=str(i))) 13 | target_metric = get_server_metric("grpc_server_msg_received") 14 | # None streaming request has no this metrics 15 | assert target_metric.samples == [] 16 | 17 | 18 | @pytest.mark.parametrize("number_of_res", [1, 10, 100]) 19 | def test_grpc_server_msg_received_with_unary_stream( 20 | number_of_res, grpc_server, grpc_stub 21 | ): # pylint: disable=unused-argument 22 | list( 23 | grpc_stub.SayHelloUnaryStream( 24 | hello_world_pb2.MultipleHelloResRequest( 25 | name="unary stream", res=number_of_res 26 | ) 27 | ) 28 | ) 29 | target_metric = 
get_server_metric("grpc_server_msg_received") 30 | assert target_metric.samples == [] 31 | 32 | 33 | @pytest.mark.parametrize("number_of_names", [1, 10, 100]) 34 | def test_grpc_server_msg_received_with_stream_unary( 35 | number_of_names, grpc_server, grpc_stub, stream_request_generator 36 | ): # pylint: disable=unused-argument 37 | grpc_stub.SayHelloStreamUnary(stream_request_generator(number_of_names)) 38 | target_metric = get_server_metric("grpc_server_msg_received") 39 | assert target_metric.samples[0].value == number_of_names 40 | 41 | 42 | @pytest.mark.parametrize( 43 | "number_of_names, number_of_res", [(1, 10), (10, 100), (100, 100)] 44 | ) 45 | def test_grpc_server_msg_received_with_bidi_stream( 46 | number_of_names, number_of_res, grpc_server, grpc_stub, bidi_request_generator 47 | ): # pylint: disable=unused-argument 48 | list( 49 | grpc_stub.SayHelloBidiStream( 50 | bidi_request_generator(number_of_names, number_of_res) 51 | ) 52 | ) 53 | target_metric = get_server_metric("grpc_server_msg_received") 54 | assert target_metric.samples[0].value == number_of_names 55 | -------------------------------------------------------------------------------- /py_grpc_prometheus/server_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client import Counter 2 | from prometheus_client import Histogram 3 | 4 | def init_metrics(registry): 5 | return { 6 | "grpc_server_started_counter": Counter( 7 | "grpc_server_started_total", 8 | "Total number of RPCs started on the server.", 9 | ["grpc_type", "grpc_service", "grpc_method"], 10 | registry=registry 11 | ), 12 | "grpc_server_stream_msg_received": Counter( 13 | "grpc_server_msg_received_total", 14 | "Total number of RPC stream messages received on the server.", 15 | ["grpc_type", "grpc_service", "grpc_method"], 16 | registry=registry 17 | ), 18 | "grpc_server_stream_msg_sent": Counter( 19 | "grpc_server_msg_sent_total", 20 | "Total number of gRPC stream messages 
sent by the server.", 21 | ["grpc_type", "grpc_service", "grpc_method"], 22 | registry=registry 23 | ), 24 | "grpc_server_handled_histogram": Histogram( 25 | "grpc_server_handling_seconds", 26 | "Histogram of response latency (seconds) of gRPC that had been application-level " 27 | "handled by the server.", 28 | ["grpc_type", "grpc_service", "grpc_method"], 29 | registry=registry 30 | ), 31 | "legacy_grpc_server_handled_latency_seconds": Histogram( 32 | "grpc_server_handled_latency_seconds", 33 | "Histogram of response latency (seconds) of gRPC that had been " 34 | "application-level handled by the server", 35 | ["grpc_type", "grpc_service", "grpc_method"], 36 | registry=registry 37 | ) 38 | } 39 | 40 | 41 | # Legacy metrics for backward compatibility 42 | def get_grpc_server_handled_counter(is_legacy, registry): 43 | if is_legacy: 44 | return Counter( 45 | "grpc_server_handled_total", 46 | "Total number of RPCs completed on the server, regardless of success or failure.", 47 | ["grpc_type", "grpc_service", "grpc_method", "code"], 48 | registry=registry 49 | ) 50 | return Counter( 51 | "grpc_server_handled_total", 52 | "Total number of RPCs completed on the server, regardless of success or failure.", 53 | ["grpc_type", "grpc_service", "grpc_method", "grpc_code"], 54 | registry=registry 55 | ) 56 | -------------------------------------------------------------------------------- /tests/integration/hello_world/hello_world_client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | import time 4 | 5 | import grpc 6 | from prometheus_client import start_http_server 7 | 8 | import tests.integration.hello_world.hello_world_pb2 as hello_world_pb2 9 | import tests.integration.hello_world.hello_world_pb2_grpc as hello_world_grpc 10 | from py_grpc_prometheus.prometheus_client_interceptor import PromClientInterceptor 11 | 12 | _ONE_DAY_IN_SECONDS = 60 * 60 * 24 13 | _LOGGER = logging.getLogger(__name__) 14 | 
15 | def call_server(): 16 | channel = grpc.intercept_channel(grpc.insecure_channel("localhost:50051"), 17 | PromClientInterceptor()) 18 | stub = hello_world_grpc.GreeterStub(channel) 19 | 20 | # Call the unary-unary. 21 | for _ in range(5): 22 | try: 23 | response = stub.SayHello(hello_world_pb2.HelloRequest(name="Unary")) 24 | _LOGGER.info("Unary response: %s", response.message) 25 | _LOGGER.info("") 26 | except grpc.RpcError: 27 | _LOGGER.error("Got an exception from server") 28 | 29 | # Call the unary stream. 30 | _LOGGER.info("Running Unary Stream client") 31 | response_iter = stub.SayHelloUnaryStream(hello_world_pb2.HelloRequest(name="unary stream")) 32 | _LOGGER.info("Response for Unary Stream") 33 | for response in response_iter: 34 | _LOGGER.info("Unary Stream response item: %s", response.message) 35 | _LOGGER.info("") 36 | 37 | # Call the stream_unary. 38 | try: 39 | _LOGGER.info("Running Stream Unary client") 40 | response = stub.SayHelloStreamUnary(generate_requests("Stream Unary")) 41 | _LOGGER.info("Stream Unary response: %s", response.message) 42 | _LOGGER.info("") 43 | except grpc.RpcError: 44 | _LOGGER.error("Got an exception from server") 45 | 46 | # Call stream & stream. 
47 | _LOGGER.info("Running Bidi Stream client") 48 | response_iter = stub.SayHelloBidiStream(generate_requests("Bidi Stream")) 49 | for response in response_iter: 50 | _LOGGER.info("Bidi Stream response item: %s", response.message) 51 | _LOGGER.info("") 52 | 53 | 54 | def generate_requests(name): 55 | for i in range(10): 56 | yield hello_world_pb2.HelloRequest(name="%s %s" % (name, i)) 57 | 58 | 59 | def run(): 60 | logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s") 61 | _LOGGER.info("Starting py-grpc-promtheus hello word server") 62 | call_server() 63 | start_http_server(50053) 64 | _LOGGER.info("Started py-grpc-promtheus client, metrics is located at http://localhost:50053") 65 | try: 66 | while True: 67 | time.sleep(_ONE_DAY_IN_SECONDS) 68 | except KeyboardInterrupt: 69 | sys.exit() 70 | 71 | 72 | if __name__ == "__main__": 73 | run() 74 | -------------------------------------------------------------------------------- /tests/integration/hello_world/hello_world_server.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from concurrent import futures 4 | 5 | import grpc 6 | from prometheus_client import start_http_server 7 | 8 | import tests.integration.hello_world.hello_world_pb2 as hello_world_pb2 9 | import tests.integration.hello_world.hello_world_pb2_grpc as hello_world_grpc 10 | from py_grpc_prometheus.prometheus_server_interceptor import PromServerInterceptor 11 | 12 | _ONE_DAY_IN_SECONDS = 60 * 60 * 24 13 | _LOGGER = logging.getLogger(__name__) 14 | 15 | class Greeter(hello_world_grpc.GreeterServicer): 16 | 17 | def SayHello(self, request, context): 18 | if request.name == "invalid": 19 | context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'Consarnit!') 20 | if request.name == "rpcError": 21 | context.set_code(grpc.StatusCode.INVALID_ARGUMENT) 22 | context.set_details('Consarnit!') 23 | raise grpc.RpcError() 24 | if request.name == "unknownError": 25 | raise 
Exception(request.name) 26 | return hello_world_pb2.HelloReply(message="Hello, %s!" % request.name) 27 | 28 | def SayHelloUnaryStream(self, request, context): 29 | if request.name == "invalid": 30 | context.set_code(grpc.StatusCode.INVALID_ARGUMENT) 31 | context.set_details('Consarnit!') 32 | return 33 | for i in range(request.res): 34 | yield hello_world_pb2.HelloReply( 35 | message="Hello, %s %s!" % (request.name, i) 36 | ) 37 | return 38 | 39 | def SayHelloStreamUnary(self, request_iterator, context): 40 | names = "" 41 | for request in request_iterator: 42 | names += request.name + " " 43 | return hello_world_pb2.HelloReply(message="Hello, %s!" % names) 44 | 45 | def SayHelloBidiStream(self, request_iterator, context): 46 | for request in request_iterator: 47 | yield hello_world_pb2.HelloReply(message="Hello, %s!" % request.name) 48 | 49 | 50 | def serve(): 51 | logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s") 52 | _LOGGER.info("Starting py-grpc-promtheus hello word server") 53 | server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), 54 | interceptors=( 55 | PromServerInterceptor( 56 | enable_handling_time_histogram=True, 57 | skip_exceptions=True 58 | ), 59 | )) 60 | hello_world_grpc.add_GreeterServicer_to_server(Greeter(), server) 61 | server.add_insecure_port("[::]:50051") 62 | server.start() 63 | start_http_server(50052) 64 | 65 | _LOGGER.info("Started py-grpc-promtheus hello word server, grpc at localhost:50051, " 66 | "metrics at http://localhost:50052") 67 | try: 68 | while True: 69 | time.sleep(_ONE_DAY_IN_SECONDS) 70 | except KeyboardInterrupt: 71 | server.stop(0) 72 | 73 | 74 | if __name__ == "__main__": 75 | serve() 76 | -------------------------------------------------------------------------------- /py_grpc_prometheus/client_metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client import Counter 2 | from prometheus_client import Histogram 3 | 4 | 
def init_metrics(registry):
    """Create and register all client-side gRPC Prometheus collectors.

    Args:
        registry: the prometheus_client CollectorRegistry the metrics
            attach to.

    Returns:
        Dict mapping internal metric keys to Counter/Histogram collectors.
    """
    return {
        "grpc_client_started_counter": Counter(
            "grpc_client_started_total",
            "Total number of RPCs started on the client",
            ["grpc_type", "grpc_service", "grpc_method"],
            registry=registry
        ),

        "grpc_client_handled_counter": Counter(
            "grpc_client_handled_total",
            "Total number of RPCs completed on the client, "
            "regardless of success or failure.",
            ["grpc_type", "grpc_service", "grpc_method", "grpc_code"],
            registry=registry
        ),

        "grpc_client_stream_msg_received": Counter(
            "grpc_client_msg_received_total",
            "Total number of RPC stream messages received by the client.",
            ["grpc_type", "grpc_service", "grpc_method"],
            registry=registry
        ),

        "grpc_client_stream_msg_sent": Counter(
            "grpc_client_msg_sent_total",
            "Total number of gRPC stream messages sent by the client.",
            ["grpc_type", "grpc_service", "grpc_method"],
            registry=registry
        ),

        "grpc_client_handled_histogram": Histogram(
            "grpc_client_handling_seconds",
            # BUG FIX: the previous fragments concatenated to "untilit"
            # (no space between the joined string literals).
            "Histogram of response latency (seconds) of the gRPC until "
            "it is finished by the application.",
            ["grpc_type", "grpc_service", "grpc_method"],
            registry=registry
        ),

        "grpc_client_stream_recv_histogram": Histogram(
            "grpc_client_msg_recv_handling_seconds",
            "Histogram of response latency (seconds) of the gRPC single message receive.",
            ["grpc_type", "grpc_service", "grpc_method"],
            registry=registry
        ),

        "grpc_client_stream_send_histogram": Histogram(
            "grpc_client_msg_send_handling_seconds",
            "Histogram of response latency (seconds) of the gRPC single message send.",
            ["grpc_type", "grpc_service", "grpc_method"],
            registry=registry
        ),

        # Legacy metrics for backwards compatibility
        # NOTE: the legacy counter deliberately labels the status "code",
        # not "grpc_code", to match the pre-rename metric schema.

        "legacy_grpc_client_completed_counter": Counter(
            "grpc_client_completed",
            "Total number of RPCs completed on the client, "
            "regardless of success or failure.",
            ["grpc_type", "grpc_service", "grpc_method", "code"],
            registry=registry
        ),

        "legacy_grpc_client_completed_latency_seconds_histogram": Histogram(
            "grpc_client_completed_latency_seconds",
            "Histogram of rpc response latency (in seconds) for completed rpcs.",
            ["grpc_type", "grpc_service", "grpc_method"],
            registry=registry
        ),
    }
@pytest.mark.parametrize("target_count", [1, 10, 100])
def test_grpc_server_handled_with_server_error(
    target_count, grpc_server, grpc_stub
):  # pylint: disable=unused-argument
    """Unhandled servicer exceptions are counted with grpc_code=UNKNOWN."""
    for _ in range(target_count):
        with pytest.raises(Exception):
            grpc_stub.SayHello(hello_world_pb2.HelloRequest(name="unknownError"))

    target_metric = get_server_metric("grpc_server_handled")
    # (removed a leftover debug print of the sample's grpc_code label)
    assert target_metric.samples[0].value == target_count
    assert target_metric.samples[0].labels["grpc_code"] == "UNKNOWN"
@pytest.mark.parametrize("target_count", [1, 10, 100])
def test_grpc_server_handled_with_interceptor_error(
    target_count, grpc_server, grpc_stub
):  # pylint: disable=unused-argument
    """A failure inside the interceptor propagates to the caller by default."""
    patch_target = (
        'py_grpc_prometheus.prometheus_server_interceptor.'
        'PromServerInterceptor._compute_status_code'
    )
    for _ in range(target_count):
        with patch(patch_target, side_effect=Exception('mocked error')):
            with pytest.raises(Exception):
                grpc_stub.SayHello(hello_world_pb2.HelloRequest(name="unary"))

@pytest.mark.parametrize("target_count", [1, 10, 100])
def test_grpc_server_handled_with_server_error_and_skip_exceptions(
    target_count, grpc_server_with_exception_handling, grpc_stub
):  # pylint: disable=unused-argument
    """With skip_exceptions=True, failed RPCs record no handled samples."""
    for _ in range(target_count):
        with pytest.raises(Exception):
            grpc_stub.SayHello(hello_world_pb2.HelloRequest(name="unknownError"))

    target_metric = get_server_metric("grpc_server_handled")
    assert target_metric.samples == []

@pytest.mark.parametrize("target_count", [1, 10, 100])
def test_grpc_server_handled_with_interceptor_error_and_skip_exceptions(
    target_count, grpc_server_with_exception_handling, grpc_stub
):  # pylint: disable=unused-argument
    """With skip_exceptions=True the RPC still succeeds despite a broken interceptor."""
    patch_target = (
        'py_grpc_prometheus.prometheus_server_interceptor.'
        'PromServerInterceptor._compute_status_code'
    )
    for _ in range(target_count):
        with patch(patch_target, side_effect=Exception('mocked error')):
            reply = grpc_stub.SayHello(hello_world_pb2.HelloRequest(name="unary"))
            assert reply.message == "Hello, unary!"

    target_metric = get_server_metric("grpc_server_handled")
    assert target_metric.samples == []

@pytest.mark.parametrize("target_count", [1, 10, 100])
def test_grpc_server_handled_before_request_error(
    target_count, grpc_server, grpc_stub
):  # pylint: disable=unused-argument
    """A metrics failure before request handling must not break the RPC."""
    for _ in range(target_count):
        with patch(
            'py_grpc_prometheus.grpc_utils.wrap_iterator_inc_counter',
            side_effect=Exception('mocked error')
        ):
            reply = grpc_stub.SayHello(hello_world_pb2.HelloRequest(name="unary"))
            assert reply.message == "Hello, unary!"
@pytest.mark.parametrize("number_of_names", [1, 10, 100])
def test_grpc_server_handling_seconds_with_stream_unary(
    number_of_names, grpc_server, grpc_stub, stream_request_generator
):  # pylint: disable=unused-argument
    """One stream-unary RPC records exactly one handling-time observation."""
    grpc_stub.SayHelloStreamUnary(stream_request_generator(number_of_names))
    target_metric = get_server_metric("grpc_server_handling_seconds")

    def peak(sample_name):
        # Largest value among samples of the given series, 0 when none exist
        # (same result as the reduce/filter idiom used elsewhere in the suite).
        return max(
            [0] + [s.value for s in target_metric.samples if s.name == sample_name]
        )

    assert peak("grpc_server_handling_seconds_bucket") == 1
    assert peak("grpc_server_handling_seconds_count") == 1
    assert peak("grpc_server_handling_seconds_sum") > 0
class GreeterStub(object):
    """The greeting service definition.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute is the callable for one RPC; the channel factory
        # (unary_unary / unary_stream / stream_unary / stream_stream) matches
        # the request/response arity declared in hello_world.proto.
        self.SayHello = channel.unary_unary(
            '/Greeter/SayHello',
            request_serializer=hello__world__pb2.HelloRequest.SerializeToString,
            response_deserializer=hello__world__pb2.HelloReply.FromString,
        )
        self.SayHelloUnaryStream = channel.unary_stream(
            '/Greeter/SayHelloUnaryStream',
            request_serializer=hello__world__pb2.MultipleHelloResRequest.SerializeToString,
            response_deserializer=hello__world__pb2.HelloReply.FromString,
        )
        self.SayHelloStreamUnary = channel.stream_unary(
            '/Greeter/SayHelloStreamUnary',
            request_serializer=hello__world__pb2.HelloRequest.SerializeToString,
            response_deserializer=hello__world__pb2.HelloReply.FromString,
        )
        self.SayHelloBidiStream = channel.stream_stream(
            '/Greeter/SayHelloBidiStream',
            request_serializer=hello__world__pb2.MultipleHelloResRequest.SerializeToString,
            response_deserializer=hello__world__pb2.HelloReply.FromString,
        )
def add_GreeterServicer_to_server(servicer, server):
    """Register the servicer's four Greeter RPC handlers on the given server."""
    rpc_method_handlers = {
        'SayHello': grpc.unary_unary_rpc_method_handler(
            servicer.SayHello,
            request_deserializer=hello__world__pb2.HelloRequest.FromString,
            response_serializer=hello__world__pb2.HelloReply.SerializeToString,
        ),
        'SayHelloUnaryStream': grpc.unary_stream_rpc_method_handler(
            servicer.SayHelloUnaryStream,
            request_deserializer=hello__world__pb2.MultipleHelloResRequest.FromString,
            response_serializer=hello__world__pb2.HelloReply.SerializeToString,
        ),
        'SayHelloStreamUnary': grpc.stream_unary_rpc_method_handler(
            servicer.SayHelloStreamUnary,
            request_deserializer=hello__world__pb2.HelloRequest.FromString,
            response_serializer=hello__world__pb2.HelloReply.SerializeToString,
        ),
        'SayHelloBidiStream': grpc.stream_stream_rpc_method_handler(
            servicer.SayHelloBidiStream,
            request_deserializer=hello__world__pb2.MultipleHelloResRequest.FromString,
            response_serializer=hello__world__pb2.HelloReply.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'Greeter', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
@pytest.fixture(scope='function')
def grpc_stub():
    """Greeter stub over an intercepted channel; client metrics on :50053."""
    prom_registry = registry.CollectorRegistry(auto_describe=True)
    channel = grpc.intercept_channel(grpc.insecure_channel("localhost:50051"),
                                     PromClientInterceptor(registry=prom_registry))
    stub = hello_world_grpc.GreeterStub(channel)
    prom_server = start_prometheus_server(50053, prom_registry)

    yield stub

    channel.close()
    prom_server.shutdown()
    # BUG FIX: also release the listening socket, mirroring the server
    # fixtures above; shutdown() alone stops serve_forever() but leaves the
    # socket open, leaking it across tests.
    prom_server.server_close()
@pytest.fixture(scope="module")
def stream_request_generator():
    """Factory for a HelloRequest stream of the requested length."""
    def _make(number_of_names):
        for idx in range(number_of_names):
            yield hello_world_pb2.HelloRequest(name="{}".format(idx))
    return _make

@pytest.fixture(scope="module")
def bidi_request_generator():
    """Factory for a MultipleHelloResRequest stream of the requested length."""
    def _make(number_of_names, number_of_res):
        for idx in range(number_of_names):
            yield hello_world_pb2.MultipleHelloResRequest(
                name="{}".format(idx), res=number_of_res
            )
    return _make
```python
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
                     interceptors=(PromServerInterceptor(enable_handling_time_histogram=True),))
```
The histogram variable contains three sub-metrics: 83 | 84 | * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method 85 | * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for 86 | calculating average handling times 87 | * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective 88 | handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/)) 89 | 90 | ## Server Side: 91 | - enable_handling_time_histogram: Enables 'grpc_server_handling_seconds' 92 | 93 | ## Client Side: 94 | - enable_client_handling_time_histogram: Enables 'grpc_client_handling_seconds' 95 | - enable_client_stream_receive_time_histogram: Enables 'grpc_client_msg_recv_handling_seconds' 96 | - enable_client_stream_send_time_histogram: Enables 'grpc_client_msg_send_handling_seconds' 97 | 98 | ## Legacy metrics: 99 | 100 | Metric names have been updated to be in line with those from https://github.com/grpc-ecosystem/go-grpc-prometheus. 
```python
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
                     interceptors=(PromServerInterceptor(legacy=True),))
```
@pytest.mark.parametrize("number_of_res", [1, 10, 100])
def test_grpc_server_handled_latency_seconds_with_unary_stream(
    number_of_res, grpc_server, grpc_stub
):  # pylint: disable=unused-argument
    """Non-legacy server records no grpc_server_handled_latency_seconds samples."""
    request = hello_world_pb2.MultipleHelloResRequest(
        name="unary stream", res=number_of_res
    )
    list(grpc_stub.SayHelloUnaryStream(request))
    target_metric = get_server_metric("grpc_server_handled_latency_seconds")
    # No grpc_server_handled_latency_seconds for streaming response
    assert target_metric.samples == []


@pytest.mark.parametrize("number_of_names", [1, 10, 100])
def test_grpc_server_handled_latency_seconds_with_stream_unary(
    number_of_names, grpc_server, grpc_stub, stream_request_generator
):  # pylint: disable=unused-argument
    """Legacy latency metric stays empty for stream-unary on a non-legacy server."""
    grpc_stub.SayHelloStreamUnary(stream_request_generator(number_of_names))
    target_metric = get_server_metric("grpc_server_handled_latency_seconds")
    assert target_metric.samples == []


@pytest.mark.parametrize(
    "number_of_names, number_of_res", [(1, 10), (10, 100), (100, 100)]
)
def test_grpc_server_handled_latency_seconds_with_bidi_stream(
    number_of_names, number_of_res, grpc_server, grpc_stub, bidi_request_generator
):  # pylint: disable=unused-argument
    """Legacy latency metric stays empty for bidi streams on a non-legacy server."""
    responses = grpc_stub.SayHelloBidiStream(
        bidi_request_generator(number_of_names, number_of_res)
    )
    list(responses)
    target_metric = get_server_metric("grpc_server_handled_latency_seconds")
    assert target_metric.samples == []
@pytest.mark.parametrize("target_count", [1, 10, 100])
def test_legacy_grpc_server_handled_latency_seconds_with_normal(
    target_count, grpc_legacy_server, grpc_stub
):  # pylint: disable=unused-argument
    """Legacy server records one latency observation per unary RPC."""
    for i in range(target_count):
        grpc_stub.SayHello(hello_world_pb2.HelloRequest(name=str(i)))
    target_metric = get_server_metric("grpc_server_handled_latency_seconds")

    def peak(sample_name):
        # Largest value among samples of the given series, 0 when none exist
        # (same result as the reduce/filter idiom used elsewhere in the suite).
        return max(
            [0] + [s.value for s in target_metric.samples if s.name == sample_name]
        )

    assert peak("grpc_server_handled_latency_seconds_bucket") == target_count
    assert peak("grpc_server_handled_latency_seconds_count") == target_count
    assert peak("grpc_server_handled_latency_seconds_sum") > 0
@pytest.mark.parametrize(
    "number_of_names, number_of_res", [(1, 10), (10, 100), (100, 100)]
)
def test_legacy_grpc_server_handled_latency_seconds_with_bidi_stream(
    number_of_names, number_of_res, grpc_legacy_server, grpc_stub, bidi_request_generator
):  # pylint: disable=unused-argument
    """Even the legacy server records no latency samples for bidi streams."""
    responses = grpc_stub.SayHelloBidiStream(
        bidi_request_generator(number_of_names, number_of_res)
    )
    list(responses)
    target_metric = get_server_metric("grpc_server_handled_latency_seconds")
    assert target_metric.samples == []
class PromServerInterceptor(grpc.ServerInterceptor):
    """Server-side gRPC interceptor that records Prometheus metrics per RPC.

    Wraps every handler returned by ``continuation`` so that started/handled
    counters, stream message counters and (optionally) a latency histogram are
    updated around the original RPC behavior.
    """

    def __init__(self,
                 enable_handling_time_histogram=False,
                 legacy=False,
                 skip_exceptions=False,
                 log_exceptions=True,
                 registry=REGISTRY):
        # When True (and not legacy), observe per-RPC handling time into the
        # grpc_server_handled_histogram metric.
        self._enable_handling_time_histogram = enable_handling_time_histogram
        # Legacy mode switches metric/label naming — visibly, the handled
        # counter uses label "code" instead of "grpc_code" (see
        # increase_grpc_server_handled_total_counter below) and a separate
        # legacy latency histogram is always observed.
        self._legacy = legacy
        self._grpc_server_handled_total_counter = server_metrics.get_grpc_server_handled_counter(
            self._legacy,
            registry
        )
        self._metrics = server_metrics.init_metrics(registry)
        # skip_exceptions=True: swallow errors raised by the instrumentation
        # itself and fall back to the un-instrumented behavior (see the outer
        # except in new_behavior) instead of failing the RPC.
        self._skip_exceptions = skip_exceptions
        # log_exceptions toggles logging of swallowed exceptions when
        # skip_exceptions is enabled.
        self._log_exceptions = log_exceptions

    def intercept_service(self, continuation, handler_call_details):
        """
        Intercepts the server function calls.

        This implements referred to:
        https://github.com/census-instrumentation/opencensus-python/blob/master/opencensus/
        trace/ext/grpc/server_interceptor.py
        and
        https://grpc.io/grpc/python/grpc.html#service-side-interceptor
        """

        grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(handler_call_details)

        def metrics_wrapper(behavior, request_streaming, response_streaming):
            # Wraps a single rpc behavior (one of unary_unary/unary_stream/
            # stream_unary/stream_stream) with metric bookkeeping.
            def new_behavior(request_or_iterator, servicer_context):
                response_or_iterator = None
                try:
                    start = default_timer()
                    grpc_type = grpc_utils.get_method_type(request_streaming, response_streaming)
                    try:
                        if request_streaming:
                            # Count each received request message as the
                            # handler consumes the iterator.
                            request_or_iterator = grpc_utils.wrap_iterator_inc_counter(
                                request_or_iterator,
                                self._metrics["grpc_server_stream_msg_received"],
                                grpc_type,
                                grpc_service_name,
                                grpc_method_name)
                        else:
                            self._metrics["grpc_server_started_counter"].labels(
                                grpc_type=grpc_type,
                                grpc_service=grpc_service_name,
                                grpc_method=grpc_method_name).inc()

                        # Invoke the original rpc behavior.
                        response_or_iterator = behavior(request_or_iterator, servicer_context)

                        if response_streaming:
                            # Count each sent response message lazily; the
                            # handled counter is NOT incremented here for
                            # streaming responses.
                            sent_metric = self._metrics["grpc_server_stream_msg_sent"]
                            response_or_iterator = grpc_utils.wrap_iterator_inc_counter(
                                response_or_iterator,
                                sent_metric,
                                grpc_type,
                                grpc_service_name,
                                grpc_method_name)

                        else:
                            self.increase_grpc_server_handled_total_counter(grpc_type,
                                                                            grpc_service_name,
                                                                            grpc_method_name,
                                                                            self._compute_status_code(
                                                                                servicer_context).name)
                        return response_or_iterator
                    except Exception as e:
                        # Record the failure in the handled counter (mapping a
                        # still-OK context to UNKNOWN), then always re-raise so
                        # the outer handler can decide whether to skip.
                        if not self._skip_exceptions:
                            status_code = self._compute_status_code(servicer_context)
                            if status_code == grpc.StatusCode.OK:
                                status_code = grpc.StatusCode.UNKNOWN
                            self.increase_grpc_server_handled_total_counter(grpc_type,
                                                                            grpc_service_name,
                                                                            grpc_method_name,
                                                                            status_code.name)
                        raise e

                    finally:

                        # Latency is only observed for non-streaming responses
                        # (consistent with the integration tests).
                        if not response_streaming:
                            if self._legacy:
                                self._metrics["legacy_grpc_server_handled_latency_seconds"].labels(
                                    grpc_type=grpc_type,
                                    grpc_service=grpc_service_name,
                                    grpc_method=grpc_method_name) \
                                    .observe(max(default_timer() - start, 0))
                            elif self._enable_handling_time_histogram:
                                self._metrics["grpc_server_handled_histogram"].labels(
                                    grpc_type=grpc_type,
                                    grpc_service=grpc_service_name,
                                    grpc_method=grpc_method_name) \
                                    .observe(max(default_timer() - start, 0))
                except Exception as e:  # pylint: disable=broad-except
                    # Allow user to skip the exceptions in order to maintain
                    # the basic functionality in the server
                    # The logging function in exception can be toggled with log_exceptions
                    # in order to suppress the noise in logging
                    if self._skip_exceptions:
                        if self._log_exceptions:
                            _LOGGER.error(e)
                        if response_or_iterator is None:
                            return response_or_iterator
                        # NOTE(review): falls back to re-invoking the original
                        # behavior; for streaming requests the iterator may
                        # already be partially consumed — confirm intended.
                        return behavior(request_or_iterator, servicer_context)
                    raise e

            return new_behavior

        optional_any = self._wrap_rpc_behavior(continuation(handler_call_details), metrics_wrapper)

        return optional_any

    # pylint: disable=protected-access
    def _compute_status_code(self, servicer_context):
        # Reaches into grpc's private _state; no public API exposes the
        # in-flight status code.
        if servicer_context._state.client == "cancelled":
            return grpc.StatusCode.CANCELLED

        # No code set yet means the handler did not signal an error.
        if servicer_context._state.code is None:
            return grpc.StatusCode.OK

        return servicer_context._state.code

    def _compute_error_code(self, grpc_exception):
        # grpc.Call exposes the real status code; anything else is UNKNOWN.
        if isinstance(grpc_exception, grpc.Call):
            return grpc_exception.code()

        return grpc.StatusCode.UNKNOWN

    def increase_grpc_server_handled_total_counter(
            self, grpc_type, grpc_service_name, grpc_method_name, grpc_code):
        # Legacy metric uses label name "code"; current naming uses
        # "grpc_code". The counter object itself is chosen in __init__.
        if self._legacy:
            self._grpc_server_handled_total_counter.labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name,
                code=grpc_code).inc()
        else:
            self._grpc_server_handled_total_counter.labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name,
                grpc_code=grpc_code).inc()

    def _wrap_rpc_behavior(self, handler, fn):
        """Returns a new rpc handler that wraps the given function"""
        if handler is None:
            return None

        # Pick the populated behavior and the matching handler factory based
        # on the streaming flags of the original handler.
        if handler.request_streaming and handler.response_streaming:
            behavior_fn = handler.stream_stream
            handler_factory = grpc.stream_stream_rpc_method_handler
        elif handler.request_streaming and not handler.response_streaming:
            behavior_fn = handler.stream_unary
            handler_factory = grpc.stream_unary_rpc_method_handler
        elif not handler.request_streaming and handler.response_streaming:
            behavior_fn = handler.unary_stream
            handler_factory = grpc.unary_stream_rpc_method_handler
        else:
            behavior_fn = handler.unary_unary
            handler_factory = grpc.unary_unary_rpc_method_handler

        # Re-create the handler with the wrapped behavior, preserving the
        # original (de)serializers.
        return handler_factory(
            fn(behavior_fn, handler.request_streaming, handler.response_streaming),
            request_deserializer=handler.request_deserializer,
            response_serializer=handler.response_serializer)
-------------------------------------------------------------------------------- /py_grpc_prometheus/prometheus_client_interceptor.py: --------------------------------------------------------------------------------
"""Intercept a client call with prometheus"""

from timeit import default_timer

import grpc
from prometheus_client.registry import REGISTRY

from py_grpc_prometheus import grpc_utils
from py_grpc_prometheus.client_metrics import init_metrics

class PromClientInterceptor(grpc.UnaryUnaryClientInterceptor,
                            grpc.UnaryStreamClientInterceptor,
                            grpc.StreamUnaryClientInterceptor,
                            grpc.StreamStreamClientInterceptor):
    """
    Intercept gRPC client requests.
    """

    def __init__(
            self,
            enable_client_handling_time_histogram=False,
            enable_client_stream_receive_time_histogram=False,
            enable_client_stream_send_time_histogram=False,
            legacy=False,
            registry=REGISTRY
    ):
        # Optional histograms are all opt-in; legacy switches to the old
        # metric/label names (e.g. "code" vs "grpc_code" below).
        self._enable_client_handling_time_histogram = enable_client_handling_time_histogram
        self._enable_client_stream_receive_time_histogram = enable_client_stream_receive_time_histogram
        self._enable_client_stream_send_time_histogram = enable_client_stream_send_time_histogram
        self._legacy = legacy
        self._metrics = init_metrics(registry)

    def intercept_unary_unary(self, continuation, client_call_details, request):
        """Record started/handled counters and optional latency for a unary call."""
        grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(client_call_details)
        grpc_type = grpc_utils.UNARY


        self._metrics["grpc_client_started_counter"].labels(
            grpc_type=grpc_type,
            grpc_service=grpc_service_name,
            grpc_method=grpc_method_name).inc()

        # NOTE(review): latency is measured around continuation() only, which
        # returns the call object — confirm this captures the full RPC time
        # and not just call creation.
        start = default_timer()
        handler = continuation(client_call_details, request)
        if self._legacy:
            self._metrics["legacy_grpc_client_completed_latency_seconds_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))
        elif self._enable_client_handling_time_histogram:
            self._metrics["grpc_client_handled_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        # handler.code() blocks until the call completes, then tags the
        # completed/handled counter with the final status name.
        if self._legacy:
            self._metrics["legacy_grpc_client_completed_counter"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name,
                code=handler.code().name).inc()
        else:
            self._metrics["grpc_client_handled_counter"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name,
                grpc_code=handler.code().name).inc()

        return handler

    def intercept_unary_stream(self, continuation, client_call_details, request):
        """Record metrics for a server-streaming call; counts received messages."""
        grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(client_call_details)
        grpc_type = grpc_utils.SERVER_STREAMING

        self._metrics["grpc_client_started_counter"].labels(
            grpc_type=grpc_type,
            grpc_service=grpc_service_name,
            grpc_method=grpc_method_name).inc()

        start = default_timer()
        handler = continuation(client_call_details, request)
        if self._legacy:
            self._metrics["legacy_grpc_client_completed_latency_seconds_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        elif self._enable_client_handling_time_histogram:
            self._metrics["grpc_client_handled_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        # Wrap the response iterator so each received message increments the
        # stream_msg_received counter as the caller consumes it.
        handler = grpc_utils.wrap_iterator_inc_counter(
            handler,
            self._metrics["grpc_client_stream_msg_received"],
            grpc_type,
            grpc_service_name,
            grpc_method_name)

        if self._enable_client_stream_receive_time_histogram and not self._legacy:
            # NOTE(review): observed before the iterator is consumed, so this
            # measures time up to this point, not full stream receive time —
            # confirm intended.
            self._metrics["grpc_client_stream_recv_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        return handler

    def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
        """Record metrics for a client-streaming call; counts sent messages."""
        grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(client_call_details)
        grpc_type = grpc_utils.CLIENT_STREAMING

        iterator_metric = self._metrics["grpc_client_stream_msg_sent"]

        # Each request message pulled from the iterator bumps the sent counter.
        request_iterator = grpc_utils.wrap_iterator_inc_counter(
            request_iterator,
            iterator_metric,
            grpc_type,
            grpc_service_name,
            grpc_method_name)

        start = default_timer()
        handler = continuation(client_call_details, request_iterator)

        # NOTE(review): grpc_client_started_counter is incremented identically
        # in both branches below — the duplication could be hoisted.
        if self._legacy:
            self._metrics["grpc_client_started_counter"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).inc()
            self._metrics["legacy_grpc_client_completed_latency_seconds_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))
        else:
            self._metrics["grpc_client_started_counter"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).inc()
            if self._enable_client_handling_time_histogram:
                self._metrics["grpc_client_handled_histogram"].labels(
                    grpc_type=grpc_type,
                    grpc_service=grpc_service_name,
                    grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        if self._enable_client_stream_send_time_histogram and not self._legacy:
            self._metrics["grpc_client_stream_send_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        return handler

    def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
        """Record metrics for a bidirectional-streaming call (sent + received)."""
        grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(
            client_call_details)
        grpc_type = grpc_utils.BIDI_STREAMING
        start = default_timer()

        iterator_sent_metric = self._metrics["grpc_client_stream_msg_sent"]

        # Outgoing messages are counted as gRPC drains the request iterator.
        response_iterator = continuation(
            client_call_details,
            grpc_utils.wrap_iterator_inc_counter(
                request_iterator,
                iterator_sent_metric,
                grpc_type,
                grpc_service_name,
                grpc_method_name))

        if self._enable_client_stream_send_time_histogram and not self._legacy:
            self._metrics["grpc_client_stream_send_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        iterator_received_metric = self._metrics["grpc_client_stream_msg_received"]

        # Incoming messages are counted as the caller consumes the responses.
        response_iterator = grpc_utils.wrap_iterator_inc_counter(
            response_iterator,
            iterator_received_metric,
            grpc_type,
            grpc_service_name,
            grpc_method_name)

        if self._enable_client_stream_receive_time_histogram and not self._legacy:
            self._metrics["grpc_client_stream_recv_histogram"].labels(
                grpc_type=grpc_type,
                grpc_service=grpc_service_name,
                grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))

        return response_iterator
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity.
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # Specify a configuration file. 4 | #rcfile= 5 | 6 | # Python code to execute, usually for sys.path manipulation such as 7 | # pygtk.require(). 8 | #init-hook= 9 | 10 | # Profiled execution. 11 | profile=no 12 | 13 | # Add files or directories to the blacklist. They should be base names, not 14 | # paths. 15 | ignore=CVS 16 | 17 | # Pickle collected data for later comparisons. 18 | persistent=yes 19 | 20 | # List of plugins (as comma separated values of python modules names) to load, 21 | # usually to register additional checkers. 22 | load-plugins= 23 | 24 | 25 | [MESSAGES CONTROL] 26 | 27 | # Enable the message, report, category or checker with the given id(s). You can 28 | # either give multiple identifier separated by comma (,) or put this option 29 | # multiple time. See also the "--disable" option for examples. 
30 | enable=indexing-exception,old-raise-syntax 31 | 32 | # Disable the message, report, category or checker with the given id(s). You 33 | # can either give multiple identifiers separated by comma (,) or put this 34 | # option multiple times (only on the command line, not in the configuration 35 | # file where it should appear only once).You can also use "--disable=all" to 36 | # disable everything first and then reenable specific checks. For example, if 37 | # you want to run only the similarities checker, you can use "--disable=all 38 | # --enable=similarities". If you want to run only the classes checker, but have 39 | # no Warning level messages displayed, use"--disable=all --enable=classes 40 | # --disable=W" 41 | disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager,missing-docstring 42 | 43 | 44 | # Set the cache size for astng objects. 45 | cache-size=500 46 | 47 | 48 | [REPORTS] 49 | 50 | # Set the output format. Available formats are text, parseable, colorized, msvs 51 | # (visual studio) and html. You can also give a reporter class, eg 52 | # mypackage.mymodule.MyReporterClass. 53 | output-format=text 54 | 55 | # Put messages in a separate file for each module / package specified on the 56 | # command line instead of printing them on stdout. Reports (if any) will be 57 | # written in a file name "pylint_global.[txt|html]". 58 | files-output=no 59 | 60 | # Tells whether to display a full report or only the messages 61 | reports=no 62 | 63 | # Python expression which should return a note less than 10 (10 is the highest 64 | # note). 
You have access to the variables errors warning, statement which 65 | # respectively contain the number of errors / warnings messages and the total 66 | # number of statements analyzed. This is used by the global evaluation report 67 | # (RP0004). 68 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 69 | 70 | # Add a comment according to your evaluation note. This is used by the global 71 | # evaluation report (RP0004). 72 | comment=no 73 | 74 | # Template used to display messages. This is a python new-style format string 75 | # used to format the message information. See doc for all details 76 | #msg-template= 77 | 78 | 79 | [TYPECHECK] 80 | 81 | # Tells whether missing members accessed in mixin class should be ignored. A 82 | # mixin class is detected if its name ends with "mixin" (case insensitive). 83 | ignore-mixin-members=yes 84 | 85 | # List of classes names for which member attributes should not be checked 86 | # (useful for classes with attributes dynamically set). 87 | ignored-classes=SQLObject 88 | 89 | # When zope mode is activated, add a predefined set of Zope acquired attributes 90 | # to generated-members. 91 | zope=no 92 | 93 | # List of members which are set dynamically and missed by pylint inference 94 | # system, and so shouldn't trigger E0201 when accessed. Python regular 95 | # expressions are accepted. 96 | generated-members=REQUEST,acl_users,aq_parent 97 | 98 | # List of decorators that create context managers from functions, such as 99 | # contextlib.contextmanager. 100 | contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager 101 | 102 | 103 | [VARIABLES] 104 | 105 | # Tells whether we should check for unused import in __init__ files. 106 | init-import=no 107 | 108 | # A regular expression matching the beginning of the name of dummy variables 109 | # (i.e. not used). 
dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=


[BASIC]

# Required attributes for module, separated by a comma
required-attributes=

# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input,reduce


# Disable the report(s) with the given id(s).
# All non-Google reports are disabled by default.
disable-report=R0001,R0002,R0003,R0004,R0101,R0102,R0201,R0202,R0220,R0401,R0402,R0701,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,R0923

# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Regular expression which should only match correct module level names
const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$

# Regular expression which should only match correct class names
class-rgx=^_?[A-Z][a-zA-Z0-9]*$

# Regular expression which should only match correct function names
function-rgx=^(?:(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$

# Regular expression which should only match correct method names
method-rgx=^(?:(?P<exempt>__[a-z0-9_]+__|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$

# Regular expression which should only match correct instance attribute names
attr-rgx=^_{0,2}[a-z][a-z0-9_]*$

# Regular expression which should only match correct argument names
argument-rgx=^[a-z][a-z0-9_]*$

# Regular expression which should only match correct variable names
variable-rgx=^[a-z][a-z0-9_]*$

# Regular expression which should only match correct attribute names in class
# bodies
class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ 157 | 158 | # Regular expression which should only match correct list comprehension / 159 | # generator expression variable names 160 | inlinevar-rgx=^[a-z][a-z0-9_]*$ 161 | 162 | # Good variable names which should always be accepted, separated by a comma 163 | good-names=main,_ 164 | 165 | # Bad variable names which should always be refused, separated by a comma 166 | bad-names= 167 | 168 | # Regular expression which should only match function or class names that do 169 | # not require a docstring. 170 | no-docstring-rgx=(__.*__|main) 171 | 172 | # Minimum line length for functions/classes that require docstrings, shorter 173 | # ones are exempt. 174 | docstring-min-length=10 175 | 176 | 177 | [FORMAT] 178 | 179 | # Maximum number of characters on a single line. 180 | max-line-length=80 181 | 182 | # Regexp for a line that is allowed to be longer than the limit. 183 | ignore-long-lines=(?x) 184 | (^\s*(import|from)\s 185 | |\$Id:\s\/\/depot\/.+#\d+\s\$ 186 | |^[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*("[^"]\S+"|'[^']\S+') 187 | |^\s*\#\ LINT\.ThenChange 188 | |^[^#]*\#\ type:\ [a-zA-Z_][a-zA-Z0-9_.,[\] ]*$ 189 | |pylint 190 | |""" 191 | |\# 192 | |lambda 193 | |(https?|ftp):) 194 | 195 | # Allow the body of an if to be on the same line as the test if there is no 196 | # else. 197 | single-line-if-stmt=y 198 | 199 | # List of optional constructs for which whitespace checking is disabled 200 | no-space-check= 201 | 202 | # Maximum number of lines in a module 203 | max-module-lines=99999 204 | 205 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 206 | # tab). 207 | indent-string=' ' 208 | 209 | 210 | [SIMILARITIES] 211 | 212 | # Minimum lines number of a similarity. 213 | min-similarity-lines=4 214 | 215 | # Ignore comments when computing similarities. 216 | ignore-comments=yes 217 | 218 | # Ignore docstrings when computing similarities. 
219 | ignore-docstrings=yes 220 | 221 | # Ignore imports when computing similarities. 222 | ignore-imports=no 223 | 224 | 225 | [MISCELLANEOUS] 226 | 227 | # List of note tags to take in consideration, separated by a comma. 228 | notes= 229 | 230 | 231 | [IMPORTS] 232 | 233 | # Deprecated modules which should not be used, separated by a comma 234 | deprecated-modules=regsub,TERMIOS,Bastion,rexec,sets 235 | 236 | # Create a graph of every (i.e. internal and external) dependencies in the 237 | # given file (report RP0402 must not be disabled) 238 | import-graph= 239 | 240 | # Create a graph of external dependencies in the given file (report RP0402 must 241 | # not be disabled) 242 | ext-import-graph= 243 | 244 | # Create a graph of internal dependencies in the given file (report RP0402 must 245 | # not be disabled) 246 | int-import-graph= 247 | 248 | 249 | [CLASSES] 250 | 251 | # List of interface methods to ignore, separated by a comma. This is used for 252 | # instance to not check methods defines in Zope's Interface base class. 253 | ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by 254 | 255 | # List of method names used to declare (i.e. assign) instance attributes. 256 | defining-attr-methods=__init__,__new__,setUp 257 | 258 | # List of valid names for the first argument in a class method. 259 | valid-classmethod-first-arg=cls,class_ 260 | 261 | # List of valid names for the first argument in a metaclass class method. 262 | valid-metaclass-classmethod-first-arg=mcs 263 | 264 | 265 | [DESIGN] 266 | 267 | # Maximum number of arguments for function / method 268 | max-args=5 269 | 270 | # Argument names that match this expression will be ignored. 
Default to name 271 | # with leading underscore 272 | ignored-argument-names=_.* 273 | 274 | # Maximum number of locals for function / method body 275 | max-locals=15 276 | 277 | # Maximum number of return / yield for function / method body 278 | max-returns=6 279 | 280 | # Maximum number of branch for function / method body 281 | max-branches=12 282 | 283 | # Maximum number of statements in function / method body 284 | max-statements=50 285 | 286 | # Maximum number of parents for a class (see R0901). 287 | max-parents=7 288 | 289 | # Maximum number of attributes for a class (see R0902). 290 | max-attributes=7 291 | 292 | # Minimum number of public methods for a class (see R0903). 293 | min-public-methods=2 294 | 295 | # Maximum number of public methods for a class (see R0904). 296 | max-public-methods=20 297 | 298 | 299 | [EXCEPTIONS] 300 | 301 | # Exceptions that will emit a warning when being caught. Defaults to 302 | # "Exception" 303 | overgeneral-exceptions=Exception,StandardError,BaseException 304 | 305 | 306 | [AST] 307 | 308 | # Maximum line length for lambdas 309 | short-func-length=1 310 | 311 | # List of module members that should be marked as deprecated. 312 | # All of the string functions are listed in 4.1.4 Deprecated string functions 313 | # in the Python 2.4 docs. 314 | deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc 315 | 316 | 317 | [DOCSTRING] 318 | 319 | # List of exceptions that do not need to be mentioned in the Raises section of 320 | # a docstring. 
321 | ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError 322 | 323 | 324 | 325 | [TOKENS] 326 | 327 | # Number of spaces of indent required when the last token on the preceding line 328 | # is an open (, [, or {. 329 | indent-after-paren=4 330 | --------------------------------------------------------------------------------