├── .gitignore ├── LICENSE ├── README.md ├── cwmetrics └── __init__.py ├── setup.py └── tests └── test_cwmetrics.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### Example user template template 3 | ### Example user template 4 | 5 | # IntelliJ project files 6 | .idea 7 | *.iml 8 | out 9 | gen 10 | ### Python template 11 | # Byte-compiled / optimized / DLL files 12 | __pycache__/ 13 | *.py[cod] 14 | *$py.class 15 | 16 | # C extensions 17 | *.so 18 | 19 | # Distribution / packaging 20 | .Python 21 | build/ 22 | develop-eggs/ 23 | dist/ 24 | downloads/ 25 | eggs/ 26 | .eggs/ 27 | lib/ 28 | lib64/ 29 | parts/ 30 | sdist/ 31 | var/ 32 | wheels/ 33 | pip-wheel-metadata/ 34 | share/python-wheels/ 35 | *.egg-info/ 36 | .installed.cfg 37 | *.egg 38 | MANIFEST 39 | 40 | # PyInstaller 41 | # Usually these files are written by a python script from a template 42 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 43 | *.manifest 44 | *.spec 45 | 46 | # Installer logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | 50 | # Unit test / coverage reports 51 | htmlcov/ 52 | .tox/ 53 | .nox/ 54 | .coverage 55 | .coverage.* 56 | .cache 57 | nosetests.xml 58 | coverage.xml 59 | *.cover 60 | .hypothesis/ 61 | .pytest_cache/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | db.sqlite3-journal 72 | 73 | # Flask stuff: 74 | instance/ 75 | .webassets-cache 76 | 77 | # Scrapy stuff: 78 | .scrapy 79 | 80 | # Sphinx documentation 81 | docs/_build/ 82 | 83 | # PyBuilder 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | .python-version 95 | 96 | # pipenv 97 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 98 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 99 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 100 | # install all needed dependencies. 
101 | #Pipfile.lock 102 | 103 | # celery beat schedule file 104 | celerybeat-schedule 105 | 106 | # SageMath parsed files 107 | *.sage.py 108 | 109 | # Environments 110 | .env 111 | .venv 112 | env/ 113 | venv/ 114 | ENV/ 115 | env.bak/ 116 | venv.bak/ 117 | 118 | # Spyder project settings 119 | .spyderproject 120 | .spyproject 121 | 122 | # Rope project settings 123 | .ropeproject 124 | 125 | # mkdocs documentation 126 | /site 127 | 128 | # mypy 129 | .mypy_cache/ 130 | .dmypy.json 131 | dmypy.json 132 | 133 | # Pyre type checker 134 | .pyre/ 135 | 136 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Bojan Keca 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cloudwatch-metrics-buffer ![Python 3](https://img.shields.io/badge/Python-3-brightgreen.svg) 2 | 3 | This is a wrapper library for publishing metrics to **[AWS CloudWatch](https://aws.amazon.com/cloudwatch/)**. 4 | 5 | While playing around with Lambdas and serverless frameworks, I needed a library to help me publish metrics to CloudWatch in an easy and elegant way. I wanted to avoid boilerplate code that pollutes all my methods. Ideally, I would just decorate a function to measure its execution time or count its calls. After an unsuccessful search, I implemented my own. 6 | 7 | This wrapper buffers metrics first, then sends them in batches. It supports **timeit** and **count** decorators for instrumenting functions in an elegant way. Although publishing is batched, there is no guarantee CloudWatch will accept everything. If you send > [150 TPS](https://docs.aws.amazon.com/en_pv/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html) 8 | (which can happen easily if you scale out with Lambdas), you might get throttled. Read the CloudWatch documentation for limits and pricing considerations.
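For example, in a Lambda handler the decorators below buffer a latency metric and an invocation count and flush them in a single batched call when the handler returns. This is a minimal sketch; the handler, the `MyService` namespace and the metric names are illustrative, not part of this library:

```python
from cwmetrics import CloudWatchMetricsBuffer

cw = CloudWatchMetricsBuffer('MyService')   # illustrative namespace


@cw.timeit('handler latency')               # execution time in milliseconds
@cw.count('invocations')                    # value of 1 per invocation
def handler(event, context):                # hypothetical Lambda entry point
    # ... do the actual work of the invocation here ...
    return {'statusCode': 200}

# both metrics are buffered and sent in one PutMetricData call after handler() returns
```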
9 | 10 | ## Installation 11 | ``` 12 | pip install cloudwatch-metrics-buffer 13 | ``` 14 | 15 | ## Usage 16 | ### Post metrics explicitly 17 | ```python 18 | import datetime 19 | from cwmetrics import CloudWatchMetricsBuffer 20 | cw = CloudWatchMetricsBuffer('Some Namespace') 21 | 22 | # buffer single metric values 23 | cw.put_value('total_calls', 5) # metric with value 5, no unit specified 24 | cw.put_value('latency', 11.25, unit='Milliseconds') # metric with unit specified 25 | cw.put_value('latency', 11.25, dimensions={'HTTP Method': 'GET'}, unit='Milliseconds') # same latency with a dimension specified 26 | cw.put_value('home-page', 1, timestamp=datetime.datetime(2019, 10, 10, 14, 0, 0)) # metric with an explicit timestamp 27 | 28 | # buffer a statistic; use this if you aggregate data points yourself along the way in your app 29 | cw.put_statistic('metric', sample_count=50, sum=10000, minimum=0, maximum=500) 30 | 31 | # send all buffered metrics to CloudWatch 32 | cw.send() 33 | ``` 34 | 35 | ### Post metrics using decorators 36 | ```python 37 | from cwmetrics import CloudWatchMetricsBuffer 38 | 39 | cw = CloudWatchMetricsBuffer('Some Namespace') 40 | 41 | # send a value of 1 for the metric on each execution 42 | @cw.count('count_metric1') 43 | def func(): 44 | ... 45 | 46 | func() 47 | ``` 48 | 49 | You can also decorate a function multiple times. Publishing to CloudWatch happens after the outermost decorator finishes: 50 | ```python 51 | # measure execution time in milliseconds and count requests 52 | @cw.timeit('api') 53 | @cw.timeit('api', dimensions={'HTTP Method': 'GET'}) 54 | @cw.count('requests', dimensions={'HTTP Method': 'GET'}) 55 | def process_api_request(): 56 | ... 57 | 58 | # CW will receive 3 metric values 59 | process_api_request() 60 | ``` 61 | 62 | This will also work: 63 | ```python 64 | @cw.timeit('api') 65 | @cw.timeit('api', dimensions={'HTTP Method': 'GET'}) 66 | @cw.count('requests', dimensions={'HTTP Method': 'GET'}) 67 | @cw.count('requests') 68 | def process_api_request(): 69 | ... 70 | authenticate() 71 | ... 72 | 73 | @cw.timeit('auth') 74 | @cw.count('auth requests') 75 | def authenticate(): 76 | ... 77 | 78 | # all metrics (6 in total) are buffered and sent after the method has executed 79 | process_api_request() 80 | ``` 81 | -------------------------------------------------------------------------------- /cwmetrics/__init__.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from functools import wraps 3 | from typing import Dict, Union, List, Callable 4 | 5 | import boto3 6 | import botocore.exceptions 7 | 8 | 9 | class CloudWatchMetricsBuffer(object): 10 | """ 11 | Buffer metrics for sending to CloudWatch: 12 | https://docs.aws.amazon.com/en_pv/AmazonCloudWatch/latest/monitoring/working_with_metrics.html 13 | 14 | A CloudWatchMetricsBuffer instance first buffers multiple metric values and then sends them to CloudWatch. 15 | Sending is executed in batches of 20 values. You can buffer metric values and metric statistics. 16 | 17 | Note that metrics cannot be removed once sent. If you are experimenting, I recommend using a 'test' namespace to 18 | avoid cluttering your dashboard with unwanted metrics for 15 months.
19 | """ 20 | 21 | def __init__(self, namespace: str, *args, **kwargs): 22 | """ 23 | Initialize a boto3 client with the args and kwargs as input 24 | """ 25 | self.namespace = namespace 26 | self.metrics = [] 27 | self.client = boto3.client('cloudwatch', *args, **kwargs) 28 | self.nesting_level = 0 # to control when to publish metrics to CW 29 | 30 | def put_value(self, metric_name: str, value, dimensions: Dict = None, unit: str = None, 31 | timestamp: datetime = None): 32 | """ 33 | Buffer a single metric for later sending with the send() method. 34 | 35 | :type metric_name: str 36 | :type value: float | int 37 | :param dimensions: dict[str, str] for scoping down metric 38 | :param unit: string specifying the unit. Full list of allowed values in _handle_common_params. 39 | :param timestamp: datetime.datetime specifying when the metric happened or None to default to NOW 40 | """ 41 | metric = {'MetricName': metric_name, 'Value': value} 42 | self._handle_common_params(metric, dimensions, unit, timestamp) 43 | self.metrics.append(metric) 44 | 45 | def put_statistic(self, metric_name: str, sample_count: int, sum: Union[float, int], minimum: Union[float, int], 46 | maximum: Union[float, int], timestamp: datetime = None, dimensions: Dict = None, 47 | unit: str = None): 48 | """ 49 | Buffer a summary/statistic of multiple data points gathered outside for later sending with the send() method. 50 | 51 | :type metric_name: str 52 | :param sample_count: int specifying how many data points are being summarized 53 | :param sum: float or int giving the sum of the data points 54 | :param minimum: float or int giving the minimum 55 | :param maximum: float or int giving the maximum 56 | :param timestamp: datetime.datetime specifying when the metric happened or None to default to now 57 | :param dimensions: dict[str, str] for scoping down metric 58 | :param unit: string specifying the unit. Full list of allowed values in _handle_common_params. 
59 | :return: 60 | """ 61 | metric = {'MetricName': metric_name, 'StatisticValues': {'Sum': sum, 'SampleCount': sample_count, 62 | 'Minimum': minimum, 'Maximum': maximum}} 63 | self._handle_common_params(metric, dimensions, unit, timestamp) 64 | self.metrics.append(metric) 65 | 66 | @staticmethod 67 | def _handle_common_params(metric: Dict, dimensions: Union[Dict, List], unit: str, timestamp: datetime): 68 | """ 69 | :type dimensions: dict[str,str] | list[dict[str,str]] | tuple[dict[str,str]] 70 | :param unit: 'Seconds'|'Microseconds'|'Milliseconds'|'Count'|'Count/Second'|'None'| 71 | 'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'| 72 | 'Percent'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'| 73 | 'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second' 74 | :type timestamp: None | datetime.datetime 75 | """ 76 | if unit is not None: 77 | metric['Unit'] = unit 78 | if isinstance(dimensions, dict): 79 | metric['Dimensions'] = [{'Name': k, 'Value': v} for k, v in dimensions.items()] 80 | elif isinstance(dimensions, list) or isinstance(dimensions, tuple): 81 | metric['Dimensions'] = dimensions 82 | if timestamp is not None: 83 | metric['Timestamp'] = timestamp 84 | else: 85 | metric['Timestamp'] = datetime.utcnow() 86 | 87 | def send(self): 88 | """ 89 | Send buffered metrics to CloudWatch 90 | """ 91 | while len(self.metrics) > 0: 92 | # put_metric_data is limited to 20 messages at a time, so the list of metrics is sent in 20-message chunks 93 | metrics = self.metrics[:20] 94 | try: 95 | self.client.put_metric_data(Namespace=self.namespace, MetricData=metrics) 96 | except botocore.exceptions.ClientError as e: 97 | if e.response.get('Error', {}).get('Code') == 'Throttling': 98 | print('throttled trying to send {} values for {}'.format(len(metrics), self.namespace)) 99 | else: 100 | raise 101 | del self.metrics[:20] 102 | 103 | def timeit(self, metric_name: str, dimensions: Dict[str, str] = None): 104 | """ 105 | Decorator for measuring execution time of the function in milliseconds. 106 | :param metric_name: string representing metric name 107 | :param dimensions: dict[str,str] | list[dict[str,str]] | tuple[dict[str,str]] 108 | :return: decorated function 109 | """ 110 | 111 | def timeit_decorator(func: Callable): 112 | @wraps(func) 113 | @self._nested 114 | def wrapper(*args, **kwargs): 115 | start = datetime.now() 116 | ret = func(*args, **kwargs) 117 | duration = (datetime.now() - start).total_seconds() * 1000 # milliseconds 118 | self.put_value(metric_name=metric_name, value=duration, dimensions=dimensions, unit='Milliseconds') 119 | return ret 120 | return wrapper 121 | return timeit_decorator 122 | 123 | def count(self, metric_name: str, count_value: int = 1, dimensions: Dict[str, str] = None): 124 | """ 125 | Decorator for counting execution of functions. 126 | :param metric_name: string representing metric name 127 | :param count_value: Counter is incremented by 1 by default. 
128 | :param dimensions: dict[str,str] for scoping down metric 129 | :return: decorated function 130 | """ 131 | def count_decorator(func: Callable): 132 | @wraps(func) 133 | @self._nested 134 | def wrapper(*args, **kwargs): 135 | ret = func(*args, **kwargs) 136 | self.put_value(metric_name=metric_name, value=count_value, dimensions=dimensions) 137 | return ret 138 | return wrapper 139 | return count_decorator 140 | 141 | def _nested(self, func: Callable): 142 | """ 143 | Internal decorator used to track the nesting level whenever the count and timeit decorators are applied. We want to 144 | buffer values until the outermost decorated function has finished executing. The nesting level is incremented on 145 | entry and decremented on exit; when it drops back to 0, send() is called to flush all buffered metrics. For example, 146 | a function may be decorated with @timeit() and several @count() variants, and a decorated function may call another 147 | decorated function; metrics are sent to CloudWatch only when the last (outermost) decorated call returns. 148 | 149 | :param func: function 150 | :return: wrapped function 151 | """ 152 | def wrapper(*args, **kwargs): 153 | self.nesting_level += 1 154 | try: 155 | return func(*args, **kwargs) 156 | finally: 157 | # decrement even if func raises, so the nesting counter cannot get stuck above zero 158 | self.nesting_level -= 1 159 | # no closures above this one, send metrics now 160 | if self.nesting_level == 0: 161 | self.send() 162 | return wrapper 163 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | from setuptools import setup, find_packages 3 | 4 | HERE = pathlib.Path(__file__).parent 5 | 6 | README = (HERE / "README.md").read_text() 7 | 8 | setup( 9 | name="cloudwatch-metrics-buffer", 10 | version="0.1.0", 11 | description="Library for posting metrics to AWS CloudWatch", 12 | long_description=README, 13 | long_description_content_type="text/markdown", 14 | url="https://github.com/kecabojan/cloudwatch-metrics-buffer", 15 | author="Bojan Keca", 16 | author_email="kecabojan@gmail.com", 17 | license="MIT", 18 | classifiers=[ 19 | "License :: OSI Approved :: MIT License", 20 | "Programming Language :: Python :: 3", 21 | "Programming Language :: Python :: 3.7", 22 | ], 23 | packages=find_packages(exclude=("tests",)), 24 | install_requires=["boto3", "botocore"], 25 | ) -------------------------------------------------------------------------------- /tests/test_cwmetrics.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | import time 4 | 5 | import boto3 6 | import moto 7 | import pytest 8 | 9 | from cwmetrics import CloudWatchMetricsBuffer 10 | 11 | 12 | # set up fake AWS credentials 13 | @pytest.fixture(scope='function') 14 | def aws_credentials(): 15 | """ 16 | Mocked AWS Credentials for moto.
17 | """ 18 | os.environ['AWS_ACCESS_KEY_ID'] = 'testing' 19 | os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing' 20 | os.environ['AWS_SECURITY_TOKEN'] = 'testing' 21 | os.environ['AWS_SESSION_TOKEN'] = 'testing' 22 | 23 | 24 | @moto.mock_cloudwatch 25 | def test_send_metrics(aws_credentials): 26 | client = boto3.client('cloudwatch') 27 | 28 | mb = CloudWatchMetricsBuffer('Test') 29 | 30 | mb.put_value('test_metric1', 11.1) 31 | mb.put_value('test_metric2', 22, dimensions={'dim1': 'dim_value1'}) 32 | mb.send() 33 | 34 | from_ = datetime.datetime.utcnow() - datetime.timedelta(minutes=1) 35 | to_ = datetime.datetime.utcnow() 36 | # get metrics from CW 37 | response = client.get_metric_statistics( 38 | Namespace='Test', 39 | MetricName='test_metric1', 40 | StartTime=from_, 41 | EndTime=to_, 42 | Period=60, 43 | Statistics=['Average'], 44 | ) 45 | 46 | assert 1 == len(response['Datapoints']) 47 | assert 11.1 == response['Datapoints'][0]['Average'] 48 | 49 | response = client.get_metric_statistics( 50 | Namespace='Test', 51 | MetricName='test_metric2', 52 | StartTime=from_, 53 | EndTime=to_, 54 | Period=60, 55 | Statistics=['Average'], 56 | Dimensions=[ 57 | { 58 | 'Name': 'dim1', 59 | 'Value': 'dim_value1' 60 | }, 61 | ], 62 | ) 63 | assert 1 == len(response['Datapoints']) 64 | assert 22 == response['Datapoints'][0]['Average'] 65 | 66 | 67 | @moto.mock_cloudwatch 68 | def test_timeit_decorator(aws_credentials): 69 | client = boto3.client('cloudwatch') 70 | 71 | mb = CloudWatchMetricsBuffer('Test') 72 | 73 | @mb.timeit('time_metric') 74 | def some_func(): 75 | time.sleep(0.2) 76 | 77 | some_func() 78 | 79 | from_ = datetime.datetime.utcnow() - datetime.timedelta(minutes=1) 80 | to_ = datetime.datetime.utcnow() 81 | response = client.get_metric_statistics( 82 | Namespace='Test', 83 | MetricName='time_metric', 84 | StartTime=from_, 85 | EndTime=to_, 86 | Period=60, 87 | Statistics=['Average'], 88 | ) 89 | assert 1 == len(response['Datapoints']) 90 | assert 200 <= response['Datapoints'][0]['Average'] < 210 91 | 92 | 93 | @moto.mock_cloudwatch 94 | def test_count_decorator(aws_credentials): 95 | client = boto3.client('cloudwatch') 96 | 97 | mb = CloudWatchMetricsBuffer('Test') 98 | 99 | @mb.count('count_metric') 100 | def some_func(): 101 | pass 102 | 103 | for _ in range(50): 104 | some_func() 105 | 106 | from_ = datetime.datetime.utcnow() - datetime.timedelta(minutes=1) 107 | to_ = datetime.datetime.utcnow() 108 | response = client.get_metric_statistics( 109 | Namespace='Test', 110 | MetricName='count_metric', 111 | StartTime=from_, 112 | EndTime=to_, 113 | Period=60, 114 | Statistics=['Sum'], 115 | ) 116 | assert 1 == len(response['Datapoints']) 117 | assert 50 == response['Datapoints'][0]['Sum'] 118 | 119 | 120 | @moto.mock_cloudwatch 121 | def test_nesting(aws_credentials): 122 | client = boto3.client('cloudwatch') 123 | 124 | mb = CloudWatchMetricsBuffer('Test') 125 | 126 | @mb.count('count_metric') 127 | @mb.timeit('time_metric') 128 | @mb.count('count_metric2', count_value=100) 129 | def some_func(): 130 | time.sleep(0.1) 131 | 132 | some_func() 133 | 134 | from_ = datetime.datetime.utcnow() - datetime.timedelta(minutes=1) 135 | to_ = datetime.datetime.utcnow() 136 | response = client.get_metric_statistics( 137 | Namespace='Test', 138 | MetricName='count_metric', 139 | StartTime=from_, 140 | EndTime=to_, 141 | Period=60, 142 | Statistics=['Sum'], 143 | ) 144 | assert 1 == len(response['Datapoints']) 145 | assert 1 == response['Datapoints'][0]['Sum'] 146 | 147 | response = 
client.get_metric_statistics( 148 | Namespace='Test', 149 | MetricName='count_metric2', 150 | StartTime=from_, 151 | EndTime=to_, 152 | Period=60, 153 | Statistics=['Sum'], 154 | ) 155 | assert 1 == len(response['Datapoints']) 156 | assert 100 == response['Datapoints'][0]['Sum'] 157 | 158 | response = client.get_metric_statistics( 159 | Namespace='Test', 160 | MetricName='time_metric', 161 | StartTime=from_, 162 | EndTime=to_, 163 | Period=60, 164 | Statistics=['Average'], 165 | ) 166 | assert 1 == len(response['Datapoints']) 167 | assert 100 <= response['Datapoints'][0]['Average'] <= 110 168 | 169 | 170 | @moto.mock_cloudwatch 171 | def test_function_in_function(aws_credentials): 172 | client = boto3.client('cloudwatch') 173 | 174 | mb = CloudWatchMetricsBuffer('Test') 175 | 176 | @mb.count('count_metric1') 177 | def func1(): 178 | pass 179 | 180 | @mb.count('count_metric2') 181 | def func2(): 182 | func1() 183 | 184 | func2() 185 | 186 | from_ = datetime.datetime.utcnow() - datetime.timedelta(minutes=1) 187 | to_ = datetime.datetime.utcnow() 188 | response = client.get_metric_statistics( 189 | Namespace='Test', 190 | MetricName='count_metric1', 191 | StartTime=from_, 192 | EndTime=to_, 193 | Period=60, 194 | Statistics=['Sum'], 195 | ) 196 | assert 1 == len(response['Datapoints']) 197 | assert 1 == response['Datapoints'][0]['Sum'] 198 | 199 | response = client.get_metric_statistics( 200 | Namespace='Test', 201 | MetricName='count_metric2', 202 | StartTime=from_, 203 | EndTime=to_, 204 | Period=60, 205 | Statistics=['Sum'], 206 | ) 207 | assert 1 == len(response['Datapoints']) 208 | assert 1 == response['Datapoints'][0]['Sum'] 209 | --------------------------------------------------------------------------------