├── .gitignore ├── .travis.yml ├── LICENSE ├── README.rst ├── cache ├── __init__.py ├── async_lru.py ├── async_ttl.py ├── key.py └── lru.py ├── requirements.txt ├── setup.py └── tests ├── __init__.py ├── lru_test.py └── ttl_test.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/* 2 | __pycache__/* 3 | async_cache.egg-info/* 4 | dist/* 5 | build/* -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.6" 4 | script: python3 tests/__init__.py 5 | install: pip3 install --upgrade pip 6 | deploy: 7 | provider: pypi 8 | user: "iamsinghrajat" 9 | password: 10 | secure: "LJCnWg/6HgUb0FfIMNGX6BIPng0OqiAqLCxnRwrvjbQS/ewTPaahE30V+wPrL5GfeLe4o8lYo62lHIvXydXNyt13jpzALIkdqR3XuNeiCbGUE6bOvdHfmZDeP7SkAb5yrPUW32oNetiFJaO0a4WyeVuD52eplYhVj0qHIrr0uPyIes9Wwdtv9t9+aJyuyVx9te7nov6Ln/03SsYErvA8DSlo8F1ZTAKUNwFTFWEBaRmMpKWCi69WVRH0qW0lriAWisWCwQUw2O1QcFolxzrARGp7yhuTbHeWyfo/ZkSXwUdw7AFAaJrfnlwaUKOan8B1p7m6SX8RZ24fHNiyDf/jFrc/cAht9AfSZudVDEMVzLq+67nmMb3LVL25wBfcTiLa/rTCpE1tY6n2AfRPf1HZphHr1FbUhi/6OyIPyjKgS4DdnYXvJmsfItoQ9PGH6w7YmOjmrooAfQTRsxbsfIYSr40pefY6RpSkcEAWizD8NpX6Uiq3dVVhf/55IbOB9BuAZMcDgW/R7hkk14APLAAHkvCdyVwSAx7BSKpJqJUwJHyk7XihkoB0/xEA6brKG1Aj2s4IpGhABinT3IDGNKLV2HbLTM3/sj7WBO40/0pb0jU6+H/AzTQeJyXyJIMoNL/dGoVFlvuOp3gnD4bznBpn05KEA4Dee80DjtMil60pEUs=" 11 | skip_cleanup: true 12 | skip_existing: true 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Rajat Singh 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | async-cache 2 | =========== 3 | :info: A caching solution for asyncio 4 | 5 | .. image:: https://img.shields.io/pypi/v/async-cache.svg 6 | :target: https://pypi.python.org/pypi/async-cache 7 | .. image:: https://www.codetriage.com/iamsinghrajat/async-cache/badges/users.svg 8 | :target: https://pypi.python.org/pypi/async-cache 9 | .. image:: https://static.pepy.tech/personalized-badge/async-cache?period=total&units=international_system&left_color=black&right_color=blue&left_text=Downloads 10 | :target: https://pepy.tech/project/async-cache 11 | .. image:: https://snyk.io/advisor/python/async-cache/badge.svg 12 | :target: https://snyk.io/advisor/python/async-cache 13 | :alt: async-cache 14 | 15 | 16 | 17 | 18 | 19 | Installation 20 | ------------ 21 | 22 | .. code-block:: shell 23 | 24 | pip install async-cache 25 | 26 | Basic Usage 27 | ----------- 28 | 29 | .. code-block:: python 30 | 31 | # LRU Cache 32 | from cache import AsyncLRU 33 | 34 | @AsyncLRU(maxsize=128) 35 | async def func(*args, **kwargs): 36 | """ 37 | maxsize : max number of results that are cached. 38 | if max limit is reached the oldest result is deleted. 39 | """ 40 | pass 41 | 42 | 43 | # TTL Cache 44 | from cache import AsyncTTL 45 | 46 | @AsyncTTL(time_to_live=60, maxsize=1024, skip_args=1) 47 | async def func(*args, **kwargs): 48 | """ 49 | time_to_live : max time for which a cached result is valid (in seconds) 50 | maxsize : max number of results that are cached. 51 | if max limit is reached the oldest result is deleted. 52 | skip_args : Use `1` to skip first arg of func in determining cache key 53 | """ 54 | pass 55 | 56 | # Supports primitive as well as non-primitive function parameter. 57 | # Currently TTL & LRU cache is supported. 58 | 59 | Advanced Usage 60 | -------------- 61 | 62 | .. code-block:: python 63 | 64 | class CustomDataClass: 65 | id: int 66 | value: int 67 | 68 | 69 | from cache import AsyncLRU 70 | 71 | @AsyncLRU(maxsize=128) 72 | async def func(model: "CustomDataClass"): 73 | ... 74 | # function logic 75 | ... 76 | 77 | # async-cache will work even if function parameters are: 78 | # 1. orm objects 79 | # 2. request object 80 | # 3. any other custom object type. 81 | 82 | 83 | # To refresh the function result use the `use_cache=False` param in the function invocation 84 | func(*args, use_cache=False, **kwargs) 85 | -------------------------------------------------------------------------------- /cache/__init__.py: -------------------------------------------------------------------------------- 1 | from .async_lru import AsyncLRU 2 | from .async_ttl import AsyncTTL 3 | -------------------------------------------------------------------------------- /cache/async_lru.py: -------------------------------------------------------------------------------- 1 | from .key import KEY 2 | from .lru import LRU 3 | 4 | 5 | class AsyncLRU: 6 | def __init__(self, maxsize=128): 7 | """ 8 | :param maxsize: Use maxsize as None for unlimited size cache 9 | """ 10 | self.lru = LRU(maxsize=maxsize) 11 | 12 | def cache_clear(self): 13 | """ 14 | Clears the LRU cache. 15 | 16 | This method empties the cache, removing all stored 17 | entries and effectively resetting the cache. 18 | 19 | :return: None 20 | """ 21 | self.lru.clear() 22 | 23 | def __call__(self, func): 24 | async def wrapper(*args, use_cache=True, **kwargs): 25 | key = KEY(args, kwargs) 26 | if key in self.lru and use_cache: 27 | return self.lru[key] 28 | else: 29 | self.lru[key] = await func(*args, **kwargs) 30 | return self.lru[key] 31 | 32 | wrapper.__name__ += func.__name__ 33 | wrapper.__dict__['cache_clear'] = self.cache_clear 34 | 35 | return wrapper 36 | -------------------------------------------------------------------------------- /cache/async_ttl.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | from .key import KEY 4 | from .lru import LRU 5 | 6 | 7 | class AsyncTTL: 8 | class _TTL(LRU): 9 | def __init__(self, time_to_live, maxsize): 10 | super().__init__(maxsize=maxsize) 11 | 12 | self.time_to_live = ( 13 | datetime.timedelta(seconds=time_to_live) if time_to_live else None 14 | ) 15 | 16 | self.maxsize = maxsize 17 | 18 | def __contains__(self, key): 19 | if key not in self.keys(): 20 | return False 21 | else: 22 | key_expiration = super().__getitem__(key)[1] 23 | if key_expiration and key_expiration < datetime.datetime.now(): 24 | del self[key] 25 | return False 26 | else: 27 | return True 28 | 29 | def __getitem__(self, key): 30 | value = super().__getitem__(key)[0] 31 | return value 32 | 33 | def __setitem__(self, key, value): 34 | ttl_value = ( 35 | (datetime.datetime.now() + self.time_to_live) 36 | if self.time_to_live 37 | else None 38 | ) 39 | super().__setitem__(key, (value, ttl_value)) 40 | 41 | def __init__(self, time_to_live=60, maxsize=1024, skip_args: int = 0): 42 | """ 43 | 44 | :param time_to_live: Use time_to_live as None for non expiring cache 45 | :param maxsize: Use maxsize as None for unlimited size cache 46 | :param skip_args: Use `1` to skip first arg of func in determining cache key 47 | """ 48 | self.ttl = self._TTL(time_to_live=time_to_live, maxsize=maxsize) 49 | self.skip_args = skip_args 50 | 51 | def cache_clear(self): 52 | """ 53 | Clears the TTL cache. 54 | 55 | This method empties the cache, removing all stored 56 | entries and effectively resetting the cache. 57 | 58 | :return: None 59 | """ 60 | self.ttl.clear() 61 | 62 | def __call__(self, func): 63 | async def wrapper(*args, use_cache=True, **kwargs): 64 | key = KEY(args[self.skip_args:], kwargs) 65 | if key in self.ttl and use_cache: 66 | val = self.ttl[key] 67 | else: 68 | self.ttl[key] = await func(*args, **kwargs) 69 | val = self.ttl[key] 70 | 71 | return val 72 | 73 | wrapper.__name__ += func.__name__ 74 | wrapper.__dict__['cache_clear'] = self.cache_clear 75 | 76 | return wrapper 77 | -------------------------------------------------------------------------------- /cache/key.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | 4 | class KEY: 5 | def __init__(self, *args, **kwargs): 6 | self.args = args 7 | self.kwargs = kwargs 8 | kwargs.pop("use_cache", None) 9 | 10 | def __eq__(self, obj): 11 | return hash(self) == hash(obj) 12 | 13 | def __hash__(self): 14 | def _hash(param: Any): 15 | if isinstance(param, tuple): 16 | return tuple(map(_hash, param)) 17 | if isinstance(param, dict): 18 | return tuple(map(_hash, param.items())) 19 | elif hasattr(param, "__dict__"): 20 | return str(vars(param)) 21 | else: 22 | return str(param) 23 | 24 | return hash(_hash(self.args) + _hash(self.kwargs)) 25 | -------------------------------------------------------------------------------- /cache/lru.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | 4 | class LRU(OrderedDict): 5 | def __init__(self, maxsize, *args, **kwargs): 6 | self.maxsize = maxsize 7 | super().__init__(*args, **kwargs) 8 | 9 | def __getitem__(self, key): 10 | value = super().__getitem__(key) 11 | self.move_to_end(key) 12 | return value 13 | 14 | def __setitem__(self, key, value): 15 | super().__setitem__(key, value) 16 | if self.maxsize and len(self) > self.maxsize: 17 | oldest = next(iter(self)) 18 | del self[oldest] 19 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iamsinghrajat/async-cache/c5c34562d11782808892a66bc70b1db3fbed4463/requirements.txt -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.rst", "r") as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name="async-cache", 8 | version="1.1.1", 9 | author="Rajat Singh", 10 | author_email="iamsinghrajat@gmail.com", 11 | description="An asyncio Cache", 12 | long_description=long_description, 13 | long_description_content_type="text/x-rst", 14 | url="https://github.com/iamsinghrajat/async-cache", 15 | packages=setuptools.find_packages(), 16 | classifiers=[ 17 | "Programming Language :: Python :: 3", 18 | "License :: OSI Approved :: MIT License", 19 | "Operating System :: OS Independent", 20 | ], 21 | python_requires=">=3.3", 22 | keywords=["asyncio", "lru", "cache", "async", "cache", "lru-cache", "ttl"], 23 | ) 24 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iamsinghrajat/async-cache/c5c34562d11782808892a66bc70b1db3fbed4463/tests/__init__.py -------------------------------------------------------------------------------- /tests/lru_test.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | from timeit import timeit 4 | 5 | from cache import AsyncLRU, AsyncTTL 6 | 7 | 8 | @AsyncLRU(maxsize=128) 9 | async def func(wait: int): 10 | await asyncio.sleep(wait) 11 | 12 | @AsyncLRU(maxsize=128) 13 | async def cache_clear_fn(wait: int): 14 | await asyncio.sleep(wait) 15 | 16 | 17 | class TestClassFunc: 18 | @AsyncLRU(maxsize=128) 19 | async def obj_func(self, wait: int): 20 | await asyncio.sleep(wait) 21 | 22 | @staticmethod 23 | @AsyncTTL(maxsize=128, time_to_live=None, skip_args=1) 24 | async def skip_arg_func(arg: int, wait: int): 25 | await asyncio.sleep(wait) 26 | 27 | @classmethod 28 | @AsyncLRU(maxsize=128) 29 | async def class_func(cls, wait: int): 30 | await asyncio.sleep(wait) 31 | 32 | 33 | def test(): 34 | t1 = time.time() 35 | asyncio.get_event_loop().run_until_complete(func(4)) 36 | t2 = time.time() 37 | asyncio.get_event_loop().run_until_complete(func(4)) 38 | t3 = time.time() 39 | t_first_exec = (t2 - t1) * 1000 40 | t_second_exec = (t3 - t2) * 1000 41 | print(t_first_exec) 42 | print(t_second_exec) 43 | assert t_first_exec > 4000 44 | assert t_second_exec < 4000 45 | 46 | 47 | def test_obj_fn(): 48 | t1 = time.time() 49 | obj = TestClassFunc() 50 | asyncio.get_event_loop().run_until_complete(obj.obj_func(4)) 51 | t2 = time.time() 52 | asyncio.get_event_loop().run_until_complete(obj.obj_func(4)) 53 | t3 = time.time() 54 | t_first_exec = (t2 - t1) * 1000 55 | t_second_exec = (t3 - t2) * 1000 56 | print(t_first_exec) 57 | print(t_second_exec) 58 | assert t_first_exec > 4000 59 | assert t_second_exec < 4000 60 | 61 | 62 | def test_class_fn(): 63 | t1 = time.time() 64 | asyncio.get_event_loop().run_until_complete(TestClassFunc.class_func(4)) 65 | t2 = time.time() 66 | asyncio.get_event_loop().run_until_complete(TestClassFunc.class_func(4)) 67 | t3 = time.time() 68 | t_first_exec = (t2 - t1) * 1000 69 | t_second_exec = (t3 - t2) * 1000 70 | print(t_first_exec) 71 | print(t_second_exec) 72 | assert t_first_exec > 4000 73 | assert t_second_exec < 4000 74 | 75 | 76 | def test_skip_args(): 77 | t1 = time.time() 78 | asyncio.get_event_loop().run_until_complete(TestClassFunc.skip_arg_func(5, 4)) 79 | t2 = time.time() 80 | asyncio.get_event_loop().run_until_complete(TestClassFunc.skip_arg_func(6, 4)) 81 | t3 = time.time() 82 | t_first_exec = (t2 - t1) * 1000 83 | t_second_exec = (t3 - t2) * 1000 84 | print(t_first_exec) 85 | print(t_second_exec) 86 | assert t_first_exec > 4000 87 | assert t_second_exec < 4000 88 | 89 | 90 | def test_cache_refreshing_lru(): 91 | t1 = timeit( 92 | "asyncio.get_event_loop().run_until_complete(TestClassFunc().obj_func(1))", 93 | globals=globals(), 94 | number=1, 95 | ) 96 | t2 = timeit( 97 | "asyncio.get_event_loop().run_until_complete(TestClassFunc().obj_func(1))", 98 | globals=globals(), 99 | number=1, 100 | ) 101 | t3 = timeit( 102 | "asyncio.get_event_loop().run_until_complete(TestClassFunc().obj_func(1, use_cache=False))", 103 | globals=globals(), 104 | number=1, 105 | ) 106 | 107 | assert t1 > t2 108 | assert t1 - t3 <= 0.1 109 | 110 | 111 | def test_cache_clear(): 112 | # print("call function. Cache miss.") 113 | t1 = time.time() 114 | asyncio.get_event_loop().run_until_complete(cache_clear_fn(1)) 115 | t2 = time.time() 116 | # print("call function again. Cache hit") 117 | asyncio.get_event_loop().run_until_complete(cache_clear_fn(1)) 118 | t3 = time.time() 119 | cache_clear_fn.cache_clear() 120 | # print("Call cache_clear() to clear the cache.") 121 | asyncio.get_event_loop().run_until_complete(cache_clear_fn(1)) 122 | t4 = time.time() 123 | # print("call function third time. Cache miss)") 124 | 125 | assert t2 - t1 > 1, t2 - t1 # Cache miss 126 | assert t3 - t2 < 1, t3 - t2 # Cache hit 127 | assert t4 - t3 > 1, t4 - t3 # Cache miss 128 | 129 | 130 | 131 | if __name__ == "__main__": 132 | test() 133 | test_obj_fn() 134 | test_class_fn() 135 | test_skip_args() 136 | test_cache_refreshing_lru() 137 | test_cache_clear() 138 | -------------------------------------------------------------------------------- /tests/ttl_test.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | from timeit import timeit 4 | 5 | from cache import AsyncTTL 6 | 7 | 8 | @AsyncTTL(time_to_live=60) 9 | async def long_expiration_fn(wait: int): 10 | await asyncio.sleep(wait) 11 | return wait 12 | 13 | 14 | @AsyncTTL(time_to_live=5) 15 | async def short_expiration_fn(wait: int): 16 | await asyncio.sleep(wait) 17 | return wait 18 | 19 | 20 | @AsyncTTL(time_to_live=3) 21 | async def short_cleanup_fn(wait: int): 22 | await asyncio.sleep(wait) 23 | return wait 24 | 25 | @AsyncTTL(time_to_live=3) 26 | async def cache_clear_fn(wait: int): 27 | await asyncio.sleep(wait) 28 | return wait 29 | 30 | 31 | def cache_hit_test(): 32 | t1 = time.time() 33 | asyncio.get_event_loop().run_until_complete(long_expiration_fn(4)) 34 | t2 = time.time() 35 | asyncio.get_event_loop().run_until_complete(long_expiration_fn(4)) 36 | t3 = time.time() 37 | t_first_exec = (t2 - t1) * 1000 38 | t_second_exec = (t3 - t2) * 1000 39 | print(t_first_exec) 40 | print(t_second_exec) 41 | assert t_first_exec > 4000 42 | assert t_second_exec < 4000 43 | 44 | 45 | def cache_expiration_test(): 46 | t1 = time.time() 47 | asyncio.get_event_loop().run_until_complete(short_expiration_fn(1)) 48 | t2 = time.time() 49 | asyncio.get_event_loop().run_until_complete(short_expiration_fn(1)) 50 | t3 = time.time() 51 | time.sleep(5) 52 | t4 = time.time() 53 | asyncio.get_event_loop().run_until_complete(short_expiration_fn(1)) 54 | t5 = time.time() 55 | t_first_exec = (t2 - t1) * 1000 56 | t_second_exec = (t3 - t2) * 1000 57 | t_third_exec = (t5 - t4) * 1000 58 | print(t_first_exec) 59 | print(t_second_exec) 60 | print(t_third_exec) 61 | assert t_first_exec > 1000 62 | assert t_second_exec < 1000 63 | assert t_third_exec > 1000 64 | 65 | 66 | def test_cache_refreshing_ttl(): 67 | t1 = timeit('asyncio.get_event_loop().run_until_complete(short_cleanup_fn(1))', 68 | globals=globals(), number=1) 69 | t2 = timeit('asyncio.get_event_loop().run_until_complete(short_cleanup_fn(1))', 70 | globals=globals(), number=1) 71 | t3 = timeit('asyncio.get_event_loop().run_until_complete(short_cleanup_fn(1, use_cache=False))', 72 | globals=globals(), number=1) 73 | 74 | assert t1 > t2 75 | assert t1 - t3 <= 0.1 76 | 77 | def cache_clear_test(): 78 | # print("call function. Cache miss.") 79 | t1 = time.time() 80 | asyncio.get_event_loop().run_until_complete(cache_clear_fn(1)) 81 | t2 = time.time() 82 | # print("call function again. Cache hit") 83 | asyncio.get_event_loop().run_until_complete(cache_clear_fn(1)) 84 | t3 = time.time() 85 | cache_clear_fn.cache_clear() 86 | # print("Call cache_clear() to clear the cache.") 87 | asyncio.get_event_loop().run_until_complete(cache_clear_fn(1)) 88 | t4 = time.time() 89 | # print("call function third time. Cache miss)") 90 | 91 | assert t2 - t1 > 1, t2 - t1 # Cache miss 92 | assert t3 - t2 < 1, t3 - t2 # Cache hit 93 | assert t4 - t3 > 1, t4 - t3 # Cache miss 94 | 95 | 96 | if __name__ == "__main__": 97 | cache_hit_test() 98 | cache_expiration_test() 99 | test_cache_refreshing_ttl() 100 | cache_clear_test() 101 | --------------------------------------------------------------------------------