├── .gitignore ├── .travis.yml ├── CHANGELOG ├── LICENSE ├── MANIFEST.in ├── README.md ├── bin └── test_travis.sh ├── build.sh ├── fastcache ├── __init__.py ├── benchmark.py └── tests │ ├── __init__.py │ ├── test_clrucache.py │ ├── test_functools.py │ └── test_thread.py ├── meta.yaml ├── scripts └── threadsafety.py ├── setup.cfg ├── setup.py └── src └── _lrucache.c /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | eggs/ 15 | lib/ 16 | lib64/ 17 | parts/ 18 | sdist/ 19 | var/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | 24 | # Installer logs 25 | pip-log.txt 26 | pip-delete-this-directory.txt 27 | 28 | # Unit test / coverage reports 29 | htmlcov/ 30 | .tox/ 31 | .coverage 32 | .cache 33 | nosetests.xml 34 | coverage.xml 35 | 36 | # Translations 37 | *.mo 38 | 39 | # Mr Developer 40 | .mr.developer.cfg 41 | .project 42 | .pydevproject 43 | 44 | # Rope 45 | .ropeproject 46 | 47 | # Django stuff: 48 | *.log 49 | *.pot 50 | 51 | # Sphinx documentation 52 | docs/_build/ 53 | 54 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | matrix: 3 | include: 4 | - arch: arm64 5 | python: 2.7 6 | - arch: amd64 7 | python: 2.7 8 | - arch: arm64 9 | python: 3.4 10 | - arch: amd64 11 | python: 3.4 12 | - arch: arm64 13 | python: 3.5 14 | - arch: amd64 15 | python: 3.5 16 | - arch: arm64 17 | python: 3.6 18 | - arch: amd64 19 | python: 3.6 20 | - arch: arm64 21 | python: 3.7 22 | - arch: amd64 23 | python: 3.7 24 | 25 | install: python setup.py install 26 | 27 | script: bash bin/test_travis.sh 28 | -------------------------------------------------------------------------------- 
/CHANGELOG: -------------------------------------------------------------------------------- 1 | *1.0.2* 2 | - use pytest for testing 3 | - Bug fix for windows compatibility 4 | 5 | *1.0.1* 6 | - better error checking so fastcache now plays well with signals. There is a performance hit for this. Next Release should handle this better. 7 | 8 | *1.0.0* 9 | 10 | - clru_cache now supports dynamic attributes. 11 | - (c)lru_cache is now threadsafe via custom reentrant locks. 12 | 13 | *0.4.3* 14 | 15 | - Fixed bug in hash computations which resulted in `stack overflow`. The appropriate error (RuntimeError) is now returned 16 | 17 | *0.4.2* 18 | 19 | - The 'state' argument to clru_cache can now be a list or a dict 20 | - Slight performance improvemants 21 | - Fixed compiler warnings for Python 2 builds 22 | - Use setuptools by default. The environment variable USE_DISTUTILS=True 23 | forces the use of distutils 24 | 25 | *0.4.0* 26 | 27 | API change: 28 | 29 | Default behavior of fastcache is changed to raise TypeError on 30 | unhashable arguments to be 100% consistent with stdlib. 
31 | 32 | Introduce a new argument 'unhashable' which controls how fastcache 33 | responds to unhashable arguments: 34 | *'error' (default) - raise TypeError 35 | *'warning' - raise UserWarning and call decorated function with args 36 | *'ignore' - call decorated function 37 | 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Peter Brady 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | fastcache 2 | ========= 3 | [![Gitter](https://badges.gitter.im/Join Chat.svg)](https://gitter.im/pbrady/fastcache?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 4 | 5 | C implementation of Python 3 lru_cache for Python 2.6, 2.7, 3.2, 3.3, 3.4 6 | 7 | Passes all tests in the standard library for functools.lru_cache. 8 | 9 | Obeys same API as Python 3.3/3.4 functools.lru_cache with 2 enhancements: 10 | 11 | 1. An additional argument `state` may be supplied which must be a `list` or `dict`. This allows one to safely cache functions for which the result depends on some context which is not a part of the function call signature. 12 | 2. An additional argument `unhashable` may be supplied to control how the cached function responds to unhashable arguments. The options are: 13 | * "error" (default) - Raise a `TypeError` 14 | * "warning" - Raise a `UserWarning` and call the wrapped function with the supplied arguments. 15 | * "ignore" - Just call the wrapped function with the supplied arguments. 16 | 17 | Performance Warning 18 | ------- 19 | As of Python 3.5, the CPython interpreter implements `functools.lru_cache` in C. It is generally faster than this library 20 | due to its use of a more performant internal API for dictionaries (and perhaps other reasons). 
Therefore this library 21 | is only recommended for Python 2.6-3.4 22 | 23 | Install 24 | ------- 25 | 26 | Via [pip](https://pypi.python.org/pypi/fastcache): 27 | 28 | pip install fastcache 29 | 30 | Manually : 31 | 32 | git clone https://github.com/pbrady/fastcache.git 33 | cd fastcache 34 | python setup.py install 35 | 36 | Via [conda](http://conda.pydata.org/docs/index.html) : 37 | 38 | * build latest and greatest github version 39 | 40 | ```bash 41 | git clone https://github.com/pbrady/fastcache.git 42 | conda-build fastcache 43 | conda install --use-local fastcache 44 | ``` 45 | 46 | * build latest released version on pypi 47 | ```bash 48 | git clone https://github.com/conda/conda-recipes.git 49 | conda-build conda-recipes/fastcache 50 | conda install --use-local fastcache 51 | ``` 52 | 53 | Test 54 | ---- 55 | 56 | ```python 57 | >>> import fastcache 58 | >>> fastcache.test() 59 | ``` 60 | 61 | Travis CI status : [![alt text][2]][1] 62 | 63 | [2]: https://travis-ci.org/pbrady/fastcache.svg?branch=master (Travis build status) 64 | [1]: http://travis-ci.org/pbrady/fastcache 65 | 66 | Tests include the official suite of tests from Python standard library for functools.lru_cache 67 | 68 | Use 69 | --- 70 | 71 | >>> from fastcache import clru_cache, __version__ 72 | >>> __version__ 73 | '0.3.3' 74 | >>> @clru_cache(maxsize=325, typed=False) 75 | ... def fib(n): 76 | ... """Terrible Fibonacci number generator.""" 77 | ... return n if n < 2 else fib(n-1) + fib(n-2) 78 | ... 79 | >>> fib(300) 80 | 222232244629420445529739893461909967206666939096499764990979600 81 | >>> fib.cache_info() 82 | CacheInfo(hits=298, misses=301, maxsize=325, currsize=301) 83 | >>> print(fib.__doc__) 84 | Terrible Fibonacci number generator. 
85 | >>> fib.cache_clear() 86 | >>> fib.cache_info() 87 | CacheInfo(hits=0, misses=0, maxsize=325, currsize=0) 88 | >>> fib.__wrapped__(300) 89 | 222232244629420445529739893461909967206666939096499764990979600 90 | 91 | 92 | Speed 93 | ----- 94 | 95 | The speed up vs `lru_cache` provided by `functools` in 3.3 or 3.4 is 10x-30x depending on the function signature and whether one is comparing with 3.3 or 3.4. A sample run of the benchmarking suite for 3.3 is 96 | 97 | >>> import sys 98 | >>> sys.version_info 99 | sys.version_info(major=3, minor=3, micro=5, releaselevel='final', serial=0) 100 | >>> from fastcache import benchmark 101 | >>> benchmark.run() 102 | Test Suite 1 : 103 | 104 | Primarily tests cost of function call, hashing and cache hits. 105 | Benchmark script based on 106 | http://bugs.python.org/file28400/lru_cache_bench.py 107 | 108 | function call speed up 109 | untyped(i) 11.31, typed(i) 31.20 110 | untyped("spam", i) 16.71, typed("spam", i) 27.50 111 | untyped("spam", "spam", i) 14.24, typed("spam", "spam", i) 22.62 112 | untyped(a=i) 13.25, typed(a=i) 23.92 113 | untyped(a="spam", b=i) 10.51, typed(a="spam", b=i) 18.58 114 | untyped(a="spam", b="spam", c=i) 9.34, typed(a="spam", b="spam", c=i) 16.40 115 | 116 | min mean max 117 | untyped 9.337 12.559 16.706 118 | typed 16.398 23.368 31.197 119 | 120 | 121 | Test Suite 2 : 122 | 123 | Tests millions of misses and millions of hits to quantify 124 | cache behavior when cache is full. 125 | 126 | function call speed up 127 | untyped(i, j, a="spammy") 8.94, typed(i, j, a="spammy") 14.09 128 | 129 | A sample run of the benchmarking suite for 3.4 is 130 | 131 | >>> import sys 132 | >>> sys.version_info 133 | sys.version_info(major=3, minor=4, micro=1, releaselevel='final', serial=0) 134 | >>> from fastcache import benchmark 135 | >>> benchmark.run() 136 | Test Suite 1 : 137 | 138 | Primarily tests cost of function call, hashing and cache hits. 
139 | Benchmark script based on 140 | http://bugs.python.org/file28400/lru_cache_bench.py 141 | 142 | function call speed up 143 | untyped(i) 9.74, typed(i) 23.31 144 | untyped("spam", i) 15.21, typed("spam", i) 20.82 145 | untyped("spam", "spam", i) 13.35, typed("spam", "spam", i) 17.43 146 | untyped(a=i) 12.27, typed(a=i) 19.04 147 | untyped(a="spam", b=i) 9.81, typed(a="spam", b=i) 14.25 148 | untyped(a="spam", b="spam", c=i) 7.77, typed(a="spam", b="spam", c=i) 11.61 149 | 150 | min mean max 151 | untyped 7.770 11.359 15.210 152 | typed 11.608 17.743 23.311 153 | 154 | 155 | Test Suite 2 : 156 | 157 | Tests millions of misses and millions of hits to quantify 158 | cache behavior when cache is full. 159 | 160 | function call speed up 161 | untyped(i, j, a="spammy") 8.27, typed(i, j, a="spammy") 11.18 162 | -------------------------------------------------------------------------------- /bin/test_travis.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | # Exit on error 4 | set -e 5 | # Echo each command 6 | set -x 7 | 8 | mkdir -p empty 9 | cd empty 10 | cat << EOF | python 11 | import fastcache 12 | if not fastcache.test(): 13 | raise Exception('Tests failed') 14 | EOF 15 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | $PYTHON setup.py install 3 | -------------------------------------------------------------------------------- /fastcache/__init__.py: -------------------------------------------------------------------------------- 1 | """ C implementation of LRU caching. 2 | 3 | Provides 2 LRU caching function decorators: 4 | 5 | clru_cache - built-in (faster) 6 | >>> from fastcache import clru_cache 7 | >>> @clru_cache(maxsize=128,typed=False) 8 | ... def f(a, b): 9 | ... return (a, ) + (b, ) 10 | ... 
11 | >>> type(f) 12 | >>> 13 | 14 | lru_cache - python wrapper around clru_cache (slower) 15 | >>> from fastcache import lru_cache 16 | >>> @lru_cache(maxsize=128,typed=False) 17 | ... def f(a, b): 18 | ... return (a, ) + (b, ) 19 | ... 20 | >>> type(f) 21 | >>> 22 | """ 23 | 24 | __version__ = "1.1.0" 25 | 26 | 27 | from ._lrucache import clru_cache 28 | from functools import update_wrapper 29 | 30 | def lru_cache(maxsize=128, typed=False, state=None, unhashable='error'): 31 | """Least-recently-used cache decorator. 32 | 33 | If *maxsize* is set to None, the LRU features are disabled and 34 | the cache can grow without bound. 35 | 36 | If *typed* is True, arguments of different types will be cached 37 | separately. For example, f(3.0) and f(3) will be treated as distinct 38 | calls with distinct results. 39 | 40 | If *state* is a list or dict, the items will be incorporated into 41 | argument hash. 42 | 43 | The result of calling the cached function with unhashable (mutable) 44 | arguments depends on the value of *unhashable*: 45 | 46 | If *unhashable* is 'error', a TypeError will be raised. 47 | 48 | If *unhashable* is 'warning', a UserWarning will be raised, and 49 | the wrapped function will be called with the supplied arguments. 50 | A miss will be recorded in the cache statistics. 51 | 52 | If *unhashable* is 'ignore', the wrapped function will be called 53 | with the supplied arguments. A miss will will be recorded in 54 | the cache statistics. 55 | 56 | View the cache statistics named tuple (hits, misses, maxsize, currsize) 57 | with f.cache_info(). Clear the cache and statistics with 58 | f.cache_clear(). Access the underlying function with f.__wrapped__. 
59 | 60 | See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used 61 | 62 | """ 63 | def func_wrapper(func): 64 | _cached_func = clru_cache(maxsize, typed, state, unhashable)(func) 65 | 66 | def wrapper(*args, **kwargs): 67 | return _cached_func(*args, **kwargs) 68 | 69 | wrapper.__wrapped__ = func 70 | wrapper.cache_info = _cached_func.cache_info 71 | wrapper.cache_clear = _cached_func.cache_clear 72 | 73 | return update_wrapper(wrapper,func) 74 | 75 | return func_wrapper 76 | 77 | def test(*args): 78 | import pytest, os 79 | return not pytest.main([os.path.dirname(os.path.abspath(__file__))] + 80 | list(args)) 81 | -------------------------------------------------------------------------------- /fastcache/benchmark.py: -------------------------------------------------------------------------------- 1 | """ Benchmark against functools.lru_cache. 2 | 3 | Benchmark script from http://bugs.python.org/file28400/lru_cache_bench.py 4 | with a few modifications. 5 | 6 | Not available for Py < 3.3. 
7 | """ 8 | from __future__ import print_function 9 | 10 | import sys 11 | 12 | if sys.version_info[:2] >= (3, 3): 13 | 14 | import functools 15 | import fastcache 16 | import timeit 17 | from itertools import count 18 | 19 | def _untyped(*args, **kwargs): 20 | pass 21 | 22 | def _typed(*args, **kwargs): 23 | pass 24 | 25 | _py_untyped = functools.lru_cache(maxsize=100)(_untyped) 26 | _c_untyped = fastcache.clru_cache(maxsize=100)(_untyped) 27 | 28 | _py_typed = functools.lru_cache(maxsize=100, typed=True)(_typed) 29 | _c_typed = fastcache.clru_cache(maxsize=100, typed=True)(_typed) 30 | 31 | def _arg_gen(min=1, max=100, repeat=3): 32 | for i in range(min, max): 33 | for r in range(repeat): 34 | for j, k in zip(range(i), count(i, -1)): 35 | yield j, k 36 | 37 | def _print_speedup(results): 38 | print('') 39 | print('{:9s} {:>6s} {:>6s} {:>6s}'.format('','min', 'mean', 'max')) 40 | def print_stats(name,off0, off1): 41 | arr = [py[0]/c[0] for py, c in zip(results[off0::4], 42 | results[off1::4])] 43 | print('{:9s} {:6.3f} {:6.3f} {:6.3f}'.format(name, 44 | min(arr), 45 | sum(arr)/len(arr), 46 | max(arr))) 47 | print_stats('untyped', 0, 1) 48 | print_stats('typed', 2, 3) 49 | 50 | def _print_single_speedup(res=None, init=False): 51 | if init: 52 | print('{:29s} {:>8s}'.format('function call', 'speed up')) 53 | else: 54 | print('{:32s} {:5.2f}'.format(res[0][1].split('_')[-1], 55 | res[0][0]/res[1][0]), end = ', ') 56 | print('{:32s} {:5.2f}'.format(res[2][1].split('_')[-1], 57 | res[2][0]/res[3][0])) 58 | def run(): 59 | 60 | print("Test Suite 1 : ", end='\n\n') 61 | print("Primarily tests cost of function call, hashing and cache hits.") 62 | print("Benchmark script based on") 63 | print(" http://bugs.python.org/file28400/lru_cache_bench.py", 64 | end = '\n\n') 65 | 66 | _print_single_speedup(init=True) 67 | 68 | results = [] 69 | args = ['i', '"spam", i', '"spam", "spam", i', 70 | 'a=i', 'a="spam", b=i', 'a="spam", b="spam", c=i'] 71 | for a in args: 72 | for f in 
['_py_untyped', '_c_untyped', '_py_typed', '_c_typed']: 73 | s = '%s(%s)' % (f, a) 74 | t = min(timeit.repeat(''' 75 | for i in range(100): 76 | {} 77 | '''.format(s), 78 | setup='from fastcache.benchmark import %s' % f, 79 | repeat=10, number=1000)) 80 | results.append([t, s]) 81 | _print_single_speedup(results[-4:]) 82 | 83 | _print_speedup(results) 84 | 85 | print("\n\nTest Suite 2 :", end='\n\n') 86 | print("Tests millions of misses and millions of hits to quantify") 87 | print("cache behavior when cache is full.", end='\n\n') 88 | setup = "from fastcache.benchmark import {}\n" + \ 89 | "from fastcache.benchmark import _arg_gen" 90 | 91 | results = [] 92 | for f in ['_py_untyped', '_c_untyped', '_py_typed', '_c_typed']: 93 | s = '%s(i, j, a="spammy")' % f 94 | t = min(timeit.repeat(''' 95 | for i, j in _arg_gen(): 96 | %s 97 | ''' % s, setup=setup.format(f), 98 | repeat=3, number=100)) 99 | results.append([t, s]) 100 | 101 | _print_single_speedup(init=True) 102 | _print_single_speedup(results) 103 | -------------------------------------------------------------------------------- /fastcache/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pbrady/fastcache/6b7bbed5076f6ea66e664e365bf40b0f94e21bef/fastcache/tests/__init__.py -------------------------------------------------------------------------------- /fastcache/tests/test_clrucache.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import fastcache 3 | import itertools 4 | import warnings 5 | 6 | try: 7 | itertools.count(start=0, step=-1) 8 | count = itertools.count 9 | except TypeError: 10 | def count(start=0, step=1): 11 | i = step-1 12 | for j, c in enumerate(itertools.count(start)): 13 | yield c + i*j 14 | 15 | def arg_gen(min=1, max=100, repeat=3): 16 | for i in range(min, max): 17 | for r in range(repeat): 18 | for j, k in zip(range(i), count(i, -1)): 19 | yield j, 
k 20 | 21 | @pytest.fixture(scope='module', params=[fastcache.clru_cache, 22 | fastcache.lru_cache]) 23 | def cache(request): 24 | param = request.param 25 | return param 26 | 27 | 28 | def test_function_attributes(cache): 29 | """ Simple tests for attribute preservation. """ 30 | 31 | def tfunc(a, b): 32 | """test function docstring.""" 33 | return a + b 34 | cfunc = cache()(tfunc) 35 | assert cfunc.__doc__ == tfunc.__doc__ 36 | assert hasattr(cfunc, 'cache_info') 37 | assert hasattr(cfunc, 'cache_clear') 38 | assert hasattr(cfunc, '__wrapped__') 39 | 40 | 41 | def test_function_cache(cache): 42 | """ Test that cache returns appropriate values. """ 43 | 44 | cat_tuples = [True] 45 | 46 | def tfunc(a, b, c=None): 47 | if (cat_tuples[0] == True): 48 | return (a, b, c) + (c, a) 49 | else: 50 | return 2*a-10*b 51 | 52 | cfunc = cache(maxsize=100, state=cat_tuples)(tfunc) 53 | 54 | for i, j in arg_gen(max=75, repeat=5): 55 | assert cfunc(i, j) == tfunc(i, j) 56 | 57 | # change extra state 58 | cat_tuples[0] = False 59 | 60 | for i, j in arg_gen(max=75, repeat=5): 61 | assert cfunc(i, j) == tfunc(i, j) 62 | 63 | # test dict state 64 | d = {} 65 | cfunc = cache(maxsize=100, state=d)(tfunc) 66 | cfunc(1, 2) 67 | assert cfunc.cache_info().misses == 1 68 | d['a'] = 42 69 | cfunc(1, 2) 70 | assert cfunc.cache_info().misses == 2 71 | cfunc(1, 2) 72 | assert cfunc.cache_info().misses == 2 73 | assert cfunc.cache_info().hits == 1 74 | d.clear() 75 | cfunc(1, 2) 76 | assert cfunc.cache_info().misses == 2 77 | assert cfunc.cache_info().hits == 2 78 | d['a'] = 44 79 | cfunc(1, 2) 80 | assert cfunc.cache_info().misses == 3 81 | 82 | def test_memory_leaks(cache): 83 | """ Longer running test to check for memory leaks. 
""" 84 | 85 | def tfunc(a, b, c): 86 | return (a-1, 2*c) + (10*b-1, a*b, a*b+c) 87 | 88 | cfunc = cache(maxsize=2000)(tfunc) 89 | 90 | for i, j in arg_gen(max=1500, repeat=5): 91 | assert cfunc(i, j, c=i-j) == tfunc(i, j, c=i-j) 92 | 93 | def test_warn_unhashable_args(cache, recwarn): 94 | """ Function arguments must be hashable. """ 95 | 96 | @cache(unhashable='warning') 97 | def f(a, b): 98 | return (a, ) + (b, ) 99 | 100 | with warnings.catch_warnings() : 101 | warnings.simplefilter("always") 102 | assert f([1], 2) == f.__wrapped__([1], 2) 103 | w = recwarn.pop(UserWarning) 104 | assert issubclass(w.category, UserWarning) 105 | assert "Unhashable arguments cannot be cached" in str(w.message) 106 | assert w.filename 107 | assert w.lineno 108 | 109 | 110 | def test_ignore_unhashable_args(cache): 111 | """ Function arguments must be hashable. """ 112 | 113 | @cache(unhashable='ignore') 114 | def f(a, b): 115 | return (a, ) + (b, ) 116 | 117 | assert f([1], 2) == f.__wrapped__([1], 2) 118 | 119 | def test_default_unhashable_args(cache): 120 | @cache() 121 | def f(a, b): 122 | return (a, ) + (b, ) 123 | 124 | with pytest.raises(TypeError): 125 | f([1], 2) 126 | 127 | @cache(unhashable='error') 128 | def f(a, b): 129 | pass 130 | with pytest.raises(TypeError): 131 | f([1], 2) 132 | 133 | def test_state_type(cache): 134 | """ State must be a list or dict. """ 135 | f = lambda x : x 136 | with pytest.raises(TypeError): 137 | cache(state=(1, ))(f) 138 | with pytest.raises(TypeError): 139 | cache(state=-1)(f) 140 | 141 | def test_typed_False(cache): 142 | """ Verify typed==False. 
""" 143 | 144 | @cache(typed=False) 145 | def cfunc(a, b): 146 | return a+b 147 | 148 | # initialize cache with integer args 149 | cfunc(1, 2) 150 | assert cfunc(1, 2) is cfunc(1.0, 2) 151 | assert cfunc(1, 2) is cfunc(1, 2.0) 152 | # test keywords 153 | cfunc(1, b=2) 154 | assert cfunc(1,b=2) is cfunc(1.0,b=2) 155 | assert cfunc(1,b=2) is cfunc(1,b=2.0) 156 | 157 | def test_typed_True(cache): 158 | """ Verify typed==True. """ 159 | 160 | @cache(typed=True) 161 | def cfunc(a, b): 162 | return a+b 163 | 164 | assert cfunc(1, 2) is not cfunc(1.0, 2) 165 | assert cfunc(1, 2) is not cfunc(1, 2.0) 166 | # test keywords 167 | assert cfunc(1,b=2) is not cfunc(1.0,b=2) 168 | assert cfunc(1,b=2) is not cfunc(1,b=2.0) 169 | 170 | def test_dynamic_attribute(cache): 171 | f = lambda x : x 172 | cfunc = cache()(f) 173 | cfunc.new_attr = 5 174 | assert cfunc.new_attr == 5 175 | -------------------------------------------------------------------------------- /fastcache/tests/test_functools.py: -------------------------------------------------------------------------------- 1 | # Copied from python src Python-3.4.0/Lib/test/test_functools.py 2 | 3 | import abc 4 | import collections 5 | from itertools import permutations 6 | import pickle 7 | from random import choice 8 | import sys 9 | import unittest 10 | import fastcache 11 | import functools 12 | 13 | try: 14 | from functools import _CacheInfo 15 | except ImportError: 16 | _CacheInfo = collections.namedtuple("CacheInfo", 17 | ["hits", "misses", "maxsize", "currsize"]) 18 | 19 | class TestLRU(unittest.TestCase): 20 | 21 | def test_lru(self): 22 | def orig(x, y): 23 | return 3 * x + y 24 | f = fastcache.clru_cache(maxsize=20)(orig) 25 | hits, misses, maxsize, currsize = f.cache_info() 26 | self.assertEqual(maxsize, 20) 27 | self.assertEqual(currsize, 0) 28 | self.assertEqual(hits, 0) 29 | self.assertEqual(misses, 0) 30 | 31 | domain = range(5) 32 | for i in range(1000): 33 | x, y = choice(domain), choice(domain) 34 | actual = 
f(x, y) 35 | expected = orig(x, y) 36 | self.assertEqual(actual, expected) 37 | hits, misses, maxsize, currsize = f.cache_info() 38 | self.assertTrue(hits > misses) 39 | self.assertEqual(hits + misses, 1000) 40 | self.assertEqual(currsize, 20) 41 | 42 | f.cache_clear() # test clearing 43 | hits, misses, maxsize, currsize = f.cache_info() 44 | self.assertEqual(hits, 0) 45 | self.assertEqual(misses, 0) 46 | self.assertEqual(currsize, 0) 47 | f(x, y) 48 | hits, misses, maxsize, currsize = f.cache_info() 49 | self.assertEqual(hits, 0) 50 | self.assertEqual(misses, 1) 51 | self.assertEqual(currsize, 1) 52 | 53 | # Test bypassing the cache 54 | if hasattr(self, 'assertIs'): 55 | self.assertIs(f.__wrapped__, orig) 56 | f.__wrapped__(x, y) 57 | hits, misses, maxsize, currsize = f.cache_info() 58 | self.assertEqual(hits, 0) 59 | self.assertEqual(misses, 1) 60 | self.assertEqual(currsize, 1) 61 | 62 | # test size zero (which means "never-cache") 63 | @fastcache.clru_cache(0) 64 | def f(): 65 | #nonlocal f_cnt 66 | f_cnt[0] += 1 67 | return 20 68 | self.assertEqual(f.cache_info().maxsize, 0) 69 | f_cnt = [0] 70 | for i in range(5): 71 | self.assertEqual(f(), 20) 72 | self.assertEqual(f_cnt, [5]) 73 | hits, misses, maxsize, currsize = f.cache_info() 74 | self.assertEqual(hits, 0) 75 | self.assertEqual(misses, 5) 76 | self.assertEqual(currsize, 0) 77 | 78 | # test size one 79 | @fastcache.clru_cache(1) 80 | def f(): 81 | #nonlocal f_cnt 82 | f_cnt[0] += 1 83 | return 20 84 | self.assertEqual(f.cache_info().maxsize, 1) 85 | f_cnt[0] = 0 86 | for i in range(5): 87 | self.assertEqual(f(), 20) 88 | self.assertEqual(f_cnt, [1]) 89 | hits, misses, maxsize, currsize = f.cache_info() 90 | self.assertEqual(hits, 4) 91 | self.assertEqual(misses, 1) 92 | self.assertEqual(currsize, 1) 93 | 94 | # test size two 95 | @fastcache.clru_cache(2) 96 | def f(x): 97 | #nonlocal f_cnt 98 | f_cnt[0] += 1 99 | return x*10 100 | self.assertEqual(f.cache_info().maxsize, 2) 101 | f_cnt[0] = 0 102 | for x 
in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7: 103 | # * * * * 104 | self.assertEqual(f(x), x*10) 105 | self.assertEqual(f_cnt, [4]) 106 | hits, misses, maxsize, currsize = f.cache_info() 107 | self.assertEqual(hits, 12) 108 | self.assertEqual(misses, 4) 109 | self.assertEqual(currsize, 2) 110 | 111 | def test_lru_with_maxsize_none(self): 112 | @fastcache.clru_cache(maxsize=None) 113 | def fib(n): 114 | if n < 2: 115 | return n 116 | return fib(n-1) + fib(n-2) 117 | self.assertEqual([fib(n) for n in range(16)], 118 | [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]) 119 | self.assertEqual(fib.cache_info(), 120 | _CacheInfo(hits=28, misses=16, maxsize=None, currsize=16)) 121 | fib.cache_clear() 122 | self.assertEqual(fib.cache_info(), 123 | _CacheInfo(hits=0, misses=0, maxsize=None, currsize=0)) 124 | 125 | def test_lru_with_exceptions(self): 126 | # Verify that user_function exceptions get passed through without 127 | # creating a hard-to-read chained exception. 128 | # http://bugs.python.org/issue13177 129 | for maxsize in (None, 128): 130 | @fastcache.clru_cache(maxsize) 131 | def func(i): 132 | return 'abc'[i] 133 | self.assertEqual(func(0), 'a') 134 | try: 135 | with self.assertRaises(IndexError) as cm: 136 | func(15) 137 | # Does not have this attribute in Py2 138 | if hasattr(cm.exception,'__context__'): 139 | self.assertIsNone(cm.exception.__context__) 140 | # Verify that the previous exception did not result in a cached entry 141 | with self.assertRaises(IndexError): 142 | func(15) 143 | except TypeError: 144 | # py26 unittest wants assertRaises called with another arg 145 | if sys.version_info[:2] != (2, 6): 146 | raise 147 | else: 148 | pass 149 | 150 | def test_lru_with_types(self): 151 | for maxsize in (None, 128): 152 | @fastcache.clru_cache(maxsize=maxsize, typed=True) 153 | def square(x): 154 | return x * x 155 | self.assertEqual(square(3), 9) 156 | self.assertEqual(type(square(3)), type(9)) 157 | self.assertEqual(square(3.0), 9.0) 
158 | self.assertEqual(type(square(3.0)), type(9.0)) 159 | self.assertEqual(square(x=3), 9) 160 | self.assertEqual(type(square(x=3)), type(9)) 161 | self.assertEqual(square(x=3.0), 9.0) 162 | self.assertEqual(type(square(x=3.0)), type(9.0)) 163 | self.assertEqual(square.cache_info().hits, 4) 164 | self.assertEqual(square.cache_info().misses, 4) 165 | 166 | def test_lru_with_keyword_args(self): 167 | @fastcache.clru_cache() 168 | def fib(n): 169 | if n < 2: 170 | return n 171 | return fib(n=n-1) + fib(n=n-2) 172 | self.assertEqual( 173 | [fib(n=number) for number in range(16)], 174 | [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610] 175 | ) 176 | self.assertEqual(fib.cache_info(), 177 | _CacheInfo(hits=28, misses=16, maxsize=128, currsize=16)) 178 | fib.cache_clear() 179 | self.assertEqual(fib.cache_info(), 180 | _CacheInfo(hits=0, misses=0, maxsize=128, currsize=0)) 181 | 182 | def test_lru_with_keyword_args_maxsize_none(self): 183 | @fastcache.clru_cache(maxsize=None) 184 | def fib(n): 185 | if n < 2: 186 | return n 187 | return fib(n=n-1) + fib(n=n-2) 188 | self.assertEqual([fib(n=number) for number in range(16)], 189 | [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]) 190 | self.assertEqual(fib.cache_info(), 191 | _CacheInfo(hits=28, misses=16, maxsize=None, currsize=16)) 192 | fib.cache_clear() 193 | self.assertEqual(fib.cache_info(), 194 | _CacheInfo(hits=0, misses=0, maxsize=None, currsize=0)) 195 | 196 | def test_need_for_rlock(self): 197 | # This will deadlock on an LRU cache that uses a regular lock 198 | 199 | @fastcache.clru_cache(maxsize=10) 200 | def test_func(x): 201 | 'Used to demonstrate a reentrant lru_cache call within a single thread' 202 | return x 203 | 204 | class DoubleEq: 205 | 'Demonstrate a reentrant lru_cache call within a single thread' 206 | def __init__(self, x): 207 | self.x = x 208 | def __hash__(self): 209 | return self.x 210 | def __eq__(self, other): 211 | if self.x == 2: 212 | test_func(DoubleEq(1)) 213 | 
return self.x == other.x 214 | 215 | test_func(DoubleEq(1)) # Load the cache 216 | test_func(DoubleEq(2)) # Load the cache 217 | self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call 218 | DoubleEq(2)) # Verify the correct return value 219 | -------------------------------------------------------------------------------- /fastcache/tests/test_thread.py: -------------------------------------------------------------------------------- 1 | """The Python interpreter may switch between threads inbetween bytecode 2 | execution. Bytecode execution in fastcache may occur during: 3 | (1) Calls to make_key which will call the __hash__ methods of the args and 4 | (2) `PyDict_Get(Set)Item` calls rely on Python comparisons (i.e, __eq__) 5 | to determine if a match has been found 6 | 7 | A good test for threadsafety is then to cache a function which takes user 8 | defined Python objects that have __hash__ and __eq__ methods which live in 9 | Python land rather built-in land. 10 | 11 | The test should not only ensure that the correct result is acheived (and no 12 | segfaults) but also assess memory leaks. 13 | 14 | The thread switching interval can be altered using sys.setswitchinterval. 
15 | """ 16 | 17 | class PythonInt: 18 | """ Wrapper for an integer with python versions of __eq__ and __hash__.""" 19 | 20 | def __init__(self, val): 21 | self.value = val 22 | 23 | def __hash__(self): 24 | return hash(self.value) 25 | 26 | def __eq__(self, other): 27 | # only compare with other instances of PythonInt 28 | if not isinstance(other, PythonInt): 29 | raise TypeError("PythonInt cannot be compared to %s" % type(other)) 30 | return self.value == other.value 31 | 32 | from random import randint 33 | import unittest 34 | from fastcache import clru_cache as lru_cache 35 | from threading import Thread 36 | try: 37 | from sys import setswitchinterval as setinterval 38 | except ImportError: 39 | from sys import setcheckinterval 40 | def setinterval(i): 41 | return setcheckinterval(int(i)) 42 | 43 | 44 | def run_threads(threads): 45 | for t in threads: 46 | t.start() 47 | for t in threads: 48 | t.join() 49 | 50 | CACHE_SIZE=301 51 | FIB=CACHE_SIZE-1 52 | RAND_MIN, RAND_MAX = 1, 10 53 | 54 | @lru_cache(maxsize=CACHE_SIZE, typed=False) 55 | def fib(n): 56 | """Terrible Fibonacci number generator.""" 57 | v = n.value 58 | return v if v < 2 else fib(PythonInt(v-1)) + fib(PythonInt(v-2)) 59 | 60 | # establish correct result from single threaded exectution 61 | RESULT = fib(PythonInt(FIB)) 62 | 63 | def run_fib_with_clear(r): 64 | """ Run Fibonacci generator r times. """ 65 | for i in range(r): 66 | if randint(RAND_MIN, RAND_MAX) == RAND_MIN: 67 | fib.cache_clear() 68 | res = fib(PythonInt(FIB)) 69 | if RESULT != res: 70 | raise ValueError("Expected %d, Got %d" % (RESULT, res)) 71 | 72 | def run_fib_with_stats(r): 73 | """ Run Fibonacci generator r times. """ 74 | for i in range(r): 75 | res = fib(PythonInt(FIB)) 76 | if RESULT != res: 77 | raise ValueError("Expected %d, Got %d" % (RESULT, res)) 78 | 79 | 80 | class Test_Threading(unittest.TestCase): 81 | """ Threadsafety Tests for lru_cache. 
""" 82 | 83 | def setUp(self): 84 | setinterval(1e-6) 85 | self.numthreads = 4 86 | self.repeat = 1000 87 | 88 | def test_thread_random_cache_clears(self): 89 | """ randomly clear the cache during calls to fib. """ 90 | 91 | threads = [Thread(target=run_fib_with_clear, args=(self.repeat, )) 92 | for _ in range(self.numthreads)] 93 | run_threads(threads) 94 | # if we have gotten this far no exceptions have been raised 95 | self.assertEqual(0, 0) 96 | 97 | def test_thread_cache_info(self): 98 | """ Run thread safety test to make sure the cache statistics 99 | are correct.""" 100 | fib.cache_clear() 101 | threads = [Thread(target=run_fib_with_stats, args=(self.repeat, )) 102 | for _ in range(self.numthreads)] 103 | run_threads(threads) 104 | 105 | hits, misses, maxsize, currsize = fib.cache_info() 106 | self.assertEqual(misses, CACHE_SIZE) 107 | self.assertEqual(currsize, CACHE_SIZE) 108 | -------------------------------------------------------------------------------- /meta.yaml: -------------------------------------------------------------------------------- 1 | package: 2 | name: fastcache 3 | version: 0.4.0 4 | 5 | source: 6 | git_url : https://github.com/pbrady/fastcache.git 7 | 8 | requirements: 9 | build: 10 | - python 11 | - setuptools 12 | 13 | run: 14 | - python 15 | 16 | test: 17 | # Python imports 18 | imports: 19 | - fastcache 20 | - fastcache.benchmark 21 | - fastcache.tests 22 | 23 | 24 | about: 25 | home: https://github.com/pbrady/fastcache.git 26 | license: MIT License 27 | summary: 'C implementation of Python 3 lru_cache' 28 | 29 | # See 30 | # http://docs.continuum.io/conda/build.html for 31 | # more information about meta.yaml 32 | -------------------------------------------------------------------------------- /scripts/threadsafety.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | """The Python interpreter may switch between threads inbetween bytecode 4 | execution. 
Bytecode execution in fastcache may occur during: 5 | (1) Calls to make_key which will call the __hash__ methods of the args and 6 | (2) `PyDict_Get(Set)Item` calls rely on Python comparisons (i.e., __eq__) 7 | to determine if a match has been found 8 | 9 | A good test for threadsafety is then to cache a function which takes user 10 | defined Python objects that have __hash__ and __eq__ methods which live in 11 | Python land rather than in built-in land. 12 | 13 | The test should not only ensure that the correct result is achieved (and no 14 | segfaults) but also assess memory leaks. 15 | 16 | The thread switching interval can be altered using sys.setswitchinterval. 17 | """ 18 | 19 | class PythonInt: 20 | """ Wrapper for an integer with python versions of __eq__ and __hash__.""" 21 | 22 | def __init__(self, val): 23 | self.value = val 24 | 25 | def __hash__(self): 26 | return hash(self.value) 27 | 28 | def __eq__(self, other): 29 | # only compare with other instances of PythonInt 30 | if not isinstance(other, PythonInt): 31 | raise TypeError("PythonInt cannot be compared to %s" % type(other)) 32 | return self.value == other.value 33 | 34 | from fastcache import clru_cache 35 | #from functools import lru_cache as clru_cache 36 | from random import randint 37 | 38 | CACHE_SIZE=301 39 | FIB=CACHE_SIZE-1 40 | RAND_MIN, RAND_MAX = 1, 10 41 | 42 | @clru_cache(maxsize=CACHE_SIZE, typed=False) 43 | def fib(n): 44 | """Terrible Fibonacci number generator.""" 45 | v = n.value 46 | return v if v < 2 else fib2(PythonInt(v-1)) + fib(PythonInt(v-2)) 47 | 48 | @clru_cache(maxsize=CACHE_SIZE, typed=False) 49 | def fib2(n): 50 | """Terrible Fibonacci number generator.""" 51 | v = n.value 52 | return v if v < 2 else fib(PythonInt(v-1)) + fib2(PythonInt(v-2)) 53 | 54 | 55 | 56 | # establish correct result from single threaded execution 57 | RESULT = fib(PythonInt(FIB)) 58 | 59 | def run_fib_with_clear(r): 60 | """ Run Fibonacci generator r times. 
""" 61 | for i in range(r): 62 | if randint(RAND_MIN, RAND_MAX) == RAND_MIN: 63 | fib.cache_clear() 64 | fib2.cache_clear() 65 | res = fib(PythonInt(FIB)) 66 | if RESULT != res: 67 | raise ValueError("Expected %d, Got %d" % (RESULT, res)) 68 | 69 | def run_fib_with_stats(r): 70 | """ Run Fibonacci generator r times. """ 71 | for i in range(r): 72 | res = fib(PythonInt(FIB)) 73 | if RESULT != res: 74 | raise ValueError("Expected %d, Got %d" % (RESULT, res)) 75 | 76 | from threading import Thread 77 | try: 78 | from sys import setswitchinterval as setinterval 79 | except ImportError: 80 | from sys import setcheckinterval 81 | def setinterval(i): 82 | return setcheckinterval(int(i)) 83 | 84 | 85 | def run_threads(threads): 86 | for t in threads: 87 | t.start() 88 | for t in threads: 89 | t.join() 90 | 91 | def run_test(n, r, i): 92 | """ Run thread safety test with n threads r times using interval i. """ 93 | setinterval(i) 94 | threads = [Thread(target=run_fib_with_clear, args=(r, )) for _ in range(n)] 95 | run_threads(threads) 96 | 97 | def run_test2(n, r, i): 98 | """ Run thread safety test to make sure the cache statistics 99 | are correct.""" 100 | fib.cache_clear() 101 | setinterval(i) 102 | threads = [Thread(target=run_fib_with_stats, args=(r, )) for _ in range(n)] 103 | run_threads(threads) 104 | 105 | hits, misses, maxsize, currsize = fib.cache_info() 106 | if misses != CACHE_SIZE//2+1: 107 | raise ValueError("Expected %d misses, Got %d" % 108 | (CACHE_SIZE//2+1, misses)) 109 | if maxsize != CACHE_SIZE: 110 | raise ValueError("Expected %d maxsize, Got %d" % 111 | (CACHE_SIZE, maxsize)) 112 | if currsize != CACHE_SIZE//2+1: 113 | raise ValueError("Expected %d currsize, Got %d" % 114 | (CACHE_SIZE//2+1, currsize)) 115 | 116 | import argparse 117 | 118 | def main(): 119 | parser = argparse.ArgumentParser(description='Run threadsafety test.') 120 | parser.add_argument('-n,--numthreads', 121 | type=int, 122 | default=2, 123 | dest='n', 124 | help='Number of 
threads.') 125 | parser.add_argument('-r,--repeat', 126 | type=int, 127 | default=5000, 128 | dest='r', 129 | help='Number of times to repeat test. Larger numbers '+ 130 | 'will make it easier to spot memory leaks.') 131 | parser.add_argument('-i,--interval', 132 | type=float, 133 | default=1e-6, 134 | dest='i', 135 | help='Time in seconds for sys.setswitchinterval.') 136 | 137 | run_test(**dict(vars(parser.parse_args()))) 138 | run_test2(**dict(vars(parser.parse_args()))) 139 | 140 | 141 | if __name__ == "__main__": 142 | main() 143 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from os import getenv 3 | 4 | # use setuptools by default as per the official advice at: 5 | # packaging.python.org/en/latest/current.html#packaging-tool-recommendations 6 | use_setuptools = True 7 | # set the environment variable USE_DISTUTILS=True to force the use of distutils 8 | use_distutils = getenv('USE_DISTUTILS') 9 | if use_distutils is not None: 10 | if use_distutils.lower() == 'true': 11 | use_setuptools = False 12 | else: 13 | print("Value {} for USE_DISTUTILS treated as False".\ 14 | format(use_distutils)) 15 | 16 | from distutils.command.build import build as _build 17 | 18 | if use_setuptools: 19 | try: 20 | from setuptools import setup, Extension 21 | from setuptools.command.install import install as _install 22 | from setuptools.command.build_ext import build_ext as _build_ext 23 | except ImportError: 24 | use_setuptools = False 25 | 26 | if not use_setuptools: 27 | from distutils.core import setup, Extension 28 | from distutils.command.install import install as _install 29 | from distutils.command.build_ext 
import build_ext as _build_ext 30 | 31 | vinfo = sys.version_info[:2] 32 | if vinfo < (2, 6): 33 | print("Fastcache currently requires Python 2.6 or newer. "+ 34 | "Python {}.{} detected".format(*vinfo)) 35 | sys.exit(-1) 36 | if vinfo[0] == 3 and vinfo < (3, 2): 37 | print("Fastcache currently requires Python 3.2 or newer. "+ 38 | "Python {}.{} detected".format(*vinfo)) 39 | sys.exit(-1) 40 | 41 | classifiers = [ 42 | 'License :: OSI Approved :: MIT License', 43 | 'Operating System :: OS Independent', 44 | 'Programming Language :: Python', 45 | 'Programming Language :: Python :: 2', 46 | 'Programming Language :: Python :: 2.6', 47 | 'Programming Language :: Python :: 2.7', 48 | 'Programming Language :: Python :: 3', 49 | 'Programming Language :: Python :: 3.2', 50 | 'Programming Language :: Python :: 3.3', 51 | 'Programming Language :: Python :: 3.4', 52 | 'Programming Language :: C', 53 | 54 | ] 55 | 56 | long_description = ''' 57 | C implementation of Python 3 functools.lru_cache. Provides speedup of 10-30x 58 | over standard library. Passes test suite from standard library for lru_cache. 59 | 60 | Provides 2 Least Recently Used caching function decorators: 61 | 62 | clru_cache - built-in (faster) 63 | >>> from fastcache import clru_cache, __version__ 64 | >>> __version__ 65 | '1.1.0' 66 | >>> @clru_cache(maxsize=325, typed=False) 67 | ... def fib(n): 68 | ... """Terrible Fibonacci number generator.""" 69 | ... return n if n < 2 else fib(n-1) + fib(n-2) 70 | ... 71 | >>> fib(300) 72 | 222232244629420445529739893461909967206666939096499764990979600 73 | >>> fib.cache_info() 74 | CacheInfo(hits=298, misses=301, maxsize=325, currsize=301) 75 | >>> print(fib.__doc__) 76 | Terrible Fibonacci number generator. 
77 | >>> fib.cache_clear() 78 | >>> fib.cache_info() 79 | CacheInfo(hits=0, misses=0, maxsize=325, currsize=0) 80 | >>> fib.__wrapped__(300) 81 | 222232244629420445529739893461909967206666939096499764990979600 82 | >>> type(fib) 83 | >>> 84 | 85 | lru_cache - python wrapper around clru_cache 86 | >>> from fastcache import lru_cache 87 | >>> @lru_cache(maxsize=128, typed=False) 88 | ... def f(a, b): 89 | ... pass 90 | ... 91 | >>> type(f) 92 | >>> 93 | 94 | 95 | (c)lru_cache(maxsize=128, typed=False, state=None, unhashable='error') 96 | 97 | Least-recently-used cache decorator. 98 | 99 | If *maxsize* is set to None, the LRU features are disabled and the cache 100 | can grow without bound. 101 | 102 | If *typed* is True, arguments of different types will be cached separately. 103 | For example, f(3.0) and f(3) will be treated as distinct calls with 104 | distinct results. 105 | 106 | If *state* is a list or dict, the items will be incorporated into the 107 | argument hash. 108 | 109 | The result of calling the cached function with unhashable (mutable) 110 | arguments depends on the value of *unhashable*: 111 | 112 | If *unhashable* is 'error', a TypeError will be raised. 113 | 114 | If *unhashable* is 'warning', a UserWarning will be raised, and 115 | the wrapped function will be called with the supplied arguments. 116 | A miss will be recorded in the cache statistics. 117 | 118 | If *unhashable* is 'ignore', the wrapped function will be called 119 | with the supplied arguments. A miss will will be recorded in 120 | the cache statistics. 121 | 122 | View the cache statistics named tuple (hits, misses, maxsize, currsize) 123 | with f.cache_info(). Clear the cache and statistics with f.cache_clear(). 124 | Access the underlying function with f.__wrapped__. 
125 | 126 | See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used 127 | ''' 128 | 129 | # the overall logic here is that by default macros can be only be passed if 130 | # one does 'python setup.py build_ext --define=MYMACRO' 131 | # If one attempts 'build' or 'install' with the --define flag, an error will 132 | # appear saying that --define is not an option 133 | # To get around this issue, we subclass build and install to capture --define 134 | # as well as build_ext which will use the --define arguments passed to 135 | # build or install 136 | 137 | define_opts = [] 138 | 139 | class BuildWithDefine(_build): 140 | 141 | _build_opts = _build.user_options 142 | user_options = [ 143 | ('define=', 'D', 144 | "C preprocessor macros to define"), 145 | ] 146 | user_options.extend(_build_opts) 147 | 148 | def initialize_options(self): 149 | _build.initialize_options(self) 150 | self.define = None 151 | 152 | def finalize_options(self): 153 | _build.finalize_options(self) 154 | # The argument parsing will result in self.define being a string, but 155 | # it has to be a list of 2-tuples. All the preprocessor symbols 156 | # specified by the 'define' option without an '=' will be set to '1'. 157 | # Multiple symbols can be separated with commas. 
158 | if self.define: 159 | defines = self.define.split(',') 160 | self.define = [(s.strip(), 1) if '=' not in s else 161 | tuple(ss.strip() for ss in s.split('=')) 162 | for s in defines] 163 | define_opts.extend(self.define) 164 | 165 | def run(self): 166 | _build.run(self) 167 | 168 | class InstallWithDefine(_install): 169 | 170 | _install_opts = _install.user_options 171 | user_options = [ 172 | ('define=', 'D', 173 | "C preprocessor macros to define"), 174 | ] 175 | user_options.extend(_install_opts) 176 | 177 | def initialize_options(self): 178 | _install.initialize_options(self) 179 | self.define = None 180 | 181 | def finalize_options(self): 182 | _install.finalize_options(self) 183 | # The argument parsing will result in self.define being a string, but 184 | # it has to be a list of 2-tuples. All the preprocessor symbols 185 | # specified by the 'define' option without an '=' will be set to '1'. 186 | # Multiple symbols can be separated with commas. 187 | if self.define: 188 | defines = self.define.split(',') 189 | self.define = [(s.strip(), 1) if '=' not in s else 190 | tuple(ss.strip() for ss in s.split('=')) 191 | for s in defines] 192 | define_opts.extend(self.define) 193 | 194 | def run(self): 195 | _install.run(self) 196 | 197 | class BuildExt(_build_ext): 198 | 199 | def initialize_options(self): 200 | _build_ext.initialize_options(self) 201 | 202 | def finalize_options(self): 203 | _build_ext.finalize_options(self) 204 | if self.define is not None: 205 | self.define.extend(define_opts) 206 | elif define_opts: 207 | self.define = define_opts 208 | 209 | def run(self): 210 | _build_ext.run(self) 211 | 212 | 213 | setup(name = "fastcache", 214 | version = "1.1.0", 215 | description = "C implementation of Python 3 functools.lru_cache", 216 | long_description = long_description, 217 | author = "Peter Brady", 218 | author_email = "petertbrady@gmail.com", 219 | license = "MIT", 220 | url = "https://github.com/pbrady/fastcache", 221 | packages = 
["fastcache", "fastcache.tests"], 222 | ext_modules = [Extension("fastcache._lrucache",["src/_lrucache.c"])], 223 | classifiers = classifiers, 224 | cmdclass={ 225 | 'build' : BuildWithDefine, 226 | 'install' : InstallWithDefine, 227 | 'build_ext' : BuildExt, 228 | } 229 | 230 | ) 231 | -------------------------------------------------------------------------------- /src/_lrucache.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "structmember.h" 3 | #include "pythread.h" 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | #if PY_MAJOR_VERSION == 2 10 | #define _PY2 11 | typedef long Py_hash_t; 12 | #endif 13 | 14 | #if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 2 15 | #define _PY32 16 | #endif 17 | 18 | #ifdef LLTRACE 19 | #define TBEGIN(x, line) printf("Beginning Trace of %s at lineno %d....", x); 20 | #define TEND(x) printf("Finished!\n") 21 | #else 22 | #define TBEGIN(x, line) 23 | #define TEND(x) 24 | #endif 25 | 26 | #ifdef WITH_THREAD 27 | #ifdef _PY2 28 | typedef int PyLockStatus; 29 | static PyLockStatus PY_LOCK_FAILURE = 0; 30 | static PyLockStatus PY_LOCK_ACQUIRED = 1; 31 | static PyLockStatus PY_LOCK_INTR = -999999; 32 | #endif 33 | 34 | static int 35 | rlock_acquire(PyThread_type_lock lock, long* rlock_owner, unsigned long* rlock_count) 36 | { 37 | long tid; 38 | PyLockStatus r; 39 | 40 | tid = PyThread_get_thread_ident(); 41 | if (*rlock_count > 0 && tid == (*rlock_owner)) { 42 | unsigned long count = *rlock_count + 1; 43 | if (count <= *rlock_count) { 44 | PyErr_SetString(PyExc_OverflowError, 45 | "Internal lock count overflowed"); 46 | return -1; 47 | } 48 | *rlock_count = count; 49 | return 1; 50 | } 51 | /* do/while loop from acquire_timed */ 52 | do { 53 | /* first a simple non-blocking try without releasing the GIL */ 54 | #ifdef _PY2 55 | r = PyThread_acquire_lock(lock, 0); 56 | #else 57 | r = PyThread_acquire_lock_timed(lock, 0, 0); 58 | #endif 59 | if (r == 
PY_LOCK_FAILURE) { 60 | Py_BEGIN_ALLOW_THREADS 61 | #ifdef _PY2 62 | r = PyThread_acquire_lock(lock, 1); 63 | #else 64 | r = PyThread_acquire_lock_timed(lock, -1, 1); 65 | #endif 66 | Py_END_ALLOW_THREADS 67 | } 68 | 69 | if (r == PY_LOCK_INTR) { 70 | /* Run signal handlers if we were interrupted. Propagate 71 | * exceptions from signal handlers, such as KeyboardInterrupt, by 72 | * passing up PY_LOCK_INTR. */ 73 | if (Py_MakePendingCalls() < 0) { 74 | return -1; 75 | } 76 | } 77 | } while (r == PY_LOCK_INTR); /* Retry if we were interrupted. */ 78 | if (r == PY_LOCK_ACQUIRED) { 79 | *rlock_owner = tid; 80 | *rlock_count = 1; 81 | return 1; 82 | } 83 | return -1; 84 | } 85 | 86 | static int 87 | rlock_release(PyThread_type_lock lock, long* rlock_owner, unsigned long* rlock_count) 88 | { 89 | long tid = PyThread_get_thread_ident(); 90 | 91 | if (*rlock_count == 0 || *rlock_owner != tid) { 92 | PyErr_SetString(PyExc_RuntimeError, 93 | "cannot release un-acquired lock"); 94 | return -1; 95 | } 96 | 97 | if (--(*rlock_count) == 0) { 98 | *rlock_owner = 0; 99 | PyThread_release_lock(lock); 100 | } 101 | return 1; 102 | } 103 | 104 | #define ACQUIRE_LOCK(obj) rlock_acquire((obj)->lock, &((obj)->rlock_owner), &((obj)->rlock_count)) 105 | #define RELEASE_LOCK(obj) rlock_release((obj)->lock, &((obj)->rlock_owner), &((obj)->rlock_count)) 106 | #define FREE_LOCK(obj) PyThread_free_lock((obj)->lock) 107 | #else 108 | #define ACQUIRE_LOCK(obj) 1 109 | #define RELEASE_LOCK(obj) 1 110 | #define FREE_LOCK(obj) 111 | #endif 112 | 113 | #define INC_RETURN(op) return Py_INCREF(op), (op) 114 | 115 | // THREAD SAFETY NOTES: 116 | // Python bytecode instructions are atomic but the GIL may switch between 117 | // threads in between instructions. 118 | // To make this threadsafe care needs to be taken one such that global objects 119 | // are left in a consistent between calls to python bytecode. 
120 | // The relevant global objects are co->root, and co->cache_dict 121 | // The stats are global as well but are modified in one line: stat++ 122 | 123 | /* HashedArgs -- internal *****************************************/ 124 | typedef struct { 125 | PyObject_HEAD 126 | PyObject *args; 127 | Py_hash_t hashvalue; 128 | } HashedArgs; 129 | 130 | 131 | static void 132 | HashedArgs_dealloc(HashedArgs *self) 133 | { 134 | Py_XDECREF(self->args); 135 | Py_TYPE(self)->tp_free(self); 136 | return; 137 | } 138 | 139 | 140 | /* return precomputed tuple hash for speed */ 141 | static Py_hash_t 142 | HashedArgs_hash(HashedArgs *self) 143 | { 144 | return self->hashvalue; 145 | } 146 | 147 | 148 | /* Delegate comparison to tuples */ 149 | static PyObject * 150 | HashedArgs_richcompare(PyObject *v, PyObject *w, int op) 151 | { 152 | HashedArgs *hv = (HashedArgs *) v; 153 | HashedArgs *hw = (HashedArgs *) w; 154 | PyObject *res = PyObject_RichCompare(hv->args, hw->args, op); 155 | return res; 156 | } 157 | 158 | 159 | static PyTypeObject HashedArgs_type = { 160 | PyVarObject_HEAD_INIT(NULL, 0) 161 | "_lrucache.HashedArgs", /* tp_name */ 162 | sizeof(HashedArgs), /* tp_basicsize */ 163 | 0, /* tp_itemsize */ 164 | (destructor)HashedArgs_dealloc, /* tp_dealloc */ 165 | 0, /* tp_print */ 166 | 0, /* tp_getattr */ 167 | 0, /* tp_setattr */ 168 | 0, /* tp_reserved */ 169 | 0, /* tp_repr */ 170 | 0, /* tp_as_number */ 171 | 0, /* tp_as_sequence */ 172 | 0, /* tp_as_mapping */ 173 | (hashfunc)HashedArgs_hash, /* tp_hash */ 174 | 0, /* tp_call */ 175 | 0, /* tp_str */ 176 | 0, /* tp_getattro */ 177 | 0, /* tp_setattro */ 178 | 0, /* tp_as_buffer */ 179 | Py_TPFLAGS_DEFAULT, /* tp_flags */ 180 | 0, /* tp_doc */ 181 | 0, /* tp_traverse */ 182 | 0, /* tp_clear */ 183 | HashedArgs_richcompare, /* tp_richcompare */ 184 | }; 185 | 186 | /*************************************************** 187 | End of HashedArgs 188 | ***************************************************/ 189 | 190 | 
/*********************************************************** 191 | circular doubly linked list 192 | ************************************************************/ 193 | typedef struct clist{ 194 | PyObject_HEAD 195 | struct clist *prev; 196 | struct clist *next; 197 | PyObject *key; 198 | PyObject *result; 199 | } clist; 200 | 201 | 202 | static void 203 | clist_dealloc(clist *co) 204 | { 205 | clist *prev = co->prev; 206 | clist *next = co->next; 207 | 208 | // THREAD SAFETY NOTES: 209 | // Calls to DECREF can result in bytecode and thread switching. 210 | // Do DECREF after the linked list has been modified and is in 211 | // an acceptable state. 212 | if(prev != co){ 213 | // adjust neighbor pointers 214 | prev->next = next; 215 | next->prev = prev; 216 | } 217 | co->prev = NULL; 218 | co->next = NULL; 219 | Py_XDECREF(co->key); 220 | Py_XDECREF(co->result); 221 | Py_TYPE(co)->tp_free(co); 222 | return; 223 | } 224 | 225 | 226 | static PyTypeObject clist_type = { 227 | PyVarObject_HEAD_INIT(NULL, 0) 228 | "_lrucache.clist", /* tp_name */ 229 | sizeof(clist), /* tp_basicsize */ 230 | 0, /* tp_itemsize */ 231 | (destructor)clist_dealloc, /* tp_dealloc */ 232 | 0, /* tp_print */ 233 | 0, /* tp_getattr */ 234 | 0, /* tp_setattr */ 235 | 0, /* tp_reserved */ 236 | 0, /* tp_repr */ 237 | 0, /* tp_as_number */ 238 | 0, /* tp_as_sequence */ 239 | 0, /* tp_as_mapping */ 240 | 0, /* tp_hash */ 241 | 0, /* tp_call */ 242 | 0, /* tp_str */ 243 | 0, /* tp_getattro */ 244 | 0, /* tp_setattro */ 245 | 0, /* tp_as_buffer */ 246 | Py_TPFLAGS_DEFAULT, /* tp_flags */ 247 | }; 248 | 249 | 250 | static int 251 | insert_first(clist *root, PyObject *key, PyObject *result){ 252 | // first element will be inserted at root->next 253 | clist *first = PyObject_New(clist, &clist_type); 254 | clist *oldfirst = root->next; 255 | 256 | if(!first) 257 | return -1; 258 | 259 | first->result = result; 260 | // This will be the only reference to key (HashedArgs), do not INCREF 261 | first->key = 
key; 262 | 263 | root->next = first; 264 | first->next = oldfirst; 265 | first->prev = root; 266 | oldfirst->prev = first; 267 | // INCREF result since it will be used by clist and returned to the caller 268 | return Py_INCREF(result), 1; 269 | } 270 | 271 | 272 | static PyObject * 273 | make_first(clist *root, clist *node){ 274 | // make node the first node and return new reference to result 275 | // save previous first position 276 | clist *oldfirst = root->next; 277 | 278 | if (oldfirst != node) { 279 | // first adjust pointers around node's position 280 | node->prev->next = node->next; 281 | node->next->prev = node->prev; 282 | 283 | root->next = node; 284 | node->next = oldfirst; 285 | node->prev = root; 286 | oldfirst->prev = node; 287 | } 288 | INC_RETURN(node->result); 289 | } 290 | 291 | /********************************************************** 292 | cachedobject is the actual function with the cached results 293 | ***********************************************************/ 294 | 295 | /* how will unhashable arguments be handled */ 296 | enum unhashable {FC_ERROR, FC_WARNING, FC_IGNORE, FC_FAIL}; 297 | 298 | 299 | typedef struct { 300 | PyObject_HEAD 301 | PyObject *fn ; // original function 302 | PyObject *func_module, *func_name, *func_qualname, *func_annotations; 303 | PyObject *func_dict; 304 | PyObject *cache_dict; 305 | PyObject *ex_state; 306 | int typed; 307 | enum unhashable err; 308 | PyObject *cinfo; // named tuple constructor 309 | Py_ssize_t maxsize, hits, misses; 310 | clist *root; 311 | // lock for cache access 312 | #ifdef WITH_THREAD 313 | PyThread_type_lock lock; 314 | long rlock_owner; 315 | unsigned long rlock_count; 316 | #endif 317 | } cacheobject ; 318 | 319 | 320 | #define OFF(x) offsetof(cacheobject, x) 321 | // attributes from wrapped function 322 | static PyMemberDef cache_memberlist[] = { 323 | {"__wrapped__", T_OBJECT, OFF(fn), RESTRICTED | READONLY}, 324 | {"__module__", T_OBJECT, OFF(func_module), RESTRICTED | READONLY}, 
325 | {"__name__", T_OBJECT, OFF(func_name), RESTRICTED | READONLY}, 326 | {"__qualname__",T_OBJECT, OFF(func_qualname), RESTRICTED | READONLY}, 327 | {"__annotations__", T_OBJECT, OFF(func_annotations), RESTRICTED | READONLY}, 328 | {NULL} /* Sentinel */ 329 | }; 330 | 331 | 332 | // getsetters from wrapped function 333 | static PyObject * 334 | cache_get_doc(cacheobject * co, void *closure) 335 | { 336 | PyFunctionObject * fn = (PyFunctionObject *) co->fn; 337 | if (fn->func_doc == NULL) 338 | Py_RETURN_NONE; 339 | 340 | INC_RETURN(fn->func_doc); 341 | } 342 | 343 | #if defined(_PY2) || defined (_PY32) 344 | 345 | static int 346 | restricted(void) 347 | { 348 | #ifdef _PY2 349 | if (!PyEval_GetRestricted()) 350 | #endif 351 | return 0; 352 | PyErr_SetString(PyExc_RuntimeError, 353 | "function attributes not accessible in restricted mode"); 354 | return 1; 355 | } 356 | 357 | 358 | static PyObject * 359 | func_get_dict(PyFunctionObject *op) 360 | { 361 | if (restricted()) 362 | return NULL; 363 | if (op->func_dict == NULL) { 364 | op->func_dict = PyDict_New(); 365 | if (op->func_dict == NULL) 366 | return NULL; 367 | } 368 | Py_INCREF(op->func_dict); 369 | return op->func_dict; 370 | } 371 | 372 | static int 373 | func_set_dict(PyFunctionObject *op, PyObject *value) 374 | { 375 | PyObject *tmp; 376 | 377 | if (restricted()) 378 | return -1; 379 | /* It is illegal to del f.func_dict */ 380 | if (value == NULL) { 381 | PyErr_SetString(PyExc_TypeError, 382 | "function's dictionary may not be deleted"); 383 | return -1; 384 | } 385 | /* Can only set func_dict to a dictionary */ 386 | if (!PyDict_Check(value)) { 387 | PyErr_SetString(PyExc_TypeError, 388 | "setting function's dictionary to a non-dict"); 389 | return -1; 390 | } 391 | tmp = op->func_dict; 392 | Py_INCREF(value); 393 | op->func_dict = value; 394 | Py_XDECREF(tmp); 395 | return 0; 396 | } 397 | 398 | static PyGetSetDef cache_getset[] = { 399 | {"__doc__", (getter)cache_get_doc, NULL, NULL, NULL}, 400 | 
{"__dict__", (getter)func_get_dict, (setter)func_set_dict}, 401 | {NULL} /* Sentinel */ 402 | }; 403 | 404 | #else 405 | 406 | static PyGetSetDef cache_getset[] = { 407 | {"__doc__", (getter)cache_get_doc, NULL, NULL, NULL}, 408 | {"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict}, 409 | {NULL} /* Sentinel */ 410 | }; 411 | 412 | #endif 413 | 414 | 415 | /* Bind a function to an object */ 416 | static PyObject * 417 | cache_descr_get(PyObject *func, PyObject *obj, PyObject *type) 418 | { 419 | if (obj == Py_None || obj == NULL) 420 | INC_RETURN(func); 421 | 422 | #ifdef _PY2 423 | return PyMethod_New(func, obj, type); 424 | #else 425 | return PyMethod_New(func, obj); 426 | #endif 427 | } 428 | 429 | 430 | static void 431 | cache_dealloc(cacheobject *co) 432 | { 433 | Py_CLEAR(co->fn); 434 | Py_CLEAR(co->func_module); 435 | Py_CLEAR(co->func_name); 436 | Py_CLEAR(co->func_qualname); 437 | Py_CLEAR(co->func_annotations); 438 | Py_CLEAR(co->func_dict); 439 | Py_CLEAR(co->cache_dict); 440 | Py_CLEAR(co->ex_state); 441 | Py_CLEAR(co->cinfo); 442 | Py_CLEAR(co->root); 443 | FREE_LOCK(co); 444 | Py_TYPE(co)->tp_free(co); 445 | 446 | } 447 | 448 | 449 | /* 450 | * attempt to set hs->hashvalue to hash(hs->args) Does not do alter any 451 | * reference counts. Returns NULL on error. 
If hs->hashvalue==-1 on return 452 | * then hs->args is Unhashable 453 | */ 454 | static PyObject * 455 | set_hash_value(cacheobject *co, HashedArgs *hs) 456 | { 457 | if ((hs->hashvalue = PyObject_Hash(hs->args)) == -1) { 458 | // unhashable 459 | if (co->err == FC_ERROR) { 460 | return NULL; 461 | } 462 | // if error was something other than a TypeError, exit 463 | if (!PyErr_GivenExceptionMatches(PyErr_Occurred(), PyExc_TypeError)) { 464 | return NULL; 465 | } 466 | PyErr_Clear(); 467 | 468 | if (co->err == FC_WARNING) { 469 | // try to issue warning 470 | if( PyErr_WarnEx(PyExc_UserWarning, 471 | "Unhashable arguments cannot be cached",1) < 0){ 472 | // warning becomes exception 473 | PyErr_SetString(PyExc_TypeError, 474 | "Cached function arguments must be hashable"); 475 | return NULL; 476 | } 477 | } 478 | } 479 | // success! 480 | return (PyObject *) hs; 481 | } 482 | 483 | // compute the hash of function args and kwargs 484 | // THREAD SAFTEY NOTES: 485 | // We access global data: co->ex_state and co->typed. 
486 | // These data are defined at co creation time and are not 487 | // changed so we do not need to worry about thread safety here 488 | static PyObject * 489 | make_key(cacheobject *co, PyObject *args, PyObject *kw) 490 | { 491 | PyObject *item, *keys, *key; 492 | Py_ssize_t ex_size = 0; 493 | Py_ssize_t arg_size = 0; 494 | Py_ssize_t kw_size = 0; 495 | Py_ssize_t i, size, off; 496 | HashedArgs *hs; 497 | int is_list = 1; 498 | 499 | // determine size of arguments and types 500 | if (PyList_Check(co->ex_state)) 501 | ex_size = Py_SIZE(co->ex_state); 502 | else if (PyDict_CheckExact(co->ex_state)){ 503 | is_list = 0; 504 | ex_size = PyDict_Size(co->ex_state); 505 | } 506 | if (args && PyTuple_CheckExact(args)) 507 | arg_size = PyTuple_GET_SIZE(args); 508 | if (kw && PyDict_CheckExact(kw)) 509 | kw_size = PyDict_Size(kw); 510 | 511 | // allocate HashedArgs Object 512 | if(!(hs = PyObject_New(HashedArgs, &HashedArgs_type))) 513 | return NULL; 514 | 515 | // total size 516 | if (co->typed) 517 | size = (2-is_list)*ex_size+2*arg_size+3*kw_size; 518 | else 519 | size = (2-is_list)*ex_size+arg_size+2*kw_size; 520 | // initialize new tuple 521 | if(!(hs->args = PyTuple_New(size))){ 522 | return NULL; 523 | } 524 | // incorporate extra state 525 | if(is_list){ 526 | for(i = 0; i < ex_size; i++){ 527 | PyObject *tmp = PyList_GET_ITEM(co->ex_state, i); 528 | PyTuple_SET_ITEM(hs->args, i, tmp); 529 | Py_INCREF(tmp); 530 | } 531 | } 532 | else if(ex_size > 0){ 533 | if(!(keys = PyDict_Keys(co->ex_state))){ 534 | Py_DECREF(hs); 535 | return NULL; 536 | } 537 | if( PyList_Sort(keys) < 0){ 538 | Py_DECREF(keys); 539 | Py_DECREF(hs); 540 | return NULL; 541 | } 542 | for(i = 0; i < ex_size; i++){ 543 | key = PyList_GET_ITEM(keys, i); 544 | Py_INCREF(key); 545 | PyTuple_SET_ITEM(hs->args, 2*i, key); 546 | 547 | if(!(item = PyDict_GetItem(co->ex_state, key))){ 548 | Py_DECREF(keys); 549 | Py_DECREF(hs); 550 | return NULL; 551 | } 552 | Py_INCREF(item); 553 | 
PyTuple_SET_ITEM(hs->args, 2*i+1, item); 554 | } 555 | Py_DECREF(keys); 556 | } 557 | off = (2-is_list)*ex_size; 558 | 559 | // incorporate arguments 560 | for(i = 0; i < arg_size; i++){ 561 | PyObject *tmp = PyTuple_GET_ITEM(args, i); 562 | PyTuple_SET_ITEM(hs->args, off+i, tmp); 563 | Py_INCREF(tmp); 564 | if(co->typed) { 565 | off += 1; 566 | tmp = (PyObject *)Py_TYPE(tmp); 567 | Py_INCREF(tmp); 568 | PyTuple_SET_ITEM(hs->args, off+i, tmp); 569 | } 570 | } 571 | off += arg_size; 572 | 573 | // incorporate keyword arguments 574 | if(kw_size > 0){ 575 | if(!(keys = PyDict_Keys(kw))){ 576 | Py_DECREF(hs); 577 | return NULL; 578 | } 579 | if( PyList_Sort(keys) < 0){ 580 | Py_DECREF(keys); 581 | Py_DECREF(hs); 582 | return NULL; 583 | } 584 | for(i = 0; i < kw_size; i++){ 585 | key = PyList_GET_ITEM(keys, i); 586 | Py_INCREF(key); 587 | PyTuple_SET_ITEM(hs->args, off+i, key); 588 | if(!(item = PyDict_GetItem(kw, key))){ 589 | Py_DECREF(keys); 590 | Py_DECREF(hs); 591 | return NULL; 592 | } 593 | off += 1; 594 | Py_INCREF(item); 595 | PyTuple_SET_ITEM(hs->args, off+i, item); 596 | if (co->typed){ 597 | off += 1; 598 | item = (PyObject *)Py_TYPE(item); 599 | Py_INCREF(item); 600 | PyTuple_SET_ITEM(hs->args, off+i, item); 601 | } 602 | } 603 | Py_DECREF(keys); 604 | } 605 | // check for an error we may have missed 606 | if( PyErr_Occurred() ){ 607 | Py_DECREF(hs); 608 | return NULL; 609 | } 610 | // set hash value 611 | if( !set_hash_value(co, hs) ) { 612 | Py_DECREF(hs); 613 | return NULL; 614 | } 615 | 616 | return (PyObject *)hs; 617 | } 618 | 619 | 620 | /*********************************************************** 621 | * All calls to the cached function go through cache_call 622 | * Handles: (1) Generation of key (via make_key) 623 | * (2) Maintenance of circular doubly linked list 624 | * (3) Actual updates to cache dictionary 625 | * THREAD SAFETY NOTES: 626 | * 1. 
The GIL may switch threads between all PyDict_Get/Set/DelItem 627 | * If another thread were to call cache_clear while the dict was in 628 | * an indetermined state, that could be very very bad. Must lock all 629 | * updates to cache_dict 630 | ***********************************************************/ 631 | static PyObject * 632 | cache_call(cacheobject *co, PyObject *args, PyObject *kw) 633 | { 634 | PyObject *key, *result, *link, *first; 635 | 636 | /* no cache, just update stats and return */ 637 | if (co->maxsize == 0) { 638 | co->misses++; 639 | return PyObject_Call(co->fn, args, kw); 640 | } 641 | 642 | // generate a key from hashing the arguments 643 | // THREAD SAFETY NOTES: 644 | // Computing the hash will result in many potential calls to __hash__ 645 | // methods, allowing the GIL to switch threads. Thus it is possible that 646 | // two threads have called this function with the exact same arguments 647 | // and are constructing keys 648 | key = make_key(co, args, kw); 649 | if (!key) 650 | return NULL; 651 | 652 | /* check for unhashable type */ 653 | if ( ((HashedArgs *)key)->hashvalue == -1){ 654 | // no locking neccessary here 655 | Py_DECREF(key); 656 | co->misses++; 657 | return PyObject_Call(co->fn, args, kw); 658 | } 659 | 660 | /* For an unbounded cache, link is simply the result of the function call 661 | * For an LRU cache, link is a pointer to a clist node */ 662 | if(ACQUIRE_LOCK(co) == -1){ 663 | Py_DECREF(key); 664 | return NULL; 665 | } 666 | link = PyDict_GetItem(co->cache_dict, key); 667 | if(PyErr_Occurred()){ 668 | RELEASE_LOCK(co); 669 | Py_XDECREF(link); 670 | Py_DECREF(key); 671 | return NULL; 672 | } 673 | if(RELEASE_LOCK(co) == -1){ 674 | Py_XDECREF(link); 675 | Py_DECREF(key); 676 | return NULL; 677 | } 678 | 679 | if (!link){ 680 | result = PyObject_Call(co->fn, args, kw); // result refcount is one 681 | if(PyErr_Occurred() || !result){ 682 | Py_XDECREF(result); 683 | Py_DECREF(key); 684 | return NULL; 685 | } 686 | /* 
Unbounded cache, no clist maintenance, no locks needed */ 687 | if (co->maxsize < 0){ 688 | if( PyDict_SetItem(co->cache_dict, key, result) == -1 || 689 | PyErr_Occurred()){ 690 | Py_DECREF(key); 691 | Py_DECREF(result); 692 | return NULL; 693 | } 694 | Py_DECREF(key); 695 | return co->misses++, result; 696 | } 697 | /* Least Recently Used cache */ 698 | /* Need to reacquire the lock here and make sure that the key,result were 699 | * not added to the cache while we were waiting */ 700 | if(ACQUIRE_LOCK(co) == -1){ 701 | Py_DECREF(key); 702 | Py_DECREF(result); 703 | return NULL; 704 | } 705 | #ifdef WITH_THREAD 706 | link = PyDict_GetItem(co->cache_dict, key); 707 | if(PyErr_Occurred()){ 708 | RELEASE_LOCK(co); 709 | Py_DECREF(key); 710 | Py_DECREF(result); 711 | Py_XDECREF(link); 712 | return NULL; 713 | } 714 | if(link){ 715 | Py_DECREF(key); 716 | if(RELEASE_LOCK(co) == -1){ 717 | Py_DECREF(result); 718 | return NULL; 719 | } 720 | return co->hits++, result; 721 | } 722 | #endif 723 | /* if cache is full, repurpose the last link rather than 724 | * passing it off to garbage collection. */ 725 | if (((PyDictObject *)co->cache_dict)->ma_used == co->maxsize){ 726 | /* Note that the old key will be used to delete the link from the dictionary 727 | * Be sure to INCREF old link so we don't lose it before 728 | * we add it when the PyDict_DelItem occurs */ 729 | clist *last = co->root->prev; 730 | PyObject *old_key = last->key; 731 | PyObject *old_res = last->result; 732 | // set new items 733 | last->key = key; 734 | last->result = result; 735 | // bump to the front (get back the result we just set). 
736 | result = make_first(co->root, last); 737 | // Increase ref count of repurposed link so we don't trigger GC 738 | // save the first position since the global co->root->next may change 739 | first = (PyObject *) co->root->next; 740 | // Increases first->refcount to 2 741 | if(PyDict_SetItem(co->cache_dict, key, first) == -1){ 742 | Py_DECREF(first); 743 | Py_DECREF(first); 744 | Py_DECREF(key); 745 | Py_DECREF(old_key); 746 | Py_DECREF(old_res); 747 | Py_DECREF(result); 748 | RELEASE_LOCK(co); 749 | return NULL; 750 | } 751 | // handle deletions 752 | if(PyDict_DelItem(co->cache_dict, old_key) == -1){ 753 | Py_DECREF(old_key); 754 | Py_DECREF(old_res); 755 | Py_DECREF(result); 756 | RELEASE_LOCK(co); 757 | return NULL; 758 | } 759 | // These would have been decrefed had we simply deleted the link 760 | Py_DECREF(old_key); 761 | Py_DECREF(old_res); 762 | if(PyErr_Occurred()){ 763 | Py_DECREF(result); 764 | RELEASE_LOCK(co); 765 | return NULL; 766 | } 767 | if(RELEASE_LOCK(co) == -1){ 768 | Py_DECREF(result); 769 | return NULL; 770 | } 771 | return co->misses++, result; 772 | } 773 | else { 774 | if(insert_first(co->root, key, result) < 0) { 775 | Py_DECREF(key); 776 | Py_DECREF(result); 777 | RELEASE_LOCK(co); 778 | return NULL; 779 | } 780 | first = (PyObject *) co->root->next; // insert_first sets refcount to 1 781 | // key and first count++ 782 | if(PyDict_SetItem(co->cache_dict, key, first) == -1 || PyErr_Occurred()){ 783 | Py_DECREF(first); 784 | Py_DECREF(result); 785 | RELEASE_LOCK(co); 786 | return NULL; 787 | } 788 | Py_DECREF(first); 789 | // Don't DECREF key here since we want both the dict and the node 'first' 790 | // To be able to have a valid copy 791 | co->misses++; 792 | if(RELEASE_LOCK(co) == -1){ 793 | Py_DECREF(result); 794 | return NULL; 795 | } 796 | return result; 797 | } 798 | } // link != NULL 799 | else { 800 | if( co->maxsize < 0){ 801 | Py_DECREF(key); 802 | co->hits++; 803 | INC_RETURN(link); 804 | } 805 | /* bump link to the front 
of the list and get result from link */ 806 | result = make_first(co->root, (clist *) link); 807 | Py_DECREF(key); 808 | co->hits++; 809 | return result; 810 | } 811 | } 812 | 813 | 814 | PyDoc_STRVAR(cacheclear__doc__, 815 | "cache_clear(self)\n\ 816 | \n\ 817 | Clear the cache and cache statistics."); 818 | static PyObject * 819 | cache_clear(PyObject *self) 820 | { 821 | cacheobject *co = (cacheobject *)self; 822 | // delete dictionary - use a lock to keep dict in a fully determined state 823 | if(ACQUIRE_LOCK(co) == -1) 824 | return NULL; 825 | PyDict_Clear(co->cache_dict); 826 | co->hits = 0; 827 | co->misses = 0; 828 | if(RELEASE_LOCK(co) == -1) 829 | return NULL; 830 | Py_RETURN_NONE; 831 | } 832 | 833 | 834 | PyDoc_STRVAR(cacheinfo__doc__, 835 | "cache_info(self)\n\ 836 | \n\ 837 | Report cache statistics."); 838 | static PyObject * 839 | cache_info(PyObject *self) 840 | { 841 | cacheobject * co = (cacheobject *) self; 842 | if (co->maxsize >= 0) 843 | return PyObject_CallFunction(co->cinfo,"nnnn",co->hits, 844 | co->misses, co->maxsize, 845 | ((PyDictObject *)co->cache_dict)->ma_used); 846 | else 847 | return PyObject_CallFunction(co->cinfo,"nnOn",co->hits, 848 | co->misses, Py_None, 849 | ((PyDictObject *)co->cache_dict)->ma_used); 850 | } 851 | 852 | 853 | static PyMethodDef cache_methods[] = { 854 | {"cache_clear", (PyCFunction) cache_clear, METH_NOARGS, 855 | cacheclear__doc__}, 856 | {"cache_info", (PyCFunction) cache_info, METH_NOARGS, 857 | cacheinfo__doc__}, 858 | {NULL, NULL} /* sentinel */ 859 | }; 860 | 861 | 862 | PyDoc_STRVAR(fn_doc, 863 | "Cached function."); 864 | 865 | 866 | static PyTypeObject cache_type = { 867 | PyVarObject_HEAD_INIT(NULL, 0) 868 | "fastcache.clru_cache", /* tp_name */ 869 | sizeof(cacheobject), /* tp_basicsize */ 870 | 0, /* tp_itemsize */ 871 | /* methods */ 872 | (destructor)cache_dealloc, /* tp_dealloc */ 873 | 0, /* tp_print */ 874 | 0, /* tp_getattr */ 875 | 0, /* tp_setattr */ 876 | 0, /* tp_reserved */ 877 | 0, 
/* tp_repr */ 878 | 0, /* tp_as_number */ 879 | 0, /* tp_as_sequence */ 880 | 0, /* tp_as_mapping */ 881 | 0, /* tp_hash */ 882 | (ternaryfunc)cache_call, /* tp_call */ 883 | 0, /* tp_str */ 884 | 0, /* tp_getattro */ 885 | 0, /* tp_setattro */ 886 | 0, /* tp_as_buffer */ 887 | Py_TPFLAGS_DEFAULT , /* tp_flags */ 888 | fn_doc, /* tp_doc */ 889 | 0, /* tp_traverse */ 890 | 0, /* tp_clear */ 891 | 0, /* tp_richcompare */ 892 | 0, /* tp_weaklistoffset */ 893 | 0, /* tp_iter */ 894 | 0, /* tp_iternext */ 895 | cache_methods, /* tp_methods */ 896 | cache_memberlist, /* tp_members */ 897 | cache_getset, /* tp_getset */ 898 | 0, /* tp_base */ 899 | 0, /* tp_dict */ 900 | cache_descr_get, /* tp_descr_get */ 901 | 0, /* tp_descr_set */ 902 | OFF(func_dict), /* tp_dictoffset */ 903 | 0, /* tp_init */ 904 | 0, /* tp_alloc */ 905 | 0, /* tp_new */ 906 | 0, /* tp_free */ 907 | }; 908 | 909 | 910 | /* lruobject - 911 | * the callable object returned by lrucache(all, my, cache, args) 912 | * [lrucache is known as clru_cache in python land] 913 | * records arguments to clru_cache and passes them along to the 914 | * cacheobject created when lruobject is called with a function 915 | * as an argument */ 916 | typedef struct { 917 | PyObject_HEAD 918 | Py_ssize_t maxsize; 919 | PyObject *state; 920 | int typed; 921 | enum unhashable err; 922 | } lruobject; 923 | 924 | 925 | static void lru_dealloc(lruobject *lru) 926 | { 927 | Py_CLEAR(lru->state); 928 | Py_TYPE(lru)->tp_free(lru); 929 | } 930 | 931 | 932 | static PyObject * 933 | get_func_attr(PyObject *fo, const char *name) 934 | { 935 | if( !PyObject_HasAttrString(fo,name)) 936 | Py_RETURN_NONE; 937 | else{ 938 | PyObject *attr = PyObject_GetAttrString(fo, name); 939 | if (attr == NULL) 940 | return NULL; 941 | return attr; 942 | } 943 | } 944 | 945 | 946 | /* takes a function as an argument and returns a cacheobject */ 947 | static PyObject * 948 | lru_call(lruobject *lru, PyObject *args, PyObject *kw) 949 | { 950 | PyObject *fo, 
*mod, *nt; 951 | cacheobject *co; 952 | 953 | if(! PyArg_ParseTuple(args, "O", &fo)) 954 | return NULL; 955 | 956 | if(! PyCallable_Check(fo)){ 957 | PyErr_SetString(PyExc_TypeError, "Argument must be callable."); 958 | return NULL; 959 | } 960 | co = PyObject_New(cacheobject, &cache_type); 961 | if (co == NULL) 962 | return NULL; 963 | 964 | #ifdef WITH_THREAD 965 | if ((co->lock = PyThread_allocate_lock()) == NULL){ 966 | Py_DECREF(co); 967 | return NULL; 968 | } 969 | // We need to initialize the rlock count and owner here 970 | co->rlock_count = 0; 971 | co->rlock_owner = 0; 972 | #endif 973 | if ((co->cache_dict = PyDict_New()) == NULL){ 974 | Py_DECREF(co); 975 | return NULL; 976 | } 977 | 978 | // initialize circular doubly linked list 979 | co->root = PyObject_New(clist, &clist_type); 980 | if(co->root == NULL){ 981 | Py_DECREF(co); 982 | return NULL; 983 | } 984 | 985 | // get namedtuple for cache_info() 986 | mod = PyImport_ImportModule("collections"); 987 | if (mod == NULL){ 988 | Py_DECREF(co); 989 | return NULL; 990 | } 991 | nt = PyObject_GetAttrString(mod, "namedtuple"); 992 | if (nt == NULL){ 993 | Py_DECREF(co); 994 | return NULL; 995 | } 996 | co->cinfo = PyObject_CallFunction(nt,"ss","CacheInfo", 997 | "hits misses maxsize currsize"); 998 | if (co->cinfo == NULL){ 999 | Py_DECREF(co); 1000 | return NULL; 1001 | } 1002 | 1003 | co->func_dict = get_func_attr(fo, "__dict__"); 1004 | 1005 | co->fn = fo; // __wrapped__ 1006 | Py_INCREF(co->fn); 1007 | 1008 | co->func_module = get_func_attr(fo, "__module__"); 1009 | co->func_annotations = get_func_attr(fo, "__annotations__"); 1010 | co->func_name = get_func_attr(fo, "__name__"); 1011 | co->func_qualname = get_func_attr(fo, "__qualname__"); 1012 | 1013 | co->ex_state = lru->state; 1014 | Py_INCREF(co->ex_state); 1015 | co->maxsize = lru->maxsize; 1016 | co->hits = 0; 1017 | co->misses = 0; 1018 | co->typed = lru->typed; 1019 | co->err = lru->err; 1020 | // start with self-referencing root node 1021 | 
co->root->prev = co->root; 1022 | co->root->next = co->root; 1023 | co->root->key = Py_None; 1024 | co->root->result = Py_None; 1025 | Py_INCREF(co->root->key); 1026 | Py_INCREF(co->root->result); 1027 | 1028 | return (PyObject *)co; 1029 | } 1030 | 1031 | 1032 | static PyTypeObject lru_type = { 1033 | PyVarObject_HEAD_INIT(NULL, 0) 1034 | "fastcache.lru", /* tp_name */ 1035 | sizeof(lruobject), /* tp_basicsize */ 1036 | 0, /* tp_itemsize */ 1037 | /* methods */ 1038 | (destructor)lru_dealloc, /* tp_dealloc */ 1039 | 0, /* tp_print */ 1040 | 0, /* tp_getattr */ 1041 | 0, /* tp_setattr */ 1042 | 0, /* tp_reserved */ 1043 | 0, /* tp_repr */ 1044 | 0, /* tp_as_number */ 1045 | 0, /* tp_as_sequence */ 1046 | 0, /* tp_as_mapping */ 1047 | 0, /* tp_hash */ 1048 | (ternaryfunc)lru_call, /* tp_call */ 1049 | 0, /* tp_str */ 1050 | 0, /* tp_getattro */ 1051 | 0, /* tp_setattro */ 1052 | 0, /* tp_as_buffer */ 1053 | Py_TPFLAGS_DEFAULT , /* tp_flags */ 1054 | }; 1055 | 1056 | 1057 | /* helper function for processing 'unhashable' */ 1058 | enum unhashable 1059 | process_uh(PyObject *arg, PyObject *(*f)(const char *)) 1060 | { 1061 | PyObject *uh[3] = {f("error"), f("warning"), f("ignore")}; 1062 | int i, j; 1063 | if (arg != NULL){ 1064 | 1065 | enum unhashable vals[3] = {FC_ERROR, FC_WARNING, FC_IGNORE}; 1066 | 1067 | for(i=0; i<3; i++){ 1068 | int k = PyObject_RichCompareBool(arg, uh[i], Py_EQ); 1069 | if (k < 0){ 1070 | for(j=0; j<3; j++) 1071 | Py_DECREF(uh[j]); 1072 | return FC_FAIL; 1073 | } 1074 | if (k){ 1075 | /* DECREF objects and return value */ 1076 | for(j=0; j<3; j++) 1077 | Py_DECREF(uh[j]); 1078 | return vals[i]; 1079 | } 1080 | } 1081 | } 1082 | for(j=0; j<3; j++) 1083 | Py_DECREF(uh[j]); 1084 | PyErr_SetString(PyExc_TypeError, 1085 | "Argument must be 'error', 'warning', or 'ignore'"); 1086 | return FC_FAIL; 1087 | } 1088 | 1089 | 1090 | /* LRU cache decorator */ 1091 | PyDoc_STRVAR(lrucache__doc__, 1092 | "clru_cache(maxsize=128, typed=False, state=None, 
unhashable='error')\n\n" 1093 | "Least-recently-used cache decorator.\n\n" 1094 | "If *maxsize* is set to None, the LRU features are disabled and the\n" 1095 | "cache can grow without bound.\n\n" 1096 | "If *typed* is True, arguments of different types will be cached\n" 1097 | "separately. For example, f(3.0) and f(3) will be treated as distinct\n" 1098 | "calls with distinct results.\n\n" 1099 | "If *state* is a list or dict, the items will be incorporated into the\n" 1100 | "argument hash.\n\n" 1101 | "The result of calling the cached function with unhashable (mutable)\n" 1102 | "arguments depends on the value of *unhashable*:\n\n" 1103 | " If *unhashable* is 'error', a TypeError will be raised.\n\n" 1104 | " If *unhashable* is 'warning', a UserWarning will be raised, and\n" 1105 | " the wrapped function will be called with the supplied arguments.\n" 1106 | " A miss will be recorded in the cache statistics.\n\n" 1107 | " If *unhashable* is 'ignore', the wrapped function will be called\n" 1108 | " with the supplied arguments. A miss will will be recorded in\n" 1109 | " the cache statistics.\n\n" 1110 | "View the cache statistics named tuple (hits, misses, maxsize, currsize)\n" 1111 | "with f.cache_info(). Clear the cache and statistics with\n" 1112 | "f.cache_clear(). Access the underlying function with f.__wrapped__.\n\n" 1113 | "See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used"); 1114 | 1115 | static PyObject * 1116 | lrucache(PyObject *self, PyObject *args, PyObject *kwargs) 1117 | { 1118 | PyObject *state = Py_None; 1119 | int typed = 0; 1120 | PyObject *omaxsize = Py_False; 1121 | PyObject *oerr = Py_None; 1122 | Py_ssize_t maxsize = 128; 1123 | static char *kwlist[] = {"maxsize", "typed", "state", "unhashable", NULL}; 1124 | lruobject *lru; 1125 | enum unhashable err; 1126 | #if defined(_PY2) || defined (_PY32) 1127 | PyObject *otyped = Py_False; 1128 | if(! 
PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO:lrucache", 1129 | kwlist, 1130 | &omaxsize, &otyped, &state, &oerr)) 1131 | return NULL; 1132 | typed = PyObject_IsTrue(otyped); 1133 | if (typed < -1) 1134 | return NULL; 1135 | #else 1136 | if(! PyArg_ParseTupleAndKeywords(args, kwargs, "|OpOO:lrucache", 1137 | kwlist, 1138 | &omaxsize, &typed, &state, &oerr)) 1139 | return NULL; 1140 | #endif 1141 | if (omaxsize != Py_False){ 1142 | if (omaxsize == Py_None) 1143 | maxsize = -1; 1144 | #ifdef _PY2 1145 | else if (PyInt_Check(omaxsize)){ 1146 | maxsize = PyInt_AsSsize_t(omaxsize); 1147 | if (maxsize < 0) 1148 | maxsize = -1; 1149 | } 1150 | #endif 1151 | else { 1152 | if( ! PyLong_Check(omaxsize)){ 1153 | PyErr_SetString(PyExc_TypeError, 1154 | "Argument must be an int."); 1155 | return NULL; 1156 | } 1157 | maxsize = PyLong_AsSsize_t(omaxsize); 1158 | if (maxsize < 0) 1159 | maxsize = -1; 1160 | } 1161 | } 1162 | 1163 | // ensure state is a list or dict 1164 | if (state != Py_None && !(PyList_Check(state) || PyDict_CheckExact(state))){ 1165 | PyErr_SetString(PyExc_TypeError, 1166 | "Argument must be a list or dict."); 1167 | return NULL; 1168 | } 1169 | 1170 | // check unhashable 1171 | if (oerr == Py_None) 1172 | err = FC_ERROR; 1173 | else{ 1174 | #ifdef _PY2 1175 | if(PyString_Check(oerr)) 1176 | err = process_uh(oerr, PyString_FromString); 1177 | else 1178 | #endif 1179 | if(PyUnicode_Check(oerr)) 1180 | err = process_uh(oerr, PyUnicode_FromString); 1181 | else 1182 | err = process_uh(NULL, NULL); // set error properly 1183 | } 1184 | if (err == FC_FAIL) 1185 | return NULL; 1186 | 1187 | lru = PyObject_New(lruobject, &lru_type); 1188 | if (lru == NULL) 1189 | return NULL; 1190 | 1191 | lru->maxsize = maxsize; 1192 | lru->state = state; 1193 | lru->typed = typed; 1194 | lru->err = err; 1195 | Py_INCREF(lru->state); 1196 | 1197 | return (PyObject *) lru; 1198 | } 1199 | 1200 | 1201 | static PyMethodDef lrucachemethods[] = { 1202 | {"clru_cache", (PyCFunction) 
lrucache, METH_VARARGS | METH_KEYWORDS, 1203 | lrucache__doc__}, 1204 | {NULL, NULL} /* sentinel */ 1205 | }; 1206 | 1207 | 1208 | #ifndef _PY2 1209 | static PyModuleDef lrucachemodule = { 1210 | PyModuleDef_HEAD_INIT, 1211 | "_lrucache", 1212 | "Least Recently Used cache", 1213 | -1, 1214 | lrucachemethods, 1215 | NULL, NULL, NULL, NULL 1216 | }; 1217 | #endif 1218 | 1219 | 1220 | #ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ 1221 | #define PyMODINIT_FUNC void 1222 | #endif 1223 | PyMODINIT_FUNC 1224 | #ifdef _PY2 1225 | init_lrucache(void) 1226 | { 1227 | #define _PYINIT_ERROR_RET return 1228 | #else 1229 | PyInit__lrucache(void) 1230 | { 1231 | PyObject *m; 1232 | #define _PYINIT_ERROR_RET return NULL 1233 | #endif 1234 | 1235 | lru_type.tp_new = PyType_GenericNew; 1236 | if (PyType_Ready(&lru_type) < 0) 1237 | _PYINIT_ERROR_RET; 1238 | 1239 | cache_type.tp_new = PyType_GenericNew; 1240 | if (PyType_Ready(&cache_type) < 0) 1241 | _PYINIT_ERROR_RET; 1242 | 1243 | HashedArgs_type.tp_new = PyType_GenericNew; 1244 | if (PyType_Ready(&HashedArgs_type) < 0) 1245 | _PYINIT_ERROR_RET; 1246 | 1247 | clist_type.tp_new = PyType_GenericNew; 1248 | if (PyType_Ready(&clist_type) < 0) 1249 | _PYINIT_ERROR_RET; 1250 | 1251 | #ifdef _PY2 1252 | Py_InitModule3("_lrucache", lrucachemethods, 1253 | "Least recently used cache."); 1254 | #else 1255 | m = PyModule_Create(&lrucachemodule); 1256 | if (m == NULL) 1257 | return NULL; 1258 | #endif 1259 | 1260 | Py_INCREF(&lru_type); 1261 | Py_INCREF(&cache_type); 1262 | Py_INCREF(&HashedArgs_type); 1263 | Py_INCREF(&clist_type); 1264 | 1265 | #ifndef _PY2 1266 | return m; 1267 | #endif 1268 | } 1269 | 1270 | #ifdef __cplusplus 1271 | } 1272 | #endif 1273 | --------------------------------------------------------------------------------