├── .coveragerc
├── .gitignore
├── .mypy.ini
├── .travis.yml
├── CONTRIBUTING.md
├── CONTRIBUTORS.md
├── LICENSE
├── README.md
├── examples.py
├── memoization
├── __init__.py
├── backport
│ ├── __init__.py
│ └── backport_enum.py
├── caching
│ ├── __init__.py
│ ├── fifo_cache.py
│ ├── fifo_cache.pyi
│ ├── general
│ │ ├── __init__.py
│ │ ├── keys_order_dependent.py
│ │ ├── keys_order_dependent.pyi
│ │ ├── keys_order_independent.py
│ │ ├── keys_order_independent.pyi
│ │ ├── values_with_ttl.py
│ │ ├── values_with_ttl.pyi
│ │ ├── values_without_ttl.py
│ │ └── values_without_ttl.pyi
│ ├── lfu_cache.py
│ ├── lfu_cache.pyi
│ ├── lru_cache.py
│ ├── lru_cache.pyi
│ ├── plain_cache.py
│ ├── plain_cache.pyi
│ ├── statistic_cache.py
│ └── statistic_cache.pyi
├── config
│ ├── __init__.py
│ ├── algorithm_mapping.py
│ └── algorithm_mapping.pyi
├── constant
│ ├── __init__.py
│ ├── flag.py
│ └── flag.pyi
├── memoization.py
├── memoization.pyi
├── model.py
├── model.pyi
├── py.typed
├── type
│ ├── __init__.py
│ ├── caching
│ │ ├── __init__.py
│ │ ├── cache.pyi
│ │ └── general
│ │ │ ├── __init__.py
│ │ │ └── keys.pyi
│ └── model.pyi
└── util
│ ├── __init__.py
│ └── algorithm_extension_validator.py
├── setup.py
└── test.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit =
3 | memoization/backport/*
4 | memoization/util/algorithm_extension_validator.py
5 | */travis/virtualenv/*
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | ### Python template
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | .hypothesis/
50 | .pytest_cache/
51 |
52 | # Translations
53 | *.mo
54 | *.pot
55 |
56 | # Django stuff:
57 | *.log
58 | local_settings.py
59 | db.sqlite3
60 |
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 |
65 | # Scrapy stuff:
66 | .scrapy
67 |
68 | # Sphinx documentation
69 | docs/_build/
70 |
71 | # PyBuilder
72 | target/
73 |
74 | # Jupyter Notebook
75 | .ipynb_checkpoints
76 |
77 | # pyenv
78 | .python-version
79 |
80 | # celery beat schedule file
81 | celerybeat-schedule
82 |
83 | # SageMath parsed files
84 | *.sage.py
85 |
86 | # Environments
87 | .env
88 | .venv
89 | env/
90 | venv/
91 | ENV/
92 | env.bak/
93 | venv.bak/
94 | venv-*/
95 |
96 | # Spyder project settings
97 | .spyderproject
98 | .spyproject
99 |
100 | # Rope project settings
101 | .ropeproject
102 |
103 | # mkdocs documentation
104 | /site
105 |
106 | # mypy
107 | .mypy_cache/
108 | ### macOS template
109 | # General
110 | .DS_Store
111 | .AppleDouble
112 | .LSOverride
113 |
114 | # Icon must end with two \r
115 | Icon
116 |
117 | # Thumbnails
118 | ._*
119 |
120 | # Files that might appear in the root of a volume
121 | .DocumentRevisions-V100
122 | .fseventsd
123 | .Spotlight-V100
124 | .TemporaryItems
125 | .Trashes
126 | .VolumeIcon.icns
127 | .com.apple.timemachine.donotpresent
128 |
129 | # Directories potentially created on remote AFP share
130 | .AppleDB
131 | .AppleDesktop
132 | Network Trash Folder
133 | Temporary Items
134 | .apdisk
135 | ### Example user template template
136 | ### Example user template
137 |
138 | # IntelliJ project files
139 | .idea
140 | *.iml
141 | out
142 | gen### Windows template
143 | # Windows thumbnail cache files
144 | Thumbs.db
145 | ehthumbs.db
146 | ehthumbs_vista.db
147 |
148 | # Dump file
149 | *.stackdump
150 |
151 | # Folder config file
152 | [Dd]esktop.ini
153 |
154 | # Recycle Bin used on file shares
155 | $RECYCLE.BIN/
156 |
157 | # Windows Installer files
158 | *.cab
159 | *.msi
160 | *.msix
161 | *.msm
162 | *.msp
163 |
164 | # Windows shortcuts
165 | *.lnk
166 | ### Linux template
167 | *~
168 |
169 | # temporary files which can be created if a process still has a handle open of a deleted file
170 | .fuse_hidden*
171 |
172 | # KDE directory preferences
173 | .directory
174 |
175 | # Linux trash folder which might appear on any partition or disk
176 | .Trash-*
177 |
178 | # .nfs files are created when an open file is removed but is still being accessed
179 | .nfs*
180 | ### JetBrains template
181 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
182 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
183 |
184 | # User-specific stuff
185 | .idea/**/workspace.xml
186 | .idea/**/tasks.xml
187 | .idea/**/usage.statistics.xml
188 | .idea/**/dictionaries
189 | .idea/**/shelf
190 |
191 | # Sensitive or high-churn files
192 | .idea/**/dataSources/
193 | .idea/**/dataSources.ids
194 | .idea/**/dataSources.local.xml
195 | .idea/**/sqlDataSources.xml
196 | .idea/**/dynamic.xml
197 | .idea/**/uiDesigner.xml
198 | .idea/**/dbnavigator.xml
199 |
200 | # Gradle
201 | .idea/**/gradle.xml
202 | .idea/**/libraries
203 |
204 | # Gradle and Maven with auto-import
205 | # When using Gradle or Maven with auto-import, you should exclude module files,
206 | # since they will be recreated, and may cause churn. Uncomment if using
207 | # auto-import.
208 | # .idea/modules.xml
209 | # .idea/*.iml
210 | # .idea/modules
211 |
212 | # CMake
213 | cmake-build-*/
214 |
215 | # Mongo Explorer plugin
216 | .idea/**/mongoSettings.xml
217 |
218 | # File-based project format
219 | *.iws
220 |
221 | # IntelliJ
222 | out/
223 |
224 | # mpeltonen/sbt-idea plugin
225 | .idea_modules/
226 |
227 | # JIRA plugin
228 | atlassian-ide-plugin.xml
229 |
230 | # Cursive Clojure plugin
231 | .idea/replstate.xml
232 |
233 | # Crashlytics plugin (for Android Studio and IntelliJ)
234 | com_crashlytics_export_strings.xml
235 | crashlytics.properties
236 | crashlytics-build.properties
237 | fabric.properties
238 |
239 | # Editor-based Rest Client
240 | .idea/httpRequests
241 |
--------------------------------------------------------------------------------
/.mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 |
3 | [mypy-test]
4 | ignore_errors = True
5 |
6 | [mypy-examples]
7 | ignore_errors = True
8 |
9 | [mypy-memoization.util.algorithm_extension_validator]
10 | ignore_errors = True
11 |
12 | [mypy-memoization.backport.*]
13 | ignore_errors = True
14 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | dist: xenial
3 | python:
4 | - "3.4"
5 | - "3.5"
6 | - "3.6"
7 | - "3.7"
8 | - "3.8"
9 | - "3.9"
10 | - "3.9-dev"
11 | - "nightly"
12 | install:
13 | - pip install coverage
14 | - pip install coveralls
15 | script:
16 | - coverage run test.py
17 | after_success:
18 | - coveralls
19 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Thank you for your contribution to this project. If your solution works well, I will merge your pull requests ASAP.
4 | **Feel free to add yourself to [`CONTRIBUTORS.md`](https://github.com/lonelyenvoy/python-memoization/blob/master/CONTRIBUTORS.md) :-D**
5 |
6 | ## Bugs...
7 |
8 | - If you find a bug, please report it with an [issue](https://github.com/lonelyenvoy/python-memoization/issues).
9 | - If you want to fix a bug, please submit [pull requests](https://github.com/lonelyenvoy/python-memoization/pulls).
10 |
11 | ## Want to implement a cooler caching algorithm?
12 |
13 | If you find the given algorithms (FIFO, LRU, and LFU) unable to satisfy your requirements,
14 | you can add any algorithms to this project. Since it is designed to be extensible,
15 | this can be easily done in a few steps:
16 |
17 | ### Step 1: Register your algorithm in `CachingAlgorithmFlag`
18 |
19 | Please locate this file: `memoization.constant.flag.CachingAlgorithmFlag`
20 |
21 | ```python
22 | class CachingAlgorithmFlag(enum.IntFlag):
23 | """
24 | Use this class to specify which caching algorithm you would like to use
25 | """
26 | FIFO = 1 # First In First Out
27 | LRU = 2 # Least Recently Used
28 | LFU = 4 # Least Frequently Used
29 | ```
30 |
31 | By default, these three internal algorithms are registered.
32 | Add your algorithms here, like `LRU_K = 8`. Note that the flag value should be a power of 2.
33 |
34 |
35 | ### Step 2: Write your caching toolkit with a wrapper function
36 |
37 | Please create a new python file in `memoization.caching` with the filename
38 | containing the name of your algorithm and a postfix `_cache`, like `lru_k_cache.py`.
39 |
40 | In this file, implement a `get_caching_wrapper` function, which creates a wrapper
41 | for any given user function. The signature of this function should be:
42 | ```python
43 | def get_caching_wrapper(user_function, max_size, ttl, algorithm, thread_safe):
44 | ...
45 | ```
46 |
47 | Note that you should also attach several members to the created wrapper,
48 | so that users can do some operations.
49 |
50 | These two functions are *required*:
51 |
52 | ```python
53 | # To see the statistics information
54 | wrapper.cache_info()
55 |
56 | # To clear the cache
57 | wrapper.cache_clear()
58 | ```
59 |
60 | These nine functions are *optional*, but recommended:
61 |
62 | ```python
63 | # To see whether the cache is empty
64 | wrapper.cache_is_empty()
65 |
66 | # To see whether the cache is full
67 | wrapper.cache_is_full()
68 |
69 | # To see whether the cache contains a cached item with the specified function call arguments
70 | wrapper.cache_contains_argument(function_arguments, alive_only)
71 |
72 | # To see whether the cache contains a cache item with the specified user function return value
73 | wrapper.cache_contains_result(return_value, alive_only)
74 |
75 | # To perform the given action for each cache element
76 | wrapper.cache_for_each(consumer)
77 |
78 | # To get user function arguments of all alive cache elements
79 | wrapper.cache_arguments()
80 |
81 | # To get user function return values of all alive cache elements
82 | wrapper.cache_results()
83 |
84 | # To get cache items, i.e. entries of all alive cache elements, in the form of (argument, result)
85 | wrapper.cache_items()
86 |
87 | # To remove all cache elements that satisfy the given predicate
88 | wrapper.cache_remove_if(predicate)
89 | ```
90 |
91 | For testing purposes, this member is *optional*, but recommended:
92 | ```python
93 | # Access to the cache which is typically a hash map with function
94 | # arguments as its key and function return values as its value
95 | wrapper._cache
96 | ```
97 |
98 | Please refer to `fifo_cache.py` as an example. Your code should look like:
99 |
100 | ```python
101 | def get_caching_wrapper(user_function, max_size, ttl, algorithm, thread_safe):
102 | """
103 | Get a caching wrapper for LRU_K cache
104 | """
105 |
106 | def wrapper(*args, **kwargs):
107 | """
108 | The actual wrapper
109 | """
110 | ...
111 |
112 | def cache_clear():
113 | """
114 | Clear the cache and its statistics information
115 | """
116 | ...
117 |
118 | def cache_info():
119 | """
120 | Show statistics information
121 | :return: a CacheInfo object describing the cache
122 | """
123 | ...
124 |
125 | # expose operations to wrapper
126 | wrapper.cache_clear = cache_clear
127 | wrapper.cache_info = cache_info
128 | wrapper._cache = ...
129 |
130 | return wrapper
131 |
132 | ```
133 |
134 | ### Step 3: Add a mapping from `CachingAlgorithmFlag` to `algorithm_mapping`
135 |
136 | Please locate this file: `memoization.config.algorithm_mapping`
137 |
138 | Inside it you will see a mapping, which is by default:
139 |
140 | ```python
141 | algorithm_mapping = {
142 | CachingAlgorithmFlag.FIFO: fifo_cache,
143 | CachingAlgorithmFlag.LRU: lru_cache,
144 | CachingAlgorithmFlag.LFU: lfu_cache,
145 | }
146 | ```
147 |
148 | Add your newly created caching toolkits to the dictionary like:
149 | ```python
150 | import memoization.caching.lru_k_cache as lru_k_cache
151 |
152 | ...
153 | algorithm_mapping = {
154 | ...
155 | CachingAlgorithmFlag.LRU_K: lru_k_cache,
156 | }
157 | ```
158 |
159 |
160 | ### Step 4: Validate your design
161 |
162 | Please run the script `memoization.util.algorithm_extension_validator` to perform
163 | type checking on your newly implemented algorithm. The validator will tell you
164 | if anything goes wrong. If your code works well, you will see:
165 |
166 | ```
167 | [Validation OK]
168 | Congratulations! Your extended algorithm passed the validation. Thanks for your efforts.
169 | Please understand that this validator only ensure that the typings of your extension are correct. You are still required to write test cases for your algorithms.
170 | ```
171 |
172 | Remember that this validator does **NOT** automatically test your algorithms,
173 | nor does it substitute for `test.py`. You are required to write test cases and make
174 | your code pass them before you submit a pull request.
175 |
176 |
177 | ### Step 5: Enjoy!
178 |
179 |
180 | ## Acknowledgements
181 |
182 | Thank you again, developer, for helping us improve this project.
183 |
--------------------------------------------------------------------------------
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | # Contributors ✨
2 |
3 | Thanks goes to these wonderful people:
4 |
5 | - [prathyushark](https://github.com/prathyushark)
6 | - [goredar](https://github.com/goredar)
7 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018-2020 lonelyenvoy
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # python-memoization
2 |
3 | [![Repository][repositorysvg]][repository] [![Build Status][travismaster]][travis] [![Codacy Badge][codacysvg]][codacy]
4 | [![Coverage Status][coverallssvg]][coveralls] [![Downloads][downloadssvg]][repository]
5 |
6 | [![PRs welcome][prsvg]][pr] [![License][licensesvg]][license] [![Supports Python][pythonsvg]][python]
7 |
8 |
9 | A powerful caching library for Python, with TTL support and multiple algorithm options.
10 |
11 | If you like this work, please [star](https://github.com/lonelyenvoy/python-memoization) it on GitHub.
12 |
13 | ## Why choose this library?
14 |
15 | Perhaps you know about [```functools.lru_cache```](https://docs.python.org/3/library/functools.html#functools.lru_cache)
16 | in Python 3, and you may be wondering why we are reinventing the wheel.
17 |
18 | Well, actually not. This lib is based on ```functools```. Please find below the comparison with ```lru_cache```.
19 |
20 | |Features|```functools.lru_cache```|```memoization```|
21 | |--------|-------------------|-----------|
22 | |Configurable max size|✔️|✔️|
23 | |Thread safety|✔️|✔️|
24 | |Flexible argument typing (typed & untyped)|✔️|Always typed|
25 | |Cache statistics|✔️|✔️|
26 | |LRU (Least Recently Used) as caching algorithm|✔️|✔️|
27 | |LFU (Least Frequently Used) as caching algorithm|No support|✔️|
28 | |FIFO (First In First Out) as caching algorithm|No support|✔️|
29 | |Extensibility for new caching algorithms|No support|✔️|
30 | |TTL (Time-To-Live) support|No support|✔️|
31 | |Support for unhashable arguments (dict, list, etc.)|No support|✔️|
32 | |Custom cache keys|No support|✔️|
33 | |On-demand partial cache clearing|No support|✔️|
34 | |Iterating through the cache|No support|✔️|
35 | |Python version|3.2+|3.4+|
36 |
37 | ```memoization``` solves some drawbacks of ```functools.lru_cache```:
38 |
39 | 1. ```lru_cache``` does not support __unhashable types__, which means function arguments cannot contain dict or list.
40 |
41 | ```python
42 | >>> from functools import lru_cache
43 | >>> @lru_cache()
44 | ... def f(x): return x
45 | ...
46 | >>> f([1, 2]) # unsupported
47 | Traceback (most recent call last):
48 |   File "<stdin>", line 1, in <module>
49 | TypeError: unhashable type: 'list'
50 | ```
51 |
52 | 2. ```lru_cache``` is vulnerable to [__hash collision attack__](https://learncryptography.com/hash-functions/hash-collision-attack)
53 | and can be hacked or compromised. Using this technique, attackers can make your program __unexpectedly slow__ by
54 | feeding the cached function with certain cleverly designed inputs. However, in ```memoization```, caching is always
55 | typed, which means ```f(3)``` and ```f(3.0)``` will be treated as different calls and cached separately. Also,
56 | you can build your own cache key with a unique hashing strategy. These measures __prevent the attack__ from
57 | happening (or at least makes it a lot harder).
58 |
59 | ```python
60 | >>> hash((1,))
61 | 3430019387558
62 | >>> hash(3430019387558.0) # two different arguments with an identical hash value
63 | 3430019387558
64 | ```
65 |
66 | 3. Unlike `lru_cache`, `memoization` is designed to be highly extensible, which makes it easy for developers to add and integrate
67 | __any caching algorithms__ (beyond FIFO, LRU and LFU) into this library. See [Contributing Guidance](https://github.com/lonelyenvoy/python-memoization/blob/master/CONTRIBUTING.md) for further detail.
68 |
69 |
70 | ## Installation
71 |
72 | ```bash
73 | pip install -U memoization
74 | ```
75 |
76 |
77 | ## 1-Minute Tutorial
78 |
79 | ```python
80 | from memoization import cached
81 |
82 | @cached
83 | def func(arg):
84 | ... # do something slow
85 | ```
86 |
87 | Simple enough - the results of ```func()``` are cached.
88 | Repetitive calls to ```func()``` with the same arguments run ```func()``` only once, enhancing performance.
89 |
90 | >:warning:__WARNING:__ for functions with unhashable arguments, the default setting may not enable `memoization` to work properly. See [custom cache keys](https://github.com/lonelyenvoy/python-memoization#custom-cache-keys) section below for details.
91 |
92 | ## 15-Minute Tutorial
93 |
94 | You will learn about the advanced features in the following tutorial, which enable you to customize `memoization` .
95 |
96 | Configurable options include `ttl`, `max_size`, `algorithm`, `thread_safe`, `order_independent` and `custom_key_maker`.
97 |
98 | ### TTL (Time-To-Live)
99 |
100 | ```python
101 | @cached(ttl=5) # the cache expires after 5 seconds
102 | def expensive_db_query(user_id):
103 | ...
104 | ```
105 |
106 | For impure functions, TTL (in seconds) will be a solution. This will be useful when the function returns resources that are valid only for a short time, e.g. fetching something from databases.
107 |
108 | ### Limited cache capacity
109 |
110 | ```python
111 | @cached(max_size=128) # the cache holds no more than 128 items
112 | def get_a_very_large_object(filename):
113 | ...
114 | ```
115 |
116 | By default, if you don't specify ```max_size```, the cache can hold unlimited number of items.
117 | When the cache is fully occupied, the former data will be overwritten by a certain algorithm described below.
118 |
119 | ### Choosing your caching algorithm
120 |
121 | ```python
122 | from memoization import cached, CachingAlgorithmFlag
123 |
124 | @cached(max_size=128, algorithm=CachingAlgorithmFlag.LFU) # the cache overwrites items using the LFU algorithm
125 | def func(arg):
126 | ...
127 | ```
128 |
129 | Possible values for ```algorithm``` are:
130 |
131 | - `CachingAlgorithmFlag.LRU`: _Least Recently Used_ (default)
132 | - `CachingAlgorithmFlag.LFU`: _Least Frequently Used_
133 | - `CachingAlgorithmFlag.FIFO`: _First In First Out_
134 |
135 | This option is valid only when a ```max_size``` is explicitly specified.
136 |
137 | ### Thread safe?
138 |
139 | ```python
140 | @cached(thread_safe=False)
141 | def func(arg):
142 | ...
143 | ```
144 |
145 | ```thread_safe``` is ```True``` by default. Setting it to ```False``` enhances performance.
146 |
147 | ### Order-independent cache key
148 |
149 | By default, the following function calls will be treated differently and cached twice, which means the cache misses at the second call.
150 |
151 | ```python
152 | func(a=1, b=1)
153 | func(b=1, a=1)
154 | ```
155 |
156 | You can avoid this behavior by passing an `order_independent` argument to the decorator, although it will slow down the performance a little bit.
157 |
158 | ```python
159 | @cached(order_independent=True)
160 | def func(**kwargs):
161 | ...
162 | ```
163 |
164 | ### Custom cache keys
165 |
166 | Prior to memorizing your function inputs and outputs (i.e. putting them into a cache), `memoization` needs to
167 | build a __cache key__ using the inputs, so that the outputs can be retrieved later.
168 |
169 | > By default, `memoization` tries to combine all your function
170 | arguments and calculate its hash value using `hash()`. If it turns out that parts of your arguments are
171 | unhashable, `memoization` will fall back to turning them into a string using `str()`. This behavior relies
172 | on the assumption that the string exactly represents the internal state of the arguments, which is true for
173 | built-in types.
174 |
175 | However, this is not true for all objects. __If you pass objects which are
176 | instances of non-built-in classes, sometimes you will need to override the default key-making procedure__,
177 | because the `str()` function on these objects may not hold the correct information about their states.
178 |
179 | Here are some suggestions. __Implementations of a valid key maker__:
180 |
181 | - MUST be a function with the same signature as the cached function.
182 | - MUST produce unique keys, which means two sets of different arguments always map to two different keys.
183 | - MUST produce hashable keys, and a key is comparable with another key (`memoization` only needs to check for their equality).
184 | - should compute keys efficiently and produce small objects as keys.
185 |
186 | Example:
187 |
188 | ```python
189 | def get_employee_id(employee):
190 |     return employee.id  # returns a string or an integer
191 |
192 | @cached(custom_key_maker=get_employee_id)
193 | def calculate_performance(employee):
194 | ...
195 | ```
196 |
197 | Note that writing a robust key maker function can be challenging in some situations. If you find it difficult,
198 | feel free to ask for help by submitting an [issue](https://github.com/lonelyenvoy/python-memoization/issues).
199 |
200 |
201 | ### Knowing how well the cache is behaving
202 |
203 | ```python
204 | >>> @cached
205 | ... def f(x): return x
206 | ...
207 | >>> f.cache_info()
208 | CacheInfo(hits=0, misses=0, current_size=0, max_size=None, algorithm=<CachingAlgorithmFlag.LRU: 2>, ttl=None, thread_safe=True, order_independent=False, use_custom_key=False)
209 | ```
210 |
211 | With ```cache_info```, you can retrieve the number of ```hits``` and ```misses``` of the cache, and other information indicating the caching status.
212 |
213 | - `hits`: the number of cache hits
214 | - `misses`: the number of cache misses
215 | - `current_size`: the number of items that were cached
216 | - `max_size`: the maximum number of items that can be cached (user-specified)
217 | - `algorithm`: caching algorithm (user-specified)
218 | - `ttl`: Time-To-Live value (user-specified)
219 | - `thread_safe`: whether the cache is thread safe (user-specified)
220 | - `order_independent`: whether the cache is kwarg-order-independent (user-specified)
221 | - `use_custom_key`: whether a custom key maker is used
222 |
223 | ### Other APIs
224 |
225 | - Access the original undecorated function `f` by `f.__wrapped__`.
226 | - Clear the cache by `f.cache_clear()`.
227 | - Check whether the cache is empty by `f.cache_is_empty()`.
228 | - Check whether the cache is full by `f.cache_is_full()`.
229 | - Disable `SyntaxWarning` by `memoization.suppress_warnings()`.
230 |
231 | ## Advanced API References
232 |
233 |
234 | Details
235 |
236 | ### Checking whether the cache contains something
237 |
238 | #### cache_contains_argument(function_arguments, alive_only)
239 |
240 | ```
241 | Return True if the cache contains a cached item with the specified function call arguments
242 |
243 | :param function_arguments: Can be a list, a tuple or a dict.
244 | - Full arguments: use a list to represent both positional arguments and keyword
245 | arguments. The list contains two elements, a tuple (positional arguments) and
246 | a dict (keyword arguments). For example,
247 | f(1, 2, 3, a=4, b=5, c=6)
248 | can be represented by:
249 | [(1, 2, 3), {'a': 4, 'b': 5, 'c': 6}]
250 |                            - Positional arguments only: when the arguments do not include keyword arguments,
251 | a tuple can be used to represent positional arguments. For example,
252 | f(1, 2, 3)
253 | can be represented by:
254 | (1, 2, 3)
255 |                            - Keyword arguments only: when the arguments do not include positional arguments,
256 | a dict can be used to represent keyword arguments. For example,
257 | f(a=4, b=5, c=6)
258 | can be represented by:
259 | {'a': 4, 'b': 5, 'c': 6}
260 |
261 | :param alive_only: Whether to check alive cache item only (default to True).
262 |
263 | :return: True if the desired cached item is present, False otherwise.
264 | ```
265 |
266 | #### cache_contains_result(return_value, alive_only)
267 |
268 | ```
269 | Return True if the cache contains a cache item with the specified user function return value. O(n) time
270 | complexity.
271 |
272 | :param return_value: A return value coming from the user function.
273 |
274 | :param alive_only: Whether to check alive cache item only (default to True).
275 |
276 | :return: True if the desired cached item is present, False otherwise.
277 | ```
278 |
279 | ### Iterating through the cache
280 |
281 | #### cache_arguments()
282 |
283 | ```
284 | Get user function arguments of all alive cache elements
285 |
286 | see also: cache_items()
287 |
288 | Example:
289 | @cached
290 | def f(a, b, c, d):
291 | ...
292 | f(1, 2, c=3, d=4)
293 | for argument in f.cache_arguments():
294 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
295 |
296 | :return: an iterable which iterates through a list of a tuple containing a tuple (positional arguments) and
297 | a dict (keyword arguments)
298 | ```
299 |
300 | #### cache_results()
301 |
302 | ```
303 | Get user function return values of all alive cache elements
304 |
305 | see also: cache_items()
306 |
307 | Example:
308 | @cached
309 | def f(a):
310 | return a
311 | f('hello')
312 | for result in f.cache_results():
313 | print(result) # 'hello'
314 |
315 | :return: an iterable which iterates through a list of user function result (of any type)
316 | ```
317 |
318 | #### cache_items()
319 |
320 | ```
321 | Get cache items, i.e. entries of all alive cache elements, in the form of (argument, result).
322 |
323 | argument: a tuple containing a tuple (positional arguments) and a dict (keyword arguments).
324 | result: a user function return value of any type.
325 |
326 | see also: cache_arguments(), cache_results().
327 |
328 | Example:
329 | @cached
330 | def f(a, b, c, d):
331 | return 'the answer is ' + str(a)
332 | f(1, 2, c=3, d=4)
333 | for argument, result in f.cache_items():
334 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
335 | print(result) # 'the answer is 1'
336 |
337 | :return: an iterable which iterates through a list of (argument, result) entries
338 | ```
339 |
340 | #### cache_for_each()
341 |
342 | ```
343 | Perform the given action for each cache element in an order determined by the algorithm until all
344 | elements have been processed or the action throws an error
345 |
346 | :param consumer: an action function to process the cache elements. Must have 3 arguments:
347 | def consumer(user_function_arguments, user_function_result, is_alive): ...
348 | user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
349 | args is a tuple holding positional arguments.
350 | kwargs is a dict holding keyword arguments.
351 | for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
352 | user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
353 | user_function_result is a return value coming from the user function.
354 | is_alive is a boolean value indicating whether the cache is still alive
355 | (if a TTL is given).
356 | ```
357 |
358 | ### Removing something from the cache
359 |
360 | #### cache_clear()
361 |
362 | ```
363 | Clear the cache and its statistics information
364 | ```
365 |
366 | #### cache_remove_if(predicate)
367 |
368 | ```
369 | Remove all cache elements that satisfy the given predicate
370 |
371 | :param predicate: a predicate function to judge whether the cache elements should be removed. Must
372 |                   have 3 arguments, and return True or False:
373 | def consumer(user_function_arguments, user_function_result, is_alive): ...
374 | user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
375 | args is a tuple holding positional arguments.
376 | kwargs is a dict holding keyword arguments.
377 | for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
378 | user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
379 | user_function_result is a return value coming from the user function.
380 | is_alive is a boolean value indicating whether the cache is still alive
381 | (if a TTL is given).
382 |
383 | :return: True if at least one element is removed, False otherwise.
384 | ```
385 |
386 |
387 |
388 | ## Q&A
389 |
390 | 1. **Q: There are duplicated code in `memoization` and most of them can be eliminated by using another level of
391 | abstraction (e.g. classes and multiple inheritance). Why not refactor?**
392 |
393 | A: We would like to keep the code in a proper level of abstraction. However, these abstractions make it run slower.
394 | As this is a caching library focusing on speed, we have to give up some elegance for better performance. Refactoring
395 | is our future work.
396 |
397 |
398 | 2. **Q: I have submitted an issue and not received a reply for a long time. Anyone can help me?**
399 |
400 | A: Sorry! We are not working full-time, but working voluntarily on this project, so you might experience some delay.
401 | We appreciate your patience.
402 |
403 |
404 | ## Contributing
405 |
406 | This project welcomes contributions from anyone.
407 | - [Read Contributing Guidance](https://github.com/lonelyenvoy/python-memoization/blob/master/CONTRIBUTING.md) first.
408 | - [Submit bugs](https://github.com/lonelyenvoy/python-memoization/issues) and help us verify fixes.
409 | - [Submit pull requests](https://github.com/lonelyenvoy/python-memoization/pulls) for bug fixes and features and discuss existing proposals. Please make sure that your PR passes the tests in ```test.py```.
410 | - [See contributors](https://github.com/lonelyenvoy/python-memoization/blob/master/CONTRIBUTORS.md) of this project.
411 |
412 |
413 | ## License
414 |
415 | [The MIT License](https://github.com/lonelyenvoy/python-memoization/blob/master/LICENSE)
416 |
417 |
418 | [pythonsvg]: https://img.shields.io/pypi/pyversions/memoization.svg
419 | [python]: https://www.python.org
420 |
421 | [travismaster]: https://travis-ci.com/lonelyenvoy/python-memoization.svg?branch=master
422 | [travis]: https://travis-ci.com/lonelyenvoy/python-memoization
423 |
424 | [coverallssvg]: https://coveralls.io/repos/github/lonelyenvoy/python-memoization/badge.svg?branch=master
425 | [coveralls]: https://coveralls.io/github/lonelyenvoy/python-memoization?branch=master
426 |
427 | [repositorysvg]: https://img.shields.io/pypi/v/memoization
428 | [repository]: https://pypi.org/project/memoization
429 |
430 | [downloadssvg]: https://img.shields.io/pypi/dm/memoization
431 |
432 | [prsvg]: https://img.shields.io/badge/pull_requests-welcome-blue.svg
433 | [pr]: https://github.com/lonelyenvoy/python-memoization#contributing
434 |
435 | [licensesvg]: https://img.shields.io/badge/license-MIT-blue.svg
436 | [license]: https://github.com/lonelyenvoy/python-memoization/blob/master/LICENSE
437 |
438 | [codacysvg]: https://api.codacy.com/project/badge/Grade/52c68fb9de6b4b149e77e8e173616db6
439 | [codacy]: https://www.codacy.com/manual/petrinchor/python-memoization?utm_source=github.com&utm_medium=referral&utm_content=lonelyenvoy/python-memoization&utm_campaign=Badge_Grade
440 |
--------------------------------------------------------------------------------
/examples.py:
--------------------------------------------------------------------------------
1 | from memoization import cached, CachingAlgorithmFlag
2 | import timeit
3 |
4 |
def factorial(n):
    """Plain recursive factorial with no caching (benchmark baseline)."""
    assert n >= 0
    return 1 if n < 2 else n * factorial(n - 1)
10 |
11 |
# Example usage
@cached(max_size=64, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False)
def quick_factorial(n):
    """Recursive factorial memoized with a 64-entry FIFO cache."""
    assert n >= 0
    return 1 if n < 2 else n * quick_factorial(n - 1)
19 |
20 |
def test1():
    """Benchmark body: run the uncached factorial for every depth below 500."""
    for depth in range(1, 500):
        factorial(depth)
24 |
25 |
def test2():
    """Benchmark body: run the memoized factorial for every depth below 500."""
    for depth in range(1, 500):
        quick_factorial(depth)
29 |
30 |
# Benchmark configuration.
# NOTE: test1/test2 call factorial(i) for i in range(1, 500), so the deepest
# recursion actually measured is factorial(499).  The previous value (500)
# caused the printed message to overstate the measured depth by one.
depth = 499
test_times = 100

# Average wall-clock time of one full benchmark run, in seconds.
time1 = timeit.timeit(test1, number=test_times) / test_times
time2 = timeit.timeit(test2, number=test_times) / test_times

print('factorial(' + str(depth) + ') without memoization took ' + str(time1 * 1000) + ' ms')
print('factorial(' + str(depth) + ') with memoization took ' + str(time2 * 1000) + ' ms')
39 |
--------------------------------------------------------------------------------
/memoization/__init__.py:
--------------------------------------------------------------------------------
import sys

# Public API re-exported from memoization.memoization.
__all__ = ['cached', 'suppress_warnings', 'CachingAlgorithmFlag', 'FIFO', 'LRU', 'LFU']

if (3, 4) <= sys.version_info < (4, 0):  # for Python >=3.4 <4
    from . import memoization as _memoization

# If the version guard above did not run, _memoization was never bound and the
# bare-name probe below raises NameError, which is converted to ImportError so
# callers see a single, meaningful failure mode on unsupported interpreters.
try:
    _memoization
except NameError:
    sys.stderr.write('python-memoization does not support your python version.\n')
    sys.stderr.write('Go to https://github.com/lonelyenvoy/python-memoization for usage and more details.\n')
    raise ImportError('Unsupported python version')
else:
    cached = _memoization.cached
    suppress_warnings = _memoization.suppress_warnings
    CachingAlgorithmFlag = _memoization.CachingAlgorithmFlag
    FIFO = _memoization.CachingAlgorithmFlag.FIFO
    LRU = _memoization.CachingAlgorithmFlag.LRU
    LFU = _memoization.CachingAlgorithmFlag.LFU
21 |
--------------------------------------------------------------------------------
/memoization/backport/__init__.py:
--------------------------------------------------------------------------------
# Expose the bundled Enum backport under the standard-library-like name "enum".
__all__ = ['enum']

from . import backport_enum as enum
4 |
--------------------------------------------------------------------------------
/memoization/caching/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/caching/__init__.py
--------------------------------------------------------------------------------
/memoization/caching/fifo_cache.py:
--------------------------------------------------------------------------------
1 | from threading import RLock
2 |
3 | from memoization.model import DummyWithable, CacheInfo
4 | import memoization.caching.general.keys_order_dependent as keys_toolkit_order_dependent
5 | import memoization.caching.general.keys_order_independent as keys_toolkit_order_independent
6 | import memoization.caching.general.values_with_ttl as values_toolkit_with_ttl
7 | import memoization.caching.general.values_without_ttl as values_toolkit_without_ttl
8 |
9 |
def get_caching_wrapper(user_function, max_size, ttl, algorithm, thread_safe, order_independent, custom_key_maker):
    """
    Get a caching wrapper for FIFO cache

    Entries are kept in a circular doubly-linked list in insertion order; when
    the cache is full, the oldest entry (first in) is evicted.

    :param user_function:     the function whose results are cached
    :param max_size:          maximum number of entries before eviction starts
    :param ttl:               per-entry time-to-live in seconds, or None for no expiry
    :param algorithm:         the CachingAlgorithmFlag reported by cache_info()
    :param thread_safe:       if True, guard all cache operations with an RLock
    :param order_independent: if True, keyword-argument order does not affect the key
    :param custom_key_maker:  optional user-supplied make_key(args, kwargs) function

    :return: the wrapper function with cache operations attached as attributes
    """

    cache = {}                                  # the cache to store function results
    key_argument_map = {}                       # mapping from cache keys to user function arguments
    sentinel = object()                         # sentinel object for the default value of map.get
    hits = misses = 0                           # hits and misses of the cache
    lock = RLock() if thread_safe else DummyWithable()  # ensure thread-safe
    if ttl is not None:                         # set up values toolkit according to ttl
        values_toolkit = values_toolkit_with_ttl
    else:
        values_toolkit = values_toolkit_without_ttl
    if custom_key_maker is not None:            # use custom make_key function
        make_key = custom_key_maker
    else:
        if order_independent:                   # set up keys toolkit according to order_independent
            make_key = keys_toolkit_order_independent.make_key
        else:
            make_key = keys_toolkit_order_dependent.make_key

    # for FIFO list: a circular doubly-linked list where each node is a 4-slot
    # list [prev, next, key, value]; the root itself holds no data.
    full = False                                # whether the cache is full or not
    root = []                                   # linked list
    root[:] = [root, root, None, None]          # initialize by pointing to self
    _PREV = 0                                   # index for the previous node
    _NEXT = 1                                   # index for the next node
    _KEY = 2                                    # index for the key
    _VALUE = 3                                  # index for the value

    def wrapper(*args, **kwargs):
        """The actual wrapper"""
        nonlocal hits, misses, root, full
        key = make_key(args, kwargs)
        cache_expired = False
        with lock:
            node = cache.get(key, sentinel)
            if node is not sentinel:
                if values_toolkit.is_cache_value_valid(node[_VALUE]):
                    hits += 1
                    return values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
                else:
                    cache_expired = True
        misses += 1
        # The lock is deliberately released while the user function runs, so a
        # concurrent call may insert the same key first; this is re-checked below.
        result = user_function(*args, **kwargs)
        with lock:
            if key in cache:
                if cache_expired:
                    # update cache with new ttl
                    cache[key][_VALUE] = values_toolkit.make_cache_value(result, ttl)
                else:
                    # result added to the cache while the lock was released
                    # no need to add again
                    pass
            elif full:
                # reuse the old root node instead of allocating a new one:
                # switch root to the oldest element in the cache
                old_root = root
                root = root[_NEXT]
                # keep references of root[_KEY] and root[_VALUE] to prevent arbitrary GC
                old_key = root[_KEY]
                old_value = root[_VALUE]
                # overwrite the content of the old root
                old_root[_KEY] = key
                old_root[_VALUE] = values_toolkit.make_cache_value(result, ttl)
                # clear the content of the new root
                root[_KEY] = root[_VALUE] = None
                # delete from cache
                del cache[old_key]
                del key_argument_map[old_key]
                # save the result to the cache
                cache[key] = old_root
                key_argument_map[key] = (args, kwargs)
            else:
                # add a node to the linked list
                last = root[_PREV]
                node = [last, root, key, values_toolkit.make_cache_value(result, ttl)]  # new node
                cache[key] = root[_PREV] = last[_NEXT] = node  # save result to the cache
                key_argument_map[key] = (args, kwargs)
                # check whether the cache is full
                full = (cache.__len__() >= max_size)
        return result

    def cache_clear():
        """Clear the cache and its statistics information"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            key_argument_map.clear()
            hits = misses = 0
            full = False
            # reset the linked list so the root points to itself again
            root[:] = [root, root, None, None]

    def cache_info():
        """
        Show statistics information

        :return: a CacheInfo object describing the cache
        """
        with lock:
            return CacheInfo(hits, misses, cache.__len__(), max_size, algorithm,
                             ttl, thread_safe, order_independent, custom_key_maker is not None)

    def cache_is_empty():
        """Return True if the cache contains no elements"""
        # NOTE(review): reads the dict length without taking the lock — presumably
        # relying on the atomicity of a single len() call in CPython; confirm.
        return cache.__len__() == 0

    def cache_is_full():
        """Return True if the cache is full"""
        return full

    def cache_contains_argument(function_arguments, alive_only=True):
        """
        Return True if the cache contains a cached item with the specified function call arguments

        :param function_arguments:  Can be a list, a tuple or a dict.
                                    - Full arguments: use a list to represent both positional arguments and keyword
                                      arguments. The list contains two elements, a tuple (positional arguments) and
                                      a dict (keyword arguments). For example,
                                        f(1, 2, 3, a=4, b=5, c=6)
                                      can be represented by:
                                        [(1, 2, 3), {'a': 4, 'b': 5, 'c': 6}]
                                    - Positional arguments only: when the arguments does not include keyword arguments,
                                      a tuple can be used to represent positional arguments. For example,
                                        f(1, 2, 3)
                                      can be represented by:
                                        (1, 2, 3)
                                    - Keyword arguments only: when the arguments does not include positional arguments,
                                      a dict can be used to represent keyword arguments. For example,
                                        f(a=4, b=5, c=6)
                                      can be represented by:
                                        {'a': 4, 'b': 5, 'c': 6}

        :param alive_only:          Whether to check alive cache item only (default to True).

        :return:                    True if the desired cached item is present, False otherwise.
        """
        # Normalize the three accepted shapes into (args_tuple, kwargs_dict).
        if isinstance(function_arguments, tuple):
            positional_argument_tuple = function_arguments
            keyword_argument_dict = {}
        elif isinstance(function_arguments, dict):
            positional_argument_tuple = ()
            keyword_argument_dict = function_arguments
        elif isinstance(function_arguments, list) and len(function_arguments) == 2:
            positional_argument_tuple, keyword_argument_dict = function_arguments
            if not isinstance(positional_argument_tuple, tuple) or not isinstance(keyword_argument_dict, dict):
                raise TypeError('Expected function_arguments to be a list containing a positional argument tuple '
                                'and a keyword argument dict')
        else:
            raise TypeError('Expected function_arguments to be a tuple, a dict, or a list with 2 elements')
        key = make_key(positional_argument_tuple, keyword_argument_dict)
        with lock:
            node = cache.get(key, sentinel)
            if node is not sentinel:
                return values_toolkit.is_cache_value_valid(node[_VALUE]) if alive_only else True
            return False

    def cache_contains_result(return_value, alive_only=True):
        """
        Return True if the cache contains a cache item with the specified user function return value. O(n) time
        complexity.

        :param return_value:        A return value coming from the user function.

        :param alive_only:          Whether to check alive cache item only (default to True).

        :return:                    True if the desired cached item is present, False otherwise.
        """
        with lock:
            # walk from the newest node (root.prev) back to the root
            node = root[_PREV]
            while node is not root:
                is_alive = values_toolkit.is_cache_value_valid(node[_VALUE])
                cache_result = values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
                if cache_result == return_value:
                    return is_alive if alive_only else True
                node = node[_PREV]
            return False

    def cache_for_each(consumer):
        """
        Perform the given action for each cache element in an order determined by the algorithm until all
        elements have been processed or the action throws an error

        :param consumer:            an action function to process the cache elements. Must have 3 arguments:
                                      def consumer(user_function_arguments, user_function_result, is_alive): ...
                                    user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
                                      args is a tuple holding positional arguments.
                                      kwargs is a dict holding keyword arguments.
                                      for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
                                      user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
                                    user_function_result is a return value coming from the user function.
                                    is_alive is a boolean value indicating whether the cache is still alive
                                    (if a TTL is given).
        """
        with lock:
            node = root[_PREV]
            while node is not root:
                is_alive = values_toolkit.is_cache_value_valid(node[_VALUE])
                user_function_result = values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
                user_function_arguments = key_argument_map[node[_KEY]]
                consumer(user_function_arguments, user_function_result, is_alive)
                node = node[_PREV]

    def cache_arguments():
        """
        Get user function arguments of all alive cache elements

        see also: cache_items()

        Example:
            @cached
            def f(a, b, c, d):
                ...
            f(1, 2, c=3, d=4)
            for argument in f.cache_arguments():
                print(argument)  # ((1, 2), {'c': 3, 'd': 4})

        :return: an iterable which iterates through a list of a tuple containing a tuple (positional arguments) and
                 a dict (keyword arguments)
        """
        # NOTE: this is a generator, so the lock is held only while it is being consumed.
        with lock:
            node = root[_PREV]
            while node is not root:
                if values_toolkit.is_cache_value_valid(node[_VALUE]):
                    yield key_argument_map[node[_KEY]]
                node = node[_PREV]

    def cache_results():
        """
        Get user function return values of all alive cache elements

        see also: cache_items()

        Example:
            @cached
            def f(a):
                return a
            f('hello')
            for result in f.cache_results():
                print(result)  # 'hello'

        :return: an iterable which iterates through a list of user function result (of any type)
        """
        with lock:
            node = root[_PREV]
            while node is not root:
                if values_toolkit.is_cache_value_valid(node[_VALUE]):
                    yield values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
                node = node[_PREV]

    def cache_items():
        """
        Get cache items, i.e. entries of all alive cache elements, in the form of (argument, result).

        argument: a tuple containing a tuple (positional arguments) and a dict (keyword arguments).
        result: a user function return value of any type.

        see also: cache_arguments(), cache_results().

        Example:
            @cached
            def f(a, b, c, d):
                return 'the answer is ' + str(a)
            f(1, 2, c=3, d=4)
            for argument, result in f.cache_items():
                print(argument)  # ((1, 2), {'c': 3, 'd': 4})
                print(result)    # 'the answer is 1'

        :return: an iterable which iterates through a list of (argument, result) entries
        """
        with lock:
            node = root[_PREV]
            while node is not root:
                if values_toolkit.is_cache_value_valid(node[_VALUE]):
                    yield key_argument_map[node[_KEY]], values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
                node = node[_PREV]

    def cache_remove_if(predicate):
        """
        Remove all cache elements that satisfy the given predicate

        :param predicate:           a predicate function to judge whether the cache elements should be removed. Must
                                    have 3 arguments, and returns True or False:
                                      def predicate(user_function_arguments, user_function_result, is_alive): ...
                                    user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
                                      args is a tuple holding positional arguments.
                                      kwargs is a dict holding keyword arguments.
                                      for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
                                      user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
                                    user_function_result is a return value coming from the user function.
                                    is_alive is a boolean value indicating whether the cache is still alive
                                    (if a TTL is given).

        :return:                    True if at least one element is removed, False otherwise.
        """
        nonlocal full
        removed = False
        with lock:
            node = root[_PREV]
            while node is not root:
                is_alive = values_toolkit.is_cache_value_valid(node[_VALUE])
                user_function_result = values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
                user_function_arguments = key_argument_map[node[_KEY]]
                if predicate(user_function_arguments, user_function_result, is_alive):
                    removed = True
                    node_prev = node[_PREV]
                    # relink pointers of node.prev.next and node.next.prev
                    node_prev[_NEXT] = node[_NEXT]
                    node[_NEXT][_PREV] = node_prev
                    # clear the content of this node
                    key = node[_KEY]
                    node[_KEY] = node[_VALUE] = None
                    # delete from cache
                    del cache[key]
                    del key_argument_map[key]
                    # check whether the cache is full
                    full = (cache.__len__() >= max_size)
                    node = node_prev
                else:
                    node = node[_PREV]
            return removed

    # expose operations to wrapper
    wrapper.cache_clear = cache_clear
    wrapper.cache_info = cache_info
    wrapper.cache_is_empty = cache_is_empty
    wrapper.cache_is_full = cache_is_full
    wrapper.cache_contains_argument = cache_contains_argument
    wrapper.cache_contains_result = cache_contains_result
    wrapper.cache_for_each = cache_for_each
    wrapper.cache_arguments = cache_arguments
    wrapper.cache_results = cache_results
    wrapper.cache_items = cache_items
    wrapper.cache_remove_if = cache_remove_if
    wrapper._cache = cache
    wrapper._fifo_root = root
    wrapper._root_name = '_fifo_root'

    return wrapper
347 |
--------------------------------------------------------------------------------
/memoization/caching/fifo_cache.pyi:
--------------------------------------------------------------------------------
1 | from memoization.type.caching.cache import get_caching_wrapper as get_caching_wrapper
2 |
--------------------------------------------------------------------------------
/memoization/caching/general/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/caching/general/__init__.py
--------------------------------------------------------------------------------
/memoization/caching/general/keys_order_dependent.py:
--------------------------------------------------------------------------------
1 | from memoization.model import HashedList
2 |
3 |
def make_key(args, kwargs, kwargs_mark=(object(), )):
    """
    Build a cache key from positional and keyword arguments.

    Keyword arguments are appended in their original (insertion) order, set
    off from the positional part by a unique sentinel marker.  Unhashable
    arguments fall back to the string representation of the key.
    """
    key = args
    if kwargs:
        key += kwargs_mark
        for name, value in kwargs.items():
            key += (name, value)
    try:
        hash_value = hash(key)
    except TypeError:  # process unhashable types
        return str(key)
    return HashedList(key, hash_value)
19 |
--------------------------------------------------------------------------------
/memoization/caching/general/keys_order_dependent.pyi:
--------------------------------------------------------------------------------
1 | from memoization.type.caching.general.keys import make_key as make_key
2 |
--------------------------------------------------------------------------------
/memoization/caching/general/keys_order_independent.py:
--------------------------------------------------------------------------------
1 | from memoization.model import HashedList
2 |
3 |
def make_key(args, kwargs, kwargs_mark=(object(), )):
    """
    Build a cache key that ignores keyword-argument order.

    Keyword arguments are appended sorted by name, so f(a=1, b=2) and
    f(b=2, a=1) produce the same key.  Unhashable arguments fall back to
    the string representation of the key.
    """
    key = args
    if kwargs:
        key += kwargs_mark
        for name in sorted(kwargs):
            key += (name, kwargs[name])
    try:
        hash_value = hash(key)
    except TypeError:  # process unhashable types
        return str(key)
    return HashedList(key, hash_value)
19 |
--------------------------------------------------------------------------------
/memoization/caching/general/keys_order_independent.pyi:
--------------------------------------------------------------------------------
1 | from memoization.type.caching.general.keys import make_key as make_key
2 |
--------------------------------------------------------------------------------
/memoization/caching/general/values_with_ttl.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 |
def make_cache_value(result, ttl):
    """Pack a result with its absolute expiry time (now + ttl seconds)."""
    expires_at = time.time() + ttl
    return result, expires_at
6 |
7 |
def is_cache_value_valid(value):
    """Return True while the (result, expiry) pair has not passed its expiry time."""
    _, expires_at = value
    return time.time() < expires_at
10 |
11 |
def retrieve_result_from_cache_value(value):
    """Extract the user function result from a (result, expiry) pair."""
    result, _ = value
    return result
14 |
--------------------------------------------------------------------------------
/memoization/caching/general/values_with_ttl.pyi:
--------------------------------------------------------------------------------
# Type stubs for the TTL-aware cache value toolkit.
from typing import Optional, Tuple, TypeVar

T = TypeVar('T')
# A cache value pairs the user function result with its absolute expiry time.
CacheValue = Tuple[T, float]

def make_cache_value(result: T, ttl: Optional[float]) -> CacheValue[T]: ...
def is_cache_value_valid(value: CacheValue[T]) -> bool: ...
def retrieve_result_from_cache_value(value: CacheValue[T]) -> T: ...
9 |
--------------------------------------------------------------------------------
/memoization/caching/general/values_without_ttl.py:
--------------------------------------------------------------------------------
1 |
def make_cache_value(result, ttl):
    """Store the bare result; `ttl` is accepted for interface parity but ignored."""
    return result
4 |
5 |
def is_cache_value_valid(value):
    """Without a TTL, every cached value is always valid."""
    return True
8 |
9 |
def retrieve_result_from_cache_value(value):
    """Without a TTL the cache value is the result itself."""
    return value
12 |
--------------------------------------------------------------------------------
/memoization/caching/general/values_without_ttl.pyi:
--------------------------------------------------------------------------------
# Type stubs for the TTL-free cache value toolkit: the cache value is the result itself.
from typing import Optional, TypeVar

T = TypeVar('T')

def make_cache_value(result: T, ttl: Optional[float]) -> T: ...
def is_cache_value_valid(value: T) -> bool: ...
def retrieve_result_from_cache_value(value: T) -> T: ...
8 |
--------------------------------------------------------------------------------
/memoization/caching/lfu_cache.py:
--------------------------------------------------------------------------------
1 | from threading import RLock
2 |
3 | from memoization.model import DummyWithable, CacheInfo
4 | import memoization.caching.general.keys_order_dependent as keys_toolkit_order_dependent
5 | import memoization.caching.general.keys_order_independent as keys_toolkit_order_independent
6 | import memoization.caching.general.values_with_ttl as values_toolkit_with_ttl
7 | import memoization.caching.general.values_without_ttl as values_toolkit_without_ttl
8 |
9 |
10 | def get_caching_wrapper(user_function, max_size, ttl, algorithm, thread_safe, order_independent, custom_key_maker):
11 | """Get a caching wrapper for LFU cache"""
12 |
13 | cache = {} # the cache to store function results
14 | key_argument_map = {} # mapping from cache keys to user function arguments
15 | sentinel = object() # sentinel object for the default value of map.get
16 | hits = misses = 0 # hits and misses of the cache
17 | lock = RLock() if thread_safe else DummyWithable() # ensure thread-safe
18 | if ttl is not None: # set up values toolkit according to ttl
19 | values_toolkit = values_toolkit_with_ttl
20 | else:
21 | values_toolkit = values_toolkit_without_ttl
22 | if custom_key_maker is not None: # use custom make_key function
23 | make_key = custom_key_maker
24 | else:
25 | if order_independent: # set up keys toolkit according to order_independent
26 | make_key = keys_toolkit_order_independent.make_key
27 | else:
28 | make_key = keys_toolkit_order_dependent.make_key
29 | lfu_freq_list_root = _FreqNode.root() # LFU frequency list root
30 |
31 | def wrapper(*args, **kwargs):
32 | """The actual wrapper"""
33 | nonlocal hits, misses
34 | key = make_key(args, kwargs)
35 | cache_expired = False
36 | with lock:
37 | result = _access_lfu_cache(cache, key, sentinel)
38 | if result is not sentinel:
39 | if values_toolkit.is_cache_value_valid(result):
40 | hits += 1
41 | return values_toolkit.retrieve_result_from_cache_value(result)
42 | else:
43 | cache_expired = True
44 | misses += 1
45 | result = user_function(*args, **kwargs)
46 | with lock:
47 | if key in cache:
48 | if cache_expired:
49 | # update cache with new ttl
50 | cache[key].value = values_toolkit.make_cache_value(result, ttl)
51 | else:
52 | # result added to the cache while the lock was released
53 | # no need to add again
54 | pass
55 | else:
56 | user_function_arguments = (args, kwargs)
57 | cache_value = values_toolkit.make_cache_value(result, ttl)
58 | _insert_into_lfu_cache(cache, key_argument_map, user_function_arguments, key, cache_value,
59 | lfu_freq_list_root, max_size)
60 | return result
61 |
62 | def cache_clear():
63 | """Clear the cache and its statistics information"""
64 | nonlocal hits, misses, lfu_freq_list_root
65 | with lock:
66 | cache.clear()
67 | key_argument_map.clear()
68 | hits = misses = 0
69 | lfu_freq_list_root.prev = lfu_freq_list_root.next = lfu_freq_list_root
70 |
71 | def cache_info():
72 | """
73 | Show statistics information
74 |
75 | :return: a CacheInfo object describing the cache
76 | """
77 | with lock:
78 | return CacheInfo(hits, misses, cache.__len__(), max_size, algorithm,
79 | ttl, thread_safe, order_independent, custom_key_maker is not None)
80 |
81 | def cache_is_empty():
82 | """Return True if the cache contains no elements"""
83 | return cache.__len__() == 0
84 |
85 | def cache_is_full():
86 | """Return True if the cache is full"""
87 | return cache.__len__() >= max_size
88 |
89 | def cache_contains_argument(function_arguments, alive_only=True):
90 | """
91 | Return True if the cache contains a cached item with the specified function call arguments
92 |
93 | :param function_arguments: Can be a list, a tuple or a dict.
94 | - Full arguments: use a list to represent both positional arguments and keyword
95 | arguments. The list contains two elements, a tuple (positional arguments) and
96 | a dict (keyword arguments). For example,
97 | f(1, 2, 3, a=4, b=5, c=6)
98 | can be represented by:
99 | [(1, 2, 3), {'a': 4, 'b': 5, 'c': 6}]
100 | - Positional arguments only: when the arguments does not include keyword arguments,
101 | a tuple can be used to represent positional arguments. For example,
102 | f(1, 2, 3)
103 | can be represented by:
104 | (1, 2, 3)
105 | - Keyword arguments only: when the arguments does not include positional arguments,
106 | a dict can be used to represent keyword arguments. For example,
107 | f(a=4, b=5, c=6)
108 | can be represented by:
109 | {'a': 4, 'b': 5, 'c': 6}
110 |
111 | :param alive_only: Whether to check alive cache item only (default to True).
112 |
113 | :return: True if the desired cached item is present, False otherwise.
114 | """
115 | if isinstance(function_arguments, tuple):
116 | positional_argument_tuple = function_arguments
117 | keyword_argument_dict = {}
118 | elif isinstance(function_arguments, dict):
119 | positional_argument_tuple = ()
120 | keyword_argument_dict = function_arguments
121 | elif isinstance(function_arguments, list) and len(function_arguments) == 2:
122 | positional_argument_tuple, keyword_argument_dict = function_arguments
123 | if not isinstance(positional_argument_tuple, tuple) or not isinstance(keyword_argument_dict, dict):
124 | raise TypeError('Expected function_arguments to be a list containing a positional argument tuple '
125 | 'and a keyword argument dict')
126 | else:
127 | raise TypeError('Expected function_arguments to be a tuple, a dict, or a list with 2 elements')
128 | key = make_key(positional_argument_tuple, keyword_argument_dict)
129 | with lock:
130 | cache_node = cache.get(key, sentinel)
131 | if cache_node is not sentinel:
132 | return values_toolkit.is_cache_value_valid(cache_node.value) if alive_only else True
133 | return False
134 |
135 | def cache_contains_result(return_value, alive_only=True):
136 | """
137 | Return True if the cache contains a cache item with the specified user function return value. O(n) time
138 | complexity.
139 |
140 | :param return_value: A return value coming from the user function.
141 |
142 | :param alive_only: Whether to check alive cache item only (default to True).
143 |
144 | :return: True if the desired cached item is present, False otherwise.
145 | """
146 | with lock:
147 | freq_node = lfu_freq_list_root.prev
148 | while freq_node != lfu_freq_list_root:
149 | cache_head = freq_node.cache_head
150 | cache_node = cache_head.next
151 | while cache_node != cache_head:
152 | is_alive = values_toolkit.is_cache_value_valid(cache_node.value)
153 | cache_result = values_toolkit.retrieve_result_from_cache_value(cache_node.value)
154 | if cache_result == return_value:
155 | return is_alive if alive_only else True
156 | cache_node = cache_node.next
157 | freq_node = freq_node.prev
158 | return False
159 |
160 | def cache_for_each(consumer):
161 | """
162 | Perform the given action for each cache element in an order determined by the algorithm until all
163 | elements have been processed or the action throws an error
164 |
165 | :param consumer: an action function to process the cache elements. Must have 3 arguments:
166 | def consumer(user_function_arguments, user_function_result, is_alive): ...
167 | user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
168 | args is a tuple holding positional arguments.
169 | kwargs is a dict holding keyword arguments.
170 | for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
171 | user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
172 | user_function_result is a return value coming from the user function.
173 | cache_result is a return value coming from the user function.
174 | is_alive is a boolean value indicating whether the cache is still alive
175 | (if a TTL is given).
176 | """
177 | with lock:
178 | freq_node = lfu_freq_list_root.prev
179 | while freq_node != lfu_freq_list_root:
180 | cache_head = freq_node.cache_head
181 | cache_node = cache_head.next
182 | while cache_node != cache_head:
183 | is_alive = values_toolkit.is_cache_value_valid(cache_node.value)
184 | user_function_result = values_toolkit.retrieve_result_from_cache_value(cache_node.value)
185 | user_function_arguments = key_argument_map[cache_node.key]
186 | consumer(user_function_arguments, user_function_result, is_alive)
187 | cache_node = cache_node.next
188 | freq_node = freq_node.prev
189 |
190 | def cache_arguments():
191 | """
192 | Get user function arguments of all alive cache elements
193 |
194 | see also: cache_items()
195 |
196 | Example:
197 | @cached
198 | def f(a, b, c, d):
199 | ...
200 | f(1, 2, c=3, d=4)
201 | for argument in f.cache_arguments():
202 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
203 |
204 | :return: an iterable which iterates through a list of a tuple containing a tuple (positional arguments) and
205 | a dict (keyword arguments)
206 | """
207 | with lock:
208 | freq_node = lfu_freq_list_root.prev
209 | while freq_node != lfu_freq_list_root:
210 | cache_head = freq_node.cache_head
211 | cache_node = cache_head.next
212 | while cache_node != cache_head:
213 | if values_toolkit.is_cache_value_valid(cache_node.value):
214 | yield key_argument_map[cache_node.key]
215 | cache_node = cache_node.next
216 | freq_node = freq_node.prev
217 |
218 | def cache_results():
219 | """
220 | Get user function return values of all alive cache elements
221 |
222 | see also: cache_items()
223 |
224 | Example:
225 | @cached
226 | def f(a):
227 | return a
228 | f('hello')
229 | for result in f.cache_results():
230 | print(result) # 'hello'
231 |
232 | :return: an iterable which iterates through a list of user function result (of any type)
233 | """
234 | with lock:
235 | freq_node = lfu_freq_list_root.prev
236 | while freq_node != lfu_freq_list_root:
237 | cache_head = freq_node.cache_head
238 | cache_node = cache_head.next
239 | while cache_node != cache_head:
240 | if values_toolkit.is_cache_value_valid(cache_node.value):
241 | yield values_toolkit.retrieve_result_from_cache_value(cache_node.value)
242 | cache_node = cache_node.next
243 | freq_node = freq_node.prev
244 |
245 | def cache_items():
246 | """
247 | Get cache items, i.e. entries of all alive cache elements, in the form of (argument, result).
248 |
249 | argument: a tuple containing a tuple (positional arguments) and a dict (keyword arguments).
250 | result: a user function return value of any type.
251 |
252 | see also: cache_arguments(), cache_results().
253 |
254 | Example:
255 | @cached
256 | def f(a, b, c, d):
257 | return 'the answer is ' + str(a)
258 | f(1, 2, c=3, d=4)
259 | for argument, result in f.cache_items():
260 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
261 | print(result) # 'the answer is 1'
262 |
263 | :return: an iterable which iterates through a list of (argument, result) entries
264 | """
265 | with lock:
266 | freq_node = lfu_freq_list_root.prev
267 | while freq_node != lfu_freq_list_root:
268 | cache_head = freq_node.cache_head
269 | cache_node = cache_head.next
270 | while cache_node != cache_head:
271 | if values_toolkit.is_cache_value_valid(cache_node.value):
272 | yield (key_argument_map[cache_node.key],
273 | values_toolkit.retrieve_result_from_cache_value(cache_node.value))
274 | cache_node = cache_node.next
275 | freq_node = freq_node.prev
276 |
    def cache_remove_if(predicate):
        """
        Remove all cache elements that satisfy the given predicate

        :param predicate: a predicate function to judge whether the cache elements should be removed. Must
                          have 3 arguments, and returns True or False:
                              def predicate(user_function_arguments, user_function_result, is_alive): ...
                          user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
                              args is a tuple holding positional arguments.
                              kwargs is a dict holding keyword arguments.
                              for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
                              user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
                          user_function_result is a return value coming from the user function.
                          is_alive is a boolean value indicating whether the cache is still alive
                          (if a TTL is given).

        :return: True if at least one element is removed, False otherwise.
        """
        removed = False
        with lock:
            # iterate the frequency list backwards; each frequency node owns its own cache list
            freq_node = lfu_freq_list_root.prev
            while freq_node != lfu_freq_list_root:
                cache_head = freq_node.cache_head
                cache_node = cache_head.next
                removed_under_this_freq_node = False
                while cache_node != cache_head:
                    is_alive = values_toolkit.is_cache_value_valid(cache_node.value)
                    user_function_result = values_toolkit.retrieve_result_from_cache_value(cache_node.value)
                    user_function_arguments = key_argument_map[cache_node.key]
                    if predicate(user_function_arguments, user_function_result, is_alive):
                        removed = removed_under_this_freq_node = True
                        # capture the successor before destroy() clears the node's links
                        next_cache_node = cache_node.next
                        del cache[cache_node.key]  # delete from cache
                        del key_argument_map[cache_node.key]
                        cache_node.destroy()  # modify references, drop this cache node
                        cache_node = next_cache_node
                    else:
                        cache_node = cache_node.next
                # check whether only one cache node is left
                if removed_under_this_freq_node and freq_node.cache_head.next == freq_node.cache_head:
                    # Getting here means that we just deleted the only data node in the cache list
                    # Note: there is still an empty sentinel node
                    # We then need to destroy the sentinel node and its parent frequency node too
                    prev_freq_node = freq_node.prev
                    freq_node.cache_head.destroy()
                    freq_node.destroy()
                    freq_node = prev_freq_node
                else:
                    freq_node = freq_node.prev
        return removed
328 |
    # expose operations to wrapper
    wrapper.cache_clear = cache_clear
    wrapper.cache_info = cache_info
    wrapper.cache_is_empty = cache_is_empty
    wrapper.cache_is_full = cache_is_full
    wrapper.cache_contains_argument = cache_contains_argument
    wrapper.cache_contains_result = cache_contains_result
    wrapper.cache_for_each = cache_for_each
    wrapper.cache_arguments = cache_arguments
    wrapper.cache_results = cache_results
    wrapper.cache_items = cache_items
    wrapper.cache_remove_if = cache_remove_if
    # internal handles (implementation details, exposed for inspection/testing)
    wrapper._cache = cache
    wrapper._lfu_root = lfu_freq_list_root
    wrapper._root_name = '_lfu_root'

    return wrapper
346 |
347 |
348 | ################################################################################################################################
349 | # LFU Cache
350 | # Least Frequently Used Cache
351 | #
352 | # O(1) implementation - please refer to the following documents for more details:
353 | # http://dhruvbird.com/lfu.pdf
354 | # https://medium.com/@epicshane/a-python-implementation-of-lfu-least-frequently-used-cache-with-o-1-time-complexity-e16b34a3c49b
355 | ################################################################################################################################
356 |
357 |
358 | class _CacheNode(object):
359 | """
360 | Cache Node for LFU Cache
361 | """
362 |
363 | __slots__ = 'prev', 'next', 'parent', 'key', 'value', '__weakref__'
364 |
365 | def __init__(self, prev=None, next=None, parent=None, key=None, value=None):
366 | self.prev = prev
367 | self.next = next
368 | self.parent = parent
369 | self.key = key
370 | self.value = value
371 |
372 | @classmethod
373 | def root(cls, parent=None, key=None, value=None):
374 | """
375 | Generate an empty root node
376 | """
377 | node = cls(None, None, parent, key, value)
378 | node.prev = node.next = node
379 | return node
380 |
381 | def destroy(self):
382 | """
383 | Destroy the current cache node
384 | """
385 | self.prev.next = self.next
386 | self.next.prev = self.prev
387 | if self.parent.cache_head == self:
388 | self.parent.cache_head = None
389 | self.prev = self.next = self.parent = self.key = self.value = None
390 |
391 |
392 | class _FreqNode(object):
393 | """
394 | Frequency Node for LFU Cache
395 | """
396 |
397 | __slots__ = 'prev', 'next', 'frequency', 'cache_head', '__weakref__'
398 |
399 | def __init__(self, prev=None, next=None, frequency=None, cache_head=None):
400 | self.prev = prev
401 | self.next = next
402 | self.frequency = frequency
403 | self.cache_head = cache_head
404 |
405 | @classmethod
406 | def root(cls, frequency=None, cache_head=None):
407 | """
408 | Generate an empty root node
409 | """
410 | node = cls(None, None, frequency, cache_head)
411 | node.prev = node.next = node
412 | return node
413 |
414 | def destroy(self):
415 | """
416 | Destroy the current frequency node
417 | """
418 | self.prev.next = self.next
419 | self.next.prev = self.prev
420 | self.prev = self.next = self.cache_head = None
421 |
422 |
def _insert_into_lfu_cache(cache, key_argument_map, user_function_arguments, key, value, root, max_size):
    """
    Insert a new entry into the LFU structures, evicting the last element of the
    lowest-frequency bucket when the cache is already full.

    :param cache: dict mapping cache keys to _CacheNode instances
    :param key_argument_map: dict mapping cache keys to user function arguments
    :param user_function_arguments: (args, kwargs) tuple recorded for this key
    :param key: the cache key of the new entry
    :param value: the cache value of the new entry
    :param root: the sentinel root node of the frequency list
    :param max_size: the maximum number of entries the cache may hold
    """
    first_freq_node = root.next
    if cache.__len__() >= max_size:
        # The cache is full

        if first_freq_node.frequency != 1:
            # The first element in frequency list has its frequency other than 1 (> 1)
            # We need to drop the last element in the cache list of the first frequency node
            # and then insert a new frequency node, attaching an empty cache node together with
            # another cache node with data to the frequency node

            # Find the target
            cache_head = first_freq_node.cache_head
            last_node = cache_head.prev

            # Modify references
            last_node.prev.next = cache_head
            cache_head.prev = last_node.prev

            # Drop the last node; hold the old data to prevent arbitrary GC
            old_key = last_node.key
            old_value = last_node.value
            last_node.destroy()

            if first_freq_node.cache_head.next == first_freq_node.cache_head:
                # Getting here means that we just deleted the only data node in the cache list
                # under the first frequency list
                # Note: there is still an empty sentinel node
                # We then need to destroy the sentinel node and its parent frequency node too
                first_freq_node.cache_head.destroy()
                first_freq_node.destroy()
                first_freq_node = root.next  # update

            # Delete from cache
            del cache[old_key]
            del key_argument_map[old_key]

            # Prepare a new frequency node, a cache root node and a cache data node
            empty_cache_root = _CacheNode.root()
            freq_node = _FreqNode(root, first_freq_node, 1, empty_cache_root)
            cache_node = _CacheNode(empty_cache_root, empty_cache_root, freq_node, key, value)
            empty_cache_root.parent = freq_node

            # Modify references
            root.next.prev = root.next = freq_node
            empty_cache_root.prev = empty_cache_root.next = cache_node

        else:
            # We can find the last element in the cache list under the first frequency list
            # Moving it to the head and replace the stored data with a new key and a new value
            # This is more efficient

            # Find the target
            cache_head = first_freq_node.cache_head
            manipulated_node = cache_head.prev

            # Modify references
            manipulated_node.prev.next = cache_head
            cache_head.prev = manipulated_node.prev
            manipulated_node.next = cache_head.next
            manipulated_node.prev = cache_head
            cache_head.next.prev = cache_head.next = manipulated_node

            # Replace the data; hold the old data to prevent arbitrary GC
            old_key = manipulated_node.key
            old_value = manipulated_node.value
            manipulated_node.key = key
            manipulated_node.value = value

            # use another name so it can be accessed later
            cache_node = manipulated_node

            # Delete from cache
            del cache[old_key]
            del key_argument_map[old_key]
    else:
        # The cache is not full

        if first_freq_node.frequency != 1:
            # The first element in frequency list has its frequency other than 1 (> 1)
            # Creating a new node in frequency list with 1 as its frequency required
            # We also need to create a new cache list and attach it to this new node

            # Create a cache root and a frequency node
            cache_root = _CacheNode.root()
            freq_node = _FreqNode(root, first_freq_node, 1, cache_root)
            cache_root.parent = freq_node

            # Create another cache node to store data
            cache_node = _CacheNode(cache_root, cache_root, freq_node, key, value)

            # Modify references
            cache_root.prev = cache_root.next = cache_node
            first_freq_node.prev = root.next = freq_node  # note: DO NOT swap "=", because first_freq_node == root.next

        else:
            # We create a new cache node in the cache list
            # under the frequency node with frequency 1

            # Create a cache node and store data in it
            cache_head = first_freq_node.cache_head
            cache_node = _CacheNode(cache_head, cache_head.next, first_freq_node, key, value)

            # Modify references
            cache_node.prev.next = cache_node.next.prev = cache_node

    # Finally, insert the data into the cache
    cache[key] = cache_node
    key_argument_map[key] = user_function_arguments
532 |
533 |
534 | def _access_lfu_cache(cache, key, sentinel):
535 | if key in cache:
536 | cache_node = cache[key]
537 | else:
538 | # Key does not exist
539 | # Access failed
540 | return sentinel
541 | freq_node = cache_node.parent
542 | target_frequency = freq_node.frequency + 1
543 | if freq_node.next.frequency != target_frequency:
544 | # The next node on the frequency list has a frequency value different from
545 | # (the frequency of the current node) + 1, which means we need to construct
546 | # a new frequency node and an empty cache root node
547 | # Then we move the current node to the newly created cache list
548 |
549 | # Create a cache root and a frequency root
550 | cache_root = _CacheNode.root()
551 | new_freq_node = _FreqNode(freq_node, freq_node.next, target_frequency, cache_root)
552 | cache_root.parent = new_freq_node
553 |
554 | # Modify references
555 | cache_node.prev.next = cache_node.next
556 | cache_node.next.prev = cache_node.prev
557 | cache_node.prev = cache_node.next = cache_root
558 | cache_root.prev = cache_root.next = cache_node
559 | new_freq_node.next.prev = new_freq_node.prev.next = new_freq_node
560 | cache_node.parent = cache_root.parent
561 |
562 | else:
563 | # We can move the cache node to the cache list of the next node on the frequency list
564 |
565 | # Find the head element of the next cache list
566 | next_cache_head = freq_node.next.cache_head
567 |
568 | # Modify references
569 | cache_node.prev.next = cache_node.next
570 | cache_node.next.prev = cache_node.prev
571 | cache_node.next = next_cache_head.next
572 | cache_node.prev = next_cache_head
573 | next_cache_head.next.prev = next_cache_head.next = cache_node
574 | cache_node.parent = freq_node.next
575 |
576 | # check the status of the current frequency node
577 | if freq_node.cache_head.next == freq_node.cache_head:
578 | # Getting here means that we just moved away the only data node in the cache list
579 | # Note: there is still an empty sentinel node
580 | # We then need to destroy the sentinel node and its parent frequency node too
581 | freq_node.cache_head.destroy()
582 | freq_node.destroy()
583 |
584 | return cache_node.value
585 |
--------------------------------------------------------------------------------
/memoization/caching/lfu_cache.pyi:
--------------------------------------------------------------------------------
1 | from memoization.type.caching.cache import get_caching_wrapper as get_caching_wrapper
2 |
--------------------------------------------------------------------------------
/memoization/caching/lru_cache.py:
--------------------------------------------------------------------------------
1 | from threading import RLock
2 |
3 | from memoization.model import DummyWithable, CacheInfo
4 | import memoization.caching.general.keys_order_dependent as keys_toolkit_order_dependent
5 | import memoization.caching.general.keys_order_independent as keys_toolkit_order_independent
6 | import memoization.caching.general.values_with_ttl as values_toolkit_with_ttl
7 | import memoization.caching.general.values_without_ttl as values_toolkit_without_ttl
8 |
9 |
def get_caching_wrapper(user_function, max_size, ttl, algorithm, thread_safe, order_independent, custom_key_maker):
    """
    Get a caching wrapper for LRU cache

    :param user_function: the function whose results are memoized
    :param max_size: capacity of the LRU cache (number of entries)
    :param ttl: time-to-live in seconds for cached values, or None to disable expiration
    :param algorithm: the algorithm identifier, reported back through cache_info()
    :param thread_safe: when True, cache mutation is guarded by an RLock
    :param order_independent: when True, keyword argument order does not affect the cache key
    :param custom_key_maker: user-provided key-making function, or None to use the built-in one
    """

    cache = {}                                                  # the cache to store function results
    key_argument_map = {}                                       # mapping from cache keys to user function arguments
    sentinel = object()                                         # sentinel object for the default value of map.get
    hits = misses = 0                                           # hits and misses of the cache
    lock = RLock() if thread_safe else DummyWithable()          # ensure thread-safe
    if ttl is not None:                                         # set up values toolkit according to ttl
        values_toolkit = values_toolkit_with_ttl
    else:
        values_toolkit = values_toolkit_without_ttl
    if custom_key_maker is not None:                            # use custom make_key function
        make_key = custom_key_maker
    else:
        if order_independent:                                   # set up keys toolkit according to order_independent
            make_key = keys_toolkit_order_independent.make_key
        else:
            make_key = keys_toolkit_order_dependent.make_key

    # for LRU list
    full = False                                                # whether the cache is full or not
    root = []                                                   # linked list
    root[:] = [root, root, None, None]                          # initialize by pointing to self
    _PREV = 0                                                   # index for the previous node
    _NEXT = 1                                                   # index for the next node
    _KEY = 2                                                    # index for the key
    _VALUE = 3                                                  # index for the value
38 |
    def wrapper(*args, **kwargs):
        """
        The actual wrapper: serve the result from the LRU cache when possible,
        otherwise call the user function and cache its result.
        """
        nonlocal hits, misses, root, full
        key = make_key(args, kwargs)
        cache_expired = False
        with lock:
            node = cache.get(key, sentinel)
            if node is not sentinel:
                # move the node to the front of the list
                node_prev, node_next, _, result = node
                node_prev[_NEXT] = node_next
                node_next[_PREV] = node_prev
                node[_PREV] = root[_PREV]
                node[_NEXT] = root
                root[_PREV][_NEXT] = node
                root[_PREV] = node
                if values_toolkit.is_cache_value_valid(node[_VALUE]):
                    # update statistics
                    hits += 1
                    return values_toolkit.retrieve_result_from_cache_value(result)
                else:
                    # value expired (TTL): fall through and recompute it below
                    cache_expired = True
            misses += 1
        # call the user function outside the lock so concurrent callers are not serialized
        result = user_function(*args, **kwargs)
        with lock:
            if key in cache:
                if cache_expired:
                    # update cache with new ttl
                    cache[key][_VALUE] = values_toolkit.make_cache_value(result, ttl)
                else:
                    # result added to the cache while the lock was released
                    # no need to add again
                    pass
            elif full:
                # switch root to the oldest element in the cache
                old_root = root
                root = root[_NEXT]
                # keep references of root[_KEY] and root[_VALUE] to prevent arbitrary GC
                old_key = root[_KEY]
                old_value = root[_VALUE]
                # overwrite the content of the old root
                old_root[_KEY] = key
                old_root[_VALUE] = values_toolkit.make_cache_value(result, ttl)
                # clear the content of the new root
                root[_KEY] = root[_VALUE] = None
                # delete from cache
                del cache[old_key]
                del key_argument_map[old_key]
                # save the result to the cache
                cache[key] = old_root
                key_argument_map[key] = (args, kwargs)
            else:
                # add a node to the linked list
                last = root[_PREV]
                node = [last, root, key, values_toolkit.make_cache_value(result, ttl)]  # new node
                cache[key] = root[_PREV] = last[_NEXT] = node  # save result to the cache
                key_argument_map[key] = (args, kwargs)
                # check whether the cache is full
                full = (cache.__len__() >= max_size)
        return result
99 |
100 | def cache_clear():
101 | """Clear the cache and its statistics information"""
102 | nonlocal hits, misses, full
103 | with lock:
104 | cache.clear()
105 | key_argument_map.clear()
106 | hits = misses = 0
107 | full = False
108 | root[:] = [root, root, None, None]
109 |
110 | def cache_info():
111 | """
112 | Show statistics information
113 |
114 | :return: a CacheInfo object describing the cache
115 | """
116 | with lock:
117 | return CacheInfo(hits, misses, cache.__len__(), max_size, algorithm,
118 | ttl, thread_safe, order_independent, custom_key_maker is not None)
119 |
120 | def cache_is_empty():
121 | """Return True if the cache contains no elements"""
122 | return cache.__len__() == 0
123 |
124 | def cache_is_full():
125 | """Return True if the cache is full"""
126 | return full
127 |
128 | def cache_contains_argument(function_arguments, alive_only=True):
129 | """
130 | Return True if the cache contains a cached item with the specified function call arguments
131 |
132 | :param function_arguments: Can be a list, a tuple or a dict.
133 | - Full arguments: use a list to represent both positional arguments and keyword
134 | arguments. The list contains two elements, a tuple (positional arguments) and
135 | a dict (keyword arguments). For example,
136 | f(1, 2, 3, a=4, b=5, c=6)
137 | can be represented by:
138 | [(1, 2, 3), {'a': 4, 'b': 5, 'c': 6}]
139 | - Positional arguments only: when the arguments does not include keyword arguments,
140 | a tuple can be used to represent positional arguments. For example,
141 | f(1, 2, 3)
142 | can be represented by:
143 | (1, 2, 3)
144 | - Keyword arguments only: when the arguments does not include positional arguments,
145 | a dict can be used to represent keyword arguments. For example,
146 | f(a=4, b=5, c=6)
147 | can be represented by:
148 | {'a': 4, 'b': 5, 'c': 6}
149 |
150 | :param alive_only: Whether to check alive cache item only (default to True).
151 |
152 | :return: True if the desired cached item is present, False otherwise.
153 | """
154 | if isinstance(function_arguments, tuple):
155 | positional_argument_tuple = function_arguments
156 | keyword_argument_dict = {}
157 | elif isinstance(function_arguments, dict):
158 | positional_argument_tuple = ()
159 | keyword_argument_dict = function_arguments
160 | elif isinstance(function_arguments, list) and len(function_arguments) == 2:
161 | positional_argument_tuple, keyword_argument_dict = function_arguments
162 | if not isinstance(positional_argument_tuple, tuple) or not isinstance(keyword_argument_dict, dict):
163 | raise TypeError('Expected function_arguments to be a list containing a positional argument tuple '
164 | 'and a keyword argument dict')
165 | else:
166 | raise TypeError('Expected function_arguments to be a tuple, a dict, or a list with 2 elements')
167 | key = make_key(positional_argument_tuple, keyword_argument_dict)
168 | with lock:
169 | node = cache.get(key, sentinel)
170 | if node is not sentinel:
171 | return values_toolkit.is_cache_value_valid(node[_VALUE]) if alive_only else True
172 | return False
173 |
174 | def cache_contains_result(return_value, alive_only=True):
175 | """
176 | Return True if the cache contains a cache item with the specified user function return value. O(n) time
177 | complexity.
178 |
179 | :param return_value: A return value coming from the user function.
180 |
181 | :param alive_only: Whether to check alive cache item only (default to True).
182 |
183 | :return: True if the desired cached item is present, False otherwise.
184 | """
185 | with lock:
186 | node = root[_PREV]
187 | while node is not root:
188 | is_alive = values_toolkit.is_cache_value_valid(node[_VALUE])
189 | cache_result = values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
190 | if cache_result == return_value:
191 | return is_alive if alive_only else True
192 | node = node[_PREV]
193 | return False
194 |
195 | def cache_for_each(consumer):
196 | """
197 | Perform the given action for each cache element in an order determined by the algorithm until all
198 | elements have been processed or the action throws an error
199 |
200 | :param consumer: an action function to process the cache elements. Must have 3 arguments:
201 | def consumer(user_function_arguments, user_function_result, is_alive): ...
202 | user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
203 | args is a tuple holding positional arguments.
204 | kwargs is a dict holding keyword arguments.
205 | for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
206 | user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
207 | user_function_result is a return value coming from the user function.
208 | cache_result is a return value coming from the user function.
209 | is_alive is a boolean value indicating whether the cache is still alive
210 | (if a TTL is given).
211 | """
212 | with lock:
213 | node = root[_PREV]
214 | while node is not root:
215 | is_alive = values_toolkit.is_cache_value_valid(node[_VALUE])
216 | user_function_result = values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
217 | user_function_arguments = key_argument_map[node[_KEY]]
218 | consumer(user_function_arguments, user_function_result, is_alive)
219 | node = node[_PREV]
220 |
221 | def cache_arguments():
222 | """
223 | Get user function arguments of all alive cache elements
224 |
225 | see also: cache_items()
226 |
227 | Example:
228 | @cached
229 | def f(a, b, c, d):
230 | ...
231 | f(1, 2, c=3, d=4)
232 | for argument in f.cache_arguments():
233 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
234 |
235 | :return: an iterable which iterates through a list of a tuple containing a tuple (positional arguments) and
236 | a dict (keyword arguments)
237 | """
238 | with lock:
239 | node = root[_PREV]
240 | while node is not root:
241 | if values_toolkit.is_cache_value_valid(node[_VALUE]):
242 | yield key_argument_map[node[_KEY]]
243 | node = node[_PREV]
244 |
245 | def cache_results():
246 | """
247 | Get user function return values of all alive cache elements
248 |
249 | see also: cache_items()
250 |
251 | Example:
252 | @cached
253 | def f(a):
254 | return a
255 | f('hello')
256 | for result in f.cache_results():
257 | print(result) # 'hello'
258 |
259 | :return: an iterable which iterates through a list of user function result (of any type)
260 | """
261 | with lock:
262 | node = root[_PREV]
263 | while node is not root:
264 | if values_toolkit.is_cache_value_valid(node[_VALUE]):
265 | yield values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
266 | node = node[_PREV]
267 |
268 | def cache_items():
269 | """
270 | Get cache items, i.e. entries of all alive cache elements, in the form of (argument, result).
271 |
272 | argument: a tuple containing a tuple (positional arguments) and a dict (keyword arguments).
273 | result: a user function return value of any type.
274 |
275 | see also: cache_arguments(), cache_results().
276 |
277 | Example:
278 | @cached
279 | def f(a, b, c, d):
280 | return 'the answer is ' + str(a)
281 | f(1, 2, c=3, d=4)
282 | for argument, result in f.cache_items():
283 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
284 | print(result) # 'the answer is 1'
285 |
286 | :return: an iterable which iterates through a list of (argument, result) entries
287 | """
288 | with lock:
289 | node = root[_PREV]
290 | while node is not root:
291 | if values_toolkit.is_cache_value_valid(node[_VALUE]):
292 | yield key_argument_map[node[_KEY]], values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
293 | node = node[_PREV]
294 |
    def cache_remove_if(predicate):
        """
        Remove all cache elements that satisfy the given predicate

        :param predicate: a predicate function to judge whether the cache elements should be removed. Must
                          have 3 arguments, and returns True or False:
                              def predicate(user_function_arguments, user_function_result, is_alive): ...
                          user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
                              args is a tuple holding positional arguments.
                              kwargs is a dict holding keyword arguments.
                              for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
                              user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
                          user_function_result is a return value coming from the user function.
                          is_alive is a boolean value indicating whether the cache is still alive
                          (if a TTL is given).

        :return: True if at least one element is removed, False otherwise.
        """
        nonlocal full
        removed = False
        with lock:
            # scan the LRU list from the most recently used end
            node = root[_PREV]
            while node is not root:
                is_alive = values_toolkit.is_cache_value_valid(node[_VALUE])
                user_function_result = values_toolkit.retrieve_result_from_cache_value(node[_VALUE])
                user_function_arguments = key_argument_map[node[_KEY]]
                if predicate(user_function_arguments, user_function_result, is_alive):
                    removed = True
                    node_prev = node[_PREV]
                    # relink pointers of node.prev.next and node.next.prev
                    node_prev[_NEXT] = node[_NEXT]
                    node[_NEXT][_PREV] = node_prev
                    # clear the content of this node
                    key = node[_KEY]
                    node[_KEY] = node[_VALUE] = None
                    # delete from cache
                    del cache[key]
                    del key_argument_map[key]
                    # check whether the cache is full
                    full = (cache.__len__() >= max_size)
                    node = node_prev
                else:
                    node = node[_PREV]
        return removed
340 |
    # expose operations to wrapper
    wrapper.cache_clear = cache_clear
    wrapper.cache_info = cache_info
    wrapper.cache_is_empty = cache_is_empty
    wrapper.cache_is_full = cache_is_full
    wrapper.cache_contains_argument = cache_contains_argument
    wrapper.cache_contains_result = cache_contains_result
    wrapper.cache_for_each = cache_for_each
    wrapper.cache_arguments = cache_arguments
    wrapper.cache_results = cache_results
    wrapper.cache_items = cache_items
    wrapper.cache_remove_if = cache_remove_if
    # internal handles (implementation details, exposed for inspection/testing)
    wrapper._cache = cache
    wrapper._lru_root = root
    wrapper._root_name = '_lru_root'

    return wrapper
358 |
--------------------------------------------------------------------------------
/memoization/caching/lru_cache.pyi:
--------------------------------------------------------------------------------
1 | from memoization.type.caching.cache import get_caching_wrapper as get_caching_wrapper
2 |
--------------------------------------------------------------------------------
/memoization/caching/plain_cache.py:
--------------------------------------------------------------------------------
1 | from threading import RLock
2 |
3 | from memoization.model import DummyWithable, CacheInfo
4 | import memoization.caching.general.keys_order_dependent as keys_toolkit_order_dependent
5 | import memoization.caching.general.keys_order_independent as keys_toolkit_order_independent
6 | import memoization.caching.general.values_with_ttl as values_toolkit_with_ttl
7 | import memoization.caching.general.values_without_ttl as values_toolkit_without_ttl
8 |
9 |
def get_caching_wrapper(user_function, max_size, ttl, algorithm, thread_safe, order_independent, custom_key_maker):
    """
    Get a caching wrapper for space-unlimited cache

    :param user_function: the function whose results are memoized
    :param max_size: reported through cache_info(); this cache never evicts entries
    :param ttl: time-to-live in seconds for cached values, or None to disable expiration
    :param algorithm: the algorithm identifier, reported back through cache_info()
    :param thread_safe: when True, statistics updates are guarded by an RLock
    :param order_independent: when True, keyword argument order does not affect the cache key
    :param custom_key_maker: user-provided key-making function, or None to use the built-in one
    """

    cache = {}                                                  # the cache to store function results
    key_argument_map = {}                                       # mapping from cache keys to user function arguments
    sentinel = object()                                         # sentinel object for the default value of map.get
    hits = misses = 0                                           # hits and misses of the cache
    lock = RLock() if thread_safe else DummyWithable()          # ensure thread-safe
    if ttl is not None:                                         # set up values toolkit according to ttl
        values_toolkit = values_toolkit_with_ttl
    else:
        values_toolkit = values_toolkit_without_ttl
    if custom_key_maker is not None:                            # use custom make_key function
        make_key = custom_key_maker
    else:
        if order_independent:                                   # set up keys toolkit according to order_independent
            make_key = keys_toolkit_order_independent.make_key
        else:
            make_key = keys_toolkit_order_dependent.make_key
29 |
30 | def wrapper(*args, **kwargs):
31 | """The actual wrapper"""
32 | nonlocal hits, misses
33 | key = make_key(args, kwargs)
34 | value = cache.get(key, sentinel)
35 | if value is not sentinel and values_toolkit.is_cache_value_valid(value):
36 | with lock:
37 | hits += 1
38 | return values_toolkit.retrieve_result_from_cache_value(value)
39 | else:
40 | with lock:
41 | misses += 1
42 | result = user_function(*args, **kwargs)
43 | cache[key] = values_toolkit.make_cache_value(result, ttl)
44 | key_argument_map[key] = (args, kwargs)
45 | return result
46 |
47 | def cache_clear():
48 | """Clear the cache and statistics information"""
49 | nonlocal hits, misses
50 | with lock:
51 | cache.clear()
52 | key_argument_map.clear()
53 | hits = misses = 0
54 |
55 | def cache_info():
56 | """
57 | Show statistics information
58 |
59 | :return: a CacheInfo object describing the cache
60 | """
61 | with lock:
62 | return CacheInfo(hits, misses, cache.__len__(), max_size, algorithm,
63 | ttl, thread_safe, order_independent, custom_key_maker is not None)
64 |
65 | def cache_is_empty():
66 | """Return True if the cache contains no elements"""
67 | return cache.__len__() == 0
68 |
69 | def cache_is_full():
70 | """Return True if the cache is full"""
71 | return False
72 |
73 | def cache_contains_argument(function_arguments, alive_only=True):
74 | """
75 | Return True if the cache contains a cached item with the specified function call arguments
76 |
77 | :param function_arguments: Can be a list, a tuple or a dict.
78 | - Full arguments: use a list to represent both positional arguments and keyword
79 | arguments. The list contains two elements, a tuple (positional arguments) and
80 | a dict (keyword arguments). For example,
81 | f(1, 2, 3, a=4, b=5, c=6)
82 | can be represented by:
83 | [(1, 2, 3), {'a': 4, 'b': 5, 'c': 6}]
84 | - Positional arguments only: when the arguments does not include keyword arguments,
85 | a tuple can be used to represent positional arguments. For example,
86 | f(1, 2, 3)
87 | can be represented by:
88 | (1, 2, 3)
89 | - Keyword arguments only: when the arguments does not include positional arguments,
90 | a dict can be used to represent keyword arguments. For example,
91 | f(a=4, b=5, c=6)
92 | can be represented by:
93 | {'a': 4, 'b': 5, 'c': 6}
94 |
95 | :param alive_only: Whether to check alive cache item only (default to True).
96 |
97 | :return: True if the desired cached item is present, False otherwise.
98 | """
99 | if isinstance(function_arguments, tuple):
100 | positional_argument_tuple = function_arguments
101 | keyword_argument_dict = {}
102 | elif isinstance(function_arguments, dict):
103 | positional_argument_tuple = ()
104 | keyword_argument_dict = function_arguments
105 | elif isinstance(function_arguments, list) and len(function_arguments) == 2:
106 | positional_argument_tuple, keyword_argument_dict = function_arguments
107 | if not isinstance(positional_argument_tuple, tuple) or not isinstance(keyword_argument_dict, dict):
108 | raise TypeError('Expected function_arguments to be a list containing a positional argument tuple '
109 | 'and a keyword argument dict')
110 | else:
111 | raise TypeError('Expected function_arguments to be a tuple, a dict, or a list with 2 elements')
112 | key = make_key(positional_argument_tuple, keyword_argument_dict)
113 | with lock:
114 | value = cache.get(key, sentinel)
115 | if value is not sentinel:
116 | return values_toolkit.is_cache_value_valid(value) if alive_only else True
117 | return False
118 |
119 | def cache_contains_result(return_value, alive_only=True):
120 | """
121 | Return True if the cache contains a cache item with the specified user function return value. O(n) time
122 | complexity.
123 |
124 | :param return_value: A return value coming from the user function.
125 |
126 | :param alive_only: Whether to check alive cache item only (default to True).
127 |
128 | :return: True if the desired cached item is present, False otherwise.
129 | """
130 | with lock:
131 | for value in cache.values():
132 | is_alive = values_toolkit.is_cache_value_valid(value)
133 | cache_result = values_toolkit.retrieve_result_from_cache_value(value)
134 | if cache_result == return_value:
135 | return is_alive if alive_only else True
136 | return False
137 |
138 | def cache_for_each(consumer):
139 | """
140 | Perform the given action for each cache element in an order determined by the algorithm until all
141 | elements have been processed or the action throws an error
142 |
143 | :param consumer: an action function to process the cache elements. Must have 3 arguments:
144 | def consumer(user_function_arguments, user_function_result, is_alive): ...
145 | user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
146 | args is a tuple holding positional arguments.
147 | kwargs is a dict holding keyword arguments.
148 | for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
149 | user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
150 | user_function_result is a return value coming from the user function.
151 | cache_result is a return value coming from the user function.
152 | is_alive is a boolean value indicating whether the cache is still alive
153 | (if a TTL is given).
154 | """
155 | with lock:
156 | for key, value in cache.items():
157 | is_alive = values_toolkit.is_cache_value_valid(value)
158 | user_function_result = values_toolkit.retrieve_result_from_cache_value(value)
159 | user_function_arguments = key_argument_map[key]
160 | consumer(user_function_arguments, user_function_result, is_alive)
161 |
162 | def cache_arguments():
163 | """
164 | Get user function arguments of all alive cache elements
165 |
166 | see also: cache_items()
167 |
168 | Example:
169 | @cached
170 | def f(a, b, c, d):
171 | ...
172 | f(1, 2, c=3, d=4)
173 | for argument in f.cache_arguments():
174 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
175 |
176 | :return: an iterable which iterates through a list of a tuple containing a tuple (positional arguments) and
177 | a dict (keyword arguments)
178 | """
179 | with lock:
180 | for key, value in cache.items():
181 | if values_toolkit.is_cache_value_valid(value):
182 | yield key_argument_map[key]
183 |
184 | def cache_results():
185 | """
186 | Get user function return values of all alive cache elements
187 |
188 | see also: cache_items()
189 |
190 | Example:
191 | @cached
192 | def f(a):
193 | return a
194 | f('hello')
195 | for result in f.cache_results():
196 | print(result) # 'hello'
197 |
198 | :return: an iterable which iterates through a list of user function result (of any type)
199 | """
200 | with lock:
201 | for key, value in cache.items():
202 | if values_toolkit.is_cache_value_valid(value):
203 | yield values_toolkit.retrieve_result_from_cache_value(value)
204 |
205 | def cache_items():
206 | """
207 | Get cache items, i.e. entries of all alive cache elements, in the form of (argument, result).
208 |
209 | argument: a tuple containing a tuple (positional arguments) and a dict (keyword arguments).
210 | result: a user function return value of any type.
211 |
212 | see also: cache_arguments(), cache_results().
213 |
214 | Example:
215 | @cached
216 | def f(a, b, c, d):
217 | return 'the answer is ' + str(a)
218 | f(1, 2, c=3, d=4)
219 | for argument, result in f.cache_items():
220 | print(argument) # ((1, 2), {'c': 3, 'd': 4})
221 | print(result) # 'the answer is 1'
222 |
223 | :return: an iterable which iterates through a list of (argument, result) entries
224 | """
225 | with lock:
226 | for key, value in cache.items():
227 | if values_toolkit.is_cache_value_valid(value):
228 | yield key_argument_map[key], values_toolkit.retrieve_result_from_cache_value(value)
229 |
230 | def cache_remove_if(predicate):
231 | """
232 | Remove all cache elements that satisfy the given predicate
233 |
234 | :param predicate: a predicate function to judge whether the cache elements should be removed. Must
235 | have 3 arguments, and returns True or False:
236 | def consumer(user_function_arguments, user_function_result, is_alive): ...
237 | user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
238 | args is a tuple holding positional arguments.
239 | kwargs is a dict holding keyword arguments.
240 | for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
241 | user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
242 | user_function_result is a return value coming from the user function.
243 | cache_result is a return value coming from the user function.
244 | is_alive is a boolean value indicating whether the cache is still alive
245 | (if a TTL is given).
246 |
247 | :return: True if at least one element is removed, False otherwise.
248 | """
249 | with lock:
250 | keys_to_be_removed = []
251 | for key, value in cache.items():
252 | is_alive = values_toolkit.is_cache_value_valid(value)
253 | user_function_result = values_toolkit.retrieve_result_from_cache_value(value)
254 | user_function_arguments = key_argument_map[key]
255 | if predicate(user_function_arguments, user_function_result, is_alive):
256 | keys_to_be_removed.append(key)
257 | for key in keys_to_be_removed:
258 | del cache[key]
259 | del key_argument_map[key]
260 | return len(keys_to_be_removed) > 0
261 |
262 | # expose operations and members of wrapper
263 | wrapper.cache_clear = cache_clear
264 | wrapper.cache_info = cache_info
265 | wrapper.cache_is_empty = cache_is_empty
266 | wrapper.cache_is_full = cache_is_full
267 | wrapper.cache_contains_argument = cache_contains_argument
268 | wrapper.cache_contains_result = cache_contains_result
269 | wrapper.cache_for_each = cache_for_each
270 | wrapper.cache_arguments = cache_arguments
271 | wrapper.cache_results = cache_results
272 | wrapper.cache_items = cache_items
273 | wrapper.cache_remove_if = cache_remove_if
274 | wrapper._cache = cache
275 |
276 | return wrapper
277 |
--------------------------------------------------------------------------------
/memoization/caching/plain_cache.pyi:
--------------------------------------------------------------------------------
1 | from memoization.type.caching.cache import get_caching_wrapper as get_caching_wrapper
2 |
--------------------------------------------------------------------------------
/memoization/caching/statistic_cache.py:
--------------------------------------------------------------------------------
1 | from threading import RLock
2 |
3 | from memoization.model import DummyWithable, CacheInfo
4 |
5 |
def get_caching_wrapper(user_function, max_size, ttl, algorithm, thread_safe, order_independent, custom_key_maker):
    """
    Get a caching wrapper for statistics only, without any actual caching.

    Used when max_size == 0: every call goes straight through to user_function
    and is counted as a miss. All cache-inspection operations behave as if the
    cache were permanently empty (and, having zero capacity, permanently full).
    """

    misses = 0                                          # number of misses of the cache
    lock = RLock() if thread_safe else DummyWithable()  # guards the miss counter

    def wrapper(*args, **kwargs):
        """Count the call as a miss, then delegate to the user function."""
        nonlocal misses
        with lock:
            misses += 1
        return user_function(*args, **kwargs)

    def cache_clear():
        """Reset the statistics (there is no cached data to clear)."""
        nonlocal misses
        with lock:
            misses = 0

    def cache_info():
        """
        Return a CacheInfo snapshot describing this cache.

        hits and current_size are always 0 because nothing is ever stored.
        """
        with lock:
            return CacheInfo(0, misses, 0, max_size, algorithm,
                             ttl, thread_safe, order_independent, custom_key_maker is not None)

    def cache_is_empty():
        """A zero-capacity cache never holds elements; always True."""
        return True

    def cache_is_full():
        """A zero-capacity cache is always at capacity; always True."""
        return True

    def cache_contains_argument(function_arguments, alive_only=True):
        """
        Whether a cached item exists for the given call arguments.

        :param function_arguments: a tuple (positional args only), a dict (keyword
                                   args only), or a 2-element list [tuple, dict]
                                   for the full argument set.
        :param alive_only: whether to consider alive cache items only.
        :return: always False -- nothing is ever cached by this wrapper.
        """
        return False

    def cache_contains_result(return_value, alive_only=True):
        """
        Whether a cached item exists with the given user function return value.

        :param return_value: a return value coming from the user function.
        :param alive_only: whether to consider alive cache items only.
        :return: always False -- nothing is ever cached by this wrapper.
        """
        return False

    def cache_for_each(consumer):
        """
        Apply consumer(user_function_arguments, user_function_result, is_alive)
        to every cache element. A no-op here, as the cache is always empty.
        """
        pass

    def cache_arguments():
        """
        Yield the call arguments ((args, kwargs) tuples) of all alive cache
        elements; always an empty iterator for this cache type.
        """
        return
        yield  # unreachable; its presence makes this function a generator

    def cache_results():
        """
        Yield the user function results of all alive cache elements; always an
        empty iterator for this cache type.
        """
        return
        yield  # unreachable; its presence makes this function a generator

    def cache_items():
        """
        Yield (argument, result) pairs of all alive cache elements; always an
        empty iterator for this cache type.
        """
        return
        yield  # unreachable; its presence makes this function a generator

    def cache_remove_if(predicate):
        """
        Remove every cache element satisfying
        predicate(user_function_arguments, user_function_result, is_alive).

        :return: always False -- there is never anything to remove.
        """
        return False

    # expose operations and members of wrapper
    for operation in (cache_clear, cache_info, cache_is_empty, cache_is_full,
                      cache_contains_argument, cache_contains_result, cache_for_each,
                      cache_arguments, cache_results, cache_items, cache_remove_if):
        setattr(wrapper, operation.__name__, operation)
    wrapper._cache = None  # no underlying storage exists

    return wrapper
198 |
199 |
--------------------------------------------------------------------------------
/memoization/caching/statistic_cache.pyi:
--------------------------------------------------------------------------------
1 | from memoization.type.caching.cache import get_caching_wrapper as get_caching_wrapper
2 |
--------------------------------------------------------------------------------
/memoization/config/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/config/__init__.py
--------------------------------------------------------------------------------
/memoization/config/algorithm_mapping.py:
--------------------------------------------------------------------------------
1 | from memoization.constant.flag import CachingAlgorithmFlag
2 | import memoization.caching.fifo_cache as fifo_cache
3 | import memoization.caching.lru_cache as lru_cache
4 | import memoization.caching.lfu_cache as lfu_cache
5 |
6 |
def get_cache_toolkit(algorithm=CachingAlgorithmFlag.FIFO):
    """
    Look up the cache toolkit module implementing the given caching algorithm.

    :param algorithm: a CachingAlgorithmFlag member (FIFO by default)
    :return: the cache module implementing the algorithm (fifo/lru/lfu)
    :raise KeyError: if the flag is not one of FIFO, LRU, LFU
    """
    algorithm_mapping = {
        CachingAlgorithmFlag.FIFO: fifo_cache,
        CachingAlgorithmFlag.LRU: lru_cache,
        CachingAlgorithmFlag.LFU: lfu_cache,
    }
    try:
        return algorithm_mapping[algorithm]
    except KeyError:
        # `from None` suppresses the implicit chaining of the raw lookup
        # KeyError, which otherwise produces a confusing double traceback.
        raise KeyError('Unrecognized caching algorithm flag') from None
17 |
--------------------------------------------------------------------------------
/memoization/config/algorithm_mapping.pyi:
--------------------------------------------------------------------------------
1 | from types import ModuleType
2 | from typing import Callable, Any, Optional, Hashable, TypeVar
3 |
4 | from memoization.type.model import CachedFunction
5 |
6 | T = TypeVar('T', bound=Callable[..., Any])
7 |
class CacheToolkit(ModuleType):
    # Structural type of a cache implementation module (fifo_cache, lru_cache,
    # lfu_cache): a module object exposing a get_caching_wrapper factory.
    def get_caching_wrapper(self,
                            user_function: T,
                            max_size: Optional[int],
                            ttl: Optional[float],
                            algorithm: Optional[int],
                            thread_safe: Optional[bool],
                            order_independent: Optional[bool],
                            custom_key_maker: Optional[Callable[..., Hashable]]) -> CachedFunction[T]: ...

# Maps an algorithm flag to the module implementing it; the runtime
# implementation raises KeyError for unrecognized flags.
def get_cache_toolkit(algorithm: int = ...) -> CacheToolkit: ...
19 |
--------------------------------------------------------------------------------
/memoization/constant/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/constant/__init__.py
--------------------------------------------------------------------------------
/memoization/constant/flag.py:
--------------------------------------------------------------------------------
1 | try:
2 | import enum # only works on Python 3.5+
3 | enum.IntFlag # only works on Python 3.6+
4 | except (ImportError, AttributeError):
5 | # backport for Python 3.4 and 3.5
6 | from memoization.backport import enum # type: ignore
7 |
8 |
class CachingAlgorithmFlag(enum.IntFlag):
    """
    Flags identifying the eviction algorithm used by a size-limited cache.

    Members are int-compatible bit flags, so they can be combined with ``|``
    and compared directly against plain integers.
    """
    FIFO = 1  # evict in First-In-First-Out order
    LRU = 2   # evict the Least Recently Used entry
    LFU = 4   # evict the Least Frequently Used entry
16 |
--------------------------------------------------------------------------------
/memoization/constant/flag.pyi:
--------------------------------------------------------------------------------
1 | from typing import Mapping
2 |
3 |
class CachingAlgorithmFlag:
    # NOTE(review): at runtime this class is an enum.IntFlag; this stub models
    # the members as plain ints -- confirm whether IntFlag-typed members are
    # wanted for stricter type checking.
    FIFO: int = ...
    LRU: int = ...
    LFU: int = ...
    __members__: Mapping[str, int]
9 |
--------------------------------------------------------------------------------
/memoization/memoization.py:
--------------------------------------------------------------------------------
1 | from functools import partial, update_wrapper
2 | import inspect
3 | import warnings
4 |
5 | import memoization.caching.statistic_cache as statistic_cache
6 | import memoization.caching.plain_cache as plain_cache
7 | from memoization.constant.flag import CachingAlgorithmFlag as CachingAlgorithmFlag # for type-checking to work properly
8 | from memoization.config.algorithm_mapping import get_cache_toolkit
9 |
10 |
# Public symbols
__all__ = ['cached', 'suppress_warnings', 'CachingAlgorithmFlag']
__version__ = '0.4.0'

# Whether warnings are enabled (toggled via suppress_warnings())
_warning_enabled = True


# Insert the algorithm flags to the global namespace for convenience,
# so users may write e.g. memoization.LRU instead of CachingAlgorithmFlag.LRU
globals().update(CachingAlgorithmFlag.__members__)
21 |
22 |
def cached(user_function=None, max_size=None, ttl=None,
           algorithm=CachingAlgorithmFlag.LRU, thread_safe=True, order_independent=False, custom_key_maker=None):
    """
    The @cached decorator. Supports both bare and parameterized usage,
    i.e. @cached and @cached(...).

    :param user_function: the function being decorated, whose results are cached.

    :param max_size: maximum number of items the cache may hold.
                     None means an unlimited cache; 0 disables caching entirely
                     (only statistics are tracked).

    :param ttl: time-to-live, defining how long cached data stays valid (in
                seconds). Data never expires when None.
                Effective only when max_size > 0 or max_size is None.

    :param algorithm: eviction algorithm used when the cache is size-limited.
                      Default: LRU (Least Recently Used).
                      Effective only when max_size > 0.
                      Refer to CachingAlgorithmFlag for possible choices.

    :param thread_safe: whether the cache must be thread safe; setting it to
                        False enhances performance.

    :param order_independent: when True, calls that differ only in keyword
                              argument order -- f(a=1, b=1) vs f(b=1, a=1) --
                              share one cache entry, at a small performance
                              cost. Effective only when (max_size > 0 or
                              max_size is None) and custom_key_maker is None.

    :param custom_key_maker: overrides the default cache key maker. Must have
                             the same signature as user_function and produce
                             unique, hashable, cheaply-computed keys.
                             Effective only when max_size > 0 or max_size is None.
                             e.g.
                                def get_employee_id(employee):
                                    return employee.id
                                @cached(custom_key_maker=get_employee_id)
                                def calculate_performance(employee):
                                    ...

    :return: the decorated function, or a decorator when no function was given
             (the @cached(...) form).
    """

    # @cached(...) usage: defer by returning a partially-applied decorator
    if user_function is None:
        return partial(cached, max_size=max_size, ttl=ttl, algorithm=algorithm,
                       thread_safe=thread_safe, order_independent=order_independent, custom_key_maker=custom_key_maker)

    # Validate argument types and ranges (guard clauses, checked in order)
    if not hasattr(user_function, '__call__'):
        raise TypeError('Unable to do memoization on non-callable object ' + str(user_function))
    if max_size is not None:
        if not isinstance(max_size, int):
            raise TypeError('Expected max_size to be an integer or None')
        if max_size < 0:
            raise ValueError('Expected max_size to be a nonnegative integer or None')
    if ttl is not None:
        if not isinstance(ttl, (int, float)):
            raise TypeError('Expected ttl to be a number or None')
        if ttl <= 0:
            raise ValueError('Expected ttl to be a positive number or None')
    if not isinstance(algorithm, CachingAlgorithmFlag):
        raise TypeError('Expected algorithm to be an instance of CachingAlgorithmFlag')
    if not isinstance(thread_safe, bool):
        raise TypeError('Expected thread_safe to be a boolean value')
    if not isinstance(order_independent, bool):
        raise TypeError('Expected order_independent to be a boolean value')
    if custom_key_maker is not None and not hasattr(custom_key_maker, '__call__'):
        raise TypeError('Expected custom_key_maker to be callable or None')

    # Adapt a custom key maker (if any) to the internal (args, kwargs) calling
    # convention, warning when its signature differs from the user function's
    custom_key_maker_wrapper = None
    if custom_key_maker is not None:
        if _warning_enabled:
            custom_key_maker_info = inspect.getfullargspec(custom_key_maker)
            user_function_info = inspect.getfullargspec(user_function)
            if any(getattr(custom_key_maker_info, field) != getattr(user_function_info, field)
                   for field in ('args', 'varargs', 'varkw', 'kwonlyargs', 'defaults', 'kwonlydefaults')):
                warnings.warn('Expected custom_key_maker to have the same signature as the function being cached. '
                              'Call memoization.suppress_warnings() before using @cached to remove this message.',
                              SyntaxWarning)

        def custom_key_maker_wrapper(args, kwargs):
            return custom_key_maker(*args, **kwargs)

    # Build the wrapper and make it masquerade as the original function
    wrapper = _create_cached_wrapper(user_function, max_size, ttl, algorithm,
                                     thread_safe, order_independent, custom_key_maker_wrapper)
    wrapper.__signature__ = inspect.signature(user_function)  # copy the signature of user_function
    return update_wrapper(wrapper, user_function)
126 |
127 |
def suppress_warnings(should_warn=False):
    """
    Enable or disable the warnings emitted when @cached is applied.
    Must be called before using @cached.

    :param should_warn: whether warnings should be shown (False by default)
    """
    global _warning_enabled
    _warning_enabled = should_warn
137 |
138 |
def _create_cached_wrapper(user_function, max_size, ttl, algorithm, thread_safe, order_independent, custom_key_maker):
    """
    Factory that creates the actual executed function when a function is
    decorated with @cached.

    Selects the cache implementation from max_size: 0 -> statistics only,
    None -> unbounded plain cache, n > 0 -> size-limited cache chosen by
    the algorithm flag.
    """
    if max_size == 0:
        cache_toolkit = statistic_cache
    elif max_size is None:
        cache_toolkit = plain_cache
    else:
        cache_toolkit = get_cache_toolkit(algorithm)
    return cache_toolkit.get_caching_wrapper(user_function, max_size, ttl, algorithm,
                                             thread_safe, order_independent, custom_key_maker)
153 |
154 |
if __name__ == '__main__':
    # Running this module directly just prints a short usage banner to stderr.
    import sys
    sys.stderr.write('python-memoization v' + __version__ +
                     ': A powerful caching library for Python, with TTL support and multiple algorithm options.\n')
    sys.stderr.write('Go to https://github.com/lonelyenvoy/python-memoization for usage and more details.\n')
160 |
--------------------------------------------------------------------------------
/memoization/memoization.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any, Callable, Optional, overload, TypeVar, Hashable
2 |
3 | from memoization.constant.flag import CachingAlgorithmFlag as CachingAlgorithmFlagType
4 | from memoization.type.model import CachedFunction
5 |
6 | T = TypeVar('T', bound=Callable[..., Any])
7 |
8 | __version__: str
9 |
# Decorator with optional arguments - @cached(...)
@overload
def cached(max_size: Optional[int] = ...,
           ttl: Optional[float] = ...,
           algorithm: Optional[int] = ...,
           thread_safe: Optional[bool] = ...,
           order_independent: Optional[bool] = ...,
           custom_key_maker: Optional[Callable[..., Hashable]] = ...) -> Callable[[T], CachedFunction[T]]: ...

# Bare decorator usage - @cached
@overload
def cached(user_function: T = ...) -> CachedFunction[T]: ...

# Enable/disable the custom_key_maker signature-mismatch warning
def suppress_warnings(should_warn: bool = ...) -> None: ...

# Re-exported so "from memoization import CachingAlgorithmFlag" type-checks
CachingAlgorithmFlag = CachingAlgorithmFlagType
26 |
--------------------------------------------------------------------------------
/memoization/model.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 |
4 | __all__ = ['DummyWithable', 'HashedList', 'CacheInfo']
5 |
6 |
class DummyWithable(object):
    """
    A no-op context manager, substituted for a real lock when thread safety
    is turned off, so that "with lock:" blocks keep working unchanged.

    e.g.
        lock = DummyWithable()
        with lock:
            manipulate_data()
    """

    __slots__ = ()

    def __enter__(self):
        # Nothing to acquire; return None like a plain lock's __enter__ context value
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None (falsy) lets any exception propagate normally
        return None
24 |
25 |
class HashedList(list):
    """
    A list carrying a precomputed hash value, guaranteeing that hash() is
    computed no more than once per element over the object's lifetime.
    """

    __slots__ = ('hash_value', )  # no per-instance __dict__; just the cached hash

    def __init__(self, tup, hash_value):
        list.__init__(self, tup)  # populate the list contents from tup
        self.hash_value = hash_value

    def __hash__(self):
        # Return the precomputed value instead of rehashing the elements
        return self.hash_value
39 |
40 |
# Named type CacheInfo: an immutable snapshot of a cache's statistics and
# configuration, as returned by every cache wrapper's cache_info() operation.
CacheInfo = namedtuple('CacheInfo', [
    'hits',               # number of cache hits so far
    'misses',             # number of cache misses so far
    'current_size',       # number of items currently stored
    'max_size',           # capacity limit (None = unlimited)
    'algorithm',          # eviction algorithm flag
    'ttl',                # time-to-live in seconds (None = never expires)
    'thread_safe',        # whether access is lock-protected
    'order_independent',  # whether kwargs order is ignored in cache keys
    'use_custom_key',     # whether a custom key maker is in use
])
44 |
--------------------------------------------------------------------------------
/memoization/model.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any, Tuple, Optional
2 |
class DummyWithable:
    """Stub for the no-op context manager substituted for a lock when thread_safe=False."""
    # Fixed: was Tuple[str], which means a tuple of exactly one str; the
    # runtime value is the empty tuple (), so a variadic tuple type is needed.
    __slots__: Tuple[str, ...] = ...
    def __enter__(self) -> None: ...
    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: ...
7 |
class HashedList:
    """Stub for the list subclass that carries a precomputed hash value."""
    hash_value: int = ...
    __slots__: Tuple[str]
    # Fixed: tup was annotated Tuple[Any], which means a tuple of exactly one
    # element; the constructor accepts a tuple of any length.
    def __init__(self, tup: Tuple[Any, ...], hash_value: int) -> None: ...
    def __hash__(self) -> int: ...
13 |
class CacheInfo:
    # NOTE(review): at runtime CacheInfo is a collections.namedtuple, so it
    # also supports indexing, unpacking and _fields; consider modeling it as
    # typing.NamedTuple here -- confirm before changing the stub.
    hits: int
    misses: int
    current_size: int
    max_size: Optional[int]
    algorithm: int
    ttl: Optional[float]
    thread_safe: bool
    order_independent: bool
    use_custom_key: bool
    def __init__(
        self,
        hits: int,
        misses: int,
        current_size: int,
        max_size: Optional[int],
        algorithm: int,
        ttl: Optional[float],
        thread_safe: bool,
        order_independent: bool,
        use_custom_key: bool,
    ): ...
36 |
--------------------------------------------------------------------------------
/memoization/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/py.typed
--------------------------------------------------------------------------------
/memoization/type/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/type/__init__.py
--------------------------------------------------------------------------------
/memoization/type/caching/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/type/caching/__init__.py
--------------------------------------------------------------------------------
/memoization/type/caching/cache.pyi:
--------------------------------------------------------------------------------
1 | from typing import Callable, Optional, Any, TypeVar, Protocol, Hashable
2 |
3 | from memoization.type.model import CachedFunction
4 |
# Type variable for the decorated user function; bound to any callable so the
# wrapper can preserve the original call signature.
T = TypeVar('T', bound=Callable[..., Any])


# Stub for the per-algorithm cache toolkit entry point: wraps ``user_function``
# with the configured cache and returns it as a CachedFunction, which is
# callable like the original and exposes the cache_* management API.
def get_caching_wrapper(user_function: T,
                        max_size: Optional[int],
                        ttl: Optional[float],
                        algorithm: Optional[int],
                        thread_safe: Optional[bool],
                        order_independent: Optional[bool],
                        custom_key_maker: Optional[Callable[..., Hashable]]) -> CachedFunction[T]: ...
15 |
--------------------------------------------------------------------------------
/memoization/type/caching/general/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/type/caching/general/__init__.py
--------------------------------------------------------------------------------
/memoization/type/caching/general/keys.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any, Tuple, Dict, Union, Optional
2 |
3 | from memoization.model import HashedList
4 |
5 |
# ``args`` and ``kwargs_mark`` are variable-length tuples: the original
# Tuple[Any] / Tuple[object] annotations would mean "a tuple of exactly one
# element", which is not what the implementation accepts.
def make_key(args: Tuple[Any, ...],
             kwargs: Optional[Dict[str, Any]],
             kwargs_mark: Tuple[object, ...] = ...) -> Union[str, HashedList]: ...
9 |
--------------------------------------------------------------------------------
/memoization/type/model.pyi:
--------------------------------------------------------------------------------
1 | from typing import TypeVar, Callable, Any, Protocol, Union, List, Tuple, Dict, Iterable
2 |
3 | from memoization.model import CacheInfo
4 |
5 | T = TypeVar('T', bound=Callable[..., Any])
6 |
7 |
class CachedFunction(Protocol[T]):
    """
    Structural type of a function decorated by @cached: it is callable with
    the original function's signature (T) and additionally exposes the cache
    management API declared below.
    """
    __call__: T
    __wrapped__: T  # the original user function, per the functools.wraps convention

    def cache_clear(self) -> None:
        """Clear the cache and its statistics information"""

    def cache_info(self) -> CacheInfo:
        """
        Show statistics information

        :return: a CacheInfo object describing the cache
        """

    def cache_is_empty(self) -> bool:
        """Return True if the cache contains no elements"""

    def cache_is_full(self) -> bool:
        """Return True if the cache is full"""

    def cache_contains_argument(self, function_arguments: Union[List[Any], Tuple[Any, ...], Dict[str, Any]],
                                alive_only: bool = ...) -> bool:
        """
        Return True if the cache contains a cached item with the specified function call arguments

        :param function_arguments: Can be a list, a tuple or a dict.
                                   - Full arguments: use a list to represent both positional arguments and keyword
                                     arguments. The list contains two elements, a tuple (positional arguments) and
                                     a dict (keyword arguments). For example,
                                        f(1, 2, 3, a=4, b=5, c=6)
                                     can be represented by:
                                        [(1, 2, 3), {'a': 4, 'b': 5, 'c': 6}]
                                   - Positional arguments only: when the arguments do not include keyword arguments,
                                     a tuple can be used to represent positional arguments. For example,
                                        f(1, 2, 3)
                                     can be represented by:
                                        (1, 2, 3)
                                   - Keyword arguments only: when the arguments do not include positional arguments,
                                     a dict can be used to represent keyword arguments. For example,
                                        f(a=4, b=5, c=6)
                                     can be represented by:
                                        {'a': 4, 'b': 5, 'c': 6}

        :param alive_only: Whether to check alive cache item only (default to True).

        :return: True if the desired cached item is present, False otherwise.
        """

    def cache_contains_result(self, return_value: Any, alive_only: bool = ...) -> bool:
        """
        Return True if the cache contains a cache item with the specified user function return value. O(n) time
        complexity.

        :param return_value: A return value coming from the user function.

        :param alive_only: Whether to check alive cache item only (default to True).

        :return: True if the desired cached item is present, False otherwise.
        """

    def cache_for_each(self, consumer: Callable[[Tuple[Tuple[Any, ...], Dict[str, Any]], Any, bool], None]) -> None:
        """
        Perform the given action for each cache element in an order determined by the algorithm until all
        elements have been processed or the action throws an error

        :param consumer: an action function to process the cache elements. Must have 3 arguments:
                          def consumer(user_function_arguments, user_function_result, is_alive): ...
                         user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
                           args is a tuple holding positional arguments.
                           kwargs is a dict holding keyword arguments.
                           for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
                           user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
                         user_function_result is a return value coming from the user function.
                         is_alive is a boolean value indicating whether the cache is still alive
                         (if a TTL is given).
        """

    def cache_arguments(self) -> Iterable[Tuple[Tuple[Any, ...], Dict[str, Any]]]:
        """
        Get user function arguments of all alive cache elements

        see also: cache_items()

        Example:
            @cached
            def f(a, b, c, d):
                ...
            f(1, 2, c=3, d=4)
            for argument in f.cache_arguments():
                print(argument)  # ((1, 2), {'c': 3, 'd': 4})

        :return: an iterable which iterates through a list of a tuple containing a tuple (positional arguments) and
                 a dict (keyword arguments)
        """

    def cache_results(self) -> Iterable[Any]:
        """
        Get user function return values of all alive cache elements

        see also: cache_items()

        Example:
            @cached
            def f(a):
                return a
            f('hello')
            for result in f.cache_results():
                print(result)  # 'hello'

        :return: an iterable which iterates through a list of user function result (of any type)
        """

    def cache_items(self) -> Iterable[Tuple[Tuple[Tuple[Any, ...], Dict[str, Any]], Any]]:
        """
        Get cache items, i.e. entries of all alive cache elements, in the form of (argument, result).

        argument: a tuple containing a tuple (positional arguments) and a dict (keyword arguments).
        result: a user function return value of any type.

        see also: cache_arguments(), cache_results().

        Example:
            @cached
            def f(a, b, c, d):
                return 'the answer is ' + str(a)
            f(1, 2, c=3, d=4)
            for argument, result in f.cache_items():
                print(argument)  # ((1, 2), {'c': 3, 'd': 4})
                print(result)    # 'the answer is 1'

        :return: an iterable which iterates through a list of (argument, result) entries
        """

    def cache_remove_if(self, predicate: Callable[[Tuple[Tuple[Any, ...], Dict[str, Any]], Any, bool], bool]) -> bool:
        """
        Remove all cache elements that satisfy the given predicate

        :param predicate: a predicate function to judge whether the cache elements should be removed. Must
                          have 3 arguments, and returns True or False:
                           def consumer(user_function_arguments, user_function_result, is_alive): ...
                          user_function_arguments is a tuple holding arguments in the form of (args, kwargs).
                            args is a tuple holding positional arguments.
                            kwargs is a dict holding keyword arguments.
                            for example, for a function: foo(a, b, c, d), calling it by: foo(1, 2, c=3, d=4)
                            user_function_arguments == ((1, 2), {'c': 3, 'd': 4})
                          user_function_result is a return value coming from the user function.
                          is_alive is a boolean value indicating whether the cache is still alive
                          (if a TTL is given).

        :return: True if at least one element is removed, False otherwise.
        """
--------------------------------------------------------------------------------
/memoization/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lonelyenvoy/python-memoization/9c1a0d13e8fbad1f7f4a05d5b41873a7ca5b6aaa/memoization/util/__init__.py
--------------------------------------------------------------------------------
/memoization/util/algorithm_extension_validator.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import traceback
3 |
4 | from memoization.constant.flag import CachingAlgorithmFlag
5 | from memoization.config.algorithm_mapping import get_cache_toolkit
6 | from memoization.model import CacheInfo
7 | from memoization import cached
8 |
9 | _problematic = False
10 | _non_internal_algorithms_found = False
11 |
12 |
def validate():
    """
    Validate your extended (non-built-in) caching algorithms.

    For every member of CachingAlgorithmFlag that is not one of the internal
    algorithms (FIFO/LRU/LFU) this checks that:
      - a toolkit module is registered for the algorithm and exposes a
        callable ``get_caching_wrapper``;
      - the wrapper produced by ``get_caching_wrapper`` exposes the full
        cache_* management API;
      - ``cache_info()`` returns a well-formed CacheInfo instance.

    Problems are reported via _error(), which flags the whole run as failed.
    """
    global _non_internal_algorithms_found
    internal_algorithms = ['FIFO', 'LRU', 'LFU']

    for name, member in CachingAlgorithmFlag.__members__.items():
        if name not in internal_algorithms:
            # Reset per algorithm. Previously this flag was initialized once
            # outside the loop, so a single algorithm without a usable
            # cache_info silently skipped the cache_info checks for every
            # later algorithm as well.
            has_cache_info = True

            @cached(max_size=5, ttl=0.5, algorithm=member, thread_safe=True)
            def tested_function(x):
                return x

            def undecorated_tested_function(x):
                return x

            _non_internal_algorithms_found = True
            print('Found extended algorithm <{}>'.format(name))
            try:
                cache_toolkit = get_cache_toolkit(member)
            except KeyError:
                _error('Cannot find mapping configuration for algorithm <{}>\n'.format(name))
                return
            if not hasattr(cache_toolkit, 'get_caching_wrapper'):
                _error('Cannot find get_caching_wrapper function in module <{}>\n'
                       .format(cache_toolkit.__name__))
                return
            if not callable(cache_toolkit.get_caching_wrapper):
                _error('Expected {}.get_caching_wrapper to be callable\n'
                       .format(cache_toolkit.__name__))
                return
            wrapper = cache_toolkit.get_caching_wrapper(
                user_function=undecorated_tested_function, max_size=5, ttl=0.5, algorithm=member,
                thread_safe=True, order_independent=False, custom_key_maker=None)

            if not hasattr(wrapper, 'cache_info'):
                has_cache_info = False
                _error('Cannot find cache_info function in the cache wrapper of <{}>\n'
                       .format(cache_toolkit.__name__))
            elif not callable(wrapper.cache_info):
                has_cache_info = False
                _error('Expected cache_info of wrapper of <{}> to be callable\n'
                       .format(cache_toolkit.__name__))

            for function_name in (
                    'cache_clear', 'cache_is_empty', 'cache_is_full', 'cache_contains_argument',
                    'cache_contains_result', 'cache_for_each', 'cache_arguments', 'cache_results', 'cache_items',
                    'cache_remove_if',
            ):
                _expect_has_attribute_and_callable(wrapper, function_name, cache_toolkit.__name__)

            # Exercise the decorated function so cache_info() has data to report.
            # NOTE(review): the API checks above inspect ``wrapper`` while the
            # CacheInfo checks below call ``tested_function.cache_info()`` —
            # both are produced by the same toolkit, but confirm this is intended.
            for x in range(0, 5):
                tested_function(x)

            if has_cache_info:
                info = tested_function.cache_info()
                if not isinstance(info, CacheInfo):
                    _error('The return value of cache_info is not an instance of CacheInfo')
                else:
                    if not isinstance(info.hits, int):
                        _error('Expected cache_info().hits to be an integer')
                    if not isinstance(info.misses, int):
                        _error('Expected cache_info().misses to be an integer')
                    if not isinstance(info.current_size, int):
                        _error('Expected cache_info().current_size to be an integer')
                    if info.max_size is not None and not isinstance(info.max_size, int):
                        _error('Expected cache_info().max_size to be an integer')
                    if info.algorithm != member:
                        _error('Expected cache_info().algorithm = <{}> to be <{}>'
                               .format(info.algorithm, member))
                    if info.ttl is not None and not isinstance(info.ttl, int) and not isinstance(info.ttl, float):
                        _error('Expected cache_info().ttl to be an integer or a float')
                    if not isinstance(info.thread_safe, bool):
                        _error('Expected cache_info().thread_safe to be a bool')
89 |
90 |
91 | def _expect_has_attribute_and_callable(wrapper, attribute_name, parent_object_name):
92 | if not hasattr(wrapper, attribute_name):
93 | _error('Cannot find {} function in the cache wrapper of <{}>\n'.format(attribute_name, parent_object_name))
94 | elif not callable(getattr(wrapper, attribute_name)):
95 | _error('Expected {} of wrapper of <{}> to be callable\n'.format(attribute_name, parent_object_name))
96 |
97 |
98 | def _error(message):
99 | global _problematic
100 | _problematic = True
101 | sys.stderr.write('[ERROR] ' + message + '\n')
102 |
103 |
if __name__ == '__main__':
    try:
        validate()
        if _non_internal_algorithms_found is False:
            sys.stderr.write('No extended algorithms found. Please read the extension guidance.\n')
        else:
            if _problematic is False:
                print('\n[Validation OK]')
                print('Congratulations! Your extended algorithm passed the validation. Thanks for your efforts.')
                print('Please understand that this validator only ensure that the typings of your extension are correct. '
                      'You are still required to write test cases for your algorithms.')
            else:
                _error('\nError(s) occurred during validation. It\'s likely that your extended algorithm '
                       'does not function properly. Please read the extension guidance.\n'
                       'If you consider it a bug of the validator itself, you are welcome to fix it in '
                       'your pull request or to create an issue for further help. Thanks!\n')
    # Fixed: was a bare ``except:``, which also swallows KeyboardInterrupt and
    # SystemExit; catching Exception keeps Ctrl-C and sys.exit() working.
    except Exception:
        _error('\nUnexpected error(s) occurred during validation. It\'s likely that your extended algorithm '
               'does not function properly. Please read the extension guidance.\n'
               'If you consider it a bug of the validator itself, you are welcome to fix it in '
               'your pull request or to create an issue for further help. Thanks!\n')
        traceback.print_exc()
126 |
127 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import glob
2 | from setuptools import setup, find_packages # type: ignore
3 | from memoization.memoization import __version__ as memoization_version
4 |
5 |
def get_long_description():
    """Return the long package description read from README.md (UTF-8)."""
    with open('README.md', encoding='utf8') as readme:
        return readme.read()
9 |
10 |
def get_package_data():
    """
    Build the package_data mapping for setup(): every sub-package gets its
    *.pyi stub files listed, plus the py.typed marker for the root package.

    :return: dict mapping dotted package names to lists of data file names
    """
    # Local import keeps this file's top-level imports unchanged.
    from pathlib import PurePath

    package_data = {
        'memoization': ['py.typed', '*.pyi']
    }
    # include all *.pyi stub files
    for filename in glob.iglob('memoization/**/*.pyi', recursive=True):
        # Fixed: splitting on '/' broke on Windows, where glob yields
        # os.sep-separated paths; PurePath.parts handles both separators.
        parts = PurePath(filename).parts
        package_name = '.'.join(parts[:-1])
        pyi_name = parts[-1]
        package_data.setdefault(package_name, []).append(pyi_name)
    return package_data
25 |
26 |
# Package metadata and build configuration (side-effecting top-level call).
setup(
    name='memoization',
    # Version is single-sourced from memoization/memoization.py (__version__).
    version=memoization_version,
    description='A powerful caching library for Python, with TTL support and multiple algorithm options. '
                '(https://github.com/lonelyenvoy/python-memoization)',
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    keywords='memoization memorization remember decorator cache caching function callable '
             'functional ttl limited capacity fast high-performance optimization',
    url='https://github.com/lonelyenvoy/python-memoization',
    author='lonelyenvoy',
    author_email='petrinchor@gmail.com',
    license='MIT',
    packages=find_packages(),
    # Ship the py.typed marker and all *.pyi stubs alongside the code.
    package_data=get_package_data(),
    exclude_package_data={
        '': ['examples.py', 'test.py']
    },
    # Python 3, excluding 3.0-3.3 (the backport module covers pre-3.4 enum).
    python_requires='>=3, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Typing :: Typed',
    ]
)
62 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import random
3 | import weakref
4 | import gc
5 | import time
6 | from itertools import chain
7 | from threading import Thread
8 | from threading import Lock
9 | import inspect
10 | import warnings
11 |
12 | from memoization import cached, suppress_warnings, CachingAlgorithmFlag
13 | from memoization.caching.general.keys_order_dependent import make_key
14 |
# Executed-count bookkeeping for each tested function f1..f99, all starting
# at zero so tests can distinguish cache hits from real executions.
exec_times = {'f' + str(i): 0 for i in range(1, 100)}
lock = Lock()      # for multi-threading tests
random.seed(100)   # set seed to ensure that test results are reproducible
21 |
22 |
################################################################################
# Tested functions
#
# Each fN increments its exec_times counter on every *actual* execution, so
# the tests can tell cache hits apart from real calls.
################################################################################

# f1/f2: default configuration — f1 uses the bare decorator, f2 the called form.
@cached
def f1(x):
    exec_times['f1'] += 1
    return x


@cached()
def f2(x):
    exec_times['f2'] += 1
    return x


# f3-f5: bounded caches (max_size=5), one per algorithm, single-threaded.
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False)
def f3(x):
    exec_times['f3'] += 1
    return x


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False)
def f4(x):
    exec_times['f4'] += 1
    return x


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False)
def f5(x):
    exec_times['f5'] += 1
    return x
55 |
56 |
# f6-f8: thread-safe bounded caches, one per algorithm; the shared lock guards
# the exec_times counter when the tests call these from multiple threads.
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=True)
def f6(x):
    with lock:
        exec_times['f6'] += 1
    return x


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=True)
def f7(x):
    with lock:
        exec_times['f7'] += 1
    return x


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=True)
def f8(x):
    with lock:
        exec_times['f8'] += 1
    return x


# f9-f11: bounded caches with a 0.5-second TTL, one per algorithm.
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False, ttl=0.5)
def f9(x):
    exec_times['f9'] += 1
    return x


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False, ttl=0.5)
def f10(x):
    exec_times['f10'] += 1
    return x


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False, ttl=0.5)
def f11(x):
    exec_times['f11'] += 1
    return x


# f12-f14: same TTL setup but accepting keyword arguments as well.
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False, ttl=0.5)
def f12(arg, **kwargs):
    exec_times['f12'] += 1
    return arg


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False, ttl=0.5)
def f13(arg, **kwargs):
    exec_times['f13'] += 1
    return arg


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False, ttl=0.5)
def f14(arg, **kwargs):
    exec_times['f14'] += 1
    return arg
112 |
113 |
# f15: max_size=0 keeps statistics only and never stores results.
@cached(max_size=0)
def f15(x):
    exec_times['f15'] += 1
    return x


# f16: order_independent=True — keyword-argument order must not affect the key.
@cached(order_independent=True)
def f16(*args, **kwargs):
    exec_times['f16'] += 1
    return args[0]


# f17: exercises every parameter kind (default, *args, keyword-only, **kwargs).
@cached(max_size=5)
def f17(a=1, *b, c=2, **d):
    exec_times['f17'] += 1
    return a


# Custom key maker shared by f18 and f20-f22: the cache key is simply ``a``.
def general_custom_key_maker(a=1, *b, c=2, **d):
    return a


@cached(max_size=5, custom_key_maker=general_custom_key_maker)
def f18(a=1, *b, c=2, **d):
    exec_times['f18'] += 1
    return a


# f19: same as f18 but the key maker is supplied as a lambda.
@cached(max_size=5, custom_key_maker=lambda a=1, *b, c=2, **d: a)
def f19(a=1, *b, c=2, **d):
    exec_times['f19'] += 1
    return a
146 |
147 |
# f20-f22: custom key maker combined with each caching algorithm.
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, custom_key_maker=general_custom_key_maker)
def f20(a=1, *b, c=2, **d):
    exec_times['f20'] += 1
    return a


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, custom_key_maker=general_custom_key_maker)
def f21(a=1, *b, c=2, **d):
    exec_times['f21'] += 1
    return a


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, custom_key_maker=general_custom_key_maker)
def f22(a=1, *b, c=2, **d):
    exec_times['f22'] += 1
    return a


# f23 is deliberately undecorated: the signature-preservation test compares
# the decorated f24-f28 against it via inspect.getfullargspec.
def f23(a=1, *b, c=2, **d):
    exec_times['f23'] += 1
    return a


@cached
def f24(a=1, *b, c=2, **d):
    exec_times['f24'] += 1
    return a


@cached()
def f25(a=1, *b, c=2, **d):
    exec_times['f25'] += 1
    return a


@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO)
def f26(a=1, *b, c=2, **d):
    exec_times['f26'] += 1
    return a


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU)
def f27(a=1, *b, c=2, **d):
    exec_times['f27'] += 1
    return a


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU)
def f28(a=1, *b, c=2, **d):
    exec_times['f28'] += 1
    return a


# f29-f32: TTL-enabled caches used by the cache_contains tests.
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, ttl=0.5)
def f29(a=1, *b, c=2, **d):
    exec_times['f29'] += 1
    return a


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, ttl=0.5)
def f30(a=1, *b, c=2, **d):
    exec_times['f30'] += 1
    return a


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, ttl=0.5)
def f31(a=1, *b, c=2, **d):
    exec_times['f31'] += 1
    return a


@cached(ttl=0.5)
def f32(a=1, *b, c=2, **d):
    exec_times['f32'] += 1
    return a


# f33-f36: TTL-enabled caches returning the full argument set, used by the
# cache_remove_if tests.
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, ttl=0.5)
def f33(a=1, *b, c=2, **d):
    exec_times['f33'] += 1
    return a, b, c, d


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, ttl=0.5)
def f34(a=1, *b, c=2, **d):
    exec_times['f34'] += 1
    return a, b, c, d


@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, ttl=0.5)
def f35(a=1, *b, c=2, **d):
    exec_times['f35'] += 1
    return a, b, c, d


@cached(ttl=0.5)
def f36(a=1, *b, c=2, **d):
    exec_times['f36'] += 1
    return a, b, c, d
247 |
248 |
249 | ################################################################################
250 | # Test entry point
251 | ################################################################################
252 |
253 | class TestMemoization(unittest.TestCase):
254 | def test_memoization_with_default_arguments(self):
255 | for _ in range(5):
256 | f1(10)
257 | f2(10)
258 | f1(20)
259 | f2(20)
260 |
261 | self.assertEqual(exec_times['f1'], 2)
262 | self.assertEqual(exec_times['f2'], 2)
263 |
264 | for info in f1.cache_info(), f2.cache_info():
265 | self.assertIsNone(info.max_size)
266 | self.assertEqual(info.algorithm, CachingAlgorithmFlag.LRU)
267 | self.assertIsNone(info.ttl)
268 | self.assertTrue(info.thread_safe)
269 |
270 | self.assertEqual(info.hits, 4)
271 | self.assertEqual(info.misses, 2)
272 | self.assertEqual(info.current_size, 2)
273 | for f in f1, f2:
274 | keys = make_key((10,), None), make_key((20,), None)
275 | for key in keys:
276 | self.assertIn(key, f._cache)
277 |
278 | f1.cache_clear()
279 | f2.cache_clear()
280 | self._check_empty_cache_after_clearing(f1)
281 | self._check_empty_cache_after_clearing(f2)
282 |
283 | def test_memoization_with_FIFO(self):
284 | self.assertTrue(hasattr(f3, '_fifo_root'))
285 | self._fifo_test(f3)
286 | f3.cache_clear()
287 | self._check_empty_cache_after_clearing(f3)
288 |
289 | def test_memoization_with_LRU(self):
290 | self.assertTrue(hasattr(f4, '_lru_root'))
291 | self._lru_test(f4)
292 | f4.cache_clear()
293 | self._check_empty_cache_after_clearing(f4)
294 |
295 | def test_memoization_with_LFU(self):
296 | self.assertTrue(hasattr(f5, '_lfu_root'))
297 | self._lfu_test(f5)
298 | self._check_lfu_cache_clearing(f5)
299 |
300 | def test_memoization_with_FIFO_multithread(self):
301 | self.assertTrue(hasattr(f6, '_fifo_root'))
302 | self._general_multithreading_test(f6, CachingAlgorithmFlag.FIFO)
303 | self._fifo_test(f6)
304 | f6.cache_clear()
305 | self._check_empty_cache_after_clearing(f6)
306 |
307 | def test_memoization_with_LRU_multithread(self):
308 | self.assertTrue(hasattr(f7, '_lru_root'))
309 | self._general_multithreading_test(f7, CachingAlgorithmFlag.LRU)
310 | self._lru_test(f7)
311 | f7.cache_clear()
312 | self._check_empty_cache_after_clearing(f7)
313 |
314 | def test_memoization_with_LFU_multithread(self):
315 | self.assertTrue(hasattr(f8, '_lfu_root'))
316 | self._general_multithreading_test(f8, CachingAlgorithmFlag.LFU)
317 | self._lfu_test(f8)
318 | self._check_lfu_cache_clearing(f8)
319 |
320 | def test_memoization_with_FIFO_TTL(self):
321 | self.assertTrue(hasattr(f9, '_fifo_root'))
322 | self._general_ttl_test(f9)
323 | f9.cache_clear()
324 | self._check_empty_cache_after_clearing(f9)
325 |
326 | def test_memoization_with_LRU_TTL(self):
327 | self.assertTrue(hasattr(f10, '_lru_root'))
328 | self._general_ttl_test(f10)
329 | f10.cache_clear()
330 | self._check_empty_cache_after_clearing(f10)
331 |
332 | def test_memoization_with_LFU_TTL(self):
333 | self.assertTrue(hasattr(f11, '_lfu_root'))
334 | self._general_ttl_test(f11)
335 | self._check_lfu_cache_clearing(f11)
336 |
337 | def test_memoization_with_FIFO_TTL_kwargs(self):
338 | self.assertTrue(hasattr(f12, '_fifo_root'))
339 | self._general_ttl_kwargs_test(f12)
340 | f12.cache_clear()
341 | self._check_empty_cache_after_clearing(f12)
342 |
343 | def test_memoization_with_LRU_TTL_kwargs(self):
344 | self.assertTrue(hasattr(f13, '_lru_root'))
345 | self._general_ttl_kwargs_test(f13)
346 | f13.cache_clear()
347 | self._check_empty_cache_after_clearing(f13)
348 |
349 | def test_memoization_with_LFU_TTL_kwargs(self):
350 | self.assertTrue(hasattr(f14, '_lfu_root'))
351 | self._general_ttl_kwargs_test(f14)
352 | self._check_lfu_cache_clearing(f14)
353 |
354 | def test_memoization_for_unhashable_arguments_with_FIFO(self):
355 | self._general_unhashable_arguments_test(f3)
356 | f3.cache_clear()
357 | self._check_empty_cache_after_clearing(f3)
358 |
359 | def test_memoization_for_unhashable_arguments_with_LRU(self):
360 | self._general_unhashable_arguments_test(f4)
361 | f4.cache_clear()
362 | self._check_empty_cache_after_clearing(f4)
363 |
364 | def test_memoization_for_unhashable_arguments_with_LFU(self):
365 | self._general_unhashable_arguments_test(f5)
366 | self._check_lfu_cache_clearing(f5)
367 |
368 | def test_memoization_statistic_only(self):
369 | f15(1)
370 | f15(2)
371 | f15(3)
372 |
373 | self.assertEqual(exec_times['f15'], 3)
374 |
375 | info = f15.cache_info()
376 | self.assertEqual(info.max_size, 0)
377 | self.assertIsNone(info.ttl)
378 | self.assertTrue(info.thread_safe)
379 | self.assertEqual(info.hits, 0)
380 | self.assertEqual(info.misses, 3)
381 | self.assertEqual(info.current_size, 0)
382 |
383 | f15.cache_clear()
384 | info = f15.cache_info()
385 | self.assertEqual(info.hits, 0)
386 | self.assertEqual(info.misses, 0)
387 | self.assertEqual(info.current_size, 0)
388 |
389 | def test_memoization_for_different_order_of_kwargs(self):
390 | f16(
391 | 1,
392 | 2,
393 | kwarg1={"some": "dict"},
394 | kwarg2=["it's", "a", "list"],
395 | kwarg3="just_string",
396 | kwarg4=4,
397 | )
398 | f16(
399 | 1,
400 | 2,
401 | kwarg2=["it's", "a", "list"],
402 | kwarg1={"some": "dict"},
403 | kwarg4=4,
404 | kwarg3="just_string",
405 | )
406 | f16(
407 | 1,
408 | 2,
409 | kwarg3="just_string",
410 | kwarg1={"some": "dict"},
411 | kwarg4=4,
412 | kwarg2=["it's", "a", "list"],
413 | )
414 |
415 | self.assertEqual(exec_times['f16'], 1)
416 |
417 | info = f16.cache_info()
418 | self.assertEqual(info.hits, 2)
419 | self.assertEqual(info.misses, 1)
420 | self.assertEqual(info.current_size, 1)
421 |
422 | def test_memoization_for_all_kinds_of_args(self):
423 | self.assertTrue(hasattr(f17, '_lru_root'))
424 | self._lru_test(f17)
425 | f17.cache_clear()
426 | self._check_empty_cache_after_clearing(f17)
427 |
428 | def test_memoization_for_custom_key_maker_function(self):
429 | self._general_custom_key_maker_for_all_kinds_of_args_test(f18, general_custom_key_maker)
430 | self._general_custom_key_maker_for_all_kinds_of_args_test(f20, general_custom_key_maker)
431 | self._general_custom_key_maker_for_all_kinds_of_args_test(f21, general_custom_key_maker)
432 | self._general_custom_key_maker_for_all_kinds_of_args_test(f22, general_custom_key_maker)
433 |
434 | def test_memoization_for_custom_key_maker_lambda(self):
435 | self._general_custom_key_maker_for_all_kinds_of_args_test(f19, general_custom_key_maker)
436 |
437 | def test_memoization_must_preserve_type_signature(self):
438 | self.assertEqual(inspect.getfullargspec(f23), inspect.getfullargspec(f24))
439 | self.assertEqual(inspect.getfullargspec(f23), inspect.getfullargspec(f25))
440 | self.assertEqual(inspect.getfullargspec(f23), inspect.getfullargspec(f26))
441 | self.assertEqual(inspect.getfullargspec(f23), inspect.getfullargspec(f27))
442 | self.assertEqual(inspect.getfullargspec(f23), inspect.getfullargspec(f28))
443 |
444 | def test_memoization_with_custom_key_maker_and_inconsistent_type_signature(self):
445 | def inconsistent_custom_key_maker(*args, **kwargs):
446 | return args[0]
447 |
448 | def should_show_warning():
449 | with warnings.catch_warnings(record=True) as caught_warnings:
450 | warnings.simplefilter('always')
451 |
452 | @cached(max_size=5, custom_key_maker=inconsistent_custom_key_maker)
453 | def f(a=1, *b, c=2, **d):
454 | return a, b, c, d
455 |
456 | self.assertEqual(len(caught_warnings), 1)
457 | self.assertEqual(caught_warnings[0].category, SyntaxWarning)
458 | self.assertTrue('signature' in str(caught_warnings[0].message))
459 |
460 | def should_not_show_warning():
461 | with warnings.catch_warnings(record=True) as caught_warnings:
462 | warnings.simplefilter('always')
463 |
464 | @cached(max_size=5, custom_key_maker=inconsistent_custom_key_maker)
465 | def f(a=1, *b, c=2, **d):
466 | return a, b, c, d
467 |
468 | self.assertEqual(len(caught_warnings), 0)
469 |
470 | should_show_warning()
471 | suppress_warnings(should_warn=False)
472 | should_not_show_warning()
473 | suppress_warnings(should_warn=True)
474 | should_show_warning()
475 |
476 | def test_memoization_for_cache_contains(self):
477 | for tested_function in (f29, f30, f31, f32):
478 | tested_function(100)
479 | self.assertTrue(tested_function.cache_contains_argument((100,)))
480 | self.assertTrue(tested_function.cache_contains_result(100))
481 | tested_function(keyword=10)
482 | self.assertTrue(tested_function.cache_contains_argument({'keyword': 10}))
483 | self.assertTrue(tested_function.cache_contains_result(1))
484 | tested_function(50, 2, 3, 4, keyword1=5, keyword2=6)
485 | self.assertTrue(tested_function.cache_contains_argument([(50, 2, 3, 4), {'keyword1': 5, 'keyword2': 6}]))
486 | self.assertTrue(tested_function.cache_contains_result(50))
487 |
488 | time.sleep(0.6) # wait until the cache expires
489 | self.assertFalse(tested_function.cache_contains_argument((100,), alive_only=True))
490 | self.assertFalse(tested_function.cache_contains_result(100, alive_only=True))
491 | self.assertTrue(tested_function.cache_contains_argument((100,), alive_only=False))
492 | self.assertTrue(tested_function.cache_contains_result(100, alive_only=False))
493 |
494 | self.assertFalse(tested_function.cache_contains_argument({'keyword': 10}, alive_only=True))
495 | self.assertFalse(tested_function.cache_contains_result(1, alive_only=True))
496 | self.assertTrue(tested_function.cache_contains_argument({'keyword': 10}, alive_only=False))
497 | self.assertTrue(tested_function.cache_contains_result(1, alive_only=False))
498 |
499 | self.assertFalse(tested_function.cache_contains_argument([(50, 2, 3, 4), {'keyword1': 5, 'keyword2': 6}], alive_only=True))
500 | self.assertFalse(tested_function.cache_contains_result(50, alive_only=True))
501 | self.assertTrue(tested_function.cache_contains_argument([(50, 2, 3, 4), {'keyword1': 5, 'keyword2': 6}], alive_only=False))
502 | self.assertTrue(tested_function.cache_contains_result(50, alive_only=False))
503 |
504 | def test_memoization_for_cache_remove_if(self):
505 | for tested_function in (f33, f34, f35, f36):
506 |
507 | def always_false(user_function_arguments, user_function_result, is_alive):
508 | return False
509 |
510 | def argument_is_42(user_function_arguments, user_function_result, is_alive):
511 | return user_function_arguments == ((42,), {})
512 |
513 | def result_contain_42(user_function_arguments, user_function_result, is_alive):
514 | for item in user_function_result:
515 | if item == 42:
516 | return True
517 | return False
518 |
519 | def is_dead(user_function_arguments, user_function_result, is_alive):
520 | return not is_alive
521 |
522 | tested_function(1)
523 | tested_function(42)
524 | tested_function(c=42)
525 |
526 | tested_function.cache_remove_if(always_false)
527 | self.assertTrue(tested_function.cache_contains_argument((1,)))
528 | self.assertTrue(tested_function.cache_contains_argument((42,)))
529 | self.assertTrue(tested_function.cache_contains_argument({'c': 42}))
530 |
531 | tested_function.cache_remove_if(argument_is_42)
532 | self.assertTrue(tested_function.cache_contains_argument((1,)))
533 | self.assertFalse(tested_function.cache_contains_argument((42,)))
534 | self.assertTrue(tested_function.cache_contains_argument({'c': 42}))
535 |
536 | tested_function(1)
537 | tested_function(42)
538 | tested_function(c=42)
539 |
540 | tested_function.cache_remove_if(result_contain_42)
541 | self.assertTrue(tested_function.cache_contains_argument((1,)))
542 | self.assertFalse(tested_function.cache_contains_argument((42,)))
543 | self.assertFalse(tested_function.cache_contains_argument({'c': 42}))
544 |
545 | time.sleep(0.6) # wait until cache expires
546 |
547 | self.assertTrue(tested_function.cache_contains_argument((1,), alive_only=False))
548 | self.assertFalse(tested_function.cache_contains_argument((1,), alive_only=True))
549 |
550 | tested_function(2)
551 | tested_function.cache_remove_if(is_dead)
552 | self.assertFalse(tested_function.cache_contains_argument((1,), alive_only=False))
553 | self.assertFalse(tested_function.cache_contains_argument((42,), alive_only=False))
554 | self.assertFalse(tested_function.cache_contains_argument({'c': 42}, alive_only=False))
555 | self.assertFalse(tested_function.cache_contains_argument((1,), alive_only=True))
556 | self.assertFalse(tested_function.cache_contains_argument((42,), alive_only=True))
557 | self.assertFalse(tested_function.cache_contains_argument({'c': 42}, alive_only=True))
558 | self.assertTrue(tested_function.cache_contains_argument((2,), alive_only=True))
559 |
    def _general_test(self, tested_function, algorithm, hits, misses, in_cache, not_in_cache):
        """Shared workload for the FIFO/LRU/LFU tests: fill a max_size=5 cache with
        21 distinct arguments, then replay a fixed call sequence and verify the
        hit/miss counters and which entries survive eviction.

        :param tested_function: a @cached function (expected max_size=5, no TTL)
        :param algorithm:       expected CachingAlgorithmFlag reported by cache_info()
        :param hits:            expected hit counter after the replay sequence
        :param misses:          expected miss counter after the replay sequence
        :param in_cache:        argument values expected to remain cached at the end
        :param not_in_cache:    argument values expected to have been evicted
        """
        # clear
        exec_times[tested_function.__name__] = 0
        tested_function.cache_clear()
        self.assertTrue(tested_function.cache_is_empty())
        self.assertFalse(tested_function.cache_is_full())

        # 21 distinct arguments against a 5-slot cache: every call misses.
        for i in range(20):
            tested_function(i)
        tested_function(99)

        self.assertTrue(tested_function.cache_is_full())
        self.assertFalse(tested_function.cache_is_empty())

        self.assertEqual(exec_times[tested_function.__name__], 21)
        info = tested_function.cache_info()
        self.assertEqual(info.max_size, 5)
        self.assertEqual(info.algorithm, algorithm)
        self.assertIsNone(info.ttl)
        self.assertIsNotNone(info.thread_safe)

        self.assertEqual(info.hits, 0)
        self.assertEqual(info.misses, 21)
        self.assertEqual(info.current_size, 5)

        # After pure insertion the 5 most recently inserted entries remain,
        # regardless of algorithm (no hits have occurred yet).
        results = (99, 19, 18, 17, 16)
        arguments = [(x,) for x in results]
        for argument in arguments:
            self.assertTrue(tested_function.cache_contains_argument(argument))
        keys = [make_key(x, None) for x in arguments]
        for key in keys:
            self.assertIn(key, tested_function._cache)
        for result in results:
            self.assertTrue(tested_function.cache_contains_result(result))

        # 10 consecutive calls here
        tested_function(16)
        tested_function(17)
        tested_function(18)
        tested_function(16)
        tested_function(17)
        tested_function(18)

        tested_function(19)
        tested_function(15)
        tested_function(100)
        tested_function(16)

        # The replay above yields algorithm-specific hit/miss totals, supplied
        # by the caller.
        info = tested_function.cache_info()
        self.assertEqual(info.hits, hits)
        self.assertEqual(info.misses, misses)
        self.assertEqual(info.current_size, 5)

        keys = [make_key((x,), None) for x in in_cache]
        for key in keys:
            self.assertIn(key, tested_function._cache)
        keys = [make_key((x,), None) for x in chain(not_in_cache, range(0, 15))]
        for key in keys:
            self.assertNotIn(key, tested_function._cache)
619 |
620 | def _general_multithreading_test(self, tested_function, algorithm):
621 | number_of_keys = 30000
622 | number_of_threads = 4
623 |
624 | # clear
625 | exec_times[tested_function.__name__] = 0
626 | tested_function.cache_clear()
627 |
628 | info = tested_function.cache_info()
629 | self.assertEqual(info.max_size, 5)
630 | self.assertEqual(info.algorithm, algorithm)
631 | self.assertIsNone(info.ttl)
632 | self.assertTrue(info.thread_safe)
633 | self.assertEqual(info.current_size, 0)
634 |
635 | # Test must-hit
636 | def run_must_hit():
637 | keys = list(range(5)) * int(number_of_keys / 5)
638 | random.shuffle(keys)
639 | for i in keys:
640 | tested_function(i)
641 |
642 | threads = [Thread(target=run_must_hit) for _ in range(number_of_threads)]
643 | for thread in threads:
644 | thread.start()
645 | for thread in threads:
646 | thread.join()
647 |
648 | self.assertGreaterEqual(exec_times[tested_function.__name__], 5)
649 | info = tested_function.cache_info()
650 | self.assertLessEqual(info.hits, number_of_keys * number_of_threads - 5)
651 | self.assertGreaterEqual(info.misses, 5)
652 | self.assertEqual(info.current_size, 5)
653 |
654 | for key in [make_key((x,), None) for x in range(5)]:
655 | self.assertIn(key, tested_function._cache)
656 |
657 | # Test can-miss
658 | def run_can_miss():
659 | keys = list(range(20)) * int(number_of_keys / 20)
660 | random.shuffle(keys)
661 | for i in keys:
662 | tested_function(i)
663 |
664 | threads = [Thread(target=run_can_miss) for _ in range(number_of_threads)]
665 | for thread in threads:
666 | thread.start()
667 | for thread in threads:
668 | thread.join()
669 |
670 | executed_times = exec_times[tested_function.__name__]
671 | self.assertLessEqual(executed_times, number_of_keys * number_of_threads)
672 | self.assertGreaterEqual(executed_times, 20)
673 | info = tested_function.cache_info()
674 | self.assertGreaterEqual(info.hits, 0)
675 | self.assertLessEqual(info.misses, number_of_keys * number_of_threads)
676 | self.assertEqual(info.current_size, 5)
677 |
678 | def _fifo_test(self, tested_function):
679 | self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.FIFO, hits=7, misses=24,
680 | in_cache=(16, 100, 15, 99, 19), not_in_cache=(18, 17))
681 | self.assertEqual(exec_times[tested_function.__name__], 24)
682 | self._cache_for_each_test(tested_function, [((16,), {}), ((100,), {}), ((15,), {}), ((99,), {}), ((19,), {})])
683 |
684 | def _lru_test(self, tested_function):
685 | self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.LRU, hits=7, misses=24,
686 | in_cache=(16, 100, 15, 19, 18), not_in_cache=(99, 17))
687 | self.assertEqual(exec_times[tested_function.__name__], 24)
688 | self._cache_for_each_test(tested_function, [((16,), {}), ((100,), {}), ((15,), {}), ((19,), {}), ((18,), {})])
689 |
690 | def _lfu_test(self, tested_function):
691 | self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.LFU, hits=8, misses=23,
692 | in_cache=(18, 17, 16, 19, 100), not_in_cache=(99, 15))
693 | self.assertEqual(exec_times[tested_function.__name__], 23)
694 | self._cache_for_each_test(tested_function, [((16,), {}), ((18,), {}), ((17,), {}), ((19,), {}), ((100,), {})])
695 |
696 | def _cache_for_each_test(self, tested_function, expected_argument_list):
697 | cache_collected = []
698 |
699 | def collect(arguments, value, is_alive):
700 | cache_collected.append((arguments, value, is_alive))
701 |
702 | tested_function.cache_for_each(collect)
703 | for item in cache_collected:
704 | self.assertEqual(item[2], True)
705 | self.assertEqual([item[0] for item in cache_collected], [argument for argument in expected_argument_list])
706 |
707 | actual_argument_list = []
708 | for argument in tested_function.cache_arguments():
709 | actual_argument_list.append(argument)
710 | self.assertEqual(list(tested_function.cache_arguments()), expected_argument_list)
711 | self.assertEqual(actual_argument_list, expected_argument_list)
712 |
713 | expected_result_list = [argument[0][0] for argument in expected_argument_list]
714 | actual_result_list = []
715 | for result in tested_function.cache_results():
716 | actual_result_list.append(result)
717 | self.assertEqual(list(tested_function.cache_results()), expected_result_list)
718 | self.assertEqual(actual_result_list, expected_result_list)
719 |
720 | expected_cache_item_list = [(argument, argument[0][0]) for argument in expected_argument_list]
721 | actual_cache_items_list = []
722 | for argument, result in tested_function.cache_items():
723 | actual_cache_items_list.append((argument, result))
724 | self.assertEqual(list(tested_function.cache_items()), expected_cache_item_list)
725 | self.assertEqual(actual_cache_items_list, expected_cache_item_list)
726 |
727 | def _check_empty_cache_after_clearing(self, tested_function):
728 | self.assertTrue(tested_function.cache_is_empty())
729 | self.assertFalse(tested_function.cache_is_full())
730 |
731 | info = tested_function.cache_info()
732 | self.assertEqual(info.hits, 0)
733 | self.assertEqual(info.misses, 0)
734 | self.assertEqual(info.current_size, 0)
735 |
736 | cache = tested_function._cache
737 | self.assertEqual(len(cache), 0)
738 |
739 | def _check_lfu_cache_clearing(self, tested_function):
740 | root_next = weakref.ref(tested_function._lfu_root.next)
741 | first_cache_head = weakref.ref(tested_function._lfu_root.next.cache_head)
742 | self.assertIsNotNone(root_next())
743 | self.assertIsNotNone(first_cache_head())
744 |
745 | tested_function.cache_clear()
746 | self._check_empty_cache_after_clearing(tested_function)
747 |
748 | gc.collect()
749 | self.assertIsNone(root_next())
750 | self.assertIsNone(first_cache_head())
751 |
    def _general_ttl_test(self, tested_function, arg=1, kwargs=None):
        """Verify TTL behavior: a cached entry serves hits until it expires
        (the TTL is presumed shorter than the 0.6s sleep below — confirm against
        the tested functions' decorators), after which the next call re-executes
        the function and refreshes the entry.

        :param tested_function: a @cached function with a short TTL
        :param arg:             positional argument used for every call
        :param kwargs:          optional keyword arguments; None means positional-only calls
        """
        # clear
        exec_times[tested_function.__name__] = 0
        tested_function.cache_clear()

        def call_tested_function(arg, kwargs):
            if kwargs is None:
                tested_function(arg)
            else:
                tested_function(arg, **kwargs)

        key = make_key((arg,), kwargs)
        call_tested_function(arg, kwargs)  # first call: a miss that populates the cache

        info = tested_function.cache_info()
        self.assertEqual(info.hits, 0)
        self.assertEqual(info.misses, 1)
        self.assertEqual(info.current_size, 1)
        self.assertIn(key, tested_function._cache)

        call_tested_function(arg, kwargs)  # this WILL NOT call the tested function

        info = tested_function.cache_info()
        self.assertEqual(info.hits, 1)
        self.assertEqual(info.misses, 1)
        self.assertEqual(info.current_size, 1)
        self.assertIn(key, tested_function._cache)
        self.assertEqual(exec_times[tested_function.__name__], 1)

        time.sleep(0.6)  # wait until the cache expires

        # The expired entry still occupies a slot until it is refreshed.
        info = tested_function.cache_info()
        self.assertEqual(info.current_size, 1)

        call_tested_function(arg, kwargs)  # this WILL call the tested function

        info = tested_function.cache_info()
        self.assertEqual(info.hits, 1)
        self.assertEqual(info.misses, 2)
        self.assertEqual(info.current_size, 1)
        self.assertIn(key, tested_function._cache)
        self.assertEqual(exec_times[tested_function.__name__], 2)

        # The previous call should have been cached, so it must not call the function again
        call_tested_function(arg, kwargs)  # this SHOULD NOT call the tested function

        info = tested_function.cache_info()
        self.assertEqual(info.hits, 2)
        self.assertEqual(info.misses, 2)
        self.assertEqual(info.current_size, 1)
        self.assertIn(key, tested_function._cache)
        self.assertEqual(exec_times[tested_function.__name__], 2)
804 |
805 | def _general_ttl_kwargs_test(self, tested_function):
806 | self._general_ttl_test(tested_function, arg=1, kwargs={"test": {"kwargs": [1, 0.5]}, "complex": True})
807 |
808 | def _general_unhashable_arguments_test(self, tested_function):
809 | args = ([1, 2, 3], {'this': 'is unhashable'}, ['yet', ['another', ['complex', {'type, ': 'isn\'t it?'}]]])
810 | for arg in args:
811 | # clear
812 | exec_times[tested_function.__name__] = 0
813 | tested_function.cache_clear()
814 |
815 | key = make_key((arg,), None)
816 | tested_function(arg)
817 | self.assertIn(key, tested_function._cache)
818 |
819 | if isinstance(arg, list):
820 | arg.append(0)
821 | elif isinstance(arg, dict):
822 | arg['foo'] = 'bar'
823 | else:
824 | raise TypeError
825 | key = make_key((arg,), None)
826 | tested_function(arg)
827 | self.assertIn(key, tested_function._cache)
828 |
829 | if isinstance(arg, list):
830 | arg.pop()
831 | elif isinstance(arg, dict):
832 | del arg['foo']
833 | else:
834 | raise TypeError
835 | key = make_key((arg,), None)
836 | tested_function(arg)
837 | self.assertIn(key, tested_function._cache)
838 |
839 | self.assertEqual(exec_times[tested_function.__name__], 2)
840 | info = tested_function.cache_info()
841 | self.assertEqual(info.hits, 1)
842 | self.assertEqual(info.misses, 2)
843 | self.assertEqual(info.current_size, 2)
844 |
845 | def _general_custom_key_maker_for_all_kinds_of_args_test(self, tested_function, custom_key_maker):
846 | # clear
847 | exec_times[tested_function.__name__] = 0
848 | tested_function.cache_clear()
849 |
850 | for _ in range(3):
851 | tested_function(2, 3, 4, 5, 6, c=7, test=True, how_many_args=8)
852 | tested_function(10, 3, 4, 5, 6, c=7, test=True, how_many_args=8)
853 | tested_function(a=50)
854 |
855 | self.assertEqual(exec_times[tested_function.__name__], 3)
856 | info = tested_function.cache_info()
857 | self.assertEqual(info.max_size, 5)
858 | self.assertIsNotNone(info.algorithm)
859 | self.assertIsNone(info.ttl)
860 | self.assertIsNotNone(info.thread_safe)
861 | self.assertTrue(info.use_custom_key)
862 |
863 | self.assertEqual(info.hits, 2)
864 | self.assertEqual(info.misses, 3)
865 | self.assertEqual(info.current_size, 3)
866 |
867 | keys = [custom_key_maker(2, 3, 4, 5, 6, c=7, test=True, how_many_args=8),
868 | custom_key_maker(10, 3, 4, 5, 6, c=7, test=True, how_many_args=8),
869 | custom_key_maker(a=50)]
870 | for key in keys:
871 | self.assertIn(key, tested_function._cache)
872 |
873 |
if __name__ == '__main__':
    # Run the whole test suite when this file is executed directly.
    unittest.main()
876 |
--------------------------------------------------------------------------------