├── .editorconfig
├── .github
│   └── workflows
│       └── ci.yml
├── .gitignore
├── CHANGELOG
├── LICENSE
├── MANIFEST.in
├── README.rst
├── bench.py
├── cacheops
│   ├── __init__.py
│   ├── apps.py
│   ├── conf.py
│   ├── getset.py
│   ├── invalidation.py
│   ├── jinja2.py
│   ├── lua
│   │   ├── cache_thing.lua
│   │   ├── cache_thing_insideout.lua
│   │   ├── invalidate.lua
│   │   └── invalidate_insideout.lua
│   ├── management
│   │   ├── __init__.py
│   │   └── commands
│   │       ├── __init__.py
│   │       ├── cleanfilecache.py
│   │       ├── invalidate.py
│   │       └── reapconjs.py
│   ├── query.py
│   ├── reaper.py
│   ├── redis.py
│   ├── serializers.py
│   ├── sharding.py
│   ├── signals.py
│   ├── simple.py
│   ├── templatetags
│   │   ├── __init__.py
│   │   └── cacheops.py
│   ├── transaction.py
│   ├── tree.py
│   └── utils.py
├── manage.py
├── publish.sh
├── pytest.ini
├── requirements-test.txt
├── setup.cfg
├── setup.py
├── tests
│   ├── __init__.py
│   ├── bench.py
│   ├── fixtures
│   │   └── basic.json
│   ├── models.py
│   ├── settings.py
│   ├── test_extras.py
│   ├── test_low_level.py
│   ├── tests.py
│   ├── tests_sharding.py
│   ├── tests_transactions.py
│   ├── urls.py
│   └── utils.py
└── tox.ini
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*]
4 | indent_style = space
5 | indent_size = 4
6 | charset = utf-8
7 | trim_trailing_whitespace = true
8 | insert_final_newline = true
9 |
10 | [Makefile]
11 | indent_style = tab
12 |
13 | [*.{yml,yaml}]
14 | indent_size = 2
15 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: "CI"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-22.04
12 | steps:
13 | - uses: actions/checkout@v3
14 | - name: Set up Python 3.13
15 | uses: actions/setup-python@v4
16 | with:
17 | python-version: "3.13"
18 | - name: "Install Dependencies"
19 | run: pip install flake8
20 | - name: Lint
21 | run: flake8
22 |
23 | test:
24 | runs-on: ubuntu-22.04
25 | continue-on-error: ${{ matrix.experimental }}
26 | strategy:
27 | fail-fast: false
28 | matrix:
29 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "pypy3.10"]
30 | experimental: [false]
31 | include:
32 | - python-version: "3.13"
33 | experimental: true
34 |
35 | services:
36 | postgres:
37 | image: postgres
38 | env:
39 | POSTGRES_PASSWORD: cacheops
40 | POSTGRES_USER: cacheops
41 | POSTGRES_HOST_AUTH_METHOD: trust
42 | ports:
43 | - 5432:5432
44 | redis:
45 | image: redis
46 | ports:
47 | - 6379:6379
48 | mysql:
49 | image: mysql
50 | env:
51 | MYSQL_ROOT_PASSWORD: cacheops
52 | MYSQL_DATABASE: cacheops
53 | ports:
54 | - 3306:3306
55 | options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=10
56 |
57 | name: ${{ matrix.experimental && 'Django main [ok to fail]' || format('Python {0}', matrix.python-version) }}
58 | steps:
59 | - uses: actions/checkout@v3
60 | - name: Setup python
61 | uses: actions/setup-python@v4
62 | with:
63 | python-version: ${{ matrix.python-version }}
64 | architecture: x64
65 |
66 | - name: "Install Dependencies"
67 | run: pip install tox tox-gh-actions
68 |
69 | - name: "Run tests"
70 | run: tox ${{ matrix.experimental && '-e py313-djmain' || '' }}
71 | env:
72 | MYSQL_HOST: 127.0.0.1
73 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | *.pyc
3 | dist
4 | *.egg-info
5 | build
6 | sqlite*.db
7 | .tox
8 | .cache
9 |
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | 7.2
2 | - support and test against Python 3.13 and Django 5.1/5.2 (thx to Stu Tomlinson)
3 | - updated md5 to set usedforsecurity for FIPS compliance (Logan Bibby)
4 | - skip ArrayField in exact conds
5 |
6 | 7.1
7 | - support and test against Python 3.12 and Django 5.0
8 | - prevent dups in conjs and disjs by using proper data-structures (thx Sergey Prokopiev for a test)
9 | - unpin funcy major version
10 | - fixed conj keys TTL in Redis 7.x
11 | - updated and cleaned up tests
12 |
13 | 7.0.2
14 | - fixed .aggregate()
15 | - fixed big memory usage during migrations
16 | - fixed INSIDEOUT in older redises
17 | - better handle model families with abstracts in them
18 | - allow funcy 2.0+
19 |
20 | 7.0.1
21 | - made it work with Redis 6.x and older again
22 | - handle abstract models better
23 | - some docs improvements
24 |
25 | 7.0
26 | - support Django 4.2b and 5.0a
27 | - added a new insideout mode
28 | - made join invalidation more granular
29 | Backwards incompatible changes:
30 | - dropped Python 3.5, 3.6 and Django 2.1, 2.2, 3.0 and 3.1 support
31 | - removed CACHEOPS_LRU
32 | - removed CacheopsRedis, should inherit redis.Redis instead
33 |
34 | 6.2
35 | - support Python 3.11 and Django 4.1
36 | - added command to clear stale cacheops keys (Bruno Alla)
37 | - fixed `invalidate_m2o` for polymorphic models (#430) (Andrey Alekseev)
38 | - updated README: TOC, link to the post, some explanations
39 |
40 | 6.1
41 | - support Django 3.2 and 4.0 (thx to Olivér Kecskeméty)
42 | - do not gulp commit errors (Oleg Yamnikov)
43 | - fixed precall key when a prefix is defined (Peter Baumgartner)
44 | - fixed m2o/m2m queries invalidation on object deletion (thx to Sergey Tikhonov)
45 |
46 | 6.0
47 | - support and test against Python 3.9 and Django 3.1/3.2
48 | - added custom serializers support (thx to Arcady Usov)
49 | - support callable extra in @cached_as() and friends
50 | - made simple cache obey prefix
51 | - skip JSONFields for purposes of invalidation
52 | - configure skipped fields by internal types, classes still supported
53 | - handle `DatabaseError` on transaction cleanup (Roman Gorbil)
54 | - do not query old object if cacheops is disabled
55 | - do not fetch deferred fields during invalidation, fixes #387
56 | Backwards incompatible changes:
57 | - callable `extra` param, including type, now behaves differently
58 | - simple cache now uses prefix
59 |
60 | 5.1
61 | - support subqueries in annotations (Jeremy Stretch)
62 | - included tests into distro (John Vandenberg)
63 | - fixed .invalidated_update(), if updated QuerySet had related fields selected (M1hacka)
64 | - fixed possible deadlock in .invalidated_update() (M1hacka)
65 | - fixed filtering with expressions
66 | - fixed queries filtering in Exists (thx to Axel Wegener)
67 | - updated min redis-py to 3.0.0
68 |
69 | 5.0.1
70 | - fixed reverse o2o invalidation (thx to John Anderson)
71 | - fixed unstable cache key when field validators are used
72 | - guard against non-integer timeouts
73 |
74 | 5.0
75 | - support Python 3.8 and Django 3.0 (thx to Misha Kalyna)
76 | - improve model fields stamping (Alex Lokhman)
77 | - disabled postponed invalidation when no_invalidation is applied (Vladimir)
78 | - fixed custom manager derived from BaseManager (Eric Plaster)
79 | Backwards incompatible changes:
80 | - dropped Python 2.7 and Djangos before 2.1
81 | - Redis 4.0+ required
82 |
83 | 4.2
84 | - support Django 2.1 and 2.2
85 | - added keep_fresh option to @cached_as (George Lee)
86 | - pass CACHEOPS_SENTINEL options through (David Fuentes Baldomir)
87 | - made SKIP_FIELDS and LONG_DISJUNCTION configurable (Nic Wolff)
88 | - fixed .aggregate() over .annotate() fields in Django 2.2
89 | - fixed .bulk_create() in Django 2.2 (Grzegorz Szczepanczyk)
90 | - fixed test database to work in environments without /dev/shm (George Lee)
91 | - fixed proxy/abstract model bug
92 | - added test case for issue #312 (Andy Tzeng)
93 | - some warnings and doc-strings improvements
94 |
95 | 4.1
96 | - use UNLINK instead of DEL when available, use Redis 4.0+ for that
97 | - request m2m objects for invalidation using correct db
98 | - fixed caching counts, aggregates and exists in writes,
99 | dirty transactions and while cacheops is disabled
100 | - fixed various multidb/invalidation issues (Svan70)
101 | - fixed db in .invalidated_update()
102 | - fixed possible key discrepancy in cache_on_save with multidb
103 | - fixed jinja2 support in Python 3
104 | - documented CACHEOPS_CLIENT_CLASS
105 |
106 | 4.0.7
107 | - fixed RawSQL() and Subquery() (thx to thakryptex)
108 | - made the Redis client class configurable (Nic Wolff)
109 | - package is now also distributed as a universal wheel (Jon Dufresne)
110 |
111 | 4.0.6
112 | - fixed m2m invalidation issue with certain configs
113 | - fixed catastrophic backtracking in template extensions
114 |
115 | 4.0.5
116 | - fixed db selection in invalidation fetch and .invalidated_update() when router fails (M1ha Shvn)
117 | - fixed unlikely "_clone() unexpected keyword '_cacheprofile'" error
118 | - fixed LookupError bug
119 | - fixed .meta.concrete_model not set bug
120 | - fixed docs on template tags
121 |
122 | 4.0.4
123 | - fixed caching while app registry not ready
124 | - fixed random ordered dicts producing varying SQL for same query
125 |
126 | 4.0.3
127 | - configure via Sentinel (Tapo4ek)
128 |
129 | 4.0.2
130 | - fixed caching django migrations
131 |
132 | 4.0.1
133 | - do not prevent fast path deletes on not cached models
134 | - minor optimization
135 |
136 | 4.0
137 | - added cache key prefix function
138 | - added .aggregate() caching
139 | - support QuerySet.union() and friends
140 | - added cache_invalidated signal (thx to Kimmo Kiiski)
141 | - cache .first() and .last() on 'get' op
142 | - correctly skip or show nice error message on non-django models/managers
143 | - allow cleaning file cache in non default place
144 | - cache keys no longer change on fields reorder
145 | - fixed .invalidated_update() on updated object versions
146 | - fixed template tags in Django 2.0
147 | - fixed deprecation warnings in Python 3
148 | - use module global CACHEOPS_DEBUG to cache evolving code
149 | - minor optimizations
150 | Backwards incompatible changes:
151 | - dropped Django 1.7 support
152 | - dropped write_only flag
153 | - dropped implicit write_only in .get_or_create(), .select_for_update() and friends
154 | - .iterator() is never cached now
155 | - invalidate_all() works immediately even in transaction
156 | - @cached_as(timeout=0) no longer means timeout is ignored/derived from querysets,
157 | use timeout=None instead.
158 |
159 | 3.2.1
160 | - fixed CACHEOPS_DEGRADE_ON_FAILURE=True
161 |
162 | 3.2
163 | - support Django 1.11
164 | - support Python 3.6
165 | - preliminary support for Django 2.0
166 | - support multidb nested transactions
167 | - fixed pk with default (leonardo orozco)
168 |
169 | 3.1.3
170 | - better dirty sql heuristic
171 | - fixed on commit sequence issue (thx to Irae Hueck Costa)
172 | - fixed dup field__eq queries (Michał Ochman)
173 |
174 | 3.1.2
175 | - fixed querysets with custom .iterator() (like django-polymorphic)
176 | - support no argument @cached_view
177 | - check that sample is passed into @cached_as()
178 |
179 | 3.1.1
180 | - fixed unexpected dirty transaction
181 | - fixed a bug with destroying manager
182 | - fixed CACHEOPS setting and upper-case app labels
183 |
184 | 3.1
185 | - added locking to combat dog-pile effect
186 | - handle transactions smarter (ihucos)
187 | - handle functions in @cached_as() and @cached() args
188 | - do not allow unknown kwargs in @cached_as()
189 |
190 | 3.0.1
191 | - support Django 1.10 (worked before, but now it's official)
192 | - accept empty CACHEOPS_REDIS setting
193 | - fixed ImportError in cleanfilecache command (Roman)
194 |
195 | 3.0
196 | - support PyPy
197 | - support Python 3.5
198 | - support Django 1.9
199 | - added transparent transaction handling (Joel Hillacre)
200 | - added .invalidated_update()
201 | - added cache_read signal (Joona Pääkkönen)
202 | - added CACHEOPS_ENABLED setting
203 | - invalidate on intermediate tables in long joins
204 | - support settings override
205 | - made CACHEOPS keys case insensitive
206 | - allow redis connection settings to be specified by a URL (Tim Savage)
207 | - fixed router support
208 | - fixed clone cache settings affecting original queryset
209 | - more fixes for non-ascii in str params
210 | - calc func cache keys smarter
211 | - no_invalidation optimizations
212 | Backwards incompatible changes:
213 | - Django 1.6 and earlier no longer supported
214 | - dropped old CACHEOPS setting format
215 | - removed CACHEOPS_FAKE setting
216 | - removed .cached_call() from basic and file caches
217 | - disabled cacheops for fake migration models
218 |
219 | 2.4.5
220 | - backport: disabled cacheops for fake migration models
221 | - disabled cacheops for south models
222 | - fixed get_queryset() on custom qs rename bug
223 |
224 | 2.4.3
225 | - fixed .get() on reverse fk related manager
226 | - fixed memory leak on migrations
227 |
228 | 2.4.2
229 | - fixed .values() and .values_list() in Django 1.9
230 | - stopped sharing cache between proxies and base
231 |
232 | 2.4.1
233 | - export FileCache and RedisCache from cacheops
234 | - allow non-ascii in str params
235 | - fixed subqueries with different db
236 |
237 | 2.4
238 | - added @decorator_tag to easily create cached template tags
239 | - create redis client lazily
240 | - let invalidate @cached_view()
241 | - support template responses used by generic CBV
242 | - support argumentless no parentheses form for @cached and @cached_view
243 | - removed unneeded invalidation calls in .bulk_create()
244 | - allow decorating built-in and external functions
245 | - added .cached_call() to simple and file cache
246 | - added key_func argument to @cached_as()
247 | - check that timeout is specified in CACHEOPS setting
248 | - fixed m2m invalidation on reverse changes
249 | - fixed passing kwargs in @cached_as()
250 | - fixed @cached with no parentheses
251 | - fixed .bulk_create() API in Django 1.4
252 |
253 | 2.3.2
254 | - made cacheops invalidate before other post_* signals (Emil Stenström)
255 | - fixed invalidation on proxy/base model changes
256 | - added no_invalidation to fake cacheops
257 | - test against MySQL
258 |
259 | 2.3.1
260 | - updated support for Django 1.8 (Andriy Sokolovskiy)
261 | - fixed bulk_create() to return objects instead of None (Ilya Baryshev)
262 |
263 | 2.3
264 | - Django 1.8 support and preliminary Django 1.9 support
265 | - made 'ops' config option to accept single string
266 | - added invalidate_fragment()
267 | - added a way to get/set/delete function cache easily
268 | - added redis.TimeoutError to degradation handling (George Kappel)
269 | - fixed invalidation on QuerySet.bulk_create(),
270 | worked only from Manager previously
271 | - fixed .bulk_create() API to comply with Django
272 | - minor optimizations
273 |
274 | 2.2.1
275 | - fixed thread local error
276 | - fixed ops = 'all' both in config and .cache()
277 |
278 | 2.2
279 | - switched to new CACHEOPS setting style
280 | - added CACHEOPS_DEFAULTS setting
281 | - work as LRU cache
282 | - cache .exists() calls in Django 1.6+
283 | - invalidate on .bulk_create()
284 | - added no_invalidation context manager/decorator
285 | - documented local_get and cache_on_save
286 | - fixed saving objects with F-expression fields (Fedor Nasyrov)
287 | - fixed queries with Django 1.7 transforms
288 | - fixed binary fields in python 3
289 | - stopped using simplejson
290 | - simpler queryset key calculation,
291 | third party fields should be more compatible now
292 | - removed bogus clone param from .nocache() call
293 |
294 | 2.1.1
295 | - fixed bug in m2m invalidation
296 | - fixed bug with null geometry fields
297 | - fixed unpickling objects with BinaryFields
298 |
299 | 2.1
300 | - support Django 1.7
301 | - do not fetch from cache when doing .select_for_update() and similar,
302 | but do write to cache if it's enabled
303 | - fixed inherited models exception in admin,
304 | multi-table models are still not really supported!
305 | - fixed fake cacheops
306 | - fixed deprecation warning in Django 1.6 admin
307 |
308 | 2.0
309 | - conditions on related models will now invalidate queryset
310 | - m2m invalidation is much more granular
311 | - removed requirement that timeout should not be greater than default
312 | - lua scripting is used to save and invalidate cache, making things faster
313 | - better invalidation for complex and custom fields
314 | - silent stringify of unknown objects by default
315 | - support caching django.contrib.gis queries (koodjo)
316 | - cacheops is now thread-safe
317 | - added a way to no-op cacheops
318 | - added @cached_view() and @cached_view_as()
319 | - pass several samples for @cached_as() and @cached_view_as()
320 | - fixed working with querysets created by non-patched constructor (#3 and dups)
321 | - fixed invalidate_model() for proxy models
322 | - fixed deepcopy(queryset) bug
323 | - fixed possible collisions when cached functions passed complex structured arguments
324 | - fixed StringifyError on timedelta (mpyatishev)
325 | Backwards incompatible changes:
326 | - filters on TextFields no longer affect invalidation
327 | - @cached_as() cache key will now depend on function arguments
328 | - @cached_as() and @cached() will now depend on function line in a code,
329 | permitting usage of lambdas and same named methods without passing extra
330 | - @cached_as() and @cached() will now take timeout as first argument and extra as second.
331 | Anyway using them as keyword arguments is recommended
332 | - Django 1.2 no longer supported
333 | - Redis 2.6+ is required
334 |
335 | 1.3.1
336 | - fixed bug with negating "some"-conditions
337 | - fixed bug with schemes unsync when invalidating model
338 | Backwards incompatible changes:
339 | - reverted .cache(write_only=...) behaviour to enable caching for all ops
340 | - .cache(timeout=...) call will enable caching for all ops
341 |
342 | 1.3.0
343 | - support generic relations (erthalion)
344 | - support filtering by time equality
345 | - optimizations for python 3
346 | Backwards incompatible changes:
347 | - .cache(write_only=...) doesn't enable caching for all ops anymore (not really intended)
348 |
349 | 1.2.1
350 | - set six minimum version right (crazyzubr)
351 |
352 | 1.2
353 | - Python 3 support
354 |
355 | 1.1.1
356 | - fixed Django 1.5- compatibility (aykutozat)
357 |
358 | 1.1
359 | - Django 1.6+ support
360 | - added Django template tags
361 | - fixed caching querysets combined with | and & operators
362 |
363 | 1.0.3
364 | - added db_agnostic option to cache profile
365 | - partial support for Django 1.6+
366 |
367 | 1.0.2
368 | - fixed cached_on_save
369 | - fixed .inplace() altering cache key
370 |
371 | 1.0.1
372 | - .delete() method for simple cache
373 | - .invalidate() method for @cached() and file_cache.cached() functions
374 |
375 | 1.0.0
376 | - defend against model changes corrupting cache (ttyS15)
377 | - support F-expressions (Yuego)
378 | - fixed local_get with unhashable arg TypeError
379 | - fixed caching of raw queries (Yuego)
380 |
381 | 0.9.9
382 | - fixed file cache md5 reference
383 |
384 | 0.9.8
385 | - support isnull lookup for better invalidation
386 | - fixed 'Query' has no len()
387 | - dumped django.utils.hashcompat in favor of hashlib
388 |
389 | 0.9.7
390 | - support for flex models
391 | - support @cached_as(SomeModel)
392 | - file cache default dir changed to /tmp/cacheops_file_cache
393 | - better support for tuples in extra param in @cached and jinja2 tags
394 |
395 | 0.9.6
396 | - support graceful degradation on redis fail (Beres Botond)
397 |
398 | 0.9.5
399 | - support for proxy models
400 |
401 | 0.9.4
402 | - fixed occasional redis 100% cpu use (tumb1er)
403 |
404 | 0.9.3
405 | - invalidate and cleanfilecache commands added to dist
406 | - django 1.5 compatibility (jhpinson)
407 |
408 | 0.9.2
409 | - compatibility with latest redis-py
410 | - many other bug fixes
411 | - minor optimizations
412 | - better docs, including PERFORMANCE section
413 |
414 |
415 | ... lost in ancient history ...
416 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2011-2020, Alexander Schepanovski.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without modification,
5 | are permitted provided that the following conditions are met:
6 |
7 | 1. Redistributions of source code must retain the above copyright notice,
8 | this list of conditions and the following disclaimer.
9 |
10 | 2. Redistributions in binary form must reproduce the above copyright
11 | notice, this list of conditions and the following disclaimer in the
12 | documentation and/or other materials provided with the distribution.
13 |
14 | 3. Neither the name of cacheops nor the names of its contributors may
15 | be used to endorse or promote products derived from this software
16 | without specific prior written permission.
17 |
18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
22 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
25 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.rst
3 | include CHANGELOG
4 | include cacheops/lua/*
5 | include manage.py bench.py
6 | include requirements-test.txt
7 | include tox.ini
8 | recursive-include tests *.json
9 | recursive-include tests *.py
10 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Cacheops |Build Status|
2 | =======================
3 |
4 | A slick app that supports automatic or manual queryset caching and automatic
5 | granular event-driven invalidation.
6 |
7 | It uses redis as a backend for the ORM cache, and redis or the filesystem
8 | for the simple time-invalidated one.
9 |
10 | And there is more to it:
11 |
12 | - decorators to cache any user function or view as a queryset or by time
13 | - extensions for django and jinja2 templates
14 | - transparent transaction support
15 | - dog-pile prevention mechanism
16 | - a couple of hacks to make django faster
17 |
18 | .. contents:: Contents
19 | :local:
20 | :backlinks: top
21 |
22 | Requirements
23 | ++++++++++++
24 |
25 | Python 3.8+, Django 3.2+ and Redis 4.0+.
26 |
27 |
28 | Installation
29 | ++++++++++++
30 |
31 | Using pip:
32 |
33 | .. code:: bash
34 |
35 | $ pip install django-cacheops
36 |
37 | # Or from github directly
38 | $ pip install git+https://github.com/Suor/django-cacheops.git@master
39 |
40 |
41 | Setup
42 | +++++
43 |
44 | Add ``cacheops`` to your ``INSTALLED_APPS``.
45 |
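A minimal sketch (the surrounding apps are placeholders):

.. code:: python

    INSTALLED_APPS = [
        # ... your other apps ...
        'cacheops',
    ]
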
46 | Set up the redis connection and enable caching for the desired models:
47 |
48 | .. code:: python
49 |
50 | CACHEOPS_REDIS = {
51 | 'host': 'localhost', # redis-server is on same machine
52 | 'port': 6379, # default redis port
53 | 'db': 1, # SELECT non-default redis database
54 | # using separate redis db or redis instance
55 | # is highly recommended
56 |
57 | 'socket_timeout': 3, # connection timeout in seconds, optional
58 | 'password': '...', # optional
59 | 'unix_socket_path': '' # replaces host and port
60 | }
61 |
62 | # Alternatively the redis connection can be defined using a URL:
63 | CACHEOPS_REDIS = "redis://localhost:6379/1"
64 | # or
65 | CACHEOPS_REDIS = "unix://path/to/socket?db=1"
66 | # or with password (note a colon)
67 | CACHEOPS_REDIS = "redis://:password@localhost:6379/1"
68 |
69 | # If you want to use sentinel, specify this variable
70 | CACHEOPS_SENTINEL = {
71 | 'locations': [('localhost', 26379)], # sentinel locations, required
72 | 'service_name': 'mymaster', # sentinel service name, required
73 | 'socket_timeout': 0.1, # connection timeout in seconds, optional
74 | 'db': 0 # redis database, default: 0
75 | ... # everything else is passed to Sentinel()
76 | }
77 |
78 | # Use your own redis client class, it should be compatible with or subclass redis.Redis
79 | CACHEOPS_CLIENT_CLASS = 'your.redis.ClientClass'
80 |
81 | CACHEOPS = {
82 | # Automatically cache any User.objects.get() calls for 15 minutes
83 | # This also includes .first() and .last() calls,
84 | # as well as request.user or post.author access,
85 | # where Post.author is a foreign key to auth.User
86 | 'auth.user': {'ops': 'get', 'timeout': 60*15},
87 |
88 | # Automatically cache all gets and queryset fetches
89 | # to other django.contrib.auth models for an hour
90 | 'auth.*': {'ops': {'fetch', 'get'}, 'timeout': 60*60},
91 |
92 | # Cache all queries to Permission
93 | # 'all' is an alias for {'get', 'fetch', 'count', 'aggregate', 'exists'}
94 | 'auth.permission': {'ops': 'all', 'timeout': 60*60},
95 |
96 | # Enable manual caching on all other models with default timeout of an hour
97 | # Use Post.objects.cache().get(...)
98 | # or Tags.objects.filter(...).order_by(...).cache()
99 | # to cache particular ORM request.
100 | # Invalidation is still automatic
101 | '*.*': {'ops': (), 'timeout': 60*60},
102 |
103 | # And since ops is empty by default you can rewrite last line as:
104 | '*.*': {'timeout': 60*60},
105 |
106 | # NOTE: binding signals has its overhead, like preventing fast mass deletes,
107 | # so you might want to only register whatever you cache and its dependencies.
108 |
109 | # Finally you can explicitly forbid even manual caching with:
110 | 'some_app.*': None,
111 | }
112 |
113 | You can configure default profile settings with ``CACHEOPS_DEFAULTS``. This way you can rewrite the config above:
114 |
115 | .. code:: python
116 |
117 | CACHEOPS_DEFAULTS = {
118 | 'timeout': 60*60
119 | }
120 | CACHEOPS = {
121 | 'auth.user': {'ops': 'get', 'timeout': 60*15},
122 | 'auth.*': {'ops': ('fetch', 'get')},
123 | 'auth.permission': {'ops': 'all'},
124 | '*.*': {},
125 | }
126 |
127 | Using ``'*.*'`` with non-empty ``ops`` is **not recommended**
128 | since it will easily cache something you don't intend to, or even know about, like migrations tables.
129 | A better approach is to restrict by app with ``'app_name.*'``.
130 |
131 | Besides ``ops`` and ``timeout`` options you can also use:
132 |
133 | ``local_get: True``
134 | To cache simple gets for this model in process local memory.
135 | This is very fast, but is not invalidated in any way until the process is restarted.
136 | Still could be useful for extremely rarely changed things.
137 |
138 | ``cache_on_save=True | 'field_name'``
139 | To write an instance to cache upon save.
140 | Cached instance will be retrieved on ``.get(field_name=...)`` request.
141 | Setting to ``True`` causes caching by primary key.
142 |
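For instance, a profile combining these two options might look like this (the model labels here are hypothetical):

.. code:: python

    CACHEOPS = {
        # gets for a rarely changed model, kept in process local memory
        'geo.country': {'ops': 'get', 'timeout': 60*60, 'local_get': True},
        # write users to cache on save, to be fetched later by .get(username=...)
        'auth.user': {'ops': 'get', 'timeout': 60*15, 'cache_on_save': 'username'},
    }
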
143 | Additionally, you can tell cacheops to degrade gracefully on redis fail with:
144 |
145 | .. code:: python
146 |
147 | CACHEOPS_DEGRADE_ON_FAILURE = True
148 |
149 | There is also a possibility to make all cacheops methods and decorators no-op, e.g. for testing:
150 |
151 | .. code:: python
152 |
153 | from django.test import override_settings
154 |
155 | @override_settings(CACHEOPS_ENABLED=False)
156 | def test_something():
157 | # ...
158 | assert cond
159 |
160 |
161 | Usage
162 | +++++
163 |
164 | | **Automatic caching**
165 |
166 | It's automatic, you just need to set it up.
167 |
168 |
169 | | **Manual caching**
170 |
171 | You can force any queryset to use cache by calling its ``.cache()`` method:
172 |
173 | .. code:: python
174 |
175 | Article.objects.filter(tag=2).cache()
176 |
177 |
178 | Here you can specify which ops should be cached for the queryset, for example, this code:
179 |
180 | .. code:: python
181 |
182 | qs = Article.objects.filter(tag=2).cache(ops=['count'])
183 |     paginator = Paginator(qs, ipp)
184 |     articles = list(paginator.page(page_num)) # hits database
185 |
186 |
187 | will cache the count call in ``Paginator`` but not the later articles fetch.
188 | There are five possible actions - ``get``, ``fetch``, ``count``, ``aggregate`` and ``exists``.
189 | You can pass any subset of these ops to the ``.cache()`` method, even an empty one - to turn off caching.
190 | There is, however, a shortcut for the latter:
191 |
192 | .. code:: python
193 |
194 | qs = Article.objects.filter(visible=True).nocache()
195 | qs1 = qs.filter(tag=2) # hits database
196 | qs2 = qs.filter(category=3) # hits it once more
197 |
198 |
199 | It is useful when you want to disable automatic caching on a particular queryset.
200 |
201 | You can also override the default timeout for a particular queryset with ``.cache(timeout=...)``.
202 |
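For example, to cache this particular queryset for 30 seconds regardless of the model default:

.. code:: python

    articles = Article.objects.filter(tag=2).cache(timeout=30)
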
203 |
204 | | **Function caching**
205 |
206 | You can cache and invalidate the result of a function the same way as a queryset.
207 | Cached results of the next function will be invalidated on any ``Article`` change,
208 | addition or deletion:
209 |
210 | .. code:: python
211 |
212 | from cacheops import cached_as
213 |
214 | @cached_as(Article, timeout=120)
215 | def article_stats():
216 | return {
217 |             'tags': list(Article.objects.values('tag').annotate(Count('id'))),
218 |             'categories': list(Article.objects.values('category').annotate(Count('id'))),
219 | }
220 |
221 |
222 | Note that we are wrapping both querysets in ``list`` here: we don't want to
223 | cache lazy queryset objects, but their results.
224 |
225 | Also note that if you want to filter queryset based on arguments,
226 | e.g. to make invalidation more granular, you can use a local function:
227 |
228 | .. code:: python
229 |
230 | def articles_block(category, count=5):
231 | qs = Article.objects.filter(category=category)
232 |
233 | @cached_as(qs, extra=count)
234 | def _articles_block():
235 | articles = list(qs.filter(photo=True)[:count])
236 | if len(articles) < count:
237 | articles += list(qs.filter(photo=False)[:count-len(articles)])
238 | return articles
239 |
240 | return _articles_block()
241 |
242 | We added ``extra`` here to make different keys for calls with the same ``category`` but different
243 | ``count``. The cache key will also depend on function arguments, so we could just pass ``count`` as
244 | an argument to the inner function. We also omitted ``timeout`` here, so the default for the model
245 | will be used.
246 |
247 | Another possibility is to make function cache invalidate on changes to any one of several models:
248 |
249 | .. code:: python
250 |
251 | @cached_as(Article.objects.filter(public=True), Tag)
252 | def article_stats():
253 | return {...}
254 |
255 | As you can see, we can mix querysets and models here.
256 |
257 |
258 | | **View caching**
259 |
260 | You can also cache and invalidate a view as a queryset. This works mostly the same way as function
261 | caching, but only the path of the request is used to construct the cache key:
262 |
263 | .. code:: python
264 |
265 | from cacheops import cached_view_as
266 |
267 | @cached_view_as(News)
268 | def news_index(request):
269 | # ...
270 | return render(...)
271 |
272 | You can pass ``timeout``, ``extra`` and several samples the same way as to ``@cached_as()``. Note that you can pass a function as ``extra``:
273 |
274 | .. code:: python
275 |
276 | @cached_view_as(News, extra=lambda req: req.user.is_staff)
277 | def news_index(request):
278 | # ... add extra things for staff
279 | return render(...)
280 |
281 | A function passed as ``extra`` receives the same arguments as the cached function.
282 |
283 | Class based views can also be cached:
284 |
285 | .. code:: python
286 |
287 | class NewsIndex(ListView):
288 | model = News
289 |
290 | news_index = cached_view_as(News, ...)(NewsIndex.as_view())
291 |
292 |
293 | Invalidation
294 | ++++++++++++
295 |
296 | Cacheops uses both time and event-driven invalidation. The event-driven one
297 | listens on model signals and invalidates appropriate caches on ``Model.save()``, ``.delete()``
298 | and m2m changes.
299 |
300 | Invalidation tries to be granular, which means it won't invalidate a queryset
301 | that cannot be influenced by the added/updated/deleted object, judging by the query
302 | conditions. Most of the time this will do what you want; if it doesn't, you can use
303 | one of the following:
304 |
305 | .. code:: python
306 |
307 | from cacheops import invalidate_obj, invalidate_model, invalidate_all
308 |
309 | invalidate_obj(some_article) # invalidates queries affected by some_article
310 | invalidate_model(Article) # invalidates all queries for model
311 | invalidate_all() # flush redis cache database
312 |
313 | And lastly there is the ``invalidate`` command::
314 |
315 | ./manage.py invalidate articles.Article.34 # same as invalidate_obj
316 | ./manage.py invalidate articles.Article # same as invalidate_model
317 | ./manage.py invalidate articles # invalidate all models in articles
318 |
319 | And the one that FLUSHES the cacheops redis database::
320 |
321 | ./manage.py invalidate all
322 |
323 | Don't use that if you share the redis database between the cache and something else.
324 |
325 |
326 | | **Turning off and postponing invalidation**
327 |
328 | There is also a way to turn off invalidation for a while:
329 |
330 | .. code:: python
331 |
332 | from cacheops import no_invalidation
333 |
334 | with no_invalidation:
335 | # ... do some changes
336 | obj.save()
337 |
338 | Also works as decorator:
339 |
340 | .. code:: python
341 |
342 | @no_invalidation
343 | def some_work(...):
344 | # ... do some changes
345 | obj.save()
346 |
347 | Combined with ``try ... finally`` it could be used to postpone invalidation:
348 |
349 | .. code:: python
350 |
351 | try:
352 | with no_invalidation:
353 | # ...
354 | finally:
355 | invalidate_obj(...)
356 | # ... or
357 | invalidate_model(...)
358 |
359 | Postponing invalidation can speed up batch jobs.
360 |
361 |
362 | | **Mass updates**
363 |
364 | Normally ``qs.update(...)`` doesn't emit any events and thus doesn't trigger invalidation.
365 | And there is no transparent and efficient way to do that: trying to act on conditions will
366 | invalidate too much if update conditions are orthogonal to many queries' conditions,
367 | and to act on specific objects we would need to fetch all of them,
368 | which ``QuerySet.update()`` users generally try to avoid.
369 |
370 | In case you actually want to perform the latter, cacheops provides a shortcut:
371 |
372 | .. code:: python
373 |
374 | qs.invalidated_update(...)
375 |
376 | Note that all the updated objects are fetched twice, before and after the update.
377 |
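For example, a sketch hiding a whole category of articles while invalidating the affected queries:

.. code:: python

    Article.objects.filter(category=3).invalidated_update(visible=False)
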
378 | Components
379 | ++++++++++
380 |
381 |
382 | Simple time-invalidated cache
383 | -----------------------------
384 |
385 | To cache the result of a function call or a view for some time use:
386 |
387 | .. code:: python
388 |
389 | from cacheops import cached, cached_view
390 |
391 | @cached(timeout=number_of_seconds)
392 | def top_articles(category):
393 | return ... # Some costly queries
394 |
395 | @cached_view(timeout=number_of_seconds)
396 | def top_articles(request, category=None):
397 | # Some costly queries
398 | return HttpResponse(...)
399 |
400 |
401 | ``@cached()`` will generate a separate entry for each combination of the decorated function and its
402 | arguments. You can also use ``extra`` the same way as in ``@cached_as()``, most useful for nested
403 | functions:
404 |
405 | .. code:: python
406 |
407 | @property
408 | def articles_json(self):
409 | @cached(timeout=10*60, extra=self.category_id)
410 | def _articles_json():
411 | ...
412 | return json.dumps(...)
413 |
414 | return _articles_json()
415 |
416 |
417 | You can manually invalidate or update the result of a cached function:
418 |
419 | .. code:: python
420 |
421 | top_articles.invalidate(some_category)
422 | top_articles.key(some_category).set(new_value)
423 |
424 |
425 | To invalidate a cached view you can pass an absolute uri instead of the request:
426 |
427 | .. code:: python
428 |
429 | top_articles.invalidate('http://example.com/page', some_category)
430 |
431 |
432 | Cacheops also provides get/set primitives for simple cache:
433 |
434 | .. code:: python
435 |
436 | from cacheops import cache
437 |
438 | cache.set(cache_key, data, timeout=None)
439 | cache.get(cache_key)
440 | cache.delete(cache_key)
441 |
442 |
443 | ``cache.get`` will raise ``CacheMiss`` if nothing is stored for the given key:
444 |
445 | .. code:: python
446 |
447 | from cacheops import cache, CacheMiss
448 |
449 | try:
450 | result = cache.get(key)
451 | except CacheMiss:
452 | ... # deal with it
453 |
454 |
455 | File Cache
456 | ----------
457 |
458 | File based cache can be used the same way as the simple time-invalidated one:
459 |
460 | .. code:: python
461 |
462 | from cacheops import file_cache
463 |
464 | @file_cache.cached(timeout=number_of_seconds)
465 | def top_articles(category):
466 | return ... # Some costly queries
467 |
468 | @file_cache.cached_view(timeout=number_of_seconds)
469 | def top_articles(request, category):
470 | # Some costly queries
471 | return HttpResponse(...)
472 |
473 | # later, on appropriate event
474 | top_articles.invalidate(some_category)
475 | # or
476 | top_articles.key(some_category).set(some_value)
477 |
478 | # primitives
479 | file_cache.set(cache_key, data, timeout=None)
480 | file_cache.get(cache_key)
481 | file_cache.delete(cache_key)
482 |
483 |
484 | It has several improvements upon the django built-in file cache, both concerning high load.
485 | First, it's safe against concurrent writes. Second, its invalidation is done as a separate task;
486 | you'll need to call this from crontab for that to work::
487 |
488 | /path/manage.py cleanfilecache
489 | /path/manage.py cleanfilecache /path/to/non-default/cache/dir
490 |
491 |
492 | Django templates integration
493 | ----------------------------
494 |
495 | Cacheops provides tags to cache template fragments. They mimic the ``@cached_as``
496 | and ``@cached`` decorators, however, they require explicit naming of each fragment:
497 |
498 | .. code:: django
499 |
500 | {% load cacheops %}
501 |
502 |     {% cached_as <queryset> <timeout> <fragment_name> [<extra1> <extra2> ...] %}
503 | ... some template code ...
504 | {% endcached_as %}
505 |
506 |     {% cached <timeout> <fragment_name> [<extra1> <extra2> ...] %}
507 | ... some template code ...
508 | {% endcached %}
509 |
510 | You can use ``None`` for ``timeout`` in ``cached_as`` to use its default value for the model.
511 |
512 | To invalidate a cached fragment use:
513 |
514 | .. code:: python
515 |
516 | from cacheops import invalidate_fragment
517 |
518 | invalidate_fragment(fragment_name, extra1, ...)
519 |
520 | If you have more complex fragment caching needs, cacheops provides a helper to
521 | make your own template tags which decorate a template fragment in a way
522 | analogous to decorating a function with ``@cached`` or ``@cached_as``.
523 | This is an **experimental** feature for now.
524 |
525 | To use it create ``myapp/templatetags/mycachetags.py`` and add something like this there:
526 |
527 | .. code:: python
528 |
529 | from cacheops import cached_as, CacheopsLibrary
530 |
531 | register = CacheopsLibrary()
532 |
533 | @register.decorator_tag(takes_context=True)
534 | def cache_menu(context, menu_name):
535 | from django.utils import translation
536 | from myapp.models import Flag, MenuItem
537 |
538 | request = context.get('request')
539 |         if request and request.user.is_staff:
540 | # Use noop decorator to bypass caching for staff
541 | return lambda func: func
542 |
543 | return cached_as(
544 | # Invalidate cache if any menu item or a flag for menu changes
545 | MenuItem,
546 | Flag.objects.filter(name='menu'),
547 | # Vary for menu name and language, also stamp it as "menu" to be safe
548 | extra=("menu", menu_name, translation.get_language()),
549 | timeout=24 * 60 * 60
550 | )
551 |
552 | ``@decorator_tag`` here creates a template tag that behaves the same as the returned decorator
553 | applied to the wrapped template fragment. The resulting template tag could be used as follows:
554 |
555 | .. code:: django
556 |
557 | {% load mycachetags %}
558 |
559 | {% cache_menu "top" %}
560 | ... the top menu template code ...
561 | {% endcache_menu %}
562 |
563 |     ... some template code ...
564 |
565 | {% cache_menu "bottom" %}
566 | ... the bottom menu template code ...
567 | {% endcache_menu %}
568 |
569 |
570 | Jinja2 extension
571 | ----------------
572 |
573 | Add ``cacheops.jinja2.cache`` to your extensions and use:
574 |
575 | .. code:: jinja
576 |
577 |     {% cached_as <queryset> [, timeout=<timeout>] [, extra=<key_addition>] %}
578 | ... some template code ...
579 | {% endcached_as %}
580 |
581 | or
582 |
583 | .. code:: jinja
584 |
585 |     {% cached [timeout=<timeout>] [, extra=<key_addition>] %}
586 | ...
587 | {% endcached %}
588 |
589 | Tags work the same way as corresponding decorators.
590 |
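To enable the extension with django's Jinja2 templates backend, the settings might look like this (a sketch; your template configuration may differ):

.. code:: python

    TEMPLATES = [{
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'DIRS': ['templates'],
        'OPTIONS': {'extensions': ['cacheops.jinja2.cache']},
    }]
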
591 |
592 | Special topics
593 | ++++++++++++++
594 |
595 | Transactions
596 | ------------
597 |
598 | Cacheops transparently supports transactions. This is implemented by following these simple rules:
599 |
600 | 1. Once a transaction is dirty (has changes), caching turns off. The reason is that the state of the database at this point is only visible to the current transaction and should not affect other users, and vice versa.
601 |
602 | 2. Any invalidating calls are scheduled to run on the outer commit of the transaction.
603 |
604 | 3. Savepoints and rollbacks are also handled appropriately.
605 |
606 | Mind that the simple and file caches don't turn themselves off in transactions but work as usual.
607 |
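A minimal sketch of how these rules play out, assuming an ``Article`` model cached with ``ops: 'get'``:

.. code:: python

    from django.db import transaction

    with transaction.atomic():
        article = Article.objects.cache().get(pk=1)  # transaction is clean, cache works
        article.title = 'new title'
        article.save()                               # transaction is now dirty
        Article.objects.cache().get(pk=1)            # hits the database, not cached
    # invalidation scheduled for `article` runs here, on the outer commit
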
608 |
609 | Dog-pile effect prevention
610 | --------------------------
611 |
612 | There is an optional locking mechanism to prevent several threads or processes from simultaneously performing the same heavy task. It works with ``@cached_as()`` and querysets:
613 |
614 | .. code:: python
615 |
616 | @cached_as(qs, lock=True)
617 | def heavy_func(...):
618 | # ...
619 |
620 | for item in qs.cache(lock=True):
621 | # ...
622 |
623 | It is also possible to specify ``lock: True`` in the ``CACHEOPS`` setting, but that would probably be a waste. Locking has no overhead on a cache hit though.
624 |
625 |
626 | Multiple database support
627 | -------------------------
628 |
629 | By default cacheops considers the query result to be the same for the same query, regardless
630 | of the database queried. That could be changed with the ``db_agnostic`` cache profile option:
631 |
632 | .. code:: python
633 |
634 | CACHEOPS = {
635 | 'some.model': {'ops': 'get', 'db_agnostic': False, 'timeout': ...}
636 | }
637 |
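With ``db_agnostic`` disabled, the same query cached via different databases is stored and invalidated separately, e.g. (the ``replica`` alias here is an assumption):

.. code:: python

    SomeModel.objects.cache().get(pk=1)                   # cached for the default db
    SomeModel.objects.using('replica').cache().get(pk=1)  # cached separately
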
638 |
639 | Sharing redis instance
640 | ----------------------
641 |
642 | Cacheops provides a way to share a redis instance by adding a prefix to cache keys:
643 |
644 | .. code:: python
645 |
646 | CACHEOPS_PREFIX = lambda query: ...
647 | # or
648 | CACHEOPS_PREFIX = 'some.module.cacheops_prefix'
649 |
650 | The most common usage would probably be prefixing by host name:
651 |
652 | .. code:: python
653 |
654 | # get_request() returns current request saved to threadlocal by some middleware
655 | cacheops_prefix = lambda _: get_request().get_host()
656 |
657 | The ``query`` object passed to the callback also enables reflection on the databases and tables used:
658 |
659 | .. code:: python
660 |
661 | def cacheops_prefix(query):
662 | query.dbs # A list of databases queried
663 | query.tables # A list of tables query is invalidated on
664 |
665 | if set(query.tables) <= HELPER_TABLES:
666 | return 'helper:'
667 | if query.tables == ['blog_post']:
668 | return 'blog:'
669 |
670 |
671 | Custom serialization
672 | --------------------
673 |
674 | Cacheops uses ``pickle`` by default, employing its default protocol. But you can specify your own
675 | serializer: it might be any module or a class having ``.dumps()`` and ``.loads()`` functions. For example you can use ``dill`` instead, which can serialize more things like anonymous functions:
676 |
677 | .. code:: python
678 |
679 | CACHEOPS_SERIALIZER = 'dill'
680 |
681 | One less obvious use is to fix the pickle protocol, to make the cacheops cache usable across python versions:
682 |
683 | .. code:: python
684 |
685 | import pickle
686 |
687 | class CACHEOPS_SERIALIZER:
688 | dumps = lambda data: pickle.dumps(data, 3)
689 | loads = pickle.loads
690 |
691 |
692 | Using memory limit
693 | ------------------
694 |
695 | Cacheops offers an "insideout" mode. The idea is that instead of conj sets containing cache keys, cache values contain a checksum of random stamps stored in conj keys, which is checked on each read to still be the same. To use it, add to settings:
696 |
697 | .. code:: python
698 |
699 | CACHEOPS_INSIDEOUT = True # Might become default in future
700 |
701 | And set up ``maxmemory`` and ``maxmemory-policy`` in redis config::
702 |
703 | maxmemory 4gb
704 | maxmemory-policy volatile-lru # or other volatile-*
705 |
706 | Note that using any of the ``allkeys-*`` policies might drop important invalidation structures of cacheops and lead to a stale cache.
707 |
708 |
709 | Memory usage cleanup
710 | --------------------
711 |
712 | **This does not apply to "insideout" mode. This issue doesn't happen there.**
713 |
714 | In some cases, cacheops may leave some conjunction keys of expired cache keys in redis without being able to invalidate them. Those will still expire with age, but in the meantime may cause issues like slow invalidation (even "BUSY Redis ...") and extra memory usage. To prevent that it is advised to not cache complex queries, see `Performance tips <#performance-tips>`_, item 5.
715 |
716 | Cacheops ships with a ``cacheops.reap_conjs`` function that can clean up these keys,
717 | ignoring conjunction sets with some reasonable size. It can be called using the ``reapconjs`` management command::
718 |
719 | ./manage.py reapconjs --chunk-size=100 --min-conj-set-size=10000 # with custom values
720 | ./manage.py reapconjs # with default values (chunks=1000, min size=1000)
721 |
722 | The command is a small wrapper that calls a function with the main logic. You can also call it from your code, for example from a Celery task:
723 |
724 | .. code:: python
725 |
726 | from cacheops import reap_conjs
727 |
728 | @app.task
729 | def reap_conjs_task():
730 | reap_conjs(
731 | chunk_size=2000,
732 | min_conj_set_size=100,
733 | )
734 |
735 |
736 | Keeping stats
737 | -------------
738 |
739 | Cacheops provides ``cache_read`` and ``cache_invalidated`` signals for you to keep track.
740 |
741 | Cache read signal is emitted immediately after each cache lookup. Passed arguments are: ``sender`` - the model class if a queryset cache is fetched,
742 | ``func`` - the decorated function, and ``hit`` - fetch success as a boolean value.
743 |
744 | Here is a simple stats implementation:
745 |
746 | .. code:: python
747 |
748 | from cacheops.signals import cache_read
749 | from statsd.defaults.django import statsd
750 |
751 | def stats_collector(sender, func, hit, **kwargs):
752 | event = 'hit' if hit else 'miss'
753 | statsd.incr('cacheops.%s' % event)
754 |
755 | cache_read.connect(stats_collector)
756 |
757 | Cache invalidation signal is emitted after object, model or global invalidation, passing ``sender`` and ``obj_dict`` args. Note that during normal operation cacheops only uses object invalidation, calling it once for each model create/delete and twice for an update: passing the old and the new object dictionary.
758 |
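A sketch along the same lines for the invalidation signal (the counter name is an assumption):

.. code:: python

    from cacheops.signals import cache_invalidated
    from statsd.defaults.django import statsd

    def invalidation_collector(sender, obj_dict, **kwargs):
        statsd.incr('cacheops.invalidations')

    cache_invalidated.connect(invalidation_collector)
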
759 |
760 | Troubleshooting
761 | +++++++++++++++
762 |
763 | CAVEATS
764 | -------
765 |
766 | 1. Conditions other than ``__exact``, ``__in`` and ``__isnull=True`` don't make invalidation
767 | more granular.
768 | 2. Conditions on TextFields, FileFields and BinaryFields don't make it either.
769 | One should not test on their equality anyway. See `CACHEOPS_SKIP_FIELDS` though.
770 | 3. Update of "select_related" object does not invalidate cache for queryset.
771 | Use ``.prefetch_related()`` instead.
772 | 4. Mass updates don't trigger invalidation by default. But see ``.invalidated_update()``.
773 | 5. Sliced queries are invalidated as non-sliced ones.
774 | 6. Doesn't work with ``.raw()`` and other sql queries.
775 | 7. Conditions on subqueries don't affect invalidation.
776 | 8. Doesn't work right with multi-table inheritance.
777 |
778 | Here 1, 2, 3 and 5 are part of the design compromise; trying to solve them would make
779 | things complicated and slow. 7 can be implemented if needed, but it's
780 | probably counter-productive since one can just break queries into simpler ones,
781 | which cache better. 4 is a deliberate choice; making it "right" would flush
782 | the cache too much when update conditions are orthogonal to most queries' conditions,
783 | see, however, ``.invalidated_update()``. 8 is postponed until it gains
784 | more interest or a champion willing to implement it emerges.
785 |
786 | All unsupported things could still be used easily enough with the help of ``@cached_as()``.
787 |
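For example, a ``.raw()`` query (caveat 6) can still be cached by hand; a sketch, assuming an ``Article`` model in an ``articles`` app with the default table name:

.. code:: python

    from cacheops import cached_as

    @cached_as(Article)  # invalidate on any Article change
    def fetch_articles_raw():
        return list(Article.objects.raw('SELECT * FROM articles_article'))
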
788 |
789 | Performance tips
790 | ----------------
791 |
792 | Here come some performance tips to make cacheops and Django ORM faster.
793 |
794 | 1. When you use the cache you pickle and unpickle lots of django model instances, which could be slow. You can optimize django model serialization with django-pickling.
795 |
796 | 2. Constructing querysets is rather slow in django, mainly because most ``QuerySet`` methods clone self, then change the clone and return it. The original queryset is usually thrown away. Cacheops adds an ``.inplace()`` method, which makes the queryset mutate in place, preventing useless cloning:
797 |
798 | .. code:: python
799 |
800 | items = Item.objects.inplace().filter(category=12).order_by('-date')[:20]
801 |
802 | You can revert a queryset to the cloning state using a ``.cloning()`` call. Note that this is a micro-optimization technique. Using it is only desirable in the hottest places, not everywhere.
803 |
804 | 3. Use template fragment caching when possible, it's way faster because you don't need to generate anything. Also, pickling/unpickling a string is much faster than a list of model instances.
805 |
806 | 4. Run a separate redis instance for the cache with persistence disabled. You can manually call SAVE or BGSAVE to stay hot upon a server restart.
807 |
808 | 5. If you filter a queryset on many different or complex conditions, the cache could degrade performance (compared to uncached db calls) as a consequence of frequent cache misses. Disable the cache in such cases entirely, or use some heuristic to detect whether this request will probably be a hit, e.g. enable the cache only if a few primary fields are used in the filter (see the sketch after this list).
809 |
810 |    Caching querysets with a large amount of filters also slows down all subsequent invalidation on that model (negligible for "insideout" mode). You can disable caching if more than some number of fields is used in a filter simultaneously.
811 |
812 | 6. Split database queries into smaller ones when you cache them. This goes against the usual approach, but it allows invalidation to be more granular: smaller parts will be invalidated independently and each part will invalidate more precisely.
813 |
814 | .. code:: python
815 |
816 | Post.objects.filter(category__slug="foo")
817 | # A single database query, but will be invalidated not only on
818 | # any Category with .slug == "foo" change, but also for any Post change
819 |
820 | Post.objects.filter(category=Category.objects.get(slug="foo"))
821 | # Two queries, each invalidates only on a granular event:
822 |     # either the Category with .slug == "foo" or a Post with a matching .category_id
823 |
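Here is the sketch mentioned in tip 5. The selective field set and the tracking of used fields are assumptions; cacheops itself does not expose which fields a queryset filters on:

.. code:: python

    CACHEABLE_FIELDS = {'id', 'pk', 'slug', 'category_id'}  # known-selective fields

    def smart_cache(qs, used_fields):
        # cache only queries filtering on a few selective fields,
        # skip the cache for everything else
        if set(used_fields) <= CACHEABLE_FIELDS:
            return qs.cache()
        return qs.nocache()

    posts = smart_cache(Post.objects.filter(category_id=3), ['category_id'])
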
824 |
825 | Writing a test
826 | --------------
827 |
828 | Writing a test for an issue you are experiencing can speed up its resolution a lot.
829 | Here is how you do that. I suppose you have some application code causing it.
830 |
831 | 1. Make a fork.
832 | 2. Install all from ``requirements-test.txt``.
833 | 3. Ensure you can run tests with ``pytest``.
834 | 4. Copy relevant models code to ``tests/models.py``.
835 | 5. Go to ``tests/tests.py`` and paste the code causing the exception into ``IssueTests.test_{issue_number}``.
836 | 6. Execute ``pytest -k {issue_number}`` and see it failing.
837 | 7. Cut down model and test code until the error disappears, then take a step back.
838 | 8. Commit changes and make a pull request.
839 |
840 |
841 | TODO
842 | ++++
843 |
844 | - faster .get() handling for simple cases such as get by pk/id, with simple key calculation
845 | - integrate previous one with prefetch_related()
846 | - shard cache between multiple redises
847 | - respect subqueries?
848 | - respect headers in @cached_view*?
849 | - group invalidate_obj() calls?
850 | - a postpone invalidation context manager/decorator?
851 | - fast mode: store cache in local memory, but check in with redis if it's valid
852 | - an interface for complex fields to extract exact on parts or transforms: ArrayField.len => field__len=?, ArrayField[0] => field__0=?, JSONField['some_key'] => field__some_key=?
853 | - custom cache eviction strategy in lua
854 | - cache a string directly (no pickle) for direct serving (custom key function?)
855 |
856 |
857 | .. |Build Status| image:: https://github.com/Suor/django-cacheops/actions/workflows/ci.yml/badge.svg
858 | :target: https://github.com/Suor/django-cacheops/actions/workflows/ci.yml?query=branch%3Amaster
859 |
--------------------------------------------------------------------------------
/bench.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os, time, gc, sys, shutil
3 | from funcy import re_tester
4 | os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
5 |
6 | verbosity = 1
7 | interactive = False
8 | fixtures = ['basic']
9 |
10 |
11 | HEADER_TEMPLATE = '==================== %-20s ===================='
12 |
13 |
14 | def run_benchmarks(tests):
15 | for name, test in tests:
16 | if 'h' in flags:
17 | print(HEADER_TEMPLATE % name)
18 |         duration = bench_test(test)  # renamed so we don't shadow the time module
19 |         print('%-18s time: %.3fms' % (name, duration * 1000))
20 |
21 | def bench_test(test):
22 | prepared = None
23 | if 'prepare_once' in test:
24 | prepared = test['prepare_once']()
25 | if 'h' in flags:
26 | print('-' * 62)
27 |
28 | if 'p' in flags:
29 | test['run'] = profile(test['run'])
30 |
31 | total = 0
32 | n = 1
33 | while total < 2:
34 | gc.disable()
35 | durations = [bench_once(test, prepared) for _ in range(n)]
36 | gc.enable()
37 |
38 | if '1' in flags:
39 | break
40 |
41 | total = sum(d for _, d in durations)
42 | n *= 2
43 |
44 | return min(d for d, _ in durations)
45 |
46 | def bench_once(test, prepared=None):
47 | zero_start = time.time()
48 | if 'prepare' in test:
49 | prepared = test['prepare']()
50 | if 'h' in flags:
51 | print('-' * 62)
52 | start = time.time()
53 | if prepared is None:
54 | test['run']()
55 | else:
56 | test['run'](prepared)
57 | now = time.time()
58 | return now - start, now - zero_start
59 |
60 | import django
61 | from django.db import connection
62 | from django.core.management import call_command
63 |
64 | django.setup()
65 |
66 |
67 | # Parse command line arguments
68 | flags = ''.join(arg[1:] for arg in sys.argv[1:] if arg.startswith('-'))
69 | args = [arg for arg in sys.argv[1:] if not arg.startswith('-')]
70 | selector = args[0] if args else ''
71 | select = selector[1:].__eq__ if selector.startswith('=') else re_tester(selector)
72 |
73 | if 'p' in flags:
74 | from profilehooks import profile
75 |
76 |
77 | db_name = None
78 | try:
79 | shutil.rmtree('tests/migrations', True)
80 | call_command('makemigrations', 'tests', verbosity=0)
81 | db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive)
82 | call_command('loaddata', *fixtures, **{'verbosity': verbosity})
83 |
84 | from cacheops.redis import redis_client
85 | redis_client.flushdb()
86 |
87 | from tests.bench import TESTS # import is here because it executes queries
88 | if selector:
89 | tests = [(name, test) for name, test in TESTS if select(name)]
90 | else:
91 | tests = TESTS
92 | run_benchmarks(tests)
93 | except KeyboardInterrupt:
94 | pass
95 | finally:
96 | if db_name:
97 | connection.creation.destroy_test_db(db_name, verbosity=verbosity)
98 | shutil.rmtree('tests/migrations')
99 |
--------------------------------------------------------------------------------
/cacheops/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = '7.2'
2 | VERSION = tuple(map(int, __version__.split('.')))
3 |
4 | from .simple import * # noqa
5 | from .query import * # noqa
6 | from .invalidation import * # noqa
7 | from .reaper import * # noqa
8 | from .templatetags.cacheops import * # noqa
9 |
--------------------------------------------------------------------------------
/cacheops/apps.py:
--------------------------------------------------------------------------------
1 | from django.apps import AppConfig
2 |
3 | from cacheops.query import install_cacheops
4 | from cacheops.transaction import install_cacheops_transaction_support
5 |
6 |
7 | class CacheopsConfig(AppConfig):
8 | name = 'cacheops'
9 |
10 | def ready(self):
11 | install_cacheops()
12 | install_cacheops_transaction_support()
13 |
--------------------------------------------------------------------------------
/cacheops/conf.py:
--------------------------------------------------------------------------------
1 | from importlib import import_module
2 | from funcy import memoize, merge
3 |
4 | from django.conf import settings as base_settings
5 | from django.core.exceptions import ImproperlyConfigured
6 | from django.core.signals import setting_changed
7 |
8 |
9 | ALL_OPS = {'get', 'fetch', 'count', 'aggregate', 'exists'}
10 |
11 |
12 | class Defaults:
13 | CACHEOPS_ENABLED = True
14 | CACHEOPS_REDIS = {}
15 | CACHEOPS_DEFAULTS = {}
16 | CACHEOPS = {}
17 | CACHEOPS_PREFIX = lambda query: ''
18 | CACHEOPS_INSIDEOUT = False
19 | CACHEOPS_CLIENT_CLASS = None
20 | CACHEOPS_DEGRADE_ON_FAILURE = False
21 | CACHEOPS_SENTINEL = {}
22 |     # NOTE: we don't use these fields in invalidator conditions since their values could be very long
23 | # and one should not filter by their equality anyway.
24 | CACHEOPS_SKIP_FIELDS = "FileField", "TextField", "BinaryField", "JSONField", "ArrayField"
25 | CACHEOPS_LONG_DISJUNCTION = 8
26 | CACHEOPS_SERIALIZER = 'pickle'
27 |
28 | FILE_CACHE_DIR = '/tmp/cacheops_file_cache'
29 | FILE_CACHE_TIMEOUT = 60*60*24*30
30 |
31 |
32 | class Settings(object):
33 | def __getattr__(self, name):
34 | res = getattr(base_settings, name, getattr(Defaults, name))
35 | if name in ['CACHEOPS_PREFIX', 'CACHEOPS_SERIALIZER']:
36 | res = import_string(res) if isinstance(res, str) else res
37 |
38 | # Convert old list of classes to list of strings
39 | if name == 'CACHEOPS_SKIP_FIELDS':
40 |             res = [f if isinstance(f, str) else f.get_internal_type(f) for f in res]
41 |
42 | # Save to dict to speed up next access, __getattr__ won't be called
43 | self.__dict__[name] = res
44 | return res
45 |
46 | settings = Settings()
47 | setting_changed.connect(lambda setting, **kw: settings.__dict__.pop(setting, None), weak=False)
48 |
49 |
50 | def import_string(path):
51 | if "." in path:
52 | module, attr = path.rsplit(".", 1)
53 | return getattr(import_module(module), attr)
54 | else:
55 | return import_module(path)
56 |
57 |
58 | @memoize
59 | def prepare_profiles():
60 | """
61 | Prepares a dict 'app.model' -> profile, for use in model_profile()
62 | """
63 | profile_defaults = {
64 | 'ops': (),
65 | 'local_get': False,
66 | 'db_agnostic': True,
67 | 'lock': False,
68 | }
69 | profile_defaults.update(settings.CACHEOPS_DEFAULTS)
70 |
71 | model_profiles = {}
72 | for app_model, profile in settings.CACHEOPS.items():
73 | if profile is None:
74 | model_profiles[app_model.lower()] = None
75 | continue
76 |
77 | model_profiles[app_model.lower()] = mp = merge(profile_defaults, profile)
78 | if mp['ops'] == 'all':
79 | mp['ops'] = ALL_OPS
80 | # People will do that anyway :)
81 | if isinstance(mp['ops'], str):
82 | mp['ops'] = {mp['ops']}
83 | mp['ops'] = set(mp['ops'])
84 |
85 | if 'timeout' not in mp:
86 | raise ImproperlyConfigured(
87 | 'You must specify "timeout" option in "%s" CACHEOPS profile' % app_model)
88 | if not isinstance(mp['timeout'], int):
89 | raise ImproperlyConfigured(
90 | '"timeout" option in "%s" CACHEOPS profile should be an integer' % app_model)
91 |
92 | return model_profiles
93 |
94 |
95 | def model_profile(model):
96 | """
97 | Returns cacheops profile for a model
98 | """
99 | assert not model._meta.abstract, "Can't get profile for %s" % model
100 | # Django migrations create lots of fake models, just skip them
101 | if model.__module__ == '__fake__':
102 | return None
103 |
104 | model_profiles = prepare_profiles()
105 |
106 | app = model._meta.app_label.lower()
107 | model_name = model._meta.model_name
108 | for guess in ('%s.%s' % (app, model_name), '%s.*' % app, '*.*'):
109 | if guess in model_profiles:
110 | return model_profiles[guess]
111 | else:
112 | return None
113 |
--------------------------------------------------------------------------------
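The validation in prepare_profiles() above implies a settings shape like the following sketch; the app and model names are illustrative assumptions, not part of this repo:

    CACHEOPS_DEFAULTS = {
        'timeout': 60 * 60,  # merged into every profile, satisfies the 'timeout' check
    }
    CACHEOPS = {
        'auth.user': {'ops': 'get', 'timeout': 60 * 15},  # a str op is wrapped into a set
        'auth.*': {'ops': ('fetch', 'get')},
        'blog.*': {'ops': 'all'},                         # expanded to ALL_OPS
        'some.model': None,                               # explicitly excluded from caching
    }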
/cacheops/getset.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | import hashlib
3 | import json
4 | import random
5 |
6 | from .conf import settings
7 | from .redis import redis_client, handle_connection_failure, load_script
8 | from .transaction import transaction_states
9 |
10 |
11 | LOCK_TIMEOUT = 60
12 |
13 |
14 | @handle_connection_failure
15 | def cache_thing(prefix, cache_key, data, cond_dnfs, timeout, dbs=(), precall_key='',
16 | expected_checksum=''):
17 | """
18 | Writes data to cache and creates appropriate invalidators.
19 |
20 | If precall_key is not the empty string, the data will only be cached if the
21 | precall_key is set to avoid caching stale data.
22 |
23 | If expected_checksum is set and does not match the actual one then cache won't be written.
24 | """
25 | # Could have changed after last check, sometimes superficially
26 | if transaction_states.is_dirty(dbs):
27 | return
28 |
29 | if settings.CACHEOPS_INSIDEOUT:
30 | schemes = dnfs_to_schemes(cond_dnfs)
31 | conj_keys = dnfs_to_conj_keys(prefix, cond_dnfs)
32 | return load_script('cache_thing_insideout')(
33 | keys=[prefix, cache_key],
34 | args=[
35 | settings.CACHEOPS_SERIALIZER.dumps(data),
36 | json.dumps(schemes),
37 | json.dumps(conj_keys),
38 | timeout,
39 | # Need to pass it from here since random inside is not seeded in Redis pre 7.0
40 | random.random(),
41 | expected_checksum,
42 | ]
43 | )
44 | else:
45 | if prefix and precall_key == "":
46 | precall_key = prefix
47 | load_script('cache_thing')(
48 | keys=[prefix, cache_key, precall_key],
49 | args=[
50 | settings.CACHEOPS_SERIALIZER.dumps(data),
51 | json.dumps(cond_dnfs, default=str),
52 | timeout
53 | ]
54 | )
55 |
56 |
57 | @contextmanager
58 | def getting(key, cond_dnfs, prefix, lock=False):
59 | if not lock:
60 | yield _read(key, cond_dnfs, prefix)
61 | else:
62 | locked = False
63 | try:
64 | data = _get_or_lock(key, cond_dnfs, prefix)
65 | locked = data is None
66 | yield data
67 | finally:
68 | if locked:
69 | _release_lock(key)
70 |
71 |
72 | @handle_connection_failure
73 | def _read(key, cond_dnfs, prefix):
74 | if not settings.CACHEOPS_INSIDEOUT:
75 | return redis_client.get(key)
76 |
77 | conj_keys = dnfs_to_conj_keys(prefix, cond_dnfs)
78 | coded, *stamps = redis_client.mget(key, *conj_keys)
79 | if coded is None or coded == b'LOCK':
80 | return coded
81 |
82 | if None in stamps:
83 | redis_client.unlink(key)
84 | return None
85 |
86 | stamp_checksum, data = coded.split(b':', 1)
87 | if stamp_checksum.decode() != join_stamps(stamps):
88 | redis_client.unlink(key)
89 | return None
90 |
91 | return data
92 |
93 |
94 | @handle_connection_failure
95 | def _get_or_lock(key, cond_dnfs, prefix):
96 | _lock = redis_client.register_script("""
97 | local locked = redis.call('set', KEYS[1], 'LOCK', 'nx', 'ex', ARGV[1])
98 | if locked then
99 | redis.call('del', KEYS[2])
100 | end
101 | return locked
102 | """)
103 | signal_key = key + ':signal'
104 |
105 | while True:
106 | data = _read(key, cond_dnfs, prefix)
107 | if data is None:
108 | if _lock(keys=[key, signal_key], args=[LOCK_TIMEOUT]):
109 | return None
110 | elif data != b'LOCK':
111 | return data
112 |
113 | # No data and not locked, wait
114 | redis_client.brpoplpush(signal_key, signal_key, timeout=LOCK_TIMEOUT)
115 |
116 |
117 | @handle_connection_failure
118 | def _release_lock(key):
119 | _unlock = redis_client.register_script("""
120 | if redis.call('get', KEYS[1]) == 'LOCK' then
121 | redis.call('del', KEYS[1])
122 | end
123 | redis.call('lpush', KEYS[2], 1)
124 | redis.call('expire', KEYS[2], 1)
125 | """)
126 | signal_key = key + ':signal'
127 | _unlock(keys=[key, signal_key])
128 |
129 |
130 | # Key manipulation helpers
131 |
132 | def join_stamps(stamps):
133 | return hashlib.sha1(b' '.join(stamps)).hexdigest()
134 |
135 |
136 | def dnfs_to_conj_keys(prefix, cond_dnfs):
137 | def _conj_cache_key(table, conj):
138 | conj_str = '&'.join(f'{field}={val}' for field, val in sorted(conj.items()))
139 | return f'{prefix}conj:{table}:{conj_str}'
140 |
141 | return [_conj_cache_key(table, conj) for table, disj in cond_dnfs.items()
142 | for conj in disj]
143 |
144 | def dnfs_to_schemes(cond_dnfs):
145 | return {table: list({",".join(sorted(conj)) for conj in disj})
146 | for table, disj in cond_dnfs.items() if disj}
147 |
--------------------------------------------------------------------------------
/cacheops/invalidation.py:
--------------------------------------------------------------------------------
1 | import json
2 | import threading
3 | from funcy import memoize, post_processing, ContextDecorator, decorator, walk_values
4 | from django.db import DEFAULT_DB_ALIAS
5 | from django.db.models.expressions import F, Expression
6 |
7 | from .conf import settings
8 | from .sharding import get_prefix
9 | from .redis import redis_client, handle_connection_failure, load_script
10 | from .signals import cache_invalidated
11 | from .transaction import queue_when_in_transaction
12 |
13 |
14 | __all__ = ('invalidate_obj', 'invalidate_model', 'invalidate_all', 'no_invalidation')
15 |
16 |
17 | @decorator
18 | def skip_on_no_invalidation(call):
19 | if not settings.CACHEOPS_ENABLED or no_invalidation.active:
20 | return
21 | return call()
22 |
23 |
24 | @skip_on_no_invalidation
25 | @queue_when_in_transaction
26 | @handle_connection_failure
27 | def invalidate_dict(model, obj_dict, using=DEFAULT_DB_ALIAS):
28 | if no_invalidation.active or not settings.CACHEOPS_ENABLED:
29 | return
30 |
31 | model = model._meta.concrete_model
32 | prefix = get_prefix(_cond_dnfs=[(model._meta.db_table, list(obj_dict.items()))], dbs=[using])
33 |
34 | if settings.CACHEOPS_INSIDEOUT:
35 | script = 'invalidate_insideout'
36 | serialized_dict = json.dumps(walk_values(str, obj_dict))
37 | else:
38 | script = 'invalidate'
39 | serialized_dict = json.dumps(obj_dict, default=str)
40 | load_script(script)(keys=[prefix], args=[model._meta.db_table, serialized_dict])
41 | cache_invalidated.send(sender=model, obj_dict=obj_dict)
42 |
43 |
44 | @skip_on_no_invalidation
45 | def invalidate_obj(obj, using=DEFAULT_DB_ALIAS):
46 | """
47 | Invalidates caches that can possibly be influenced by object
48 | """
49 | model = obj.__class__._meta.concrete_model
50 | invalidate_dict(model, get_obj_dict(model, obj), using=using)
51 |
52 |
53 | @skip_on_no_invalidation
54 | @queue_when_in_transaction
55 | @handle_connection_failure
56 | def invalidate_model(model, using=DEFAULT_DB_ALIAS):
57 | """
58 | Invalidates all caches for given model.
59 | NOTE: This is a heavy artillery which uses redis KEYS request,
60 | which could be relatively slow on large datasets.
61 | """
62 | model = model._meta.concrete_model
63 | # NOTE: if we use sharding dependent on DNF then this will fail,
64 | # which is ok, since it's hard/impossible to predict all the shards
65 | prefix = get_prefix(tables=[model._meta.db_table], dbs=[using])
66 | conjs_keys = redis_client.keys('%sconj:%s:*' % (prefix, model._meta.db_table))
67 | if conjs_keys:
68 | if settings.CACHEOPS_INSIDEOUT:
69 | redis_client.unlink(*conjs_keys)
70 | else:
71 | cache_keys = redis_client.sunion(conjs_keys)
72 | keys = list(cache_keys) + conjs_keys
73 | redis_client.unlink(*keys)
74 | cache_invalidated.send(sender=model, obj_dict=None)
75 |
76 |
77 | @skip_on_no_invalidation
78 | @handle_connection_failure
79 | def invalidate_all():
80 | redis_client.flushdb()
81 | cache_invalidated.send(sender=None, obj_dict=None)
82 |
83 |
84 | class InvalidationState(threading.local):
85 | def __init__(self):
86 | self.depth = 0
87 |
88 | class _no_invalidation(ContextDecorator):
89 | state = InvalidationState()
90 |
91 | def __enter__(self):
92 | self.state.depth += 1
93 |
94 | def __exit__(self, type, value, traceback):
95 | self.state.depth -= 1
96 |
97 | @property
98 | def active(self):
99 | return self.state.depth
100 |
101 | no_invalidation = _no_invalidation()
102 |
103 |
104 | ### ORM instance serialization
105 |
106 | @memoize
107 | def serializable_fields(model):
108 | return {f for f in model._meta.fields
109 | if f.get_internal_type() not in settings.CACHEOPS_SKIP_FIELDS}
110 |
111 | @post_processing(dict)
112 | def get_obj_dict(model, obj):
113 | for field in serializable_fields(model):
114 | # Skip deferred fields, in post_delete trying to fetch them results in error anyway.
115 |         # In post_save we rely on deferred values being the same as in pre_save.
116 | if field.attname not in obj.__dict__:
117 | continue
118 |
119 | value = getattr(obj, field.attname)
120 | if value is None:
121 | yield field.attname, None
122 | elif isinstance(value, (F, Expression)):
123 | continue
124 | else:
125 | yield field.attname, field.get_prep_value(value)
126 |
--------------------------------------------------------------------------------
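A minimal sketch of the helpers exported in __all__ above; Post is an assumed example model:

    from cacheops import invalidate_obj, invalidate_model, invalidate_all, no_invalidation

    post = Post.objects.get(pk=1)
    invalidate_obj(post)      # drop caches this object could be in
    invalidate_model(Post)    # heavy artillery, uses redis KEYS (see docstring above)

    with no_invalidation:     # a ContextDecorator, also usable as @no_invalidation
        post.save()           # saves without triggering invalidation

    invalidate_all()          # flushes the whole cacheops redis database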
/cacheops/jinja2.py:
--------------------------------------------------------------------------------
1 | from jinja2 import nodes
2 | from jinja2.ext import Extension
3 |
4 | import cacheops
5 | from cacheops.utils import carefully_strip_whitespace
6 |
7 |
8 | __all__ = ['cache']
9 |
10 |
11 | class CacheopsExtension(Extension):
12 | tags = ['cached_as', 'cached']
13 |
14 | def parse(self, parser):
15 | lineno = parser.stream.current.lineno
16 | tag_name = parser.stream.current.value
17 | tag_location = '%s:%s' % (parser.name, lineno)
18 |
19 | next(parser.stream)
20 | args, kwargs = self.parse_args(parser)
21 | args = [nodes.Const(tag_name), nodes.Const(tag_location)] + args
22 |
23 | block_call = self.call_method('handle_tag', args, kwargs)
24 | body = parser.parse_statements(['name:end%s' % tag_name], drop_needle=True)
25 |
26 | return nodes.CallBlock(block_call, [], [], body).set_lineno(lineno)
27 |
28 | def handle_tag(self, tag_name, tag_location, *args, **kwargs):
29 | caller = kwargs.pop('caller')
30 |
31 | cacheops_decorator = getattr(cacheops, tag_name)
32 | kwargs.setdefault('extra', '')
33 | if isinstance(kwargs['extra'], tuple):
34 | kwargs['extra'] += (tag_location,)
35 | else:
36 | kwargs['extra'] = str(kwargs['extra']) + tag_location
37 |
38 | @cacheops_decorator(*args, **kwargs)
39 | def _handle_tag():
40 | content = caller()
41 | # TODO: make this cache preparation configurable
42 | return carefully_strip_whitespace(content)
43 |
44 | return _handle_tag()
45 |
46 | def parse_args(self, parser):
47 | args = []
48 | kwargs = []
49 | require_comma = False
50 |
51 | while parser.stream.current.type != 'block_end':
52 | if require_comma:
53 | parser.stream.expect('comma')
54 |
55 | if parser.stream.current.type == 'name' and parser.stream.look().type == 'assign':
56 | key = parser.stream.current.value
57 | parser.stream.skip(2)
58 | value = parser.parse_expression()
59 | kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
60 | else:
61 | if kwargs:
62 | parser.fail('Invalid argument syntax for CacheopsExtension tag',
63 | parser.stream.current.lineno)
64 | args.append(parser.parse_expression())
65 |
66 | require_comma = True
67 |
68 | return args, kwargs
69 |
70 | cache = CacheopsExtension
71 |
--------------------------------------------------------------------------------
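A sketch of wiring the extension above into a Jinja2 environment; for Django's Jinja2 template backend the options pass through to the Environment (the template content is illustrative):

    # settings.py
    TEMPLATES = [{
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'OPTIONS': {'extensions': ['cacheops.jinja2.cache']},
    }]

    # In a template the tags mirror the cached()/cached_as() callables:
    # {% cached timeout=600 extra='sidebar' %} ... {% endcached %}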
/cacheops/lua/cache_thing.lua:
--------------------------------------------------------------------------------
1 | local prefix = KEYS[1]
2 | local key = KEYS[2]
3 | local precall_key = KEYS[3]
4 | local data = ARGV[1]
5 | local dnfs = cjson.decode(ARGV[2])
6 | local timeout = tonumber(ARGV[3])
7 |
8 | if precall_key ~= prefix and redis.call('exists', precall_key) == 0 then
9 | -- Cached data was invalidated during the function call. The data is
10 | -- stale and should not be cached.
11 | return
12 | end
13 |
14 | -- Write data to cache
15 | redis.call('setex', key, timeout, data)
16 |
17 |
18 | -- A pair of funcs
19 | -- NOTE: we depend here on keys order being stable
20 | local conj_schema = function (conj)
21 | local parts = {}
22 | for field, _ in pairs(conj) do
23 | table.insert(parts, field)
24 | end
25 |
26 | return table.concat(parts, ',')
27 | end
28 |
29 | local conj_cache_key = function (db_table, conj)
30 | local parts = {}
31 | for field, val in pairs(conj) do
32 | table.insert(parts, field .. '=' .. tostring(val))
33 | end
34 |
35 | return prefix .. 'conj:' .. db_table .. ':' .. table.concat(parts, '&')
36 | end
37 |
38 |
39 | -- Update schemes and invalidators
40 | for db_table, disj in pairs(dnfs) do
41 | for _, conj in ipairs(disj) do
42 | -- Ensure scheme is known
43 | redis.call('sadd', prefix .. 'schemes:' .. db_table, conj_schema(conj))
44 |
45 | -- Add new cache_key to list of dependencies
46 | local conj_key = conj_cache_key(db_table, conj)
47 | redis.call('sadd', conj_key, key)
48 | -- NOTE: an invalidator should live longer than any key it references.
49 | -- So we update its ttl on every key if needed.
50 | -- NOTE: we also can't use "EXPIRE conj_key timeout GT" because it will have no effect on
51 | -- newly created and thus involatile conj keys.
52 | local conj_ttl = redis.call('ttl', conj_key)
53 | if conj_ttl < timeout then
54 |             -- We set conj_key life with a margin over key life to call expire less often
55 |             -- And add a few extra seconds to be extra safe
56 | redis.call('expire', conj_key, timeout * 2 + 10)
57 | end
58 | end
59 | end
60 |
--------------------------------------------------------------------------------
/cacheops/lua/cache_thing_insideout.lua:
--------------------------------------------------------------------------------
1 | local prefix = KEYS[1]
2 | local key = KEYS[2]
3 | local data = ARGV[1]
4 | local schemes = cjson.decode(ARGV[2])
5 | local conj_keys = cjson.decode(ARGV[3])
6 | local timeout = tonumber(ARGV[4])
7 | local rnd = ARGV[5] -- A new value for empty stamps
8 | local expected_checksum = ARGV[6]
9 |
10 | -- Ensure schemes are known
11 | for db_table, _schemes in pairs(schemes) do
12 | redis.call('sadd', prefix .. 'schemes:' .. db_table, unpack(_schemes))
13 | end
14 |
15 | -- Fill in invalidators and collect stamps
16 | local stamps = {}
17 | for _, conj_key in ipairs(conj_keys) do
18 | -- REDIS_7
19 | local stamp = redis.call('set', conj_key, rnd, 'nx', 'get') or rnd
20 | -- /REDIS_7
21 | -- REDIS_4
22 | local stamp = redis.call('get', conj_key)
23 | if not stamp then
24 | stamp = rnd
25 | redis.call('set', conj_key, rnd)
26 | end
27 | -- /REDIS_4
28 | table.insert(stamps, stamp)
29 | -- NOTE: an invalidator should live longer than any key it references.
30 | -- So we update its ttl on every key if needed.
31 | -- NOTE: we also can't use "EXPIRE conj_key timeout GT" because it will have no effect on
32 | -- newly created and thus involatile conj keys.
33 | local conj_ttl = redis.call('ttl', conj_key)
34 | if conj_ttl < timeout then
35 |         -- We set conj_key life with a margin over key life to call expire less often
36 |         -- And add a few extra seconds to be extra safe
37 | redis.call('expire', conj_key, timeout * 2 + 10)
38 | end
39 | end
40 |
41 | -- Write data to cache along with a checksum of the stamps to see if any of them changed
42 | local all_stamps = table.concat(stamps, ' ')
43 | local stamp_checksum = redis.sha1hex(all_stamps)
44 |
45 | if expected_checksum ~= '' and stamp_checksum ~= expected_checksum then
46 | -- Cached data was invalidated during the function call. The data is
47 | -- stale and should not be cached.
48 | return stamp_checksum -- This one is used for keep_fresh implementation
49 | end
50 |
51 | redis.call('set', key, stamp_checksum .. ':' .. data, 'ex', timeout)
52 |
--------------------------------------------------------------------------------
/cacheops/lua/invalidate.lua:
--------------------------------------------------------------------------------
1 | local prefix = KEYS[1]
2 | local db_table = ARGV[1]
3 | local obj = cjson.decode(ARGV[2])
4 |
5 | -- Utility functions
6 | local conj_cache_key = function (db_table, scheme, obj)
7 | local parts = {}
8 | for field in string.gmatch(scheme, "[^,]+") do
9 | table.insert(parts, field .. '=' .. tostring(obj[field]))
10 | end
11 |
12 | return prefix .. 'conj:' .. db_table .. ':' .. table.concat(parts, '&')
13 | end
14 |
15 | local call_in_chunks = function (command, args)
16 | local step = 1000
17 | for i = 1, #args, step do
18 | redis.call(command, unpack(args, i, math.min(i + step - 1, #args)))
19 | end
20 | end
21 |
22 |
23 | -- Calculate conj keys
24 | local conj_keys = {}
25 | local schemes = redis.call('smembers', prefix .. 'schemes:' .. db_table)
26 | for _, scheme in ipairs(schemes) do
27 | table.insert(conj_keys, conj_cache_key(db_table, scheme, obj))
28 | end
29 |
30 |
31 | -- Delete cache keys and referring conj keys
32 | if next(conj_keys) ~= nil then
33 | local cache_keys = redis.call('sunion', unpack(conj_keys))
34 | -- we delete cache keys since they are invalid
35 |     -- and conj keys as they will only refer to deleted keys
36 | redis.call("unlink", unpack(conj_keys))
37 | if next(cache_keys) ~= nil then
38 |         -- NOTE: can't just do redis.call('del', unpack(...)) because there is a limit
39 |         --       on the number of results unpack() can return in lua.
40 | call_in_chunks('del', cache_keys)
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/cacheops/lua/invalidate_insideout.lua:
--------------------------------------------------------------------------------
1 | local prefix = KEYS[1]
2 | local db_table = ARGV[1]
3 | local obj = cjson.decode(ARGV[2])
4 |
5 | local conj_cache_key = function (db_table, scheme, obj)
6 | local parts = {}
7 | for field in string.gmatch(scheme, "[^,]+") do
8 | -- All obj values are strings, we still use tostring() in case obj does not contain field
9 | table.insert(parts, field .. '=' .. tostring(obj[field]))
10 | end
11 |
12 | return prefix .. 'conj:' .. db_table .. ':' .. table.concat(parts, '&')
13 | end
14 |
15 | -- Drop conj keys
16 | local schemes = redis.call('smembers', prefix .. 'schemes:' .. db_table)
17 | for _, scheme in ipairs(schemes) do
18 |     redis.call('unlink', conj_cache_key(db_table, scheme, obj))
19 | end
20 |
--------------------------------------------------------------------------------
/cacheops/management/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suor/django-cacheops/d4b390372d839ff1389662cb311a916eb2c992ed/cacheops/management/__init__.py
--------------------------------------------------------------------------------
/cacheops/management/commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suor/django-cacheops/d4b390372d839ff1389662cb311a916eb2c992ed/cacheops/management/commands/__init__.py
--------------------------------------------------------------------------------
/cacheops/management/commands/cleanfilecache.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from django.core.management.base import BaseCommand
4 |
5 | from cacheops.conf import settings
6 |
7 |
8 | class Command(BaseCommand):
9 | help = 'Clean filebased cache'
10 |
11 | def add_arguments(self, parser):
12 | parser.add_argument('path', nargs='*', default=['default'])
13 |
14 | def handle(self, **options):
15 | for path in options['path']:
16 | if path == 'default':
17 | path = settings.FILE_CACHE_DIR
18 | os.system(r'find %s -type f \! -iname "\." -mmin +0 -delete' % path)
19 |
--------------------------------------------------------------------------------
/cacheops/management/commands/invalidate.py:
--------------------------------------------------------------------------------
1 | from django.core.management.base import LabelCommand, CommandError
2 | from django.apps import apps
3 |
4 | from cacheops.invalidation import *
5 |
6 |
7 | class Command(LabelCommand):
8 | help = 'Invalidates cache for entire app, model or particular instance'
9 |     args = '(all | <app> | <app>.<model> | <app>.<model>.<pk>) +'
10 | label = 'app or model or object'
11 |
12 | def handle_label(self, label, **options):
13 | if label == 'all':
14 | self.handle_all()
15 | else:
16 | app_n_model = label.split('.')
17 | if len(app_n_model) == 1:
18 | self.handle_app(app_n_model[0])
19 | elif len(app_n_model) == 2:
20 | self.handle_model(*app_n_model)
21 | elif len(app_n_model) == 3:
22 | self.handle_obj(*app_n_model)
23 | else:
24 |                 raise CommandError('Wrong model/app name syntax: %s\n'
25 |                                    'Type <app_name> or <app_name>.<model_name>' % label)
26 |
27 | def handle_all(self):
28 | invalidate_all()
29 |
30 | def handle_app(self, app_name):
31 | for model in self.get_app(app_name).get_models(include_auto_created=True):
32 | invalidate_model(model)
33 |
34 | def handle_model(self, app_name, model_name):
35 | invalidate_model(self.get_model(app_name, model_name))
36 |
37 | def handle_obj(self, app_name, model_name, obj_pk):
38 | model = self.get_model(app_name, model_name)
39 | try:
40 | obj = model.objects.get(pk=obj_pk)
41 | except model.DoesNotExist:
42 | raise CommandError('No %s.%s with pk = %s' % (app_name, model_name, obj_pk))
43 | invalidate_obj(obj)
44 |
45 | def get_app(self, app_name):
46 | try:
47 | return apps.get_app_config(app_name)
48 | except LookupError as e:
49 | raise CommandError(e)
50 |
51 | def get_model(self, app_name, model_name):
52 | try:
53 | return apps.get_app_config(app_name).get_model(model_name)
54 | except LookupError as e:
55 | raise CommandError(e)
56 |
--------------------------------------------------------------------------------
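Example invocations for the command above (app/model names are illustrative):

    ./manage.py invalidate all           # flush everything
    ./manage.py invalidate blog          # every model in an app
    ./manage.py invalidate blog.post     # a single model
    ./manage.py invalidate blog.post.15  # a single object by primary key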
/cacheops/management/commands/reapconjs.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 |
3 | from django.core.management.base import BaseCommand
4 |
5 | from cacheops.reaper import reap_conjs
6 |
7 |
8 | class Command(BaseCommand):
9 | help = 'Removes expired conjunction keys from cacheops.'
10 |
11 | def add_arguments(self, parser: ArgumentParser):
12 | parser.add_argument('--chunk-size', type=int, default=1000)
13 | parser.add_argument('--min-conj-set-size', type=int, default=1000)
14 | parser.add_argument('--dry-run', action='store_true')
15 |
16 | def handle(self, chunk_size: int, min_conj_set_size: int, dry_run: bool, **kwargs):
17 | reap_conjs(
18 | chunk_size=chunk_size,
19 | min_conj_set_size=min_conj_set_size,
20 | dry_run=dry_run,
21 | )
22 |
--------------------------------------------------------------------------------
/cacheops/query.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import threading
3 | from random import random
4 |
5 | from funcy import select_keys, cached_property, once, once_per, monkey, wraps, walk, chain
6 | from funcy import lmap, lcat, join_with
7 |
8 | from django.utils.encoding import force_str
9 | from django.core.exceptions import ImproperlyConfigured, EmptyResultSet
10 | from django.db import DEFAULT_DB_ALIAS, connections, models
11 | from django.db.models.manager import BaseManager
12 | from django.db.models.query import MAX_GET_RESULTS
13 | from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
14 | from django.db.transaction import atomic
15 |
16 | from .conf import model_profile, settings, ALL_OPS
17 | from .utils import monkey_mix, stamp_fields, get_cache_key, cached_view_fab, family_has_profile
18 | from .utils import md5
19 | from .getset import cache_thing, getting
20 | from .sharding import get_prefix
21 | from .tree import dnfs
22 | from .invalidation import invalidate_obj, invalidate_dict, skip_on_no_invalidation
23 | from .transaction import transaction_states
24 | from .signals import cache_read
25 |
26 |
27 | __all__ = ('cached_as', 'cached_view_as', 'install_cacheops')
28 |
29 | _local_get_cache = {}
30 |
31 |
32 | def cached_as(*samples, timeout=None, extra=None, lock=None, keep_fresh=False):
33 | """
34 | Caches results of a function and invalidates them same way as given queryset(s).
35 | NOTE: Ignores queryset cached ops settings, always caches.
36 |
37 | If keep_fresh is True, this will prevent caching if the given querysets are
38 | invalidated during the function call. This prevents prolonged caching of
39 | stale data.
40 | """
41 | if not samples:
42 | raise TypeError('Pass a queryset, a model or an object to cache like')
43 |
44 | # If we unexpectedly get list instead of queryset return identity decorator.
45 | # Paginator could do this when page.object_list is empty.
46 | if len(samples) == 1 and isinstance(samples[0], list):
47 | return lambda func: func
48 |
49 | def _get_queryset(sample):
50 | if isinstance(sample, models.Model):
51 | queryset = sample.__class__.objects.filter(pk=sample.pk)
52 | elif isinstance(sample, type) and issubclass(sample, models.Model):
53 | queryset = sample.objects.all()
54 | else:
55 | queryset = sample
56 |
57 | queryset._require_cacheprofile()
58 |
59 | return queryset
60 |
61 | querysets = lmap(_get_queryset, samples)
62 | dbs = list({qs.db for qs in querysets})
63 | cond_dnfs = join_with(lcat, map(dnfs, querysets)) # TODO: use cached version?
64 | qs_keys = [qs._cache_key(prefix=False) for qs in querysets]
65 | if timeout is None:
66 | timeout = min(qs._cacheprofile['timeout'] for qs in querysets)
67 | if lock is None:
68 | lock = any(qs._cacheprofile['lock'] for qs in querysets)
69 |
70 | def decorator(func):
71 | @wraps(func)
72 | def wrapper(*args, **kwargs):
73 | if not settings.CACHEOPS_ENABLED or transaction_states.is_dirty(dbs):
74 | return func(*args, **kwargs)
75 |
76 | prefix = get_prefix(func=func, _cond_dnfs=cond_dnfs, dbs=dbs)
77 | extra_val = extra(*args, **kwargs) if callable(extra) else extra
78 | cache_key = prefix + 'as:' + get_cache_key(func, args, kwargs, qs_keys, extra_val)
79 |
80 | with getting(cache_key, cond_dnfs, prefix, lock=lock) as cache_data:
81 | cache_read.send(sender=None, func=func, hit=cache_data is not None)
82 | if cache_data is not None:
83 | return settings.CACHEOPS_SERIALIZER.loads(cache_data)
84 | else:
85 | precall_key = ''
86 | expected_checksum = ''
87 | if keep_fresh and settings.CACHEOPS_INSIDEOUT:
88 | # The conj stamps should not be dropped while we calculate the function.
89 | # But being filled in concurrently is a normal concurrent cache write.
90 | # However, if they are filled in and then dropped, we cannot detect that.
91 | # Unless we fill them ourselves and get expected checksum now. We also need
92 | # to fill in schemes, so we just reuse the cache_thing().
93 | expected_checksum = cache_thing(prefix, cache_key, '', cond_dnfs, timeout,
94 | dbs=dbs, expected_checksum='never match')
95 | elif keep_fresh:
96 | # We call this "asp" for "as precall" because this key is
97 | # cached before the actual function is called. We randomize
98 | # the key to prevent falsely thinking the key was not
99 | # invalidated when in fact it was invalidated and the
100 | # function was called again in another process.
101 | suffix = get_cache_key(func, args, kwargs, qs_keys, extra_val, random())
102 | precall_key = prefix + 'asp:' + suffix
103 | # Cache a precall_key to watch for invalidation during
104 | # the function call. Its value does not matter. If and
105 | # only if it remains valid before, during, and after the
106 | # call, the result can be cached and returned.
107 | cache_thing(prefix, precall_key, 'PRECALL', cond_dnfs, timeout, dbs=dbs)
108 |
109 | result = func(*args, **kwargs)
110 | cache_thing(prefix, cache_key, result, cond_dnfs, timeout, dbs=dbs,
111 | precall_key=precall_key, expected_checksum=expected_checksum)
112 | return result
113 |
114 | return wrapper
115 | return decorator
116 |
117 |
118 | def cached_view_as(*samples, **kwargs):
119 | return cached_view_fab(cached_as)(*samples, **kwargs)
120 |
121 |
122 | class QuerySetMixin(object):
123 | @cached_property
124 | def _cacheprofile(self):
125 | profile = model_profile(self.model)
126 | return profile.copy() if profile else None
127 |
128 | @cached_property
129 | def _cloning(self):
130 | return 1000
131 |
132 | def _require_cacheprofile(self):
133 | if self._cacheprofile is None:
134 | raise ImproperlyConfigured(
135 | 'Cacheops is not enabled for %s.%s model.\n'
136 | 'If you don\'t want to cache anything by default '
137 | 'you can configure it with empty ops.'
138 | % (self.model._meta.app_label, self.model._meta.model_name))
139 |
140 | def _cache_key(self, prefix=True):
141 | """
142 | Compute a cache key for this queryset
143 | """
144 | md = md5()
145 | md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
146 | # Vary cache key for proxy models
147 | md.update('%s.%s' % (self.model.__module__, self.model.__name__))
148 | # Protect from field list changes in model
149 | md.update(stamp_fields(self.model))
150 | # Use query SQL as part of a key
151 | try:
152 | sql, params = self.query.get_compiler(self.db).as_sql()
153 | try:
154 | sql_str = sql % params
155 | except UnicodeDecodeError:
156 | sql_str = sql % walk(force_str, params)
157 | md.update(force_str(sql_str))
158 | except EmptyResultSet:
159 | pass
160 | # If query results differ depending on database
161 | if self._cacheprofile and not self._cacheprofile['db_agnostic']:
162 | md.update(self.db)
163 | # Iterable class pack results differently
164 | it_class = self._iterable_class
165 | md.update('%s.%s' % (it_class.__module__, it_class.__name__))
166 |
167 | cache_key = 'q:%s' % md.hexdigest()
168 | return self._prefix + cache_key if prefix else cache_key
169 |
170 | @cached_property
171 | def _prefix(self):
172 | return get_prefix(_queryset=self)
173 |
174 | @cached_property
175 | def _cond_dnfs(self):
176 | return dnfs(self)
177 |
178 | def _cache_results(self, cache_key, results):
179 | cache_thing(self._prefix, cache_key, results,
180 | self._cond_dnfs, self._cacheprofile['timeout'], dbs=[self.db])
181 |
182 | def _should_cache(self, op):
183 | # If cache and op are enabled and not within write or dirty transaction
184 | return settings.CACHEOPS_ENABLED \
185 | and self._cacheprofile and op in self._cacheprofile['ops'] \
186 | and not self._for_write \
187 | and not transaction_states[self.db].is_dirty()
188 |
189 | def cache(self, ops=None, timeout=None, lock=None):
190 | """
191 | Enables caching for given ops
192 | ops - a subset of {'get', 'fetch', 'count', 'exists', 'aggregate'},
193 | ops caching to be turned on, all enabled by default
194 | timeout - override default cache timeout
195 | lock - use lock to prevent dog-pile effect
196 |
197 | NOTE: you actually can disable caching by omitting corresponding ops,
198 | .cache(ops=[]) disables caching for this queryset.
199 | """
200 | self._require_cacheprofile()
201 |
202 | if ops is None or ops == 'all':
203 | ops = ALL_OPS
204 | if isinstance(ops, str):
205 | ops = {ops}
206 | self._cacheprofile['ops'] = set(ops)
207 |
208 | if timeout is not None:
209 | self._cacheprofile['timeout'] = timeout
210 | if lock is not None:
211 | self._cacheprofile['lock'] = lock
212 |
213 | return self
214 |
215 | def nocache(self):
216 | """
217 |         Convenience method, turns off caching for this queryset
218 | """
219 | # cache profile not present means caching is not enabled for this model
220 | if self._cacheprofile is None:
221 | return self
222 | else:
223 | return self.cache(ops=[])
224 |
225 | def cloning(self, cloning=1000):
226 | self._cloning = cloning
227 | return self
228 |
229 | def inplace(self):
230 | return self.cloning(0)
231 |
232 | def _clone(self, **kwargs):
233 | if self._cloning:
234 | return self.clone(**kwargs)
235 | else:
236 | self.__dict__.update(kwargs)
237 | return self
238 |
239 | def clone(self, **kwargs):
240 | clone = self._no_monkey._clone(self, **kwargs)
241 | clone._cloning = self._cloning - 1 if self._cloning else 0
242 | # NOTE: need to copy profile so that clone changes won't affect this queryset
243 | if self.__dict__.get('_cacheprofile'):
244 | clone._cacheprofile = self._cacheprofile.copy()
245 | return clone
246 |
247 | def _fetch_all(self):
248 | # If already fetched or should pass by then fall back
249 | if self._result_cache is not None or not self._should_cache('fetch'):
250 | return self._no_monkey._fetch_all(self)
251 |
252 | cache_key = self._cache_key()
253 | lock = self._cacheprofile['lock']
254 |
255 | with getting(cache_key, self._cond_dnfs, self._prefix, lock=lock) as cache_data:
256 | cache_read.send(sender=self.model, func=None, hit=cache_data is not None)
257 | if cache_data is not None:
258 | self._result_cache = settings.CACHEOPS_SERIALIZER.loads(cache_data)
259 | else:
260 | self._result_cache = list(self._iterable_class(self))
261 | self._cache_results(cache_key, self._result_cache)
262 |
263 | return self._no_monkey._fetch_all(self)
264 |
265 | def count(self):
266 | if self._should_cache('count'):
267 |             # Optimization borrowed from overridden method:
268 | # if queryset cache is already filled just return its len
269 | if self._result_cache is not None:
270 | return len(self._result_cache)
271 | return cached_as(self)(lambda: self._no_monkey.count(self))()
272 | else:
273 | return self._no_monkey.count(self)
274 |
275 | def aggregate(self, *args, **kwargs):
276 | if self._should_cache('aggregate'):
277 | # Apply all aggregates the same way original .aggregate() does, but do not perform sql.
278 | # This code is mostly taken from QuerySet.aggregate().
279 | normalized_kwargs = kwargs.copy()
280 | for arg in args:
281 | try:
282 | normalized_kwargs[arg.default_alias] = arg
283 | except (AttributeError, TypeError):
284 | # Let Django raise a proper error
285 | return self._no_monkey.aggregate(*args, **kwargs)
286 |
287 | # Simulate Query.get_aggregation() preparations, this adds proper joins to qs.query
288 | if not normalized_kwargs:
289 | return {}
290 |
291 | qs = self._clone()
292 | aggregates = {}
293 | for alias, aggregate_expr in normalized_kwargs.items():
294 | aggregate = aggregate_expr.resolve_expression(
295 | qs.query, allow_joins=True, reuse=None, summarize=True
296 | )
297 | if not aggregate.contains_aggregate:
298 | raise TypeError("%s is not an aggregate expression" % alias)
299 | aggregates[alias] = aggregate
300 |
301 | # Use resulting qs as a ref, aggregates still contain names, etc
302 | func = lambda: self._no_monkey.aggregate(self, *args, **kwargs)
303 | return cached_as(qs, extra=aggregates)(func)()
304 | else:
305 | return self._no_monkey.aggregate(self, *args, **kwargs)
306 |
307 | def get(self, *args, **kwargs):
308 | # .get() uses the same ._fetch_all() method to fetch data,
309 | # so here we add 'fetch' to ops
310 | if self._should_cache('get'):
311 | # NOTE: local_get=True enables caching of simple gets in local memory,
312 | # which is very fast, but not invalidated.
313 | # Don't bother with Q-objects, select_related and previous filters,
314 |             # simple gets - that's what we are really up to here.
315 |             #
316 |             # TODO: these checks are far from adequate, at least these are missed:
317 |             #       - self._fields (values, values_list)
318 |             #       - annotations
319 |             #       - ...
320 |             # TODO: don't distinguish between pk, pk__exact, id, id__exact
321 |             # TODO: work with .filter(**kwargs).get() ?
322 | if self._cacheprofile['local_get'] \
323 | and not args \
324 | and not self.query.select_related \
325 | and not self.query.where.children:
326 | # NOTE: We use simpler way to generate a cache key to cut costs.
327 | # Some day it could produce same key for different requests.
328 | key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
329 | try:
330 | return _local_get_cache[key]
331 | except KeyError:
332 | _local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
333 | return _local_get_cache[key]
334 | except TypeError:
335 | # If some arg is unhashable we can't save it to dict key,
336 | # we just skip local cache in that case
337 | pass
338 |
339 | if 'fetch' in self._cacheprofile['ops']:
340 | qs = self
341 | else:
342 | qs = self._clone().cache()
343 | else:
344 | qs = self
345 |
346 | return qs._no_monkey.get(qs, *args, **kwargs)
347 |
348 | def first(self):
349 | if self._should_cache('get'):
350 | return self._no_monkey.first(self._clone().cache())
351 | return self._no_monkey.first(self)
352 |
353 | def last(self):
354 | if self._should_cache('get'):
355 | return self._no_monkey.last(self._clone().cache())
356 | return self._no_monkey.last(self)
357 |
358 | def exists(self):
359 | if self._should_cache('exists'):
360 | if self._result_cache is not None:
361 | return bool(self._result_cache)
362 | return cached_as(self)(lambda: self._no_monkey.exists(self))()
363 | else:
364 | return self._no_monkey.exists(self)
365 |
366 | def bulk_create(self, objs, *args, **kwargs):
367 | objs = self._no_monkey.bulk_create(self, objs, *args, **kwargs)
368 | if family_has_profile(self.model):
369 | for obj in objs:
370 | invalidate_obj(obj, using=self.db)
371 | return objs
372 |
373 | def invalidated_update(self, **kwargs):
374 | clone = self._clone().nocache().select_related(None)
375 | clone._for_write = True # affects routing
376 |
377 | with atomic(using=clone.db):
378 | objects = list(clone.select_for_update())
379 | rows = clone.update(**kwargs)
380 |
381 | # TODO: do not refetch objects but update with kwargs in simple cases?
382 | # We use clone database to fetch new states, as this is the db they were written to.
383 | # Using router with new_objects may fail, using self may return slave during lag.
384 | pks = {obj.pk for obj in objects}
385 | new_objects = self.model.objects.filter(pk__in=pks).using(clone.db)
386 |
387 | for obj in chain(objects, new_objects):
388 | invalidate_obj(obj, using=clone.db)
389 |
390 | return rows
391 |
392 |
393 | def connect_first(signal, receiver, sender):
394 | old_receivers = signal.receivers
395 | signal.receivers = []
396 | signal.connect(receiver, sender=sender, weak=False)
397 | signal.receivers += old_receivers
398 |
399 | # We need to stash old object before Model.save() to invalidate on its properties
400 | _old_objs = threading.local()
401 |
402 | class ManagerMixin(object):
403 | @once_per('cls')
404 | def _install_cacheops(self, cls):
405 | # Set up signals
406 | connect_first(pre_save, self._pre_save, sender=cls)
407 | connect_first(post_save, self._post_save, sender=cls)
408 | connect_first(post_delete, self._post_delete, sender=cls)
409 |
410 | # Install auto-created models as their module attributes to make them picklable
411 | module = sys.modules[cls.__module__]
412 | if not hasattr(module, cls.__name__):
413 | setattr(module, cls.__name__, cls)
414 |
415 | # This is probably still needed if models are created dynamically or imported late
416 | def contribute_to_class(self, cls, name):
417 | self._no_monkey.contribute_to_class(self, cls, name)
418 |         # NOTE: we check it here rather than inside _install_cacheops()
419 | # because we don't want @once_per() and family_has_profile() memory to hold refs.
420 | # Otherwise, temporary classes made for migrations might hoard lots of memory.
421 | if cls.__module__ != '__fake__' and family_has_profile(cls):
422 | self._install_cacheops(cls)
423 |
424 | @skip_on_no_invalidation
425 | def _pre_save(self, sender, instance, using, **kwargs):
426 | if instance.pk is not None and not instance._state.adding:
427 | try:
428 | # TODO: do not fetch non-serializable fields
429 | _old_objs.__dict__[sender, instance.pk] \
430 | = sender.objects.using(using).get(pk=instance.pk)
431 | except sender.DoesNotExist:
432 | pass
433 |
434 | @skip_on_no_invalidation
435 | def _post_save(self, sender, instance, using, **kwargs):
436 | # Invoke invalidations for both old and new versions of saved object
437 | old = _old_objs.__dict__.pop((sender, instance.pk), None)
438 | if old:
439 | invalidate_obj(old, using=using)
440 | invalidate_obj(instance, using=using)
441 |
442 | invalidate_o2o(sender, old, instance, using=using)
443 |
444 | # We run invalidations but skip caching if we are dirty
445 | if transaction_states[using].is_dirty():
446 | return
447 |
448 | # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
449 | # but its base having one. Or vice versa.
450 | # We still need to invalidate in this case, but cache on save better be skipped.
451 | cacheprofile = model_profile(instance.__class__)
452 | if not cacheprofile:
453 | return
454 |
455 | # Enabled cache_on_save makes us write saved object to cache.
456 |         # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
457 |         # <cache_on_save_field> is pk unless specified.
458 | # This sweet trick saves a db request and helps with slave lag.
459 | cache_on_save = cacheprofile.get('cache_on_save')
460 | if cache_on_save:
461 | # HACK: We get this object "from field" so it can contain
462 | # some undesirable attributes or other objects attached.
463 | # RelatedField accessors do that, for example.
464 | #
465 | # So we strip down any _*_cache attrs before saving
466 | # and later reassign them
467 | unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
468 | for k in unwanted_dict:
469 | del instance.__dict__[k]
470 |
471 | key = 'pk' if cache_on_save is True else cache_on_save
472 | cond = {key: getattr(instance, key)}
473 | qs = sender.objects.inplace().using(using).filter(**cond).order_by()
474 | # Mimic Django .get() logic
475 | if MAX_GET_RESULTS and (
476 | not qs.query.select_for_update
477 | or connections[qs.db].features.supports_select_for_update_with_limit):
478 | qs.query.set_limits(high=MAX_GET_RESULTS)
479 | qs._cache_results(qs._cache_key(), [instance])
480 |
481 | # Reverting stripped attributes
482 | instance.__dict__.update(unwanted_dict)
483 |
484 | def _post_delete(self, sender, instance, using, **kwargs):
485 | """
486 | Invalidation upon object deletion.
487 | """
488 | # NOTE: this will behave wrong if someone changed object fields
489 |         # before deletion (why would anyone do that?)
490 | invalidate_obj(instance, using=using)
491 | # NOTE: this is needed because m2m_changed is not sent on such deletion:
492 | # https://code.djangoproject.com/ticket/17688
493 | invalidate_m2o(sender, instance, using)
494 |
495 | def inplace(self):
496 | return self.get_queryset().inplace()
497 |
498 | def cache(self, *args, **kwargs):
499 | return self.get_queryset().cache(*args, **kwargs)
500 |
501 | def nocache(self):
502 | return self.get_queryset().nocache()
503 |
504 | def invalidated_update(self, **kwargs):
505 | return self.get_queryset().inplace().invalidated_update(**kwargs)
506 |
507 |
508 | def invalidate_o2o(sender, old, instance, using=DEFAULT_DB_ALIAS):
509 | """Invoke invalidation for o2o reverse queries"""
510 | o2o_fields = [f for f in sender._meta.fields if isinstance(f, models.OneToOneField)]
511 | for f in o2o_fields:
512 | old_value = getattr(old, f.attname, None)
513 | value = getattr(instance, f.attname)
514 | if old_value != value:
515 | rmodel, rfield = f.related_model, f.remote_field.field_name
516 | if old:
517 | invalidate_dict(rmodel, {rfield: old_value}, using=using)
518 | invalidate_dict(rmodel, {rfield: value}, using=using)
519 |
520 |
521 | def invalidate_m2o(sender, instance, using=DEFAULT_DB_ALIAS):
522 | """Invoke invalidation for m2o and m2m queries to a deleted instance"""
523 | all_fields = sender._meta.get_fields(include_hidden=True, include_parents=True)
524 | m2o_fields = [f for f in all_fields if isinstance(f, models.ManyToOneRel)]
525 | fk_fields_names_map = {
526 | f.name: f.attname
527 | for f in all_fields if isinstance(f, models.ForeignKey)
528 | }
529 | for f in m2o_fields:
530 | attr = fk_fields_names_map.get(f.field_name, f.field_name)
531 | value = getattr(instance, attr)
532 | rmodel, rfield = f.related_model, f.remote_field.attname
533 | invalidate_dict(rmodel, {rfield: value}, using=using)
534 |
535 |
536 | def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
537 | using=DEFAULT_DB_ALIAS, **kwargs):
538 | """
539 | Invoke invalidation on m2m changes.
540 | """
541 | # Skip this machinery for explicit through tables,
542 | # since post_save and post_delete events are triggered for them
543 | if not sender._meta.auto_created:
544 | return
545 | if action not in ('pre_clear', 'post_add', 'pre_remove'):
546 | return
547 |
548 | m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
549 | if m2m.remote_field.through == sender)
550 | instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
551 | if reverse:
552 | instance_column, model_column = model_column, instance_column
553 |
554 | # TODO: optimize several invalidate_objs/dicts at once
555 | if action == 'pre_clear':
556 | objects = sender.objects.using(using).filter(**{instance_column: instance.pk})
557 | for obj in objects:
558 | invalidate_obj(obj, using=using)
559 | elif action in ('post_add', 'pre_remove'):
560 | # NOTE: we don't need to query through objects here,
561 | # cause we already know all their meaningful attributes.
562 | for pk in pk_set:
563 | invalidate_dict(sender, {
564 | instance_column: instance.pk,
565 | model_column: pk
566 | }, using=using)
567 |
568 |
569 | @once
570 | def install_cacheops():
571 | """
572 | Installs cacheops by numerous monkey patches
573 | """
574 | monkey_mix(BaseManager, ManagerMixin)
575 | monkey_mix(models.QuerySet, QuerySetMixin)
576 |
577 | # Use app registry to introspect used apps
578 | from django.apps import apps
579 |
580 | # Install profile and signal handlers for any earlier created models
581 | for model in apps.get_models(include_auto_created=True):
582 | if family_has_profile(model):
583 | if not isinstance(model._default_manager, BaseManager):
584 | raise ImproperlyConfigured("Can't install cacheops for %s.%s model:"
585 | " non-django model class or manager is used."
586 | % (model._meta.app_label, model._meta.model_name))
587 | model._default_manager._install_cacheops(model)
588 |
589 | # Bind m2m changed handlers
590 | m2ms = (f for f in model._meta.get_fields(include_hidden=True) if f.many_to_many)
591 | for m2m in m2ms:
592 | rel = m2m if hasattr(m2m, 'through') else m2m.remote_field
593 | opts = rel.through._meta
594 | m2m_changed.connect(invalidate_m2m, sender=rel.through,
595 | dispatch_uid=(opts.app_label, opts.model_name))
596 |
597 | # Turn off caching in admin
598 | if apps.is_installed('django.contrib.admin'):
599 | from django.contrib.admin.options import ModelAdmin
600 |
601 | @monkey(ModelAdmin)
602 | def get_queryset(self, request):
603 | return get_queryset.original(self, request).nocache()
604 |
605 | # Make buffers/memoryviews pickleable to serialize binary field data
606 | import copyreg
607 | copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
608 |
--------------------------------------------------------------------------------
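A short sketch of the cached_as() decorator defined above; the model and timeout are illustrative assumptions:

    from cacheops import cached_as

    @cached_as(Article.objects.filter(published=True), timeout=120)
    def published_count():
        return Article.objects.filter(published=True).count()

    # The result is cached under a key derived from the function and the sample
    # queryset, and dropped whenever an invalidation touches rows matching the
    # sample's conditions.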
/cacheops/reaper.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from django.db import DEFAULT_DB_ALIAS
4 |
5 | from .redis import redis_client
6 | from .sharding import get_prefix
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | def reap_conjs(
12 | chunk_size: int = 1000,
13 | min_conj_set_size: int = 1000,
14 | using=DEFAULT_DB_ALIAS,
15 | dry_run: bool = False,
16 | ):
17 | """
18 | Remove expired cache keys from invalidation sets.
19 |
20 | Cacheops saves each DB resultset cache key in a "conj set" so it can delete it later if it
21 | thinks it should be invalidated due to a saved record with matching values. But the resultset
22 | caches time out after 30 minutes, and their cache keys live in those conj sets forever!
23 |
24 | So conj sets for frequent queries on tables that aren't updated often end up containing
25 | millions of already-expired cache keys and maybe a few thousand actually useful ones,
26 | and block Redis for multiple - or many - seconds when cacheops finally decides
27 | to invalidate them.
28 |
29 | This function scans cacheops' conj keys for already-expired cache keys and removes them.
30 | """
31 | logger.info('Starting scan for large conj sets')
32 | prefix = get_prefix(dbs=[using])
33 | for conj_key in redis_client.scan_iter(prefix + 'conj:*', count=chunk_size):
34 | total = redis_client.scard(conj_key)
35 | if total < min_conj_set_size:
36 | continue
37 | logger.info('Found %s cache keys in %s, scanning for expired keys', total, conj_key)
38 | _clear_conj_key(conj_key, chunk_size, dry_run)
39 | logger.info('Done scan for large conj sets')
40 |
41 |
42 | def _clear_conj_key(conj_key: bytes, chunk_size: int, dry_run: bool):
43 | """Scan the cache keys in a conj set in batches and remove any that have expired."""
44 | count, removed = 0, 0
45 | for keys in _iter_keys_chunk(chunk_size, conj_key):
46 | count += len(keys)
47 | values = redis_client.mget(keys)
48 | expired = [k for k, v in zip(keys, values) if not v]
49 | if expired:
50 | if not dry_run:
51 | redis_client.srem(conj_key, *expired)
52 | removed += len(expired)
53 | logger.info('Removed %s/%s cache keys from %s', removed, count, conj_key)
54 | if removed and not dry_run:
55 | redis_client.execute_command('MEMORY PURGE')
56 |
57 |
58 | def _iter_keys_chunk(chunk_size, key):
59 | cursor = 0
60 | while True:
61 | cursor, items = redis_client.sscan(key, cursor, count=chunk_size)
62 | if items:
63 | yield items
64 | if cursor == 0:
65 | break
66 |
--------------------------------------------------------------------------------
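reap_conjs() above is also exposed as the reapconjs management command shown earlier; a sketch of periodic use (scheduling is left to the deployment):

    ./manage.py reapconjs --dry-run          # report what would be removed
    ./manage.py reapconjs --chunk-size=5000  # then actually trim

    # or directly from python:
    from cacheops.reaper import reap_conjs
    reap_conjs(min_conj_set_size=10000)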
/cacheops/redis.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 | from django.core.exceptions import ImproperlyConfigured
4 | from django.utils.module_loading import import_string
5 |
6 | from funcy import decorator, identity, memoize, omit, LazyObject
7 | import redis
8 | from redis.sentinel import Sentinel
9 | from .conf import settings
10 |
11 |
12 | if settings.CACHEOPS_DEGRADE_ON_FAILURE:
13 | @decorator
14 | def handle_connection_failure(call):
15 | try:
16 | return call()
17 | except redis.ConnectionError as e:
18 | warnings.warn("The cacheops cache is unreachable! Error: %s" % e, RuntimeWarning)
19 | except redis.TimeoutError as e:
20 | warnings.warn("The cacheops cache timed out! Error: %s" % e, RuntimeWarning)
21 | else:
22 | handle_connection_failure = identity
23 |
24 |
25 | @LazyObject
26 | def redis_client():
27 | if settings.CACHEOPS_REDIS and settings.CACHEOPS_SENTINEL:
28 | raise ImproperlyConfigured("CACHEOPS_REDIS and CACHEOPS_SENTINEL are mutually exclusive")
29 |
30 | client_class = redis.Redis
31 | if settings.CACHEOPS_CLIENT_CLASS:
32 | client_class = import_string(settings.CACHEOPS_CLIENT_CLASS)
33 |
34 | if settings.CACHEOPS_SENTINEL:
35 | if not {'locations', 'service_name'} <= set(settings.CACHEOPS_SENTINEL):
36 | raise ImproperlyConfigured("Specify locations and service_name for CACHEOPS_SENTINEL")
37 |
38 | sentinel = Sentinel(
39 | settings.CACHEOPS_SENTINEL['locations'],
40 | **omit(settings.CACHEOPS_SENTINEL, ('locations', 'service_name', 'db')))
41 | return sentinel.master_for(
42 | settings.CACHEOPS_SENTINEL['service_name'],
43 | redis_class=client_class,
44 | db=settings.CACHEOPS_SENTINEL.get('db', 0)
45 | )
46 |
47 | # Allow client connection settings to be specified by a URL.
48 | if isinstance(settings.CACHEOPS_REDIS, str):
49 | return client_class.from_url(settings.CACHEOPS_REDIS)
50 | else:
51 | return client_class(**settings.CACHEOPS_REDIS)
52 |
53 |
54 | ### Lua script loader
55 |
56 | import os.path
57 | import re
58 |
59 |
60 | @memoize
61 | def load_script(name):
62 | filename = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
63 | with open(filename) as f:
64 | code = f.read()
65 | if is_redis_7():
66 | code = re.sub(r'REDIS_4.*?/REDIS_4', '', code, flags=re.S)
67 | else:
68 | code = re.sub(r'REDIS_7.*?/REDIS_7', '', code, flags=re.S)
69 | return redis_client.register_script(code)
70 |
71 |
72 | @memoize
73 | def is_redis_7():
74 | redis_version = redis_client.info('server')['redis_version']
75 | return int(redis_version.split('.')[0]) >= 7
76 |
--------------------------------------------------------------------------------
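Connection settings consumed by redis_client above, as a sketch (hosts and names are illustrative); note that CACHEOPS_REDIS and CACHEOPS_SENTINEL are mutually exclusive:

    CACHEOPS_REDIS = "redis://localhost:6379/1"  # URL form
    # CACHEOPS_REDIS = {'host': 'localhost', 'port': 6379, 'db': 1}  # dict form

    # or via sentinel:
    CACHEOPS_SENTINEL = {
        'locations': [('localhost', 26379)],  # required
        'service_name': 'mymaster',           # required
        'db': 1,                              # remaining keys go to Sentinel()
    }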
/cacheops/serializers.py:
--------------------------------------------------------------------------------
1 | import pickle
2 |
3 |
4 | class PickleSerializer:
5 | # properties
6 | PickleError = pickle.PickleError
7 | HIGHEST_PROTOCOL = pickle.HIGHEST_PROTOCOL
8 |
9 | # methods
10 | dumps = pickle.dumps
11 | loads = pickle.loads
12 |
--------------------------------------------------------------------------------
/cacheops/sharding.py:
--------------------------------------------------------------------------------
1 | from funcy import cached_property
2 | from django.core.exceptions import ImproperlyConfigured
3 |
4 | from .conf import settings
5 |
6 |
7 | def get_prefix(**kwargs):
8 | return settings.CACHEOPS_PREFIX(PrefixQuery(**kwargs))
9 |
10 |
11 | class PrefixQuery(object):
12 | def __init__(self, **kwargs):
13 | assert set(kwargs) <= {'func', '_queryset', '_cond_dnfs', 'dbs', 'tables'}
14 | kwargs.setdefault('func', None)
15 | self.__dict__.update(kwargs)
16 |
17 | @cached_property
18 | def dbs(self):
19 | return [self._queryset.db]
20 |
21 | @cached_property
22 | def db(self):
23 | if len(self.dbs) > 1:
24 | dbs_str = ', '.join(self.dbs)
25 | raise ImproperlyConfigured('Single db required, but several used: ' + dbs_str)
26 | return self.dbs[0]
27 |
28 | # TODO: think if I should expose it and how. Same for queryset.
29 | @cached_property
30 | def _cond_dnfs(self):
31 | return self._queryset._cond_dnfs
32 |
33 | @cached_property
34 | def tables(self):
35 | return list(self._cond_dnfs)
36 |
37 | @cached_property
38 | def table(self):
39 | if len(self.tables) > 1:
40 | tables_str = ', '.join(self.tables)
41 | raise ImproperlyConfigured('Single table required, but several used: ' + tables_str)
42 | return self.tables[0]
43 |
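The attributes PrefixQuery exposes are what CACHEOPS_PREFIX functions dispatch on.
Two illustrative settings, mirroring what tests/tests_sharding.py exercises:

    CACHEOPS_PREFIX = lambda q: q.db + ':'       # separate keyspace per database alias
    # CACHEOPS_PREFIX = lambda q: q.table + ':'  # per-table keyspace; q.table raises
    #                                            # ImproperlyConfigured on multi-table queries
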
--------------------------------------------------------------------------------
/cacheops/signals.py:
--------------------------------------------------------------------------------
1 | import django.dispatch
2 |
3 | cache_read = django.dispatch.Signal() # args: func, hit
4 | cache_invalidated = django.dispatch.Signal() # args: obj_dict
5 |
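A receiver sketch for wiring these signals into metrics; the keyword arguments
mirror how tests/test_extras.py consumes them (sender is the model class, or
None for cached_as functions):

    from cacheops.signals import cache_read

    def track_cache_read(sender, func, hit, **kwargs):
        # e.g. bump a per-model hit/miss counter here
        print('cacheops', sender, 'hit' if hit else 'miss')

    cache_read.connect(track_cache_read)
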
--------------------------------------------------------------------------------
/cacheops/simple.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | from funcy import wraps
5 |
6 | from .conf import settings
7 | from .utils import get_cache_key, cached_view_fab, md5hex
8 | from .redis import redis_client, handle_connection_failure
9 | from .sharding import get_prefix
10 |
11 |
12 | __all__ = ('cache', 'cached', 'cached_view', 'file_cache', 'CacheMiss', 'FileCache', 'RedisCache')
13 |
14 |
15 | class CacheMiss(Exception):
16 | pass
17 |
18 | class CacheKey(str):
19 | @classmethod
20 | def make(cls, value, cache=None, timeout=None):
21 |         self = cls(value)
22 | self.cache = cache
23 | self.timeout = timeout
24 | return self
25 |
26 |     def get(self):
27 |         return self.cache._get(self)
28 |
29 | def set(self, value):
30 | self.cache._set(self, value, self.timeout)
31 |
32 | def delete(self):
33 | self.cache._delete(self)
34 |
35 | class BaseCache(object):
36 | """
37 | Simple cache with time-based invalidation
38 | """
39 | def cached(self, timeout=None, extra=None):
40 | """
41 | A decorator for caching function calls
42 | """
43 | # Support @cached (without parentheses) form
44 | if callable(timeout):
45 | return self.cached()(timeout)
46 |
47 | def _get_key(func, args, kwargs):
48 | extra_val = extra(*args, **kwargs) if callable(extra) else extra
49 | return get_prefix(func=func) + 'c:' + get_cache_key(func, args, kwargs, extra_val)
50 |
51 | def decorator(func):
52 | @wraps(func)
53 | def wrapper(*args, **kwargs):
54 | if not settings.CACHEOPS_ENABLED:
55 | return func(*args, **kwargs)
56 |
57 | cache_key = _get_key(func, args, kwargs)
58 | try:
59 | result = self._get(cache_key)
60 | except CacheMiss:
61 | result = func(*args, **kwargs)
62 | self._set(cache_key, result, timeout)
63 |
64 | return result
65 |
66 | def invalidate(*args, **kwargs):
67 | self._delete(_get_key(func, args, kwargs))
68 | wrapper.invalidate = invalidate
69 |
70 | def key(*args, **kwargs):
71 | return CacheKey.make(_get_key(func, args, kwargs), cache=self, timeout=timeout)
72 | wrapper.key = key
73 |
74 | return wrapper
75 | return decorator
76 |
77 | def cached_view(self, timeout=None, extra=None):
78 | if callable(timeout):
79 | return self.cached_view()(timeout)
80 | return cached_view_fab(self.cached)(timeout=timeout, extra=extra)
81 |
82 | def get(self, cache_key):
83 | return self._get(get_prefix() + cache_key)
84 |
85 | def set(self, cache_key, data, timeout=None):
86 | self._set(get_prefix() + cache_key, data, timeout)
87 |
88 | def delete(self, cache_key):
89 | self._delete(get_prefix() + cache_key)
90 |
91 |
92 | class RedisCache(BaseCache):
93 | def __init__(self, conn):
94 | self.conn = conn
95 |
96 | def _get(self, cache_key):
97 | data = self.conn.get(cache_key)
98 | if data is None:
99 | raise CacheMiss
100 | return settings.CACHEOPS_SERIALIZER.loads(data)
101 |
102 | @handle_connection_failure
103 | def _set(self, cache_key, data, timeout=None):
104 | pickled_data = settings.CACHEOPS_SERIALIZER.dumps(data)
105 | if timeout is not None:
106 | self.conn.setex(cache_key, timeout, pickled_data)
107 | else:
108 | self.conn.set(cache_key, pickled_data)
109 |
110 | @handle_connection_failure
111 | def _delete(self, cache_key):
112 | self.conn.delete(cache_key)
113 |
114 | cache = RedisCache(redis_client)
115 | cached = cache.cached
116 | cached_view = cache.cached_view
117 |
118 |
119 | class FileCache(BaseCache):
120 | """
121 |     A file cache which fixes bugs and misdesign in Django's default one.
122 |     Expiry time is stored as an mtime in the future, so stale files can be
123 |     detected without reading them.
124 | """
125 | def __init__(self, path, timeout=settings.FILE_CACHE_TIMEOUT):
126 | self._dir = path
127 | self._default_timeout = timeout
128 |
129 | def _key_to_filename(self, key):
130 | """
131 | Returns a filename corresponding to cache key
132 | """
133 | digest = md5hex(key)
134 | return os.path.join(self._dir, digest[-2:], digest[:-2])
135 |
136 | def _get(self, key):
137 | filename = self._key_to_filename(key)
138 | try:
139 | # Remove file if it's stale
140 | if time.time() >= os.stat(filename).st_mtime:
141 |                 self._delete(filename)  # already a filename, don't re-prefix it
142 | raise CacheMiss
143 |
144 | with open(filename, 'rb') as f:
145 |                 return settings.CACHEOPS_SERIALIZER.loads(f.read())
146 | except (IOError, OSError, EOFError):
147 | raise CacheMiss
148 |
149 | def _set(self, key, data, timeout=None):
150 | filename = self._key_to_filename(key)
151 | dirname = os.path.dirname(filename)
152 |
153 | if timeout is None:
154 | timeout = self._default_timeout
155 |
156 | try:
157 | if not os.path.exists(dirname):
158 | os.makedirs(dirname)
159 |
160 | # Use open with exclusive rights to prevent data corruption
161 | f = os.open(filename, os.O_EXCL | os.O_WRONLY | os.O_CREAT)
162 | try:
163 | os.write(f, settings.CACHEOPS_SERIALIZER.dumps(data))
164 | finally:
165 | os.close(f)
166 |
167 | # Set mtime to expire time
168 | os.utime(filename, (0, time.time() + timeout))
169 | except (IOError, OSError):
170 | pass
171 |
172 | def _delete(self, fname):
173 | try:
174 | os.remove(fname)
175 |             # Try to remove the directory in case it's empty
176 | dirname = os.path.dirname(fname)
177 | os.rmdir(dirname)
178 | except (IOError, OSError):
179 | pass
180 |
181 | file_cache = FileCache(settings.FILE_CACHE_DIR)
182 |
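A usage sketch for the pieces above, with a toy function standing in for an
expensive computation:

    from cacheops import cached

    @cached(timeout=60)
    def heavy(a, b):
        return a + b

    heavy(1, 2)             # computed, then stored in Redis for 60 seconds
    heavy(1, 2)             # served from cache
    heavy.invalidate(1, 2)  # drops exactly that cached call
    key = heavy.key(1, 2)   # CacheKey handle with .get()/.set()/.delete()
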
--------------------------------------------------------------------------------
/cacheops/templatetags/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suor/django-cacheops/d4b390372d839ff1389662cb311a916eb2c992ed/cacheops/templatetags/__init__.py
--------------------------------------------------------------------------------
/cacheops/templatetags/cacheops.py:
--------------------------------------------------------------------------------
1 | from inspect import getfullargspec, unwrap
2 | from functools import partial
3 |
4 | from django.template import Library
5 | from django.template.library import TagHelperNode, parse_bits
6 |
7 | import cacheops
8 | from cacheops.utils import carefully_strip_whitespace
9 |
10 |
11 | __all__ = ['CacheopsLibrary', 'invalidate_fragment']
12 |
13 |
14 | class CacheopsLibrary(Library):
15 | def decorator_tag(self, func=None, takes_context=False):
16 | if func is None:
17 | return partial(self.decorator_tag, takes_context=takes_context)
18 |
19 | name = func.__name__
20 | params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func))
21 |
22 | def _compile(parser, token):
23 | # content
24 | nodelist = parser.parse(('end' + name,))
25 | parser.delete_first_token()
26 |
27 | # args
28 | bits = token.split_contents()[1:]
29 | args, kwargs = parse_bits(
30 | parser, bits, params, varargs, varkw, defaults,
31 | kwonly, kwonly_defaults, takes_context, name,
32 | )
33 | return CachedNode(func, takes_context, args, kwargs, nodelist)
34 |
35 | self.tag(name=name, compile_function=_compile)
36 | return func
37 |
38 | register = CacheopsLibrary()
39 |
40 |
41 | class CachedNode(TagHelperNode):
42 | def __init__(self, func, takes_context, args, kwargs, nodelist):
43 | super(CachedNode, self).__init__(func, takes_context, args, kwargs)
44 | self.nodelist = nodelist
45 |
46 | def render(self, context):
47 | args, kwargs = self.get_resolved_arguments(context)
48 | decorator = self.func(*args, **kwargs)
49 | render = _make_render(context, self.nodelist)
50 | return decorator(render)()
51 |
52 |
53 | def _make_render(context, nodelist):
54 | def render():
55 | # TODO: make this cache preparation configurable
56 | return carefully_strip_whitespace(nodelist.render(context))
57 | return render
58 |
59 |
60 | @register.decorator_tag
61 | def cached(timeout, fragment_name, *extra):
62 | return cacheops.cached(timeout=timeout, extra=(fragment_name,) + extra)
63 |
64 |
65 | def invalidate_fragment(fragment_name, *extra):
66 | render = _make_render(None, None)
67 | cached(None, fragment_name, *extra)(render).invalidate()
68 |
69 |
70 | @register.decorator_tag
71 | def cached_as(queryset, timeout, fragment_name, *extra):
72 | return cacheops.cached_as(queryset, timeout=timeout, extra=(fragment_name,) + extra)
73 |
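Template-side usage of the tags above, with an illustrative fragment name and
extra argument (extras passed to invalidate_fragment must match those used in
the template):

    {% load cacheops %}
    {% cached 300 'sidebar' request.user.pk %}
        ... expensive fragment ...
    {% endcached %}

and the matching Python-side invalidation:

    from cacheops.templatetags.cacheops import invalidate_fragment
    invalidate_fragment('sidebar', user_pk)
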
--------------------------------------------------------------------------------
/cacheops/transaction.py:
--------------------------------------------------------------------------------
1 | import threading
2 | from collections import defaultdict
3 |
4 | from funcy import once, decorator
5 |
6 | from django.db import DEFAULT_DB_ALIAS, DatabaseError
7 | from django.db.backends.utils import CursorWrapper
8 | from django.db.transaction import Atomic, get_connection, on_commit
9 |
10 | from .utils import monkey_mix
11 |
12 |
13 | __all__ = ('queue_when_in_transaction', 'install_cacheops_transaction_support',
14 | 'transaction_states')
15 |
16 |
17 | class TransactionState(list):
18 | def begin(self):
19 | self.append({'cbs': [], 'dirty': False})
20 |
21 | def commit(self):
22 | context = self.pop()
23 | if self:
24 | # savepoint
25 | self[-1]['cbs'].extend(context['cbs'])
26 | self[-1]['dirty'] = self[-1]['dirty'] or context['dirty']
27 | else:
28 | # transaction
29 | for func, args, kwargs in context['cbs']:
30 | func(*args, **kwargs)
31 |
32 | def rollback(self):
33 | self.pop()
34 |
35 | def push(self, item):
36 | self[-1]['cbs'].append(item)
37 |
38 | def mark_dirty(self):
39 | self[-1]['dirty'] = True
40 |
41 | def is_dirty(self):
42 | return any(context['dirty'] for context in self)
43 |
44 | class TransactionStates(threading.local):
45 | def __init__(self):
46 | super(TransactionStates, self).__init__()
47 | self._states = defaultdict(TransactionState)
48 |
49 | def __getitem__(self, key):
50 | return self._states[key or DEFAULT_DB_ALIAS]
51 |
52 | def is_dirty(self, dbs):
53 | return any(self[db].is_dirty() for db in dbs)
54 |
55 | transaction_states = TransactionStates()
56 |
57 |
58 | @decorator
59 | def queue_when_in_transaction(call):
60 | if transaction_states[call.using]:
61 | transaction_states[call.using].push((call, (), {}))
62 | else:
63 | return call()
64 |
65 |
66 | class AtomicMixIn(object):
67 | def __enter__(self):
68 | entering = not transaction_states[self.using]
69 | transaction_states[self.using].begin()
70 | self._no_monkey.__enter__(self)
71 | if entering:
72 | on_commit(transaction_states[self.using].commit, self.using)
73 |
74 | def __exit__(self, exc_type, exc_value, traceback):
75 | connection = get_connection(self.using)
76 | try:
77 | self._no_monkey.__exit__(self, exc_type, exc_value, traceback)
78 | except DatabaseError:
79 | transaction_states[self.using].rollback()
80 | raise
81 | else:
82 | if not connection.closed_in_transaction and exc_type is None and \
83 | not connection.needs_rollback:
84 | if transaction_states[self.using]:
85 | transaction_states[self.using].commit()
86 | else:
87 | transaction_states[self.using].rollback()
88 |
89 |
90 | class CursorWrapperMixin(object):
91 | def callproc(self, procname, params=None):
92 | result = self._no_monkey.callproc(self, procname, params)
93 | if transaction_states[self.db.alias]:
94 | transaction_states[self.db.alias].mark_dirty()
95 | return result
96 |
97 | def execute(self, sql, params=None):
98 | result = self._no_monkey.execute(self, sql, params)
99 | if transaction_states[self.db.alias] and is_sql_dirty(sql):
100 | transaction_states[self.db.alias].mark_dirty()
101 | return result
102 |
103 | def executemany(self, sql, param_list):
104 | result = self._no_monkey.executemany(self, sql, param_list)
105 | if transaction_states[self.db.alias] and is_sql_dirty(sql):
106 | transaction_states[self.db.alias].mark_dirty()
107 | return result
108 |
109 |
110 | CHARS = set('abcdefghijklmnoprqstuvwxyz_')
111 |
112 | def is_sql_dirty(sql):
113 |     # This should not happen, as passing bytes in Python 3 is against the db protocol,
114 |     # but some people do it anyway
115 | if isinstance(sql, bytes):
116 | sql = sql.decode()
117 | # NOTE: not using regex here for speed
118 | sql = sql.lower()
119 | for action in ('update', 'insert', 'delete'):
120 | p = sql.find(action)
121 | if p == -1:
122 | continue
123 | start, end = p - 1, p + len(action)
124 | if (start < 0 or sql[start] not in CHARS) and (end >= len(sql) or sql[end] not in CHARS):
125 | return True
126 | else:
127 | return False
128 |
129 |
130 | @once
131 | def install_cacheops_transaction_support():
132 | monkey_mix(Atomic, AtomicMixIn)
133 | monkey_mix(CursorWrapper, CursorWrapperMixin)
134 |
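A behavior sketch of what the installed mixins buy (post here is a hypothetical,
already-saved model instance): invalidation triggered inside an atomic block is
queued per database and only fires if the outermost transaction commits; a
rollback simply discards the queue:

    from django.db import transaction

    with transaction.atomic():
        post.title = 'new'
        post.save()  # invalidation queued, old cache still visible to others
    # the commit above fires the queued invalidation callbacks
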
--------------------------------------------------------------------------------
/cacheops/tree.py:
--------------------------------------------------------------------------------
1 | from itertools import product
2 | from funcy import group_by, join_with, lcat, lmap, cat
3 |
4 | from django.db.models import Subquery
5 | from django.db.models.query import QuerySet
6 | from django.db.models.sql import OR
7 | from django.db.models.sql.datastructures import Join
8 | from django.db.models.sql.query import Query, ExtraWhere
9 | from django.db.models.sql.where import NothingNode
10 | from django.db.models.lookups import Lookup, Exact, In, IsNull
11 | from django.db.models.expressions import BaseExpression, Exists
12 |
13 | from .conf import settings
14 | from .invalidation import serializable_fields
15 |
16 | # This existed prior to Django 5.2
17 | try:
18 | from django.db.models.sql.where import SubqueryConstraint
19 | except ImportError:
20 | class SubqueryConstraint(object):
21 | pass
22 |
23 |
24 | def dnfs(qs):
25 | """
26 | Converts query condition tree into a DNF of eq conds.
27 | Separately for each alias.
28 |
29 | Any negations, conditions with lookups other than __exact or __in,
30 |     conditions on joined models and subqueries are ignored.
31 | __in is converted into = or = or = ...
32 | """
33 | SOME = Some()
34 | SOME_TREE = {frozenset({(None, None, SOME, True)})}
35 |
36 | def negate(term):
37 | return (term[0], term[1], term[2], not term[3])
38 |
39 | def _dnf(where):
40 | """
41 |         Constructs a DNF of the where tree, consisting of terms of the form:
42 |             (alias, attribute, value, negation)
43 |         meaning `alias.attribute = value`
44 |         or `not alias.attribute = value` if negation is False
45 |
46 |         Any conditions other than eq are dropped.
47 | """
48 | if isinstance(where, Lookup):
49 |         # If where.lhs doesn't refer to a field then don't bother
50 | if not hasattr(where.lhs, 'target'):
51 | return SOME_TREE
52 | # Don't bother with complex right hand side either
53 | if isinstance(where.rhs, (QuerySet, Query, BaseExpression)):
54 | return SOME_TREE
55 | # Skip conditions on non-serialized fields
56 | if where.lhs.target not in serializable_fields(where.lhs.target.model):
57 | return SOME_TREE
58 |
59 | attname = where.lhs.target.attname
60 | if isinstance(where, Exact):
61 | return {frozenset({(where.lhs.alias, attname, where.rhs, True)})}
62 | elif isinstance(where, IsNull):
63 | return {frozenset({(where.lhs.alias, attname, None, where.rhs)})}
64 | elif isinstance(where, In) and len(where.rhs) < settings.CACHEOPS_LONG_DISJUNCTION:
65 | return {frozenset({(where.lhs.alias, attname, v, True)}) for v in where.rhs}
66 | else:
67 | return SOME_TREE
68 | elif isinstance(where, NothingNode):
69 | return set()
70 | elif isinstance(where, (ExtraWhere, SubqueryConstraint, Exists)):
71 | return SOME_TREE
72 | elif len(where) == 0:
73 | return {frozenset()}
74 | else:
75 | children_dnfs = lmap(_dnf, where.children)
76 |
77 | if len(children_dnfs) == 0:
78 | return {frozenset()}
79 | elif len(children_dnfs) == 1:
80 | result = children_dnfs[0]
81 | else:
82 | # Just unite children joined with OR
83 | if where.connector == OR:
84 | result = set(cat(children_dnfs))
85 | # Use Cartesian product to AND children
86 | else:
87 | result = {frozenset(cat(conjs)) for conjs in product(*children_dnfs)}
88 |
89 | # Negating and expanding brackets
90 | if where.negated:
91 | result = {frozenset(map(negate, conjs)) for conjs in product(*result)}
92 |
93 | return result
94 |
95 | def clean_conj(conj, for_alias):
96 | conds = {}
97 | for alias, attname, value, negation in conj:
98 | # "SOME" conds, negated conds and conds for other aliases should be stripped
99 | if value is not SOME and negation and alias == for_alias:
100 |             # A conj where one field must equal two different values can never cause invalidation
101 | if attname in conds and conds[attname] != value:
102 | return None
103 | conds[attname] = value
104 | return conds
105 |
106 | def clean_dnf(tree, aliases):
107 | cleaned = [clean_conj(conj, alias) for conj in tree for alias in aliases]
108 | # Remove deleted conjunctions
109 | cleaned = [conj for conj in cleaned if conj is not None]
110 | # Any empty conjunction eats up the rest
111 | # NOTE: a more elaborate DNF reduction is not really needed,
112 | # just keep your querysets sane.
113 | if not all(cleaned):
114 | return [{}]
115 | return cleaned
116 |
117 | def add_join_conds(dnf, query):
118 | from collections import defaultdict
119 |
120 |     # A cond on parent (alias, col) means the same cond applies to target and vice versa
121 | join_exts = defaultdict(list)
122 | for alias, join in query.alias_map.items():
123 | if query.alias_refcount[alias] and isinstance(join, Join):
124 | for parent_col, target_col in join.join_cols:
125 | join_exts[join.parent_alias, parent_col].append((join.table_alias, target_col))
126 | join_exts[join.table_alias, target_col].append((join.parent_alias, parent_col))
127 |
128 | if not join_exts:
129 | return dnf
130 |
131 | return {
132 | conj | {
133 | (join_alias, join_col, v, negation)
134 | for alias, col, v, negation in conj
135 | for join_alias, join_col in join_exts[alias, col]
136 | }
137 | for conj in dnf
138 | }
139 |
140 | def query_dnf(query):
141 | def table_for(alias):
142 | return alias if alias == main_alias else query.alias_map[alias].table_name
143 |
144 | dnf = _dnf(query.where)
145 | dnf = add_join_conds(dnf, query)
146 |
147 | # NOTE: we exclude content_type as it never changes and will hold dead invalidation info
148 | main_alias = query.model._meta.db_table
149 | aliases = {alias for alias, join in query.alias_map.items()
150 | if query.alias_refcount[alias]} \
151 | | {main_alias} - {'django_content_type'}
152 | tables = group_by(table_for, aliases)
153 | return {table: clean_dnf(dnf, table_aliases) for table, table_aliases in tables.items()}
154 |
155 | if qs.query.combined_queries:
156 | dnfs_ = join_with(lcat, (query_dnf(q) for q in qs.query.combined_queries))
157 | else:
158 | dnfs_ = query_dnf(qs.query)
159 |
160 | # Add any subqueries used for annotation
161 | if qs.query.annotations:
162 | subqueries = (query_dnf(getattr(q, 'query', None))
163 | for q in qs.query.annotations.values() if isinstance(q, Subquery))
164 | dnfs_.update(join_with(lcat, subqueries))
165 |
166 | return dnfs_
167 |
168 |
169 | class Some:
170 | def __str__(self):
171 | return 'SOME'
172 | __repr__ = __str__
173 |
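A shape sketch of what dnfs() produces, using the test models for concreteness;
values are illustrative, the structure is {db_table: [conjunction_dict, ...]}:

    dnfs(Post.objects.filter(category=2, visible=True))
    # => {'tests_post': [{'category_id': 2, 'visible': True}]}

    dnfs(Post.objects.filter(title__contains='x'))
    # => {'tests_post': [{}]}  # unsupported lookup degrades to "any change invalidates"
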
--------------------------------------------------------------------------------
/cacheops/utils.py:
--------------------------------------------------------------------------------
1 | import re
2 | import json
3 | import inspect
4 | import sys
5 | from funcy import memoize, compose, wraps, any, any_fn, select_values, mapcat
6 |
7 | from django.db import models
8 | from django.http import HttpRequest
9 |
10 | from .conf import model_profile
11 |
12 |
13 | def model_family(model):
14 | """
15 |     A family is the set of models sharing a database table; events on one should affect the others.
16 |
17 |     We simply collect all proxy models, including subclasses, superclasses and siblings.
18 | Two descendants of an abstract model are not family - they cannot affect each other.
19 | """
20 | if model._meta.abstract: # No table - no family
21 | return set()
22 |
23 | @memoize
24 | def class_tree(cls):
25 | # NOTE: we also list multitable submodels here, we just don't care.
26 | # Cacheops doesn't support them anyway.
27 | return {cls} | set(mapcat(class_tree, cls.__subclasses__()))
28 |
29 | table_bases = {b for b in model.__mro__ if issubclass(b, models.Model) and b is not models.Model
30 | and not b._meta.proxy and not b._meta.abstract}
31 | family = set(mapcat(class_tree, table_bases))
32 | return {cls for cls in family if not cls._meta.abstract}
33 |
34 | @memoize
35 | def family_has_profile(cls):
36 | return any(model_profile, model_family(cls))
37 |
38 |
39 | class MonkeyProxy(object):
40 | pass
41 |
42 | def monkey_mix(cls, mixin):
43 | """
44 | Mixes a mixin into existing class.
45 | Does not use actual multi-inheritance mixins, just monkey patches methods.
46 | Mixin methods can call copies of original ones stored in `_no_monkey` proxy:
47 |
48 | class SomeMixin(object):
49 | def do_smth(self, arg):
50 | ... do smth else before
51 | self._no_monkey.do_smth(self, arg)
52 | ... do smth else after
53 | """
54 | assert not hasattr(cls, '_no_monkey'), 'Multiple monkey mix not supported'
55 | cls._no_monkey = MonkeyProxy()
56 |
57 | test = any_fn(inspect.isfunction, inspect.ismethoddescriptor)
58 | methods = select_values(test, mixin.__dict__)
59 |
60 | for name, method in methods.items():
61 | if hasattr(cls, name):
62 | setattr(cls._no_monkey, name, getattr(cls, name))
63 | setattr(cls, name, method)
64 |
65 |
66 | @memoize
67 | def stamp_fields(model):
68 | """
69 |     Returns a serialized description of model fields.
70 | """
71 | def _stamp(field):
72 | name, class_name, *_ = field.deconstruct()
73 | return name, class_name, field.attname, field.column
74 |
75 | stamp = str(sorted(map(_stamp, model._meta.fields)))
76 | return md5hex(stamp)
77 |
78 |
79 | ### Cache keys calculation
80 |
81 | def obj_key(obj):
82 | if isinstance(obj, models.Model):
83 | return '%s.%s.%s' % (obj._meta.app_label, obj._meta.model_name, obj.pk)
84 | elif hasattr(obj, 'build_absolute_uri'):
85 | return obj.build_absolute_uri() # Only vary HttpRequest by uri
86 | elif inspect.isfunction(obj):
87 | factors = [obj.__module__, obj.__name__]
88 |         # It's really useful to ignore this while the code is still in development
89 | if hasattr(obj, '__code__') and not obj.__globals__.get('CACHEOPS_DEBUG'):
90 | factors.append(obj.__code__.co_firstlineno)
91 | return factors
92 | else:
93 | return str(obj)
94 |
95 | def get_cache_key(*factors):
96 | return md5hex(json.dumps(factors, sort_keys=True, default=obj_key))
97 |
98 | def cached_view_fab(_cached):
99 | def force_render(response):
100 | if hasattr(response, 'render') and callable(response.render):
101 | response.render()
102 | return response
103 |
104 | def cached_view(*dargs, **dkwargs):
105 | def decorator(func):
106 | cached_func = _cached(*dargs, **dkwargs)(compose(force_render, func))
107 |
108 | @wraps(func)
109 | def wrapper(request, *args, **kwargs):
110 | assert isinstance(request, HttpRequest), \
111 | "A view should be passed with HttpRequest as first argument"
112 | if request.method not in ('GET', 'HEAD'):
113 | return func(request, *args, **kwargs)
114 |
115 | return cached_func(request, *args, **kwargs)
116 |
117 | if hasattr(cached_func, 'invalidate'):
118 | wrapper.invalidate = cached_func.invalidate
119 | wrapper.key = cached_func.key
120 |
121 | return wrapper
122 | return decorator
123 | return cached_view
124 |
125 |
126 | ### Whitespace handling for template tags
127 |
128 | from django.utils.safestring import mark_safe
129 |
130 | NEWLINE_BETWEEN_TAGS = mark_safe('>\n<')
131 | SPACE_BETWEEN_TAGS = mark_safe('> <')
132 |
133 | def carefully_strip_whitespace(text):
134 | def repl(m):
135 | return NEWLINE_BETWEEN_TAGS if '\n' in m.group(0) else SPACE_BETWEEN_TAGS
136 | text = re.sub(r'>\s{2,}<', repl, text)
137 | return text
138 |
139 |
140 | ### hashing helpers
141 |
142 | import hashlib
143 |
144 |
145 | class md5:
146 | def __init__(self, s=None):
147 | # set usedforsecurity for FIPS compliance
148 | kwargs = {'usedforsecurity': False} if sys.version_info >= (3, 9) else {}
149 | self.md5 = hashlib.md5(**kwargs)
150 | if s is not None:
151 | self.update(s)
152 |
153 | def update(self, s):
154 | return self.md5.update(s.encode('utf-8'))
155 |
156 | def hexdigest(self):
157 | return self.md5.hexdigest()
158 |
159 |
160 | def md5hex(s):
161 | return md5(s).hexdigest()
162 |
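A sketch of how the key helpers compose: factors are JSON-encoded with obj_key()
as the fallback encoder, then md5-hexed, so any mix of plain values, model
instances and functions yields a stable key (some_post is a hypothetical Post
instance):

    get_cache_key('fetch', {'pk': 1})  # -> 32-char hex digest
    get_cache_key(some_post)           # a model instance reduces to 'tests.post.<pk>'
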
--------------------------------------------------------------------------------
/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 |
5 | if __name__ == "__main__":
6 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
7 |
8 | from django.core.management import execute_from_command_line
9 |
10 | execute_from_command_line(sys.argv)
11 |
--------------------------------------------------------------------------------
/publish.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | set -ex
4 |
5 | NAME=django-cacheops
6 | VERSION=`awk '/__version__ = /{gsub(/'\''/, "", $3); print $3}' cacheops/__init__.py`
7 |
8 | echo "Publishing $NAME-$VERSION..."
9 | python setup.py sdist bdist_wheel
10 | twine check dist/$NAME-$VERSION*
11 | twine upload --skip-existing -uSuor dist/$NAME-$VERSION*
12 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | DJANGO_SETTINGS_MODULE=tests.settings
3 | python_files = test*.py
4 | addopts = --no-migrations
5 |
--------------------------------------------------------------------------------
/requirements-test.txt:
--------------------------------------------------------------------------------
1 | pytest==8.3.5
2 | pytest-django==4.11.1
3 | django>=3.2
4 | redis>=3.0.0
5 | funcy>=1.8
6 | before_after==1.0.0
7 | jinja2>=2.10
8 | dill
9 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal = 1
3 |
4 | [metadata]
5 | license_file = LICENSE
6 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 |
4 | # Remove build status
5 | README = open('README.rst').read().replace('|Build Status|', '', 1)
6 |
7 |
8 | setup(
9 | name='django-cacheops',
10 | version='7.2',
11 | author='Alexander Schepanovski',
12 | author_email='suor.web@gmail.com',
13 |
14 | description='A slick ORM cache with automatic granular event-driven invalidation for Django.',
15 | long_description=README,
16 | url='http://github.com/Suor/django-cacheops',
17 | license='BSD',
18 |
19 | packages=[
20 | 'cacheops',
21 | 'cacheops.management',
22 | 'cacheops.management.commands',
23 | 'cacheops.templatetags'
24 | ],
25 |     python_requires='>=3.8',
26 | install_requires=[
27 | 'django>=3.2',
28 | 'redis>=3.0.0',
29 | 'funcy>=1.8',
30 | ],
31 | classifiers=[
32 | 'Development Status :: 5 - Production/Stable',
33 | 'License :: OSI Approved :: BSD License',
34 | 'Operating System :: OS Independent',
35 | 'Programming Language :: Python',
36 | 'Programming Language :: Python :: 3.8',
37 | 'Programming Language :: Python :: 3.9',
38 | 'Programming Language :: Python :: 3.10',
39 | 'Programming Language :: Python :: 3.11',
40 | 'Programming Language :: Python :: 3.12',
41 | 'Programming Language :: Python :: 3.13',
42 | 'Framework :: Django',
43 | 'Framework :: Django :: 3.2',
44 | 'Framework :: Django :: 4.0',
45 | 'Framework :: Django :: 4.1',
46 | 'Framework :: Django :: 4.2',
47 | 'Framework :: Django :: 5.0',
48 | 'Framework :: Django :: 5.1',
49 | 'Framework :: Django :: 5.2',
50 |
51 | 'Environment :: Web Environment',
52 | 'Intended Audience :: Developers',
53 | 'Topic :: Internet :: WWW/HTTP',
54 | 'Topic :: Software Development :: Libraries :: Python Modules',
55 | ],
56 |
57 | zip_safe=False,
58 | include_package_data=True,
59 | )
60 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suor/django-cacheops/d4b390372d839ff1389662cb311a916eb2c992ed/tests/__init__.py
--------------------------------------------------------------------------------
/tests/bench.py:
--------------------------------------------------------------------------------
1 | from cacheops import invalidate_obj, invalidate_model
2 | from cacheops.conf import settings
3 | from cacheops.redis import redis_client
4 | from cacheops.tree import dnfs
5 |
6 | from .models import Category, Post, Extra
7 |
8 |
9 | posts = list(Post.objects.cache().all())
10 | posts_pickle = settings.CACHEOPS_SERIALIZER.dumps(posts)
11 |
12 | def do_pickle():
13 | settings.CACHEOPS_SERIALIZER.dumps(posts)
14 |
15 | def do_unpickle():
16 | settings.CACHEOPS_SERIALIZER.loads(posts_pickle)
17 |
18 |
19 | get_key = Category.objects.filter(pk=1).order_by()._cache_key()
20 | def invalidate_get():
21 | redis_client.delete(get_key)
22 |
23 | def do_get():
24 | Category.objects.cache().get(pk=1)
25 |
26 | def do_get_nocache():
27 | Category.objects.nocache().get(pk=1)
28 |
29 |
30 | c = Category.objects.first()
31 | def invalidate_count():
32 | invalidate_obj(c)
33 |
34 | def do_count():
35 | Category.objects.cache().count()
36 |
37 | def do_count_nocache():
38 | Category.objects.nocache().count()
39 |
40 |
41 | fetch_qs = Category.objects.all()
42 | fetch_key = fetch_qs._cache_key()
43 |
44 | def invalidate_fetch():
45 | redis_client.delete(fetch_key)
46 |
47 | def do_fetch():
48 | list(Category.objects.cache().all())
49 |
50 | def do_fetch_nocache():
51 | list(Category.objects.nocache().all())
52 |
53 | def do_fetch_construct():
54 | Category.objects.all()
55 |
56 | def do_fetch_cache_key():
57 | fetch_qs._cache_key()
58 |
59 | filter_qs = Category.objects.filter(pk=1)
60 | def do_filter_cache_key():
61 | filter_qs._cache_key()
62 |
63 |
64 | def do_common_construct():
65 | return Category.objects.filter(pk=1).exclude(title__contains='Hi').order_by('title')[:20]
66 |
67 | def do_common_inplace():
68 | return Category.objects.inplace() \
69 | .filter(pk=1).exclude(title__contains='Hi').order_by('title')[:20]
70 |
71 | common_qs = do_common_construct()
72 | common_key = common_qs._cache_key()
73 |
74 | def do_common_cache_key():
75 | common_qs._cache_key()
76 |
77 | def do_common_dnfs():
78 | dnfs(common_qs)
79 |
80 | def do_common():
81 | qs = Category.objects.filter(pk=1).exclude(title__contains='Hi').order_by('title').cache()[:20]
82 | list(qs)
83 |
84 | def do_common_nocache():
85 | qs = Category.objects.filter(pk=1).exclude(title__contains='Hi').order_by('title') \
86 | .nocache()[:20]
87 | list(qs)
88 |
89 | def invalidate_common():
90 | redis_client.delete(common_key)
91 |
92 | def prepare_obj():
93 | return Category.objects.cache().get(pk=1)
94 |
95 | def do_invalidate_obj(obj):
96 | invalidate_obj(obj)
97 |
98 | def do_save_obj(obj):
99 | obj.save()
100 |
101 |
102 | ### Complex queryset
103 |
104 | from django.db.models import Q
105 |
106 | def do_complex_construct():
107 | return Post.objects.filter(id__gt=1, title='Hi').exclude(category__in=[10, 20]) \
108 | .filter(Q(id__range=(10, 20)) | ~Q(title__contains='abc')) \
109 | .select_related('category').prefetch_related('category') \
110 | .order_by('title')[:10]
111 |
112 | def do_complex_inplace():
113 | return Post.objects.inplace() \
114 | .filter(id__gt=1, title='Hi').exclude(category__in=[10, 20]) \
115 | .filter(Q(id__range=(10, 20)) | ~Q(title__contains='abc')) \
116 | .select_related('category').prefetch_related('category') \
117 | .order_by('title')[:10]
118 |
119 | complex_qs = do_complex_construct()
120 | def do_complex_cache_key():
121 | complex_qs._cache_key()
122 |
123 | def do_complex_dnfs():
124 | dnfs(complex_qs)
125 |
126 |
127 | ### More invalidation
128 |
129 | def prepare_cache():
130 | def _variants(*args, **kwargs):
131 | qs = Extra.objects.cache().filter(*args, **kwargs)
132 | qs.count()
133 | list(qs)
134 | list(qs[:2])
135 | list(qs.values())
136 |
137 | _variants(pk=1)
138 | _variants(post=1)
139 | _variants(tag=5)
140 | _variants(to_tag=10)
141 |
142 | _variants(pk=1, post=1)
143 | _variants(pk=1, tag=5)
144 | _variants(post=1, tag=5)
145 |
146 | _variants(pk=1, post=1, tag=5)
147 | _variants(pk=1, post=1, to_tag=10)
148 |
149 | _variants(Q(pk=1) | Q(tag=5))
150 | _variants(Q(pk=1) | Q(tag=1))
151 | _variants(Q(pk=1) | Q(tag=2))
152 | _variants(Q(pk=1) | Q(tag=3))
153 | _variants(Q(pk=1) | Q(tag=4))
154 |
155 | return Extra.objects.cache().get(pk=1)
156 |
157 | def do_invalidate_model(obj):
158 | invalidate_model(obj.__class__)
159 |
160 |
161 | TESTS = [
162 | ('pickle', {'run': do_pickle}),
163 | ('unpickle', {'run': do_unpickle}),
164 |
165 | ('get_nocache', {'run': do_get_nocache}),
166 | ('get_hit', {'prepare_once': do_get, 'run': do_get}),
167 | ('get_miss', {'prepare': invalidate_get, 'run': do_get}),
168 |
169 | ('count_nocache', {'run': do_count_nocache}),
170 | ('count_hit', {'prepare_once': do_count, 'run': do_count}),
171 | ('count_miss', {'prepare': invalidate_count, 'run': do_count}),
172 |
173 | ('fetch_construct', {'run': do_fetch_construct}),
174 | ('fetch_nocache', {'run': do_fetch_nocache}),
175 | ('fetch_hit', {'prepare_once': do_fetch, 'run': do_fetch}),
176 | ('fetch_miss', {'prepare': invalidate_fetch, 'run': do_fetch}),
177 | ('fetch_cache_key', {'run': do_fetch_cache_key}),
178 |
179 | ('filter_cache_key', {'run': do_filter_cache_key}),
180 | ('common_construct', {'run': do_common_construct}),
181 | ('common_inplace', {'run': do_common_inplace}),
182 | ('common_cache_key', {'run': do_common_cache_key}),
183 | ('common_dnfs', {'run': do_common_dnfs}),
184 | ('common_nocache', {'run': do_common_nocache}),
185 | ('common_hit', {'prepare_once': do_common, 'run': do_common}),
186 | ('common_miss', {'prepare': invalidate_common, 'run': do_common}),
187 |
188 | ('invalidate_obj', {'prepare': prepare_obj, 'run': do_invalidate_obj}),
189 | ('save_obj', {'prepare': prepare_obj, 'run': do_save_obj}),
190 |
191 | ('complex_construct', {'run': do_complex_construct}),
192 | ('complex_inplace', {'run': do_complex_inplace}),
193 | ('complex_cache_key', {'run': do_complex_cache_key}),
194 | ('complex_dnfs', {'run': do_complex_dnfs}),
195 |
196 | ('big_invalidate', {'prepare': prepare_cache, 'run': do_invalidate_obj}),
197 | ('model_invalidate', {'prepare': prepare_cache, 'run': do_invalidate_model}),
198 | ]
199 |
--------------------------------------------------------------------------------
/tests/fixtures/basic.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "tests.category",
4 | "pk": 1,
5 | "fields": {
6 | "title": "Django"
7 | }
8 | },
9 | {
10 | "model": "tests.category",
11 | "pk": 2,
12 | "fields": {
13 | "title": "Rails"
14 | }
15 | },
16 | {
17 | "model": "tests.category",
18 | "pk": 3,
19 | "fields": {
20 | "title": "Perl"
21 | }
22 | },
23 | {
24 | "model": "tests.category",
25 | "pk": 4,
26 | "fields": {
27 | "title": "Python"
28 | }
29 | },
30 | {
31 | "model": "tests.category",
32 | "pk": 5,
33 | "fields": {
34 | "title": "Node.js"
35 | }
36 | },
37 |
38 | {
39 | "model": "tests.post",
40 | "pk": 1,
41 | "fields": {
42 | "title": "Cacheops",
43 | "category": 1,
44 | "visible": true
45 | }
46 | },
47 | {
48 | "model": "tests.post",
49 | "pk": 2,
50 | "fields": {
51 | "title": "Implicit variable as pronoun",
52 | "category": 3,
53 | "visible": true
54 | }
55 | },
56 | {
57 | "model": "tests.post",
58 | "pk": 3,
59 | "fields": {
60 | "title": "Perl 6 hyperoperator",
61 | "category": 3,
62 | "visible": false
63 | }
64 | },
65 |
66 | {
67 | "model": "tests.extra",
68 | "pk": 1,
69 | "fields": {
70 | "post": 1,
71 | "tag": 5,
72 | "to_tag": 10
73 | }
74 | },
75 | {
76 | "model": "tests.extra",
77 | "pk": 2,
78 | "fields": {
79 | "post": 2,
80 | "tag": 10,
81 | "to_tag": 5
82 | }
83 | }
84 | ]
85 |
--------------------------------------------------------------------------------
/tests/models.py:
--------------------------------------------------------------------------------
1 | import os
2 | import uuid
3 | from datetime import date, time
4 |
5 | from django.db import models
6 | from django.db.models.query import QuerySet
7 | from django.db.models import sql, manager
8 | from django.contrib.auth.models import User
9 | from django.utils import timezone
10 |
11 |
12 | ### For basic tests and bench
13 |
14 | class Category(models.Model):
15 | title = models.CharField(max_length=128)
16 |
17 | def __unicode__(self):
18 | return self.title
19 |
20 | class Post(models.Model):
21 | title = models.CharField(max_length=128)
22 | category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='posts')
23 | visible = models.BooleanField(default=True)
24 |
25 | def __unicode__(self):
26 | return self.title
27 |
28 | class Extra(models.Model):
29 | post = models.OneToOneField(Post, on_delete=models.CASCADE)
30 | tag = models.IntegerField(db_column='custom_column_name', unique=True)
31 | to_tag = models.ForeignKey('self', on_delete=models.CASCADE, to_field='tag', null=True)
32 |
33 | def __unicode__(self):
34 | return 'Extra(post_id=%s, tag=%s)' % (self.post_id, self.tag)
35 |
36 |
37 | ### Specific and custom fields
38 |
39 | class CustomValue(object):
40 | def __init__(self, value):
41 | self.value = value
42 |
43 | def __str__(self):
44 | return str(self.value)
45 |
46 | def __eq__(self, other):
47 | return isinstance(other, CustomValue) and self.value == other.value
48 |
49 | class CustomField(models.Field):
50 | def db_type(self, connection):
51 | return 'text'
52 |
53 | def to_python(self, value):
54 | if isinstance(value, CustomValue):
55 | return value
56 | return CustomValue(value)
57 |
58 |     def from_db_value(self, value, expression, conn):
59 | return self.to_python(value)
60 |
61 | def get_prep_value(self, value):
62 | return value.value
63 |
64 | class CustomWhere(sql.where.WhereNode):
65 | pass
66 |
67 | class CustomQuery(sql.Query):
68 | pass
69 |
70 | class CustomManager(models.Manager):
71 | def get_query_set(self):
72 | q = CustomQuery(self.model, CustomWhere)
73 | return QuerySet(self.model, q)
74 | get_queryset = get_query_set
75 |
76 |
77 | class IntegerArrayField(models.Field):
78 | def db_type(self, connection):
79 | return 'text'
80 |
81 | def to_python(self, value):
82 | if value in (None, ''):
83 | return None
84 | if isinstance(value, list):
85 | return value
86 | return [int(v) for v in value.split(',')]
87 |
88 |     def from_db_value(self, value, expression, conn):
89 | return self.to_python(value)
90 |
91 | def get_prep_value(self, value):
92 | return ','.join(map(str, value))
93 |
94 | def custom_value_default():
95 | return CustomValue('default')
96 |
97 | class Weird(models.Model):
98 | date_field = models.DateField(default=date(2000, 1, 1))
99 | datetime_field = models.DateTimeField(default=timezone.now)
100 | time_field = models.TimeField(default=time(10, 10))
101 | list_field = IntegerArrayField(default=list, blank=True)
102 | custom_field = CustomField(default=custom_value_default)
103 | binary_field = models.BinaryField()
104 |
105 | objects = models.Manager()
106 | customs = CustomManager()
107 |
108 |
109 | # TODO: check other new fields:
110 | # - PostgreSQL ones: HStoreField, RangeFields, unaccent
111 | # - Other: DurationField
112 | if os.environ.get('CACHEOPS_DB') in {'postgresql', 'postgis'}:
113 | from django.contrib.postgres.fields import ArrayField
114 | try:
115 | from django.db.models import JSONField
116 | except ImportError:
117 | try:
118 | from django.contrib.postgres.fields import JSONField # Used before Django 3.1
119 | except ImportError:
120 | JSONField = None
121 |
122 | class TaggedPost(models.Model):
123 | name = models.CharField(max_length=200)
124 | tags = ArrayField(models.IntegerField())
125 | if JSONField:
126 | meta = JSONField()
127 |
128 |
129 | # 16
130 | class Profile(models.Model):
131 | user = models.ForeignKey(User, on_delete=models.CASCADE)
132 | tag = models.IntegerField()
133 |
134 |
135 | # Proxy model
136 | class Video(models.Model):
137 | title = models.CharField(max_length=128)
138 |
139 | class VideoProxy(Video):
140 | class Meta:
141 | proxy = True
142 |
143 | class NonCachedVideoProxy(Video):
144 | class Meta:
145 | proxy = True
146 |
147 | class NonCachedMedia(models.Model):
148 | title = models.CharField(max_length=128)
149 |
150 | class MediaProxy(NonCachedMedia):
151 | class Meta:
152 | proxy = True
153 |
154 |
155 | class MediaType(models.Model):
156 | name = models.CharField(max_length=50)
157 |
158 | # Multi-table inheritance
159 | class Media(models.Model):
160 | name = models.CharField(max_length=128)
161 | media_type = models.ForeignKey(
162 | MediaType,
163 | on_delete=models.CASCADE,
164 | )
165 |
166 | def __str__(self):
167 | return str(self.media_type)
168 |
169 |
170 | class Movie(Media):
171 | year = models.IntegerField()
172 |
173 |
174 | class Scene(models.Model):
175 | """Model with FK to submodel."""
176 | name = models.CharField(max_length=50)
177 | movie = models.ForeignKey(
178 | Movie,
179 | on_delete=models.CASCADE,
180 | related_name="scenes",
181 | )
182 |
183 | # M2M models
184 | class Label(models.Model):
185 | text = models.CharField(max_length=127, blank=True, default='')
186 |
187 | class Brand(models.Model):
188 | labels = models.ManyToManyField(Label, related_name='brands')
189 |
190 | # M2M with explicit through models
191 | class LabelT(models.Model):
192 | text = models.CharField(max_length=127, blank=True, default='')
193 |
194 | class BrandT(models.Model):
195 | labels = models.ManyToManyField(LabelT, related_name='brands', through='Labeling')
196 |
197 | class Labeling(models.Model):
198 | label = models.ForeignKey(LabelT, on_delete=models.CASCADE)
199 | brand = models.ForeignKey(BrandT, on_delete=models.CASCADE)
200 | tag = models.IntegerField()
201 |
202 | class PremiumBrand(Brand):
203 | extra = models.CharField(max_length=127, blank=True, default='')
204 |
205 |
206 | # local_get
207 | class Local(models.Model):
208 | tag = models.IntegerField(null=True)
209 |
210 |
211 | # 45
212 | class CacheOnSaveModel(models.Model):
213 | title = models.CharField(max_length=32)
214 |
215 |
216 | # 47
217 | class DbAgnostic(models.Model):
218 | pass
219 |
220 | class DbBinded(models.Model):
221 | pass
222 |
223 | # contrib.postgis
224 | if os.environ.get('CACHEOPS_DB') == 'postgis':
225 | from django.contrib.gis.db import models as gis_models
226 |
227 | class Geometry(gis_models.Model):
228 | point = gis_models.PointField(geography=True, dim=3, blank=True, null=True, default=None)
229 |
230 |
231 | # 145
232 | class One(models.Model):
233 | boolean = models.BooleanField(default=False)
234 |
235 | def set_boolean_true(sender, instance, created, **kwargs):
236 | if created:
237 | return
238 |
239 | dialog = One.objects.cache().get(id=instance.id)
240 | assert dialog.boolean is True
241 |
242 | from django.db.models.signals import post_save
243 | post_save.connect(set_boolean_true, sender=One)
244 |
245 |
246 | # 312
247 | class Device(models.Model):
248 | uid = models.UUIDField(default=uuid.uuid4)
249 | model = models.CharField(max_length=64)
250 |
251 |
252 | # 333
253 | class CustomQuerySet(QuerySet):
254 | pass
255 |
256 |
257 | class CustomFromQSManager(manager.BaseManager.from_queryset(CustomQuerySet)):
258 | use_for_related_fields = True
259 |
260 |
261 | class CustomFromQSModel(models.Model):
262 | boolean = models.BooleanField(default=False)
263 | objects = CustomFromQSManager()
264 |
265 |
266 | # 352
267 | class CombinedField(models.CharField):
268 | def __init__(self, *args, **kwargs):
269 | super().__init__(*args, **kwargs)
270 | self.another_field = models.CharField(*args, **kwargs)
271 |
272 | def contribute_to_class(self, cls, name, **kwargs):
273 | super().contribute_to_class(cls, name, private_only=True)
274 | self.another_field.contribute_to_class(cls, name, **kwargs)
275 |
276 |
277 | class CombinedFieldModel(models.Model):
278 | text = CombinedField(max_length=8, default='example')
279 |
280 |
281 | # 353
282 | class Foo(models.Model):
283 | pass
284 |
285 |
286 | class Bar(models.Model):
287 | foo = models.OneToOneField(
288 | to="Foo",
289 | on_delete=models.SET_NULL,
290 | related_name='bar',
291 | blank=True,
292 | null=True
293 | )
294 |
295 |
296 | # 385
297 | class Client(models.Model):
298 | def __init__(self, *args, **kwargs):
299 |         # copied from Django 2.1.5 (it no longer exists in the Django versions installed by current requirements)
300 | def curry(_curried_func, *args, **kwargs):
301 | def _curried(*moreargs, **morekwargs):
302 | return _curried_func(*args, *moreargs, **{**kwargs, **morekwargs})
303 |
304 | return _curried
305 |
306 | super().__init__(*args, **kwargs)
307 | setattr(self, '_get_private_data', curry(sum, [1, 2, 3, 4]))
308 |
309 | name = models.CharField(max_length=255)
310 |
311 |
312 | # Abstract models
313 | class Abs(models.Model):
314 | class Meta:
315 | abstract = True
316 |
317 | class Concrete1(Abs):
318 | pass
319 |
320 | class AbsChild(Abs):
321 | class Meta:
322 | abstract = True
323 |
324 | class Concrete2(AbsChild):
325 | pass
326 |
327 | class NoProfile(models.Model):
328 | title = models.CharField(max_length=128)
329 |
330 | class NoProfileProxy(NoProfile):
331 | class Meta:
332 | proxy = True
333 |
334 | class AbsNoProfile(NoProfile):
335 | class Meta:
336 | abstract = True
337 |
338 | class NoProfileChild(AbsNoProfile):
339 | pass
340 |
341 |
342 | class ParentId(models.Model):
343 | pass
344 |
345 | class ParentStr(models.Model):
346 | name = models.CharField(max_length=128, primary_key=True)
347 |
348 | class Mess(ParentId, ParentStr):
349 | pass
350 |
351 | class MessChild(Mess):
352 | pass
353 |
--------------------------------------------------------------------------------
/tests/settings.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | INSTALLED_APPS = [
4 | 'cacheops',
5 | 'django.contrib.contenttypes',
6 | 'django.contrib.auth',
7 | 'tests',
8 | ]
9 |
10 | ROOT_URLCONF = 'tests.urls'
11 |
12 | MIDDLEWARE_CLASSES = []
13 |
14 | AUTH_PROFILE_MODULE = 'tests.UserProfile'
15 |
16 | DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
17 | USE_TZ = True
18 |
19 | # Django replaces this, but it still wants it. *shrugs*
20 | DATABASE_ENGINE = 'django.db.backends.sqlite3'
21 | if os.environ.get('CACHEOPS_DB') == 'postgresql':
22 | DATABASES = {
23 | 'default': {
24 | 'ENGINE': 'django.db.backends.postgresql_psycopg2',
25 | 'NAME': 'cacheops',
26 | 'USER': 'cacheops',
27 | 'PASSWORD': 'cacheops',
28 | 'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
29 | },
30 | 'slave': {
31 | 'ENGINE': 'django.db.backends.postgresql_psycopg2',
32 | 'NAME': 'cacheops_slave',
33 | 'USER': 'cacheops',
34 | 'PASSWORD': 'cacheops',
35 | 'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
36 | },
37 | }
38 |
39 | # Use psycopg2cffi for PyPy
40 | try:
41 | import psycopg2 # noqa
42 | except ImportError:
43 | from psycopg2cffi import compat
44 | compat.register()
45 |
46 | elif os.environ.get('CACHEOPS_DB') == 'postgis':
47 | POSTGIS_VERSION = (2, 1, 1)
48 | DATABASES = {
49 | 'default': {
50 | 'ENGINE': 'django.contrib.gis.db.backends.postgis',
51 | 'NAME': 'cacheops',
52 | 'USER': 'cacheops',
53 | 'PASSWORD': 'cacheops',
54 | 'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
55 | },
56 | 'slave': {
57 | 'ENGINE': 'django.contrib.gis.db.backends.postgis',
58 | 'NAME': 'cacheops_slave',
59 | 'USER': 'cacheops',
60 | 'PASSWORD': 'cacheops',
61 | 'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
62 | },
63 | }
64 | elif os.environ.get('CACHEOPS_DB') == 'mysql':
65 | DATABASES = {
66 | 'default': {
67 | 'ENGINE': 'django.db.backends.mysql',
68 | 'NAME': 'cacheops',
69 | 'USER': 'root',
70 | 'PASSWORD': 'cacheops',
71 | 'HOST': os.getenv('MYSQL_HOST') or '127.0.0.1',
72 | },
73 | 'slave': {
74 | 'ENGINE': 'django.db.backends.mysql',
75 | 'NAME': 'cacheops_slave',
76 | 'USER': 'root',
77 | 'PASSWORD': 'cacheops',
78 | 'HOST': os.getenv('MYSQL_HOST') or '127.0.0.1',
79 | },
80 | }
81 | else:
82 | DATABASES = {
83 | 'default': {
84 | 'ENGINE': 'django.db.backends.sqlite3',
85 | 'NAME': 'sqlite.db',
86 |             # Make the in-memory sqlite test db work with threads
87 | # See https://code.djangoproject.com/ticket/12118
88 | 'TEST': {
89 | 'NAME': ':memory:cache=shared'
90 | }
91 | },
92 | 'slave': {
93 | 'ENGINE': 'django.db.backends.sqlite3',
94 | 'NAME': 'sqlite_slave.db',
95 | }
96 | }
97 |
98 | CACHEOPS_REDIS = {
99 | 'host': os.getenv('REDIS_HOST') or '127.0.0.1',
100 | 'port': 6379,
101 | 'db': 13,
102 | 'socket_timeout': 3,
103 | }
104 | CACHEOPS_DEFAULTS = {
105 | 'timeout': 60*60
106 | }
107 | CACHEOPS = {
108 | 'tests.local': {'local_get': True},
109 | 'tests.cacheonsavemodel': {'cache_on_save': True},
110 | 'tests.dbbinded': {'db_agnostic': False},
111 | 'tests.*': {},
112 | 'tests.noncachedvideoproxy': None,
113 | 'tests.noncachedmedia': None,
114 | 'tests.noprofile': None,
115 | 'auth.*': {},
116 | }
117 |
118 | if os.environ.get('CACHEOPS_PREFIX'):
119 | CACHEOPS_PREFIX = lambda q: 'p:'
120 |
121 | CACHEOPS_INSIDEOUT = bool(os.environ.get('CACHEOPS_INSIDEOUT'))
122 | CACHEOPS_DEGRADE_ON_FAILURE = bool(os.environ.get('CACHEOPS_DEGRADE_ON_FAILURE'))
123 | ALLOWED_HOSTS = ['testserver']
124 |
125 | SECRET_KEY = 'abc'
126 |
127 | TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates'}]
128 |
--------------------------------------------------------------------------------
/tests/test_extras.py:
--------------------------------------------------------------------------------
1 | from django.db import transaction
2 | from django.test import TestCase, override_settings
3 |
4 | from cacheops import cached_as, no_invalidation, invalidate_obj, invalidate_model, invalidate_all
5 | from cacheops.conf import settings
6 | from cacheops.signals import cache_read, cache_invalidated
7 |
8 | from .utils import BaseTestCase, make_inc
9 | from .models import Post, Category, Local, DbAgnostic, DbBinded
10 |
11 |
12 | class SettingsTests(TestCase):
13 | def test_context_manager(self):
14 | self.assertTrue(settings.CACHEOPS_ENABLED)
15 |
16 | with self.settings(CACHEOPS_ENABLED=False):
17 | self.assertFalse(settings.CACHEOPS_ENABLED)
18 |
19 | @override_settings(CACHEOPS_ENABLED=False)
20 | def test_decorator(self):
21 | self.assertFalse(settings.CACHEOPS_ENABLED)
22 |
23 |
24 | @override_settings(CACHEOPS_ENABLED=False)
25 | class ClassOverrideSettingsTests(TestCase):
26 | def test_class(self):
27 | self.assertFalse(settings.CACHEOPS_ENABLED)
28 |
29 |
30 | class SignalsTests(BaseTestCase):
31 | def setUp(self):
32 | super(SignalsTests, self).setUp()
33 |
34 | def set_signal(signal=None, **kwargs):
35 | self.signal_calls.append(kwargs)
36 |
37 | self.signal_calls = []
38 | cache_read.connect(set_signal, dispatch_uid=1, weak=False)
39 |
40 | def tearDown(self):
41 | super(SignalsTests, self).tearDown()
42 | cache_read.disconnect(dispatch_uid=1)
43 |
44 | def test_queryset(self):
45 | # Miss
46 | test_model = Category.objects.create(title="foo")
47 | Category.objects.cache().get(id=test_model.id)
48 | self.assertEqual(self.signal_calls, [{'sender': Category, 'func': None, 'hit': False}])
49 |
50 | # Hit
51 | self.signal_calls = []
52 | Category.objects.cache().get(id=test_model.id) # hit
53 | self.assertEqual(self.signal_calls, [{'sender': Category, 'func': None, 'hit': True}])
54 |
55 | def test_queryset_empty(self):
56 | list(Category.objects.cache().filter(pk__in=[]))
57 | self.assertEqual(self.signal_calls, [{'sender': Category, 'func': None, 'hit': False}])
58 |
59 | def test_cached_as(self):
60 | get_calls = make_inc(cached_as(Category.objects.filter(title='test')))
61 | func = get_calls.__wrapped__
62 |
63 | # Miss
64 | self.assertEqual(get_calls(), 1)
65 | self.assertEqual(self.signal_calls, [{'sender': None, 'func': func, 'hit': False}])
66 |
67 | # Hit
68 | self.signal_calls = []
69 | self.assertEqual(get_calls(), 1)
70 | self.assertEqual(self.signal_calls, [{'sender': None, 'func': func, 'hit': True}])
71 |
72 | def test_invalidation_signal(self):
73 | def set_signal(signal=None, **kwargs):
74 | signal_calls.append(kwargs)
75 |
76 | signal_calls = []
77 | cache_invalidated.connect(set_signal, dispatch_uid=1, weak=False)
78 |
79 | invalidate_all()
80 | invalidate_model(Post)
81 | c = Category.objects.create(title='Hey')
82 | self.assertEqual(signal_calls, [
83 | {'sender': None, 'obj_dict': None},
84 | {'sender': Post, 'obj_dict': None},
85 | {'sender': Category, 'obj_dict': {'id': c.pk, 'title': 'Hey'}},
86 | ])
87 |
88 |
89 | class LockingTests(BaseTestCase):
90 | def test_lock(self):
91 | import random
92 | import threading
93 | from .utils import ThreadWithReturnValue
94 | from before_after import before
95 |
96 | @cached_as(Post, lock=True, timeout=60)
97 | def func():
98 | return random.random()
99 |
100 | results = []
101 | locked = threading.Event()
102 | thread = [None]
103 |
104 | def second_thread():
105 | def _target():
106 | try:
107 | with before('redis.Redis.brpoplpush', lambda *a, **kw: locked.set()):
108 | results.append(func())
109 | except Exception:
110 | locked.set()
111 | raise
112 |
113 | thread[0] = ThreadWithReturnValue(target=_target)
114 | thread[0].start()
115 | assert locked.wait(1) # Wait until right before the block
116 |
117 | with before('random.random', second_thread):
118 | results.append(func())
119 |
120 | thread[0].join()
121 |
122 | self.assertEqual(results[0], results[1])
123 |
124 |
125 | class NoInvalidationTests(BaseTestCase):
126 | fixtures = ['basic']
127 |
128 | def _template(self, invalidate):
129 | post = Post.objects.cache().get(pk=1)
130 | invalidate(post)
131 |
132 | with self.assertNumQueries(0):
133 | Post.objects.cache().get(pk=1)
134 |
135 | def test_context_manager(self):
136 | def invalidate(post):
137 | with no_invalidation:
138 | invalidate_obj(post)
139 | self._template(invalidate)
140 |
141 | def test_decorator(self):
142 | self._template(no_invalidation(invalidate_obj))
143 |
144 | def test_nested(self):
145 | def invalidate(post):
146 | with no_invalidation:
147 | with no_invalidation:
148 | pass
149 | invalidate_obj(post)
150 | self._template(invalidate)
151 |
152 | def test_in_transaction(self):
153 | with transaction.atomic():
154 | post = Post.objects.cache().get(pk=1)
155 |
156 | with no_invalidation:
157 | post.save()
158 |
159 | with self.assertNumQueries(0):
160 | Post.objects.cache().get(pk=1)
161 |
162 |
163 | class LocalGetTests(BaseTestCase):
164 | def setUp(self):
165 | Local.objects.create(pk=1)
166 | super(LocalGetTests, self).setUp()
167 |
168 | def test_unhashable_args(self):
169 | Local.objects.cache().get(pk__in=[1, 2])
170 |
171 |
172 | class DbAgnosticTests(BaseTestCase):
173 | databases = ('default', 'slave')
174 |
175 | def test_db_agnostic_by_default(self):
176 | list(DbAgnostic.objects.cache())
177 |
178 | with self.assertNumQueries(0, using='slave'):
179 | list(DbAgnostic.objects.cache().using('slave'))
180 |
181 | def test_db_agnostic_disabled(self):
182 | list(DbBinded.objects.cache())
183 |
184 | with self.assertNumQueries(1, using='slave'):
185 | list(DbBinded.objects.cache().using('slave'))
186 |
187 |
188 | def test_model_family():
189 | from cacheops.utils import model_family
190 | from .models import Abs, Concrete1, AbsChild, Concrete2
191 | from .models import NoProfile, NoProfileProxy, AbsNoProfile, NoProfileChild
192 | from .models import ParentId, ParentStr, Mess, MessChild
193 |
194 |     # Abstract models do not have a family; children of an abstract model do not form one
195 | assert model_family(Abs) == set()
196 | assert model_family(Concrete1) == {Concrete1}
197 | assert model_family(AbsChild) == set()
198 | assert model_family(Concrete2) == {Concrete2}
199 |
200 |     # Everything is in the family except the abstract model
201 | assert model_family(NoProfile) == {NoProfile, NoProfileProxy, NoProfileChild}
202 | assert model_family(NoProfileProxy) == {NoProfile, NoProfileProxy, NoProfileChild}
203 | assert model_family(AbsNoProfile) == set()
204 | assert model_family(NoProfileChild) == {NoProfile, NoProfileProxy, NoProfileChild}
205 |
206 | # The worst of multiple inheritance
207 | assert model_family(Mess) == {Mess, MessChild, ParentId, ParentStr}
208 |
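209 | # A hedged sketch (hypothetical models, not part of this suite): the family matters
210 | # because a proxy writes to the same table as its base, so a save through one must
211 | # invalidate querysets cached against the other.
212 | #
213 | #     class Video(models.Model):
214 | #         title = models.CharField(max_length=128)
215 | #
216 | #     class PublishedVideo(Video):
217 | #         class Meta:
218 | #             proxy = True
219 | #
220 | #     list(Video.objects.cache())               # cached against the videos table
221 | #     PublishedVideo.objects.create(title='x')  # must drop the Video cache as well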
--------------------------------------------------------------------------------
/tests/test_low_level.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from cacheops.redis import redis_client
4 |
5 | from .models import User
6 | from .utils import BaseTestCase
7 |
8 |
9 | @pytest.fixture()
10 | def base(db):
11 | case = BaseTestCase()
12 | case.setUp()
13 | yield
14 | case.tearDown()
15 |
16 |
17 | def test_ttl(base):
18 | user = User.objects.create(username='Suor')
19 | qs = User.objects.cache(timeout=100).filter(pk=user.pk)
20 | list(qs)
21 | assert 90 <= redis_client.ttl(qs._cache_key()) <= 100
22 | assert redis_client.ttl(f'{qs._prefix}conj:auth_user:id={user.id}') > 100
23 |
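24 | # Why the inequality above holds: conj ("conjunction") keys index which cache keys
25 | # depend on which rows, so they get a longer TTL than the entries they track --
26 | # otherwise an invalidation set could expire before a still-live cache entry it
27 | # should delete. A hedged sketch of probing this from redis-cli (key names are
28 | # illustrative):
29 | #
30 | #     TTL <prefix>q:<query-hash>           # the cached queryset, ~100s here
31 | #     TTL <prefix>conj:auth_user:id=1      # its dependency index, deliberately longer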
--------------------------------------------------------------------------------
/tests/tests_sharding.py:
--------------------------------------------------------------------------------
1 | from django.core.exceptions import ImproperlyConfigured
2 | from django.test import override_settings
3 |
4 | from cacheops import cache, CacheMiss
5 | from .models import Category, Post, Extra
6 | from .utils import BaseTestCase
7 |
8 |
9 | class PrefixTests(BaseTestCase):
10 | databases = ('default', 'slave')
11 | fixtures = ['basic']
12 |
13 | def test_context(self):
14 |         prefix = ['']  # mutable cell so the prefix can change mid-test
15 | with override_settings(CACHEOPS_PREFIX=lambda _: prefix[0]):
16 | with self.assertNumQueries(2):
17 | Category.objects.cache().count()
18 | prefix[0] = 'x'
19 | Category.objects.cache().count()
20 |
21 | @override_settings(CACHEOPS_PREFIX=lambda q: q.db)
22 | def test_db(self):
23 | with self.assertNumQueries(1):
24 | list(Category.objects.cache())
25 |
26 | with self.assertNumQueries(1, using='slave'):
27 | list(Category.objects.cache().using('slave'))
28 | list(Category.objects.cache().using('slave'))
29 |
30 | @override_settings(CACHEOPS_PREFIX=lambda q: q.table)
31 | def test_table(self):
32 | self.assertTrue(Category.objects.all()._cache_key().startswith('tests_category'))
33 |
34 | with self.assertRaises(ImproperlyConfigured):
35 | list(Post.objects.filter(category__title='Django').cache())
36 |
37 | @override_settings(CACHEOPS_PREFIX=lambda q: q.table)
38 | def test_self_join_tables(self):
39 | list(Extra.objects.filter(to_tag__pk=1).cache())
40 |
41 | @override_settings(CACHEOPS_PREFIX=lambda q: q.table)
42 | def test_union_tables(self):
43 | qs = Post.objects.filter(pk=1).union(Post.objects.filter(pk=2)).cache()
44 | list(qs)
45 |
46 |
47 | class SimpleCacheTests(BaseTestCase):
48 | def test_prefix(self):
49 | with override_settings(CACHEOPS_PREFIX=lambda _: 'a'):
50 | cache.set("key", "value")
51 | self.assertEqual(cache.get("key"), "value")
52 |
53 | with self.assertRaises(CacheMiss):
54 | cache.get("key")
55 |
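56 |
57 | # A minimal illustrative sketch (not part of the upstream suite): since every cache
58 | # and conj key is namespaced by CACHEOPS_PREFIX, prefixes can double as per-tenant
59 | # cache isolation.
60 | class TenantIsolationSketchTests(BaseTestCase):
61 |     def test_tenants_do_not_share_keys(self):
62 |         with override_settings(CACHEOPS_PREFIX=lambda _: 'tenant_a:'):
63 |             cache.set("greeting", "hello")
64 |             self.assertEqual(cache.get("greeting"), "hello")
65 |
66 |         with override_settings(CACHEOPS_PREFIX=lambda _: 'tenant_b:'):
67 |             with self.assertRaises(CacheMiss):
68 |                 cache.get("greeting")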
--------------------------------------------------------------------------------
/tests/tests_transactions.py:
--------------------------------------------------------------------------------
1 | from django.db import connection, IntegrityError
2 | from django.db.transaction import atomic
3 | from django.test import TransactionTestCase
4 |
5 | from cacheops.transaction import queue_when_in_transaction
6 |
7 | from .models import Category, Post
8 | from .utils import run_in_thread
9 |
10 |
11 | def get_category():
12 | return Category.objects.cache().get(pk=1)
13 |
14 |
15 | class IntentionalRollback(Exception):
16 | pass
17 |
18 |
19 | class TransactionSupportTests(TransactionTestCase):
20 | databases = ('default', 'slave')
21 | fixtures = ['basic']
22 |
23 | def test_atomic(self):
24 | with atomic():
25 | obj = get_category()
26 | obj.title = 'Changed'
27 | obj.save()
28 | self.assertEqual('Changed', get_category().title)
29 | self.assertEqual('Django', run_in_thread(get_category).title)
30 | self.assertEqual('Changed', run_in_thread(get_category).title)
31 | self.assertEqual('Changed', get_category().title)
32 |
33 | def test_nested(self):
34 | with atomic():
35 | with atomic():
36 | obj = get_category()
37 | obj.title = 'Changed'
38 | obj.save()
39 | self.assertEqual('Changed', get_category().title)
40 | self.assertEqual('Django', run_in_thread(get_category).title)
41 | self.assertEqual('Changed', get_category().title)
42 | self.assertEqual('Django', run_in_thread(get_category).title)
43 | self.assertEqual('Changed', run_in_thread(get_category).title)
44 | self.assertEqual('Changed', get_category().title)
45 |
46 | def test_rollback(self):
47 | try:
48 | with atomic():
49 | obj = get_category()
50 | obj.title = 'Changed'
51 | obj.save()
52 | self.assertEqual('Changed', get_category().title)
53 | self.assertEqual('Django', run_in_thread(get_category).title)
54 | raise IntentionalRollback()
55 | except IntentionalRollback:
56 | pass
57 | self.assertEqual('Django', get_category().title)
58 | self.assertEqual('Django', run_in_thread(get_category).title)
59 |
60 | def test_nested_rollback(self):
61 | with atomic():
62 | try:
63 | with atomic():
64 | obj = get_category()
65 | obj.title = 'Changed'
66 | obj.save()
67 | self.assertEqual('Changed', get_category().title)
68 | self.assertEqual('Django', run_in_thread(get_category).title)
69 | raise IntentionalRollback()
70 | except IntentionalRollback:
71 | pass
72 | self.assertEqual('Django', get_category().title)
73 | self.assertEqual('Django', run_in_thread(get_category).title)
74 | self.assertEqual('Django', get_category().title)
75 | self.assertEqual('Django', run_in_thread(get_category).title)
76 |
77 | def test_smart_transactions(self):
78 | with atomic():
79 | get_category()
80 | with self.assertNumQueries(0):
81 | get_category()
82 | with atomic():
83 | with self.assertNumQueries(0):
84 | get_category()
85 |
86 | obj = get_category()
87 | obj.title += ' changed'
88 | obj.save()
89 |
90 | get_category()
91 | with self.assertNumQueries(1):
92 | get_category()
93 |
94 | def test_rollback_during_integrity_error(self):
95 |         # Prime the cache with the category
96 | get_category()
97 |
98 |         # Make the current DB "dirty" with a write
99 | with self.assertRaises(IntegrityError):
100 | with atomic():
101 | Post.objects.create(category_id=-1, title='')
102 |
103 |         # However, that write is rolled back, so the current DB should
104 |         # not be "dirty" and the cached category must still be served
105 |
106 | with self.assertNumQueries(0):
107 | get_category()
108 |
109 | def test_call_cacheops_cbs_before_on_commit_cbs(self):
110 | calls = []
111 |
112 | with atomic():
113 | def django_commit_handler():
114 | calls.append('django')
115 | connection.on_commit(django_commit_handler)
116 |
117 | @queue_when_in_transaction
118 | def cacheops_commit_handler(using):
119 | calls.append('cacheops')
120 | cacheops_commit_handler('default')
121 |
122 | self.assertEqual(calls, ['cacheops', 'django'])
123 |
124 | def test_multidb(self):
125 | try:
126 | with atomic('slave'):
127 | with atomic():
128 | obj = get_category()
129 | obj.title = 'Changed'
130 | obj.save()
131 | raise IntentionalRollback()
132 | except IntentionalRollback:
133 | pass
134 | self.assertEqual('Changed', get_category().title)
135 |
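136 |
137 | # A minimal sketch (not part of the upstream suite): outside of any transaction the
138 | # decorator has nothing to queue behind, so the wrapped callback runs immediately.
139 | class QueueOutsideTransactionTests(TransactionTestCase):
140 |     def test_runs_immediately_outside_transaction(self):
141 |         calls = []
142 |
143 |         @queue_when_in_transaction
144 |         def handler(using):
145 |             calls.append(using)
146 |
147 |         handler('default')  # no atomic() block is active here
148 |         self.assertEqual(calls, ['default'])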
--------------------------------------------------------------------------------
/tests/urls.py:
--------------------------------------------------------------------------------
1 | urlpatterns = []
2 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | from django.test import TestCase
2 |
3 | from cacheops import invalidate_all
4 | from cacheops.transaction import transaction_states
5 |
6 |
7 | class BaseTestCase(TestCase):
8 | def setUp(self):
9 |         # Emulate not being in a transaction by tricking the system to ignore its pretest level.
10 |         # TestCase wraps each test in 1 or 2 transactions, which alters cacheops behavior.
11 |         # The alternative, TransactionTestCase, is about 10x slower.
12 | from funcy import empty
13 | transaction_states._states, self._states \
14 | = empty(transaction_states._states), transaction_states._states
15 |
16 | invalidate_all()
17 |
18 | def tearDown(self):
19 | transaction_states._states = self._states
20 |
21 |
22 | def make_inc(deco=lambda x: x):  # build a call counter, optionally wrapped in deco
23 | calls = [0]
24 |
25 | @deco
26 | def inc(_=None, **kw):
27 | calls[0] += 1
28 | return calls[0]
29 |
30 | inc.get = lambda: calls[0]
31 | return inc
32 |
33 |
34 | # Thread utilities
35 | from threading import Thread
36 |
37 |
38 | class ThreadWithReturnValue(Thread):
39 | def __init__(self, *args, **kwargs):
40 |         super().__init__(*args, **kwargs)
41 | self._return = None
42 | self._exc = None
43 |
44 | def run(self):
45 | try:
46 | self._return = self._target(*self._args, **self._kwargs)
47 | except Exception as e:
48 | self._exc = e
49 | finally:
50 |             # Django does not drop postgres connections opened in new threads.
51 |             # This makes postgres complain about the db being accessed when we try to destroy it.
52 |             # See https://code.djangoproject.com/ticket/22420#comment:18
53 | from django.db import connection
54 | connection.close()
55 |
56 | def join(self, *args, **kwargs):
57 |         super().join(*args, **kwargs)
58 | if self._exc:
59 | raise self._exc
60 | return self._return
61 |
62 |
63 | def run_in_thread(target):
64 | t = ThreadWithReturnValue(target=target)
65 | t.start()
66 | return t.join()
67 |
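68 | # Usage sketch: run_in_thread surfaces both results and failures on the caller's
69 | # side, so cross-thread cache visibility can be asserted inline, e.g.
70 | #
71 | #     assert run_in_thread(lambda: 40 + 2) == 42   # return value propagated
72 | #     run_in_thread(lambda: 1 / 0)                 # ZeroDivisionError re-raised here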
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist =
3 | lint,
4 | py{38,39}-dj{32,40},
5 | py310-dj{32,40,41},
6 | py311-dj{41,42,50},
7 | py312-dj{42,50,51,52},
8 | py313-dj{50,51,52},
9 | pypy310-dj40
10 |
11 | [gh-actions]
12 | python =
13 | 3.8: py38
14 | 3.9: py39
15 | 3.10: py310
16 | 3.11: py311
17 | 3.12: py312
18 | 3.13: py313
19 | pypy-3.10: pypy310
20 |
21 |
22 | [testenv]
23 | passenv = *
24 | allowlist_externals = *
25 | # This is required for gdal to install
26 | setenv =
27 | CPLUS_INCLUDE_PATH=/usr/include/gdal
28 | C_INCLUDE_PATH=/usr/include/gdal
29 | deps =
30 | pytest==8.3.5
31 | pytest-django==4.11.1
32 | dj32: Django>=3.2,<3.3
33 | dj40: Django>=4.0,<4.1
34 | dj41: Django>=4.1,<4.2
35 | dj42: Django>=4.2.8,<5.0
36 | dj50: Django>=5.0,<5.1
37 | dj51: Django>=5.1,<5.2
38 | dj52: Django>=5.2,<5.3
39 | djmain: git+https://github.com/django/django
40 | mysqlclient
41 | py{38,39,310,311,312,313}: psycopg2-binary
42 | ; gdal=={env:GDAL_VERSION:2.4}
43 | pypy310: psycopg2cffi>=2.7.6
44 | before_after==1.0.0
45 | jinja2>=2.10
46 | dill
47 | commands =
48 | pytest []
49 | env CACHEOPS_PREFIX=1 pytest []
50 | env CACHEOPS_INSIDEOUT=1 pytest []
51 | env CACHEOPS_DB=mysql pytest []
52 | env CACHEOPS_DB=postgresql pytest []
53 | ; env CACHEOPS_DB=postgis pytest []
54 | ; Test invalidate command
55 | ./manage.py invalidate tests.post
56 | ./manage.py invalidate tests
57 | ./manage.py invalidate all
58 |
59 |
60 | [flake8]
61 | max-line-length = 100
62 | ignore = E126,E127,E131,E226,E261,E265,E266,E302,E305,E401,E402,F403,F405,E731,W503
63 | exclude = cross.py,.tox/*
64 |
65 | [testenv:lint]
66 | deps = flake8
67 | commands = flake8
68 |
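69 | ; To reproduce a single CI matrix cell locally, pass the env explicitly,
70 | ; e.g.: tox -e py312-dj52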
--------------------------------------------------------------------------------