├── MANIFEST.in ├── Makefile ├── cachecore ├── __init__.py ├── posixemulation.py └── core.py ├── .travis.yml ├── LICENSE ├── README.rst ├── setup.py └── test_cachecore.py /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst LICENSE 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | python test_cachecore.py -------------------------------------------------------------------------------- /cachecore/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .core import * -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - 2.6 4 | - 2.7 5 | - 3.2 6 | script: make test 7 | 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 Kenneth Reitz. 2 | 3 | Permission to use, copy, modify, and/or distribute this software for any 4 | purpose with or without fee is hereby granted, provided that the above 5 | copyright notice and this permission notice appear in all copies. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | CacheCore 2 | ========= 3 | 4 | Simple cache backends, inspired by werkzeug.contrib.cache. 5 | 6 | 7 | Creating a Cache Object 8 | ----------------------- 9 | 10 | To create a cache object you just import the cache system of your choice 11 | from the cache module and instantiate it. Then you can start working 12 | with that object:: 13 | 14 | >>> from cachecore import SimpleCache 15 | >>> c = SimpleCache() 16 | >>> c.set("foo", "value") 17 | >>> c.get("foo") 18 | 'value' 19 | >>> c.get("missing") is None 20 | True 21 | 22 | 23 | Cache Types 24 | ----------- 25 | 26 | - In-Memory 27 | - Redis 28 | - Memcache 29 | - Filesystem 30 | - Your own (extend BaseCache) 31 | 32 | 33 | Installation 34 | ------------ 35 | 36 | Installing cachecore is simple with pip:: 37 | 38 | $ pip install cachecore -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | 6 | try: 7 | from setuptools import setup 8 | except ImportError: 9 | from distutils.core import setup 10 | 11 | if sys.argv[-1] == 'publish': 12 | os.system('python setup.py sdist upload') 13 | sys.exit() 14 | 15 | packages = [ 16 | 'cachecore', 17 | ] 18 | 19 | setup( 20 | name='cachecore', 21 | version='0.1.0', 22 | description='Simple Cache Base Classes.', 23 | long_description=open('README.rst').read(), 24 | author='Kenneth Reitz', 25 | author_email='me@kennethreitz.com', 26 | url='https://github.com/core/cachecore', 27 | packages=packages, 28 | include_package_data=True, 29 | license=open('LICENSE').read(), 30 | classifiers=( 31 | 'Development Status :: 5 - Production/Stable', 32 | 'Intended Audience :: Developers', 33 | 'Natural Language :: English', 34 | 'License :: 
OSI Approved :: ISC License (ISCL)', 35 | 'Programming Language :: Python', 36 | 'Programming Language :: Python :: 2.6', 37 | 'Programming Language :: Python :: 2.7', 38 | # 'Programming Language :: Python :: 3', 39 | # 'Programming Language :: Python :: 3.0', 40 | # 'Programming Language :: Python :: 3.1', 41 | ), 42 | ) 43 | -------------------------------------------------------------------------------- /cachecore/posixemulation.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | r""" 3 | werkzeug.posixemulation 4 | ~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | Provides a POSIX emulation for some features that are relevant to 7 | web applications. The main purpose is to simplify support for 8 | systems such as Windows NT that are not 100% POSIX compatible. 9 | 10 | Currently this only implements a :func:`rename` function that 11 | follows POSIX semantics. Eg: if the target file already exists it 12 | will be replaced without asking. 13 | 14 | This module was introduced in 0.6.1 and is not a public interface. 15 | It might become one in later versions of Werkzeug. 16 | 17 | :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. 18 | :license: BSD, see LICENSE for more details. 
19 | """ 20 | import sys 21 | import os 22 | import errno 23 | import time 24 | import random 25 | 26 | 27 | can_rename_open_file = False 28 | if os.name == 'nt': # pragma: no cover 29 | _rename = lambda src, dst: False 30 | _rename_atomic = lambda src, dst: False 31 | 32 | try: 33 | import ctypes 34 | 35 | _MOVEFILE_REPLACE_EXISTING = 0x1 36 | _MOVEFILE_WRITE_THROUGH = 0x8 37 | _MoveFileEx = ctypes.windll.kernel32.MoveFileExW 38 | 39 | def _rename(src, dst): 40 | if not isinstance(src, unicode): 41 | src = unicode(src, sys.getfilesystemencoding()) 42 | if not isinstance(dst, unicode): 43 | dst = unicode(dst, sys.getfilesystemencoding()) 44 | if _rename_atomic(src, dst): 45 | return True 46 | retry = 0 47 | rv = False 48 | while not rv and retry < 100: 49 | rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING | 50 | _MOVEFILE_WRITE_THROUGH) 51 | if not rv: 52 | time.sleep(0.001) 53 | retry += 1 54 | return rv 55 | 56 | # new in Vista and Windows Server 2008 57 | _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction 58 | _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction 59 | _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW 60 | _CloseHandle = ctypes.windll.kernel32.CloseHandle 61 | can_rename_open_file = True 62 | 63 | def _rename_atomic(src, dst): 64 | ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename') 65 | if ta == -1: 66 | return False 67 | try: 68 | retry = 0 69 | rv = False 70 | while not rv and retry < 100: 71 | rv = _MoveFileTransacted(src, dst, None, None, 72 | _MOVEFILE_REPLACE_EXISTING | 73 | _MOVEFILE_WRITE_THROUGH, ta) 74 | if rv: 75 | rv = _CommitTransaction(ta) 76 | break 77 | else: 78 | time.sleep(0.001) 79 | retry += 1 80 | return rv 81 | finally: 82 | _CloseHandle(ta) 83 | except Exception: 84 | pass 85 | 86 | def rename(src, dst): 87 | # Try atomic or pseudo-atomic rename 88 | if _rename(src, dst): 89 | return 90 | # Fall back to "move away and replace" 91 | try: 92 | os.rename(src, dst) 93 | except 
OSError, e: 94 | if e.errno != errno.EEXIST: 95 | raise 96 | old = "%s-%08x" % (dst, random.randint(0, sys.maxint)) 97 | os.rename(dst, old) 98 | os.rename(src, dst) 99 | try: 100 | os.unlink(old) 101 | except Exception: 102 | pass 103 | else: 104 | rename = os.rename 105 | can_rename_open_file = True 106 | -------------------------------------------------------------------------------- /test_cachecore.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import time 6 | import unittest 7 | import tempfile 8 | import shutil 9 | 10 | from unittest import TestCase 11 | import cachecore as cache 12 | 13 | try: 14 | import redis 15 | try: 16 | from redis.exceptions import ConnectionError as RedisConnectionError 17 | cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection') 18 | except RedisConnectionError: 19 | redis = None 20 | except ImportError: 21 | redis = None 22 | 23 | 24 | class SimpleCacheTestCase(TestCase): 25 | 26 | def test_get_dict(self): 27 | c = cache.SimpleCache() 28 | c.set('a', 'a') 29 | c.set('b', 'b') 30 | d = c.get_dict('a', 'b') 31 | assert 'a' in d 32 | assert 'a' == d['a'] 33 | assert 'b' in d 34 | assert 'b' == d['b'] 35 | 36 | def test_set_many(self): 37 | c = cache.SimpleCache() 38 | c.set_many({0: 0, 1: 1, 2: 4}) 39 | assert c.get(2) == 4 40 | c.set_many((i, i*i) for i in xrange(3)) 41 | assert c.get(2) == 4 42 | 43 | 44 | class FileSystemCacheTestCase(TestCase): 45 | 46 | def test_set_get(self): 47 | tmp_dir = tempfile.mkdtemp() 48 | try: 49 | c = cache.FileSystemCache(cache_dir=tmp_dir) 50 | for i in range(3): 51 | c.set(str(i), i * i) 52 | for i in range(3): 53 | result = c.get(str(i)) 54 | assert result == i * i 55 | finally: 56 | shutil.rmtree(tmp_dir) 57 | 58 | def test_filesystemcache_prune(self): 59 | THRESHOLD = 13 60 | tmp_dir = tempfile.mkdtemp() 61 | c = cache.FileSystemCache(cache_dir=tmp_dir, 
threshold=THRESHOLD) 62 | for i in range(2 * THRESHOLD): 63 | c.set(str(i), i) 64 | cache_files = os.listdir(tmp_dir) 65 | shutil.rmtree(tmp_dir) 66 | assert len(cache_files) <= THRESHOLD 67 | 68 | 69 | def test_filesystemcache_clear(self): 70 | tmp_dir = tempfile.mkdtemp() 71 | c = cache.FileSystemCache(cache_dir=tmp_dir) 72 | c.set('foo', 'bar') 73 | cache_files = os.listdir(tmp_dir) 74 | assert len(cache_files) == 1 75 | c.clear() 76 | cache_files = os.listdir(tmp_dir) 77 | assert len(cache_files) == 0 78 | shutil.rmtree(tmp_dir) 79 | 80 | 81 | # class RedisCacheTestCase(TestCase): 82 | 83 | # def make_cache(self): 84 | # return cache.RedisCache(key_prefix='werkzeug-test-case:') 85 | 86 | # def teardown(self): 87 | # self.make_cache().clear() 88 | 89 | # def test_compat(self): 90 | # c = self.make_cache() 91 | # c._client.set(c.key_prefix + 'foo', 'Awesome') 92 | # self.assert_equal(c.get('foo'), 'Awesome') 93 | # c._client.set(c.key_prefix + 'foo', '42') 94 | # self.assert_equal(c.get('foo'), 42) 95 | 96 | # def test_get_set(self): 97 | # c = self.make_cache() 98 | # c.set('foo', ['bar']) 99 | # assert c.get('foo') == ['bar'] 100 | 101 | # def test_get_many(self): 102 | # c = self.make_cache() 103 | # c.set('foo', ['bar']) 104 | # c.set('spam', 'eggs') 105 | # assert c.get_many('foo', 'spam') == [['bar'], 'eggs'] 106 | 107 | # def test_set_many(self): 108 | # c = self.make_cache() 109 | # c.set_many({'foo': 'bar', 'spam': ['eggs']}) 110 | # assert c.get('foo') == 'bar' 111 | # assert c.get('spam') == ['eggs'] 112 | 113 | # def test_expire(self): 114 | # c = self.make_cache() 115 | # c.set('foo', 'bar', 1) 116 | # time.sleep(2) 117 | # assert c.get('foo') is None 118 | 119 | # def test_add(self): 120 | # c = self.make_cache() 121 | # # sanity check that add() works like set() 122 | # c.add('foo', 'bar') 123 | # assert c.get('foo') == 'bar' 124 | # c.add('foo', 'qux') 125 | # assert c.get('foo') == 'bar' 126 | 127 | # def test_delete(self): 128 | # c = 
self.make_cache() 129 | # c.add('foo', 'bar') 130 | # assert c.get('foo') == 'bar' 131 | # c.delete('foo') 132 | # assert c.get('foo') is None 133 | 134 | # def test_delete_many(self): 135 | # c = self.make_cache() 136 | # c.add('foo', 'bar') 137 | # c.add('spam', 'eggs') 138 | # c.delete_many('foo', 'spam') 139 | # assert c.get('foo') is None 140 | # assert c.get('spam') is None 141 | 142 | # def test_inc_dec(self): 143 | # c = self.make_cache() 144 | # c.set('foo', 1) 145 | # assert c.inc('foo') == 2 146 | # assert c.dec('foo') == 1 147 | # c.delete('foo') 148 | 149 | 150 | # def test_true_false(self): 151 | # c = self.make_cache() 152 | # c.set('foo', True) 153 | # assert c.get('foo') == True 154 | # c.set('bar', False) 155 | # assert c.get('bar') == False 156 | 157 | 158 | def suite(): 159 | suite = unittest.TestSuite() 160 | suite.addTest(unittest.makeSuite(SimpleCacheTestCase)) 161 | suite.addTest(unittest.makeSuite(FileSystemCacheTestCase)) 162 | # if redis is not None: 163 | # suite.addTest(unittest.makeSuite(RedisCacheTestCase)) 164 | return suite 165 | 166 | if __name__ == '__main__': 167 | # print suite() 168 | unittest.main() -------------------------------------------------------------------------------- /cachecore/core.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | cachecore 4 | ~~~~~~~~~ 5 | 6 | Simple cache backends, inspired by werkzeug.contrib.cache. 7 | 8 | The main problem with dynamic Web sites is, well, they're dynamic. Each 9 | time a user requests a page, the webserver executes a lot of code, queries 10 | the database, renders templates until the visitor gets the page he sees. 11 | 12 | This is a lot more expensive than just loading a file from the file system 13 | and sending it to the visitor. 14 | 15 | For most Web applications, this overhead isn't a big deal but once it 16 | becomes, you will be glad to have a cache system in place. 
17 | 18 | How Caching Works 19 | ================= 20 | 21 | Caching is pretty simple. Basically you have a cache object lurking around 22 | somewhere that is connected to a remote cache or the file system or 23 | something else. When the request comes in you check if the current page 24 | is already in the cache and if so, you're returning it from the cache. 25 | Otherwise you generate the page and put it into the cache. (Or a fragment 26 | of the page, you don't have to cache the full thing) 27 | 28 | Here is a simple example of how to cache a sidebar for a template:: 29 | 30 | def get_sidebar(user): 31 | identifier = 'sidebar_for/user%d' % user.id 32 | value = cache.get(identifier) 33 | if value is not None: 34 | return value 35 | value = generate_sidebar_for(user=user) 36 | cache.set(identifier, value, timeout=60 * 5) 37 | return value 38 | 39 | Creating a Cache Object 40 | ======================= 41 | 42 | To create a cache object you just import the cache system of your choice 43 | from the cache module and instantiate it. Then you can start working 44 | with that object: 45 | 46 | >>> from cachecore import SimpleCache 47 | >>> c = SimpleCache() 48 | >>> c.set("foo", "value") 49 | >>> c.get("foo") 50 | 'value' 51 | >>> c.get("missing") is None 52 | True 53 | 54 | Please keep in mind that you have to create the cache and put it somewhere 55 | you have access to it (either as a module global you can import or you just 56 | put it into your WSGI application). 
57 | """ 58 | 59 | import os 60 | import re 61 | import tempfile 62 | try: 63 | from hashlib import md5 64 | except ImportError: 65 | from md5 import new as md5 66 | from itertools import izip 67 | from time import time 68 | 69 | from .posixemulation import rename 70 | 71 | try: 72 | import cPickle as pickle 73 | except ImportError: 74 | import pickle 75 | 76 | 77 | def _items(mappingorseq): 78 | """Wrapper for efficient iteration over mappings represented by dicts 79 | or sequences:: 80 | 81 | >>> for k, v in _items((i, i*i) for i in xrange(5)): 82 | ... assert k*k == v 83 | 84 | >>> for k, v in _items(dict((i, i*i) for i in xrange(5))): 85 | ... assert k*k == v 86 | 87 | """ 88 | return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') \ 89 | else mappingorseq 90 | 91 | 92 | class BaseCache(object): 93 | """Baseclass for the cache systems. All the cache systems implement this 94 | API or a superset of it. 95 | 96 | :param default_timeout: the default timeout that is used if no timeout is 97 | specified on :meth:`set`. 98 | """ 99 | 100 | def __init__(self, default_timeout=300): 101 | self.default_timeout = default_timeout 102 | 103 | def get(self, key): 104 | """Looks up key in the cache and returns the value for it. 105 | If the key does not exist `None` is returned instead. 106 | 107 | :param key: the key to be looked up. 108 | """ 109 | return None 110 | 111 | def delete(self, key): 112 | """Deletes `key` from the cache. If it does not exist in the cache 113 | nothing happens. 114 | 115 | :param key: the key to delete. 116 | """ 117 | pass 118 | 119 | def get_many(self, *keys): 120 | """Returns a list of values for the given keys. 121 | For each key a item in the list is created. Example:: 122 | 123 | foo, bar = cache.get_many("foo", "bar") 124 | 125 | If a key can't be looked up `None` is returned for that key 126 | instead. 127 | 128 | :param keys: The function accepts multiple keys as positional 129 | arguments. 
130 | """ 131 | return map(self.get, keys) 132 | 133 | def get_dict(self, *keys): 134 | """Works like :meth:`get_many` but returns a dict:: 135 | 136 | d = cache.get_dict("foo", "bar") 137 | foo = d["foo"] 138 | bar = d["bar"] 139 | 140 | :param keys: The function accepts multiple keys as positional 141 | arguments. 142 | """ 143 | return dict(izip(keys, self.get_many(*keys))) 144 | 145 | def set(self, key, value, timeout=None): 146 | """Adds a new key/value to the cache (overwrites value, if key already 147 | exists in the cache). 148 | 149 | :param key: the key to set 150 | :param value: the value for the key 151 | :param timeout: the cache timeout for the key (if not specified, 152 | it uses the default timeout). 153 | """ 154 | pass 155 | 156 | def add(self, key, value, timeout=None): 157 | """Works like :meth:`set` but does not overwrite the values of already 158 | existing keys. 159 | 160 | :param key: the key to set 161 | :param value: the value for the key 162 | :param timeout: the cache timeout for the key or the default 163 | timeout if not specified. 164 | """ 165 | pass 166 | 167 | def set_many(self, mapping, timeout=None): 168 | """Sets multiple keys and values from a mapping. 169 | 170 | :param mapping: a mapping with the keys/values to set. 171 | :param timeout: the cache timeout for the key (if not specified, 172 | it uses the default timeout). 173 | """ 174 | for key, value in _items(mapping): 175 | self.set(key, value, timeout) 176 | 177 | def delete_many(self, *keys): 178 | """Deletes multiple keys at once. 179 | 180 | :param keys: The function accepts multiple keys as positional 181 | arguments. 182 | """ 183 | for key in keys: 184 | self.delete(key) 185 | 186 | def clear(self): 187 | """Clears the cache. Keep in mind that not all caches support 188 | completely clearing the cache. 189 | """ 190 | pass 191 | 192 | def inc(self, key, delta=1): 193 | """Increments the value of a key by `delta`. 
If the key does 194 | not yet exist it is initialized with `delta`. 195 | 196 | For supporting caches this is an atomic operation. 197 | 198 | :param key: the key to increment. 199 | :param delta: the delta to add. 200 | """ 201 | self.set(key, (self.get(key) or 0) + delta) 202 | 203 | def dec(self, key, delta=1): 204 | """Decrements the value of a key by `delta`. If the key does 205 | not yet exist it is initialized with `-delta`. 206 | 207 | For supporting caches this is an atomic operation. 208 | 209 | :param key: the key to increment. 210 | :param delta: the delta to subtract. 211 | """ 212 | self.set(key, (self.get(key) or 0) - delta) 213 | 214 | 215 | class NullCache(BaseCache): 216 | """A cache that doesn't cache. This can be useful for unit testing. 217 | 218 | :param default_timeout: a dummy parameter that is ignored but exists 219 | for API compatibility with other caches. 220 | """ 221 | 222 | 223 | class SimpleCache(BaseCache): 224 | """Simple memory cache for single process environments. This class exists 225 | mainly for the development server and is not 100% thread safe. It tries 226 | to use as many atomic operations as possible and no locks for simplicity 227 | but it could happen under heavy load that keys are added multiple times. 228 | 229 | :param threshold: the maximum number of items the cache stores before 230 | it starts deleting some. 231 | :param default_timeout: the default timeout that is used if no timeout is 232 | specified on :meth:`~BaseCache.set`. 
233 | """ 234 | 235 | def __init__(self, threshold=500, default_timeout=300): 236 | BaseCache.__init__(self, default_timeout) 237 | self._cache = {} 238 | self.clear = self._cache.clear 239 | self._threshold = threshold 240 | 241 | def _prune(self): 242 | if len(self._cache) > self._threshold: 243 | now = time() 244 | for idx, (key, (expires, _)) in enumerate(self._cache.items()): 245 | if expires <= now or idx % 3 == 0: 246 | self._cache.pop(key, None) 247 | 248 | def get(self, key): 249 | now = time() 250 | expires, value = self._cache.get(key, (0, None)) 251 | if expires > time(): 252 | return pickle.loads(value) 253 | 254 | def set(self, key, value, timeout=None): 255 | if timeout is None: 256 | timeout = self.default_timeout 257 | self._prune() 258 | self._cache[key] = (time() + timeout, pickle.dumps(value, 259 | pickle.HIGHEST_PROTOCOL)) 260 | 261 | def add(self, key, value, timeout=None): 262 | if timeout is None: 263 | timeout = self.default_timeout 264 | if len(self._cache) > self._threshold: 265 | self._prune() 266 | item = (time() + timeout, pickle.dumps(value, 267 | pickle.HIGHEST_PROTOCOL)) 268 | self._cache.setdefault(key, item) 269 | 270 | def delete(self, key): 271 | self._cache.pop(key, None) 272 | 273 | 274 | _test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match 275 | 276 | class MemcachedCache(BaseCache): 277 | """A cache that uses memcached as backend. 278 | 279 | The first argument can either be an object that resembles the API of a 280 | :class:`memcache.Client` or a tuple/list of server addresses. In the 281 | event that a tuple/list is passed, Werkzeug tries to import the best 282 | available memcache library. 283 | 284 | Implementation notes: This cache backend works around some limitations in 285 | memcached to simplify the interface. For example unicode keys are encoded 286 | to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return 287 | the keys in the same format as passed. 
Furthermore all get methods 288 | silently ignore key errors to not cause problems when untrusted user data 289 | is passed to the get methods which is often the case in web applications. 290 | 291 | :param servers: a list or tuple of server addresses or alternatively 292 | a :class:`memcache.Client` or a compatible client. 293 | :param default_timeout: the default timeout that is used if no timeout is 294 | specified on :meth:`~BaseCache.set`. 295 | :param key_prefix: a prefix that is added before all keys. This makes it 296 | possible to use the same memcached server for different 297 | applications. Keep in mind that 298 | :meth:`~BaseCache.clear` will also clear keys with a 299 | different prefix. 300 | """ 301 | 302 | def __init__(self, servers=None, default_timeout=300, key_prefix=None): 303 | BaseCache.__init__(self, default_timeout) 304 | if servers is None or isinstance(servers, (list, tuple)): 305 | if servers is None: 306 | servers = ['127.0.0.1:11211'] 307 | self._client = self.import_preferred_memcache_lib(servers) 308 | if self._client is None: 309 | raise RuntimeError('no memcache module found') 310 | else: 311 | # NOTE: servers is actually an already initialized memcache 312 | # client. 313 | self._client = servers 314 | 315 | self.key_prefix = key_prefix 316 | 317 | def get(self, key): 318 | if isinstance(key, unicode): 319 | key = key.encode('utf-8') 320 | if self.key_prefix: 321 | key = self.key_prefix + key 322 | # memcached doesn't support keys longer than that. Because often 323 | # checks for so long keys can occur because it's tested from user 324 | # submitted data etc we fail silently for getting. 
325 | if _test_memcached_key(key): 326 | return self._client.get(key) 327 | 328 | def get_dict(self, *keys): 329 | key_mapping = {} 330 | have_encoded_keys = False 331 | for key in keys: 332 | if isinstance(key, unicode): 333 | encoded_key = key.encode('utf-8') 334 | have_encoded_keys = True 335 | else: 336 | encoded_key = key 337 | if self.key_prefix: 338 | encoded_key = self.key_prefix + encoded_key 339 | if _test_memcached_key(key): 340 | key_mapping[encoded_key] = key 341 | d = rv = self._client.get_multi(key_mapping.keys()) 342 | if have_encoded_keys or self.key_prefix: 343 | rv = {} 344 | for key, value in d.iteritems(): 345 | rv[key_mapping[key]] = value 346 | if len(rv) < len(keys): 347 | for key in keys: 348 | if key not in rv: 349 | rv[key] = None 350 | return rv 351 | 352 | def add(self, key, value, timeout=None): 353 | if timeout is None: 354 | timeout = self.default_timeout 355 | if isinstance(key, unicode): 356 | key = key.encode('utf-8') 357 | if self.key_prefix: 358 | key = self.key_prefix + key 359 | self._client.add(key, value, timeout) 360 | 361 | def set(self, key, value, timeout=None): 362 | if timeout is None: 363 | timeout = self.default_timeout 364 | if isinstance(key, unicode): 365 | key = key.encode('utf-8') 366 | if self.key_prefix: 367 | key = self.key_prefix + key 368 | self._client.set(key, value, timeout) 369 | 370 | def get_many(self, *keys): 371 | d = self.get_dict(*keys) 372 | return [d[key] for key in keys] 373 | 374 | def set_many(self, mapping, timeout=None): 375 | if timeout is None: 376 | timeout = self.default_timeout 377 | new_mapping = {} 378 | for key, value in _items(mapping): 379 | if isinstance(key, unicode): 380 | key = key.encode('utf-8') 381 | if self.key_prefix: 382 | key = self.key_prefix + key 383 | new_mapping[key] = value 384 | self._client.set_multi(new_mapping, timeout) 385 | 386 | def delete(self, key): 387 | if isinstance(key, unicode): 388 | key = key.encode('utf-8') 389 | if self.key_prefix: 390 | key = 
self.key_prefix + key 391 | if _test_memcached_key(key): 392 | self._client.delete(key) 393 | 394 | def delete_many(self, *keys): 395 | new_keys = [] 396 | for key in keys: 397 | if isinstance(key, unicode): 398 | key = key.encode('utf-8') 399 | if self.key_prefix: 400 | key = self.key_prefix + key 401 | if _test_memcached_key(key): 402 | new_keys.append(key) 403 | self._client.delete_multi(new_keys) 404 | 405 | def clear(self): 406 | self._client.flush_all() 407 | 408 | def inc(self, key, delta=1): 409 | if isinstance(key, unicode): 410 | key = key.encode('utf-8') 411 | if self.key_prefix: 412 | key = self.key_prefix + key 413 | self._client.incr(key, delta) 414 | 415 | def dec(self, key, delta=1): 416 | if isinstance(key, unicode): 417 | key = key.encode('utf-8') 418 | if self.key_prefix: 419 | key = self.key_prefix + key 420 | self._client.decr(key, delta) 421 | 422 | def import_preferred_memcache_lib(self, servers): 423 | """Returns an initialized memcache client. Used by the constructor.""" 424 | try: 425 | import pylibmc 426 | except ImportError: 427 | pass 428 | else: 429 | return pylibmc.Client(servers) 430 | 431 | try: 432 | from google.appengine.api import memcache 433 | except ImportError: 434 | pass 435 | else: 436 | return memcache.Client() 437 | 438 | try: 439 | import memcache 440 | except ImportError: 441 | pass 442 | else: 443 | return memcache.Client(servers) 444 | 445 | 446 | # backwards compatibility 447 | GAEMemcachedCache = MemcachedCache 448 | 449 | 450 | class RedisCache(BaseCache): 451 | """Uses the Redis key-value store as a cache backend. 452 | 453 | The first argument can be either a string denoting address of the Redis 454 | server or an object resembling an instance of a redis.Redis class. 455 | 456 | Note: Python Redis API already takes care of encoding unicode strings on 457 | the fly. 458 | 459 | .. versionadded:: 0.7 460 | 461 | .. versionadded:: 0.8 462 | `key_prefix` was added. 463 | 464 | .. 
versionchanged:: 0.8 465 | This cache backend now properly serializes objects. 466 | 467 | .. versionchanged:: 0.8.3 468 | This cache backend now supports password authentication. 469 | 470 | :param host: address of the Redis server or an object which API is 471 | compatible with the official Python Redis client (redis-py). 472 | :param port: port number on which Redis server listens for connections. 473 | :param password: password authentication for the Redis server. 474 | :param default_timeout: the default timeout that is used if no timeout is 475 | specified on :meth:`~BaseCache.set`. 476 | :param key_prefix: A prefix that should be added to all keys. 477 | """ 478 | 479 | def __init__(self, host='localhost', port=6379, password=None, 480 | default_timeout=300, key_prefix=None): 481 | BaseCache.__init__(self, default_timeout) 482 | if isinstance(host, basestring): 483 | try: 484 | import redis 485 | except ImportError: 486 | raise RuntimeError('no redis module found') 487 | self._client = redis.Redis(host=host, port=port, password=password) 488 | else: 489 | self._client = host 490 | self.key_prefix = key_prefix or '' 491 | 492 | def dump_object(self, value): 493 | """Dumps an object into a string for redis. By default it serializes 494 | integers as regular string and pickle dumps everything else. 495 | """ 496 | t = type(value) 497 | if t is int or t is long: 498 | return str(value) 499 | return '!' + pickle.dumps(value) 500 | 501 | def load_object(self, value): 502 | """The reversal of :meth:`dump_object`. This might be called with 503 | None. 504 | """ 505 | if value is None: 506 | return None 507 | if value.startswith('!'): 508 | return pickle.loads(value[1:]) 509 | try: 510 | return int(value) 511 | except ValueError: 512 | # before 0.8 we did not have serialization. Still support that. 
513 | return value 514 | 515 | def get(self, key): 516 | return self.load_object(self._client.get(self.key_prefix + key)) 517 | 518 | def get_many(self, *keys): 519 | if self.key_prefix: 520 | keys = [self.key_prefix + key for key in keys] 521 | return [self.load_object(x) for x in self._client.mget(keys)] 522 | 523 | def set(self, key, value, timeout=None): 524 | if timeout is None: 525 | timeout = self.default_timeout 526 | dump = self.dump_object(value) 527 | self._client.setex(self.key_prefix + key, dump, timeout) 528 | 529 | def add(self, key, value, timeout=None): 530 | if timeout is None: 531 | timeout = self.default_timeout 532 | dump = self.dump_object(value) 533 | added = self._client.setnx(self.key_prefix + key, dump) 534 | if added: 535 | self._client.expire(self.key_prefix + key, timeout) 536 | 537 | def set_many(self, mapping, timeout=None): 538 | if timeout is None: 539 | timeout = self.default_timeout 540 | pipe = self._client.pipeline() 541 | for key, value in _items(mapping): 542 | dump = self.dump_object(value) 543 | pipe.setex(self.key_prefix + key, dump, timeout) 544 | pipe.execute() 545 | 546 | def delete(self, key): 547 | self._client.delete(self.key_prefix + key) 548 | 549 | def delete_many(self, *keys): 550 | if not keys: 551 | return 552 | if self.key_prefix: 553 | keys = [self.key_prefix + key for key in keys] 554 | self._client.delete(*keys) 555 | 556 | def clear(self): 557 | if self.key_prefix: 558 | keys = self._client.keys(self.key_prefix + '*') 559 | if keys: 560 | self._client.delete(*keys) 561 | else: 562 | self._client.flushdb() 563 | 564 | def inc(self, key, delta=1): 565 | return self._client.incr(self.key_prefix + key, delta) 566 | 567 | def dec(self, key, delta=1): 568 | return self._client.decr(self.key_prefix + key, delta) 569 | 570 | 571 | class FileSystemCache(BaseCache): 572 | """A cache that stores the items on the file system. This cache depends 573 | on being the only user of the `cache_dir`. 
Make absolutely sure that 574 | nobody but this cache stores files there or otherwise the cache will 575 | randomly delete files therein. 576 | 577 | :param cache_dir: the directory where cache files are stored. 578 | :param threshold: the maximum number of items the cache stores before 579 | it starts deleting some. 580 | :param default_timeout: the default timeout that is used if no timeout is 581 | specified on :meth:`~BaseCache.set`. 582 | :param mode: the file mode wanted for the cache files, default 0600 583 | """ 584 | 585 | #: used for temporary files by the FileSystemCache 586 | _fs_transaction_suffix = '.__wz_cache' 587 | 588 | def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0600): 589 | BaseCache.__init__(self, default_timeout) 590 | self._path = cache_dir 591 | self._threshold = threshold 592 | self._mode = mode 593 | if not os.path.exists(self._path): 594 | os.makedirs(self._path) 595 | 596 | def _list_dir(self): 597 | """return a list of (fully qualified) cache filenames 598 | """ 599 | return [os.path.join(self._path, fn) for fn in os.listdir(self._path) 600 | if not fn.endswith(self._fs_transaction_suffix)] 601 | 602 | def _prune(self): 603 | entries = self._list_dir() 604 | if len(entries) > self._threshold: 605 | now = time() 606 | for idx, fname in enumerate(entries): 607 | remove = False 608 | f = None 609 | try: 610 | try: 611 | f = open(fname, 'rb') 612 | expires = pickle.load(f) 613 | remove = expires <= now or idx % 3 == 0 614 | finally: 615 | if f is not None: 616 | f.close() 617 | except Exception: 618 | pass 619 | if remove: 620 | try: 621 | os.remove(fname) 622 | except (IOError, OSError): 623 | pass 624 | 625 | def clear(self): 626 | for fname in self._list_dir(): 627 | try: 628 | os.remove(fname) 629 | except (IOError, OSError): 630 | pass 631 | 632 | def _get_filename(self, key): 633 | hash = md5(key).hexdigest() 634 | return os.path.join(self._path, hash) 635 | 636 | def get(self, key): 637 | filename = 
self._get_filename(key) 638 | try: 639 | f = open(filename, 'rb') 640 | try: 641 | if pickle.load(f) >= time(): 642 | return pickle.load(f) 643 | finally: 644 | f.close() 645 | os.remove(filename) 646 | except Exception: 647 | return None 648 | 649 | def add(self, key, value, timeout=None): 650 | filename = self._get_filename(key) 651 | if not os.path.exists(filename): 652 | self.set(key, value, timeout) 653 | 654 | def set(self, key, value, timeout=None): 655 | if timeout is None: 656 | timeout = self.default_timeout 657 | filename = self._get_filename(key) 658 | self._prune() 659 | try: 660 | fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix, 661 | dir=self._path) 662 | f = os.fdopen(fd, 'wb') 663 | try: 664 | pickle.dump(int(time() + timeout), f, 1) 665 | pickle.dump(value, f, pickle.HIGHEST_PROTOCOL) 666 | finally: 667 | f.close() 668 | rename(tmp, filename) 669 | os.chmod(filename, self._mode) 670 | except (IOError, OSError): 671 | pass 672 | 673 | def delete(self, key): 674 | try: 675 | os.remove(self._get_filename(key)) 676 | except (IOError, OSError): 677 | pass 678 | --------------------------------------------------------------------------------