{{ post.content }}
187 | 196 | 197 | 198 | 199 | If you want the full source code check out the `tutorial source`_. 200 | 201 | .. _tutorial source: 202 | https://github.com/fengsp/rc/tree/master/examples/tutorial.py 203 | -------------------------------------------------------------------------------- /examples/tutorial.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import time 3 | 4 | from flask import Flask 5 | from flask import request, url_for, redirect, abort, render_template_string 6 | from flask_sqlalchemy import SQLAlchemy 7 | from rc import Cache 8 | 9 | 10 | app = Flask(__name__) 11 | app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' 12 | db = SQLAlchemy(app) 13 | cache = Cache() 14 | 15 | 16 | def init_db(): 17 | db.create_all() 18 | 19 | 20 | class Post(db.Model): 21 | id = db.Column(db.Integer, primary_key=True) 22 | title = db.Column(db.String(100), nullable=False) 23 | content = db.Column(db.Text, nullable=False) 24 | created_ts = db.Column(db.Integer, nullable=False) 25 | updated_ts = db.Column(db.Integer, nullable=False) 26 | 27 | def __init__(self, title, content, created_ts, updated_ts): 28 | self.title = title 29 | self.content = content 30 | self.created_ts = created_ts 31 | self.updated_ts = updated_ts 32 | 33 | def __repr__(self): 34 | return '{{ post.content }}
146 | 155 | 156 | 157 | """ 158 | 159 | 160 | if __name__ == '__main__': 161 | init_db() 162 | app.run() 163 | -------------------------------------------------------------------------------- /logo/rc.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fengsp/rc/32c4d4e2cb7ba734b2dbd9bd83bcc85a2f09499f/logo/rc.psd -------------------------------------------------------------------------------- /rc/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | rc 4 | ~~ 5 | 6 | The redis cache. 7 | 8 | :copyright: (c) 2016 by Shipeng Feng. 9 | :license: BSD, see LICENSE for more details. 10 | """ 11 | from rc.cache import Cache, CacheCluster 12 | from rc.serializer import BaseSerializer, JSONSerializer, PickleSerializer 13 | from rc.redis_router import BaseRedisRouter, RedisCRC32HashRouter 14 | from rc.redis_router import RedisConsistentHashRouter 15 | from rc.testing import NullCache, FakeRedisCache 16 | 17 | 18 | __version__ = '0.3.1' 19 | 20 | 21 | __all__ = [ 22 | 'Cache', 'CacheCluster', 23 | 24 | 'BaseSerializer', 'JSONSerializer', 'PickleSerializer', 25 | 26 | 'BaseRedisRouter', 'RedisCRC32HashRouter', 'RedisConsistentHashRouter', 27 | 28 | 'NullCache', 'FakeRedisCache', 29 | ] 30 | -------------------------------------------------------------------------------- /rc/cache.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import inspect 3 | import functools 4 | from itertools import izip 5 | 6 | from rc.redis_clients import RedisClient 7 | from rc.redis_cluster import RedisCluster 8 | from rc.serializer import JSONSerializer 9 | from rc.utils import generate_key_for_cached_func 10 | from rc.promise import Promise 11 | 12 | 13 | #: Running mode for cache 14 | NORMAL_MODE = 0 15 | BATCH_MODE = 1 16 | 17 | 18 | class cached_property(object): 19 | 20 | def 
#: Running mode for cache.
NORMAL_MODE = 0
BATCH_MODE = 1


class cached_property(object):
    """Descriptor that caches the return value of ``fget`` on the instance.

    The first access computes the value and stores it in the instance
    ``__dict__`` under the same name, so subsequent lookups bypass the
    descriptor entirely.
    """

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, obj, objtype):
        # Class-level access used to raise AttributeError (``None`` has
        # no ``__dict__``); return the descriptor itself instead, which
        # is the conventional behavior for cached properties.
        if obj is None:
            return self
        rv = obj.__dict__[self.fget.__name__] = self.fget(obj)
        return rv


class BaseCache(object):
    """Baseclass for all redis cache systems.

    :param namespace: a prefix that should be added to all keys
    :param serializer_cls: the serialization class you want to use.
    :param default_expire: default expiration time that is used if no
                           expire specified on
                           :meth:`~rc.cache.BaseCache.set`.
    :param bypass_values: a list of return values that would be ignored by
                          the cache decorator and won't be cached at all.

    .. versionadded:: 0.3
        The `bypass_values` parameter was added.
    """

    def __init__(self, namespace=None, serializer_cls=None,
                 default_expire=3 * 24 * 3600, bypass_values=None):
        if serializer_cls is None:
            serializer_cls = JSONSerializer
        self.namespace = namespace or ''
        self.serializer_cls = serializer_cls
        self.default_expire = default_expire
        # ``None`` is the "not given" marker so the default is not a
        # shared mutable list; copy what the caller passed.
        self.bypass_values = list(bypass_values) if bypass_values else []
        self._running_mode = NORMAL_MODE
        self._pending_operations = []

    def get_client(self):
        """Returns the redis client that is used for cache."""
        raise NotImplementedError()

    @cached_property
    def client(self):
        """Returns the redis client that is used for cache."""
        return self.get_client()

    @cached_property
    def serializer(self):
        """Returns the serializer instance that is used for cache."""
        return self.serializer_cls()

    def _raw_get(self, key):
        # Returns the raw serialized string (or None on a miss).
        return self.client.get(self.namespace + key)

    def _raw_set(self, key, string, expire=None):
        if expire is None:
            expire = self.default_expire
        return self.client.setex(self.namespace + key, expire, string)

    def _raw_get_many(self, *keys):
        if not keys:
            return []
        if self.namespace:
            keys = [self.namespace + key for key in keys]
        return self.client.mget(keys)

    def get(self, key):
        """Returns the value for the cache key, otherwise `None` is
        returned.

        :param key: cache key
        """
        return self.serializer.loads(self._raw_get(key))

    def set(self, key, value, expire=None):
        """Adds or overwrites key/value to the cache.  The value expires
        in time seconds.

        :param key: cache key
        :param value: value for the key
        :param expire: expiration time
        :return: Whether the key has been set
        """
        return self._raw_set(key, self.serializer.dumps(value), expire)

    def delete(self, key):
        """Deletes the value for the cache key.

        :param key: cache key
        :return: Whether the key has been deleted
        """
        return self.client.delete(self.namespace + key)

    def get_many(self, *keys):
        """Returns a list of values for the cache keys."""
        return [self.serializer.loads(s) for s in self._raw_get_many(*keys)]

    def set_many(self, mapping, expire=None):
        """Sets multiple keys and values using dictionary.
        The values expire in time seconds.

        :param mapping: a dictionary with key/values to set
        :param expire: expiration time
        :return: whether all keys have been set
        """
        if not mapping:
            return True
        rv = True
        # ``items()`` works on both Python 2 and 3 (``iteritems`` does not).
        for key, value in mapping.items():
            if not self.set(key, value, expire):
                rv = False
        return rv

    def delete_many(self, *keys):
        """Deletes multiple keys.

        :return: whether all keys have been deleted
        """
        if not keys:
            return True
        return all(self.delete(key) for key in keys)

    def cache(self, key_prefix=None, expire=None, include_self=False):
        """A decorator that is used to cache a function with supplied
        parameters.  It is intended for decorator usage::

            @cache.cache()
            def load(name):
                return load_from_database(name)

            rv = load('foo')
            rv = load('foo')  # returned from cache

        The cache key doesn't need to be specified, it will be created
        with the name of the module + the name of the function +
        function arguments.

        :param key_prefix: this is used to ensure cache result won't
                           clash with another function that has the same
                           name in this module, normally you do not need
                           to pass this in
        :param expire: expiration time
        :param include_self: whether to include the `self` or `cls` as
                             cache key for method or not, default to be
                             False

        .. note::

            The function being decorated must be called with the same
            positional and keyword arguments.  Otherwise, you might
            create multiple caches.  If you pass one parameter as
            positional, do it always.

        .. note::

            Using objects as part of the cache key is possible, though
            it is suggested to not pass in an object instance as
            parameter.  We perform a str() on the passed in objects so
            that you can provide a __str__ function that returns an
            identifying string for that object, the unique string will
            be used as part of the cache key.

        .. note::

            When a method on a class is decorated, the ``self`` or
            ``cls`` arguments is not included in the cache key.
            Starting from 0.2 you can control it with `include_self`.
            If you set `include_self` to True, remember to provide
            `__str__` method for the object, otherwise you might
            encounter random behavior.

        .. versionadded:: 0.2
            The `include_self` parameter was added.
        """
        def decorator(f):
            # ``inspect.getargspec`` was removed in Python 3.11; prefer
            # the full variant when it exists.
            getspec = getattr(inspect, 'getfullargspec', None)
            if getspec is None:
                getspec = inspect.getargspec
            argspec = getspec(f)
            has_self = bool(argspec and argspec[0] and
                            argspec[0][0] in ('self', 'cls'))

            @functools.wraps(f)
            def wrapper(*args, **kwargs):
                cache_args = args
                # handle self and cls
                if has_self:
                    if not include_self:
                        cache_args = args[1:]
                cache_key = generate_key_for_cached_func(
                    key_prefix, f, *cache_args, **kwargs)
                if self._running_mode == BATCH_MODE:
                    promise = Promise()
                    self._pending_operations.append(
                        (f, args, kwargs, promise, cache_key, expire))
                    return promise
                rv = self._raw_get(cache_key)
                if rv is None:
                    value = f(*args, **kwargs)
                    rv = self.serializer.dumps(value)
                    if value not in self.bypass_values:
                        self._raw_set(cache_key, rv, expire)
                return self.serializer.loads(rv)

            wrapper.__rc_cache_params__ = {
                'key_prefix': key_prefix,
                'expire': expire,
                'include_self': include_self,
            }
            return wrapper
        return decorator

    def invalidate(self, func, *args, **kwargs):
        """Invalidate a cache decorated function.  You must call this
        with the same positional and keyword arguments as what you did
        when you call the decorated function, otherwise the cache will
        not be deleted.  The usage is simple::

            @cache.cache()
            def load(name, limit):
                return load_from_database(name, limit)

            rv = load('foo', limit=5)

            cache.invalidate(load, 'foo', limit=5)

        :param func: decorated function to invalidate
        :param args: same positional arguments as you call the function
        :param kwargs: same keyword arguments as you call the function
        :return: whether it is invalidated or not
        """
        try:
            cache_params = func.__rc_cache_params__
        except AttributeError:
            # NOTE: the two literals previously rendered as "...that
            # isnot cache decorated" -- a space was missing.
            raise TypeError('Attempted to invalidate a function that is '
                            'not cache decorated')
        key_prefix = cache_params['key_prefix']
        cache_args = args
        include_self = cache_params.get('include_self', False)
        if include_self:
            instance_self = getattr(func, '__self__', None)
            if instance_self:
                cache_args = tuple([instance_self] + list(args))
        cache_key = generate_key_for_cached_func(
            key_prefix, func, *cache_args, **kwargs)
        return self.delete(cache_key)

    def batch_mode(self):
        """Returns a context manager for cache batch mode.  This is used
        to batch fetch results of cache decorated functions.  All results
        returned by cache decorated function will be
        :class:`~rc.promise.Promise` object.  This context manager runs
        the batch fetch and then resolves all promises in the end.
        Example::

            results = []
            with cache.batch_mode():
                for i in range(10):
                    results.append(get_result(i))
            results = map(lambda r: r.value, results)

        .. note::

            When you are using rc on this mode, rc is not thread safe.
        """
        return BatchManager(self)

    def batch(self, cancel=False):
        # Leave batch mode, then either drop the queued operations
        # (``cancel``) or fetch all cache keys at once and resolve the
        # promises, computing and caching the misses.
        if self._running_mode != BATCH_MODE:
            raise RuntimeError('You have to batch on batch mode.')
        pending_operations = self._pending_operations
        self._pending_operations = []
        self._running_mode = NORMAL_MODE
        if cancel:
            return
        cache_keys = []
        for f, args, kwargs, promise, cache_key, expire in pending_operations:
            cache_keys.append(cache_key)
        cache_results = self._raw_get_many(*cache_keys)
        for rv, (func, args, kwargs, promise, cache_key, expire) in zip(
                cache_results, pending_operations):
            if rv is None:
                value = func(*args, **kwargs)
                rv = self.serializer.dumps(value)
                if value not in self.bypass_values:
                    self._raw_set(cache_key, rv, expire)
            promise.resolve(self.serializer.loads(rv))


class Cache(BaseCache):
    """Uses a single Redis server as backend.

    :param host: address of the Redis, this is compatible with the
                 official Python StrictRedis client (redis-py).
    :param port: port number of the Redis server.
    :param db: db numeric index of the Redis server.
    :param password: password authentication for the Redis server.
    :param socket_timeout: socket timeout for the StrictRedis client.
    :param namespace: a prefix that should be added to all keys.
    :param serializer_cls: the serialization class you want to use.
                           By default, it is :class:`rc.JSONSerializer`.
    :param default_expire: default expiration time that is used if no
                           expire specified on :meth:`set`.
    :param redis_options: a dictionary of parameters that are useful for
                          setting other parameters to the StrictRedis
                          client.
    :param bypass_values: a list of return values that would be ignored
                          by the cache decorator and won't be cached
                          at all.

    .. versionadded:: 0.3
        The `bypass_values` parameter was added.
    """

    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, namespace=None, serializer_cls=None,
                 default_expire=3 * 24 * 3600, redis_options=None,
                 bypass_values=None):
        BaseCache.__init__(self, namespace, serializer_cls, default_expire,
                           bypass_values)
        if redis_options is None:
            redis_options = {}
        self.host = host
        self.port = port
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.redis_options = redis_options

    def get_client(self):
        return RedisClient(host=self.host, port=self.port, db=self.db,
                           password=self.password,
                           socket_timeout=self.socket_timeout,
                           **self.redis_options)

    def set_many(self, mapping, expire=None):
        # Single-server fast path: one pipelined round trip instead of
        # one SETEX per key.
        if not mapping:
            return True
        if expire is None:
            expire = self.default_expire
        pipe = self.client.pipeline()
        for key, value in mapping.items():
            string = self.serializer.dumps(value)
            pipe.setex(self.namespace + key, expire, string)
        return all(pipe.execute())

    def delete_many(self, *keys):
        if not keys:
            return True
        if self.namespace:
            keys = [self.namespace + key for key in keys]
        return self.client.delete(*keys)


class CacheCluster(BaseCache):
    """Uses a redis cluster as backend.

    Basic example::

        cache = CacheCluster({
            0: {'port': 6379},
            1: {'port': 6479},
            2: {'port': 6579},
            3: {'port': 6679},
        })

    :param hosts: a dictionary of hosts that maps the host host_name to
                  configuration parameters.  The parameters are used to
                  construct a :class:`~rc.redis_cluster.HostConfig`.
    :param namespace: a prefix that should be added to all keys.
    :param serializer_cls: the serialization class you want to use.
                           By default, it is :class:`~rc.JSONSerializer`.
    :param default_expire: default expiration time that is used if no
                           expire specified on :meth:`set`.
    :param router_cls: use this to override the redis router class,
                       default to be :class:`~rc.RedisCRC32HashRouter`.
    :param router_options: a dictionary of parameters that is useful for
                           setting other parameters of router
    :param pool_cls: use this to override the redis connection pool
                     class, default to be :class:`~redis.ConnectionPool`
    :param pool_options: a dictionary of parameters that is useful for
                         setting other parameters of pool
    :param max_concurrency: defines how many parallel queries can happen
                            at the same time
    :param poller_timeout: for multi key operations we use a select loop
                           as the parallel query implementation, use
                           this to specify timeout for the underlying
                           pollers (select/poll/kqueue/epoll).
    :param bypass_values: a list of return values that would be ignored
                          by the cache decorator and won't be cached
                          at all.

    .. versionadded:: 0.3
        The `bypass_values` parameter was added.
    """

    def __init__(self, hosts, namespace=None, serializer_cls=None,
                 default_expire=3 * 24 * 3600, router_cls=None,
                 router_options=None, pool_cls=None, pool_options=None,
                 max_concurrency=64, poller_timeout=1.0, bypass_values=None):
        BaseCache.__init__(self, namespace, serializer_cls, default_expire,
                           bypass_values)
        self.hosts = hosts
        self.router_cls = router_cls
        self.router_options = router_options
        self.pool_cls = pool_cls
        self.pool_options = pool_options
        self.max_concurrency = max_concurrency
        self.poller_timeout = poller_timeout

    def get_client(self):
        redis_cluster = RedisCluster(self.hosts, router_cls=self.router_cls,
                                     router_options=self.router_options,
                                     pool_cls=self.pool_cls,
                                     pool_options=self.pool_options)
        return redis_cluster.get_client(self.max_concurrency,
                                        self.poller_timeout)

    def set_many(self, mapping, expire=None):
        # Cluster fast path: ship every key to its node in one
        # multi-command operation.
        if not mapping:
            return True
        if expire is None:
            expire = self.default_expire
        string_mapping = {}
        for key, value in mapping.items():
            string = self.serializer.dumps(value)
            string_mapping[self.namespace + key] = string
        return self.client.msetex(string_mapping, expire)

    def delete_many(self, *keys):
        if not keys:
            return True
        if self.namespace:
            keys = [self.namespace + key for key in keys]
        return self.client.mdelete(*keys)


class BatchManager(object):
    """Context manager that helps us with batching.

    Entering switches the cache into batch mode; leaving runs the batch
    (or cancels it if the body raised).
    """

    def __init__(self, cache):
        self.cache = cache

    def __enter__(self):
        self.cache._running_mode = BATCH_MODE
        return self.cache

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None:
            self.cache.batch(cancel=True)
        else:
            self.cache.batch()
def md5_bytes(key):
    """Return the 16 raw MD5 digest bytes of *key* as a list of ints.

    *key* may be text or bytes; text is UTF-8 encoded first so the same
    call works on Python 2 (where ``map(ord, ...)`` used to be needed)
    and Python 3 (where the digest already iterates as ints).
    """
    if not isinstance(key, bytes):
        key = key.encode('utf-8')
    # bytearray yields ints on both Python 2 and Python 3.
    return list(bytearray(hashlib.md5(key).digest()))


class HashRing(object):
    """Ketama-style consistent hash ring.

    :param nodes: iterable of node names to place on the ring.
    :param weights: optional mapping of node name -> integer weight;
                    heavier nodes receive proportionally more points.
    """

    def __init__(self, nodes=None, weights=None):
        self._nodes = set(nodes or [])
        self._weights = weights if weights else {}

        self._rebuild_circle()

    def _rebuild_circle(self):
        # Each node gets ~40 * weight-share points; every MD5 digest
        # yields four 32-bit ring keys (4 bytes each, little-endian).
        self._hashring = {}
        self._sorted_keys = []
        total_weight = 0
        for node in self._nodes:
            total_weight += self._weights.get(node, 1)

        for node in self._nodes:
            weight = self._weights.get(node, 1)

            ks = math.floor((40 * len(self._nodes) * weight) / total_weight)

            for i in range(int(ks)):
                digest = md5_bytes('%s-%s-salt' % (node, i))

                for section in range(4):
                    key = ((digest[3 + section * 4] << 24) |
                           (digest[2 + section * 4] << 16) |
                           (digest[1 + section * 4] << 8) |
                           digest[section * 4])
                    self._hashring[key] = node
                    self._sorted_keys.append(key)

        self._sorted_keys.sort()

    def _get_node_pos(self, key):
        # Returns the index into the sorted ring keys for *key*, or
        # None when the ring is empty; wraps around past the last key.
        if not self._hashring:
            return

        digest = md5_bytes(key)
        ring_key = ((digest[3] << 24) | (digest[2] << 16) |
                    (digest[1] << 8) | digest[0])

        keys = self._sorted_keys
        pos = bisect(keys, ring_key)

        if pos == len(keys):
            return 0
        return pos

    def get_node(self, key):
        """Return the node responsible for *key* (None if ring empty)."""
        pos = self._get_node_pos(key)
        if pos is None:
            return
        return self._hashring[self._sorted_keys[pos]]
class BasePoller(object):
    """Common interface over select/poll/kqueue/epoll.

    :param objects: iterable of ``(host_name, obj)`` pairs; each ``obj``
                    must expose ``fileno()``.
    """
    is_supported = False

    def __init__(self, objects):
        self.objects = dict(objects)

    def poll(self, timeout=None):
        """Wait up to *timeout* seconds.  The return value is two lists
        of objects that are ready: ``(rlist, wlist)``.
        """
        raise NotImplementedError()

    def pop(self, host_name):
        """Remove and return the object for *host_name* (None if absent)."""
        return self.objects.pop(host_name, None)

    def __len__(self):
        return len(self.objects)


class SelectPoller(BasePoller):
    is_supported = hasattr(select, 'select')

    def poll(self, timeout=None):
        objs = self.objects.values()
        rlist, wlist, _ = select.select(objs, objs, [], timeout)
        return rlist, wlist


class PollPoller(BasePoller):
    is_supported = hasattr(select, 'poll')

    def __init__(self, objects):
        BasePoller.__init__(self, objects)
        self.pollobj = select.poll()
        self.fd_to_object = {}
        for _, obj in objects:
            self.pollobj.register(obj.fileno(), select.POLLIN | select.POLLOUT)
            self.fd_to_object[obj.fileno()] = obj

    def pop(self, host_name):
        rv = BasePoller.pop(self, host_name)
        if rv is not None:
            self.pollobj.unregister(rv.fileno())
            self.fd_to_object.pop(rv.fileno(), None)
        return rv

    def poll(self, timeout=None):
        # BUG FIX: ``select.poll.poll`` takes *milliseconds* (None to
        # block) while every other poller here takes seconds -- convert
        # so all pollers honor the same unit.
        if timeout is not None:
            timeout = int(timeout * 1000)
        rlist = []
        wlist = []
        for fd, event in self.pollobj.poll(timeout):
            obj = self.fd_to_object[fd]
            if event & select.POLLIN:
                rlist.append(obj)
            elif event & select.POLLOUT:
                wlist.append(obj)
        return rlist, wlist


class KQueuePoller(BasePoller):
    is_supported = hasattr(select, 'kqueue')

    def __init__(self, objects):
        BasePoller.__init__(self, objects)
        self.kqueue = select.kqueue()
        self.events = []
        self.fd_to_object = {}
        for _, obj in objects:
            r_event = select.kevent(
                obj.fileno(), filter=select.KQ_FILTER_READ,
                flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE)
            self.events.append(r_event)
            w_event = select.kevent(
                obj.fileno(), filter=select.KQ_FILTER_WRITE,
                flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE)
            self.events.append(w_event)
            self.fd_to_object[obj.fileno()] = obj

    def pop(self, host_name):
        rv = BasePoller.pop(self, host_name)
        if rv is not None:
            self.events = [e for e in self.events if e.ident != rv.fileno()]
            self.fd_to_object.pop(rv.fileno(), None)
        return rv

    def poll(self, timeout=None):
        rlist = []
        wlist = []
        events = self.kqueue.control(self.events, 128, timeout)
        for event in events:
            obj = self.fd_to_object.get(event.ident)
            if obj is None:
                continue
            if event.filter == select.KQ_FILTER_READ:
                rlist.append(obj)
            elif event.filter == select.KQ_FILTER_WRITE:
                wlist.append(obj)
        return rlist, wlist


class EpollPoller(BasePoller):
    is_supported = hasattr(select, 'epoll')

    def __init__(self, objects):
        BasePoller.__init__(self, objects)
        self.epoll = select.epoll()
        self.fd_to_object = {}
        for _, obj in objects:
            self.fd_to_object[obj.fileno()] = obj
            self.epoll.register(obj.fileno(), select.EPOLLIN | select.EPOLLOUT)

    def pop(self, host_name):
        rv = BasePoller.pop(self, host_name)
        if rv is not None:
            self.epoll.unregister(rv.fileno())
            self.fd_to_object.pop(rv.fileno(), None)
        return rv

    def poll(self, timeout=None):
        # epoll takes seconds; -1 means block forever.
        if timeout is None:
            timeout = -1
        rlist = []
        wlist = []
        for fd, event in self.epoll.poll(timeout):
            obj = self.fd_to_object[fd]
            if event & select.EPOLLIN:
                rlist.append(obj)
            elif event & select.EPOLLOUT:
                wlist.append(obj)
        return rlist, wlist


#: Best available poller for this platform, in preference order.
supported_pollers = [poller for poller in [EpollPoller, KQueuePoller,
                                           PollPoller, SelectPoller]
                     if poller.is_supported]
poller = supported_pollers[0]
PENDING_STATE = 0
RESOLVED_STATE = 1


class Promise(object):
    """A promise object.  You can access ``promise.value`` to get the
    resolved value.  Here is one example::

        p = Promise()
        assert p.is_pending
        assert not p.is_resolved
        assert p.value is None
        p.resolve('value')
        assert not p.is_pending
        assert p.is_resolved
        assert p.value == 'value'
    """

    def __init__(self):
        #: the value for this promise if it's resolved
        self.value = None
        self._state = PENDING_STATE
        self._callbacks = []

    def resolve(self, value):
        """Resolves with value.

        :raises RuntimeError: if the promise is already resolved.
        """
        if self._state != PENDING_STATE:
            raise RuntimeError('Promise is no longer pending.')
        self.value = value
        self._state = RESOLVED_STATE
        # Fire the callbacks, then drop the list so a long-lived
        # resolved promise does not keep them (and whatever they close
        # over) alive.
        callbacks, self._callbacks = self._callbacks, []
        for callback in callbacks:
            callback(value)

    @property
    def is_resolved(self):
        """Return `True` if the promise is resolved."""
        return self._state == RESOLVED_STATE

    @property
    def is_pending(self):
        """Return `True` if the promise is pending."""
        return self._state == PENDING_STATE

    def then(self, on_resolve=None):
        """Add one callback that is called with the resolved value when
        the promise is resolved, and return the promise itself.  One
        demo::

            p = Promise()
            d = {}
            p.then(lambda v: d.setdefault('key', v))
            p.resolve('value')
            assert p.value == 'value'
            assert d['key'] == 'value'
        """
        if on_resolve is not None:
            if self._state == PENDING_STATE:
                self._callbacks.append(on_resolve)
            elif self._state == RESOLVED_STATE:
                # Already resolved: invoke immediately.
                on_resolve(self.value)
        return self

    def __repr__(self):
        if self._state == PENDING_STATE:
            v = '(pending)'
        else:
            v = repr(self.value)
        return '<%s %s>' % (
            self.__class__.__name__,
            v,
        )
One demo:: 47 | 48 | p = Promise() 49 | d = {} 50 | p.then(lambda v: d.setdefault('key', v)) 51 | p.resolve('value') 52 | assert p.value == 'value' 53 | assert d['key'] == 'value' 54 | """ 55 | if on_resolve is not None: 56 | if self._state == PENDING_STATE: 57 | self._callbacks.append(on_resolve) 58 | elif self._state == RESOLVED_STATE: 59 | on_resolve(self.value) 60 | return self 61 | 62 | def __repr__(self): 63 | if self._state == PENDING_STATE: 64 | v = '(pending)' 65 | else: 66 | v = repr(self.value) 67 | return '<%s %s>' % ( 68 | self.__class__.__name__, 69 | v, 70 | ) 71 | -------------------------------------------------------------------------------- /rc/redis_clients.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import socket 3 | import errno 4 | from itertools import izip 5 | 6 | from redis import StrictRedis 7 | from redis.client import list_or_args 8 | from redis.exceptions import ConnectionError 9 | try: 10 | from redis.exceptions import TimeoutError 11 | except ImportError: 12 | TimeoutError = ConnectionError 13 | 14 | from rc.poller import poller 15 | 16 | 17 | class BaseRedisClient(StrictRedis): 18 | pass 19 | 20 | 21 | class RedisClient(BaseRedisClient): 22 | pass 23 | 24 | 25 | class RedisClusterClient(BaseRedisClient): 26 | 27 | def __init__(self, connection_pool, max_concurrency=64, 28 | poller_timeout=1.0): 29 | BaseRedisClient.__init__(self, connection_pool=connection_pool) 30 | self.max_concurrency = max_concurrency 31 | self.poller_timeout = poller_timeout 32 | 33 | def execute_command(self, *args, **options): 34 | command_name = args[0] 35 | command_args = args[1:] 36 | connection_pool = self.connection_pool 37 | router = connection_pool.cluster.router 38 | host_name = router.get_host_for_command(command_name, command_args) 39 | connection = connection_pool.get_connection(command_name, host_name) 40 | try: 41 | connection.send_command(*args) 42 | return 
self.parse_response(connection, command_name, **options) 43 | except (ConnectionError, TimeoutError) as e: 44 | connection.disconnect() 45 | if not connection.retry_on_timeout and isinstance(e, TimeoutError): 46 | raise 47 | connection.send_command(*args) 48 | return self.parse_response(connection, command_name, **options) 49 | finally: 50 | connection_pool.release(connection) 51 | 52 | def delete(self, name): 53 | """We just support one key delete for now.""" 54 | names = [name] 55 | return self.execute_command('DEL', *names) 56 | 57 | def mdelete(self, *names): 58 | commands = [] 59 | for name in names: 60 | commands.append(('DEL', name)) 61 | results = self._execute_multi_command_with_poller('DEL', commands) 62 | return sum(results.values()) 63 | 64 | def msetex(self, mapping, time): 65 | commands = [] 66 | for name, value in mapping.iteritems(): 67 | commands.append(('SETEX', name, time, value)) 68 | results = self._execute_multi_command_with_poller('SETEX', commands) 69 | return all(results.values()) 70 | 71 | def mget(self, keys, *args): 72 | args = list_or_args(keys, args) 73 | commands = [] 74 | for arg in args: 75 | commands.append(('MGET', arg)) 76 | results = self._execute_multi_command_with_poller('MGET', commands) 77 | return [results[k] for k in args] 78 | 79 | def _execute_multi_command_with_poller(self, command_name, commands): 80 | connection_pool = self.connection_pool 81 | router = connection_pool.cluster.router 82 | # put command to the corresponding command buffer 83 | bufs = {} 84 | for args in commands: 85 | host_name = router.get_host_for_key(args[1]) 86 | buf = self._get_command_buffer(bufs, command_name, host_name) 87 | buf.enqueue_command(args) 88 | # poll all results back with max concurrency 89 | results = {} 90 | remaining_buf_items = bufs.items() 91 | while remaining_buf_items: 92 | buf_items = remaining_buf_items[:self.max_concurrency] 93 | remaining_buf_items = remaining_buf_items[self.max_concurrency:] 94 | bufs_poll = 
class CommandBuffer(object):
    """The command buffer is used for sending and fetching multi key command
    related data.

    One buffer wraps one connection to one host.  Commands are queued via
    :meth:`enqueue_command`; the poller calls :meth:`send_pending_request`
    whenever the socket is writable and :meth:`fetch_response` once the
    whole request has been flushed.
    """

    def __init__(self, host_name, connection, command_name):
        self.host_name = host_name
        self.connection = connection
        self.command_name = command_name
        # commands queued but not yet serialized for sending
        self.commands = []
        # commands whose bytes are on the wire; responses still expected
        self.pending_commands = []
        # raw byte chunks awaiting a writable socket
        self._send_buf = []

        connection.connect()

    def assert_open(self):
        # guard: the poller must never touch a disconnected buffer
        if self.connection._sock is None:
            raise ValueError('Can not operate on closed file.')

    def enqueue_command(self, command):
        """Queue one command argument tuple, e.g. ``('MGET', key)``."""
        self.commands.append(command)

    def fileno(self):
        # expose the socket fd so select/poll can wait on this buffer
        self.assert_open()
        return self.connection._sock.fileno()

    @property
    def has_pending_request(self):
        # anything still to write, either unpacked or partially sent?
        return self._send_buf or self.commands

    def _try_send_buffer(self):
        """Write as much of ``_send_buf`` as the socket accepts right now.

        The socket is switched to non-blocking for the duration; EAGAIN /
        EWOULDBLOCK simply leave the unsent remainder buffered for the
        next writable event.  Any real error tears the connection down.
        """
        sock = self.connection._sock
        try:
            timeout = sock.gettimeout()
            sock.setblocking(False)
            try:
                for i, item in enumerate(self._send_buf):
                    sent = 0
                    while 1:
                        try:
                            sent = sock.send(item)
                        except socket.error, e:
                            if e.errno == errno.EAGAIN:
                                continue
                            elif e.errno == errno.EWOULDBLOCK:
                                break
                            raise
                        break
                    if sent < len(item):
                        # partial write: keep only the unsent tail and stop
                        self._send_buf[:i + 1] = [item[sent:]]
                        break
                else:
                    # everything went out; drop the buffer in place
                    del self._send_buf[:]
            finally:
                sock.settimeout(timeout)
        except socket.timeout:
            self.connection.disconnect()
            raise TimeoutError('Timeout writing to socket (%s)'
                               % self.host_name)
        except socket.error:
            self.connection.disconnect()
            raise ConnectionError('Error while writing to socket (%s)'
                                  % self.host_name)
        except:
            self.connection.disconnect()
            raise

    def batch_commands(self, commands):
        """Collapse single-key commands into one variadic command.

        ``[('MGET', k1), ('MGET', k2)]`` becomes ``[('MGET', k1, k2)]``
        so the whole batch costs a single round trip.
        """
        args = []
        for command in commands:
            command_args = command[1:]
            args.extend(command_args)
        if args:
            return [(self.command_name,) + tuple(args)]
        else:
            return []

    def send_pending_request(self):
        """Pack queued commands onto the wire and try to flush them.

        Returns True once the send buffer has been fully drained.
        """
        self.assert_open()
        if self.commands:
            # MGET and DEL are variadic, so the queue can be sent as one
            # batched command instead of one command per key
            if self.command_name in ('MGET', 'DEL'):
                commands = self.batch_commands(self.commands)
            else:
                commands = self.commands
            self._send_buf.extend(self.connection.pack_commands(commands))
            self.pending_commands = self.commands
            self.commands = []
        if not self._send_buf:
            return True
        self._try_send_buffer()
        return not self._send_buf

    def fetch_response(self, client):
        """Read the replies and map them back onto the original keys.

        Returns ``{key: reply}`` for every command this buffer sent.
        Must only be called once :attr:`has_pending_request` is False.
        """
        self.assert_open()
        if self.has_pending_request:
            raise RuntimeError('There are pending requests.')
        if self.command_name in ('MGET', 'DEL'):
            # batched commands produce a single reply for the whole batch
            rv = client.parse_response(self.connection, self.command_name)
        else:
            rv = []
            for i in xrange(len(self.pending_commands)):
                rv.append(client.parse_response(
                    self.connection, self.command_name))
        if self.command_name == 'DEL':
            # DEL replies with one integer count; expand it into a per-key
            # 1/0 list.  NOTE(review): this attributes the deletions to the
            # first keys in the batch, which is an approximation — confirm
            # callers only use the aggregate sum.
            rv = [1] * rv + [0] * (len(self.pending_commands) - rv)
        pending_keys = map(lambda c: c[1], self.pending_commands)
        return dict(izip(pending_keys, rv))
    def __init__(self, hosts, router_cls=None, router_options=None,
                 pool_cls=None, pool_options=None):
        # defaults: CRC32 hash routing and the plain redis-py pool
        if router_cls is None:
            router_cls = RedisCRC32HashRouter
        if pool_cls is None:
            pool_cls = ConnectionPool
        if pool_options is None:
            pool_options = {}
        if router_options is None:
            router_options = {}
        self.router_cls = router_cls
        self.router_options = router_options
        self.pool_cls = pool_cls
        self.pool_options = pool_options
        self.hosts = {}
        for host_name, host_config in hosts.iteritems():
            self.hosts[host_name] = HostConfig(host_name, **host_config)
        self.router = self.router_cls(self.hosts, **router_options)
        #: connection pools of all hosts
        self._pools = {}

    def get_pool_of_host(self, host_name):
        """Returns the connection pool for a certain host.

        Pools are created lazily from the host's :class:`HostConfig` and
        cached in ``self._pools`` for reuse.
        """
        pool = self._pools.get(host_name)
        if pool is not None:
            return pool
        else:
            host_config = self.hosts[host_name]
            pool_options = dict(self.pool_options)
            pool_options['db'] = host_config.db
            pool_options['password'] = host_config.password
            # unix socket takes precedence over TCP host/port
            if host_config.unix_socket_path is not None:
                pool_options['path'] = host_config.unix_socket_path
                pool_options['connection_class'] = UnixDomainSocketConnection
            else:
                pool_options['host'] = host_config.host
                pool_options['port'] = host_config.port
                if host_config.ssl:
                    if SSLConnection is None:
                        raise RuntimeError('SSL connections are not supported')
                    pool_options['connection_class'] = SSLConnection
                    pool_options.update(host_config.ssl_options or {})
            pool = self.pool_cls(**pool_options)
            self._pools[host_name] = pool
            return pool

    def get_client(self, max_concurrency=64, poller_timeout=1.0):
        """Returns a cluster client. This client can automatically route
        the requests to the corresponding node.

        :param max_concurrency: defines how many parallel queries can happen
                                at the same time
        :param poller_timeout: for multi key commands we use a select loop as
                               the parallel query implementation, use this
                               to specify timeout for underlying pollers
                               (select/poll/kqueue/epoll).
        """
        return RedisClusterClient(
            RedisClusterPool(self), max_concurrency, poller_timeout)


class RedisClusterPool(object):
    """The cluster pool works with the cluster client to get the correct pool.
    """

    def __init__(self, cluster):
        self.cluster = cluster

    def get_connection(self, command_name, host_name):
        """Check a connection out of the pool that owns ``host_name``."""
        real_pool = self.cluster.get_pool_of_host(host_name)
        connection = real_pool.get_connection(command_name)
        # tag the connection with its birth pool (weakly, so the pool can
        # still be garbage-collected) so release() can hand it back later
        connection.__birth_pool = weakref.ref(real_pool)
        return connection

    def release(self, connection):
        """Return a connection to the pool it was checked out from."""
        real_pool = connection.__birth_pool()
        if real_pool is not None:
            real_pool.release(connection)
class RedisCRC32HashRouter(BaseRedisRouter):
    """Use crc32 for hash partitioning."""

    def __init__(self, hosts):
        BaseRedisRouter.__init__(self, hosts)
        # deterministic ordering so every process maps keys identically
        self._sorted_host_names = sorted(hosts.keys())

    def get_host_for_key(self, key):
        """Pick a host via ``crc32(key) % number_of_hosts``.

        Unicode keys are UTF-8 encoded first; everything else is coerced
        with ``str`` so crc32 always sees a byte string.
        """
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        else:
            key = str(key)
        pos = crc32(key) % len(self._sorted_host_names)
        return self._sorted_host_names[pos]


class RedisConsistentHashRouter(BaseRedisRouter):
    """Use ketama for hash partitioning."""

    def __init__(self, hosts):
        BaseRedisRouter.__init__(self, hosts)
        # ketama ring built over the HostConfig objects
        self._hashring = HashRing(hosts.values())

    def get_host_for_key(self, key):
        """Look the key up on the ketama ring; raise if no node matches."""
        node = self._hashring.get_node(key)
        if node is None:
            raise RuntimeError('Can not find a host using consistent hash')
        return node.host_name
class BaseSerializer(object):
    """Abstract serializer contract; subclass and implement both hooks."""

    def dumps(self, obj):
        """Serialize ``obj`` into a string suitable for storing in redis."""
        raise NotImplementedError()

    def loads(self, string):
        """Deserialize a string previously produced by :meth:`dumps`."""
        raise NotImplementedError()


class PickleSerializer(BaseSerializer):
    """Serializer backed by the pickle protocol."""

    def dumps(self, obj):
        """Pickle ``obj`` into a string."""
        return pickle.dumps(obj)

    def loads(self, string):
        """Unpickle ``string``; a missing value (``None``) stays ``None``."""
        if string is not None:
            return pickle.loads(string)


class JSONSerializer(BaseSerializer):
    """Serializer backed by JSON."""

    def dumps(self, obj):
        """Encode ``obj`` as a JSON document."""
        return json.dumps(obj)

    def loads(self, string):
        """Decode a JSON document; a missing value (``None``) stays ``None``."""
        if string is not None:
            return json.loads(string)
def u_(s):
    """Coerce any value to a ``unicode`` string (byte strings are assumed
    to be UTF-8 encoded)."""
    if isinstance(s, unicode):
        return s
    if not isinstance(s, str):
        s = str(s)
    return unicode(s, 'utf-8')


def generate_key_for_cached_func(key_prefix, func, *args, **kwargs):
    """Generate key for cached function. The cache key will be created with
    the name of the module + the name of the function + function arguments.
    """
    if key_prefix is None:
        key_prefix = []
    else:
        key_prefix = [key_prefix]
    module_name = func.__module__
    func_name = func.__name__
    # handle keyword arguments
    kwargs = kwargs.items()
    if kwargs:
        # sort by argument name so keyword order never changes the key
        kwargs.sort(key=lambda t: t[0])
        kwargs = map(lambda t: (u_(t[0]), u_(t[1])), kwargs)
        kwargs = map(lambda t: u'='.join(t), kwargs)
    # handle positional arguments
    args = map(lambda arg: u_(arg), args)
    # join them together
    # NOTE(review): ' ' is both the separator and allowed inside stringified
    # arguments, so args containing spaces can collide with other keys —
    # confirm this is acceptable for the intended workloads.
    return u' '.join(key_prefix + [module_name, func_name] + args + kwargs)
class RedisServer(object):
    """Spawn a throwaway redis-server listening on a unix socket.

    The configuration is fed over stdin: no TCP port, the given unix
    socket, and persistence disabled.  The constructor blocks until the
    server actually accepts connections.
    """

    def __init__(self, socket_path):
        self.socket_path = socket_path
        self.redis = Popen(['redis-server', '-'], stdin=PIPE, stdout=devnull)
        self.redis.stdin.write('''
port 0
unixsocket %s
save ""''' % socket_path)
        self.redis.stdin.flush()
        # closing stdin tells redis-server the config is complete
        self.redis.stdin.close()
        # busy-wait until the socket is connectable; the server may need a
        # moment to start up
        while 1:
            try:
                s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                s.connect(socket_path)
            except IOError:
                time.sleep(0.05)
                continue
            else:
                break

    def shutdown(self):
        """Kill the server process and remove its socket file."""
        self.redis.kill()
        self.redis.wait()
        os.remove(self.socket_path)

    def __del__(self):
        # best-effort cleanup in case the fixture finalizer never ran
        try:
            self.shutdown()
        except:
            pass
@pytest.fixture(scope='session')
def redis_unix_socket_path(redis_hosts):
    # any host will do: every server in the fixture listens on a unix socket
    return redis_hosts.values()[0]['unix_socket_path']


def test_null_cache():
    """NullCache accepts every call but never stores anything."""
    cache = NullCache()
    with pytest.raises(NotImplementedError):
        cache.client
    assert cache.get('key') is None
    assert cache.set('key', 'value')
    assert cache.delete('key')
    assert cache.get_many('key1', 'key2') == [None, None]
    assert cache.set_many({'key1': 'value1', 'key2': 'value2'})
    assert cache.delete_many('key1', 'key2')


def test_fakeredis_cache():
    """FakeRedisCache behaves like a real cache, backed by fakeredis."""
    cache = FakeRedisCache()
    assert cache.get('key') is None
    assert cache.set('key', 'value')
    assert cache.get('key') == 'value'
    assert cache.delete('key')
    assert cache.get_many('key1', 'key2') == [None, None]
    assert cache.set_many({'key1': 'value1', 'key2': 'value2'})
    assert cache.delete_many('key1', 'key2')
def test_cache_namespace(redis_unix_socket_path):
    """Caches differing only in namespace must not see each other's keys."""
    cache01 = Cache(redis_options={'unix_socket_path': redis_unix_socket_path})
    cache02 = Cache(
        namespace='test:',
        redis_options={'unix_socket_path': redis_unix_socket_path})
    assert cache01.set('key', 'value')
    assert cache01.get('key') == 'value'
    # same backing server, different namespace prefix -> invisible here
    assert cache02.get('key') is None
10' 100 | assert foo.load_method('name', offset=10) == 'load name 10' 101 | 102 | class Foo(object): 103 | @cache.cache() 104 | def load_method(self, name, offset): 105 | return ' '.join(('load02', name, str(offset))) 106 | foo = Foo() 107 | assert foo.load_method('name', 10) == 'load name 10' 108 | assert cache.invalidate(foo.load_method, 'name', 10) 109 | assert foo.load_method('name', 10) == 'load02 name 10' 110 | 111 | 112 | def test_cache_decorator_include_self(redis_unix_socket_path): 113 | cache = Cache(redis_options={'unix_socket_path': redis_unix_socket_path}) 114 | 115 | class User(object): 116 | def __init__(self, user_id): 117 | self.user_id = user_id 118 | 119 | def __str__(self): 120 | return '