├── .gitignore ├── README ├── memcache.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | build 4 | dist 5 | 6 | *.egg-info 7 | 8 | .DS_Store 9 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Important Deprecation Note: This was a py3 compatibility port of https://github.com/linsomniac/python-memcached 2 | which since then has added py23. So you should use that. 3 | 4 | --- 5 | 6 | Python3 port for pure python memcached client library, ported and being 7 | kept up to date by https://github.com/eguven 8 | 9 | Please report issues and submit code changes to the github repository at: 10 | 11 | https://github.com/eguven/python3-memcached 12 | 13 | Below is the original README content. 14 | 15 | This package was originally written by Evan Martin of Danga. 16 | Sean Reifschneider of tummy.com, ltd. has taken over maintenance of it. 17 | 18 | This software is a 100% Python interface to the memcached memory cache 19 | daemon. It is the client side software which allows storing values in one 20 | or more, possibly remote, memcached servers. Search google for memcached 21 | for more information. 22 | -------------------------------------------------------------------------------- /memcache.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | client module for memcached (memory cache daemon) 5 | 6 | Overview 7 | ======== 8 | 9 | See U{the MemCached homepage} for more about memcached. 

Usage summary
=============

This should give you a feel for how this module operates::

    import memcache
    mc = memcache.Client(['127.0.0.1:11211'], debug=0)

    mc.set("some_key", "Some value")
    value = mc.get("some_key")

    mc.set("another_key", 3)
    mc.delete("another_key")

    mc.set("key", "1")   # note that the key used for incr/decr must be a string.
    mc.incr("key")
    mc.decr("key")

The standard way to use memcache with a database is like this::

    key = derive_key(obj)
    obj = mc.get(key)
    if not obj:
        obj = backend_api.get(...)
        mc.set(key, obj)

    # we now have obj, and future passes through this code
    # will use the object from the cache.

Detailed Documentation
======================

More detailed documentation is available in the L{Client} class.
"""

import sys
import socket
import time
import os
import re
import pickle

from io import StringIO, BytesIO

from binascii import crc32   # zlib version is not cross-platform
def cmemcache_hash(key):
    """Hash a bytes key the way the cmemcache client does.

    Folds the CRC32 of ``key`` down to 15 bits; a result of 0 maps to 1
    because a zero server hash is unusable for bucket selection
    (see _get_server, which takes serverhash % len(buckets)).
    """
    return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1)
# Module-level hook so callers can swap the hashing strategy.
serverHashFunction = cmemcache_hash

def useOldServerHashFunction():
    """Use the old python-memcache server hash function (raw crc32)."""
    global serverHashFunction
    serverHashFunction = crc32

try:
    from zlib import compress, decompress
    _supports_compress = True
except ImportError:
    _supports_compress = False
    # quickly define a decompress just in case we recv compressed data.
71 | def decompress(val): 72 | raise _Error("received compressed data but I don't support compression (import error)") 73 | 74 | invalid_key_characters = ''.join(map(chr, list(range(33)) + [127])) 75 | 76 | # Original author: Evan Martin of Danga Interactive 77 | __author__ = "Sean Reifschneider " 78 | __version__ = "1.51" 79 | __copyright__ = "Copyright (C) 2003 Danga Interactive" 80 | # http://en.wikipedia.org/wiki/Python_Software_Foundation_License 81 | __license__ = "Python Software Foundation License" 82 | 83 | SERVER_MAX_KEY_LENGTH = 250 84 | # Storing values larger than 1MB requires recompiling memcached. If you do, 85 | # this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N" 86 | # after importing this module. 87 | SERVER_MAX_VALUE_LENGTH = 1024*1024 88 | 89 | class _Error(Exception): 90 | pass 91 | 92 | 93 | class _ConnectionDeadError(Exception): 94 | pass 95 | 96 | 97 | from threading import local 98 | 99 | 100 | _DEAD_RETRY = 30 # number of seconds before retrying a dead server. 101 | _SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout. 102 | 103 | 104 | class Client(local): 105 | """ 106 | Object representing a pool of memcache servers. 107 | 108 | See L{memcache} for an overview. 109 | 110 | In all cases where a key is used, the key can be either: 111 | 1. A simple hashable type (string, integer, etc.). 112 | 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid 113 | making this module calculate a hash value. You may prefer, for 114 | example, to keep all of a given user's objects on the same memcache 115 | server, so you could use the user's unique id as the hash value. 
116 | 117 | @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog 118 | @group Insertion: set, add, replace, set_multi 119 | @group Retrieval: get, get_multi 120 | @group Integers: incr, decr 121 | @group Removal: delete, delete_multi 122 | @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\ 123 | set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi 124 | """ 125 | _FLAG_PICKLE = 1<<0 126 | _FLAG_INTEGER = 1<<1 127 | _FLAG_LONG = 1<<2 128 | _FLAG_COMPRESSED = 1<<3 129 | 130 | _SERVER_RETRIES = 10 # how many times to try finding a free server. 131 | 132 | # exceptions for Client 133 | class MemcachedKeyError(Exception): 134 | pass 135 | class MemcachedKeyLengthError(MemcachedKeyError): 136 | pass 137 | class MemcachedKeyCharacterError(MemcachedKeyError): 138 | pass 139 | class MemcachedKeyNoneError(MemcachedKeyError): 140 | pass 141 | class MemcachedKeyTypeError(MemcachedKeyError): 142 | pass 143 | class MemcachedStringEncodingError(Exception): 144 | pass 145 | 146 | def __init__(self, servers, debug=0, pickleProtocol=0, 147 | pickler=pickle.Pickler, unpickler=pickle.Unpickler, 148 | pload=None, pid=None, 149 | server_max_key_length=SERVER_MAX_KEY_LENGTH, 150 | server_max_value_length=SERVER_MAX_VALUE_LENGTH, 151 | dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, 152 | cache_cas = False, flush_on_reconnect=0, check_keys=True): 153 | """ 154 | Create a new Client object with the given list of servers. 155 | 156 | @param servers: C{servers} is passed to L{set_servers}. 157 | @param debug: whether to display error messages when a server can't be 158 | contacted. 159 | @param pickleProtocol: number to mandate protocol used by (c)Pickle. 160 | @param pickler: optional override of default Pickler to allow subclassing. 161 | @param unpickler: optional override of default Unpickler to allow subclassing. 162 | @param pload: optional persistent_load function to call on pickle loading. 
163 | Useful for cPickle since subclassing isn't allowed. 164 | @param pid: optional persistent_id function to call on pickle storing. 165 | Useful for cPickle since subclassing isn't allowed. 166 | @param dead_retry: number of seconds before retrying a blacklisted 167 | server. Default to 30 s. 168 | @param socket_timeout: timeout in seconds for all calls to a server. Defaults 169 | to 3 seconds. 170 | @param cache_cas: (default False) If true, cas operations will be 171 | cached. WARNING: This cache is not expired internally, if you have 172 | a long-running process you will need to expire it manually via 173 | client.reset_cas(), or the cache can grow unlimited. 174 | @param server_max_key_length: (default SERVER_MAX_KEY_LENGTH) 175 | Data that is larger than this will not be sent to the server. 176 | @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH) 177 | Data that is larger than this will not be sent to the server. 178 | @param flush_on_reconnect: optional flag which prevents a scenario that 179 | can cause stale data to be read: If there's more than one memcached 180 | server and the connection to one is interrupted, keys that mapped to 181 | that server will get reassigned to another. If the first server comes 182 | back, those keys will map to it again. If it still has its data, get()s 183 | can read stale data that was overwritten on another server. This flag 184 | is off by default for backwards compatibility. 185 | @param check_keys: (default True) If True, the key is checked to 186 | ensure it is the correct length and composed of the right characters. 
187 | """ 188 | local.__init__(self) 189 | self.debug = debug 190 | self.dead_retry = dead_retry 191 | self.socket_timeout = socket_timeout 192 | self.flush_on_reconnect = flush_on_reconnect 193 | self.set_servers(servers) 194 | self.stats = {} 195 | self.cache_cas = cache_cas 196 | self.reset_cas() 197 | self.do_check_key = check_keys 198 | 199 | # Allow users to modify pickling/unpickling behavior 200 | self.pickleProtocol = pickleProtocol 201 | self.pickler = pickler 202 | self.unpickler = unpickler 203 | self.persistent_load = pload 204 | self.persistent_id = pid 205 | self.server_max_key_length = server_max_key_length 206 | self.server_max_value_length = server_max_value_length 207 | 208 | # figure out the pickler style 209 | file = StringIO() 210 | try: 211 | pickler = self.pickler(file, protocol = self.pickleProtocol) 212 | self.picklerIsKeyword = True 213 | except TypeError: 214 | self.picklerIsKeyword = False 215 | 216 | 217 | def reset_cas(self): 218 | """ 219 | Reset the cas cache. This is only used if the Client() object 220 | was created with "cache_cas=True". If used, this cache does not 221 | expire internally, so it can grow unbounded if you do not clear it 222 | yourself. 223 | """ 224 | self.cas_ids = {} 225 | 226 | 227 | def set_servers(self, servers): 228 | """ 229 | Set the pool of servers used by this client. 230 | 231 | @param servers: an array of servers. 232 | Servers can be passed in two forms: 233 | 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 234 | 2. Tuples of the form C{("host:port", weight)}, where C{weight} is 235 | an integer weight value. 236 | """ 237 | self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry, 238 | socket_timeout=self.socket_timeout, 239 | flush_on_reconnect=self.flush_on_reconnect) 240 | for s in servers] 241 | self._init_buckets() 242 | 243 | def get_stats(self, stat_args = None): 244 | '''Get statistics from each of the servers. 
245 | 246 | @param stat_args: Additional arguments to pass to the memcache 247 | "stats" command. 248 | 249 | @return: A list of tuples ( server_identifier, stats_dictionary ). 250 | The dictionary contains a number of name/value pairs specifying 251 | the name of the status field and the string value associated with 252 | it. The values are not converted from strings. 253 | ''' 254 | data = [] 255 | for s in self.servers: 256 | if not s.connect(): continue 257 | if s.family == socket.AF_INET: 258 | name = '%s:%s (%s)' % ( s.ip, s.port, s.weight ) 259 | elif s.family == socket.AF_INET6: 260 | name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight ) 261 | else: 262 | name = 'unix:%s (%s)' % ( s.address, s.weight ) 263 | if not stat_args: 264 | s.send_cmd(b'stats') 265 | elif isinstance(stat_args, bytes): 266 | s.send_cmd(b'stats ' + stat_args) 267 | else: 268 | s.send_cmd(b'stats ' + str(stat_args).encode('utf-8')) 269 | serverData = {} 270 | data.append(( name.encode('ascii'), serverData )) 271 | readline = s.readline 272 | while 1: 273 | line = readline() 274 | if not line or line.strip() == b'END': break 275 | stats = line.decode('ascii').split(' ', 2) 276 | serverData[stats[1].encode('ascii')] = stats[2].encode('ascii') 277 | 278 | return(data) 279 | 280 | def get_slabs(self): 281 | data = [] 282 | for s in self.servers: 283 | if not s.connect(): continue 284 | if s.family == socket.AF_INET: 285 | name = '%s:%s (%s)' % ( s.ip, s.port, s.weight ) 286 | elif s.family == socket.AF_INET6: 287 | name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight ) 288 | else: 289 | name = 'unix:%s (%s)' % ( s.address, s.weight ) 290 | serverData = {} 291 | data.append(( name, serverData )) 292 | s.send_cmd('stats items') 293 | readline = s.readline 294 | while 1: 295 | line = readline() 296 | if not line or line.strip() == b'END': break 297 | item = line.split(' ', 2) 298 | #0 = STAT, 1 = ITEM, 2 = Value 299 | slab = item[1].split(':', 2) 300 | #0 = items, 1 = Slab #, 2 = Name 301 | if 
slab[1] not in serverData: 302 | serverData[slab[1]] = {} 303 | serverData[slab[1]][slab[2]] = item[2] 304 | return data 305 | 306 | def flush_all(self): 307 | """Expire all data in memcache servers that are reachable.""" 308 | for s in self.servers: 309 | if not s.connect(): continue 310 | s.flush() 311 | 312 | def debuglog(self, str): 313 | if self.debug: 314 | sys.stderr.write("MemCached: %s\n" % str) 315 | 316 | def _statlog(self, func): 317 | if func not in self.stats: 318 | self.stats[func] = 1 319 | else: 320 | self.stats[func] += 1 321 | 322 | def forget_dead_hosts(self): 323 | """ 324 | Reset every host in the pool to an "alive" state. 325 | """ 326 | for s in self.servers: 327 | s.deaduntil = 0 328 | 329 | def _init_buckets(self): 330 | self.buckets = [] 331 | for server in self.servers: 332 | for i in range(server.weight): 333 | self.buckets.append(server) 334 | 335 | def _get_server(self, key): 336 | if isinstance(key, tuple): 337 | serverhash, key = key 338 | else: 339 | serverhash = serverHashFunction(key.encode('utf-8')) 340 | 341 | for i in range(Client._SERVER_RETRIES): 342 | server = self.buckets[serverhash % len(self.buckets)] 343 | if server.connect(): 344 | #print "(using server %s)" % server, 345 | return server, key 346 | serverhash = serverHashFunction((str(serverhash) + str(i)).encode("ascii")) 347 | return None, None 348 | 349 | def disconnect_all(self): 350 | for s in self.servers: 351 | s.close_socket() 352 | 353 | def delete_multi(self, keys, time=0, key_prefix=''): 354 | ''' 355 | Delete multiple keys in the memcache doing just one query. 
356 | 357 | >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}) 358 | >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'} 359 | 1 360 | >>> mc.delete_multi(['key1', 'key2']) 361 | 1 362 | >>> mc.get_multi(['key1', 'key2']) == {} 363 | 1 364 | 365 | 366 | This method is recommended over iterated regular L{delete}s as it reduces total latency, since 367 | your app doesn't have to wait for each round-trip of L{delete} before sending 368 | the next one. 369 | 370 | @param keys: An iterable of keys to clear 371 | @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. 372 | @param key_prefix: Optional string to prepend to each key when sending to memcache. 373 | See docs for L{get_multi} and L{set_multi}. 374 | 375 | @return: 1 if no failure in communication with any memcacheds. 376 | @rtype: int 377 | 378 | ''' 379 | 380 | self._statlog('delete_multi') 381 | 382 | server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix) 383 | 384 | # send out all requests on each server before reading anything 385 | dead_servers = [] 386 | 387 | rc = 1 388 | for server in server_keys.keys(): 389 | bigcmd = [] 390 | write = bigcmd.append 391 | if time != None: 392 | for key in server_keys[server]: # These are mangled keys 393 | write("delete %s %d\r\n" % (key, time)) 394 | else: 395 | for key in server_keys[server]: # These are mangled keys 396 | write("delete %s\r\n" % key) 397 | try: 398 | server.send_cmds(''.join(bigcmd)) 399 | except socket.error as msg: 400 | rc = 0 401 | if isinstance(msg, tuple): msg = msg[1] 402 | server.mark_dead(msg) 403 | dead_servers.append(server) 404 | 405 | # if any servers died on the way, don't expect them to respond. 
406 | for server in dead_servers: 407 | del server_keys[server] 408 | 409 | for server, keys in server_keys.items(): 410 | try: 411 | for key in keys: 412 | server.expect(b"DELETED") 413 | except socket.error as msg: 414 | if isinstance(msg, tuple): msg = msg[1] 415 | server.mark_dead(msg) 416 | rc = 0 417 | return rc 418 | 419 | def delete(self, key, time=0): 420 | '''Deletes a key from the memcache. 421 | 422 | @return: Nonzero on success. 423 | @param time: number of seconds any subsequent set / update commands 424 | should fail. Defaults to None for no delay. 425 | @rtype: int 426 | ''' 427 | if self.do_check_key: 428 | self.check_key(key) 429 | server, key = self._get_server(key) 430 | if not server: 431 | return 0 432 | self._statlog('delete') 433 | if time != None and time != 0: 434 | cmd = "delete %s %d" % (key, time) 435 | else: 436 | cmd = "delete %s" % key 437 | 438 | try: 439 | server.send_cmd(cmd.encode('utf-8')) 440 | line = server.readline() 441 | if line and line.strip() in [b'DELETED', b'NOT_FOUND']: return 1 442 | self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s' 443 | % repr(line)) 444 | except socket.error as msg: 445 | if isinstance(msg, tuple): msg = msg[1] 446 | server.mark_dead(msg) 447 | return 0 448 | 449 | def incr(self, key, delta=1): 450 | """ 451 | Sends a command to the server to atomically increment the value 452 | for C{key} by C{delta}, or by 1 if C{delta} is unspecified. 453 | Returns None if C{key} doesn't exist on server, otherwise it 454 | returns the new value after incrementing. 455 | 456 | Note that the value for C{key} must already exist in the memcache, 457 | and it must be the string representation of an integer. 458 | 459 | >>> mc.set("counter", "20") # returns 1, indicating success 460 | 1 461 | >>> mc.incr("counter") 462 | 21 463 | >>> mc.incr("counter") 464 | 22 465 | 466 | Overflow on server is not checked. Be aware of values approaching 467 | 2**32. See L{decr}. 
468 | 469 | @param delta: Integer amount to increment by (should be zero or greater). 470 | @return: New value after incrementing. 471 | @rtype: int 472 | """ 473 | return self._incrdecr("incr", key, delta) 474 | 475 | def decr(self, key, delta=1): 476 | """ 477 | Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and 478 | new values are capped at 0. If server value is 1, a decrement of 2 479 | returns 0, not -1. 480 | 481 | @param delta: Integer amount to decrement by (should be zero or greater). 482 | @return: New value after decrementing or None on error. 483 | @rtype: int 484 | """ 485 | return self._incrdecr("decr", key, delta) 486 | 487 | def _incrdecr(self, cmd, key, delta): 488 | if self.do_check_key: 489 | self.check_key(key) 490 | server, key = self._get_server(key) 491 | if not server: 492 | return None 493 | self._statlog(cmd) 494 | cmd = "%s %s %d" % (cmd, key, delta) 495 | try: 496 | server.send_cmd(cmd) 497 | line = server.readline() 498 | if line == None or line.strip() ==b'NOT_FOUND': return None 499 | return int(line) 500 | except socket.error as msg: 501 | if isinstance(msg, tuple): msg = msg[1] 502 | server.mark_dead(msg) 503 | return None 504 | 505 | def add(self, key, val, time = 0, min_compress_len = 0): 506 | ''' 507 | Add new key with value. 508 | 509 | Like L{set}, but only stores in memcache if the key doesn't already exist. 510 | 511 | @return: Nonzero on success. 512 | @rtype: int 513 | ''' 514 | return self._set("add", key, val, time, min_compress_len) 515 | 516 | def append(self, key, val, time=0, min_compress_len=0): 517 | '''Append the value to the end of the existing key's value. 518 | 519 | Only stores in memcache if key already exists. 520 | Also see L{prepend}. 521 | 522 | @return: Nonzero on success. 
523 | @rtype: int 524 | ''' 525 | return self._set("append", key, val, time, min_compress_len) 526 | 527 | def prepend(self, key, val, time=0, min_compress_len=0): 528 | '''Prepend the value to the beginning of the existing key's value. 529 | 530 | Only stores in memcache if key already exists. 531 | Also see L{append}. 532 | 533 | @return: Nonzero on success. 534 | @rtype: int 535 | ''' 536 | return self._set("prepend", key, val, time, min_compress_len) 537 | 538 | def replace(self, key, val, time=0, min_compress_len=0): 539 | '''Replace existing key with value. 540 | 541 | Like L{set}, but only stores in memcache if the key already exists. 542 | The opposite of L{add}. 543 | 544 | @return: Nonzero on success. 545 | @rtype: int 546 | ''' 547 | return self._set("replace", key, val, time, min_compress_len) 548 | 549 | def set(self, key, val, time=0, min_compress_len=0): 550 | '''Unconditionally sets a key to a given value in the memcache. 551 | 552 | The C{key} can optionally be an tuple, with the first element 553 | being the server hash value and the second being the key. 554 | If you want to avoid making this module calculate a hash value. 555 | You may prefer, for example, to keep all of a given user's objects 556 | on the same memcache server, so you could use the user's unique 557 | id as the hash value. 558 | 559 | @return: Nonzero on success. 560 | @rtype: int 561 | @param time: Tells memcached the time which this value should expire, either 562 | as a delta number of seconds, or an absolute unix time-since-the-epoch 563 | value. See the memcached protocol docs section "Storage Commands" 564 | for more info on . We default to 0 == cache forever. 565 | @param min_compress_len: The threshold length to kick in auto-compression 566 | of the value using the zlib.compress() routine. If the value being cached is 567 | a string, then the length of the string is measured, else if the value is an 568 | object, then the length of the pickle result is measured. 
If the resulting 569 | attempt at compression yields a larger string than the input, then it is 570 | discarded. For backwards compatability, this parameter defaults to 0, 571 | indicating don't ever try to compress. 572 | ''' 573 | return self._set("set", key, val, time, min_compress_len) 574 | 575 | 576 | def cas(self, key, val, time=0, min_compress_len=0): 577 | '''Sets a key to a given value in the memcache if it hasn't been 578 | altered since last fetched. (See L{gets}). 579 | 580 | The C{key} can optionally be an tuple, with the first element 581 | being the server hash value and the second being the key. 582 | If you want to avoid making this module calculate a hash value. 583 | You may prefer, for example, to keep all of a given user's objects 584 | on the same memcache server, so you could use the user's unique 585 | id as the hash value. 586 | 587 | @return: Nonzero on success. 588 | @rtype: int 589 | @param time: Tells memcached the time which this value should expire, 590 | either as a delta number of seconds, or an absolute unix 591 | time-since-the-epoch value. See the memcached protocol docs section 592 | "Storage Commands" for more info on . We default to 593 | 0 == cache forever. 594 | @param min_compress_len: The threshold length to kick in 595 | auto-compression of the value using the zlib.compress() routine. If 596 | the value being cached is a string, then the length of the string is 597 | measured, else if the value is an object, then the length of the 598 | pickle result is measured. If the resulting attempt at compression 599 | yields a larger string than the input, then it is discarded. For 600 | backwards compatability, this parameter defaults to 0, indicating 601 | don't ever try to compress. 
602 | ''' 603 | return self._set("cas", key, val, time, min_compress_len) 604 | 605 | def _map_and_prefix_keys(self, key_iterable, key_prefix): 606 | """Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of 607 | prefixed key -> original key. 608 | 609 | 610 | """ 611 | # Check it just once ... 612 | key_extra_len=len(key_prefix) 613 | if key_prefix and self.do_check_key: 614 | self.check_key(key_prefix) 615 | 616 | # server (_Host) -> list of unprefixed server keys in mapping 617 | server_keys = {} 618 | 619 | prefixed_to_orig_key = {} 620 | # build up a list for each server of all the keys we want. 621 | for orig_key in key_iterable: 622 | if isinstance(orig_key, tuple): 623 | # Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on. 624 | # Ensure call to _get_server gets a Tuple as well. 625 | str_orig_key = str(orig_key[1]) 626 | server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key. 627 | else: 628 | str_orig_key = str(orig_key) # set_multi supports int / long keys. 629 | server, key = self._get_server(key_prefix + str_orig_key) 630 | 631 | # Now check to make sure key length is proper ... 632 | if self.do_check_key: 633 | self.check_key(str_orig_key, key_extra_len=key_extra_len) 634 | 635 | if not server: 636 | continue 637 | 638 | if server not in server_keys: 639 | server_keys[server] = [] 640 | server_keys[server].append(key) 641 | prefixed_to_orig_key[key] = orig_key 642 | 643 | return (server_keys, prefixed_to_orig_key) 644 | 645 | def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0): 646 | ''' 647 | Sets multiple keys in the memcache doing just one query. 

        >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
        >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
        1


        This method is recommended over regular L{set} as it lowers the number of
        total packets flying around your network, reducing total latency, since
        your app doesn't have to wait for each round-trip of L{set} before sending
        the next one.

        @param mapping: A dict of key/value pairs to set.
        @param time: Tells memcached the time which this value should expire, either
            as a delta number of seconds, or an absolute unix time-since-the-epoch
            value. See the memcached protocol docs section "Storage Commands"
            for more info. We default to 0 == cache forever.
        @param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
            >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
            >>> len(notset_keys) == 0
            True
            >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
            True

            Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
            In this case, the return result would be the list of notset original keys, prefix not applied.

        @param min_compress_len: The threshold length to kick in auto-compression
            of the value using the zlib.compress() routine. If the value being cached is
            a string, then the length of the string is measured, else if the value is an
            object, then the length of the pickle result is measured. If the resulting
            attempt at compression yields a larger string than the input, then it is
            discarded. For backwards compatability, this parameter defaults to 0,
            indicating don't ever try to compress.
        @return: List of keys which failed to be stored [ memcache out of memory, etc. ].
        @rtype: list

        '''

        self._statlog('set_multi')

        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(iter(mapping.keys()), key_prefix)

        # send out all requests on each server before reading anything
        dead_servers = []
        notstored = [] # original keys.

        for server in server_keys.keys():
            # Assemble one big bytes payload per server so each server gets
            # a single send_cmds() call instead of one per key.
            bigcmd = bytearray()
            write = bigcmd.extend
            try:
                newline = b"\r\n"
                for key in server_keys[server]: # These are mangled keys
                    store_info = self._val_to_store_info(mapping[prefixed_to_orig_key[key]], min_compress_len)
                    if store_info:
                        # Wire format: "set <key> <flags> <exptime> <bytes>\r\n<data>\r\n"
                        cmd = bytearray(("set %s %d %d %d\r\n" % (key, store_info[0], time, store_info[1])).encode('utf-8'))
                        # now write to bigcmd: cmd + val + newline
                        write(cmd)
                        write(store_info[2])
                        write(newline)
                    else:
                        # Value was unserializable/oversized; report as not stored.
                        notstored.append(prefixed_to_orig_key[key])
                server.send_cmds(bytes(bigcmd))
            except socket.error as msg:
                if isinstance(msg, tuple): msg = msg[1]
                server.mark_dead(msg)
                dead_servers.append(server)

        # if any servers died on the way, don't expect them to respond.
        for server in dead_servers:
            del server_keys[server]

        # short-circuit if there are no servers, just return all keys
        if not server_keys: return(list(mapping.keys()))

        # Read one reply per key sent; anything other than STORED is a failure.
        for server, keys in server_keys.items():
            try:
                for key in keys:
                    line = server.readline()
                    if line == b'STORED':
                        continue
                    else:
                        notstored.append(prefixed_to_orig_key[key]) #un-mangle.
            except (_Error, socket.error) as msg:
                if isinstance(msg, tuple): msg = msg[1]
                server.mark_dead(msg)
        return notstored

    def _val_to_store_info(self, val, min_compress_len):
        """
        Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.

        Returns 0 (falsy) instead of a tuple when the encoded value
        exceeds server_max_value_length, so the caller silently skips it.
        """
        flags = 0

        # check types exactly rather than using isinstance, or subclasses
        # will be deserialized into instances of the parent class
        # (most blatantly, bool --> int)
        if type(val) == str:
            val = val.encode('utf-8')
        elif type(val) == int:
            flags |= Client._FLAG_INTEGER
            val = str(val).encode('ascii')
            # force no attempt to compress this silly string.
            min_compress_len = 0
        else:
            # Everything else is pickled into a bytes buffer.
            flags |= Client._FLAG_PICKLE
            file = BytesIO()
            if self.picklerIsKeyword:
                pickler = self.pickler(file, protocol = self.pickleProtocol)
            else:
                pickler = self.pickler(file, self.pickleProtocol)
            if self.persistent_id:
                pickler.persistent_id = self.persistent_id
            pickler.dump(val)
            val = file.getvalue()

        lv = len(val)
        # We should try to compress if min_compress_len > 0 and we could
        # import zlib and this string is longer than our min threshold.
        if min_compress_len and _supports_compress and lv > min_compress_len:
            comp_val = compress(val)
            # Only retain the result if the compression result is smaller
            # than the original.
            if len(comp_val) < lv:
                flags |= Client._FLAG_COMPRESSED
                val = comp_val

        # silently do not store if value length exceeds maximum
        if self.server_max_value_length != 0 and \
           len(val) > self.server_max_value_length: return(0)

        return (flags, len(val), val)

    def _cmd_builder(self, cmd, key, time, store_info):
        '''A utility method to build platform specific fullcmd, mainly due
        to pickle return value type.

        @param store_info: (flags, length, encoded value) from
            _val_to_store_info.
        @return: the full bytes command including the payload.
        @raise _Error: if the encoded value is not bytes.
        '''
        if cmd == 'cas':
            # cas takes the unique id captured by gets() as a fifth field.
            c = "cas %s %d %d %d %d\r\n" % (
                key, store_info[0], time, store_info[1], self.cas_ids[key])
        else:
            c = "%s %s %d %d %d\r\n" % (
                cmd, key, store_info[0], time, store_info[1])
        if isinstance(store_info[2], bytes):
            return c.encode('utf-8') + store_info[2]
        else:
            raise _Error("_cmd_builder: unknown data type (%s)" %
                         type(store_info[2]))

    def _set(self, cmd, key, val, time, min_compress_len = 0):
        """Shared implementation behind set/add/replace/append/prepend/cas.

        Retries once through a fresh socket if the connection is found dead.
        @return: truthy on success, 0 on failure.
        """
        if self.do_check_key:
            self.check_key(key)
        server, key = self._get_server(key)
        if not server:
            return 0

        def _unsafe_set():
            # May raise _ConnectionDeadError; the outer try retries once.
            self._statlog(cmd)

            store_info = self._val_to_store_info(val, min_compress_len)
            if not store_info: return(0)

            if cmd == 'cas':
                # Without a cached cas id there is nothing to compare-and-swap
                # against, so degrade to a plain set.
                if key not in self.cas_ids:
                    return self._set('set', key, val, time, min_compress_len)
                fullcmd = self._cmd_builder(cmd, key, time, store_info)
            else:
                fullcmd = self._cmd_builder(cmd, key, time, store_info)
            try:
                server.send_cmd(fullcmd)
                return(server.expect(b"STORED", raise_exception=True)
                       == b"STORED")
            except socket.error as msg:
                if isinstance(msg, tuple): msg = msg[1]
                server.mark_dead(msg)
                return 0

        try:
            return _unsafe_set()
        except _ConnectionDeadError:
            # retry once
            try:
                if server._get_socket():
                    return _unsafe_set()
            except (_ConnectionDeadError, socket.error) as
msg: 832 | server.mark_dead(msg) 833 | return 0 834 | 835 | def _get(self, cmd, key): 836 | if self.do_check_key: 837 | self.check_key(key) 838 | server, key = self._get_server(key) 839 | if not server: 840 | return None 841 | 842 | def _unsafe_get(): 843 | self._statlog(cmd) 844 | 845 | try: 846 | server.send_cmd("{0} {1}".format(cmd, key).encode("utf-8")) 847 | rkey = flags = rlen = cas_id = None 848 | 849 | if cmd == 'gets': 850 | rkey, flags, rlen, cas_id, = self._expect_cas_value(server, 851 | raise_exception=True) 852 | if rkey and self.cache_cas: 853 | self.cas_ids[rkey] = cas_id 854 | else: 855 | rkey, flags, rlen, = self._expectvalue(server, 856 | raise_exception=True) 857 | 858 | if not rkey: 859 | return None 860 | try: 861 | value = self._recv_value(server, flags, rlen) 862 | finally: 863 | server.expect(b"END", raise_exception=True) 864 | except (_Error, socket.error) as msg: 865 | if isinstance(msg, tuple): msg = msg[1] 866 | server.mark_dead(msg) 867 | return None 868 | 869 | return value 870 | 871 | try: 872 | return _unsafe_get() 873 | except _ConnectionDeadError: 874 | # retry once 875 | try: 876 | if server.connect(): 877 | return _unsafe_get() 878 | return None 879 | except (_ConnectionDeadError, socket.error) as msg: 880 | server.mark_dead(msg) 881 | return None 882 | 883 | def get(self, key): 884 | '''Retrieves a key from the memcache. 885 | 886 | @return: The value or None. 887 | ''' 888 | return self._get('get', key) 889 | 890 | def gets(self, key): 891 | '''Retrieves a key from the memcache. Used in conjunction with 'cas'. 892 | 893 | @return: The value or None. 894 | ''' 895 | return self._get('gets', key) 896 | 897 | def get_multi(self, keys, key_prefix=''): 898 | ''' 899 | Retrieves multiple keys from the memcache doing just one query. 

        >>> success = mc.set("foo", "bar")
        >>> success = mc.set("baz", 42)
        >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
        True
        >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
        True

        This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
        >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
        True

        get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields.
        They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix.
        In this mode, the key_prefix could be a table name, and the key itself a db primary key number.

        >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
        True
        >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
        True

        This method is recommended over regular L{get} as it lowers the number of
        total packets flying around your network, reducing total latency, since
        your app doesn't have to wait for each round-trip of L{get} before sending
        the next one.

        See also L{set_multi}.

        @param keys: An array of keys.
        @param key_prefix: A string to prefix each key when we communicate with memcache.
            Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
        @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.

        '''

        self._statlog('get_multi')

        # Bucket the keys per destination server and remember the mapping
        # from the prefixed wire key back to the caller's original key.
        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)

        # send out all requests on each server before reading anything
        dead_servers = []
        for server in server_keys.keys():
            try:
                server.send_cmd("get %s" % " ".join(server_keys[server]))
            except socket.error as msg:
                if isinstance(msg, tuple): msg = msg[1]
                server.mark_dead(msg)
                dead_servers.append(server)

        # if any servers died on the way, don't expect them to respond.
        for server in dead_servers:
            del server_keys[server]

        retvals = {}

        # Now drain each surviving server's response stream up to its END line.
        for server in server_keys.keys():
            try:
                line = server.readline()
                while line and line != b'END':
                    rkey, flags, rlen = self._expectvalue(server, line)
                    # Bo Yang reports that this can sometimes be None
                    if rkey is not None:
                        if isinstance(rkey,bytes):
                            rkey = rkey.decode()
                        val = self._recv_value(server, flags, rlen)
                        retvals[prefixed_to_orig_key[rkey]] = val   # un-prefix returned key.
966 | line = server.readline() 967 | except (_Error, socket.error) as msg: 968 | if isinstance(msg, tuple): msg = msg[1] 969 | server.mark_dead(msg) 970 | return retvals 971 | 972 | def _expect_cas_value(self, server, line=None, raise_exception=False): 973 | if not line: 974 | line = server.readline(raise_exception) 975 | 976 | if line and line[:5] == b'VALUE': 977 | resp, rkey, flags, len, cas_id = line.split() 978 | return (rkey, int(flags), int(len), int(cas_id)) 979 | else: 980 | return (None, None, None, None) 981 | 982 | def _expectvalue(self, server, line=None, raise_exception=False): 983 | if not line: 984 | line = server.readline(raise_exception) 985 | 986 | if line and line[:5] == b'VALUE': 987 | resp, rkey, flags, len = line.split() 988 | flags = int(flags) 989 | rlen = int(len) 990 | return (rkey, flags, rlen) 991 | else: 992 | return (None, None, None) 993 | 994 | def _recv_value(self, server, flags, rlen): 995 | rlen += 2 # include \r\n 996 | buf = server.recv(rlen) 997 | if len(buf) != rlen: 998 | raise _Error("received %d bytes when expecting %d" % (len(buf), rlen)) 999 | 1000 | if len(buf) == rlen: 1001 | buf = buf[:-2] # strip \r\n 1002 | 1003 | if flags & Client._FLAG_COMPRESSED: 1004 | buf = decompress(buf) 1005 | 1006 | if flags == 0 or flags == Client._FLAG_COMPRESSED: 1007 | # Either a bare string or a compressed string now decompressed... 
1008 | val = buf.decode('utf-8') 1009 | elif flags & Client._FLAG_INTEGER: 1010 | val = int(buf) 1011 | elif flags & Client._FLAG_LONG: 1012 | val = int(buf) 1013 | elif flags & Client._FLAG_PICKLE: 1014 | try: 1015 | file = BytesIO(buf) 1016 | unpickler = self.unpickler(file) 1017 | if self.persistent_load: 1018 | unpickler.persistent_load = self.persistent_load 1019 | val = unpickler.load() 1020 | except Exception as e: 1021 | self.debuglog('Pickle error: %s\n' % e) 1022 | val = None 1023 | else: 1024 | self.debuglog("unknown flags on get: %x\n" % flags) 1025 | 1026 | return val 1027 | 1028 | def check_key(self, key, key_extra_len=0): 1029 | """Checks sanity of key. Fails if: 1030 | Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength). 1031 | Contains control characters (Raises MemcachedKeyCharacterError). 1032 | Is not a string (Raises MemcachedKeyError) 1033 | Is None (Raises MemcachedKeyError) 1034 | """ 1035 | if isinstance(key, tuple): key = key[1] 1036 | if not key: 1037 | raise Client.MemcachedKeyNoneError("Key is None") 1038 | if not isinstance(key, str): 1039 | raise Client.MemcachedKeyTypeError("Key must be str()'s") 1040 | 1041 | if isinstance(key, bytes): 1042 | keylen = len(key) 1043 | else: 1044 | keylen = len(key.encode("utf-8")) 1045 | 1046 | if self.server_max_key_length != 0 and \ 1047 | keylen + key_extra_len > self.server_max_key_length: 1048 | raise Client.MemcachedKeyLengthError("Key length is > %s" 1049 | % self.server_max_key_length) 1050 | after_translate = key.translate(key.maketrans('', '', invalid_key_characters)) 1051 | if len(key) != len(after_translate): 1052 | raise Client.MemcachedKeyCharacterError( 1053 | "Control characters not allowed") 1054 | 1055 | 1056 | class _Host(object): 1057 | 1058 | def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY, 1059 | socket_timeout=_SOCKET_TIMEOUT, flush_on_reconnect=0): 1060 | self.dead_retry = dead_retry 1061 | self.socket_timeout = socket_timeout 1062 | self.debug = debug 
1063 | self.flush_on_reconnect = flush_on_reconnect 1064 | if isinstance(host, tuple): 1065 | host, self.weight = host 1066 | else: 1067 | self.weight = 1 1068 | 1069 | # parse the connection string 1070 | m = re.match(r'^(?Punix):(?P.*)$', host) 1071 | if not m: 1072 | m = re.match(r'^(?Pinet6):' 1073 | r'\[(?P[^\[\]]+)\](:(?P[0-9]+))?$', host) 1074 | if not m: 1075 | m = re.match(r'^(?Pinet):' 1076 | r'(?P[^:]+)(:(?P[0-9]+))?$', host) 1077 | if not m: m = re.match(r'^(?P[^:]+)(:(?P[0-9]+))?$', host) 1078 | if not m: 1079 | raise ValueError('Unable to parse connection string: "%s"' % host) 1080 | 1081 | hostData = m.groupdict() 1082 | if hostData.get('proto') == 'unix': 1083 | self.family = socket.AF_UNIX 1084 | self.address = hostData['path'] 1085 | elif hostData.get('proto') == 'inet6': 1086 | self.family = socket.AF_INET6 1087 | self.ip = hostData['host'] 1088 | self.port = int(hostData.get('port') or 11211) 1089 | self.address = ( self.ip, self.port ) 1090 | else: 1091 | self.family = socket.AF_INET 1092 | self.ip = hostData['host'] 1093 | self.port = int(hostData.get('port') or 11211) 1094 | self.address = ( self.ip, self.port ) 1095 | 1096 | self.deaduntil = 0 1097 | self.socket = None 1098 | self.flush_on_next_connect = 0 1099 | 1100 | self.buffer = b'' 1101 | 1102 | def debuglog(self, str): 1103 | if self.debug: 1104 | sys.stderr.write("MemCached: %s\n" % str) 1105 | 1106 | def _check_dead(self): 1107 | if self.deaduntil and self.deaduntil > time.time(): 1108 | return 1 1109 | self.deaduntil = 0 1110 | return 0 1111 | 1112 | def connect(self): 1113 | if self._get_socket(): 1114 | return 1 1115 | return 0 1116 | 1117 | def mark_dead(self, reason): 1118 | self.debuglog("MemCache: %s: %s. Marking dead." 
% (self, reason)) 1119 | self.deaduntil = time.time() + self.dead_retry 1120 | if self.flush_on_reconnect: 1121 | self.flush_on_next_connect = 1 1122 | self.close_socket() 1123 | 1124 | def _get_socket(self): 1125 | if self._check_dead(): 1126 | return None 1127 | if self.socket: 1128 | return self.socket 1129 | s = socket.socket(self.family, socket.SOCK_STREAM) 1130 | 1131 | if hasattr(s, 'settimeout'): s.settimeout(self.socket_timeout) 1132 | try: 1133 | s.connect(self.address) 1134 | except socket.timeout as msg: 1135 | self.mark_dead("connect: %s" % msg) 1136 | return None 1137 | except socket.error as msg: 1138 | if isinstance(msg, tuple): msg = msg[1] 1139 | self.mark_dead("connect: %s" % msg) 1140 | return None 1141 | self.socket = s 1142 | self.buffer = b'' 1143 | if self.flush_on_next_connect: 1144 | self.flush() 1145 | self.flush_on_next_connect = 0 1146 | return s 1147 | 1148 | def close_socket(self): 1149 | if self.socket: 1150 | self.socket.close() 1151 | self.socket = None 1152 | 1153 | def send_cmd(self, cmd): 1154 | if not isinstance(cmd, bytes): 1155 | self.socket.sendall((cmd + '\r\n').encode('ascii')) 1156 | else: 1157 | self.socket.sendall(cmd + b'\r\n') 1158 | 1159 | 1160 | def send_cmds(self, cmds): 1161 | """ cmds already has trailing \r\n's applied """ 1162 | if not isinstance(cmds, bytes): 1163 | self.socket.sendall(cmds.encode('ascii')) 1164 | else: 1165 | self.socket.sendall(cmds) 1166 | 1167 | def readline(self, raise_exception=False): 1168 | """Read a line and return it. If "raise_exception" is set, 1169 | raise _ConnectionDeadError if the read fails, otherwise return 1170 | an empty string. 
1171 | """ 1172 | buf = self.buffer 1173 | recv = self.socket.recv 1174 | while True: 1175 | index = buf.find(b'\r\n') 1176 | if index >= 0: 1177 | break 1178 | data = recv(4096) 1179 | if not data: 1180 | # connection close, let's kill it and raise 1181 | self.close_socket() 1182 | if raise_exception: 1183 | raise _ConnectionDeadError() 1184 | else: 1185 | return b'' 1186 | 1187 | buf += data 1188 | self.buffer = buf[index+2:] 1189 | return buf[:index] 1190 | 1191 | def expect(self, text, raise_exception=False): 1192 | line = self.readline(raise_exception) 1193 | if line != text: 1194 | self.debuglog("while expecting '%s', got unexpected response '%s'" 1195 | % (text, line)) 1196 | return line 1197 | 1198 | def recv(self, rlen): 1199 | self_socket_recv = self.socket.recv 1200 | buf = self.buffer 1201 | while len(buf) < rlen: 1202 | foo = self_socket_recv(max(rlen - len(buf), 4096)) 1203 | buf += foo 1204 | if not foo: 1205 | raise _Error( 'Read %d bytes, expecting %d, ' 1206 | 'read returned 0 length bytes' % ( len(buf), rlen )) 1207 | self.buffer = buf[rlen:] 1208 | return buf[:rlen] 1209 | 1210 | def flush(self): 1211 | self.send_cmd(b'flush_all') 1212 | self.expect(b'OK') 1213 | 1214 | def __str__(self): 1215 | d = '' 1216 | if self.deaduntil: 1217 | d = " (dead until %d)" % self.deaduntil 1218 | 1219 | if self.family == socket.AF_INET: 1220 | return "inet:%s:%d%s" % (self.address[0], self.address[1], d) 1221 | elif self.family == socket.AF_INET6: 1222 | return "inet6:[%s]:%d%s" % (self.address[0], self.address[1], d) 1223 | else: 1224 | return "unix:%s%s" % (self.address, d) 1225 | 1226 | 1227 | def _doctest(): 1228 | import doctest, memcache 1229 | servers = ["127.0.0.1:11211"] 1230 | mc = Client(servers, debug=1) 1231 | globs = {"mc": mc} 1232 | return doctest.testmod(memcache, globs=globs) 1233 | 1234 | if __name__ == "__main__": 1235 | print("Testing docstrings...") 1236 | _doctest() 1237 | print("Running tests:") 1238 | print() 1239 | serverList = 
[["127.0.0.1:11211"]] 1240 | if '--do-unix' in sys.argv: 1241 | serverList.append([os.path.join(os.getcwd(), 'memcached.socket')]) 1242 | 1243 | for servers in serverList: 1244 | mc = Client(servers, debug=1) 1245 | 1246 | def to_s(val): 1247 | if not isinstance(val, str): 1248 | return "%s (%s)" % (val, type(val)) 1249 | return "%s" % val 1250 | def test_setget(key, val): 1251 | print("Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)), end=' ') 1252 | mc.set(key, val) 1253 | newval = mc.get(key) 1254 | if newval == val and type(newval) == type(val): 1255 | print("OK") 1256 | return 1 1257 | else: 1258 | print("FAIL") 1259 | return 0 1260 | 1261 | 1262 | class FooStruct: 1263 | def __init__(self): 1264 | self.bar = "baz" 1265 | def __str__(self): 1266 | return "A FooStruct" 1267 | def __eq__(self, other): 1268 | if isinstance(other, FooStruct): 1269 | return self.bar == other.bar 1270 | return 0 1271 | 1272 | class StrSubclass(str): 1273 | pass 1274 | 1275 | test_setget("a_string", "some random string") 1276 | test_setget("a_string_subclass", StrSubclass("L337 57R")) 1277 | test_setget("an_integer", 42) 1278 | test_setget("bool_True", True) 1279 | test_setget("bool_False", False) 1280 | if test_setget("long", int(1<<30)): 1281 | print("Testing delete ...", end=' ') 1282 | if mc.delete("long"): 1283 | print("OK") 1284 | else: 1285 | print("FAIL") 1286 | print("Testing get_multi ...", end=' ') 1287 | print(mc.get_multi(["a_string", "an_integer"])) 1288 | 1289 | print("Testing get(unknown value) ...", end=' ') 1290 | print(to_s(mc.get("unknown_value"))) 1291 | 1292 | f = FooStruct() 1293 | test_setget("foostruct", f) 1294 | 1295 | print("Testing incr ...", end=' ') 1296 | x = mc.incr("an_integer", 1) 1297 | if x == 43: 1298 | print("OK") 1299 | else: 1300 | print("FAIL") 1301 | 1302 | print("Testing decr ...", end=' ') 1303 | x = mc.decr("an_integer", 1) 1304 | if x == 42: 1305 | print("OK") 1306 | else: 1307 | print("FAIL") 1308 | 1309 | # sanity tests 1310 | 
print("Testing sending spaces...", end=' ') 1311 | try: 1312 | x = mc.set("this has spaces", 1) 1313 | except Client.MemcachedKeyCharacterError as msg: 1314 | print("OK") 1315 | else: 1316 | print("FAIL") 1317 | 1318 | print("Testing sending control characters...", end=' ') 1319 | try: 1320 | x = mc.set("this\x10has\x11control characters\x02", 1) 1321 | except Client.MemcachedKeyCharacterError as msg: 1322 | print("OK") 1323 | else: 1324 | print("FAIL") 1325 | 1326 | print("Testing using insanely long key...", end=' ') 1327 | try: 1328 | x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'aaaa', 1) 1329 | except Client.MemcachedKeyLengthError as msg: 1330 | print("OK") 1331 | else: 1332 | print("FAIL") 1333 | 1334 | print("Testing sending a unicode-string key...", end=' ') 1335 | try: 1336 | x = mc.set('会', 1) 1337 | except Client.MemcachedStringEncodingError as msg: 1338 | print("FAIL", end=' ') 1339 | else: 1340 | print("OK", end=' ') 1341 | import pickle 1342 | s = pickle.loads(b'V\\u4f1a\np0\n.') 1343 | print("Testing sending a key of type bytes") 1344 | try: 1345 | x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1) 1346 | except Client.MemcachedKeyTypeError: 1347 | print("OK") 1348 | else: 1349 | print("FAIL") 1350 | 1351 | print("Testing using a value larger than the memcached value limit...") 1352 | print('NOTE: "MemCached: while expecting[...]" is normal...') 1353 | x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH) 1354 | if mc.get('keyhere') == None: 1355 | print("OK", end=' ') 1356 | else: 1357 | print("FAIL", end=' ') 1358 | x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa') 1359 | if mc.get('keyhere') == None: 1360 | print("OK") 1361 | else: 1362 | print("FAIL") 1363 | 1364 | print("Testing set_multi() with no memcacheds running", end=' ') 1365 | mc.disconnect_all() 1366 | errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'}) 1367 | if errors != []: 1368 | print("FAIL") 1369 | else: 1370 | print("OK") 1371 | 1372 | print("Testing delete_multi() 
with no memcacheds running", end=' ') 1373 | mc.disconnect_all() 1374 | ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'}) 1375 | if ret != 1: 1376 | print("FAIL") 1377 | else: 1378 | print("OK") 1379 | 1380 | # vim: ts=4 sw=4 et : 1381 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | import memcache 5 | 6 | setup(name="python3-memcached", 7 | version=memcache.__version__, 8 | description="Pure python memcached client", 9 | long_description=open("README").read(), 10 | author="Evan Martin", 11 | author_email="martine@danga.com", 12 | maintainer="Eren Guven", 13 | maintainer_email="erenguven0@gmail.com", 14 | url="https://github.com/eguven/python3-memcached", 15 | py_modules=["memcache"], 16 | classifiers=[ 17 | "Development Status :: 5 - Production/Stable", 18 | "Intended Audience :: Developers", 19 | "License :: OSI Approved :: Python Software Foundation License", 20 | "Operating System :: OS Independent", 21 | "Programming Language :: Python", 22 | "Programming Language :: Python :: 3", 23 | "Programming Language :: Python :: 3.1", 24 | "Programming Language :: Python :: 3.2", 25 | "Programming Language :: Python :: 3.3", 26 | "Topic :: Internet", 27 | "Topic :: Software Development :: Libraries :: Python Modules", 28 | ]) 29 | 30 | --------------------------------------------------------------------------------