├── requirements.txt ├── tests ├── __init__.py ├── utils.py ├── test_setmulti.py └── test_memcache.py ├── test-requirements.txt ├── SECURITY.md ├── .gitignore ├── Makefile ├── setup.cfg ├── MANIFEST.in ├── PKG-INFO ├── .github ├── dependabot.yml └── workflows │ ├── main.yml │ └── release.yml ├── tox.ini ├── README.md ├── setup.py ├── PSF.LICENSE ├── ChangeLog └── memcache.py /requirements.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | nose 2 | coverage 3 | hacking 4 | mock 5 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | Please report any security issues to jafo00@gmail.com 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | /dist 3 | /python_memcached.egg-info 4 | .tox 5 | .coverage 6 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | python memcache.py 3 | ( cd tests; make ) 4 | 5 | clean: 6 | rm -f memcache.pyc memcache.py.orig 7 | 8 | push: 9 | bzr push lp:python-memcached 10 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_rpm] 2 | release = 1 3 | packager = Sean Reifschneider 4 | requires = python-memcached 
5 | 6 | [flake8] 7 | ignore = H304,H405 8 | 9 | [wheel] 10 | universal = 1 11 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.md 2 | include *.rst 3 | include *.txt 4 | 5 | include ChangeLog 6 | include Makefile 7 | include PSF.LICENSE 8 | 9 | global-exclude *.pyc 10 | global-exclude .gitignore 11 | global-exclude .DS_Store 12 | -------------------------------------------------------------------------------- /PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 1.0 2 | Name: python-memcached 3 | Version: 1.60 4 | Summary: A Python memcached client library. 5 | Home-page: http://www.tummy.com/Community/software/python-memcached/ 6 | Author: Sean Reifschneider 7 | Author-email: jafo00@gmail.com 8 | License: Python Software Foundation License 9 | Description: A Python memcached client library. 10 | Platform: UNKNOWN 11 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Keep GitHub Actions up to date with GitHub's Dependabot...
2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" # Group all Actions updates into a single larger pull request 12 | schedule: 13 | interval: weekly 14 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 1.6 3 | envlist = py{36,37,38,39,310,311,312},pypy,pep8 4 | skipsdist = True 5 | 6 | [testenv] 7 | usedevelop = True 8 | # Customize pip command, add -U to force updates. 9 | install_command = pip install -U {opts} {packages} 10 | deps = -r{toxinidir}/requirements.txt 11 | -r{toxinidir}/test-requirements.txt 12 | commands = 13 | nosetests {posargs} 14 | python -c 'import memcache; memcache._doctest()' 15 | 16 | [tox:jenkins] 17 | downloadcache = ~/cache/pip 18 | 19 | [testenv:pep8] 20 | commands = flake8 21 | 22 | [testenv:cover] 23 | commands = nosetests --with-coverage {posargs} 24 | 25 | [flake8] 26 | exclude = .venv*,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*.egg,.update-venv,build 27 | max-line-length = 119 28 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | import sys 3 | 4 | from io import StringIO 5 | 6 | 7 | @contextmanager 8 | def captured_output(stream_name): 9 | """Return a context manager used by captured_stdout/stdin/stderr 10 | that temporarily replaces the sys stream *stream_name* with a StringIO. 
11 | 12 | This function and the following ``captured_std*`` are copied 13 | from CPython's ``test.support`` module. 14 | """ 15 | orig_stdout = getattr(sys, stream_name) 16 | setattr(sys, stream_name, StringIO()) 17 | try: 18 | yield getattr(sys, stream_name) 19 | finally: 20 | setattr(sys, stream_name, orig_stdout) 21 | 22 | 23 | def captured_stderr(): 24 | """Capture the output of sys.stderr: 25 | 26 | with captured_stderr() as stderr: 27 | print('hello', file=sys.stderr) 28 | self.assertEqual(stderr.getvalue(), 'hello\n') 29 | """ 30 | return captured_output('stderr') 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | This software is a 100% Python interface to the memcached memory cache 4 | daemon. It is the client side software which allows storing values 5 | in one or more, possibly remote, memcached servers. Search google for 6 | memcached for more information. 7 | 8 | This library is stable and largely in maintenance mode. Another library that 9 | is getting more active enhancements is 10 | [pymemcache](https://pypi.org/project/pymemcache/) and they have links and a 11 | good set of comparisons between them on their page. 12 | 13 | This package was originally written by Evan Martin of Danga. Please do 14 | not contact Evan about maintenance. Sean Reifschneider of tummy.com, 15 | ltd. has taken over maintenance of it. 
16 | 17 | Please report issues and submit code changes to the github repository at: 18 | 19 | https://github.com/linsomniac/python-memcached 20 | 21 | For changes prior to 2013-03-26, see the old Launchpad repository at: 22 | 23 | Historic issues: https://launchpad.net/python-memcached 24 | 25 | ## Testing 26 | 27 | Test patches locally and easily by running tox: 28 | 29 | pip install tox 30 | tox -e py312 31 | 32 | Test for style by running tox: 33 | 34 | tox -e pep8 35 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Python package 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: ["3.10", "3.11", "3.12", "3.13"] 11 | 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Set up Python ${{ matrix.python-version }} 15 | uses: actions/setup-python@v5 16 | with: 17 | python-version: ${{ matrix.python-version }} 18 | - name: Set up Memcached 19 | run: | 20 | sudo apt update 21 | sudo apt -y install memcached 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install flake8 pytest 26 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 27 | if [ -f pyproject.toml ]; then pip install .; fi 28 | - name: Lint with flake8 29 | run: | 30 | # stop the build if there are Python syntax errors or undefined names 31 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 32 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 33 | flake8 . 
--count --exit-zero --max-complexity=16 --max-line-length=127 --statistics 34 | - name: Run tests 35 | run: | 36 | python memcache.py 37 | pytest 38 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | # See: https://github.com/pypa/gh-action-pypi-publish 4 | 5 | # Triggered from github UI 6 | #on: ["workflow_dispatch"] 7 | on: 8 | release: 9 | types: [released] 10 | 11 | jobs: 12 | build-and-publish: 13 | runs-on: ubuntu-latest 14 | environment: release 15 | permissions: 16 | # IMPORTANT: this permission is mandatory for trusted publishing 17 | id-token: write 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v4 21 | - name: Set up Python 22 | uses: actions/setup-python@v5 23 | with: 24 | python-version: '3.x' 25 | - name: Extract version from tag 26 | run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV 27 | - name: Update version in memcache.py 28 | run: | 29 | sed -ri 's/^(\s*__version__\s*=\s*).*$/\1"'"${RELEASE_VERSION}"'"/' memcache.py 30 | - name: Update version in PKG-INFO 31 | run: | 32 | sed -ri 's/^(Version:\s*).*$/\1'"${RELEASE_VERSION}"'/' PKG-INFO 33 | - name: Build 34 | run: pipx run build . 
35 | # This requires Trusted Publishing be set up at PyPi 36 | # Go to -> Manage -> Publishing and enter this repos info 37 | # Info: [Owner] / [Repo name] / release.yml / [BLANK] 38 | - name: Publish 39 | uses: pypa/gh-action-pypi-publish@release/v1 40 | with: 41 | skip-existing: true 42 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup # noqa 4 | from setuptools.depends import get_module_constant 5 | 6 | dl_url = "https://github.com/linsomniac/python-memcached/releases/download/{0}/python-memcached-{0}.tar.gz" 7 | 8 | version = get_module_constant("memcache", "__version__") 9 | setup( 10 | name="python-memcached", 11 | version=version, 12 | description="Pure python memcached client", 13 | long_description=open("README.md").read(), 14 | long_description_content_type="text/markdown", 15 | author="Evan Martin", 16 | author_email="martine@danga.com", 17 | maintainer="Sean Reifschneider", 18 | maintainer_email="jafo00@gmail.com", 19 | url="https://github.com/linsomniac/python-memcached", 20 | download_url="https://github.com/linsomniac/python-memcached/releases/download/{0}/python-memcached-{0}.tar.gz".format( 21 | version 22 | ), # noqa 23 | py_modules=["memcache"], 24 | install_requires=open("requirements.txt").read().split(), 25 | classifiers=[ 26 | "Development Status :: 5 - Production/Stable", 27 | "Intended Audience :: Developers", 28 | "License :: OSI Approved :: Python Software Foundation License", 29 | "Operating System :: OS Independent", 30 | "Programming Language :: Python", 31 | "Topic :: Internet", 32 | "Topic :: Software Development :: Libraries :: Python Modules", 33 | "Programming Language :: Python", 34 | "Programming Language :: Python :: 3", 35 | "Programming Language :: Python :: 3.9", 36 | "Programming Language :: Python :: 3.10", 37 | "Programming Language :: 
Python :: 3.11", 38 | "Programming Language :: Python :: 3.12", 39 | "Programming Language :: Python :: 3.13", 40 | ], 41 | ) 42 | -------------------------------------------------------------------------------- /tests/test_setmulti.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Tests for set_multi. 4 | # 5 | # ============== 6 | # This is based on a skeleton test file, more information at: 7 | # 8 | # https://github.com/linsomniac/python-unittest-skeleton 9 | 10 | from __future__ import print_function 11 | 12 | import socket 13 | import sys 14 | import unittest 15 | 16 | from .utils import captured_stderr 17 | 18 | sys.path.append('..') 19 | import memcache # noqa: E402 20 | 21 | DEBUG = False 22 | 23 | 24 | class test_Memcached_Set_Multi(unittest.TestCase): 25 | def setUp(self): 26 | RECV_CHUNKS = [b'chunk1'] 27 | 28 | class FakeSocket(object): 29 | def __init__(self, *args): 30 | if DEBUG: 31 | print('FakeSocket{0!r}'.format(args)) 32 | self._recv_chunks = list(RECV_CHUNKS) 33 | 34 | def connect(self, *args): 35 | if DEBUG: 36 | print('FakeSocket.connect{0!r}'.format(args)) 37 | 38 | def sendall(self, *args): 39 | if DEBUG: 40 | print('FakeSocket.sendall{0!r}'.format(args)) 41 | 42 | def recv(self, *args): 43 | if self._recv_chunks: 44 | data = self._recv_chunks.pop(0) 45 | else: 46 | data = '' 47 | if DEBUG: 48 | print('FakeSocket.recv{0!r} -> {1!r}'.format(args, data)) 49 | return data 50 | 51 | def close(self): 52 | if DEBUG: 53 | print('FakeSocket.close()') 54 | 55 | self.old_socket = socket.socket 56 | socket.socket = FakeSocket 57 | 58 | self.mc = memcache.Client(['memcached'], debug=True) 59 | 60 | def tearDown(self): 61 | socket.socket = self.old_socket 62 | 63 | def test_Socket_Disconnect(self): 64 | mapping = {'foo': 'FOO', 'bar': 'BAR'} 65 | with captured_stderr() as log: 66 | bad_keys = self.mc.set_multi(mapping) 67 | self.assertIn('connection closed in readline().', 
log.getvalue()) 68 | self.assertEqual(sorted(bad_keys), ['bar', 'foo']) 69 | if DEBUG: 70 | print('set_multi({0!r}) -> {1!r}'.format(mapping, bad_keys)) 71 | 72 | 73 | if __name__ == '__main__': 74 | unittest.main() 75 | -------------------------------------------------------------------------------- /PSF.LICENSE: -------------------------------------------------------------------------------- 1 | PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 2 | -------------------------------------------- 3 | 4 | 1. This LICENSE AGREEMENT is between the Python Software Foundation 5 | ("PSF"), and the Individual or Organization ("Licensee") accessing and 6 | otherwise using this software ("Python") in source or binary form and 7 | its associated documentation. 8 | 9 | 2. Subject to the terms and conditions of this License Agreement, PSF hereby 10 | grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, 11 | analyze, test, perform and/or display publicly, prepare derivative works, 12 | distribute, and otherwise use Python alone or in any derivative version, 13 | provided, however, that PSF's License Agreement and PSF's notice of copyright, 14 | i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 15 | 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; 16 | All Rights Reserved" are retained in Python alone or in any derivative version 17 | prepared by Licensee. 18 | 19 | 3. In the event Licensee prepares a derivative work that is based on 20 | or incorporates Python or any part thereof, and wants to make 21 | the derivative work available to others as provided herein, then 22 | Licensee hereby agrees to include in any such work a brief summary of 23 | the changes made to Python. 24 | 25 | 4. PSF is making Python available to Licensee on an "AS IS" 26 | basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR 27 | IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND 28 | DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS 29 | FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT 30 | INFRINGE ANY THIRD PARTY RIGHTS. 31 | 32 | 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 33 | FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS 34 | A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, 35 | OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 36 | 37 | 6. This License Agreement will automatically terminate upon a material 38 | breach of its terms and conditions. 39 | 40 | 7. Nothing in this License Agreement shall be deemed to create any 41 | relationship of agency, partnership, or joint venture between PSF and 42 | Licensee. This License Agreement does not grant permission to use PSF 43 | trademarks or trade name in a trademark sense to endorse or promote 44 | products or services of Licensee, or any third party. 45 | 46 | 8. By copying, installing or otherwise using Python, Licensee 47 | agrees to be bound by the terms and conditions of this License 48 | Agreement. 
49 | -------------------------------------------------------------------------------- /tests/test_memcache.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function 3 | 4 | import unittest 5 | import zlib 6 | 7 | try: 8 | import unittest.mock as mock 9 | except ImportError: 10 | import mock 11 | 12 | from memcache import Client, _Host, SERVER_MAX_KEY_LENGTH, SERVER_MAX_VALUE_LENGTH # noqa: H301 13 | from .utils import captured_stderr 14 | 15 | 16 | class FooStruct(object): 17 | 18 | def __init__(self): 19 | self.bar = "baz" 20 | 21 | def __str__(self): 22 | return "A FooStruct" 23 | 24 | def __eq__(self, other): 25 | if isinstance(other, FooStruct): 26 | return self.bar == other.bar 27 | return 0 28 | 29 | 30 | class TestMemcache(unittest.TestCase): 31 | def setUp(self): 32 | # TODO(): unix socket server stuff 33 | servers = ["127.0.0.1:11211"] 34 | self.mc = Client(servers, debug=1) 35 | 36 | def tearDown(self): 37 | self.mc.flush_all() 38 | self.mc.disconnect_all() 39 | 40 | def check_setget(self, key, val, noreply=False): 41 | self.mc.set(key, val, noreply=noreply) 42 | newval = self.mc.get(key) 43 | self.assertEqual(newval, val) 44 | 45 | def test_setget(self): 46 | self.check_setget("a_string", "some random string") 47 | self.check_setget("a_string_2", "some random string", noreply=True) 48 | self.check_setget("an_integer", 42) 49 | self.check_setget("an_integer_2", 42, noreply=True) 50 | 51 | def test_quit_all(self): 52 | self.mc.quit_all() 53 | 54 | def test_delete(self): 55 | self.check_setget("long", int(1 << 30)) 56 | result = self.mc.delete("long") 57 | self.assertEqual(result, True) 58 | self.assertEqual(self.mc.get("long"), None) 59 | result = self.mc.delete("") 60 | self.assertEqual(result, False) 61 | 62 | def test_default(self): 63 | key = "default" 64 | default = object() 65 | result = self.mc.get(key, default=default) 66 | self.assertEqual(result, 
default) 67 | 68 | self.mc.set("default", None) 69 | result = self.mc.get(key, default=default) 70 | self.assertIsNone(result) 71 | 72 | self.mc.set("default", 123) 73 | result = self.mc.get(key, default=default) 74 | self.assertEqual(result, 123) 75 | 76 | @mock.patch.object(_Host, 'send_cmd') 77 | @mock.patch.object(_Host, 'readline') 78 | def test_touch(self, mock_readline, mock_send_cmd): 79 | with captured_stderr(): 80 | self.mc.touch('key') 81 | mock_send_cmd.assert_called_with(b'touch key 0') 82 | 83 | def test_get_multi(self): 84 | self.check_setget("gm_a_string", "some random string") 85 | self.check_setget("gm_an_integer", 42) 86 | self.assertEqual( 87 | self.mc.get_multi(["gm_a_string", "gm_an_integer"]), 88 | {"gm_an_integer": 42, "gm_a_string": "some random string"}) 89 | 90 | def test_get_unknown_value(self): 91 | self.mc.delete("unknown_value") 92 | 93 | self.assertEqual(self.mc.get("unknown_value"), None) 94 | 95 | def test_setget_foostruct(self): 96 | f = FooStruct() 97 | self.check_setget("foostruct", f) 98 | self.check_setget("foostruct_2", f, noreply=True) 99 | 100 | def test_incr(self): 101 | self.check_setget("i_an_integer", 42) 102 | self.assertEqual(self.mc.incr("i_an_integer", 1), 43) 103 | 104 | def test_incr_noreply(self): 105 | self.check_setget("i_an_integer_2", 42) 106 | self.assertEqual(self.mc.incr("i_an_integer_2", 1, noreply=True), None) 107 | self.assertEqual(self.mc.get("i_an_integer_2"), 43) 108 | 109 | def test_decr(self): 110 | self.check_setget("i_an_integer", 42) 111 | self.assertEqual(self.mc.decr("i_an_integer", 1), 41) 112 | 113 | def test_decr_noreply(self): 114 | self.check_setget("i_an_integer_2", 42) 115 | self.assertEqual(self.mc.decr("i_an_integer_2", 1, noreply=True), None) 116 | self.assertEqual(self.mc.get("i_an_integer_2"), 41) 117 | 118 | def test_sending_spaces(self): 119 | try: 120 | self.mc.set("this has spaces", 1) 121 | except Client.MemcachedKeyCharacterError as err: 122 | self.assertTrue("characters not 
allowed" in err.args[0]) 123 | else: 124 | self.fail( 125 | "Expected Client.MemcachedKeyCharacterError, nothing raised") 126 | 127 | def test_sending_control_characters(self): 128 | try: 129 | self.mc.set("this\x10has\x11control characters\x02", 1) 130 | except Client.MemcachedKeyCharacterError as err: 131 | self.assertTrue("characters not allowed" in err.args[0]) 132 | else: 133 | self.fail( 134 | "Expected Client.MemcachedKeyCharacterError, nothing raised") 135 | 136 | def test_sending_key_too_long(self): 137 | try: 138 | self.mc.set('a' * SERVER_MAX_KEY_LENGTH + 'a', 1) 139 | except Client.MemcachedKeyLengthError as err: 140 | self.assertTrue("length is >" in err.args[0]) 141 | else: 142 | self.fail( 143 | "Expected Client.MemcachedKeyLengthError, nothing raised") 144 | 145 | # These should work. 146 | self.mc.set('a' * SERVER_MAX_KEY_LENGTH, 1) 147 | self.mc.set('a' * SERVER_MAX_KEY_LENGTH, 1, noreply=True) 148 | 149 | def test_setget_boolean(self): 150 | """GitHub issue #75. Set/get with boolean values.""" 151 | self.check_setget("bool", True) 152 | 153 | def test_unicode_key(self): 154 | s = u'\u4f1a' 155 | maxlen = SERVER_MAX_KEY_LENGTH // len(s.encode('utf-8')) 156 | key = s * maxlen 157 | 158 | self.mc.set(key, 5) 159 | value = self.mc.get(key) 160 | self.assertEqual(value, 5) 161 | 162 | def test_unicode_value(self): 163 | key = 'key' 164 | value = u'Iñtërnâtiônàlizætiøn2' 165 | self.mc.set(key, value) 166 | cached_value = self.mc.get(key) 167 | self.assertEqual(value, cached_value) 168 | 169 | def test_binary_string(self): 170 | value = 'value_to_be_compressed' 171 | compressed_value = zlib.compress(value.encode()) 172 | 173 | self.mc.set('binary1', compressed_value) 174 | compressed_result = self.mc.get('binary1') 175 | self.assertEqual(compressed_value, compressed_result) 176 | self.assertEqual(value, zlib.decompress(compressed_result).decode()) 177 | 178 | self.mc.add('binary1-add', compressed_value) 179 | compressed_result = 
self.mc.get('binary1-add') 180 | self.assertEqual(compressed_value, compressed_result) 181 | self.assertEqual(value, zlib.decompress(compressed_result).decode()) 182 | 183 | self.mc.set_multi({'binary1-set_many': compressed_value}) 184 | compressed_result = self.mc.get('binary1-set_many') 185 | self.assertEqual(compressed_value, compressed_result) 186 | self.assertEqual(value, zlib.decompress(compressed_result).decode()) 187 | 188 | def test_ignore_too_large_value(self): 189 | # NOTE: "MemCached: while expecting[...]" is normal... 190 | key = 'keyhere' 191 | 192 | value = 'a' * (SERVER_MAX_VALUE_LENGTH // 2) 193 | self.assertTrue(self.mc.set(key, value)) 194 | self.assertEqual(self.mc.get(key), value) 195 | 196 | value = 'a' * SERVER_MAX_VALUE_LENGTH 197 | with captured_stderr() as log: 198 | self.assertIs(self.mc.set(key, value), False) 199 | self.assertEqual( 200 | log.getvalue(), 201 | "MemCached: while expecting 'STORED', got unexpected response " 202 | "'SERVER_ERROR object too large for cache'\n" 203 | ) 204 | # This test fails if the -I option is used on the memcached server 205 | self.assertTrue(self.mc.get(key) is None) 206 | 207 | def test_get_set_multi_key_prefix(self): 208 | """Testing set_multi() with no memcacheds running.""" 209 | 210 | prefix = 'pfx_' 211 | values = {'key1': 'a', 'key2': 'b'} 212 | errors = self.mc.set_multi(values, key_prefix=prefix) 213 | self.assertEqual(errors, []) 214 | 215 | keys = list(values) 216 | self.assertEqual(self.mc.get_multi(keys, key_prefix=prefix), 217 | values) 218 | 219 | def test_set_multi_dead_servers(self): 220 | """Testing set_multi() with no memcacheds running.""" 221 | 222 | self.mc.disconnect_all() 223 | with captured_stderr() as log: 224 | for server in self.mc.servers: 225 | server.mark_dead('test') 226 | self.assertIn('Marking dead.', log.getvalue()) 227 | errors = self.mc.set_multi({'key1': 'a', 'key2': 'b'}) 228 | self.assertEqual(sorted(errors), ['key1', 'key2']) 229 | 230 | def 
test_disconnect_all_delete_multi(self): 231 | """Testing delete_multi() with no memcacheds running.""" 232 | self.mc.disconnect_all() 233 | with captured_stderr() as output: 234 | ret = self.mc.delete_multi(('keyhere', 'keythere')) 235 | self.assertEqual(ret, 1) 236 | self.assertEqual( 237 | output.getvalue(), 238 | "MemCached: while expecting 'DELETED', got unexpected response " 239 | "'NOT_FOUND'\n" 240 | "MemCached: while expecting 'DELETED', got unexpected response " 241 | "'NOT_FOUND'\n" 242 | ) 243 | 244 | @mock.patch.object(_Host, 'send_cmd') # Don't send any commands. 245 | @mock.patch.object(_Host, 'readline') 246 | def test_touch_unexpected_reply(self, mock_readline, mock_send_cmd): 247 | """touch() logs an error upon receiving an unexpected reply.""" 248 | mock_readline.return_value = 'SET' # the unexpected reply 249 | with captured_stderr() as output: 250 | self.mc.touch('key') 251 | self.assertEqual( 252 | output.getvalue(), 253 | "MemCached: touch expected %s, got: 'SET'\n" % 'TOUCHED' 254 | ) 255 | 256 | 257 | class TestMemcacheEncoder(unittest.TestCase): 258 | def setUp(self): 259 | # TODO(): unix socket server stuff 260 | servers = ["127.0.0.1:11211"] 261 | self.mc = Client(servers, debug=1, key_encoder=self.encoder) 262 | 263 | def tearDown(self): 264 | self.mc.flush_all() 265 | self.mc.disconnect_all() 266 | 267 | def encoder(self, key): 268 | return key.lower() 269 | 270 | def check_setget(self, key, val, noreply=False): 271 | self.mc.set(key, val, noreply=noreply) 272 | newval = self.mc.get(key) 273 | self.assertEqual(newval, val) 274 | 275 | def test_setget(self): 276 | self.check_setget("a_string", "some random string") 277 | self.check_setget("A_String2", "some random string") 278 | self.check_setget("an_integer", 42) 279 | self.assertEqual("some random string", self.mc.get("A_String")) 280 | self.assertEqual("some random string", self.mc.get("a_sTRing2")) 281 | self.assertEqual(42, self.mc.get("An_Integer")) 282 | 283 | 284 | if __name__ == 
'__main__': 285 | unittest.main() 286 | -------------------------------------------------------------------------------- /ChangeLog: -------------------------------------------------------------------------------- 1 | Sun, 14 Jan 2024 08:49:00 -0700 Sean Reifschneider 2 | 3 | * Fix some docstring typos (Christian Clauss PR #192) 4 | 5 | * delete() return value now is 1 for "DELETED" and 0 for "NOT_FOUND" or server error. 6 | (Nick Pope PR #190) 7 | 8 | * Removed obsolete configuration for Travis CI. (Nick Pope PR #189) 9 | 10 | Thu, 28 Dec 2023 19:24:00 -0700 Sean Reifschneider 11 | 12 | * 1.61 release. 13 | 14 | * Fixing the ChangeLog entry which had a bad date. 15 | 16 | Sun, 24 Dec 2023 22:33:47 -0700 Sean Reifschneider 17 | 18 | * 1.60 release. 19 | 20 | * Allow to use a datetime.timedelta parameter for Client.set 21 | (PR from Theo Massard) 22 | 23 | * Fix cmemcahe_hash 0 values being translated to 1 (PR from Chow Loong Jin) 24 | 25 | * Fix tuple key support in multi methods (PR from Sergii Mikhtoniuk) #154 26 | 27 | * Add support for default value in get (PR from Jakub Szafrański) 28 | 29 | * test_memcache.py: try import unittest.mock (PR from Tim Orling) 30 | 31 | * Use == as suggested by Python 3.8 (PR from @za) 32 | 33 | * Decoding in slab funcs, replacing "1" with "True" in while. #175 34 | 35 | * Tell PyPI to parse README as markdown (PR from @MartinThoma) #168 36 | 37 | * test_memcache.py: try import unittest.mock (PR from @moto-timo) #172 38 | 39 | * Removed Python 2.7 from setup.cfg, adding 3.8-3.12 40 | 41 | * Add support for default value in get() (PR from @samupl) #158 42 | 43 | * Fix tuple key support in multi methods (regression in 1.55). 44 | (PR from @sergiimk) #155 45 | 46 | * Fix cmemcahe_hash 0 values being translated to 1. For example "ob" 47 | would hash to 0 and then get converted to 1. Looks like this dated 48 | back to 2009, the original code we imported for that hash function. 
49 | (PR from @hyperair) #152 50 | 51 | * Removing six.PY2/3 conditionals 52 | 53 | * Removing "time" from delete and making expiry mandatory in touch. 54 | (port of @erankor's PR in #30 ) 55 | 56 | * Added quit() method. This method sends the 'quit' command to the 57 | servers and then closes the connections, reducing the number of 58 | TIME_WAIT sockets hanging around the OS. (PR from @userrl) #15 59 | 60 | * Allow to use a datetime.timedelta parameter for Client.set (PR from @tbobm) #146 61 | 62 | * Allow keys to be encoded before use. (Port of PR in #52 from @harlowja) 63 | 64 | * Removing historic python 2to3 code and deprecated socket.error (for 65 | OSError). (PR from @pgajdos) #186 66 | 67 | * Updating to the latest PSF license 68 | 69 | Fri, 15 Dec 2018 09:02:05 -0700 Sean Reifschneider 70 | 71 | * 1.59 release. 72 | 73 | * Added testing for Python 3.5 and 3.6 (PR from Tim Graham) #110, #131 74 | 75 | * Fixed typos in docstrings (PR from Romuald Brunet, reviewed by Tim 76 | Graham) #105 77 | 78 | * Removing Python 2.6, 3.2, and 3.3 testing (PR from Tim Graham) #115, #116 79 | 80 | * Removing unnecessary parens in return statements (PR from Tim Graham) 81 | #113 82 | 83 | * Remove unused _has_unicode/_str_cls vars (PR from Tim Graham) #111 84 | 85 | * Add flake8 testing and cleanups (PR from Tim Graham, cleanups from Sean 86 | Reifschneider) #112 87 | 88 | * Fixed storing non-ASCII values on Python 2 and binary values on Python 3 89 | (PR from Nicolas Noé) #135 90 | 91 | * Fixed touch(..., time=0) command (PR from Nicolas Noé) #137 92 | 93 | Fri, 27 May 2016 13:44:55 -0600 Sean Reifschneider 94 | 95 | * 1.58 release. 96 | 97 | * Fixing a performance regression in v1.54 in Python 2, using cPickle again. 98 | Patch by edmorley #86 99 | 100 | * Support for "stats slabs". 101 | Patch by grg350 #93 102 | 103 | * get_stats encoding fix. 
104 | Patch by bartTC #91 105 | 106 | * Pin Six version to >= 1.4 107 | Patch by pipermirriam #81 108 | 109 | * setup.py build process pulls version from memcached.py 110 | Patch by dieselmachine #72 111 | 112 | * delete() and delete_multi() now default the "time" argument to None, 113 | since the protocol doesn't allow a time in some implementations. 114 | Patch by oremj #27 115 | 116 | Fri, 31 Jul 2015 11:38:25 -0600 Sean Reifschneider 117 | 118 | * 1.57 release. 119 | 120 | * Fix for #75 and #76, mc.set(key, False) results in "ValueError: 121 | invalid literal for int()". Reported by Carlos Sanchez on github. 122 | 123 | Sun, 26 Jul 2015 14:44:20 -0600 Sean Reifschneider 124 | 125 | * 1.56 release. 126 | 127 | * More patches for python3 from Victor Stinner #67. Thanks! 128 | 129 | * Tests moved out into their own unittest testcases. 130 | Patch by Jeremy Thurgood. 131 | 132 | * Adding support for pluggable compressors. Patch by cactus on github. 133 | 134 | * Adding support for "noreply". Patch by cactus on github. 135 | 136 | * Allowing empty key in _multi() calls. Suggested by sergio97 on github. 137 | 138 | Sun, 21 Sep 2014 13:41:30 -0600 Sean Reifschneider 139 | 140 | * 1.54 release. 141 | 142 | * Numerous patches to attempt to get Python 3 working, but 143 | tests are still failing under Python 3, so this code is not ready 144 | yet. 145 | 146 | * MemcachedKeyCharacterError better describes key problem. 147 | Suggested by Roy Smith 148 | 149 | * Added touch(), patch by erankor on github. 150 | 151 | * Allow empty server list, patch by zewt on github. 152 | 153 | * If unknown flags in get request, raises a value error rather than the 154 | code bombing out. 155 | 156 | * Setting the SERVER_MAX_* values after import did not work, despite 157 | being documented to. Reported by alexf101 on github. 158 | 159 | Sun, 07 Jun 2013 11:12:18 -0600 Sean Reifschneider 160 | 161 | * 1.53 release. 
162 | 163 | * Fixing set_multi() so that if the server closes the connection 164 | it will no longer raise AttributeError. Issue found and resolution 165 | reviewed by Ben Hoyt. 166 | 167 | * readline() now will mark the connection dead if the read fails. 168 | It was just closing it before. This is related to the set_multi() 169 | change but worth noting separately. Thanks to Ben Hoyt. 170 | 171 | * Discussion of the above: 172 | https://github.com/linsomniac/python-memcached/commit/b7054a964aed0e6d86e853e60aab09cd0183b9f6#commitcomment-3337557 173 | 174 | Sun, 02 Jun 2013 01:08:26 -0600 Sean Reifschneider 175 | 176 | * 1.52 release. 177 | 178 | * Changing check_keys to use re.match() instead of str.translate(), 179 | because re.match() works with Python < 2.6. 180 | Found by Giovanni Di Milia. 181 | 182 | Mon, 06 May 2013 15:29:07 -0600 Sean Reifschneider 183 | 184 | * 1.51 release. 185 | 186 | * Add a MANIFEST.in file, patch by Daniel Widerin. 187 | 188 | Mon, 06 May 2013 07:20:21 -0600 Sean Reifschneider 189 | 190 | * 1.50 release. 191 | 192 | * Client() now takes a "check_keys" option, which defaults to True. 193 | If False, it disables the checking of keys to ensure they have 194 | acceptable size and are composed of non-control characters. 195 | Suggested by Ben Hoyt. 196 | 197 | * Converting control character checking of keys based on performance 198 | testing of alternatives by Ben Hoyt. 199 | 200 | * Converted unicode tests from using u'', patch from Eren Güve. 201 | 202 | * Included license file (pull request by "Philippe" pombredanne). 203 | 204 | * Doing a "set" after server goes away, raised AttributeError: 205 | 'NoneType' object has no attribute 'sendall'. Patch by Ken Lalonde 206 | 207 | * incr/decr return None instead of 0 on server connection failure. 208 | Suggested by Ivan Virabyan 209 | 210 | * Supports IPv6 connections using: "inet6:[fd00::32:19f7]:11000". 
211 | Patch by Romain Courteaud 212 | 213 | * Switching over to github for this project: 214 | 215 | https://github.com/linsomniac 216 | 217 | * Bug #974632: _ConnectionDeadError sometimes was escaping the get/set 218 | code. This should return to readline() not raising an exception, except 219 | in the case that it's called from the get/set(). Report from Gary 220 | Poster, proposed patch by Brad Crittenden. 221 | 222 | Misc fixes by Brad Crittenden: fixing a docstring, if "port" is set to 223 | any false-like value it will default to 11211. 224 | 225 | Mon, 29 Nov 2011 12:37:32 -0700 Sean Reifschneider 226 | 227 | * Bug #887765: Interrupted connection to memcache server can cause 228 | inconsistencies. 229 | Added "flush_on_reconnect" (defaults to off) to Client() which will 230 | cause a client that has lost connection to a server and then reconnects 231 | to flush the cache on the reconnect so that it doesn't get old values 232 | from that server. Patch by Daniel Benamy. 233 | 234 | Sun, 27 Nov 2011 18:15:32 -0700 Sean Reifschneider 235 | 236 | * Bug #745633: Values of maximum size are not stored 237 | API inconsistency, max value length was tested for <= while max KEY 238 | length was <. So I picked that keys and values *LONGER* than the 239 | specified max value are what is used, and added documentation and tests 240 | to that effect. The test for max value tested that length plus 4, so 241 | I've changed that to be that value plus 1. Issue found by matt-quru. 242 | 243 | * Bug #713488: Issues Invalid "delete" command. 244 | Protocol has changed so that the "delete" operation no longer takes a 245 | "time" argument. It seems that some servers will refuse a "delete key 246 | 0" while others will accept it, but the official server will NOT accept 247 | "delete key 1". So I've changed it so that if no "time" argument is 248 | specified, no time argument is sent to the server. 
249 | 250 | * Bug #713451: server.expect("END") needs to be in a finally block 251 | Expect an "END" when the _recv_value() raises an exception. 252 | Patch by Jay Farrimond. 253 | 254 | * Bug: #741090: cas cache can grow unbounded. Default now is that the 255 | cache is not used, unless the "Client()" object is created with 256 | "cache_cas=True". In that case, you need to have your own cas clearing 257 | code, a simple one would be to use Client().reset_cas() to completely 258 | clear the cas_ids cache. Problem pointed out by Shaun Cutts. 259 | 260 | * Bug #728359: Make python-memcache work on memcache restarts. 261 | Patch by Tarek Ziade', reviewed and further patches submitted by Hugo 262 | Beauze'e-Luysse and Neganov Alexandr. 263 | 264 | * Bug #798342: If memcached server sends unknown flag in response for 265 | "get", results in: 266 | "UnboundLocalError: local variable 'val' referenced before assignment" 267 | Now returns "None" instead. Patch by Sharoon Thomas 268 | 269 | Mon, 20 Dec 2010 19:14:17 -0700 Sean Reifschneider 270 | 271 | * Bug #680359: useOldServerHashFunction() is broken. It now correctly 272 | switches back to the old memcache hash function. 273 | 274 | Thu, 16 Dec 2010 02:07:40 -0700 Sean Reifschneider 275 | 276 | * Bug #471727: Changed the delete() code to explicitly check for both 277 | NOT_FOUND and DELETED as the responses and return successful for both. 278 | It also logs an error if one of these two responses is not found. 279 | Also added a test to ensure that delete() works. 280 | 281 | * When using set_multi and one value is too big, traceback 282 | TypeError: 'int' object is unsubscriptable 283 | Patch by Orjan Persson 284 | 285 | * Fixing Bug #529855: Server host can now be bare host without ":". 286 | Fix proposed by Roger Binns. 287 | 288 | * Fixing Bug #491164: Typo fix, "compession" -> "compRession". 
289 | 290 | * Fixing Bug #509712: "TypeError: 'NoneType' object is unsubscriptable" 291 | Also fixed some other similar code to not have issues with that. 292 | 293 | * Also related to 509712 and 628339: readline() now returns '' instead 294 | of None when a server dies. This should be safer. Patch suggested by 295 | Denis Otkidach. 296 | 297 | * Fixing Bug #628339: Read from server sometimes fails. Patch by Jeremy 298 | Cowles. 299 | 300 | * Fixing Bug #633553: Add stat arguments support to get_stats(). Patch 301 | by Ryan Lane. 302 | 303 | * Changing the license to the PSF License. 304 | 305 | * Removing Evan's e-mail address at his request, changing authorship to 306 | Sean. 307 | 308 | Sat, 28 Nov 2009 01:07:42 -0700 Sean Reifschneider 309 | 310 | * Version 1.45 311 | 312 | * Per-connection max server key length. Patch by Nicolas Delaby 313 | 314 | * Patches to make memcached more garbage-collectable. Removes 315 | "debugfunc" argument from _Host objects and changed to "debug" 316 | boolean. Patches by John McFarlane and Aryeh Katz. 317 | 318 | * Switching to a cmemcache compatible hash function. Implemented by 319 | André Cru and Ludvig Ericson. To switch back to the old style, use: 320 | 321 | memcached.useOldServerHashFunction() 322 | 323 | * Rejecting keys that have spaces in them. Patch by Etienne Posthumus. 324 | 325 | * Fixing exception raising syntax. Patch by Samuel Stauffer. 326 | 327 | * Optimizations in read code. Patch by Samuel Stauffer. 328 | 329 | * Changing classes to be newstyle. Patch by Samuel Stauffer. 330 | 331 | * Changed "has_key" to "in". Patch by Samuel Stauffer. 332 | 333 | * incr/decr were raising ValueError if the key did not exist, the 334 | docstring said it returned none. Patch by Chihiro Sakatoku. 335 | 336 | * Adding cas method, submitted by Ben Gutierrez. 337 | 338 | * Fix in the docstring for how to use the "set" method. 
Found and fixed 339 | by William McVey 340 | 341 | Thu, 02 Apr 2009 13:37:49 -0600 Sean Reifschneider 342 | 343 | * Version 1.44 344 | 345 | * Allowing spaces in the key. (Patch provided by xmm on Launchpad) 346 | 347 | * Detecting when the pickler needs a positional argument. (Patch 348 | provided by Brad Clements on Launchpad) 349 | 350 | * Moving length check after the compression. (Patch provided by user 351 | Tom on Launchpad) 352 | 353 | * Fixing arguments passed to the _Error if invalid read length. 354 | 355 | * Fixing the representation of domain sockets. (Patch provided by user 356 | MTB on Launchpad) 357 | 358 | * Changing a typo of dead_until. (Patch provided by Shane R. Spencer) 359 | 360 | * Providing better error messages (patch provided by Johan Euphrosine). 361 | 362 | * Adding get_slabs() function to get stats. (Patch provided 363 | by Nick Verbeck) 364 | 365 | Sun, 01 Jun 2008 15:05:11 -0600 Sean Reifschneider 366 | 367 | * Version 1.43 368 | 369 | * eliott reported a bug in the 1.42 related to the socket timeout code 370 | causing a traceback due to the timeout value not being set. 371 | 372 | Sat, 31 May 2008 02:09:17 -0600 Sean Reifschneider 373 | 374 | * Version 1.42 375 | 376 | * Paul Hummer set up a Launchpad project which I'm going to start using 377 | to track patches and allow users to set up their own bzr branches and 378 | manage merging in the upstream patches with their own. 379 | 380 | https://launchpad.net/python-memcached 381 | 382 | * Patch from Jehiah Czebotar which does: Changing the calls to 383 | mark_dead() to make them dereference tuples, reducing timeout on 384 | sockets to 3 seconds, settable via setting Host._SOCKET_TIMEOUT. 385 | 386 | * Patches from Steve Schwarz for set_multi() to return the full set of 387 | keys if all servers are down. Previously would not report any keys. 388 | 389 | * Fix from Steve Schwarz delete_multi() argument "seconds" not being 390 | correctly handled. 
Changed it to "time" to match all other calls. 391 | 392 | * Patch from Peter Wilkinson to support using unix domain sockets. 393 | He reports that tests succeed with memcached daemons running, 394 | the normal and a domain socket started via 395 | "memcached -s memcached.socket". I massaged it quite a bit. 396 | 397 | To use domain sockets, use a connect string of "unix:/path/to/socket" 398 | Note however that if you are using a host name of "unix", it will now 399 | detect "unix:11211" as being a domain socket with the name "11211". 400 | In this case, please use "inet:unix:11211". 401 | 402 | Because of this, it is now preferred to use a connect string prefix 403 | of "inet:" or "unix:". 404 | 405 | Tue, 29 Apr 2008 21:03:53 -0600 Sean Reifschneider 406 | 407 | * Version 1.41 408 | 409 | * Patch from Jehiah Czebotar to catch an additional server disconnect 410 | situation. 411 | 412 | * Patch from Andrey Petrov to add the "append" and "replace" commands. 413 | 414 | Tue, 18 Sep 2007 20:52:09 -0600 Sean Reifschneider 415 | 416 | * Version 1.40 417 | 418 | * Updated setup.py file that uses distutils provided by Kai Lautaportti. 419 | 420 | * Prevent keys from containing ASCII character 127 as well, patch provided 421 | by Philip Neustrom. 422 | 423 | * Added ability to overload the persistent_load/id, patch provided by 424 | Steve Schwarz. 425 | 426 | * Fixed ability to pass (server_hash,key) in place of key in Client.set() 427 | Reported by Alexander Klyuev. 428 | 429 | Tue, 14 Aug 2007 14:43:27 -0600 Sean Reifschneider 430 | 431 | * Version 1.39 432 | 433 | * Michael Krause reports the previous version doesn't work for 434 | _val_to_store_info() calls because it's defined as a staticmethod. 435 | Removing staticmethod decorator. Also confirmed by Kai Lautaportti, 436 | with suggested fix of removing staticmethod.
437 | 438 | Fri, 10 Aug 2007 17:50:13 -0600 Sean Reifschneider 439 | 440 | * Version 1.38 441 | 442 | * Matt McClanahan submitted a patch that allow add() to have a 443 | min_compress_len argument. 444 | 445 | * Steve Schwarz submitted a patch allowing user-defined picklers. 446 | 447 | * Michael Krause suggested checking the return value to prevent an 448 | exception from being raised in _set() when a value is too large to be 449 | stored. 450 | 451 | Fri, 27 Jul 2007 01:55:48 -0600 Sean Reifschneider 452 | 453 | * Version 1.37 454 | 455 | * Fixing call from add() to _set() with parameter for min_compress_len. 456 | Reported by Jeff Fisher. 457 | 458 | Thu, 07 Jun 2007 04:10:31 -0600 Sean Reifschneider 459 | 460 | * Version 1.36 461 | 462 | * Patch by Dave St.Germain to make the Client() class sub-class 463 | threadlocal to help with multi-threading issues. Only available in 464 | Python 2.4 and above. 465 | 466 | * Patch by James Robinson with: 467 | 1) new set_multi method. 468 | 2) factored out determining the flags, length, and value to store 469 | from set() into method _val_to_store_info() for use by both set() 470 | and set_multi(). 471 | 3) send_cmds() method on host which doesn't apply the trailing '\r\n' 472 | for use by set_multi. 473 | 4) check_key() extended a bit to allow for testing the prefix passed 474 | to set_multi just once, not once per each key. 475 | 5) Patch also enables support for auto compression in set, set_multi, 476 | and replace. 477 | 478 | * Suggestion by Helge Tesdal, fixes in check_key for non-string keys. 479 | 480 | * NOTE: On a farm of clients with multiple servers, all clients will 481 | need to be upgraded to this version. The next patch changes the 482 | server hash. 483 | 484 | * Philip Neustrom supplied a patch to change the server hash function to 485 | binascii.crc32. 
The original "hash()" call is not cross-platform, so 486 | big and little endian systems accessing the same memcache may end up 487 | hitting different servers. Restore the old functionality by calling: 488 | "memcached.serverHashFunction = hash" after importing memcache. 489 | 490 | * Philip Neustrom points out that passing Unicode keys or values causes 491 | problems because len(key) or len(value) is not equal to the number of 492 | bytes that are required to store the key/value. Philip provides a 493 | patch which raises an exception in this case. Raises 494 | memcache.Client.MemcachedStringEncodingError exception in this case. 495 | 496 | * NOTE: If you recompiled memcached to increase the default 1MB max 497 | value size, you will need to call "memcached.MAX_SERVER_VALUE_LENGTH = N" 498 | or memcached will not store values larger than the default 1MB. 499 | 500 | * Philip Neustrom includes another patch which checks that the key 501 | doesn't exceed the memcache server's max size. If it does, the item 502 | is silently not stored. 503 | 504 | * Philip Neustrom added a bunch of sanity checks. 505 | 506 | * Jehiah Czebotar provided a patch to make the add() and replace() 507 | functions return 0 when the add or replace fails, similar to how set() 508 | works. 509 | 510 | Sat, 16 Sep 2006 18:31:46 -0600 Sean Reifschneider 511 | 512 | * Version 1.34 513 | 514 | * In get_multi, if the recv loop reads 0 bytes, raising an EOFError. 515 | Identified by Jim Baker. 516 | 517 | Tue, 05 Sep 2006 14:06:50 -0600 Sean Reifschneider 518 | 519 | * Version 1.33 520 | 521 | * Including patch from Yoshinori K. Okuji to read in larger chunks for 522 | readline() calls. This should dramatically improve performance under 523 | some circumstances. 524 | 525 | Sun, 03 Sep 2006 14:02:03 -0600 Sean Reifschneider 526 | 527 | * Version 1.32 528 | 529 | * Including patch from Philip Neustrom which checks keys sent to the 530 | server for length and bad characters. 
531 | 532 | Sat, 20 May 2006 14:51:28 -0600 Sean Reifschneider 533 | 534 | * Version 1.31 535 | 536 | * Rolled version 1.30 since the Danga folks are now listing this 537 | version as the official version. Removing the "tummy" from the version 538 | number, and incrementing so that it's clear it's more recent than "1.2". 539 | 540 | * Patch applied from Simon Forman for handling of weighted hosts. 541 | 542 | * Added a little more meat to the README. 543 | 544 | Sat, 28 Jan 2006 15:59:50 -0700 Sean Reifschneider 545 | 546 | * cludwin at socallocal suggested that the write-combining with 547 | sendall() may not be beneficial. After testing on both SMP and non-SMP 548 | machines, I can't see a significant benefit to not doing the 549 | write-combining, even on large strings. The benefits of write-combining 550 | on smaller strings seems to be significant on UP machines in tight loops. 551 | Even on strings that are larger than 2MB, there seems to be no benefit to 552 | splitting out the writes. 553 | 554 | Sun, 18 Sep 2005 18:56:31 -0600 Sean Reifschneider 555 | 556 | * Changing a printf to debuglog and catching a pickle exception, patch 557 | submitted by Justin Azoff. 558 | 559 | Thu, 14 Jul 2005 11:17:30 -0700 Sean Reifschneider 560 | 561 | * Alex Stapleton found that the sendall call was slow for writing data 562 | larger than several kilobytes. I had him test a change to his patch, 563 | which worked as well, but was simpler. The code now does two sendall 564 | calls, one for the data and one for the line termination, if the data is 565 | larger than 100 bytes. 566 | 567 | Thu, 7 Apr 2005 14:45:44 -0700 Sean Reifschneider 568 | 569 | * Incorporating some fixes to get_multi() from Bo Yang 570 | 571 | Mon, 13 Dec 2004 02:35:17 -0700 Sean Reifschneider 572 | 573 | * Simplifying the readline() function and speeding it up ~25%. 574 | * Fixing a bug in readline() if the server drops, mark_dead() was not 575 | being properly called. 
576 | 577 | Sun, 12 Dec 2004 18:56:33 -0700 Sean Reifschneider 578 | 579 | * Adding "stats()" and "flush_all()" methods. 580 | 581 | Thu, 10 Aug 2003 12:17:50 -0700 Evan Martin 582 | 583 | * Slightly more verbose self-test output. 584 | * Fix mark_dead() to use proper classname. 585 | * Make pooltest.py run from the test directory. 586 | 587 | Thu, 07 Aug 2003 16:32:32 -0700 Evan Martin 588 | 589 | * Add incr, decr, and delete. 590 | * Better Python (based on comments from Uriah Welcome). 591 | * Docs, using epydoc. 592 | 593 | Thu, 07 Aug 2003 14:20:27 -0700 Evan Martin 594 | 595 | * Initial prerelease. 596 | -------------------------------------------------------------------------------- /memcache.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """client module for memcached (memory cache daemon) 4 | 5 | Overview 6 | ======== 7 | 8 | See U{the MemCached homepage} for more 9 | about memcached. 10 | 11 | Usage summary 12 | ============= 13 | 14 | This should give you a feel for how this module operates:: 15 | 16 | import memcache 17 | mc = memcache.Client(['127.0.0.1:11211'], debug=0) 18 | 19 | mc.set("some_key", "Some value") 20 | value = mc.get("some_key") 21 | 22 | mc.set("another_key", 3) 23 | mc.delete("another_key") 24 | 25 | mc.set("key", "1") # note that the key used for incr/decr must be 26 | # a string. 27 | mc.incr("key") 28 | mc.decr("key") 29 | 30 | The standard way to use memcache with a database is like this: 31 | 32 | key = derive_key(obj) 33 | obj = mc.get(key) 34 | if not obj: 35 | obj = backend_api.get(...) 36 | mc.set(key, obj) 37 | 38 | # we now have obj, and future passes through this code 39 | # will use the object from the cache. 40 | 41 | Detailed Documentation 42 | ====================== 43 | 44 | More detailed documentation is available in the L{Client} class. 
45 | 46 | """ 47 | 48 | 49 | import binascii 50 | from datetime import timedelta 51 | from io import BytesIO 52 | import re 53 | import socket 54 | import sys 55 | import threading 56 | import time 57 | import zlib 58 | 59 | import pickle 60 | 61 | 62 | def cmemcache_hash(key): 63 | return ((binascii.crc32(key) & 0xffffffff) >> 16) & 0x7fff 64 | 65 | 66 | serverHashFunction = cmemcache_hash 67 | 68 | 69 | def useOldServerHashFunction(): 70 | """Use the old python-memcache server hash function.""" 71 | global serverHashFunction 72 | serverHashFunction = binascii.crc32 73 | 74 | 75 | valid_key_chars_re = re.compile(b'[\x21-\x7e\x80-\xff]+$') 76 | 77 | 78 | # Original author: Evan Martin of Danga Interactive 79 | __author__ = "Sean Reifschneider " 80 | __version__ = "1.60" 81 | __copyright__ = "Copyright (C) 2003 Danga Interactive" 82 | # http://en.wikipedia.org/wiki/Python_Software_Foundation_License 83 | __license__ = "Python Software Foundation License" 84 | 85 | SERVER_MAX_KEY_LENGTH = 250 86 | # Storing values larger than 1MB requires starting memcached with -I for 87 | # memcached >= 1.4.2 or recompiling for < 1.4.2. If you do, this value can be 88 | # changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N" after importing this 89 | # module. 90 | SERVER_MAX_VALUE_LENGTH = 1024 * 1024 91 | 92 | 93 | class _Error(Exception): 94 | pass 95 | 96 | 97 | class _ConnectionDeadError(Exception): 98 | pass 99 | 100 | 101 | _DEAD_RETRY = 30 # number of seconds before retrying a dead server. 102 | _SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout. 103 | 104 | 105 | class Client(threading.local): 106 | """Object representing a pool of memcache servers. 107 | 108 | See L{memcache} for an overview. 109 | 110 | In all cases where a key is used, the key can be either: 111 | 1. A simple hashable type (string, integer, etc.). 112 | 2. A tuple of C{(hashvalue, key)}. This is useful if you want 113 | to avoid making this module calculate a hash value. 
You may 114 | prefer, for example, to keep all of a given user's objects on 115 | the same memcache server, so you could use the user's unique 116 | id as the hash value. 117 | 118 | 119 | @group Setup: __init__, set_servers, forget_dead_hosts, 120 | disconnect_all, debuglog 121 | @group Insertion: set, add, replace, set_multi 122 | @group Retrieval: get, get_multi 123 | @group Integers: incr, decr 124 | @group Removal: delete, delete_multi 125 | @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, 126 | debuglog, set, set_multi, add, replace, get, get_multi, 127 | incr, decr, delete, delete_multi 128 | """ 129 | _FLAG_PICKLE = 1 << 0 130 | _FLAG_INTEGER = 1 << 1 131 | _FLAG_LONG = 1 << 2 132 | _FLAG_COMPRESSED = 1 << 3 133 | _FLAG_TEXT = 1 << 4 134 | 135 | _SERVER_RETRIES = 10 # how many times to try finding a free server. 136 | 137 | # exceptions for Client 138 | class MemcachedKeyError(Exception): 139 | pass 140 | 141 | class MemcachedKeyLengthError(MemcachedKeyError): 142 | pass 143 | 144 | class MemcachedKeyCharacterError(MemcachedKeyError): 145 | pass 146 | 147 | class MemcachedKeyNoneError(MemcachedKeyError): 148 | pass 149 | 150 | class MemcachedKeyTypeError(MemcachedKeyError): 151 | pass 152 | 153 | class MemcachedStringEncodingError(Exception): 154 | pass 155 | 156 | def __init__(self, servers, debug=0, pickleProtocol=0, 157 | pickler=pickle.Pickler, unpickler=pickle.Unpickler, 158 | compressor=zlib.compress, decompressor=zlib.decompress, 159 | pload=None, pid=None, 160 | server_max_key_length=None, server_max_value_length=None, 161 | dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, 162 | cache_cas=False, flush_on_reconnect=0, check_keys=True, 163 | key_encoder=None): 164 | """Create a new Client object with the given list of servers. 165 | 166 | @param servers: C{servers} is passed to L{set_servers}. 167 | @param debug: whether to display error messages when a server 168 | can't be contacted. 
169 | @param pickleProtocol: number to mandate protocol used by 170 | (c)Pickle. 171 | @param pickler: optional override of default Pickler to allow 172 | subclassing. 173 | @param unpickler: optional override of default Unpickler to 174 | allow subclassing. 175 | @param pload: optional persistent_load function to call on 176 | pickle loading. Useful for cPickle since subclassing isn't 177 | allowed. 178 | @param pid: optional persistent_id function to call on pickle 179 | storing. Useful for cPickle since subclassing isn't allowed. 180 | @param dead_retry: number of seconds before retrying a 181 | blacklisted server. Default to 30 s. 182 | @param socket_timeout: timeout in seconds for all calls to a 183 | server. Defaults to 3 seconds. 184 | @param cache_cas: (default False) If true, cas operations will 185 | be cached. WARNING: This cache is not expired internally, if 186 | you have a long-running process you will need to expire it 187 | manually via client.reset_cas(), or the cache can grow 188 | unlimited. 189 | @param server_max_key_length: (default SERVER_MAX_KEY_LENGTH) 190 | Data that is larger than this will not be sent to the server. 191 | @param server_max_value_length: (default 192 | SERVER_MAX_VALUE_LENGTH) Data that is larger than this will 193 | not be sent to the server. 194 | @param flush_on_reconnect: optional flag which prevents a 195 | scenario that can cause stale data to be read: If there's more 196 | than one memcached server and the connection to one is 197 | interrupted, keys that mapped to that server will get 198 | reassigned to another. If the first server comes back, those 199 | keys will map to it again. If it still has its data, get()s 200 | can read stale data that was overwritten on another 201 | server. This flag is off by default for backwards 202 | compatibility. 203 | @param check_keys: (default True) If True, the key is checked 204 | to ensure it is the correct length and composed of the right 205 | characters. 
206 | @param key_encoder: (default None) If provided a functor that will 207 | be called to encode keys before they are checked and used. It will 208 | be expected to take one parameter (the key) and return a new encoded 209 | key as a result. 210 | """ 211 | super().__init__() 212 | self.debug = debug 213 | self.dead_retry = dead_retry 214 | self.socket_timeout = socket_timeout 215 | self.flush_on_reconnect = flush_on_reconnect 216 | self.set_servers(servers) 217 | self.stats = {} 218 | self.cache_cas = cache_cas 219 | self.reset_cas() 220 | self.do_check_key = check_keys 221 | 222 | # Allow users to modify pickling/unpickling behavior 223 | self.pickleProtocol = pickleProtocol 224 | self.pickler = pickler 225 | self.unpickler = unpickler 226 | self.compressor = compressor 227 | self.decompressor = decompressor 228 | self.persistent_load = pload 229 | self.persistent_id = pid 230 | self.server_max_key_length = server_max_key_length 231 | if key_encoder is None: 232 | def key_encoder(key): 233 | return key 234 | self.key_encoder = key_encoder 235 | if self.server_max_key_length is None: 236 | self.server_max_key_length = SERVER_MAX_KEY_LENGTH 237 | self.server_max_value_length = server_max_value_length 238 | if self.server_max_value_length is None: 239 | self.server_max_value_length = SERVER_MAX_VALUE_LENGTH 240 | 241 | # figure out the pickler style 242 | file = BytesIO() 243 | try: 244 | pickler = self.pickler(file, protocol=self.pickleProtocol) 245 | self.picklerIsKeyword = True 246 | except TypeError: 247 | self.picklerIsKeyword = False 248 | 249 | def _encode_key(self, key): 250 | if isinstance(key, tuple): 251 | if isinstance(key[1], str): 252 | return (key[0], key[1].encode('utf8')) 253 | elif isinstance(key, str): 254 | return key.encode('utf8') 255 | return key 256 | 257 | def _encode_cmd(self, cmd, key, headers, noreply, *args): 258 | cmd_bytes = cmd.encode('utf-8') 259 | fullcmd = [cmd_bytes, b' ', key] 260 | 261 | if headers: 262 | headers = 
headers.encode('utf-8') 263 | fullcmd.append(b' ') 264 | fullcmd.append(headers) 265 | 266 | if noreply: 267 | fullcmd.append(b' noreply') 268 | 269 | if args: 270 | fullcmd.append(b' ') 271 | fullcmd.extend(args) 272 | return b''.join(fullcmd) 273 | 274 | def reset_cas(self): 275 | """Reset the cas cache. 276 | 277 | This is only used if the Client() object was created with 278 | "cache_cas=True". If used, this cache does not expire 279 | internally, so it can grow unbounded if you do not clear it 280 | yourself. 281 | """ 282 | self.cas_ids = {} 283 | 284 | def set_servers(self, servers): 285 | """Set the pool of servers used by this client. 286 | 287 | @param servers: an array of servers. 288 | Servers can be passed in two forms: 289 | 1. Strings of the form C{"host:port"}, which implies a 290 | default weight of 1. 291 | 2. Tuples of the form C{("host:port", weight)}, where 292 | C{weight} is an integer weight value. 293 | 294 | """ 295 | self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry, 296 | socket_timeout=self.socket_timeout, 297 | flush_on_reconnect=self.flush_on_reconnect) 298 | for s in servers] 299 | self._init_buckets() 300 | 301 | def get_stats(self, stat_args=None): 302 | """Get statistics from each of the servers. 303 | 304 | @param stat_args: Additional arguments to pass to the memcache 305 | "stats" command. 306 | 307 | @return: A list of tuples ( server_identifier, 308 | stats_dictionary ). The dictionary contains a number of 309 | name/value pairs specifying the name of the status field 310 | and the string value associated with it. The values are 311 | not converted from strings. 
312 | """ 313 | data = [] 314 | for s in self.servers: 315 | if not s.connect(): 316 | continue 317 | if s.family == socket.AF_INET: 318 | name = '{}:{} ({})'.format(s.ip, s.port, s.weight) 319 | elif s.family == socket.AF_INET6: 320 | name = '[{}]:{} ({})'.format(s.ip, s.port, s.weight) 321 | else: 322 | name = 'unix:{} ({})'.format(s.address, s.weight) 323 | if not stat_args: 324 | s.send_cmd('stats') 325 | else: 326 | s.send_cmd('stats ' + stat_args) 327 | serverData = {} 328 | data.append((name, serverData)) 329 | readline = s.readline 330 | while True: 331 | line = readline() 332 | if line: 333 | line = line.decode('ascii') 334 | if not line or line.strip() == 'END': 335 | break 336 | stats = line.split(' ', 2) 337 | serverData[stats[1]] = stats[2] 338 | 339 | return data 340 | 341 | def get_slab_stats(self): 342 | data = [] 343 | for s in self.servers: 344 | if not s.connect(): 345 | continue 346 | if s.family == socket.AF_INET: 347 | name = '{}:{} ({})'.format(s.ip, s.port, s.weight) 348 | elif s.family == socket.AF_INET6: 349 | name = '[{}]:{} ({})'.format(s.ip, s.port, s.weight) 350 | else: 351 | name = 'unix:{} ({})'.format(s.address, s.weight) 352 | serverData = {} 353 | data.append((name, serverData)) 354 | s.send_cmd('stats slabs') 355 | readline = s.readline 356 | while True: 357 | line = readline() 358 | if line: 359 | line = line.decode('ascii') 360 | if not line or line.strip() == 'END': 361 | break 362 | item = line.split(' ', 2) 363 | if line.startswith('STAT active_slabs') or line.startswith('STAT total_malloced'): 364 | serverData[item[1]] = item[2] 365 | else: 366 | # 0 = STAT, 1 = ITEM, 2 = Value 367 | slab = item[1].split(':', 2) 368 | # 0 = Slab #, 1 = Name 369 | if slab[0] not in serverData: 370 | serverData[slab[0]] = {} 371 | serverData[slab[0]][slab[1]] = item[2] 372 | return data 373 | 374 | def quit_all(self) -> None: 375 | '''Send a "quit" command to all servers and wait for the connection to close.''' 376 | for s in self.servers: 
377 | s.quit() 378 | 379 | def get_slabs(self): 380 | data = [] 381 | for s in self.servers: 382 | if not s.connect(): 383 | continue 384 | if s.family == socket.AF_INET: 385 | name = '{}:{} ({})'.format(s.ip, s.port, s.weight) 386 | elif s.family == socket.AF_INET6: 387 | name = '[{}]:{} ({})'.format(s.ip, s.port, s.weight) 388 | else: 389 | name = 'unix:{} ({})'.format(s.address, s.weight) 390 | serverData = {} 391 | data.append((name, serverData)) 392 | s.send_cmd('stats items') 393 | readline = s.readline 394 | while True: 395 | line = readline() 396 | if not line or line.strip() == 'END': 397 | break 398 | item = line.split(' ', 2) 399 | # 0 = STAT, 1 = ITEM, 2 = Value 400 | slab = item[1].split(':', 2) 401 | # 0 = items, 1 = Slab #, 2 = Name 402 | if slab[1] not in serverData: 403 | serverData[slab[1]] = {} 404 | serverData[slab[1]][slab[2]] = item[2] 405 | return data 406 | 407 | def flush_all(self): 408 | """Expire all data in memcache servers that are reachable.""" 409 | for s in self.servers: 410 | if not s.connect(): 411 | continue 412 | s.flush() 413 | 414 | def debuglog(self, str): 415 | if self.debug: 416 | sys.stderr.write("MemCached: %s\n" % str) 417 | 418 | def _statlog(self, func): 419 | if func not in self.stats: 420 | self.stats[func] = 1 421 | else: 422 | self.stats[func] += 1 423 | 424 | def forget_dead_hosts(self): 425 | """Reset every host in the pool to an "alive" state.""" 426 | for s in self.servers: 427 | s.deaduntil = 0 428 | 429 | def _init_buckets(self): 430 | self.buckets = [] 431 | for server in self.servers: 432 | for i in range(server.weight): 433 | self.buckets.append(server) 434 | 435 | def _get_server(self, key): 436 | if isinstance(key, tuple): 437 | serverhash, key = key 438 | else: 439 | serverhash = serverHashFunction(key) 440 | 441 | if not self.buckets: 442 | return None, None 443 | 444 | for i in range(Client._SERVER_RETRIES): 445 | server = self.buckets[serverhash % len(self.buckets)] 446 | if server.connect(): 447 | # 
print("(using server %s)" % server,) 448 | return server, key 449 | serverhash = str(serverhash) + str(i) 450 | if isinstance(serverhash, str): 451 | serverhash = serverhash.encode('ascii') 452 | serverhash = serverHashFunction(serverhash) 453 | return None, None 454 | 455 | def disconnect_all(self): 456 | for s in self.servers: 457 | s.close_socket() 458 | 459 | def delete_multi(self, keys, time=None, key_prefix='', noreply=False): 460 | """Delete multiple keys in the memcache doing just one query. 461 | 462 | >>> notset_keys = mc.set_multi({'a1' : 'val1', 'a2' : 'val2'}) 463 | >>> mc.get_multi(['a1', 'a2']) == {'a1' : 'val1','a2' : 'val2'} 464 | 1 465 | >>> mc.delete_multi(['key1', 'key2']) 466 | 1 467 | >>> mc.get_multi(['key1', 'key2']) == {} 468 | 1 469 | 470 | This method is recommended over iterated regular L{delete}s as 471 | it reduces total latency, since your app doesn't have to wait 472 | for each round-trip of L{delete} before sending the next one. 473 | 474 | @param keys: An iterable of keys to clear 475 | @param time: number of seconds any subsequent set / update 476 | commands should fail. Defaults to 0 for no delay. 477 | @param key_prefix: Optional string to prepend to each key when 478 | sending to memcache. See docs for L{get_multi} and 479 | L{set_multi}. 480 | @param noreply: optional parameter instructs the server to not send the 481 | reply. 482 | @return: 1 if no failure in communication with any memcacheds. 
483 | @rtype: int 484 | """ 485 | 486 | self._statlog('delete_multi') 487 | 488 | server_keys, prefixed_to_orig_key = self._map_and_prefix_keys( 489 | keys, key_prefix) 490 | 491 | # send out all requests on each server before reading anything 492 | dead_servers = [] 493 | 494 | rc = 1 495 | for server in server_keys.keys(): 496 | bigcmd = [] 497 | write = bigcmd.append 498 | if time is not None: 499 | headers = str(time) 500 | else: 501 | headers = None 502 | for key in server_keys[server]: # These are mangled keys 503 | cmd = self._encode_cmd('delete', self.key_encoder(key), headers, noreply, b'\r\n') 504 | write(cmd) 505 | try: 506 | server.send_cmds(b''.join(bigcmd)) 507 | except OSError as msg: 508 | rc = 0 509 | if isinstance(msg, tuple): 510 | msg = msg[1] 511 | server.mark_dead(msg) 512 | dead_servers.append(server) 513 | 514 | # if noreply, just return 515 | if noreply: 516 | return rc 517 | 518 | # if any servers died on the way, don't expect them to respond. 519 | for server in dead_servers: 520 | del server_keys[server] 521 | 522 | for server, keys in server_keys.items(): 523 | try: 524 | for key in keys: 525 | server.expect(b"DELETED") 526 | except OSError as msg: 527 | if isinstance(msg, tuple): 528 | msg = msg[1] 529 | server.mark_dead(msg) 530 | rc = 0 531 | return rc 532 | 533 | def delete(self, key, noreply=False): 534 | '''Deletes a key from the memcache. 535 | 536 | @return: Nonzero on success. 537 | @param noreply: optional parameter instructs the server to not send the 538 | reply. 
539 | @rtype: int 540 | ''' 541 | key = self._encode_key(self.key_encoder(key)) 542 | if self.do_check_key: 543 | self.check_key(key) 544 | server, key = self._get_server(key) 545 | if not server: 546 | return 0 547 | self._statlog('delete') 548 | fullcmd = self._encode_cmd('delete', key, None, noreply) 549 | 550 | try: 551 | server.send_cmd(fullcmd) 552 | if noreply: 553 | return 1 554 | line = server.readline() 555 | if line and line.strip() == b'DELETED': 556 | return 1 557 | self.debuglog('delete expected DELETED, got: {!r}'.format(line)) 558 | except OSError as msg: 559 | if isinstance(msg, tuple): 560 | msg = msg[1] 561 | server.mark_dead(msg) 562 | return 0 563 | 564 | def touch(self, key, time=0, noreply=False): 565 | '''Updates the expiration time of a key in memcache. 566 | 567 | @return: Nonzero on success. 568 | @param time: Tells memcached the time which this value should 569 | expire, either as a delta number of seconds, or an absolute 570 | unix time-since-the-epoch value. See the memcached protocol 571 | docs section "Storage Commands" for more info on . We 572 | default to 0 == cache forever. 573 | @param noreply: optional parameter instructs the server to not send the 574 | reply. 
575 | @rtype: int 576 | ''' 577 | key = self._encode_key(self.key_encoder(key)) 578 | if self.do_check_key: 579 | self.check_key(key) 580 | server, key = self._get_server(key) 581 | if not server: 582 | return 0 583 | self._statlog('touch') 584 | fullcmd = self._encode_cmd('touch', key, str(time), noreply) 585 | 586 | try: 587 | server.send_cmd(fullcmd) 588 | if noreply: 589 | return 1 590 | line = server.readline() 591 | if line and line.strip() in [b'TOUCHED']: 592 | return 1 593 | self.debuglog('touch expected TOUCHED, got: {!r}'.format(line)) 594 | except OSError as msg: 595 | if isinstance(msg, tuple): 596 | msg = msg[1] 597 | server.mark_dead(msg) 598 | return 0 599 | 600 | def incr(self, key, delta=1, noreply=False): 601 | """Increment value for C{key} by C{delta} 602 | 603 | Sends a command to the server to atomically increment the 604 | value for C{key} by C{delta}, or by 1 if C{delta} is 605 | unspecified. Returns None if C{key} doesn't exist on server, 606 | otherwise it returns the new value after incrementing. 607 | 608 | Note that the value for C{key} must already exist in the 609 | memcache, and it must be the string representation of an 610 | integer. 611 | 612 | >>> mc.set("counter", "20") # returns 1, indicating success 613 | 1 614 | >>> mc.incr("counter") 615 | 21 616 | >>> mc.incr("counter") 617 | 22 618 | 619 | Overflow on server is not checked. Be aware of values 620 | approaching 2**32. See L{decr}. 621 | 622 | @param delta: Integer amount to increment by (should be zero 623 | or greater). 624 | 625 | @param noreply: optional parameter instructs the server to not send the 626 | reply. 627 | 628 | @return: New value after incrementing, no None for noreply or error. 629 | @rtype: int 630 | """ 631 | return self._incrdecr("incr", self.key_encoder(key), delta, noreply) 632 | 633 | def decr(self, key, delta=1, noreply=False): 634 | """Decrement value for C{key} by C{delta} 635 | 636 | Like L{incr}, but decrements. 
Unlike L{incr}, underflow is 637 | checked and new values are capped at 0. If server value is 1, 638 | a decrement of 2 returns 0, not -1. 639 | 640 | @param delta: Integer amount to decrement by (should be zero 641 | or greater). 642 | 643 | @param noreply: optional parameter instructs the server to not send the 644 | reply. 645 | 646 | @return: New value after decrementing, or None for noreply or error. 647 | @rtype: int 648 | """ 649 | return self._incrdecr("decr", self.key_encoder(key), delta, noreply) 650 | 651 | def _incrdecr(self, cmd, key, delta, noreply=False): 652 | key = self._encode_key(key) 653 | if self.do_check_key: 654 | self.check_key(key) 655 | server, key = self._get_server(key) 656 | if not server: 657 | return None 658 | self._statlog(cmd) 659 | fullcmd = self._encode_cmd(cmd, key, str(delta), noreply) 660 | try: 661 | server.send_cmd(fullcmd) 662 | if noreply: 663 | return 664 | line = server.readline() 665 | if line is None or line.strip() == b'NOT_FOUND': 666 | return None 667 | return int(line) 668 | except OSError as msg: 669 | if isinstance(msg, tuple): 670 | msg = msg[1] 671 | server.mark_dead(msg) 672 | return None 673 | 674 | def add(self, key, val, time=0, min_compress_len=0, noreply=False): 675 | '''Add new key with value. 676 | 677 | Like L{set}, but only stores in memcache if the key doesn't 678 | already exist. 679 | 680 | @return: Nonzero on success. 681 | @rtype: int 682 | ''' 683 | return self._set("add", self.key_encoder(key), val, time, min_compress_len, noreply) 684 | 685 | def append(self, key, val, time=0, min_compress_len=0, noreply=False): 686 | '''Append the value to the end of the existing key's value. 687 | 688 | Only stores in memcache if key already exists. 689 | Also see L{prepend}. 690 | 691 | @return: Nonzero on success. 
692 | @rtype: int 693 | ''' 694 | return self._set("append", self.key_encoder(key), val, time, min_compress_len, noreply) 695 | 696 | def prepend(self, key, val, time=0, min_compress_len=0, noreply=False): 697 | '''Prepend the value to the beginning of the existing key's value. 698 | 699 | Only stores in memcache if key already exists. 700 | Also see L{append}. 701 | 702 | @return: Nonzero on success. 703 | @rtype: int 704 | ''' 705 | return self._set("prepend", self.key_encoder(key), val, time, min_compress_len, noreply) 706 | 707 | def replace(self, key, val, time=0, min_compress_len=0, noreply=False): 708 | '''Replace existing key with value. 709 | 710 | Like L{set}, but only stores in memcache if the key already exists. 711 | The opposite of L{add}. 712 | 713 | @return: Nonzero on success. 714 | @rtype: int 715 | ''' 716 | return self._set("replace", self.key_encoder(key), val, time, min_compress_len, noreply) 717 | 718 | def set(self, key, val, time=0, min_compress_len=0, noreply=False): 719 | '''Unconditionally sets a key to a given value in the memcache. 720 | 721 | The C{key} can optionally be an tuple, with the first element 722 | being the server hash value and the second being the key. If 723 | you want to avoid making this module calculate a hash value. 724 | You may prefer, for example, to keep all of a given user's 725 | objects on the same memcache server, so you could use the 726 | user's unique id as the hash value. 727 | 728 | @return: Nonzero on success. 729 | @rtype: int 730 | 731 | @param time: Tells memcached the time at which this value should 732 | expire, either as a delta number of seconds, or an absolute 733 | unix time-since-the-epoch value. See the memcached protocol 734 | docs section "Storage Commands" for more info on . We 735 | default to 0 == cache forever. Optionally now accepts a timedelta. 736 | 737 | @param min_compress_len: The threshold length to kick in 738 | auto-compression of the value using the compressor 739 | routine. 
If the value being cached is a string, then the 740 | length of the string is measured, else if the value is an 741 | object, then the length of the pickle result is measured. If 742 | the resulting attempt at compression yields a larger string 743 | than the input, then it is discarded. For backwards 744 | compatibility, this parameter defaults to 0, indicating don't 745 | ever try to compress. 746 | 747 | @param noreply: optional parameter instructs the server to not 748 | send the reply. 749 | ''' 750 | if isinstance(time, timedelta): 751 | time = int(time.total_seconds()) 752 | return self._set("set", self.key_encoder(key), val, time, min_compress_len, noreply) 753 | 754 | def cas(self, key, val, time=0, min_compress_len=0, noreply=False): 755 | '''Check and set (CAS) 756 | 757 | Sets a key to a given value in the memcache if it hasn't been 758 | altered since last fetched. (See L{gets}). 759 | 760 | The C{key} can optionally be an tuple, with the first element 761 | being the server hash value and the second being the key. If 762 | you want to avoid making this module calculate a hash value. 763 | You may prefer, for example, to keep all of a given user's 764 | objects on the same memcache server, so you could use the 765 | user's unique id as the hash value. 766 | 767 | @return: Nonzero on success. 768 | @rtype: int 769 | 770 | @param time: Tells memcached the time which this value should 771 | expire, either as a delta number of seconds, or an absolute 772 | unix time-since-the-epoch value. See the memcached protocol 773 | docs section "Storage Commands" for more info on . We 774 | default to 0 == cache forever. 775 | 776 | @param min_compress_len: The threshold length to kick in 777 | auto-compression of the value using the compressor 778 | routine. If the value being cached is a string, then the 779 | length of the string is measured, else if the value is an 780 | object, then the length of the pickle result is measured. 
If 781 | the resulting attempt at compression yields a larger string 782 | than the input, then it is discarded. For backwards 783 | compatibility, this parameter defaults to 0, indicating don't 784 | ever try to compress. 785 | 786 | @param noreply: optional parameter instructs the server to not 787 | send the reply. 788 | ''' 789 | return self._set("cas", self.key_encoder(key), val, time, min_compress_len, noreply) 790 | 791 | def _map_and_prefix_keys(self, key_iterable, key_prefix): 792 | """Map keys to the servers they will reside on. 793 | 794 | Compute the mapping of server (_Host instance) -> list of keys to 795 | stuff onto that server, as well as the mapping of prefixed key 796 | -> original key. 797 | """ 798 | key_prefix = self._encode_key(key_prefix) 799 | # Check it just once ... 800 | key_extra_len = len(key_prefix) 801 | if key_prefix and self.do_check_key: 802 | self.check_key(key_prefix) 803 | 804 | # server (_Host) -> list of unprefixed server keys in mapping 805 | server_keys = {} 806 | 807 | prefixed_to_orig_key = {} 808 | # build up a list for each server of all the keys we want. 809 | for orig_key in key_iterable: 810 | if isinstance(orig_key, tuple): 811 | # Tuple of hashvalue, key ala _get_server(). Caller is 812 | # essentially telling us what server to stuff this on. 813 | # Ensure call to _get_server gets a Tuple as well. 814 | serverhash, key = orig_key 815 | 816 | key = self._encode_key(self.key_encoder(key)) 817 | if not isinstance(key, bytes): 818 | # set_multi supports int / long keys. 819 | key = str(key).encode('utf8') 820 | bytes_orig_key = key 821 | 822 | # Gotta pre-mangle key before hashing to a 823 | # server. Returns the mangled key. 824 | server, key = self._get_server( 825 | (serverhash, key_prefix + key)) 826 | else: 827 | key = self._encode_key(self.key_encoder(orig_key)) 828 | if not isinstance(key, bytes): 829 | # set_multi supports int / long keys. 
830 | key = str(key).encode('utf8') 831 | bytes_orig_key = key 832 | server, key = self._get_server(key_prefix + key) 833 | 834 | # alert when passed in key is None 835 | if orig_key is None: 836 | self.check_key(orig_key, key_extra_len=key_extra_len) 837 | 838 | # Now check to make sure key length is proper ... 839 | if self.do_check_key: 840 | self.check_key(bytes_orig_key, key_extra_len=key_extra_len) 841 | 842 | if not server: 843 | continue 844 | 845 | if server not in server_keys: 846 | server_keys[server] = [] 847 | server_keys[server].append(key) 848 | prefixed_to_orig_key[key] = orig_key 849 | 850 | return (server_keys, prefixed_to_orig_key) 851 | 852 | def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0, 853 | noreply=False): 854 | '''Sets multiple keys in the memcache doing just one query. 855 | 856 | >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}) 857 | >>> keys = mc.get_multi(['key1', 'key2']) 858 | >>> keys == {'key1': 'val1', 'key2': 'val2'} 859 | True 860 | 861 | 862 | This method is recommended over regular L{set} as it lowers 863 | the number of total packets flying around your network, 864 | reducing total latency, since your app doesn't have to wait 865 | for each round-trip of L{set} before sending the next one. 866 | 867 | @param mapping: A dict of key/value pairs to set. 868 | 869 | @param time: Tells memcached the time which this value should 870 | expire, either as a delta number of seconds, or an 871 | absolute unix time-since-the-epoch value. See the 872 | memcached protocol docs section "Storage Commands" for 873 | more info on . We default to 0 == cache forever. 874 | 875 | @param key_prefix: Optional string to prepend to each key when 876 | sending to memcache. Allows you to efficiently stuff these 877 | keys into a pseudo-namespace in memcache: 878 | 879 | >>> notset_keys = mc.set_multi( 880 | ... {'key1' : 'val1', 'key2' : 'val2'}, 881 | ... 
key_prefix='subspace_') 882 | >>> len(notset_keys) == 0 883 | True 884 | >>> keys = mc.get_multi(['subspace_key1', 'subspace_key2']) 885 | >>> keys == {'subspace_key1': 'val1', 'subspace_key2': 'val2'} 886 | True 887 | 888 | Causes key 'subspace_key1' and 'subspace_key2' to be 889 | set. Useful in conjunction with a higher-level layer which 890 | applies namespaces to data in memcache. In this case, the 891 | return result would be the list of notset original keys, 892 | prefix not applied. 893 | 894 | @param min_compress_len: The threshold length to kick in 895 | auto-compression of the value using the compressor 896 | routine. If the value being cached is a string, then the 897 | length of the string is measured, else if the value is an 898 | object, then the length of the pickle result is 899 | measured. If the resulting attempt at compression yields a 900 | larger string than the input, then it is discarded. For 901 | backwards compatibility, this parameter defaults to 0, 902 | indicating don't ever try to compress. 903 | 904 | @param noreply: optional parameter instructs the server to not 905 | send the reply. 906 | 907 | @return: List of keys which failed to be stored [ memcache out 908 | of memory, etc. ]. 909 | 910 | @rtype: list 911 | ''' 912 | self._statlog('set_multi') 913 | 914 | server_keys, prefixed_to_orig_key = self._map_and_prefix_keys( 915 | mapping.keys(), key_prefix) 916 | 917 | # send out all requests on each server before reading anything 918 | dead_servers = [] 919 | notstored = [] # original keys. 
920 | 921 | for server in server_keys.keys(): 922 | bigcmd = [] 923 | write = bigcmd.append 924 | try: 925 | for key in server_keys[server]: # These are mangled keys 926 | store_info = self._val_to_store_info( 927 | mapping[prefixed_to_orig_key[key]], 928 | min_compress_len) 929 | if store_info: 930 | flags, len_val, val = store_info 931 | headers = "%d %d %d" % (flags, time, len_val) 932 | fullcmd = self._encode_cmd('set', self.key_encoder(key), headers, 933 | noreply, 934 | b'\r\n', val, b'\r\n') 935 | write(fullcmd) 936 | else: 937 | notstored.append(prefixed_to_orig_key[key]) 938 | server.send_cmds(b''.join(bigcmd)) 939 | except OSError as msg: 940 | if isinstance(msg, tuple): 941 | msg = msg[1] 942 | server.mark_dead(msg) 943 | dead_servers.append(server) 944 | 945 | # if noreply, just return early 946 | if noreply: 947 | return notstored 948 | 949 | # if any servers died on the way, don't expect them to respond. 950 | for server in dead_servers: 951 | del server_keys[server] 952 | 953 | # short-circuit if there are no servers, just return all keys 954 | if not server_keys: 955 | return list(mapping.keys()) 956 | 957 | for server, keys in server_keys.items(): 958 | try: 959 | for key in keys: 960 | if server.readline() == b'STORED': 961 | continue 962 | else: 963 | # un-mangle. 964 | notstored.append(prefixed_to_orig_key[key]) 965 | except (_Error, OSError) as msg: 966 | if isinstance(msg, tuple): 967 | msg = msg[1] 968 | server.mark_dead(msg) 969 | return notstored 970 | 971 | def _val_to_store_info(self, val, min_compress_len): 972 | """Transform val to a storable representation. 973 | 974 | Returns a tuple of the flags, the length of the new value, and 975 | the new value itself. 976 | """ 977 | flags = 0 978 | # Check against the exact type, rather than using isinstance(), so that 979 | # subclasses of native types (such as markup-safe strings) are pickled 980 | # and restored as instances of the correct class. 
981 | val_type = type(val) 982 | if val_type == bytes: 983 | pass 984 | elif val_type == str: 985 | flags |= Client._FLAG_TEXT 986 | val = val.encode('utf-8') 987 | elif val_type == int: 988 | flags |= Client._FLAG_INTEGER 989 | val = ('%d' % val).encode('ascii') 990 | # force no attempt to compress this silly string. 991 | min_compress_len = 0 992 | else: 993 | flags |= Client._FLAG_PICKLE 994 | file = BytesIO() 995 | if self.picklerIsKeyword: 996 | pickler = self.pickler(file, protocol=self.pickleProtocol) 997 | else: 998 | pickler = self.pickler(file, self.pickleProtocol) 999 | if self.persistent_id: 1000 | pickler.persistent_id = self.persistent_id 1001 | pickler.dump(val) 1002 | val = file.getvalue() 1003 | 1004 | lv = len(val) 1005 | # We should try to compress if min_compress_len > 0 1006 | # and this string is longer than our min threshold. 1007 | if min_compress_len and lv > min_compress_len: 1008 | comp_val = self.compressor(val) 1009 | # Only retain the result if the compression result is smaller 1010 | # than the original. 
1011 | if len(comp_val) < lv: 1012 | flags |= Client._FLAG_COMPRESSED 1013 | val = comp_val 1014 | 1015 | # silently do not store if value length exceeds maximum 1016 | if (self.server_max_value_length != 0 and len(val) > self.server_max_value_length): 1017 | return 0 1018 | 1019 | return (flags, len(val), val) 1020 | 1021 | def _set(self, cmd, key, val, time, min_compress_len=0, noreply=False): 1022 | key = self._encode_key(key) 1023 | if self.do_check_key: 1024 | self.check_key(key) 1025 | server, key = self._get_server(key) 1026 | if not server: 1027 | return 0 1028 | 1029 | def _unsafe_set(): 1030 | self._statlog(cmd) 1031 | 1032 | if cmd == 'cas' and key not in self.cas_ids: 1033 | return self._set('set', key, val, time, min_compress_len, 1034 | noreply) 1035 | 1036 | store_info = self._val_to_store_info(val, min_compress_len) 1037 | if not store_info: 1038 | return 0 1039 | flags, len_val, encoded_val = store_info 1040 | 1041 | if cmd == 'cas': 1042 | headers = ("%d %d %d %d" 1043 | % (flags, time, len_val, self.cas_ids[key])) 1044 | else: 1045 | headers = "%d %d %d" % (flags, time, len_val) 1046 | fullcmd = self._encode_cmd(cmd, key, headers, noreply, 1047 | b'\r\n', encoded_val) 1048 | 1049 | try: 1050 | server.send_cmd(fullcmd) 1051 | if noreply: 1052 | return True 1053 | return server.expect(b"STORED", raise_exception=True) == b"STORED" 1054 | except OSError as msg: 1055 | if isinstance(msg, tuple): 1056 | msg = msg[1] 1057 | server.mark_dead(msg) 1058 | return 0 1059 | 1060 | try: 1061 | return _unsafe_set() 1062 | except _ConnectionDeadError: 1063 | # retry once 1064 | try: 1065 | if server._get_socket(): 1066 | return _unsafe_set() 1067 | except (_ConnectionDeadError, OSError) as msg: 1068 | server.mark_dead(msg) 1069 | return 0 1070 | 1071 | def _get(self, cmd, key, default=None): 1072 | key = self._encode_key(key) 1073 | if self.do_check_key: 1074 | self.check_key(key) 1075 | server, key = self._get_server(key) 1076 | if not server: 1077 | return 
None 1078 | 1079 | def _unsafe_get(): 1080 | self._statlog(cmd) 1081 | 1082 | try: 1083 | cmd_bytes = cmd.encode('utf-8') 1084 | fullcmd = b''.join((cmd_bytes, b' ', key)) 1085 | server.send_cmd(fullcmd) 1086 | rkey = flags = rlen = cas_id = None 1087 | 1088 | if cmd == 'gets': 1089 | rkey, flags, rlen, cas_id, = self._expect_cas_value( 1090 | server, raise_exception=True 1091 | ) 1092 | if rkey and self.cache_cas: 1093 | self.cas_ids[rkey] = cas_id 1094 | else: 1095 | rkey, flags, rlen, = self._expectvalue( 1096 | server, raise_exception=True 1097 | ) 1098 | 1099 | if not rkey: 1100 | return default 1101 | try: 1102 | value = self._recv_value(server, flags, rlen) 1103 | finally: 1104 | server.expect(b"END", raise_exception=True) 1105 | except (_Error, OSError) as msg: 1106 | if isinstance(msg, tuple): 1107 | msg = msg[1] 1108 | server.mark_dead(msg) 1109 | return None 1110 | 1111 | return value 1112 | 1113 | try: 1114 | return _unsafe_get() 1115 | except _ConnectionDeadError: 1116 | # retry once 1117 | try: 1118 | if server.connect(): 1119 | return _unsafe_get() 1120 | return None 1121 | except (_ConnectionDeadError, OSError) as msg: 1122 | server.mark_dead(msg) 1123 | return None 1124 | 1125 | def get(self, key, default=None): 1126 | '''Retrieves a key from the memcache. 1127 | 1128 | @return: The value or None. 1129 | ''' 1130 | return self._get('get', self.key_encoder(key), default) 1131 | 1132 | def gets(self, key): 1133 | '''Retrieves a key from the memcache. Used in conjunction with 'cas'. 1134 | 1135 | @return: The value or None. 1136 | ''' 1137 | return self._get('gets', self.key_encoder(key)) 1138 | 1139 | def get_multi(self, keys, key_prefix=''): 1140 | '''Retrieves multiple keys from the memcache doing just one query. 1141 | 1142 | >>> success = mc.set("foo", "bar") 1143 | >>> success = mc.set("baz", 42) 1144 | >>> mc.get_multi(["foo", "baz", "foobar"]) == { 1145 | ... "foo": "bar", "baz": 42 1146 | ... 
} 1147 | 1 1148 | >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == [] 1149 | 1 1150 | 1151 | This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict 1152 | will just have unprefixed keys 'k1', 'k2'. 1153 | 1154 | >>> mc.get_multi(['k1', 'k2', 'nonexist'], 1155 | ... key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} 1156 | 1 1157 | 1158 | get_multi [ and L{set_multi} ] can take str()-ables like ints / 1159 | longs as keys too. Such as your db pri key fields. They're 1160 | rotored through str() before being passed off to memcache, 1161 | with or without the use of a key_prefix. In this mode, the 1162 | key_prefix could be a table name, and the key itself a db 1163 | primary key number. 1164 | 1165 | >>> mc.set_multi({42: 'douglass adams', 1166 | ... 46: 'and 2 just ahead of me'}, 1167 | ... key_prefix='numkeys_') == [] 1168 | 1 1169 | >>> mc.get_multi([46, 42], key_prefix='numkeys_') == { 1170 | ... 42: 'douglass adams', 1171 | ... 46: 'and 2 just ahead of me' 1172 | ... } 1173 | 1 1174 | 1175 | This method is recommended over regular L{get} as it lowers 1176 | the number of total packets flying around your network, 1177 | reducing total latency, since your app doesn't have to wait 1178 | for each round-trip of L{get} before sending the next one. 1179 | 1180 | See also L{set_multi}. 1181 | 1182 | @param keys: An array of keys. 1183 | 1184 | @param key_prefix: A string to prefix each key when we 1185 | communicate with memcache. Facilitates pseudo-namespaces 1186 | within memcache. Returned dictionary keys will not have this 1187 | prefix. 1188 | 1189 | @return: A dictionary of key/value pairs that were 1190 | available. If key_prefix was provided, the keys in the returned 1191 | dictionary will not have it present. 
1192 | ''' 1193 | 1194 | self._statlog('get_multi') 1195 | 1196 | server_keys, prefixed_to_orig_key = self._map_and_prefix_keys( 1197 | [self.key_encoder(k) for k in keys], key_prefix) 1198 | 1199 | # send out all requests on each server before reading anything 1200 | dead_servers = [] 1201 | for server in server_keys.keys(): 1202 | try: 1203 | fullcmd = b"get " + b" ".join(server_keys[server]) 1204 | server.send_cmd(fullcmd) 1205 | except OSError as msg: 1206 | if isinstance(msg, tuple): 1207 | msg = msg[1] 1208 | server.mark_dead(msg) 1209 | dead_servers.append(server) 1210 | 1211 | # if any servers died on the way, don't expect them to respond. 1212 | for server in dead_servers: 1213 | del server_keys[server] 1214 | 1215 | retvals = {} 1216 | for server in server_keys.keys(): 1217 | try: 1218 | line = server.readline() 1219 | while line and line != b'END': 1220 | rkey, flags, rlen = self._expectvalue(server, line) 1221 | # Bo Yang reports that this can sometimes be None 1222 | if rkey is not None: 1223 | val = self._recv_value(server, flags, rlen) 1224 | # un-prefix returned key. 
1225 | retvals[prefixed_to_orig_key[rkey]] = val 1226 | line = server.readline() 1227 | except (_Error, OSError) as msg: 1228 | if isinstance(msg, tuple): 1229 | msg = msg[1] 1230 | server.mark_dead(msg) 1231 | return retvals 1232 | 1233 | def _expect_cas_value(self, server, line=None, raise_exception=False): 1234 | if not line: 1235 | line = server.readline(raise_exception) 1236 | 1237 | if line and line[:5] == b'VALUE': 1238 | resp, rkey, flags, len, cas_id = line.split() 1239 | return (rkey, int(flags), int(len), int(cas_id)) 1240 | else: 1241 | return (None, None, None, None) 1242 | 1243 | def _expectvalue(self, server, line=None, raise_exception=False): 1244 | if not line: 1245 | line = server.readline(raise_exception) 1246 | 1247 | if line and line[:5] == b'VALUE': 1248 | resp, rkey, flags, len = line.split() 1249 | flags = int(flags) 1250 | rlen = int(len) 1251 | return (rkey, flags, rlen) 1252 | else: 1253 | return (None, None, None) 1254 | 1255 | def _recv_value(self, server, flags, rlen): 1256 | rlen += 2 # include \r\n 1257 | buf = server.recv(rlen) 1258 | if len(buf) != rlen: 1259 | raise _Error("received %d bytes when expecting %d" 1260 | % (len(buf), rlen)) 1261 | 1262 | if len(buf) == rlen: 1263 | buf = buf[:-2] # strip \r\n 1264 | 1265 | if flags & Client._FLAG_COMPRESSED: 1266 | buf = self.decompressor(buf) 1267 | flags &= ~Client._FLAG_COMPRESSED 1268 | if flags == 0: 1269 | # Bare bytes 1270 | val = buf 1271 | elif flags & Client._FLAG_TEXT: 1272 | val = buf.decode('utf-8') 1273 | elif flags & Client._FLAG_INTEGER: 1274 | val = int(buf) 1275 | elif flags & Client._FLAG_LONG: 1276 | val = int(buf) 1277 | elif flags & Client._FLAG_PICKLE: 1278 | try: 1279 | file = BytesIO(buf) 1280 | unpickler = self.unpickler(file) 1281 | if self.persistent_load: 1282 | unpickler.persistent_load = self.persistent_load 1283 | val = unpickler.load() 1284 | except Exception as e: 1285 | self.debuglog('Pickle error: %s\n' % e) 1286 | return None 1287 | else: 1288 | 
self.debuglog("unknown flags on get: %x\n" % flags) 1289 | raise ValueError('Unknown flags on get: %x' % flags) 1290 | 1291 | return val 1292 | 1293 | def check_key(self, key, key_extra_len=0): 1294 | """Checks sanity of key. 1295 | 1296 | Fails if: 1297 | 1298 | Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength). 1299 | Contains control characters (Raises MemcachedKeyCharacterError). 1300 | Is not a string (Raises MemcachedStringEncodingError) 1301 | Is an unicode string (Raises MemcachedStringEncodingError) 1302 | Is not a string (Raises MemcachedKeyError) 1303 | Is None (Raises MemcachedKeyError) 1304 | """ 1305 | if isinstance(key, tuple): 1306 | key = key[1] 1307 | if key is None: 1308 | raise Client.MemcachedKeyNoneError("Key is None") 1309 | if key == '': 1310 | if key_extra_len == 0: 1311 | raise Client.MemcachedKeyNoneError("Key is empty") 1312 | 1313 | # key is empty but there is some other component to key 1314 | return 1315 | 1316 | if not isinstance(key, bytes): 1317 | raise Client.MemcachedKeyTypeError("Key must be a binary string") 1318 | 1319 | if (self.server_max_key_length != 0 and len(key) + key_extra_len > self.server_max_key_length): 1320 | raise Client.MemcachedKeyLengthError( 1321 | "Key length is > %s" % self.server_max_key_length 1322 | ) 1323 | if not valid_key_chars_re.match(key): 1324 | raise Client.MemcachedKeyCharacterError( 1325 | "Control/space characters not allowed (key=%r)" % key) 1326 | 1327 | 1328 | class _Host: 1329 | 1330 | def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY, 1331 | socket_timeout=_SOCKET_TIMEOUT, flush_on_reconnect=0): 1332 | self.dead_retry = dead_retry 1333 | self.socket_timeout = socket_timeout 1334 | self.debug = debug 1335 | self.flush_on_reconnect = flush_on_reconnect 1336 | if isinstance(host, tuple): 1337 | host, self.weight = host 1338 | else: 1339 | self.weight = 1 1340 | 1341 | # parse the connection string 1342 | m = re.match(r'^(?Punix):(?P.*)$', host) 1343 | if not m: 1344 | 
class _Host:
    """One memcached server endpoint: connection-string parsing, socket
    lifecycle (including dead-host backoff), and buffered line/byte I/O."""

    def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY,
                 socket_timeout=_SOCKET_TIMEOUT, flush_on_reconnect=0):
        """Parse *host* and initialise connection state.

        host may be a string of one of these forms:
          - "unix:/path/to/socket"    (AF_UNIX)
          - "inet6:[addr]:port"       (AF_INET6; port optional, default 11211)
          - "inet:host:port"          (AF_INET; port optional, default 11211)
          - "host:port" or "host"     (AF_INET; port optional, default 11211)
        or a (host, weight) tuple.

        Raises ValueError if the connection string cannot be parsed.
        """
        self.dead_retry = dead_retry
        self.socket_timeout = socket_timeout
        self.debug = debug
        self.flush_on_reconnect = flush_on_reconnect
        if isinstance(host, tuple):
            host, self.weight = host
        else:
            self.weight = 1

        # Parse the connection string.  BUG FIX: the named-group syntax
        # (?P<name>...) had been stripped from these patterns (leaving the
        # invalid "(?Punix)" etc.); groupdict() below depends on the names
        # 'proto', 'path', 'host' and 'port', so they are restored here.
        m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
        if not m:
            m = re.match(r'^(?P<proto>inet6):'
                         r'\[(?P<host>[^\[\]]+)\](:(?P<port>[0-9]+))?$', host)
        if not m:
            m = re.match(r'^(?P<proto>inet):'
                         r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
        if not m:
            m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
        if not m:
            raise ValueError('Unable to parse connection string: "%s"' % host)

        hostData = m.groupdict()
        if hostData.get('proto') == 'unix':
            self.family = socket.AF_UNIX
            self.address = hostData['path']
        elif hostData.get('proto') == 'inet6':
            self.family = socket.AF_INET6
            self.ip = hostData['host']
            self.port = int(hostData.get('port') or 11211)
            self.address = (self.ip, self.port)
        else:
            self.family = socket.AF_INET
            self.ip = hostData['host']
            self.port = int(hostData.get('port') or 11211)
            self.address = (self.ip, self.port)

        self.deaduntil = 0
        self.socket = None
        self.flush_on_next_connect = 0

        # Receive buffer shared by readline()/recv(); always bytes.
        self.buffer = b''

    def debuglog(self, msg):
        # Parameter renamed from ``str`` (shadowed the builtin); every
        # visible call site passes it positionally.
        if self.debug:
            sys.stderr.write("MemCached: %s\n" % msg)

    def _check_dead(self):
        # Return 1 while still inside the dead-retry window; otherwise
        # clear the marker and return 0.
        if self.deaduntil and self.deaduntil > time.time():
            return 1
        self.deaduntil = 0
        return 0

    def connect(self):
        """Return 1 if a socket to this host is (now) available, else 0."""
        if self._get_socket():
            return 1
        return 0

    def mark_dead(self, reason):
        """Mark this host dead for ``dead_retry`` seconds and drop the socket."""
        self.debuglog("MemCache: {}: {}. Marking dead.".format(self, reason))
        self.deaduntil = time.time() + self.dead_retry
        if self.flush_on_reconnect:
            self.flush_on_next_connect = 1
        self.close_socket()

    def _get_socket(self):
        """Return a connected socket, creating one if necessary.

        Returns None if the host is inside its dead-retry window or the
        connect attempt fails (in which case the host is marked dead).
        """
        if self._check_dead():
            return None
        if self.socket:
            return self.socket
        s = socket.socket(self.family, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(self.socket_timeout)
        try:
            s.connect(self.address)
        except socket.timeout as msg:
            self.mark_dead("connect: %s" % msg)
            return None
        except OSError as msg:
            # NOTE: the old ``isinstance(msg, tuple)`` unpacking was dead
            # code on Python 3 (OSError is never a tuple) and was removed.
            self.mark_dead("connect: %s" % msg)
            return None
        self.socket = s
        self.buffer = b''
        if self.flush_on_next_connect:
            self.flush()
            self.flush_on_next_connect = 0
        return s

    def close_socket(self):
        """Close and forget the current socket, if any."""
        if self.socket:
            self.socket.close()
            self.socket = None

    def send_cmd(self, cmd):
        """Send one command line; the trailing CRLF is appended here."""
        if isinstance(cmd, str):
            cmd = cmd.encode('utf8')
        self.socket.sendall(cmd + b'\r\n')

    def send_cmds(self, cmds):
        """cmds already has trailing \\r\\n's applied."""
        if isinstance(cmds, str):
            cmds = cmds.encode('utf8')
        self.socket.sendall(cmds)

    def readline(self, raise_exception=False):
        """Read one CRLF-terminated line and return it without the CRLF.

        If "raise_exception" is set, raise _ConnectionDeadError if the
        read fails, otherwise return an empty byte string.
        """
        buf = self.buffer
        if self.socket:
            recv = self.socket.recv
        else:
            def recv(bufsize):
                return b''

        while True:
            index = buf.find(b'\r\n')
            if index >= 0:
                break
            data = recv(4096)
            if not data:
                # connection close, let's kill it and raise
                self.mark_dead('connection closed in readline()')
                if raise_exception:
                    raise _ConnectionDeadError()
                else:
                    # BUG FIX: previously returned the *text* string '',
                    # inconsistent with the bytes returned on the success
                    # path; callers compare lines against byte strings.
                    return b''

            buf += data
        self.buffer = buf[index + 2:]
        return buf[:index]

    def expect(self, text, raise_exception=False):
        """Read a line; in debug mode, log if it differs from *text* (bytes)."""
        line = self.readline(raise_exception)
        if self.debug and line != text:
            text = text.decode('utf8')
            log_line = line.decode('utf8', 'replace')
            self.debuglog("while expecting %r, got unexpected response %r"
                          % (text, log_line))
        return line

    def recv(self, rlen):
        """Read exactly *rlen* bytes; raise _Error on a short read."""
        self_socket_recv = self.socket.recv  # hoist attr lookup out of the loop
        buf = self.buffer
        while len(buf) < rlen:
            foo = self_socket_recv(max(rlen - len(buf), 4096))
            buf += foo
            if not foo:
                raise _Error('Read %d bytes, expecting %d, '
                             'read returned 0 length bytes' % (len(buf), rlen))
        self.buffer = buf[rlen:]
        return buf[:rlen]

    def quit(self) -> None:
        '''Send a "quit" command to remote server and wait for connection to close.'''
        if self.socket:
            self.send_cmd('quit')

            # We can't close the local socket until the remote end processes
            # the quit command and sends us a FIN packet. When that happens,
            # socket.recv() will stop blocking and return an empty string.
            # If we try to close the socket before then, the OS will think
            # we're initiating the connection close and will put the socket
            # into TIME_WAIT.
            self.socket.recv(1)

            # At this point, socket should be in CLOSE_WAIT. Closing the
            # socket should release the port back to the OS.
            self.close_socket()

    def flush(self):
        """Ask the server to flush all entries and check for the OK reply."""
        self.send_cmd('flush_all')
        self.expect(b'OK')

    def __str__(self):
        d = ''
        if self.deaduntil:
            d = " (dead until %d)" % self.deaduntil

        if self.family == socket.AF_INET:
            return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
        elif self.family == socket.AF_INET6:
            return "inet6:[%s]:%d%s" % (self.address[0], self.address[1], d)
        else:
            return "unix:{}{}".format(self.address, d)


def _doctest():
    """Run the module doctests against a memcached at 127.0.0.1:11211."""
    import doctest
    import memcache
    servers = ["127.0.0.1:11211"]
    mc = memcache.Client(servers, debug=1)
    globs = {"mc": mc}
    results = doctest.testmod(memcache, globs=globs)
    mc.disconnect_all()
    print("Doctests: {}".format(results))
    if results.failed:
        sys.exit(1)


# vim: ts=4 sw=4 et :