├── .craft.yml ├── .github └── workflows │ ├── build.yml │ ├── release.yml │ └── test.yml ├── .gitignore ├── .python-version ├── CHANGES ├── LICENSE ├── Makefile ├── README.md ├── artwork └── logo.svg ├── docs ├── Makefile ├── _static │ └── rb.png ├── _themes │ └── rb_theme │ │ ├── layout.html │ │ ├── static │ │ └── rb.css_t │ │ └── theme.conf ├── conf.py ├── index.rst └── make.bat ├── hooks └── pre-commit ├── rb ├── __init__.py ├── _rediscommands.py ├── clients.py ├── cluster.py ├── ketama.py ├── poll.py ├── promise.py ├── router.py ├── testing.py └── utils.py ├── scripts └── bump-version.sh ├── setup.cfg ├── setup.py └── tests ├── conftest.py ├── test_cluster.py ├── test_ketama.py ├── test_poll.py ├── test_promise.py ├── test_router.py └── test_utils.py /.craft.yml: -------------------------------------------------------------------------------- 1 | minVersion: "0.18.0" 2 | github: 3 | owner: getsentry 4 | repo: rb 5 | changelog: CHANGES 6 | changelogPolicy: auto 7 | statusProvider: 8 | name: github 9 | artifactProvider: 10 | name: github 11 | targets: 12 | - name: pypi 13 | - name: github 14 | - name: sentry-pypi 15 | internalPypiRepo: getsentry/pypi 16 | 17 | requireNames: 18 | - /^rb-.+-py2.py3-none-any.whl$/ 19 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - release/** 8 | 9 | jobs: 10 | dist: 11 | name: Wheels 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | - uses: actions/setup-python@v5 17 | - run: | 18 | pip install wheel 19 | python setup.py bdist_wheel 20 | - uses: actions/upload-artifact@v4 21 | with: 22 | name: ${{ github.sha }} 23 | path: dist/* 24 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | version: 7 | description: Version to release 8 | required: true 9 | force: 10 | description: Force a release even when there are release-blockers (optional) 11 | required: false 12 | 13 | jobs: 14 | release: 15 | runs-on: ubuntu-latest 16 | name: "Release a new version" 17 | steps: 18 | - name: Get auth token 19 | id: token 20 | uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0 21 | with: 22 | app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }} 23 | private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }} 24 | 25 | - uses: actions/checkout@v4 26 | with: 27 | token: ${{ steps.token.outputs.token }} 28 | fetch-depth: 0 29 | 30 | - name: Prepare release 31 | uses: getsentry/action-prepare-release@v1 32 | env: 33 | GITHUB_TOKEN: ${{ steps.token.outputs.token }} 34 | with: 35 | version: ${{ github.event.inputs.version }} 36 | force: ${{ github.event.inputs.force }} 37 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - release/** 8 | pull_request: 9 | 10 | jobs: 11 | test: 12 | name: Run tests 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | matrix: 16 | os: [ubuntu-latest, macos-latest] 17 | python: ["3.8", "3.9", "3.10", "pypy-3.8"] 18 | REDIS_VERSION: ["<3", "<4", "<5"] 19 | env: 
20 |       REDIS_VERSION: ${{ matrix.REDIS_VERSION }}
21 |     steps:
22 |       - uses: actions/checkout@v3
23 |       - name: Setup Python
24 |         uses: actions/setup-python@v5
25 |         with:
26 |           python-version: ${{ matrix.python }}
27 | 
28 |       - name: Install dependencies
29 |         run: |
30 |           python -m pip install --upgrade pip
31 |           pip install pytest
32 |           pip install --editable .
33 |       - name: Install Redis
34 |         run: |
35 |           if [ "$RUNNER_OS" == "Linux" ]; then
36 |             sudo apt update && sudo apt install redis-server --no-install-recommends -y
37 |           elif [ "$RUNNER_OS" == "macOS" ]; then
38 |             brew install --quiet redis
39 |           else
40 |             echo "$RUNNER_OS not supported"
41 |             exit 1
42 |           fi
43 |       - name: Run tests
44 |         run: |
45 |           make test
46 |   collector:
47 |     needs: [test]
48 |     if: always()
49 |     runs-on: ubuntu-latest
50 |     steps:
51 |       - name: Check for failures
52 |         if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
53 |         run: |
54 |           echo "One of the dependent jobs has failed. You may need to re-run it." && exit 1
55 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | docs/_build
2 | *.pyc
3 | *.pyo
4 | .DS_Store
5 | .cache/
6 | build
7 | dist
8 | *.egg-info
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.8
2 | 
--------------------------------------------------------------------------------
/CHANGES:
--------------------------------------------------------------------------------
1 | Rb Changelog
2 | ============
3 | 
4 | 1.10.0
5 | ------
6 | 
7 | ### Various fixes & improvements
8 | 
9 | - add internal pypi deploy to `rb` (#54) by @asottile-sentry
10 | - set fetch-depth: 0 for release (#53) by @asottile-sentry
11 | - add compat for redis 5.x (#52) by @asottile-sentry
12 | - fix CI (#51) by @asottile-sentry
13 | 
14 | 1.9.0
15 | -----
16 | 
17 | - Redis compatibility for 3.4.1
18 | 
19 | 1.8
20 | -----------
21 | 
22 | - Python 3.6 compatibility
23 | - Redis compatibility for versions >=2.6,<3.4
24 | 
25 | 1.7
26 | -----------
27 | 
28 | (released Jun 23rd 2017)
29 | 
30 | - Ensure a connection is released to the pool after receiving a response, even
31 |   if the result is an error.
32 | 
33 | 1.6
34 | -----------
35 | 
36 | (released Nov 23rd 2016)
37 | 
38 | - Support `options` keyword arguments passed to `execute_command`.
39 | 
40 | 1.5
41 | -----------
42 | 
43 | (released Nov 23rd 2016)
44 | 
45 | - Detect dead connections on pool checkout.
46 | 
47 | 1.4
48 | -----------
49 | 
50 | (released on Feb 8th 2016)
51 | 
52 | - Fixed cluster for host defaults support.
53 | - Changed poller to handle close explicitly. This should prevent
54 |   bad loops when the socket closes while writing.
55 | - Added support for execute_commands.
56 | 
57 | 1.3.1
58 | -------------
59 | 
60 | (released on Oct 13th 2015)
61 | 
62 | - Fixed an illogical constructor for the local client.
63 | - Fixed a problem with clearing out pending batches.
64 | - Hosts are now validated to not have holes in the two shipped routers
65 |   which both depend on a gapless setup.
66 | - Connection errors now try to print out the original IO error's info.
67 | 
68 | 1.3
69 | -----------
70 | 
71 | (released on Oct 7th 2015)
72 | 
73 | - Quickly fixed `target_key`'s behavior to make sense so that the
74 |   result on the promise is the value instead of a dictionary of a
75 |   single host.
76 | 77 | 1.2 78 | ----------- 79 | 80 | (released on Oct 7th 2015) 81 | 82 | - Added `target_key` to the fanout client to simplify targeting of hosts. 83 | 84 | 1.1.2 85 | ------------- 86 | 87 | (released on Sep 28th 2015) 88 | 89 | - Fixed command buffers for disabled max concurrency. 90 | - Fixed map manager timeouts. 91 | 92 | 1.1.1 93 | ------------- 94 | 95 | (released on Sep 15th 2015) 96 | 97 | - Made rb work with older versions of pyredis. 98 | 99 | 1.1 100 | ----------- 101 | 102 | (released on Sep 9th 2015) 103 | 104 | - Added internal support for async writes which improves performance 105 | and parallelism with large command batches where the command is 106 | larger than the kernel buffer size. 107 | 108 | 1.0 109 | ----------- 110 | 111 | (released on Sep 4th 2015) 112 | 113 | - Added support for automatic batching of GET and SET to MGET and MSET. 114 | - Added emulated `mget` and `mset` commands to promise based clients. 115 | - Fixed a bug with the HostInfo not comparing correctly. 116 | - Added support for epoll as an alternative to poll. 117 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2011-2012 DISQUS 191 | Copyright 2015 Functional Software Inc. 192 | 193 | Licensed under the Apache License, Version 2.0 (the "License"); 194 | you may not use this file except in compliance with the License. 195 | You may obtain a copy of the License at 196 | 197 | http://www.apache.org/licenses/LICENSE-2.0 198 | 199 | Unless required by applicable law or agreed to in writing, software 200 | distributed under the License is distributed on an "AS IS" BASIS, 201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 202 | See the License for the specific language governing permissions and 203 | limitations under the License. 
204 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | setup-git:
2 | 	@echo "--> Installing git hooks"
3 | 	@pip install flake8
4 | 	@cd .git/hooks && ln -sf ../../hooks/* ./
5 | 
6 | test:
7 | 	@py.test -vv --tb=short
8 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # rb [![test](https://github.com/getsentry/rb/actions/workflows/test.yml/badge.svg)](https://github.com/getsentry/rb/actions/workflows/test.yml)
2 | 
3 | ![logo](https://github.com/getsentry/rb/blob/master/docs/_static/rb.png?raw=true)
4 | 
5 | rb - the redis blaster.
6 | 
7 | The fastest way to talk to many redis nodes. Can do routing as well as
8 | blindly blasting commands to many nodes. How does it work?
9 | 
10 | For full documentation see [rb.rtfd.org](http://rb.rtfd.org/)
11 | 
12 | ## Quickstart
13 | 
14 | Set up a cluster:
15 | 
16 | ```python
17 | from rb import Cluster
18 | 
19 | cluster = Cluster({
20 |     0: {'port': 6379},
21 |     1: {'port': 6380},
22 |     2: {'port': 6381},
23 |     3: {'port': 6382},
24 | }, host_defaults={
25 |     'host': '127.0.0.1',
26 | })
27 | ```
28 | 
29 | Automatic routing:
30 | 
31 | ```python
32 | results = []
33 | with cluster.map() as client:
34 |     for key in range(100):
35 |         client.get(key).then(lambda x: results.append(int(x or 0)))
36 | 
37 | print('Sum: %s' % sum(results))
38 | ```
39 | 
40 | Fanout:
41 | 
42 | ```python
43 | with cluster.fanout(hosts=[0, 1, 2, 3]) as client:
44 |     infos = client.info()
45 | ```
46 | 
47 | Fanout to all:
48 | 
49 | ```python
50 | with cluster.fanout(hosts='all') as client:
51 |     client.flushdb()
52 | ```
53 | 
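Waiting on a promise explicitly also works: once the `map()` block exits the
batch has been executed, so the result can be read from the promise directly
(a short sketch of the same idea as the routing example above; the key name
is arbitrary):

```python
with cluster.map() as client:
    promise = client.get('key-1')

# the context manager has flushed the batch, so the value is now available
print(promise.value)
```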
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line.
5 | SPHINXOPTS    =
6 | SPHINXBUILD   = sphinx-build
7 | PAPER         =
8 | BUILDDIR      = _build
9 | 
10 | # Internal variables.
11 | PAPEROPT_a4     = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | 
15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
16 | 
17 | help:
18 | 	@echo "Please use \`make <target>' where <target> is one of"
19 | 	@echo "  html       to make standalone HTML files"
20 | 	@echo "  dirhtml    to make HTML files named index.html in directories"
21 | 	@echo "  singlehtml to make a single large HTML file"
22 | 	@echo "  pickle     to make pickle files"
23 | 	@echo "  json       to make JSON files"
24 | 	@echo "  htmlhelp   to make HTML files and a HTML help project"
25 | 	@echo "  qthelp     to make HTML files and a qthelp project"
26 | 	@echo "  devhelp    to make HTML files and a Devhelp project"
27 | 	@echo "  epub       to make an epub"
28 | 	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
29 | 	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
30 | 	@echo "  text       to make text files"
31 | 	@echo "  man        to make manual pages"
32 | 	@echo "  changes    to make an overview of all changed/added/deprecated items"
33 | 	@echo "  linkcheck  to check all external links for integrity"
34 | 	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
35 | 
36 | clean:
37 | 	-rm -rf $(BUILDDIR)/*
38 | 
39 | html:
40 | 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
41 | 	@echo
42 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
43 | 
44 | dirhtml:
45 | 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
46 | 	@echo
47 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
48 | 
49 | singlehtml:
50 | 	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
51 | 	@echo
52 | 	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
53 | 
54 | pickle:
55 | 	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
56 | 	@echo
57 | 	@echo "Build finished; now you can process the pickle files."
58 | 
59 | json:
60 | 	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
61 | 	@echo
62 | 	@echo "Build finished; now you can process the JSON files."
63 | 
64 | htmlhelp:
65 | 	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
66 | 	@echo
67 | 	@echo "Build finished; now you can run HTML Help Workshop with the" \
68 | 	      ".hhp project file in $(BUILDDIR)/htmlhelp."
69 | 
70 | qthelp:
71 | 	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
72 | 	@echo
73 | 	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
74 | 	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
75 | 	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Classy.qhcp"
76 | 	@echo "To view the help file:"
77 | 	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Classy.qhc"
78 | 
79 | devhelp:
80 | 	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
81 | 	@echo
82 | 	@echo "Build finished."
83 | 	@echo "To view the help file:"
84 | 	@echo "# mkdir -p $$HOME/.local/share/devhelp/Classy"
85 | 	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Classy"
86 | 	@echo "# devhelp"
87 | 
88 | epub:
89 | 	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
90 | 	@echo
91 | 	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
92 | 
93 | latex:
94 | 	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
95 | 	@echo
96 | 	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
97 | 	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
98 | 	      "run these through (pdf)latex."
99 | 
100 | latexpdf: latex
101 | 	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
102 | 	@echo "Running LaTeX files through pdflatex..."
103 | 	make -C $(BUILDDIR)/latex all-pdf
104 | 	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
105 | 
106 | text:
107 | 	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
108 | 	@echo
109 | 	@echo "Build finished. The text files are in $(BUILDDIR)/text."
110 | 
111 | man:
112 | 	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
113 | 	@echo
114 | 	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
115 | 
116 | changes:
117 | 	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
118 | 	@echo
119 | 	@echo "The overview file is in $(BUILDDIR)/changes."
120 | 
121 | linkcheck:
122 | 	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
123 | 	@echo
124 | 	@echo "Link check complete; look for any errors in the above output " \
125 | 	      "or in $(BUILDDIR)/linkcheck/output.txt."
126 | 
127 | doctest:
128 | 	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
129 | 	@echo "Testing of doctests in the sources finished, look at the " \
130 | 	      "results in $(BUILDDIR)/doctest/output.txt."
131 | 
--------------------------------------------------------------------------------
/docs/_static/rb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/getsentry/rb/ec9cc3358351f858b5d0ee83896336bcad434058/docs/_static/rb.png
--------------------------------------------------------------------------------
/docs/_themes/rb_theme/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "basic/layout.html" %}
2 | {% block header %}
3 |   {{ super() }}
4 |   {% if pagename == 'index' %}
5 |   <div class=indexwrapper>
6 |   {% endif %}
7 | {% endblock %}
8 | {% block footer %}
9 |   {% if pagename == 'index' %}
10 |   </div>
11 |   {% endif %}
12 | {% endblock %}
13 | {# do not display relbars #}
14 | {% block relbar1 %}{% endblock %}
15 | {% block relbar2 %}
16 |   {% if theme_github_fork %}
17 |     <a href="https://github.com/{{ theme_github_fork }}">Fork me on GitHub
18 |     </a>
19 |   {% endif %}
20 | {% endblock %}
21 | {% block sidebar1 %}{% endblock %}
22 | {% block sidebar2 %}{% endblock %}
23 | 
--------------------------------------------------------------------------------
/docs/_themes/rb_theme/static/rb.css_t:
--------------------------------------------------------------------------------
1 | @import url("basic.css");
2 | @import url(http://fonts.googleapis.com/css?family=Roboto+Mono:400,700italic,700,400italic);
3 | 
4 | /* -- page layout ----------------------------------------------------------- */
5 | 
6 | body {
7 |     font-family: 'Verdana', sans-serif;
8 |     font-weight: 300;
9 |     font-size: 17px;
10 |     color: #000;
11 |     background: white;
12 |     margin: 0;
13 |     padding: 0;
14 | }
15 | 
16 | div.documentwrapper {
17 |     float: left;
18 |     width: 100%;
19 | }
20 | 
21 | div.bodywrapper {
22 |     margin: 40px auto 0 auto;
23 |     max-width: 800px;
24 | }
25 | 
26 | hr {
27 |     border: 1px solid #B1B4B6;
28 | }
29 | 
30 | div.body {
31 |     background-color: #ffffff;
32 |     color: #3E4349;
33 |     padding: 0 30px 30px 30px;
34 | }
35 | 
36 | img.floatingflask {
37 |     padding: 0 0 10px 10px;
38 |     float: right;
39 | }
40 | 
41 | div.footer {
42 |     text-align: right;
43 |     color: #888;
44 |     padding: 10px;
45 |     font-size: 14px;
46 |     width: 650px;
47 |     margin: 0 auto 40px auto;
48 | }
49 | 
50 | div.footer a {
51 |     color: #888;
52 |     text-decoration: underline;
53 | }
54 | 
55 | div.related {
56 |     line-height: 32px;
57 |     color: #888;
58 | }
59 | 
60 | div.related ul {
61 |     padding: 0 0 0 10px;
62 | }
63 | 
64 | div.related a {
65 |     color: #444;
66 | }
67 | 
68 | /* -- body styles ----------------------------------------------------------- */
69 | 
70 | a {
71 |     color: white;
72 |     background: black;
73 |     font-weight: bold;
74 |     text-decoration: none;
75 | }
76 | 
77 | a:hover {
78 |     color: #888;
79 |     background: transparent;
80 |     text-decoration: underline;
81 | }
82 | 
83 | div.body {
84 |     padding-bottom: 40px; /* saved for footer */
85 | }
86 | 
87 | div.body h1,
88 | div.body h2,
89 | div.body h3,
90 | div.body h4,
91 | div.body h5,
92 | div.body h6 {
93 |     font-family: 'Verdana', sans-serif;
94 |     font-weight: bold;
95 |     margin: 30px 0px 10px 0px;
96 |     padding: 0;
97 |     color: black;
98 | }
99 | 
100 | div.body h1:before {
101 |     content: "";
102 |     display: block;
103 |     background: url(rb.png) no-repeat center center;
104 |     background-size: 100%;
105 |     width: 256px;
106 |     height: 246px;
107 |     float: right;
108 |     margin: 0 0 25px 25px;
109 | }
110 | 
111 | div.body h2 { font-size: 180%; }
112 | div.body h3 { font-size: 150%; }
113 | div.body h4 { font-size: 130%; }
114 | div.body h5 { font-size: 100%; }
115 | div.body h6 { font-size: 100%; }
116 | 
117 | a.headerlink {
118 |     color: white;
119 |     padding: 0 4px;
120 |     text-decoration: none;
121 | }
122 | 
123 | a.headerlink:hover {
124 |     color: #444;
125 |     background: #eaeaea;
126 | }
127 | 
128 | div.body p, div.body dd, div.body li {
129 |     line-height: 1.4em;
130 | }
131 | 
132 | div.admonition {
133 |     background: #fafafa;
134 |     margin: 20px -30px;
135 |     padding: 10px 30px;
136 |     border-top: 1px solid #ccc;
137 |     border-bottom: 1px solid #ccc;
138 | }
139 | 
140 | div.admonition p.admonition-title {
141 |     font-family: 'Garamond', 'Georgia', serif;
142 |     font-weight: normal;
143 |     font-size: 24px;
144 |     margin: 0 0 10px 0;
145 |     padding: 0;
146 |     line-height: 1;
147 | }
148 | 
149 | div.admonition p.last {
150 |     margin-bottom: 0;
151 | }
152 | 
153 | div.highlight{
154 |     background-color: white;
155 | }
156 | 
157 | dt:target, .highlight {
158 |     background: #FAF3E8;
159 | }
160 | 
161 | div.note {
162 |     background-color: #eee;
163 |     border: 1px solid #ccc;
164 | }
165 | 
166 | div.seealso {
167 |     background-color: #ffc;
168 |     border: 1px solid #ff6;
169 | }
170 | 
171 | div.topic {
172 |     background-color: #eee;
173 | }
174 | 
175 | div.warning {
176 |     background-color: #ffe4e4;
177 |     border: 1px solid #f66;
178 | }
179 | 
180 | p.admonition-title {
181 |     display: inline;
182 | }
183 | 
184 | p.admonition-title:after {
185 |     content: ":";
186 | }
187 | 
188 | pre, code {
189 |     font-family: 'Roboto Mono', monospace;
190 |     font-size: 1em;
191 | }
192 | 
193 | img.screenshot {
194 | }
195 | 
196 | code.descname, code.descclassname {
197 |     font-size: 0.95em;
198 | }
199 | 
200 | code.descname {
201 |     padding-right: 0.08em;
202 | }
203 | 
204 | img.screenshot {
205 |     -moz-box-shadow: 2px 2px 4px #eee;
206 |     -webkit-box-shadow: 2px 2px 4px #eee;
207 |     box-shadow: 2px 2px 4px #eee;
208 | }
209 | 
210 | table.docutils {
211 |     border: 1px solid #888;
212 |     -moz-box-shadow: 2px 2px 4px #eee;
213 |     -webkit-box-shadow: 2px 2px 4px #eee;
214 |     box-shadow: 2px 2px 4px #eee;
215 | }
216 | 
217 | table.docutils td, table.docutils th {
218 |     border: 1px solid #888;
219 |     padding: 0.25em 0.7em;
220 | }
221 | 
222 | table.field-list, table.footnote {
223 |     border: none;
224 |     -moz-box-shadow: none;
225 |     -webkit-box-shadow: none;
226 |     box-shadow: none;
227 | }
228 | 
229 | table.footnote {
230 |     margin: 15px 0;
231 |     width: 100%;
232 |     border: 1px solid #eee;
233 | }
234 | 
235 | table.field-list th {
236 |     padding: 0 0.8em 0 0;
237 | }
238 | 
239 | table.field-list td {
240 |     padding: 0;
241 | }
242 | 
243 | table.footnote td {
244 |     padding: 0.5em;
245 | }
246 | 
247 | dl {
248 |     margin: 0;
249 |     padding: 0;
250 | }
251 | 
252 | dl dd {
253 |     margin-left: 30px;
254 | }
255 | 
256 | pre {
257 |     margin: 15px 0;
258 |     line-height: 1.4em;
259 |     padding: 10px 20px;
260 |     background: #eee;
261 | }
262 | 
263 | a.reference.internal {
264 |     background: transparent;
265 |     color: black;
266 | }
267 | 
268 | code, a code, code.xref {
269 |     background-color: #eee;
270 |     color: #222;
271 |     /* padding: 1px 2px; */
272 | }
273 | 
274 | a:hover code {
275 |     background: black;
276 |     color: white;
277 | }
278 | 
--------------------------------------------------------------------------------
/docs/_themes/rb_theme/theme.conf:
--------------------------------------------------------------------------------
1 | [theme]
2 | inherit = basic
3 | stylesheet = rb.css
4 | nosidebar = true
5 | 
6 | [options]
7 | index_logo = ''
8 | github_fork =
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # rb documentation build configuration file, created by
4 | # sphinx-quickstart on Mon Apr 26 19:53:01 2010.
5 | #
6 | # This file is execfile()d with the current directory set to its containing dir.
7 | #
8 | # Note that not all possible configuration values are present in this
9 | # autogenerated file.
10 | #
11 | # All configuration values have a default; values that are commented out
12 | # serve to show the default.
13 | 
14 | import sys, os
15 | 
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | sys.path.append(os.path.abspath('_themes'))
20 | sys.path.append(os.path.abspath('..'))
21 | 
22 | # -- General configuration -----------------------------------------------------
23 | 
24 | # If your documentation needs a minimal Sphinx version, state it here.
25 | #needs_sphinx = '1.0'
26 | 
27 | # Add any Sphinx extension module names here, as strings. They can be extensions
28 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
29 | extensions = ['sphinx.ext.autodoc']
30 | 
31 | # Add any paths that contain templates here, relative to this directory.
32 | templates_path = ['_templates']
33 | 
34 | # The suffix of source filenames.
35 | source_suffix = '.rst'
36 | 
37 | # The encoding of source files.
38 | #source_encoding = 'utf-8-sig'
39 | 
40 | # The master toctree document.
41 | master_doc = 'index'
42 | 
43 | # General information about the project.
44 | project = u'rb'
45 | copyright = u'2015, Functional Software Inc.'
46 | 
47 | # The version info for the project you're documenting, acts as replacement for
48 | # |version| and |release|, also used in various other places throughout the
49 | # built documents.
50 | #
51 | # The short X.Y version.
52 | version = '1.0'
53 | # The full version, including alpha/beta/rc tags.
54 | release = '1.0'
55 | 
56 | # The language for content autogenerated by Sphinx. Refer to documentation
57 | # for a list of supported languages.
58 | #language = None
59 | 
60 | # There are two options for replacing |today|: either, you set today to some
61 | # non-false value, then it is used:
62 | #today = ''
63 | # Else, today_fmt is used as the format for a strftime call.
64 | #today_fmt = '%B %d, %Y'
65 | 
66 | # List of patterns, relative to source directory, that match files and
67 | # directories to ignore when looking for source files.
68 | exclude_patterns = ['_build']
69 | 
70 | # The reST default role (used for this markup: `text`) to use for all documents.
71 | #default_role = None
72 | 
73 | # If true, '()' will be appended to :func: etc. cross-reference text.
74 | #add_function_parentheses = True
75 | 
76 | # If true, the current module name will be prepended to all description
77 | # unit titles (such as .. function::).
78 | #add_module_names = True
79 | 
80 | # If true, sectionauthor and moduleauthor directives will be shown in the
81 | # output. They are ignored by default.
82 | #show_authors = False
83 | 
84 | # A list of ignored prefixes for module index sorting.
85 | #modindex_common_prefix = []
86 | 
87 | 
88 | # -- Options for HTML output ---------------------------------------------------
89 | 
90 | # The theme to use for HTML and HTML Help pages. Major themes that come with
91 | # Sphinx are currently 'default' and 'sphinxdoc'.
92 | html_theme = 'rb_theme'
93 | 
94 | # Theme options are theme-specific and customize the look and feel of a theme
95 | # further. For a list of options available for each theme, see the
96 | # documentation.
97 | html_theme_options = {
98 |     'index_logo': 'rb.png',
99 |     'github_fork': 'getsentry/rb'
100 | }
101 | 
102 | # Add any paths that contain custom themes here, relative to this directory.
103 | html_theme_path = ['_themes']
104 | 
105 | # The name for this set of Sphinx documents.  If None, it defaults to
106 | # "<project> v<release> documentation".
107 | html_title = 'rb'
108 | 
109 | # A shorter title for the navigation bar. Default is the same as html_title.
110 | #html_short_title = None
111 | 
112 | # The name of an image file (relative to this directory) to place at the top
113 | # of the sidebar.
114 | #html_logo = None
115 | 
116 | # The name of an image file (within the static path) to use as favicon of the
117 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
118 | # pixels large.
119 | #html_favicon = None
120 | 
121 | # Add any paths that contain custom static files (such as style sheets) here,
122 | # relative to this directory. They are copied after the builtin static files,
123 | # so a file named "default.css" will overwrite the builtin "default.css".
124 | html_static_path = ['_static']
125 | 
126 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
127 | # using the given strftime format.
128 | #html_last_updated_fmt = '%b %d, %Y'
129 | 
130 | # If true, SmartyPants will be used to convert quotes and dashes to
131 | # typographically correct entities.
132 | #html_use_smartypants = True
133 | 
134 | # Custom sidebar templates, maps document names to template names.
135 | #html_sidebars = {}
136 | 
137 | # Additional templates that should be rendered to pages, maps page names to
138 | # template names.
139 | #html_additional_pages = {}
140 | 
141 | # If false, no module index is generated.
142 | #html_domain_indices = True
143 | 
144 | # If false, no index is generated.
145 | #html_use_index = True
146 | 
147 | # If true, the index is split into individual pages for each letter.
148 | #html_split_index = False
149 | 
150 | # If true, links to the reST sources are added to the pages.
151 | #html_show_sourcelink = True
152 | 
153 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
154 | #html_show_sphinx = True
155 | 
156 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
157 | #html_show_copyright = True
158 | 
159 | # If true, an OpenSearch description file will be output, and all pages will
160 | # contain a <link> tag referring to it. The value of this option must be the
161 | # base URL from which the finished HTML is served.
162 | #html_use_opensearch = ''
163 | 
164 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
165 | #html_file_suffix = ''
166 | 
167 | # Output file base name for HTML help builder.
168 | htmlhelp_basename = 'rbdoc'
169 | 
170 | 
171 | # -- Options for LaTeX output --------------------------------------------------
172 | 
173 | # The paper size ('letter' or 'a4').
174 | #latex_paper_size = 'letter'
175 | 
176 | # The font size ('10pt', '11pt' or '12pt').
177 | #latex_font_size = '10pt'
178 | 
179 | # Grouping the document tree into LaTeX files. List of tuples
180 | # (source start file, target name, title, author, documentclass [howto/manual]).
181 | latex_documents = [
182 |   ('index', 'rb.tex', u'rb documentation',
183 |    u'Functional Software Inc.', 'manual'),
184 | ]
185 | 
186 | # The name of an image file (relative to this directory) to place at the top of
187 | # the title page.
188 | #latex_logo = None
189 | 
190 | # For "manual" documents, if this is true, then toplevel headings are parts,
191 | # not chapters.
192 | #latex_use_parts = False
193 | 
194 | # Additional stuff for the LaTeX preamble.
195 | #latex_preamble = ''
196 | 
197 | # Documents to append as an appendix to all manuals.
198 | #latex_appendices = []
199 | 
200 | # If false, no module index is generated.
201 | #latex_domain_indices = True
202 | 
203 | pygments_style = 'tango'
204 | 
205 | 
206 | # -- Options for manual page output --------------------------------------------
207 | 
208 | # One entry per manual page. List of tuples
209 | # (source start file, name, description, authors, manual section).
210 | man_pages = [
211 |     ('index', 'rb', u'rb documentation',
212 |      [u'Functional Software Inc.'], 1)
213 | ]
214 | 
215 | intersphinx_mapping = {
216 | }
217 | 
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | rb: the redis blaster
2 | =====================
3 | 
4 | .. module:: rb
5 | 
6 | Rb, the redis blaster, is a library that implements non-replicated
7 | sharding for redis. It implements a custom routing system on top of
8 | python redis that allows you to automatically target different servers
9 | without having to manually route requests to the individual nodes.
10 | 
11 | It does not implement all functionality of redis and does not attempt to
12 | do so. You can at any point get a client to a specific host, but for the
13 | most part the assumption is that your operations are limited to basic
14 | key/value operations that can be routed to different nodes automatically.
15 | 
16 | What you can do:
17 | 
18 | * automatically target hosts for single-key operations
19 | * execute commands against all or a subset of nodes
20 | * do all of that in parallel
21 | 
22 | Installation
23 | ------------
24 | 
25 | rb is available on PyPI and can be installed from there::
26 | 
27 |     $ pip install rb
28 | 
29 | Configuration
30 | -------------
31 | 
32 | Getting started with rb is super easy. If you have been using py-redis
33 | before you will feel right at home. The main difference is that instead
34 | of connecting to a single host, you configure a cluster to connect to
35 | multiple::
36 | 
37 |     from rb import Cluster
38 | 
39 |     cluster = Cluster(hosts={
40 |         0: {'port': 6379},
41 |         1: {'port': 6380},
42 |         2: {'port': 6381},
43 |         3: {'port': 6382},
44 |         4: {'port': 6379},
45 |         5: {'port': 6380},
46 |         6: {'port': 6381},
47 |         7: {'port': 6382},
48 |     }, host_defaults={
49 |         'host': '127.0.0.1',
50 |     })
51 | 
52 | In this case we set up 8 nodes on four different server processes on the
53 | same host. The `hosts` parameter is a mapping of hosts to connect to.
54 | The key of the dictionary is the host ID (an integer) and the value is
55 | a dictionary of parameters. The `host_defaults` parameter is a dictionary
56 | of optional defaults that is filled in for all hosts. This is useful if
57 | you want to share some common defaults that repeat (in this case all
58 | hosts connect to localhost).
59 | 
60 | In the default configuration the :class:`PartitionRouter` is used for
61 | routing.
62 | 
63 | Routing
64 | -------
65 | 
66 | Now that the cluster is constructed we can use
67 | :meth:`Cluster.get_routing_client` to get a redis client that
68 | automatically routes to the right redis nodes for each command::
69 | 
70 |     client = cluster.get_routing_client()
71 |     results = {}
72 |     for key in keys_to_look_up:
73 |         results[key] = client.get(key)
74 | 
75 | The client works pretty much exactly like a standard pyredis
76 | `StrictRedis` with the main difference that it can only execute commands
77 | that involve exactly one key.
78 | 
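As a short sketch of what that means in practice (the key names here are
arbitrary, and the exact behavior depends on the configured router),
single-key commands are routed transparently while a command spanning
several keys cannot be routed and should raise :exc:`UnroutableCommand`::

    from rb import UnroutableCommand

    client = cluster.get_routing_client()
    client.set('foo', 'value')   # routed to the host that owns 'foo'
    client.get('foo')            # routed to the same host

    try:
        client.mset({'foo': '1', 'bar': '2'})  # keys may live on two hosts
    except UnroutableCommand:
        pass
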
79 | This basic operation however runs in series. What makes rb useful is that
80 | it can automatically build redis pipelines and send out queries to many
81 | hosts in parallel. This however changes the usage slightly as now the
82 | value is not immediately available::
83 | 
84 |     results = {}
85 |     with cluster.map() as client:
86 |         for key in keys_to_look_up:
87 |             results[key] = client.get(key)
88 | 
89 | While this looks similar, the result dictionary now holds
90 | :class:`Promise` objects instead of the actual values. When the map
91 | context manager ends, however, they are guaranteed to have been executed
92 | and you can access the :attr:`Promise.value` attribute to get the value::
93 | 
94 |     for key, promise in results.items():
95 |         print('%s: %s' % (key, promise.value))
96 | 
97 | If you want to send a command to all participating hosts (for instance to
98 | delete the database) you can use the :meth:`Cluster.all` method::
99 | 
100 |     with cluster.all() as client:
101 |         client.flushdb()
102 | 
103 | If you do that, the promise value is a dictionary with the host IDs as
104 | keys and the results as values. As an example::
105 | 
106 |     with cluster.all() as client:
107 |         results = client.info()
108 |     for host_id, info in results.value.items():
109 |         print('host %s is running %s' % (host_id, info['os']))
110 | 
111 | To explicitly target some hosts you can use :meth:`Cluster.fanout` which
112 | accepts a list of host IDs to send the command to.
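For example, to ask only the first two hosts for their key counts (a short
sketch; as with :meth:`Cluster.all`, the promise value is assumed to be a
dictionary keyed by host ID)::

    with cluster.fanout(hosts=[0, 1]) as client:
        sizes = client.dbsize()
    for host_id, size in sizes.value.items():
        print('host %s holds %s keys' % (host_id, size))
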
113 | 
114 | API
115 | ---
116 | 
117 | This is the entire reference of the public API. Note that this library
118 | extends the Python redis library so some of these classes have more
119 | functionality for which you will need to consult the py-redis library.
120 | 
121 | Cluster
122 | ```````
123 | 
124 | .. autoclass:: Cluster
125 |    :members:
126 | 
127 | Clients
128 | ```````
129 | 
130 | .. autoclass:: RoutingClient
131 |    :members:
132 | 
133 | .. autoclass:: MappingClient
134 |    :members:
135 | 
136 | .. autoclass:: FanoutClient
137 |    :members:
138 | 
139 | Promise
140 | ```````
141 | 
142 | .. autoclass:: Promise
143 |    :members:
144 | 
145 | Routers
146 | ```````
147 | 
148 | .. autoclass:: BaseRouter
149 |    :members:
150 | 
151 | .. autoclass:: ConsistentHashingRouter
152 |    :members:
153 | 
154 | .. autoclass:: PartitionRouter
155 |    :members:
156 | 
157 | .. autoexception:: UnroutableCommand
158 | 
159 | Testing
160 | ```````
161 | 
162 | .. autoclass:: rb.testing.TestSetup
163 | 
164 | .. autofunction:: rb.testing.make_test_cluster
165 | 
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 | 
3 | REM Command file for Sphinx documentation
4 | 
5 | if "%SPHINXBUILD%" == "" (
6 | 	set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=_build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
10 | if NOT "%PAPER%" == "" (
11 | 	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
12 | )
13 | 
14 | if "%1" == "" goto help
15 | 
16 | if "%1" == "help" (
17 | 	:help
18 | 	echo.Please use `make ^<target^>` where ^<target^> is one of
19 | 	echo.  html       to make standalone HTML files
20 | 	echo.  dirhtml    to make HTML files named index.html in directories
21 | 	echo.  singlehtml to make a single large HTML file
22 | 	echo.  pickle     to make pickle files
23 | 	echo.  json       to make JSON files
24 | 	echo.  htmlhelp   to make HTML files and a HTML help project
25 | 	echo.  qthelp     to make HTML files and a qthelp project
26 | 	echo.  devhelp    to make HTML files and a Devhelp project
27 | 	echo.  epub       to make an epub
28 | 	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
29 | 	echo.  text       to make text files
30 | 	echo.  man        to make manual pages
31 | 	echo.  changes    to make an overview over all changed/added/deprecated items
32 | 	echo.  linkcheck  to check all external links for integrity
33 | 	echo.  doctest    to run all doctests embedded in the documentation if enabled
34 | 	goto end
35 | )
36 | 
37 | if "%1" == "clean" (
38 | 	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
39 | 	del /q /s %BUILDDIR%\*
40 | 	goto end
41 | )
42 | 
43 | if "%1" == "html" (
44 | 	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
45 | 	echo.
46 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
47 | 	goto end
48 | )
49 | 
50 | if "%1" == "dirhtml" (
51 | 	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
52 | 	echo.
53 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
54 | 	goto end
55 | )
56 | 
57 | if "%1" == "singlehtml" (
58 | 	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
59 | 	echo.
60 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
61 | 	goto end
62 | )
63 | 
64 | if "%1" == "pickle" (
65 | 	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
66 | 	echo.
67 | 	echo.Build finished; now you can process the pickle files.
68 | 	goto end
69 | )
70 | 
71 | if "%1" == "json" (
72 | 	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
73 | 	echo.
74 | 	echo.Build finished; now you can process the JSON files.
75 | 	goto end
76 | )
77 | 
78 | if "%1" == "htmlhelp" (
79 | 	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
80 | 	echo.
81 | 	echo.Build finished; now you can run HTML Help Workshop with the ^
82 | .hhp project file in %BUILDDIR%/htmlhelp.
83 | 	goto end
84 | )
85 | 
86 | if "%1" == "qthelp" (
87 | 	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
88 | 	echo.
89 | 	echo.Build finished; now you can run "qcollectiongenerator" with the ^
90 | .qhcp project file in %BUILDDIR%/qthelp, like this:
91 | 	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Classy.qhcp
92 | 	echo.To view the help file:
93 | 	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Classy.ghc
94 | 	goto end
95 | )
96 | 
97 | if "%1" == "devhelp" (
98 | 	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
99 | 	echo.
100 | 	echo.Build finished.
101 | 	goto end
102 | )
103 | 
104 | if "%1" == "epub" (
105 | 	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
106 | 	echo.
107 | 	echo.Build finished. The epub file is in %BUILDDIR%/epub.
108 | 	goto end
109 | )
110 | 
111 | if "%1" == "latex" (
112 | 	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
113 | 	echo.
114 | 	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
115 | 	goto end
116 | )
117 | 
118 | if "%1" == "text" (
119 | 	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
120 | 	echo.
121 | 	echo.Build finished. The text files are in %BUILDDIR%/text.
122 | 	goto end
123 | )
124 | 
125 | if "%1" == "man" (
126 | 	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
127 | 	echo.
128 | 	echo.Build finished. The manual pages are in %BUILDDIR%/man.
129 | 	goto end
130 | )
131 | 
132 | if "%1" == "changes" (
133 | 	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
134 | 	echo.
135 | 	echo.The overview file is in %BUILDDIR%/changes.
136 | 	goto end
137 | )
138 | 
139 | if "%1" == "linkcheck" (
140 | 	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
141 | 	echo.
142 | 	echo.Link check complete; look for any errors in the above output ^
143 | or in %BUILDDIR%/linkcheck/output.txt.
144 | 	goto end
145 | )
146 | 
147 | if "%1" == "doctest" (
148 | 	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
149 | 	echo.
150 | 	echo.Testing of doctests in the sources finished, look at the ^
151 | results in %BUILDDIR%/doctest/output.txt.
152 | 	goto end
153 | )
154 | 
155 | :end
156 | 
--------------------------------------------------------------------------------
/hooks/pre-commit:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import glob
4 | import os
5 | import sys
6 | 
7 | os.environ['PYFLAKES_NODOCTEST'] = '1'
8 | 
9 | # pep8.py uses sys.argv to find setup.cfg
10 | sys.argv = [os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)]
11 | 
12 | # git usurps your bin path for hooks and will always run system python
13 | if 'VIRTUAL_ENV' in os.environ:
14 |     site_packages = glob.glob(
15 |         '%s/lib/*/site-packages' % os.environ['VIRTUAL_ENV'])[0]
16 |     sys.path.insert(0, site_packages)
17 | 
18 | 
19 | def py_lint(files_modified):
20 |     from flake8.main import DEFAULT_CONFIG
21 |     from flake8.engine import get_style_guide
22 | 
23 |     # remove non-py files and files which no longer exist; a list (not a lazy
24 |     # filter object) so the emptiness check below also works on Python 3
25 |     files_modified = [x for x in files_modified if x.endswith('.py')]
26 | 
27 |     if not files_modified:
28 |         return False
29 | 
30 |     flake8_style = get_style_guide(config_file=DEFAULT_CONFIG)
31 |     report = flake8_style.check_files(files_modified)
32 | 
33 |     return report.total_errors != 0
34 | 
35 | 
36 | def main():
37 |     from flake8.hooks import run
38 | 
39 |     gitcmd = "git diff-index --cached --name-only HEAD"
40 | 
41 |     _, files_modified, _ = run(gitcmd)
42 | 
43 |     files_modified = [x for x in files_modified if os.path.exists(x)]
44 | 
45 |     if py_lint(files_modified):
46 |         return 1
47 |     return 0
48 | 
49 | if __name__ == '__main__':
50 |     sys.exit(main())
51 | 
--------------------------------------------------------------------------------
/rb/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 |     rb
3 |     ~~
4 | 
5 |     The redis blaster.
6 | 
7 |     :copyright: (c) 2015 Functional Software Inc.
8 |     :license: Apache License 2.0, see LICENSE for more details.
9 | """ 10 | from rb.cluster import Cluster 11 | from rb.clients import RoutingClient, MappingClient, FanoutClient 12 | from rb.router import ( 13 | BaseRouter, 14 | ConsistentHashingRouter, 15 | PartitionRouter, 16 | UnroutableCommand, 17 | ) 18 | from rb.promise import Promise 19 | 20 | 21 | __version__ = "1.10.0" 22 | 23 | __all__ = [ 24 | # cluster 25 | "Cluster", 26 | # client 27 | "RoutingClient", 28 | "MappingClient", 29 | "FanoutClient", 30 | # router 31 | "BaseRouter", 32 | "ConsistentHashingRouter", 33 | "PartitionRouter", 34 | "UnroutableCommand", 35 | # promise 36 | "Promise", 37 | ] 38 | -------------------------------------------------------------------------------- /rb/_rediscommands.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | COMMANDS = { 4 | "APPEND": {"arity": 3, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 5 | "AUTH": { 6 | "arity": 2, 7 | "flags": ["readonly", "noscript", "loading", "stale", "fast"], 8 | "key_spec": (0, 0, 0), 9 | }, 10 | "BGREWRITEAOF": {"arity": 1, "flags": ["readonly", "admin"], "key_spec": (0, 0, 0)}, 11 | "BGSAVE": {"arity": 1, "flags": ["readonly", "admin"], "key_spec": (0, 0, 0)}, 12 | "BITCOUNT": {"arity": -2, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 13 | "BITOP": {"arity": -4, "flags": ["write", "denyoom"], "key_spec": (2, -1, 1)}, 14 | "BITPOS": {"arity": -3, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 15 | "BLPOP": {"arity": -3, "flags": ["write", "noscript"], "key_spec": (1, -2, 1)}, 16 | "BRPOP": {"arity": -3, "flags": ["write", "noscript"], "key_spec": (1, 1, 1)}, 17 | "BRPOPLPUSH": { 18 | "arity": 4, 19 | "flags": ["write", "denyoom", "noscript"], 20 | "key_spec": (1, 2, 1), 21 | }, 22 | "CLIENT": {"arity": -2, "flags": ["readonly", "admin"], "key_spec": (0, 0, 0)}, 23 | "COMMAND": { 24 | "arity": 0, 25 | "flags": ["readonly", "loading", "stale"], 26 | "key_spec": (0, 0, 0), 27 | }, 28 | "CONFIG": { 29 | "arity": -2, 30 | "flags": ["readonly", "admin", "stale"], 31 | "key_spec": (0, 0, 0), 32 | }, 33 | "DBSIZE": {"arity": 1, "flags": ["readonly", "fast"], "key_spec": (0, 0, 0)}, 34 | "DEBUG": {"arity": -2, "flags": ["admin", "noscript"], "key_spec": (0, 0, 0)}, 35 | "DECR": {"arity": 2, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)}, 36 | "DECRBY": { 37 | "arity": 3, 38 | "flags": ["write", "denyoom", "fast"], 39 | "key_spec": (1, 1, 1), 40 | }, 41 | "DEL": {"arity": -2, "flags": ["write"], "key_spec": (1, -1, 1)}, 42 | "DISCARD": { 43 | "arity": 1, 44 | "flags": ["readonly", "noscript", "fast"], 45 | "key_spec": (0, 0, 0), 46 | }, 47 | "DUMP": {"arity": 2, "flags": ["readonly", "admin"], "key_spec": (1, 1, 1)}, 48 | "ECHO": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (0, 0, 0)}, 49 | "EVAL": {"arity": -3, "flags": ["noscript", "movablekeys"], "key_spec": (0, 0, 0)}, 50 | "EVALSHA": { 51 | "arity": -3, 52 | "flags": ["noscript", "movablekeys"], 53 | "key_spec": (0, 0, 0), 54 | }, 55 | "EXEC": {"arity": 1, "flags": ["noscript", "skip_monitor"], "key_spec": (0, 0, 0)}, 56 | "EXISTS": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 57 | "EXPIRE": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 58 | "EXPIREAT": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 59 | "FLUSHALL": {"arity": 1, "flags": ["write"], "key_spec": (0, 0, 0)}, 60 | "FLUSHDB": {"arity": 1, "flags": ["write"], "key_spec": (0, 0, 0)}, 61 | "GET": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 
1)}, 62 | "GETBIT": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 63 | "GETRANGE": {"arity": 4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 64 | "GETSET": {"arity": 3, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 65 | "HDEL": {"arity": -3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 66 | "HEXISTS": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 67 | "HGET": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 68 | "HGETALL": {"arity": 2, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 69 | "HINCRBY": { 70 | "arity": 4, 71 | "flags": ["write", "denyoom", "fast"], 72 | "key_spec": (1, 1, 1), 73 | }, 74 | "HINCRBYFLOAT": { 75 | "arity": 4, 76 | "flags": ["write", "denyoom", "fast"], 77 | "key_spec": (1, 1, 1), 78 | }, 79 | "HKEYS": { 80 | "arity": 2, 81 | "flags": ["readonly", "sort_for_script"], 82 | "key_spec": (1, 1, 1), 83 | }, 84 | "HLEN": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 85 | "HMGET": {"arity": -3, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 86 | "HMSET": {"arity": -4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 87 | "HSCAN": {"arity": -3, "flags": ["readonly", "random"], "key_spec": (1, 1, 1)}, 88 | "HSET": {"arity": 4, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)}, 89 | "HSETNX": { 90 | "arity": 4, 91 | "flags": ["write", "denyoom", "fast"], 92 | "key_spec": (1, 1, 1), 93 | }, 94 | "HVALS": { 95 | "arity": 2, 96 | "flags": ["readonly", "sort_for_script"], 97 | "key_spec": (1, 1, 1), 98 | }, 99 | "INCR": {"arity": 2, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)}, 100 | "INCRBY": { 101 | "arity": 3, 102 | "flags": ["write", "denyoom", "fast"], 103 | "key_spec": (1, 1, 1), 104 | }, 105 | "INCRBYFLOAT": { 106 | "arity": 3, 107 | "flags": ["write", "denyoom", "fast"], 108 | "key_spec": (1, 1, 1), 109 | }, 110 | "INFO": { 111 | "arity": -1, 112 | "flags": ["readonly", "loading", "stale"], 113 | "key_spec": (0, 0, 0), 114 | }, 115 | "KEYS": { 116 | "arity": 2, 117 | "flags": ["readonly", "sort_for_script"], 118 | "key_spec": (0, 0, 0), 119 | }, 120 | "LASTSAVE": { 121 | "arity": 1, 122 | "flags": ["readonly", "random", "fast"], 123 | "key_spec": (0, 0, 0), 124 | }, 125 | "LATENCY": { 126 | "arity": -2, 127 | "flags": ["readonly", "admin", "noscript", "loading", "stale"], 128 | "key_spec": (0, 0, 0), 129 | }, 130 | "LINDEX": {"arity": 3, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 131 | "LINSERT": {"arity": 5, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 132 | "LLEN": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 133 | "LPOP": {"arity": 2, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 134 | "LPUSH": { 135 | "arity": -3, 136 | "flags": ["write", "denyoom", "fast"], 137 | "key_spec": (1, 1, 1), 138 | }, 139 | "LPUSHX": { 140 | "arity": 3, 141 | "flags": ["write", "denyoom", "fast"], 142 | "key_spec": (1, 1, 1), 143 | }, 144 | "LRANGE": {"arity": 4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 145 | "LREM": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)}, 146 | "LSET": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 147 | "LTRIM": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)}, 148 | "MGET": {"arity": -2, "flags": ["readonly"], "key_spec": (1, -1, 1)}, 149 | "MIGRATE": {"arity": 6, "flags": ["write", "admin"], "key_spec": (0, 0, 0)}, 150 | "MONITOR": { 151 | "arity": 1, 152 | "flags": ["readonly", "admin", "noscript"], 153 | "key_spec": (0, 0, 0), 154 | }, 155 
| "MOVE": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 156 | "MSET": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 2)}, 157 | "MSETNX": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 2)}, 158 | "MULTI": { 159 | "arity": 1, 160 | "flags": ["readonly", "noscript", "fast"], 161 | "key_spec": (0, 0, 0), 162 | }, 163 | "OBJECT": {"arity": 3, "flags": ["readonly"], "key_spec": (2, 2, 2)}, 164 | "PERSIST": {"arity": 2, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 165 | "PEXPIRE": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 166 | "PEXPIREAT": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 167 | "PFADD": { 168 | "arity": -2, 169 | "flags": ["write", "denyoom", "fast"], 170 | "key_spec": (1, 1, 1), 171 | }, 172 | "PFCOUNT": {"arity": -2, "flags": ["write"], "key_spec": (1, 1, 1)}, 173 | "PFDEBUG": {"arity": -3, "flags": ["write"], "key_spec": (0, 0, 0)}, 174 | "PFMERGE": {"arity": -2, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)}, 175 | "PFSELFTEST": {"arity": 1, "flags": ["readonly"], "key_spec": (0, 0, 0)}, 176 | "PING": {"arity": 1, "flags": ["readonly", "stale", "fast"], "key_spec": (0, 0, 0)}, 177 | "PSETEX": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 178 | "PSUBSCRIBE": { 179 | "arity": -2, 180 | "flags": ["readonly", "pubsub", "noscript", "loading", "stale"], 181 | "key_spec": (0, 0, 0), 182 | }, 183 | "PSYNC": { 184 | "arity": 3, 185 | "flags": ["readonly", "admin", "noscript"], 186 | "key_spec": (0, 0, 0), 187 | }, 188 | "PTTL": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 189 | "PUBLISH": { 190 | "arity": 3, 191 | "flags": ["readonly", "pubsub", "loading", "stale", "fast"], 192 | "key_spec": (0, 0, 0), 193 | }, 194 | "PUBSUB": { 195 | "arity": -2, 196 | "flags": ["readonly", "pubsub", "random", "loading", "stale"], 197 | "key_spec": (0, 0, 0), 198 | }, 199 | "PUNSUBSCRIBE": { 200 | "arity": -1, 201 | "flags": ["readonly", "pubsub", "noscript", "loading", "stale"], 202 | "key_spec": (0, 0, 0), 203 | }, 204 | "RANDOMKEY": {"arity": 1, "flags": ["readonly", "random"], "key_spec": (0, 0, 0)}, 205 | "RENAME": {"arity": 3, "flags": ["write"], "key_spec": (1, 2, 1)}, 206 | "RENAMENX": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 2, 1)}, 207 | "REPLCONF": { 208 | "arity": -1, 209 | "flags": ["readonly", "admin", "noscript", "loading", "stale"], 210 | "key_spec": (0, 0, 0), 211 | }, 212 | "RESTORE": { 213 | "arity": 4, 214 | "flags": ["write", "denyoom", "admin"], 215 | "key_spec": (1, 1, 1), 216 | }, 217 | "ROLE": { 218 | "arity": 1, 219 | "flags": ["admin", "noscript", "loading", "stale"], 220 | "key_spec": (0, 0, 0), 221 | }, 222 | "RPOP": {"arity": 2, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 223 | "RPOPLPUSH": {"arity": 3, "flags": ["write", "denyoom"], "key_spec": (1, 2, 1)}, 224 | "RPUSH": { 225 | "arity": -3, 226 | "flags": ["write", "denyoom", "fast"], 227 | "key_spec": (1, 1, 1), 228 | }, 229 | "RPUSHX": { 230 | "arity": 3, 231 | "flags": ["write", "denyoom", "fast"], 232 | "key_spec": (1, 1, 1), 233 | }, 234 | "SADD": {"arity": -3, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)}, 235 | "SAVE": { 236 | "arity": 1, 237 | "flags": ["readonly", "admin", "noscript"], 238 | "key_spec": (0, 0, 0), 239 | }, 240 | "SCAN": {"arity": -2, "flags": ["readonly", "random"], "key_spec": (0, 0, 0)}, 241 | "SCARD": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 242 | "SCRIPT": { 243 | 
"arity": -2, 244 | "flags": ["readonly", "admin", "noscript"], 245 | "key_spec": (0, 0, 0), 246 | }, 247 | "SDIFF": { 248 | "arity": -2, 249 | "flags": ["readonly", "sort_for_script"], 250 | "key_spec": (1, -1, 1), 251 | }, 252 | "SDIFFSTORE": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)}, 253 | "SELECT": { 254 | "arity": 2, 255 | "flags": ["readonly", "loading", "fast"], 256 | "key_spec": (0, 0, 0), 257 | }, 258 | "SET": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 259 | "SETBIT": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 260 | "SETEX": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 261 | "SETNX": {"arity": 3, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)}, 262 | "SETRANGE": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 263 | "SHUTDOWN": { 264 | "arity": -1, 265 | "flags": ["readonly", "admin", "loading", "stale"], 266 | "key_spec": (0, 0, 0), 267 | }, 268 | "SINTER": { 269 | "arity": -2, 270 | "flags": ["readonly", "sort_for_script"], 271 | "key_spec": (1, -1, 1), 272 | }, 273 | "SINTERSTORE": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)}, 274 | "SISMEMBER": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 275 | "SLAVEOF": { 276 | "arity": 3, 277 | "flags": ["admin", "noscript", "stale"], 278 | "key_spec": (0, 0, 0), 279 | }, 280 | "SLOWLOG": {"arity": -2, "flags": ["readonly"], "key_spec": (0, 0, 0)}, 281 | "SMEMBERS": { 282 | "arity": 2, 283 | "flags": ["readonly", "sort_for_script"], 284 | "key_spec": (1, 1, 1), 285 | }, 286 | "SMOVE": {"arity": 4, "flags": ["write", "fast"], "key_spec": (1, 2, 1)}, 287 | "SORT": {"arity": -2, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)}, 288 | "SPOP": { 289 | "arity": 2, 290 | "flags": ["write", "noscript", "random", "fast"], 291 | "key_spec": (1, 1, 1), 292 | }, 293 | "SRANDMEMBER": { 294 | "arity": -2, 295 | "flags": ["readonly", "random"], 296 | "key_spec": (1, 1, 1), 297 | }, 298 | "SREM": {"arity": -3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 299 | "SSCAN": {"arity": -3, "flags": ["readonly", "random"], "key_spec": (1, 1, 1)}, 300 | "STRLEN": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 301 | "SUBSCRIBE": { 302 | "arity": -2, 303 | "flags": ["readonly", "pubsub", "noscript", "loading", "stale"], 304 | "key_spec": (0, 0, 0), 305 | }, 306 | "SUBSTR": {"arity": 4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 307 | "SUNION": { 308 | "arity": -2, 309 | "flags": ["readonly", "sort_for_script"], 310 | "key_spec": (1, -1, 1), 311 | }, 312 | "SUNIONSTORE": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)}, 313 | "SYNC": { 314 | "arity": 1, 315 | "flags": ["readonly", "admin", "noscript"], 316 | "key_spec": (0, 0, 0), 317 | }, 318 | "TIME": { 319 | "arity": 1, 320 | "flags": ["readonly", "random", "fast"], 321 | "key_spec": (0, 0, 0), 322 | }, 323 | "TTL": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 324 | "TYPE": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 325 | "UNSUBSCRIBE": { 326 | "arity": -1, 327 | "flags": ["readonly", "pubsub", "noscript", "loading", "stale"], 328 | "key_spec": (0, 0, 0), 329 | }, 330 | "UNWATCH": { 331 | "arity": 1, 332 | "flags": ["readonly", "noscript", "fast"], 333 | "key_spec": (0, 0, 0), 334 | }, 335 | "WATCH": { 336 | "arity": -2, 337 | "flags": ["readonly", "noscript", "fast"], 338 | "key_spec": (1, -1, 1), 339 | }, 340 | "ZADD": {"arity": -4, 
"flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)}, 341 | "ZCARD": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 342 | "ZCOUNT": {"arity": 4, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 343 | "ZINCRBY": { 344 | "arity": 4, 345 | "flags": ["write", "denyoom", "fast"], 346 | "key_spec": (1, 1, 1), 347 | }, 348 | "ZINTERSTORE": { 349 | "arity": -4, 350 | "flags": ["write", "denyoom", "movablekeys"], 351 | "key_spec": (0, 0, 0), 352 | }, 353 | "ZLEXCOUNT": {"arity": 4, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 354 | "ZRANGE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 355 | "ZRANGEBYLEX": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 356 | "ZRANGEBYSCORE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 357 | "ZRANK": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 358 | "ZREM": {"arity": -3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)}, 359 | "ZREMRANGEBYLEX": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)}, 360 | "ZREMRANGEBYRANK": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)}, 361 | "ZREMRANGEBYSCORE": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)}, 362 | "ZREVRANGE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 363 | "ZREVRANGEBYLEX": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 364 | "ZREVRANGEBYSCORE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)}, 365 | "ZREVRANK": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 366 | "ZSCAN": {"arity": -3, "flags": ["readonly", "random"], "key_spec": (1, 1, 1)}, 367 | "ZSCORE": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)}, 368 | "ZUNIONSTORE": { 369 | "arity": -4, 370 | "flags": ["write", "denyoom", "movablekeys"], 371 | "key_spec": (0, 0, 0), 372 | }, 373 | } 374 | 375 | 376 | if __name__ == "__main__": 377 | import redis 378 | import pprint 379 | 380 | rv = {} 381 | for row in redis.Redis().execute_command("COMMAND"): 382 | cmd, arity, flags, first_key, last_key, step_count = row 383 | rv[cmd.upper()] = { 384 | "arity": arity, 385 | "flags": flags, 386 | "key_spec": (int(first_key), int(last_key), int(step_count)), 387 | } 388 | 389 | tail = [] 390 | with open(__file__.rstrip("co"), "r+") as f: 391 | for line in f: 392 | if line.strip() == "if __name__ == '__main__':": 393 | tail.append(line) 394 | tail.extend(f) 395 | break 396 | 397 | f.seek(0) 398 | f.truncate(0) 399 | f.write( 400 | "# flake8: noqa\n\nCOMMANDS = %s\n\n\n%s" 401 | % (pprint.pformat(rv, width=74), "".join(tail)) 402 | ) 403 | -------------------------------------------------------------------------------- /rb/clients.py: -------------------------------------------------------------------------------- 1 | import time 2 | import errno 3 | import socket 4 | 5 | from weakref import ref as weakref 6 | 7 | from redis import StrictRedis 8 | from redis.client import list_or_args 9 | from redis.exceptions import ConnectionError 10 | 11 | try: 12 | from redis.exceptions import TimeoutError 13 | except ImportError: 14 | TimeoutError = ConnectionError 15 | 16 | from rb.promise import Promise 17 | from rb.poll import poll, is_closed 18 | from rb.utils import izip, iteritems 19 | 20 | 21 | AUTO_BATCH_COMMANDS = { 22 | "GET": ("MGET", True), 23 | "SET": ("MSET", False), 24 | } 25 | 26 | 27 | def assert_open(client): 28 | if client.closed: 29 | raise ValueError("I/O operation on closed file") 30 | 31 | 32 | def merge_batch(command_name, arg_promise_tuples): 
33 | batch_command, list_response = AUTO_BATCH_COMMANDS[command_name] 34 | 35 | if len(arg_promise_tuples) == 1: 36 | args, promise = arg_promise_tuples[0] 37 | return command_name, args, {}, promise 38 | 39 | promise = Promise() 40 | 41 | @promise.done 42 | def on_success(value): 43 | if list_response: 44 | for item, (_, promise) in izip(value, arg_promise_tuples): 45 | promise.resolve(item) 46 | else: 47 | for _, promise in arg_promise_tuples: 48 | promise.resolve(value) 49 | 50 | args = [] 51 | for individual_args, _ in arg_promise_tuples: 52 | args.extend(individual_args) 53 | 54 | return batch_command, args, {}, promise 55 | 56 | 57 | def auto_batch_commands(commands): 58 | """Given a pipeline of commands this attempts to merge the commands 59 | into more efficient ones if that is possible. 60 | """ 61 | pending_batch = None 62 | 63 | for command_name, args, options, promise in commands: 64 | # This command cannot be batched, return it as such. 65 | if command_name not in AUTO_BATCH_COMMANDS: 66 | if pending_batch: 67 | yield merge_batch(*pending_batch) 68 | pending_batch = None 69 | yield command_name, args, options, promise 70 | continue 71 | 72 | assert not options, "batch commands cannot merge options" 73 | if pending_batch and pending_batch[0] == command_name: 74 | pending_batch[1].append((args, promise)) 75 | else: 76 | if pending_batch: 77 | yield merge_batch(*pending_batch) 78 | pending_batch = (command_name, [(args, promise)]) 79 | 80 | if pending_batch: 81 | yield merge_batch(*pending_batch) 82 | 83 | 84 | class CommandBuffer(object): 85 | """The command buffer is an internal construct """ 86 | 87 | def __init__(self, host_id, connect, auto_batch=True): 88 | self.host_id = host_id 89 | self.connection = None 90 | self._connect_func = connect 91 | self.connect() 92 | self.commands = [] 93 | self.pending_responses = [] 94 | self.auto_batch = auto_batch 95 | self.sent_something = False 96 | self.reconnects = 0 97 | self._send_buf = [] 98 | 99 | @property 100 | def closed(self): 101 | """Indicates if the command buffer is closed.""" 102 | return self.connection is None or self.connection._sock is None 103 | 104 | def connect(self): 105 | if self.connection is not None: 106 | return 107 | self.connection = self._connect_func() 108 | # Ensure we're connected. Without this, we won't have a socket 109 | # we can select over. 110 | self.connection.connect() 111 | 112 | def reconnect(self): 113 | if self.sent_something: 114 | raise RuntimeError( 115 | "Cannot reset command buffer that already " "sent out data." 116 | ) 117 | if self.reconnects > 5: 118 | return False 119 | self.reconnects += 1 120 | self.connection = None 121 | self.connect() 122 | return True 123 | 124 | def fileno(self): 125 | """Returns the file number of the underlying connection's socket 126 | to be able to select over it. 127 | """ 128 | assert_open(self) 129 | return self.connection._sock.fileno() 130 | 131 | def enqueue_command(self, command_name, args, options): 132 | """Enqueue a new command into this pipeline.""" 133 | assert_open(self) 134 | promise = Promise() 135 | self.commands.append((command_name, args, options, promise)) 136 | return promise 137 | 138 | @property 139 | def has_pending_requests(self): 140 | """Indicates if there are outstanding pending requests on this 141 | buffer. 142 | """ 143 | return bool(self._send_buf or self.commands) 144 | 145 | def send_buffer(self): 146 | """Utility function that sends the buffer into the provided socket. 
147 | The buffer itself will slowly clear out and is modified in place. 148 | """ 149 | buf = self._send_buf 150 | sock = self.connection._sock 151 | try: 152 | timeout = sock.gettimeout() 153 | sock.setblocking(False) 154 | try: 155 | for idx, item in enumerate(buf): 156 | sent = 0 157 | while 1: 158 | try: 159 | sent = sock.send(item) 160 | except IOError as e: 161 | if e.errno == errno.EAGAIN: 162 | continue 163 | elif e.errno == errno.EWOULDBLOCK: 164 | break 165 | raise 166 | self.sent_something = True 167 | break 168 | if sent < len(item): 169 | buf[: idx + 1] = [item[sent:]] 170 | break 171 | else: 172 | del buf[:] 173 | finally: 174 | sock.settimeout(timeout) 175 | except IOError as e: 176 | self.connection.disconnect() 177 | if isinstance(e, socket.timeout): 178 | raise TimeoutError("Timeout writing to socket (host %s)" % self.host_id) 179 | raise ConnectionError( 180 | "Error while writing to socket (host %s): %s" % (self.host_id, e) 181 | ) 182 | 183 | def send_pending_requests(self): 184 | """Sends all pending requests into the connection. The default is 185 | to only send pending data that fits into the socket without blocking. 186 | This returns `True` if all data was sent or `False` if pending data 187 | is left over. 188 | """ 189 | assert_open(self) 190 | 191 | unsent_commands = self.commands 192 | if unsent_commands: 193 | self.commands = [] 194 | 195 | if self.auto_batch: 196 | unsent_commands = auto_batch_commands(unsent_commands) 197 | 198 | buf = [] 199 | for command_name, args, options, promise in unsent_commands: 200 | buf.append((command_name,) + tuple(args)) 201 | self.pending_responses.append((command_name, options, promise)) 202 | 203 | cmds = self.connection.pack_commands(buf) 204 | self._send_buf.extend(cmds) 205 | 206 | if not self._send_buf: 207 | return True 208 | 209 | self.send_buffer() 210 | return not self._send_buf 211 | 212 | def wait_for_responses(self, client): 213 | """Waits for all responses to come back and resolves the 214 | eventual results. 215 | """ 216 | assert_open(self) 217 | 218 | if self.has_pending_requests: 219 | raise RuntimeError( 220 | "Cannot wait for responses if there are " 221 | "pending requests outstanding. You need " 222 | "to wait for pending requests to be sent " 223 | "first." 224 | ) 225 | 226 | pending = self.pending_responses 227 | self.pending_responses = [] 228 | for command_name, options, promise in pending: 229 | value = client.parse_response(self.connection, command_name, **options) 230 | promise.resolve(value) 231 | 232 | 233 | class RoutingPool(object): 234 | """The routing pool works together with the routing client to 235 | internally dispatch through the cluster's router to the correct 236 | internal connection pool. 237 | """ 238 | 239 | def __init__(self, cluster): 240 | self.cluster = cluster 241 | 242 | def get_connection(self, command_name, shard_hint=None): 243 | host_id = shard_hint 244 | if host_id is None: 245 | raise RuntimeError("The routing pool requires the host id " "as shard hint") 246 | 247 | real_pool = self.cluster.get_pool_for_host(host_id) 248 | 249 | # When we check something out from the real underlying pool it's 250 | # very much possible that the connection is stale. This is why we 251 | # check out up to 10 connections which are either not connected 252 | # yet or verified alive. 
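        # (is_closed() performs a zero-timeout readiness check on the raw
        # socket, so stale connections are detected without blocking.)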
253 | for _ in range(10): 254 | con = real_pool.get_connection(command_name) 255 | if con._sock is None or not is_closed(con._sock): 256 | con.__creating_pool = weakref(real_pool) 257 | return con 258 | 259 | raise ConnectionError( 260 | "Failed to check out a valid connection " "(host %s)" % host_id 261 | ) 262 | 263 | def release(self, connection): 264 | # The real pool is referenced by the connection through an 265 | # internal weakref. If the weakref is broken it means the 266 | # pool is already gone and we do not need to release the 267 | # connection. 268 | try: 269 | real_pool = connection.__creating_pool() 270 | except (AttributeError, TypeError): 271 | real_pool = None 272 | 273 | if real_pool is not None: 274 | real_pool.release(connection) 275 | 276 | def disconnect(self): 277 | self.cluster.disconnect_pools() 278 | 279 | def reset(self): 280 | pass 281 | 282 | 283 | class BaseClient(StrictRedis): 284 | pass 285 | 286 | 287 | class RoutingBaseClient(BaseClient): 288 | def __init__(self, connection_pool, auto_batch=True): 289 | BaseClient.__init__(self, connection_pool=connection_pool) 290 | self.auto_batch = auto_batch 291 | 292 | def pubsub(self, **kwargs): 293 | raise NotImplementedError("Pubsub is unsupported.") 294 | 295 | def pipeline(self, transaction=True, shard_hint=None): 296 | raise NotImplementedError( 297 | "Manual pipelines are unsupported. rb " "automatically pipelines commands." 298 | ) 299 | 300 | def lock(self, *args, **kwargs): 301 | raise NotImplementedError("Locking is not supported.") 302 | 303 | 304 | class MappingClient(RoutingBaseClient): 305 | """The routing client uses the cluster's router to target an individual 306 | node automatically based on the key of the redis command executed. 307 | 308 | For the parameters see :meth:`Cluster.map`. 309 | """ 310 | 311 | def __init__(self, connection_pool, max_concurrency=None, auto_batch=True): 312 | RoutingBaseClient.__init__( 313 | self, connection_pool=connection_pool, auto_batch=auto_batch 314 | ) 315 | # careful. If you introduce any other variables here, then make 316 | # sure that FanoutClient.target still works correctly! 317 | self._max_concurrency = max_concurrency 318 | self._cb_poll = poll() 319 | 320 | # For the mapping client we can fix up some redis standard commands 321 | # as we are promise based and have some flexibility here. 
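    # For instance mget() below is not sent as one real MGET: the keys may
    # live on different hosts, so each key becomes its own GET and the
    # results are joined with Promise.all. Illustrative usage:
    #
    #     with cluster.map() as client:
    #         rv = client.mget(['a', 'b', 'c'])
    #     rv.value  # -> the three values, in order
    #
    # (With auto_batch enabled, GETs that do land on the same host are
    # merged back into an MGET on the wire.)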
322 | 323 | def mget(self, keys, *args): 324 | args = list_or_args(keys, args) 325 | return Promise.all([self.get(arg) for arg in args]) 326 | 327 | def mset(self, *args, **kwargs): 328 | return Promise.all( 329 | [self.set(k, v) for k, v in iteritems(dict(*args, **kwargs))] 330 | ).then(lambda x: None) 331 | 332 | # Standard redis methods 333 | 334 | def execute_command(self, *args, **options): 335 | router = self.connection_pool.cluster.get_router() 336 | host_id = router.get_host_for_command(args[0], args[1:]) 337 | buf = self._get_command_buffer(host_id, args[0]) 338 | return buf.enqueue_command(args[0], args[1:], options) 339 | 340 | # Custom Internal API 341 | 342 | def _get_command_buffer(self, host_id, command_name): 343 | """Returns the command buffer for the given command and arguments.""" 344 | buf = self._cb_poll.get(host_id) 345 | if buf is not None: 346 | return buf 347 | 348 | if self._max_concurrency is not None: 349 | while len(self._cb_poll) >= self._max_concurrency: 350 | self.join(timeout=1.0) 351 | 352 | def connect(): 353 | return self.connection_pool.get_connection(command_name, shard_hint=host_id) 354 | 355 | buf = CommandBuffer(host_id, connect, self.auto_batch) 356 | self._cb_poll.register(host_id, buf) 357 | return buf 358 | 359 | def _release_command_buffer(self, command_buffer): 360 | """This is called by the command buffer when it closes.""" 361 | if command_buffer.closed: 362 | return 363 | 364 | self._cb_poll.unregister(command_buffer.host_id) 365 | self.connection_pool.release(command_buffer.connection) 366 | command_buffer.connection = None 367 | 368 | def _send_or_reconnect(self, command_buffer): 369 | try: 370 | command_buffer.send_pending_requests() 371 | except ConnectionError as e: 372 | self._try_reconnect(command_buffer, e) 373 | 374 | def _try_reconnect(self, command_buffer, err=None): 375 | # If something was sent before, we can't do anything at which 376 | # point we just reraise the underlying error. 377 | if command_buffer.sent_something: 378 | raise err or ConnectionError( 379 | "Cannot reconnect when data was " "already sent." 380 | ) 381 | self._release_command_buffer(command_buffer) 382 | # If we cannot reconnect, reraise the error. 383 | if not command_buffer.reconnect(): 384 | raise err or ConnectionError("Too many attempts to reconnect.") 385 | self._cb_poll.register(command_buffer.host_id, command_buffer) 386 | 387 | # Custom Public API 388 | 389 | def join(self, timeout=None): 390 | """Waits for all outstanding responses to come back or the timeout 391 | to be hit. 392 | """ 393 | remaining = timeout 394 | 395 | while self._cb_poll and (remaining is None or remaining > 0): 396 | now = time.time() 397 | rv = self._cb_poll.poll(remaining) 398 | if remaining is not None: 399 | remaining -= time.time() - now 400 | 401 | for command_buffer, event in rv: 402 | # This command buffer still has pending requests which 403 | # means we have to send them out first before we can read 404 | # all the data from it. 405 | if command_buffer.has_pending_requests: 406 | if event == "close": 407 | self._try_reconnect(command_buffer) 408 | elif event == "write": 409 | self._send_or_reconnect(command_buffer) 410 | 411 | # The general assumption is that all response is available 412 | # or this might block. On reading we do not use async 413 | # receiving. This generally works because latency in the 414 | # network is low and redis is super quick in sending. It 415 | # does not make a lot of sense to complicate things here. 
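                    # When nothing is pending, a "read" (or "close") event
                    # means the responses are ready: drain them below and
                    # then give the buffer back to the pool.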
416 | elif event in ("read", "close"): 417 | try: 418 | command_buffer.wait_for_responses(self) 419 | finally: 420 | self._release_command_buffer(command_buffer) 421 | 422 | if self._cb_poll and timeout is not None: 423 | raise TimeoutError("Did not receive all data in time.") 424 | 425 | def cancel(self): 426 | """Cancels all outstanding requests.""" 427 | for command_buffer in self._cb_poll: 428 | self._release_command_buffer(command_buffer) 429 | 430 | 431 | class FanoutClient(MappingClient): 432 | """This works similar to the :class:`MappingClient` but instead of 433 | using the router to target hosts, it sends the commands to all manually 434 | specified hosts. 435 | 436 | The results are accumulated in a dictionary keyed by the `host_id`. 437 | 438 | For the parameters see :meth:`Cluster.fanout`. 439 | """ 440 | 441 | def __init__(self, hosts, connection_pool, max_concurrency=None, auto_batch=True): 442 | MappingClient.__init__( 443 | self, connection_pool, max_concurrency, auto_batch=auto_batch 444 | ) 445 | self._target_hosts = hosts 446 | self.__is_retargeted = False 447 | self.__resolve_singular_result = False 448 | 449 | def target(self, hosts): 450 | """Temporarily retarget the client for one call. This is useful 451 | when having to deal with a subset of hosts for one call. 452 | """ 453 | if self.__is_retargeted: 454 | raise TypeError("Cannot use target more than once.") 455 | rv = FanoutClient( 456 | hosts, 457 | connection_pool=self.connection_pool, 458 | max_concurrency=self._max_concurrency, 459 | ) 460 | rv._cb_poll = self._cb_poll 461 | rv.__is_retargeted = True 462 | return rv 463 | 464 | def target_key(self, key): 465 | """Temporarily retarget the client for one call to route 466 | specifically to the one host that the given key routes to. In 467 | that case the result on the promise is just the one host's value 468 | instead of a dictionary. 469 | 470 | .. versionadded:: 1.3 471 | """ 472 | router = self.connection_pool.cluster.get_router() 473 | host_id = router.get_host_for_key(key) 474 | rv = self.target([host_id]) 475 | rv.__resolve_singular_result = True 476 | return rv 477 | 478 | def execute_command(self, *args, **options): 479 | promises = {} 480 | 481 | hosts = self._target_hosts 482 | if hosts == "all": 483 | hosts = list(self.connection_pool.cluster.hosts.keys()) 484 | elif hosts is None: 485 | raise RuntimeError("Fanout client was not targeted to hosts.") 486 | 487 | for host_id in hosts: 488 | buf = self._get_command_buffer(host_id, args[0]) 489 | promise = buf.enqueue_command(args[0], args[1:], options) 490 | if self.__resolve_singular_result and len(hosts) == 1: 491 | return promise 492 | promises[host_id] = promise 493 | 494 | return Promise.all(promises) 495 | 496 | 497 | class RoutingClient(RoutingBaseClient): 498 | """A client that can route to individual targets. 499 | 500 | For the parameters see :meth:`Cluster.get_routing_client`. 
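
    A short sketch (assumes ``cluster`` is a configured :class:`Cluster`)::

        client = cluster.get_routing_client()
        client.set('foo', 'bar')   # routed to the host that owns 'foo'
        client.get('foo')          # returns the value immediately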
501 | """ 502 | 503 | def __init__(self, cluster, auto_batch=True): 504 | RoutingBaseClient.__init__( 505 | self, connection_pool=RoutingPool(cluster), auto_batch=auto_batch 506 | ) 507 | 508 | # Standard redis methods 509 | 510 | def execute_command(self, *args, **options): 511 | pool = self.connection_pool 512 | command_name = args[0] 513 | command_args = args[1:] 514 | router = self.connection_pool.cluster.get_router() 515 | host_id = router.get_host_for_command(command_name, command_args) 516 | connection = pool.get_connection(command_name, shard_hint=host_id) 517 | try: 518 | connection.send_command(*args) 519 | return self.parse_response(connection, command_name, **options) 520 | except (ConnectionError, TimeoutError) as e: 521 | connection.disconnect() 522 | if not connection.retry_on_timeout and isinstance(e, TimeoutError): 523 | raise 524 | connection.send_command(*args) 525 | return self.parse_response(connection, command_name, **options) 526 | finally: 527 | pool.release(connection) 528 | 529 | # Custom Public API 530 | 531 | def get_mapping_client(self, max_concurrency=64, auto_batch=None): 532 | """Returns a thread unsafe mapping client. This client works 533 | similar to a redis pipeline and returns eventual result objects. 534 | It needs to be joined on to work properly. Instead of using this 535 | directly you shold use the :meth:`map` context manager which 536 | automatically joins. 537 | 538 | Returns an instance of :class:`MappingClient`. 539 | """ 540 | if auto_batch is None: 541 | auto_batch = self.auto_batch 542 | return MappingClient( 543 | connection_pool=self.connection_pool, 544 | max_concurrency=max_concurrency, 545 | auto_batch=auto_batch, 546 | ) 547 | 548 | def get_fanout_client(self, hosts, max_concurrency=64, auto_batch=None): 549 | """Returns a thread unsafe fanout client. 550 | 551 | Returns an instance of :class:`FanoutClient`. 552 | """ 553 | if auto_batch is None: 554 | auto_batch = self.auto_batch 555 | return FanoutClient( 556 | hosts, 557 | connection_pool=self.connection_pool, 558 | max_concurrency=max_concurrency, 559 | auto_batch=auto_batch, 560 | ) 561 | 562 | def map(self, timeout=None, max_concurrency=64, auto_batch=None): 563 | """Returns a context manager for a map operation. This runs 564 | multiple queries in parallel and then joins in the end to collect 565 | all results. 566 | 567 | In the context manager the client available is a 568 | :class:`MappingClient`. Example usage:: 569 | 570 | results = {} 571 | with cluster.map() as client: 572 | for key in keys_to_fetch: 573 | results[key] = client.get(key) 574 | for key, promise in results.iteritems(): 575 | print '%s => %s' % (key, promise.value) 576 | """ 577 | return MapManager( 578 | self.get_mapping_client(max_concurrency, auto_batch), timeout=timeout 579 | ) 580 | 581 | def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_batch=None): 582 | """Returns a context manager for a map operation that fans out to 583 | manually specified hosts instead of using the routing system. This 584 | can for instance be used to empty the database on all hosts. The 585 | context manager returns a :class:`FanoutClient`. Example usage:: 586 | 587 | with cluster.fanout(hosts=[0, 1, 2, 3]) as client: 588 | results = client.info() 589 | for host_id, info in results.value.iteritems(): 590 | print '%s -> %s' % (host_id, info['is']) 591 | 592 | The promise returned accumulates all results in a dictionary keyed 593 | by the `host_id`. 
594 |
595 |         The `hosts` parameter is a list of `host_id`\s or alternatively the
596 |         string ``'all'`` to send the commands to all hosts.
597 |
598 |         The fanout API needs to be used with a lot of care as it can cause
599 |         a lot of damage when keys are written to hosts that do not expect
600 |         them.
601 |         """
602 |         return MapManager(
603 |             self.get_fanout_client(hosts, max_concurrency, auto_batch), timeout=timeout
604 |         )
605 |
606 |
607 | class LocalClient(BaseClient):
608 |     """The local client is just a convenient way to target one specific
609 |     host.
610 |     """
611 |
612 |     def __init__(self, connection_pool=None, **kwargs):
613 |         if connection_pool is None:
614 |             raise TypeError("The local client needs a connection pool")
615 |         BaseClient.__init__(self, connection_pool=connection_pool, **kwargs)
616 |
617 |
618 | class MapManager(object):
619 |     """Helps with mapping."""
620 |
621 |     def __init__(self, mapping_client, timeout):
622 |         self.mapping_client = mapping_client
623 |         self.timeout = timeout
624 |         self.entered = None
625 |
626 |     def __enter__(self):
627 |         self.entered = time.time()
628 |         return self.mapping_client
629 |
630 |     def __exit__(self, exc_type, exc_value, tb):
631 |         if exc_type is not None:
632 |             self.mapping_client.cancel()
633 |         else:
634 |             timeout = self.timeout
635 |             if timeout is not None:
636 |                 timeout = max(1, timeout - (time.time() - self.entered))
637 |             self.mapping_client.join(timeout=timeout)
638 |
--------------------------------------------------------------------------------
/rb/cluster.py:
--------------------------------------------------------------------------------
  1 | from redis.connection import ConnectionPool, UnixDomainSocketConnection
  2 |
  3 | try:
  4 |     from redis.commands.core import Script  # redis>=5
  5 | except ImportError:
  6 |     from redis.client import Script  # redis<5
  7 |
  8 | try:
  9 |     from redis.connection import SSLConnection
 10 | except ImportError:
 11 |     SSLConnection = None
 12 |
 13 | import functools
 14 | from hashlib import sha1
 15 | from threading import Lock
 16 |
 17 | from rb.router import PartitionRouter
 18 | from rb.clients import RoutingClient, LocalClient
 19 | from rb.utils import integer_types, iteritems, itervalues
 20 |
 21 |
 22 | class HostInfo(object):
 23 |     def __init__(
 24 |         self,
 25 |         host_id,
 26 |         host,
 27 |         port,
 28 |         unix_socket_path=None,
 29 |         db=0,
 30 |         password=None,
 31 |         ssl=False,
 32 |         ssl_options=None,
 33 |     ):
 34 |         self.host_id = host_id
 35 |         self.host = host
 36 |         self.unix_socket_path = unix_socket_path
 37 |         self.port = port
 38 |         self.db = db
 39 |         self.password = password
 40 |         self.ssl = ssl
 41 |         self.ssl_options = ssl_options
 42 |
 43 |     def __eq__(self, other):
 44 |         if self.__class__ is not other.__class__:
 45 |             return NotImplemented
 46 |         return self.host_id == other.host_id
 47 |
 48 |     def __ne__(self, other):
 49 |         rv = self.__eq__(other)
 50 |         if rv is NotImplemented:
 51 |             return NotImplemented
 52 |         return not rv
 53 |
 54 |     def __hash__(self):
 55 |         return self.host_id
 56 |
 57 |     def __repr__(self):
 58 |         return "<%s %s>" % (
 59 |             self.__class__.__name__,
 60 |             " ".join("%s=%r" % x for x in sorted(self.__dict__.items())),
 61 |         )
 62 |
 63 |
 64 | def _iter_hosts(iterable):
 65 |     if isinstance(iterable, dict):
 66 |         iterable = iteritems(iterable)
 67 |     for item in iterable:
 68 |         if isinstance(item, tuple):
 69 |             host_id, cfg = item
 70 |             cfg = dict(cfg)
 71 |             cfg["host_id"] = host_id
 72 |         else:
 73 |             cfg = item
 74 |         yield cfg
 75 |
 76 |
 77 | class Cluster(object):
 78 |     """The cluster is the core object behind rb. It holds the connection
 79 |     pools to the individual nodes and can be shared for the duration of
 80 |     the application in a central location.
 81 |
 82 |     Basic example of a cluster over four redis instances with the default
 83 |     router::
 84 |
 85 |         cluster = Cluster(hosts={
 86 |             0: {'port': 6379},
 87 |             1: {'port': 6380},
 88 |             2: {'port': 6381},
 89 |             3: {'port': 6382},
 90 |         }, host_defaults={
 91 |             'host': '127.0.0.1',
 92 |         })
 93 |
 94 |     `hosts` is a dictionary of hosts which maps numeric host IDs to
 95 |     configuration parameters. The parameters correspond to the signature
 96 |     of the :meth:`add_host` function. The defaults for these parameters
 97 |     are pulled from `host_defaults`. To override the pool class the
 98 |     `pool_cls` and `pool_options` parameters can be used. The same
 99 |     applies to `router_cls` and `router_options` for the router. The pool
100 |     options are useful for setting socket timeouts and similar parameters.
101 |     """
102 |
103 |     def __init__(
104 |         self,
105 |         hosts,
106 |         host_defaults=None,
107 |         pool_cls=None,
108 |         pool_options=None,
109 |         router_cls=None,
110 |         router_options=None,
111 |     ):
112 |         if pool_cls is None:
113 |             pool_cls = ConnectionPool
114 |         if router_cls is None:
115 |             router_cls = PartitionRouter
116 |         self._lock = Lock()
117 |         self.pool_cls = pool_cls
118 |         self.pool_options = pool_options
119 |         self.router_cls = router_cls
120 |         self.router_options = router_options
121 |         self._pools = {}
122 |         self._router = None
123 |         self.hosts = {}
124 |         self._hosts_age = 0
125 |         self.host_defaults = host_defaults or {}
126 |         for host_config in _iter_hosts(hosts):
127 |             if self.host_defaults:
128 |                 for k, v in iteritems(self.host_defaults):
129 |                     host_config.setdefault(k, v)
130 |             self.add_host(**host_config)
131 |
132 |     def add_host(
133 |         self,
134 |         host_id=None,
135 |         host="localhost",
136 |         port=6379,
137 |         unix_socket_path=None,
138 |         db=0,
139 |         password=None,
140 |         ssl=False,
141 |         ssl_options=None,
142 |     ):
143 |         """Adds a new host to the cluster. This is only really useful for
144 |         unittests as normally hosts are added through the constructor and
145 |         changes after the cluster has been used for the first time are
146 |         unlikely to make sense.
147 |         """
148 |         if host_id is None:
149 |             raise RuntimeError("Host ID is required")
150 |         elif not isinstance(host_id, integer_types):
151 |             raise ValueError("The host ID has to be an integer")
152 |         host_id = int(host_id)
153 |         with self._lock:
154 |             if host_id in self.hosts:
155 |                 raise TypeError("Two hosts share the same host id (%r)" % (host_id,))
156 |             self.hosts[host_id] = HostInfo(
157 |                 host_id=host_id,
158 |                 host=host,
159 |                 port=port,
160 |                 db=db,
161 |                 unix_socket_path=unix_socket_path,
162 |                 password=password,
163 |                 ssl=ssl,
164 |                 ssl_options=ssl_options,
165 |             )
166 |             self._hosts_age += 1
167 |
168 |     def remove_host(self, host_id):
169 |         """Removes a host from the client. This is only really useful for
170 |         unittests.
171 |         """
172 |         with self._lock:
173 |             rv = self.hosts.pop(host_id, None) is not None
174 |             pool = self._pools.pop(host_id, None)
175 |             if pool is not None:
176 |                 pool.disconnect()
177 |             self._hosts_age += 1
178 |             return rv
179 |
180 |     def disconnect_pools(self):
181 |         """Disconnects all connections from the internal pools."""
182 |         with self._lock:
183 |             for pool in itervalues(self._pools):
184 |                 pool.disconnect()
185 |             self._pools.clear()
186 |
187 |     def get_router(self):
188 |         """Returns the router for the cluster. If the cluster reconfigures
189 |         the router will be recreated. Usually you do not need to interface
190 |         with the router yourself as the cluster's routing client does that
191 |         automatically.
192 |
193 |         This returns an instance of :class:`BaseRouter`.
194 |         """
195 |         cached_router = self._router
196 |         ref_age = self._hosts_age
197 |
198 |         if cached_router is not None:
199 |             router, router_age = cached_router
200 |             if router_age == ref_age:
201 |                 return router
202 |
203 |         with self._lock:
204 |             router = self.router_cls(self, **(self.router_options or {}))
205 |             self._router = (router, ref_age)
206 |             return router
207 |
208 |     def get_pool_for_host(self, host_id):
209 |         """Returns the connection pool for the given host.
210 |
211 |         This connection pool is used by the redis clients to make sure
212 |         that it does not have to reconnect constantly. If you want to use
213 |         a custom redis client you can pass this in as connection pool
214 |         manually.
215 |         """
216 |         if isinstance(host_id, HostInfo):
217 |             host_info = host_id
218 |             host_id = host_info.host_id
219 |         else:
220 |             host_info = self.hosts.get(host_id)
221 |             if host_info is None:
222 |                 raise LookupError("Host %r does not exist" % (host_id,))
223 |
224 |         rv = self._pools.get(host_id)
225 |         if rv is not None:
226 |             return rv
227 |         with self._lock:
228 |             rv = self._pools.get(host_id)
229 |             if rv is None:
230 |                 opts = dict(self.pool_options or ())
231 |                 opts["db"] = host_info.db
232 |                 opts["password"] = host_info.password
233 |                 if host_info.unix_socket_path is not None:
234 |                     opts["path"] = host_info.unix_socket_path
235 |                     opts["connection_class"] = UnixDomainSocketConnection
236 |                     if host_info.ssl:
237 |                         raise TypeError(
238 |                             "SSL is not supported for unix domain sockets."
239 |                         )
240 |                 else:
241 |                     opts["host"] = host_info.host
242 |                     opts["port"] = host_info.port
243 |                     if host_info.ssl:
244 |                         if SSLConnection is None:
245 |                             raise TypeError(
246 |                                 "This version of py-redis does "
247 |                                 "not support SSL connections."
248 |                             )
249 |                         opts["connection_class"] = SSLConnection
250 |                         opts.update(
251 |                             ("ssl_" + k, v)
252 |                             for k, v in iteritems(host_info.ssl_options or {})
253 |                         )
254 |                 rv = self.pool_cls(**opts)
255 |                 self._pools[host_id] = rv
256 |             return rv
257 |
258 |     def get_local_client(self, host_id):
259 |         """Returns a localized client for a specific host ID. This client
260 |         works like a regular Python redis client and returns results
261 |         immediately.
262 |         """
263 |         return LocalClient(connection_pool=self.get_pool_for_host(host_id))
264 |
265 |     def get_local_client_for_key(self, key):
266 |         """Similar to :meth:`get_local_client` but returns the
267 |         client based on what the router says the key destination is.
268 |         """
269 |         return self.get_local_client(self.get_router().get_host_for_key(key))
270 |
271 |     def get_routing_client(self, auto_batch=True):
272 |         """Returns a routing client. This client is able to automatically
273 |         route the requests to the individual hosts. It's thread safe and
274 |         can be used similar to the host local client but it will refuse
275 |         to execute commands that cannot be directly routed to an
276 |         individual node.
277 |
278 |         The default behavior for the routing client is to attempt to batch
279 |         eligible commands into batch versions thereof. For instance multiple
280 |         `GET` commands routed to the same node can end up merged into an
281 |         `MGET` command. This behavior can be disabled by setting `auto_batch`
282 |         to `False`. This can be useful for debugging because `MONITOR` will
283 |         more accurately reflect the commands issued in code.
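
        A sketch of the effect (key names illustrative)::

            with cluster.map() as client:
                a = client.get('key:1')
                b = client.get('key:2')
            # with auto_batch=True the two GETs are sent as a single
            # MGET when both keys route to the same host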
284 |
285 |         See :class:`RoutingClient` for more information.
286 |         """
287 |         return RoutingClient(self, auto_batch=auto_batch)
288 |
289 |     def map(self, timeout=None, max_concurrency=64, auto_batch=True):
290 |         """Shortcut context manager for getting a routing client, beginning
291 |         a map operation and joining over the result. `max_concurrency`
292 |         defines how many outstanding parallel queries can exist before an
293 |         implicit join takes place.
294 |
295 |         In the context manager the client available is a
296 |         :class:`MappingClient`. Example usage::
297 |
298 |             results = {}
299 |             with cluster.map() as client:
300 |                 for key in keys_to_fetch:
301 |                     results[key] = client.get(key)
302 |             for key, promise in results.items():
303 |                 print('%s => %s' % (key, promise.value))
304 |         """
305 |         return self.get_routing_client(auto_batch).map(
306 |             timeout=timeout, max_concurrency=max_concurrency
307 |         )
308 |
309 |     def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_batch=True):
310 |         """Shortcut context manager for getting a routing client, beginning
311 |         a fanout operation and joining over the result.
312 |
313 |         In the context manager the client available is a
314 |         :class:`FanoutClient`. Example usage::
315 |
316 |             with cluster.fanout(hosts='all') as client:
317 |                 client.flushdb()
318 |         """
319 |         return self.get_routing_client(auto_batch).fanout(
320 |             hosts=hosts, timeout=timeout, max_concurrency=max_concurrency
321 |         )
322 |
323 |     def all(self, timeout=None, max_concurrency=64, auto_batch=True):
324 |         """Fanout to all hosts. Works otherwise exactly like :meth:`fanout`.
325 |
326 |         Example::
327 |
328 |             with cluster.all() as client:
329 |                 client.flushdb()
330 |         """
331 |         return self.fanout(
332 |             "all",
333 |             timeout=timeout,
334 |             max_concurrency=max_concurrency,
335 |             auto_batch=auto_batch,
336 |         )
337 |
338 |     def execute_commands(self, mapping, *args, **kwargs):
339 |         """Concurrently executes a sequence of commands on a Redis cluster that
340 |         are associated with a routing key, returning a new mapping where
341 |         values are a list of results that correspond to the command in the same
342 |         position. For example::
343 |
344 |             >>> cluster.execute_commands({
345 |             ...     'foo': [
346 |             ...         ('PING',),
347 |             ...         ('TIME',),
348 |             ...     ],
349 |             ...     'bar': [
350 |             ...         ('CLIENT', 'GETNAME'),
351 |             ...     ],
352 |             ... })
353 |             {'bar': [<Promise None>],
354 |              'foo': [<Promise True>, <Promise (1454446079, 418404)>]}
355 |
356 |         Commands that are instances of :class:`redis.client.Script` will first
357 |         be checked for their existence on the target nodes then loaded on the
358 |         targets before executing and can be interleaved with other commands::
359 |
360 |             >>> from redis.client import Script
361 |             >>> TestScript = Script(None, 'return {KEYS, ARGV}')
362 |             >>> cluster.execute_commands({
363 |             ...     'foo': [
364 |             ...         (TestScript, ('key:1', 'key:2'), range(0, 3)),
365 |             ...     ],
366 |             ...     'bar': [
367 |             ...         (TestScript, ('key:3', 'key:4'), range(3, 6)),
368 |             ...     ],
369 |             ... })
370 |             {'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>],
371 |              'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]}
372 |
373 |         Internally, :class:`FanoutClient` is used for issuing commands.
374 | """ 375 | 376 | def is_script_command(command): 377 | return isinstance(command[0], Script) 378 | 379 | def check_script_load_result(script, result): 380 | if script.sha != result: 381 | raise AssertionError( 382 | "Hash mismatch loading {!r}: expected {!r}, got {!r}".format( 383 | script, script.sha, result, 384 | ) 385 | ) 386 | 387 | # Run through all the commands and check to see if there are any 388 | # scripts, and whether or not they have been loaded onto the target 389 | # hosts. 390 | exists = {} 391 | with self.fanout(*args, **kwargs) as client: 392 | for key, commands in mapping.items(): 393 | targeted = client.target_key(key) 394 | for command in filter(is_script_command, commands): 395 | script = command[0] 396 | 397 | # Set the script hash if it hasn't already been set. 398 | if not script.sha: 399 | script.sha = sha1(script.script.encode("utf-8")).hexdigest() 400 | 401 | # Check if the script has been loaded on each host that it 402 | # will be executed on. 403 | for host in targeted._target_hosts: 404 | if script not in exists.setdefault(host, {}): 405 | exists[host][script] = targeted.execute_command( 406 | "SCRIPT EXISTS", script.sha 407 | ) 408 | 409 | # Execute the pending commands, loading scripts onto servers where they 410 | # do not already exist. 411 | results = {} 412 | with self.fanout(*args, **kwargs) as client: 413 | for key, commands in mapping.items(): 414 | results[key] = [] 415 | targeted = client.target_key(key) 416 | for command in commands: 417 | # If this command is a script, we need to check and see if 418 | # it needs to be loaded before execution. 419 | if is_script_command(command): 420 | script = command[0] 421 | for host in targeted._target_hosts: 422 | if script in exists[host]: 423 | result = exists[host].pop(script) 424 | if not result.value[0]: 425 | targeted.execute_command( 426 | "SCRIPT LOAD", script.script 427 | ).done( 428 | on_success=functools.partial( 429 | check_script_load_result, script 430 | ) 431 | ) 432 | keys, arguments = command[1:] 433 | parameters = list(keys) + list(arguments) 434 | results[key].append( 435 | targeted.execute_command( 436 | "EVALSHA", script.sha, len(keys), *parameters 437 | ) 438 | ) 439 | else: 440 | results[key].append(targeted.execute_command(*command)) 441 | 442 | return results 443 | -------------------------------------------------------------------------------- /rb/ketama.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import math 3 | 4 | from bisect import bisect 5 | 6 | from rb.utils import text_type, integer_types, bytes_type 7 | 8 | 9 | def md5_bytes(key): 10 | if isinstance(key, text_type): 11 | k = key.encode("utf-8") 12 | elif isinstance(key, integer_types): 13 | k = text_type(key).encode("utf-8") 14 | else: 15 | k = bytes_type(key) 16 | 17 | return bytearray(hashlib.md5(k).digest()) 18 | 19 | 20 | class Ketama(object): 21 | """This class implements the Ketama consistent hashing algorithm. 
22 | """ 23 | 24 | def __init__(self, nodes=None, weights=None): 25 | self._nodes = set(nodes or []) 26 | self._weights = weights if weights else {} 27 | 28 | self._rebuild_circle() 29 | 30 | def _rebuild_circle(self): 31 | """Updates the hash ring.""" 32 | self._hashring = {} 33 | self._sorted_keys = [] 34 | total_weight = 0 35 | for node in self._nodes: 36 | total_weight += self._weights.get(node, 1) 37 | 38 | for node in self._nodes: 39 | weight = self._weights.get(node, 1) 40 | 41 | ks = math.floor((40 * len(self._nodes) * weight) / total_weight) 42 | 43 | for i in range(0, int(ks)): 44 | k = md5_bytes("%s-%s-salt" % (node, i)) 45 | 46 | for l in range(0, 4): 47 | key = ( 48 | (k[3 + l * 4] << 24) 49 | | (k[2 + l * 4] << 16) 50 | | (k[1 + l * 4] << 8) 51 | | k[l * 4] 52 | ) 53 | self._hashring[key] = node 54 | self._sorted_keys.append(key) 55 | 56 | self._sorted_keys.sort() 57 | 58 | def _get_node_pos(self, key): 59 | """Return node position(integer) for a given key or None.""" 60 | if not self._hashring: 61 | return 62 | 63 | k = md5_bytes(key) 64 | key = (k[3] << 24) | (k[2] << 16) | (k[1] << 8) | k[0] 65 | 66 | nodes = self._sorted_keys 67 | pos = bisect(nodes, key) 68 | 69 | if pos == len(nodes): 70 | return 0 71 | return pos 72 | 73 | def remove_node(self, node): 74 | """Removes node from circle and rebuild it.""" 75 | try: 76 | self._nodes.remove(node) 77 | del self._weights[node] 78 | except (KeyError, ValueError): 79 | pass 80 | self._rebuild_circle() 81 | 82 | def add_node(self, node, weight=1): 83 | """Adds node to circle and rebuild it.""" 84 | self._nodes.add(node) 85 | self._weights[node] = weight 86 | self._rebuild_circle() 87 | 88 | def get_node(self, key): 89 | """Return node for a given key. Else return None.""" 90 | pos = self._get_node_pos(key) 91 | if pos is None: 92 | return None 93 | return self._hashring[self._sorted_keys[pos]] 94 | -------------------------------------------------------------------------------- /rb/poll.py: -------------------------------------------------------------------------------- 1 | import fcntl 2 | import array 3 | import select 4 | import termios 5 | 6 | 7 | class BasePoller(object): 8 | is_available = False 9 | 10 | def __init__(self): 11 | self.objects = {} 12 | 13 | def register(self, key, f): 14 | self.objects[key] = f 15 | 16 | def unregister(self, key): 17 | return self.objects.pop(key, None) 18 | 19 | def poll(self, timeout=None): 20 | raise NotImplementedError() 21 | 22 | def get(self, key): 23 | return self.objects.get(key) 24 | 25 | def __len__(self): 26 | return len(self.objects) 27 | 28 | def __iter__(self): 29 | # Make a copy when iterating so that modifications to this object 30 | # are possible while we're going over it. 
 31 |         return iter(list(self.objects.values()))
 32 |
 33 |
 34 | class SelectPoller(BasePoller):
 35 |     is_available = hasattr(select, "select")
 36 |
 37 |     def poll(self, timeout=None):
 38 |         objs = list(self.objects.values())
 39 |         rlist, wlist, xlist = select.select(objs, objs, [], timeout)
 40 |         if xlist:
 41 |             raise RuntimeError("Got unexpected OOB data")
 42 |         return [(x, "read") for x in rlist] + [(x, "write") for x in wlist]
 43 |
 44 |
 45 | class PollPoller(BasePoller):
 46 |     is_available = hasattr(select, "poll")
 47 |
 48 |     def __init__(self):
 49 |         BasePoller.__init__(self)
 50 |         self.pollobj = select.poll()
 51 |         self.fd_to_object = {}
 52 |
 53 |     def register(self, key, f):
 54 |         BasePoller.register(self, key, f)
 55 |         self.pollobj.register(
 56 |             f.fileno(), select.POLLIN | select.POLLOUT | select.POLLHUP
 57 |         )
 58 |         self.fd_to_object[f.fileno()] = f
 59 |
 60 |     def unregister(self, key):
 61 |         rv = BasePoller.unregister(self, key)
 62 |         if rv is not None:
 63 |             self.pollobj.unregister(rv.fileno())
 64 |             self.fd_to_object.pop(rv.fileno(), None)
 65 |         return rv
 66 |
 67 |     def poll(self, timeout=None):
 68 |         rv = []
 69 |         for fd, event in self.pollobj.poll(timeout):
 70 |             obj = self.fd_to_object[fd]
 71 |             if event & select.POLLIN:
 72 |                 rv.append((obj, "read"))
 73 |             if event & select.POLLOUT:
 74 |                 rv.append((obj, "write"))
 75 |             if event & select.POLLHUP:
 76 |                 rv.append((obj, "close"))
 77 |         return rv
 78 |
 79 |
 80 | class KQueuePoller(BasePoller):
 81 |     is_available = hasattr(select, "kqueue")
 82 |
 83 |     def __init__(self):
 84 |         BasePoller.__init__(self)
 85 |         self.kqueue = select.kqueue()
 86 |         self.events = []
 87 |         self.event_to_object = {}
 88 |
 89 |     def register(self, key, f):
 90 |         BasePoller.register(self, key, f)
 91 |         r_event = select.kevent(
 92 |             f.fileno(),
 93 |             filter=select.KQ_FILTER_READ,
 94 |             flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,
 95 |         )
 96 |         self.events.append(r_event)
 97 |         w_event = select.kevent(
 98 |             f.fileno(),
 99 |             filter=select.KQ_FILTER_WRITE,
100 |             flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,
101 |         )
102 |         self.events.append(w_event)
103 |         self.event_to_object[f.fileno()] = f
104 |
105 |     def unregister(self, key):
106 |         rv = BasePoller.unregister(self, key)
107 |         if rv is not None:
108 |             fd = rv.fileno()
109 |             self.events = [x for x in self.events if x.ident != fd]
110 |             self.event_to_object.pop(fd, None)
111 |         return rv
112 |
113 |     def poll(self, timeout=None):
114 |         events = self.kqueue.control(self.events, 128, timeout)
115 |         rv = []
116 |         for ev in events:
117 |             obj = self.event_to_object.get(ev.ident)
118 |             if obj is None:
119 |                 # It happens surprisingly frequently that kqueue returns
120 |                 # write events for things no longer in the kqueue. Not sure
121 |                 # why.
122 |                 continue
123 |             if ev.filter == select.KQ_FILTER_READ:
124 |                 rv.append((obj, "read"))
125 |             elif ev.filter == select.KQ_FILTER_WRITE:
126 |                 rv.append((obj, "write"))
127 |             if ev.flags & select.KQ_EV_EOF:
128 |                 rv.append((obj, "close"))
129 |         return rv
130 |
131 |
132 | class EpollPoller(BasePoller):
133 |     is_available = hasattr(select, "epoll")
134 |
135 |     def __init__(self):
136 |         BasePoller.__init__(self)
137 |         self.epoll = select.epoll()
138 |         self.fd_to_object = {}
139 |
140 |     def register(self, key, f):
141 |         BasePoller.register(self, key, f)
142 |         self.epoll.register(
143 |             f.fileno(), select.EPOLLIN | select.EPOLLHUP | select.EPOLLOUT
144 |         )
145 |         self.fd_to_object[f.fileno()] = f
146 |
147 |     def unregister(self, key):
148 |         rv = BasePoller.unregister(self, key)
149 |         if rv is not None:
150 |             self.epoll.unregister(rv.fileno())
151 |             self.fd_to_object.pop(rv.fileno(), None)
152 |         return rv
153 |
154 |     def poll(self, timeout=None):
155 |         if timeout is None:
156 |             timeout = -1
157 |         rv = []
158 |         for fd, event in self.epoll.poll(timeout):
159 |             obj = self.fd_to_object[fd]
160 |             if event & select.EPOLLIN:
161 |                 rv.append((obj, "read"))
162 |             if event & select.EPOLLOUT:
163 |                 rv.append((obj, "write"))
164 |             if event & select.EPOLLHUP:
165 |                 rv.append((obj, "close"))
166 |         return rv
167 |
168 |
169 | def _is_closed_select(f):
170 |     rlist, _, _ = select.select([f], [], [], 0.0)
171 |     if not rlist:
172 |         return False
173 |     buf = array.array("i", [0])
174 |     fcntl.ioctl(f.fileno(), termios.FIONREAD, buf)
175 |     return buf[0] == 0
176 |
177 |
178 | def _is_closed_poll(f):
179 |     poll = select.poll()
180 |     poll.register(f.fileno(), select.POLLHUP)
181 |     for _, event in poll.poll(0.0):
182 |         if event & select.POLLHUP:
183 |             return True
184 |     return False
185 |
186 |
187 | def _is_closed_kqueue(f):
188 |     kqueue = select.kqueue()
189 |     event = select.kevent(
190 |         f.fileno(),
191 |         filter=select.KQ_FILTER_READ,
192 |         flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,
193 |     )
194 |     for event in kqueue.control([event], 128, 0.0):
195 |         if event.flags & select.KQ_EV_EOF:
196 |             return True
197 |     return False
198 |
199 |
200 | def is_closed(f):
201 |     if KQueuePoller.is_available:
202 |         return _is_closed_kqueue(f)
203 |     if PollPoller.is_available:
204 |         return _is_closed_poll(f)
205 |     return _is_closed_select(f)
206 |
207 |
208 | available_pollers = [
209 |     poll
210 |     for poll in [KQueuePoller, PollPoller, EpollPoller, SelectPoller]
211 |     if poll.is_available
212 | ]
213 | poll = available_pollers[0]
--------------------------------------------------------------------------------
/rb/promise.py:
--------------------------------------------------------------------------------
  1 | from functools import partial
  2 |
  3 | from rb.utils import iteritems
  4 |
  5 |
  6 | class Promise(object):
  7 |     """A promise object that attempts to mirror the ES6 promise APIs.
  8 |     Unlike ES6 promises, however, it also gives direct access to the
  9 |     underlying value, and it has slightly different static method
 10 |     names, as this promise can be resolved externally.
 11 |     """
 12 |
 13 |     __slots__ = ("value", "reason", "_state", "_callbacks", "_errbacks")
 14 |
 15 |     def __init__(self):
 16 |         #: the value that this promise holds if it's resolved.
 17 |         self.value = None
 18 |         #: the reason for this promise if it's rejected.
19 | self.reason = None 20 | self._state = "pending" 21 | self._callbacks = [] 22 | self._errbacks = [] 23 | 24 | @staticmethod 25 | def resolved(value): 26 | """Creates a promise object resolved with a certain value.""" 27 | p = Promise() 28 | p._state = "resolved" 29 | p.value = value 30 | return p 31 | 32 | @staticmethod 33 | def rejected(reason): 34 | """Creates a promise object rejected with a certain reason.""" 35 | p = Promise() 36 | p._state = "rejected" 37 | p.reason = reason 38 | return p 39 | 40 | @staticmethod 41 | def all(iterable_or_dict): 42 | """Creates a promise that resolves when all passed promises 43 | resolve. You can pass either a list or a dictionary of promises. 44 | """ 45 | if isinstance(iterable_or_dict, dict): 46 | return _promise_from_dict(iterable_or_dict) 47 | return _promise_from_iterable(iterable_or_dict) 48 | 49 | def resolve(self, value): 50 | """Resolves the promise with the given value.""" 51 | if self is value: 52 | raise TypeError("Cannot resolve promise with itself.") 53 | 54 | if isinstance(value, Promise): 55 | value.done(self.resolve, self.reject) 56 | return 57 | 58 | if self._state != "pending": 59 | raise RuntimeError("Promise is no longer pending.") 60 | 61 | self.value = value 62 | self._state = "resolved" 63 | callbacks = self._callbacks 64 | self._callbacks = None 65 | for callback in callbacks: 66 | callback(value) 67 | 68 | def reject(self, reason): 69 | """Rejects the promise with the given reason.""" 70 | if self._state != "pending": 71 | raise RuntimeError("Promise is no longer pending.") 72 | 73 | self.reason = reason 74 | self._state = "rejected" 75 | errbacks = self._errbacks 76 | self._errbacks = None 77 | for errback in errbacks: 78 | errback(reason) 79 | 80 | @property 81 | def is_pending(self): 82 | """`True` if the promise is still pending, `False` otherwise.""" 83 | return self._state == "pending" 84 | 85 | @property 86 | def is_resolved(self): 87 | """`True` if the promise was resolved, `False` otherwise.""" 88 | return self._state == "resolved" 89 | 90 | @property 91 | def is_rejected(self): 92 | """`True` if the promise was rejected, `False` otherwise.""" 93 | return self._state == "rejected" 94 | 95 | def done(self, on_success=None, on_failure=None): 96 | """Attaches some callbacks to the promise and returns the promise.""" 97 | if on_success is not None: 98 | if self._state == "pending": 99 | self._callbacks.append(on_success) 100 | elif self._state == "resolved": 101 | on_success(self.value) 102 | if on_failure is not None: 103 | if self._state == "pending": 104 | self._errbacks.append(on_failure) 105 | elif self._state == "rejected": 106 | on_failure(self.reason) 107 | return self 108 | 109 | def then(self, success=None, failure=None): 110 | """Adds success and/or failure callbacks to the promise and 111 | returns a new promise that settles with the callback's result.
112 | """ 113 | rv = Promise() 114 | 115 | def on_success(v): 116 | try: 117 | rv.resolve(success(v)) 118 | except Exception as e: 119 | rv.reject(e) 120 | 121 | def on_failure(r): 122 | try: 123 | rv.resolve(failure(r)) 124 | except Exception as e: 125 | rv.reject(e) 126 | 127 | self.done(on_success, on_failure) 128 | return rv 129 | 130 | def __repr__(self): 131 | if self._state == "pending": 132 | v = "(pending)" 133 | elif self._state == "rejected": 134 | v = repr(self.reason) + " (rejected)" 135 | else: 136 | v = repr(self.value) 137 | return "<%s %s>" % (self.__class__.__name__, v,) 138 | 139 | 140 | def _ensure_promise(value): 141 | return value if isinstance(value, Promise) else Promise.resolved(value) 142 | 143 | 144 | def _promise_from_iterable(iterable): 145 | l = [_ensure_promise(x) for x in iterable] 146 | if not l: 147 | return Promise.resolved([]) 148 | 149 | pending = set(l) 150 | rv = Promise() 151 | 152 | def on_success(promise, value): 153 | pending.discard(promise) 154 | if not pending: 155 | rv.resolve([p.value for p in l]) 156 | 157 | for promise in l: 158 | promise.done(partial(on_success, promise), rv.reject) 159 | 160 | return rv 161 | 162 | 163 | def _promise_from_dict(d): 164 | d = dict((k, _ensure_promise(v)) for k, v in iteritems(d)) 165 | if not d: 166 | return Promise.resolved({}) 167 | 168 | pending = set(d.keys()) 169 | rv = Promise() 170 | 171 | def on_success(key, value): 172 | pending.discard(key) 173 | if not pending: 174 | rv.resolve(dict((k, p.value) for k, p in iteritems(d))) 175 | 176 | for key, promise in iteritems(d): 177 | promise.done(partial(on_success, key), rv.reject) 178 | 179 | return rv 180 | -------------------------------------------------------------------------------- /rb/router.py: -------------------------------------------------------------------------------- 1 | from weakref import ref as weakref 2 | 3 | from rb.ketama import Ketama 4 | from rb.utils import text_type, bytes_type, integer_types, crc32 5 | from rb._rediscommands import COMMANDS 6 | 7 | 8 | class UnroutableCommand(Exception): 9 | """Raised if a command was issued that cannot be routed through the 10 | router to a single host. 11 | """ 12 | 13 | 14 | class BadHostSetup(Exception): 15 | """Raised if the cluster's host setup is not compatible with the 16 | router. 17 | """ 18 | 19 | 20 | def extract_keys(args, key_spec): 21 | first, last, step = key_spec 22 | 23 | rv = [] 24 | for idx, arg in enumerate(args, 1): 25 | if last >= 0 and idx > last: 26 | break 27 | if idx >= first and ((idx - first) % step) == 0: 28 | rv.append(arg) 29 | return rv 30 | 31 | 32 | def assert_gapless_hosts(hosts): 33 | if not hosts: 34 | raise BadHostSetup("No hosts were configured.") 35 | for x in range(len(hosts)): 36 | if hosts.get(x) is None: 37 | raise BadHostSetup( 38 | 'Expected host with ID "%d" but no such ' "host was found." % x 39 | ) 40 | 41 | 42 | class BaseRouter(object): 43 | """Baseclass for all routers. If you want to implement a custom router 44 | this is what you subclass. 45 | """ 46 | 47 | def __init__(self, cluster): 48 | # this is a weakref because the router is cached on the cluster 49 | # and otherwise we end up in circular reference land and we are 50 | # having problems being garbage collected. 
51 | self._cluster = weakref(cluster) 52 | 53 | @property 54 | def cluster(self): 55 | """Reference back to the :class:`Cluster` this router belongs to.""" 56 | rv = self._cluster() 57 | if rv is None: 58 | raise RuntimeError("Cluster went away") 59 | return rv 60 | 61 | def get_key(self, command, args): 62 | """Returns the key a command operates on.""" 63 | spec = COMMANDS.get(command.upper()) 64 | 65 | if spec is None: 66 | raise UnroutableCommand( 67 | 'The command "%r" is unknown to the ' 68 | "router and cannot be handled as a " 69 | "result." % command 70 | ) 71 | 72 | if "movablekeys" in spec["flags"]: 73 | raise UnroutableCommand( 74 | 'The keys for "%r" are movable and ' 75 | "as such cannot be routed to a single " 76 | "host." % command 77 | ) 78 | 79 | keys = extract_keys(args, spec["key_spec"]) 80 | if len(keys) == 1: 81 | return keys[0] 82 | elif not keys: 83 | raise UnroutableCommand( 84 | 'The command "%r" does not operate on a key which means ' 85 | "that no suitable host could be determined. Consider " 86 | "using a fanout instead." % command 87 | ) 88 | 89 | raise UnroutableCommand( 90 | 'The command "%r" operates on multiple keys (%d passed) which is ' 91 | "something that is not supported." % (command, len(keys)) 92 | ) 93 | 94 | def get_host_for_command(self, command, args): 95 | """Returns the host this command should be executed against.""" 96 | return self.get_host_for_key(self.get_key(command, args)) 97 | 98 | def get_host_for_key(self, key): 99 | """Perform routing and return host_id of the target. 100 | 101 | Subclasses need to implement this. 102 | """ 103 | raise NotImplementedError() 104 | 105 | 106 | class ConsistentHashingRouter(BaseRouter): 107 | """Router that returns the host_id based on a consistent hashing 108 | algorithm. The consistent hashing algorithm only works if a key 109 | argument is provided. 110 | 111 | This router requires that the hosts are gapless, which means that 112 | the IDs for N hosts range from 0 to N-1. 113 | """ 114 | 115 | def __init__(self, cluster): 116 | BaseRouter.__init__(self, cluster) 117 | self._host_id_id_map = dict(self.cluster.hosts.items()) 118 | self._hash = Ketama(self._host_id_id_map.values()) 119 | assert_gapless_hosts(self.cluster.hosts) 120 | 121 | def get_host_for_key(self, key): 122 | rv = self._hash.get_node(key) 123 | if rv is None: 124 | raise UnroutableCommand("Did not find a suitable host for the key.") 125 | return rv 126 | 127 | 128 | class PartitionRouter(BaseRouter): 129 | """A straightforward router that just individually routes commands to 130 | single nodes based on a simple ``crc32 % node_count`` setup. 131 | 132 | This router requires that the hosts are gapless, which means that 133 | the IDs for N hosts range from 0 to N-1.
134 | """ 135 | 136 | def __init__(self, cluster): 137 | BaseRouter.__init__(self, cluster) 138 | assert_gapless_hosts(self.cluster.hosts) 139 | 140 | def get_host_for_key(self, key): 141 | if isinstance(key, text_type): 142 | k = key.encode("utf-8") 143 | elif isinstance(key, integer_types): 144 | k = text_type(key).encode("utf-8") 145 | else: 146 | k = bytes_type(key) 147 | return crc32(k) % len(self.cluster.hosts) 148 | -------------------------------------------------------------------------------- /rb/testing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import uuid 4 | import shutil 5 | import socket 6 | import tempfile 7 | 8 | from contextlib import contextmanager 9 | from subprocess import Popen, PIPE 10 | 11 | from rb.cluster import Cluster 12 | from rb.utils import itervalues 13 | 14 | devnull = open(os.devnull, "r+") 15 | 16 | 17 | class Server(object): 18 | def __init__(self, cl, socket_path): 19 | self._cl = cl 20 | self.socket_path = socket_path 21 | 22 | def test_connection(self): 23 | try: 24 | s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 25 | s.connect(self.socket_path) 26 | except IOError: 27 | return False 28 | return True 29 | 30 | def signal_stop(self): 31 | if self._cl is not None: 32 | self._cl.kill() 33 | 34 | def close(self): 35 | if self._cl is not None: 36 | self.signal_stop() 37 | self._cl.wait() 38 | self._cl = None 39 | try: 40 | os.remove(self.socket_path) 41 | except OSError: 42 | pass 43 | 44 | 45 | class TestSetup(object): 46 | """The test setup is a convenient way to spawn multiple redis servers 47 | for testing and to shut them down automatically. This can be used as 48 | a context manager to automatically terminate the clients. 49 | """ 50 | 51 | def __init__(self, servers=4, databases_each=8, server_executable="redis-server"): 52 | self._fd_dir = tempfile.mkdtemp() 53 | self.databases_each = databases_each 54 | self.server_executable = server_executable 55 | self.servers = [] 56 | 57 | for server in range(servers): 58 | self.spawn_server() 59 | 60 | def __enter__(self): 61 | return self 62 | 63 | def __exit__(self, exc_type, exc_value, tb): 64 | self.close() 65 | 66 | def make_cluster(self): 67 | """Creates a correctly configured cluster from the servers 68 | spawned. This also automatically waits for the servers to be up. 
69 | """ 70 | self.wait_for_servers() 71 | hosts = [] 72 | host_id = 0 73 | for server in self.servers: 74 | for x in range(self.databases_each): 75 | hosts.append( 76 | { 77 | "host_id": host_id, 78 | "unix_socket_path": server.socket_path, 79 | "db": x, 80 | } 81 | ) 82 | host_id += 1 83 | return Cluster( 84 | hosts, pool_options={"encoding": "utf-8", "decode_responses": True} 85 | ) 86 | 87 | def spawn_server(self): 88 | """Spawns a new server and adds it to the pool.""" 89 | socket_path = os.path.join(self._fd_dir, str(uuid.uuid4())) 90 | cl = Popen([self.server_executable, "-"], stdin=PIPE, stdout=devnull) 91 | cl.stdin.write( 92 | ( 93 | """ 94 | port 0 95 | unixsocket %(path)s 96 | databases %(databases)d 97 | save "" 98 | """ 99 | % {"path": socket_path, "databases": self.databases_each,} 100 | ).encode("utf-8") 101 | ) 102 | cl.stdin.flush() 103 | cl.stdin.close() 104 | self.servers.append(Server(cl, socket_path)) 105 | 106 | def wait_for_servers(self, timeout=10): 107 | """Waits for all servers to to be up and running.""" 108 | unconnected_servers = dict((x.socket_path, x) for x in self.servers) 109 | now = time.time() 110 | while unconnected_servers: 111 | for server in itervalues(unconnected_servers): 112 | if server.test_connection(): 113 | unconnected_servers.pop(server.socket_path, None) 114 | break 115 | if time.time() > now + timeout: 116 | return False 117 | if unconnected_servers: 118 | time.sleep(0.05) 119 | 120 | return True 121 | 122 | def close(self): 123 | """Closes the test setup which shuts down all redis servers.""" 124 | for server in self.servers: 125 | server.signal_stop() 126 | for server in self.servers: 127 | server.close() 128 | try: 129 | shutil.rmtree(self._fd_dir) 130 | except (OSError, IOError): 131 | pass 132 | 133 | def __del__(self): 134 | try: 135 | self.close() 136 | except Exception: 137 | pass 138 | 139 | 140 | @contextmanager 141 | def make_test_cluster(*args, **kwargs): 142 | """Convenient shortcut for creating a test setup and then a cluster 143 | from it. This must be used as a context manager:: 144 | 145 | from rb.testing import make_test_cluster 146 | with make_test_cluster() as cluster: 147 | ... 148 | """ 149 | with TestSetup(*args, **kwargs) as ts: 150 | cluster = ts.make_cluster() 151 | try: 152 | yield cluster 153 | finally: 154 | cluster.disconnect_pools() 155 | -------------------------------------------------------------------------------- /rb/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import sys 4 | 5 | PY2 = sys.version_info[0] == 2 6 | 7 | if PY2: 8 | integer_types = (int, long) 9 | text_type = unicode 10 | bytes_type = str 11 | 12 | def iteritems(d, **kw): 13 | return iter(d.iteritems(**kw)) 14 | 15 | def itervalues(d, **kw): 16 | return iter(d.itervalues(**kw)) 17 | 18 | from itertools import izip 19 | 20 | from binascii import crc32 21 | else: 22 | integer_types = (int,) 23 | text_type = str 24 | bytes_type = bytes 25 | 26 | izip = zip 27 | 28 | def iteritems(d, **kw): 29 | return iter(d.items(**kw)) 30 | 31 | def itervalues(d, **kw): 32 | return iter(d.values(**kw)) 33 | 34 | from binascii import crc32 as _crc32 35 | 36 | # In python3 crc32 was changed to never return a signed value, which is 37 | # different from the python2 implementation. 
38 | # https://docs.python.org/3/library/binascii.html#binascii.crc32 39 | # 40 | # The documentation suggests the following: 41 | # 42 | # > Changed in version 3.0: The result is always unsigned. To generate the 43 | # > same numeric value across all Python versions and platforms, use 44 | # > crc32(data) & 0xffffffff. 45 | # 46 | # However, this will not work when transitioning between versions, as the 47 | # value MUST match what was generated in python 2. 48 | # 49 | # We can sign the return value using the following bit math to ensure we 50 | # match the python2 output of crc32. 51 | def crc32(*args): 52 | rt = _crc32(*args) 53 | return rt - ((rt & 0x80000000) << 1) 54 | -------------------------------------------------------------------------------- /scripts/bump-version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 5 | cd "$SCRIPT_DIR/.." 6 | 7 | OLD_VERSION="$1" 8 | NEW_VERSION="$2" 9 | 10 | sed -i -e "s/^__version__ = "'".*"'"\$/__version__ = "'"'"$NEW_VERSION"'"'"/" rb/__init__.py 11 | 12 | echo "New version: $NEW_VERSION" 13 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal = 1 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import re 2 | import ast 3 | import os 4 | from setuptools import setup 5 | 6 | 7 | _version_re = re.compile(r"__version__\s+=\s+(.*)") 8 | 9 | 10 | with open("rb/__init__.py", "rb") as f: 11 | version = str( 12 | ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)) 13 | ) 14 | 15 | install_requires = ["redis>=2.6,!=3.4.0"] 16 | 17 | # override the redis version in install_requires if REDIS_VERSION is set 18 | REDIS_VERSION = os.environ.get('REDIS_VERSION') 19 | if REDIS_VERSION: 20 | install_requires = [ 21 | u'redis{}'.format(REDIS_VERSION) 22 | if r.startswith('redis>=') else r 23 | for r in install_requires 24 | ] 25 | 26 | 27 | setup( 28 | name="rb", 29 | author="Functional Software Inc.", 30 | author_email="hello@getsentry.com", 31 | version=version, 32 | url="http://github.com/getsentry/rb", 33 | packages=["rb"], 34 | description="rb, the redis blaster", 35 | install_requires=install_requires, 36 | classifiers=[ 37 | "License :: OSI Approved :: Apache Software License", 38 | "Programming Language :: Python", 39 | ], 40 | ) 41 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from rb.testing import make_test_cluster 4 | 5 | 6 | @pytest.fixture 7 | def cluster(request): 8 | mgr = make_test_cluster() 9 | cluster = mgr.__enter__() 10 | 11 | @request.addfinalizer 12 | def cleanup(): 13 | mgr.__exit__(None, None, None) 14 | 15 | return cluster 16 | -------------------------------------------------------------------------------- /tests/test_cluster.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | 4 | import redis 5 | from redis.exceptions import ResponseError 6 | 7 | from rb.cluster import Cluster 8 | from rb.router import UnroutableCommand 9 | from rb.promise import Promise
10 | from rb.utils import text_type 11 | 12 | try: 13 | from redis.commands.core import Script 14 | except ImportError: 15 | from redis.client import Script 16 | 17 | 18 | def test_basic_interface(): 19 | cluster = Cluster( 20 | {0: {"db": 0}, 1: {"db": 2}, 2: {"db": 4, "host": "127.0.0.1"},}, 21 | host_defaults={"password": "pass",}, 22 | pool_options={"encoding": "utf-8", "decode_responses": True}, 23 | ) 24 | 25 | assert len(cluster.hosts) == 3 26 | 27 | assert cluster.hosts[0].host_id == 0 28 | assert cluster.hosts[0].db == 0 29 | assert cluster.hosts[0].host == "localhost" 30 | assert cluster.hosts[0].port == 6379 31 | assert cluster.hosts[0].password == "pass" 32 | 33 | assert cluster.hosts[1].host_id == 1 34 | assert cluster.hosts[1].db == 2 35 | assert cluster.hosts[1].host == "localhost" 36 | assert cluster.hosts[1].port == 6379 37 | assert cluster.hosts[1].password == "pass" 38 | 39 | assert cluster.hosts[2].host_id == 2 40 | assert cluster.hosts[2].db == 4 41 | assert cluster.hosts[2].host == "127.0.0.1" 42 | assert cluster.hosts[2].port == 6379 43 | assert cluster.hosts[2].password == "pass" 44 | 45 | 46 | def test_router_access(): 47 | cluster = Cluster( 48 | {0: {"db": 0},}, pool_options={"encoding": "utf-8", "decode_responses": True} 49 | ) 50 | 51 | router = cluster.get_router() 52 | assert router.cluster is cluster 53 | assert cluster.get_router() is router 54 | 55 | cluster.add_host(1, {"db": 1}) 56 | new_router = cluster.get_router() 57 | assert new_router is not router 58 | 59 | 60 | def test_basic_cluster(cluster): 61 | iterations = 10000 62 | 63 | with cluster.map() as client: 64 | for x in range(iterations): 65 | client.set("key-%06d" % x, x) 66 | responses = [] 67 | with cluster.map() as client: 68 | for x in range(iterations): 69 | responses.append(client.get("key-%06d" % x)) 70 | ref_sum = sum(int(x.value) for x in responses) 71 | assert ref_sum == sum(range(iterations)) 72 | 73 | 74 | def test_basic_cluster_disabled_batch(cluster): 75 | iterations = 10000 76 | 77 | with cluster.map(auto_batch=False) as client: 78 | for x in range(iterations): 79 | client.set("key-%06d" % x, x) 80 | responses = [] 81 | with cluster.map(auto_batch=False) as client: 82 | for x in range(iterations): 83 | responses.append(client.get("key-%06d" % x)) 84 | ref_sum = sum(int(x.value) for x in responses) 85 | assert ref_sum == sum(range(iterations)) 86 | 87 | 88 | def make_zset_data(x): 89 | return [(str(i), float(i)) for i in range(x, x + 10)] 90 | 91 | 92 | def test_simple_api(cluster): 93 | client = cluster.get_routing_client() 94 | with client.map() as map_client: 95 | for x in range(10): 96 | map_client.set("key:%d" % x, x) 97 | if redis.VERSION >= (3, 0, 0): 98 | map_client.zadd("zset:%d" % x, dict(make_zset_data(x))) 99 | else: 100 | map_client.zadd("zset:%d" % x, **dict(make_zset_data(x))) 101 | 102 | for x in range(10): 103 | assert client.get("key:%d" % x) == str(x) 104 | assert client.zrange("zset:%d" % x, 0, -1, withscores=True) == make_zset_data(x) 105 | 106 | results = [] # (promise, expected result) 107 | with client.map() as map_client: 108 | for x in range(10): 109 | results.append( 110 | ( 111 | map_client.zrange("zset:%d" % x, 0, -1, withscores=True), 112 | make_zset_data(x), 113 | ) 114 | ) 115 | 116 | for promise, expectation in results: 117 | assert promise.value == expectation 118 | 119 | with client.map() as map_client: 120 | for x in range(10): 121 | map_client.delete("key:%d" % x) 122 | 123 | for x in range(10): 124 | assert client.get("key:%d" % x) is None 125 | 
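The map tests above lean on one property worth making explicit: promises handed out inside a ``cluster.map()`` block are only guaranteed to be settled once the block exits and outstanding requests are joined. A minimal sketch of that lifecycle, assuming the ``cluster`` fixture from conftest.py (the key name is hypothetical)::

    with cluster.map() as map_client:
        promise = map_client.set("example-key", "1")
        # the command may still be buffered at this point, so the
        # promise can legitimately be pending inside the block
    assert not promise.is_pending
    assert cluster.get_routing_client().get("example-key") == "1"

This is also why ``test_basic_cluster`` above only sums its response promises after the map block has closed.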
126 | 127 | def test_routing_client_releases_connection_on_error(cluster): 128 | client = cluster.get_routing_client() 129 | with pytest.raises(ResponseError): 130 | client.sadd("key") 131 | 132 | host = cluster.get_router().get_host_for_command("sadd", ["key"]) 133 | pool = cluster.get_pool_for_host(host) 134 | assert len(pool._available_connections) == pool._created_connections 135 | 136 | 137 | def test_mapping_client_releases_connection_on_error(cluster): 138 | client = cluster.get_routing_client().get_mapping_client() 139 | client.sadd("key") 140 | with pytest.raises(ResponseError): 141 | client.join() 142 | 143 | host = cluster.get_router().get_host_for_command("sadd", ["key"]) 144 | pool = cluster.get_pool_for_host(host) 145 | assert len(pool._available_connections) == pool._created_connections 146 | 147 | 148 | def test_managed_mapping_client_releases_connection_on_error(cluster): 149 | with pytest.raises(ResponseError): 150 | with cluster.get_routing_client().map() as client: 151 | client.sadd("key") 152 | 153 | host = cluster.get_router().get_host_for_command("sadd", ["key"]) 154 | pool = cluster.get_pool_for_host(host) 155 | assert len(pool._available_connections) == pool._created_connections 156 | 157 | 158 | def test_multi_keys_rejected(cluster): 159 | client = cluster.get_routing_client() 160 | 161 | # Okay 162 | with client.map() as map_client: 163 | map_client.delete("key") 164 | 165 | # Not okay 166 | with client.map() as map_client: 167 | with pytest.raises(UnroutableCommand): 168 | map_client.delete("key1", "key2") 169 | 170 | 171 | def test_promise_api(cluster): 172 | results = [] 173 | with cluster.map() as client: 174 | for x in range(10): 175 | client.set("key-%d" % x, x) 176 | for x in range(10): 177 | client.get("key-%d" % x).then(lambda x: results.append(int(x))) 178 | assert sorted(results) == list(range(10)) 179 | 180 | 181 | def test_fanout_api(cluster): 182 | for host_id in cluster.hosts: 183 | client = cluster.get_local_client(host_id) 184 | client.set("foo", str(host_id)) 185 | if redis.VERSION >= (3, 0, 0): 186 | client.zadd("zset", dict(make_zset_data(host_id))) 187 | else: 188 | client.zadd("zset", **dict(make_zset_data(host_id))) 189 | 190 | with cluster.fanout(hosts="all") as client: 191 | get_result = client.get("foo") 192 | zrange_result = client.zrange("zset", 0, -1, withscores=True) 193 | 194 | for host_id in cluster.hosts: 195 | assert get_result.value[host_id] == str(host_id) 196 | assert zrange_result.value[host_id] == make_zset_data(host_id) 197 | 198 | 199 | def test_fanout_key_target(cluster): 200 | with cluster.fanout() as client: 201 | c = client.target_key("foo") 202 | c.set("foo", "42") 203 | promise = c.get("foo") 204 | assert promise.value == "42" 205 | 206 | client = cluster.get_routing_client() 207 | assert client.get("foo") == "42" 208 | 209 | 210 | def test_fanout_targeting_api(cluster): 211 | with cluster.fanout() as client: 212 | client.target(hosts=[0, 1]).set("foo", 42) 213 | rv = client.target(hosts="all").get("foo") 214 | 215 | assert list(rv.value.values()).count("42") == 2 216 | 217 | # Without hosts this should fail 218 | with cluster.fanout() as client: 219 | pytest.raises(RuntimeError, client.get, "bar") 220 | 221 | 222 | def test_emulated_batch_apis(cluster): 223 | with cluster.map() as map_client: 224 | promise = map_client.mset(dict(("key:%s" % x, x) for x in range(10))) 225 | assert promise.value is None 226 | with cluster.map() as map_client: 227 | promise = map_client.mget(["key:%s" % x for x in range(10)]) 228 | 
assert promise.value == list(map(text_type, range(10))) 229 | 230 | 231 | def test_batch_promise_all(cluster): 232 | with cluster.map() as client: 233 | client.set("1", "a") 234 | client.set("2", "b") 235 | client.set("3", "c") 236 | client.set("4", "d") 237 | client.hset("a", "b", "XXX") 238 | 239 | with cluster.map() as client: 240 | rv = Promise.all( 241 | [client.mget("1", "2"), client.hget("a", "b"), client.mget("3", "4"),] 242 | ) 243 | assert rv.value == [["a", "b"], "XXX", ["c", "d"]] 244 | 245 | 246 | def test_execute_commands(cluster): 247 | TestScript = Script(cluster.get_local_client(0), "return {KEYS, ARGV}",) 248 | 249 | # XXX: redis<2.10.6 didn't require that a ``Script`` be instantiated with a 250 | # valid client as part of the constructor, which resulted in the SHA not 251 | # actually being set until the script was executed. To ensure the legacy 252 | # behavior still works, we manually unset the cached SHA before executing. 253 | actual_script_hash = TestScript.sha 254 | TestScript.sha = None 255 | 256 | results = cluster.execute_commands( 257 | { 258 | "foo": [ 259 | ("SET", "foo", "1"), 260 | (TestScript, ("key",), ("value",)), 261 | ("GET", "foo"), 262 | ], 263 | "bar": [ 264 | ("INCRBY", "bar", "2"), 265 | (TestScript, ("key",), ("value",)), 266 | ("GET", "bar"), 267 | ], 268 | } 269 | ) 270 | 271 | assert TestScript.sha == actual_script_hash 272 | 273 | assert results["foo"][0].value 274 | assert results["foo"][1].value == [["key"], ["value"]] 275 | assert results["foo"][2].value == "1" 276 | 277 | assert results["bar"][0].value == 2 278 | assert results["bar"][1].value == [["key"], ["value"]] 279 | assert results["bar"][2].value == "2" 280 | 281 | 282 | def test_reconnect(cluster): 283 | with cluster.map() as client: 284 | for x in range(10): 285 | client.set(text_type(x), text_type(x)) 286 | 287 | with cluster.all() as client: 288 | client.config_set("timeout", 1) 289 | 290 | time.sleep(2) 291 | 292 | with cluster.map() as client: 293 | rv = Promise.all([client.get(text_type(x)) for x in range(10)]) 294 | 295 | assert rv.value == list(map(text_type, range(10))) 296 | -------------------------------------------------------------------------------- /tests/test_ketama.py: -------------------------------------------------------------------------------- 1 | from rb.ketama import Ketama 2 | 3 | 4 | def test_basic(): 5 | def test(k): 6 | data = {} 7 | for i in range(1000): 8 | tower = k.get_node("a%s" % i) 9 | data.setdefault(tower, 0) 10 | data[tower] += 1 11 | 12 | return [ 13 | k.get_node("Apple"), 14 | k.get_node("Hello"), 15 | k.get_node("Data"), 16 | k.get_node("Computer"), 17 | ] 18 | 19 | k = Ketama( 20 | [ 21 | "192.168.0.1:6000", 22 | "192.168.0.1:6001", 23 | "192.168.0.1:6002", 24 | "192.168.0.1:6003", 25 | "192.168.0.1:6004", 26 | "192.168.0.1:6005", 27 | "192.168.0.1:6006", 28 | "192.168.0.1:6008", 29 | "192.168.0.1:6007", 30 | ] 31 | ) 32 | assert test(k) == [ 33 | "192.168.0.1:6002", 34 | "192.168.0.1:6007", 35 | "192.168.0.1:6004", 36 | "192.168.0.1:6004", 37 | ] 38 | 39 | k.remove_node("192.168.0.1:6007") 40 | assert test(k) == [ 41 | "192.168.0.1:6002", 42 | "192.168.0.1:6000", 43 | "192.168.0.1:6004", 44 | "192.168.0.1:6004", 45 | ] 46 | 47 | k.add_node("192.168.0.1:6007") 48 | assert test(k) == [ 49 | "192.168.0.1:6002", 50 | "192.168.0.1:6007", 51 | "192.168.0.1:6004", 52 | "192.168.0.1:6004", 53 | ] 54 | -------------------------------------------------------------------------------- /tests/test_poll.py: 
-------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from rb import clients 4 | from rb.poll import available_pollers 5 | from rb.utils import text_type 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "poll", available_pollers, ids=[x.__name__ for x in available_pollers] 10 | ) 11 | def test_simple_api(cluster, poll, monkeypatch): 12 | monkeypatch.setattr(clients, "poll", poll) 13 | 14 | client = cluster.get_routing_client() 15 | with client.map() as map_client: 16 | for x in range(10): 17 | map_client.set("key:%s" % x, x) 18 | 19 | for x in range(10): 20 | assert client.get("key:%d" % x) == text_type(x) 21 | -------------------------------------------------------------------------------- /tests/test_promise.py: -------------------------------------------------------------------------------- 1 | from rb.promise import Promise 2 | 3 | 4 | def test_resolved_promise(): 5 | p = Promise.resolved(42) 6 | assert p.is_resolved 7 | assert not p.is_pending 8 | assert not p.is_rejected 9 | assert p.value == 42 10 | 11 | 12 | def test_rejected_promise(): 13 | err = RuntimeError("So fail") 14 | p = Promise.rejected(err) 15 | assert not p.is_resolved 16 | assert not p.is_pending 17 | assert p.is_rejected 18 | assert p.reason == err 19 | 20 | 21 | def test_success_callbacks(): 22 | results = [] 23 | 24 | p = Promise() 25 | assert p.is_pending 26 | p.done(results.append) 27 | 28 | assert results == [] 29 | p.resolve(42) 30 | assert results == [42] 31 | 32 | p = Promise.resolved(23) 33 | p.done(results.append) 34 | 35 | assert results == [42, 23] 36 | 37 | 38 | def test_failure_callbacks(): 39 | results = [] 40 | 41 | p = Promise() 42 | assert p.is_pending 43 | p.done(on_failure=results.append) 44 | 45 | assert results == [] 46 | p.reject(42) 47 | assert results == [42] 48 | 49 | p = Promise.rejected(23) 50 | p.done(on_failure=results.append) 51 | 52 | assert results == [42, 23] 53 | 54 | 55 | def test_promise_then(): 56 | p = Promise.resolved([1, 2, 3]) 57 | 58 | def on_success(value): 59 | return value + [4] 60 | 61 | p2 = p.then(success=on_success) 62 | assert p2.value == [1, 2, 3, 4] 63 | 64 | 65 | def test_promise_all(): 66 | p = Promise.all([]) 67 | assert p.is_resolved 68 | assert p.value == [] 69 | 70 | p = Promise.all({}) 71 | assert p.is_resolved 72 | assert p.value == {} 73 | 74 | p = Promise.all([Promise.resolved(1), Promise.resolved(2), Promise.resolved(3),]) 75 | 76 | assert p.is_resolved 77 | assert p.value == [1, 2, 3] 78 | 79 | p = Promise.all( 80 | { 81 | "key1": Promise.resolved(1), 82 | "key2": Promise.resolved(2), 83 | "key3": Promise.resolved(3), 84 | } 85 | ) 86 | 87 | assert p.is_resolved 88 | assert p.value == {"key1": 1, "key2": 2, "key3": 3} 89 | 90 | p = Promise.all([Promise.resolved(1), Promise.rejected(2), Promise.resolved(3),]) 91 | assert p.is_rejected 92 | assert p.reason == 2 93 | 94 | 95 | def test_auto_coercion(): 96 | p = Promise.all([1, 2, Promise.resolved(3)]) 97 | assert p.is_resolved 98 | assert p.value == [1, 2, 3] 99 | 100 | p = Promise.all({1: 1, 2: 2, 3: Promise.resolved(3)}) 101 | assert p.is_resolved 102 | assert p.value == {1: 1, 2: 2, 3: 3} 103 | -------------------------------------------------------------------------------- /tests/test_router.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from rb.cluster import Cluster 4 | from rb.router import UnroutableCommand, extract_keys, BadHostSetup 5 | 6 | 7 | def test_router_key_routing(): 8 | cluster = 
Cluster({0: {"db": 0},}) 9 | 10 | router = cluster.get_router() 11 | assert router.get_key("INCR", ["foo"]) == "foo" 12 | assert router.get_key("GET", ["bar"]) == "bar" 13 | 14 | with pytest.raises(UnroutableCommand): 15 | router.get_key("MGET", ["foo", "bar", "baz"]) 16 | 17 | with pytest.raises(UnroutableCommand): 18 | router.get_key("UNKNOWN", []) 19 | 20 | 21 | def test_host_validation(): 22 | cluster = Cluster(hosts={1: {}}) 23 | try: 24 | cluster.get_router() 25 | except BadHostSetup as e: 26 | assert 'Expected host with ID "0"' in str(e) 27 | else: 28 | raise Exception("Expected BadHostSetup to be raised") 29 | 30 | 31 | def test_router_basics(): 32 | cluster = Cluster({0: {"db": 0}, 1: {"db": 1}, 2: {"db": 2},}) 33 | 34 | router = cluster.get_router() 35 | assert router.get_host_for_command("INCR", ["foo"]) == 1 36 | assert router.get_host_for_command("INCR", ["bar"]) == 2 37 | assert router.get_host_for_command("INCR", ["baz"]) == 0 38 | 39 | assert router.get_host_for_key("foo") == 1 40 | assert router.get_host_for_key("bar") == 2 41 | assert router.get_host_for_key("baz") == 0 42 | 43 | 44 | def test_key_extraction(): 45 | assert extract_keys(["foo"], (1, 1, 1)) == ["foo"] 46 | assert extract_keys(["foo", "value", "foo2", "value2"], (1, -1, 2)) == [ 47 | "foo", 48 | "foo2", 49 | ] 50 | assert extract_keys(["extra", "foo", "value", "foo2", "value2"], (2, -1, 2)) == [ 51 | "foo", 52 | "foo2", 53 | ] 54 | assert extract_keys(["foo", "foo2"], (1, -1, 1)) == ["foo", "foo2"] 55 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from rb.utils import bytes_type, crc32 4 | 5 | 6 | def test_crc32(): 7 | """ 8 | Test that we get consistent values from python 2/3 9 | """ 10 | assert crc32("test".encode("utf-8")) == -662733300 11 | --------------------------------------------------------------------------------
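Taken together, the suites above cover routing, promises, polling, and key extraction one piece at a time. As a closing sketch, this is roughly how the pieces compose in application code; the host layout and key names are illustrative assumptions rather than anything taken from the tests::

    from rb import Cluster  # re-exported from rb.cluster

    # gapless host IDs 0..N-1, as both built-in routers require
    cluster = Cluster(
        hosts={0: {"db": 0}, 1: {"db": 1}},
        pool_options={"encoding": "utf-8", "decode_responses": True},
    )

    # routing client: each command goes to the host its key maps to
    client = cluster.get_routing_client()
    client.set("user:1", "alice")

    # mapping client: commands are batched into promises that are
    # settled when the block exits
    with cluster.map() as map_client:
        promise = map_client.get("user:1")
    assert promise.value == "alice"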