├── .gitignore ├── .travis.yml ├── AUTHORS.rst ├── CONTRIBUTING.rst ├── HISTORY.rst ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── docs ├── Makefile ├── authors.rst ├── conf.py ├── consumer.rst ├── contrib.rst ├── contributing.rst ├── history.rst ├── index.rst ├── lookupd.rst ├── make.bat ├── message.rst ├── nsqd.rst ├── producer.rst ├── signals.rst └── upgrading.rst ├── gnsq ├── __init__.py ├── backofftimer.py ├── consumer.py ├── contrib │ ├── __init__.py │ ├── batch.py │ ├── giveup.py │ ├── queue.py │ └── sentry.py ├── decorators.py ├── errors.py ├── httpclient.py ├── lookupd.py ├── message.py ├── nsqd.py ├── producer.py ├── protocol.py ├── reader.py ├── states.py ├── stream │ ├── __init__.py │ ├── compression.py │ ├── defalte.py │ ├── snappy.py │ └── stream.py ├── util.py └── version.py ├── requirements.dev.txt ├── requirements.docs.txt ├── requirements.test.txt ├── setup.cfg ├── setup.py ├── tests ├── cert.pem ├── conftest.py ├── install-nsq.sh ├── integration_server.py ├── key.pem ├── mock_server.py ├── test_basic.py ├── test_command.py ├── test_consumer.py ├── test_lookupd.py ├── test_message.py ├── test_nsqd.py ├── test_nsqd_http.py ├── test_producer.py └── test_reader.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Distribution / packaging 7 | .Python 8 | build/ 9 | develop-eggs/ 10 | dist/ 11 | downloads/ 12 | eggs/ 13 | .eggs/ 14 | lib/ 15 | lib64/ 16 | parts/ 17 | sdist/ 18 | var/ 19 | wheels/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | MANIFEST 24 | 25 | # Installer logs 26 | pip-log.txt 27 | pip-delete-this-directory.txt 28 | 29 | # Unit test / coverage reports 30 | htmlcov/ 31 | .tox/ 32 | .coverage 33 | .coverage.* 34 | .cache 35 | nosetests.xml 36 | coverage.xml 37 | *.cover 38 | .pytest_cache/ 39 | 40 | # Sphinx documentation 41 | docs/_build/ 42 | docs/gnsq.rst 43 | 
docs/gnsq.contrib.rst 44 | docs/gnsq.stream.rst 45 | docs/modules.rst 46 | 47 | # pyenv 48 | .python-version 49 | 50 | # Environments 51 | .env 52 | .venv 53 | env/ 54 | venv/ 55 | ENV/ 56 | env.bak/ 57 | venv.bak/ 58 | 59 | # mypy 60 | .mypy_cache/ 61 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Config file for automatic testing at travis-ci.org 2 | dist: xenial 3 | language: python 4 | 5 | python: 6 | - "3.8" 7 | - "3.7" 8 | - "3.6" 9 | - "3.5" 10 | - "3.4" 11 | - "2.7" 12 | - "pypy3.5" 13 | 14 | env: 15 | - NSQ_VERSION=1.0.0-compat GO_VERSION=1.8 16 | - NSQ_VERSION=1.1.0 GO_VERSION=1.10.3 17 | 18 | matrix: 19 | include: 20 | - env: TOXENV=docs 21 | - env: TOXENV=lint 22 | 23 | install: 24 | - sudo apt-get install libsnappy-dev 25 | - pip install -U tox tox-travis 26 | - bash ./tests/install-nsq.sh 27 | 28 | script: tox 29 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Credits 3 | ======= 4 | 5 | Development Lead 6 | ---------------- 7 | 8 | * Trevor Olson 9 | 10 | Contributors 11 | ------------ 12 | 13 | None yet. Why not be the first? -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributing 3 | ============ 4 | 5 | Contributions are welcome, and they are greatly appreciated! Every 6 | little bit helps, and credit will always be given. 7 | 8 | You can contribute in many ways: 9 | 10 | Types of Contributions 11 | ---------------------- 12 | 13 | Report Bugs 14 | ~~~~~~~~~~~ 15 | 16 | Report bugs at https://github.com/wtolson/gnsq/issues. 
17 | 18 | If you are reporting a bug, please include: 19 | 20 | * Your operating system name and version. 21 | * Any details about your local setup that might be helpful in troubleshooting. 22 | * Detailed steps to reproduce the bug. 23 | 24 | Fix Bugs 25 | ~~~~~~~~ 26 | 27 | Look through the GitHub issues for bugs. Anything tagged with "bug" 28 | is open to whoever wants to implement it. 29 | 30 | Implement Features 31 | ~~~~~~~~~~~~~~~~~~ 32 | 33 | Look through the GitHub issues for features. Anything tagged with "feature" 34 | is open to whoever wants to implement it. 35 | 36 | Write Documentation 37 | ~~~~~~~~~~~~~~~~~~~ 38 | 39 | gnsq could always use more documentation, whether as part of the 40 | official gnsq docs, in docstrings, or even on the web in blog posts, 41 | articles, and such. 42 | 43 | Submit Feedback 44 | ~~~~~~~~~~~~~~~ 45 | 46 | The best way to send feedback is to file an issue at 47 | https://github.com/wtolson/gnsq/issues. 48 | 49 | If you are proposing a feature: 50 | 51 | * Explain in detail how it would work. 52 | * Keep the scope as narrow as possible, to make it easier to implement. 53 | * Remember that this is a volunteer-driven project, and that contributions 54 | are welcome :) 55 | 56 | Get Started! 57 | ------------ 58 | 59 | Ready to contribute? Here's how to set up `gnsq` for local development. 60 | 61 | 1. Fork the `gnsq` repo on GitHub. 62 | 2. Clone your fork locally:: 63 | 64 | $ git clone git@github.com:your_name_here/gnsq.git 65 | 66 | 3. Install your local copy into a virtualenv. Assuming you have 67 | virtualenvwrapper and libsnappy installed, this is how you set up your fork 68 | for local development:: 69 | 70 | $ mkvirtualenv gnsq 71 | $ cd gnsq/ 72 | $ pip install -r requirements.dev.txt -r requirements.docs.txt 73 | 74 | 4. Create a branch for local development:: 75 | 76 | $ git checkout -b name-of-your-bugfix-or-feature 77 | 78 | Now you can make your changes locally. 79 | 80 | 5. 
When you're done making changes, check that your changes pass flake8 and the 81 | tests, including testing other Python versions with tox:: 82 | 83 | $ flake8 gnsq tests 84 | $ pytest 85 | $ tox 86 | 87 | To get flake8 and tox, just pip install them into your virtualenv. 88 | 89 | 6. Commit your changes and push your branch to GitHub:: 90 | 91 | $ git add . 92 | $ git commit -m "Your detailed description of your changes." 93 | $ git push origin name-of-your-bugfix-or-feature 94 | 95 | 7. Submit a pull request through the GitHub website. 96 | 97 | Pull Request Guidelines 98 | ----------------------- 99 | 100 | Before you submit a pull request, check that it meets these guidelines: 101 | 102 | 1. The pull request should include tests. 103 | 2. If the pull request adds functionality, the docs should be updated. Put 104 | your new functionality into a function with a docstring, and add the 105 | feature to the list in README.rst. 106 | 3. The pull request should work for Python 2.6 and 2.7. Check 107 | https://travis-ci.org/wtolson/gnsq/pull_requests 108 | and make sure that the tests pass for all supported Python versions. 109 | 110 | Tips 111 | ---- 112 | 113 | To run a subset of tests:: 114 | 115 | $ pytest tests/test_basic.py 116 | -------------------------------------------------------------------------------- /HISTORY.rst: -------------------------------------------------------------------------------- 1 | .. 
:changelog: 2 | 3 | History 4 | ------- 5 | 6 | 7 | 1.0.2 (2020-01-08) 8 | ~~~~~~~~~~~~~~~~~~ 9 | 10 | * Fix python3 bug in the giveup handler 11 | * Fix bug returning json data from older nsq servers 12 | * Batch handler now checks if messages have been responded to before requeuing 13 | 14 | 15 | 1.0.1 (2019-04-24) 16 | ~~~~~~~~~~~~~~~~~~ 17 | 18 | * Fix long description in packaging 19 | 20 | 21 | 1.0.0 (2019-04-24) 22 | ~~~~~~~~~~~~~~~~~~ 23 | 24 | * Drop support for python 2.6 and python 3.3, add support for python 3.7 25 | * Drop support for nsq < 1.0.0 26 | * Handle changing connections during redistribute ready 27 | * Add create topic and create channel to LookupdClient 28 | * Add pause and unpause topic to NsqdHTTPClient 29 | * Add ability to filter NsqdHTTPClient stats by topic/channel 30 | * Add text format for NsqdHTTPClient stats 31 | * Add binary multipublish over http 32 | * Add queue handler to the contrib package 33 | * Add Producer class, a high level tcp message writer 34 | * Fixed detecting if consumer is starved 35 | * Optimizations to better distribute ready state among the nsqd connections 36 | * Detect starved consumers when batching messages 37 | * [DEPRECATED] :class:`~gnsq.Nsqd` is deprecated. Use 38 | :class:`~gnsq.NsqdTCPClient` or :class:`~gnsq.NsqdHTTPClient` instead. See 39 | :ref:`upgrading-to-100` for more information. 40 | * [DEPRECATED] :class:`~gnsq.Lookupd` is deprecated. Use 41 | :class:`~gnsq.LookupdClient` instead. See :ref:`upgrading-to-100` for more 42 | information. 43 | * [DEPRECATED] :class:`~gnsq.Reader` is deprecated. Use :class:`~gnsq.Consumer` 44 | instead. See :ref:`upgrading-to-100` for more information. 45 | 46 | 47 | 0.4.0 (2017-06-13) 48 | ~~~~~~~~~~~~~~~~~~ 49 | 50 | * #13 - Allow use with nsq v1.0.0 (thanks @daroot) 51 | * Add contrib package with utilities. 52 | 53 | 54 | 0.3.3 (2016-09-25) 55 | ~~~~~~~~~~~~~~~~~~ 56 | 57 | * #11 - Make sure all socket data is sent. 
58 | * #5 - Add support for DPUB (deferred publish). 59 | 60 | 61 | 0.3.2 (2016-04-10) 62 | ~~~~~~~~~~~~~~~~~~ 63 | 64 | * Add support for Python 3 and PyPy. 65 | * #7 - Fix undeclared variable in compression socket. 66 | 67 | 68 | 0.3.1 (2015-11-06) 69 | ~~~~~~~~~~~~~~~~~~ 70 | 71 | * Fix negative in flight causing not throttling after backoff. 72 | 73 | 74 | 0.3.0 (2015-06-14) 75 | ~~~~~~~~~~~~~~~~~~ 76 | 77 | * Fix extra backoff success/failures during backoff period. 78 | * Fix case where handle_backoff is never called. 79 | * Add backoff parameter to message.requeue(). 80 | * Allow overriding backoff on NSQRequeueMessage error. 81 | * Handle connection failures while starting/completing backoff. 82 | 83 | 84 | 0.2.3 (2015-02-16) 85 | ~~~~~~~~~~~~~~~~~~ 86 | 87 | * Remove disconnected nsqd messages from the worker queue. 88 | * #4 - Fix crash in Reader.random_ready_conn (thanks @ianpreston). 89 | 90 | 91 | 0.2.2 (2015-01-12) 92 | ~~~~~~~~~~~~~~~~~~ 93 | 94 | * Allow finishing and requeuing in sync handlers. 95 | 96 | 97 | 0.2.1 (2015-01-12) 98 | ~~~~~~~~~~~~~~~~~~ 99 | 100 | * Topics and channels are now valid to 64 characters. 101 | * Ephemeral topics are now valid. 102 | * Adjustable backoff behavior. 103 | 104 | 105 | 0.2.0 (2014-08-03) 106 | ~~~~~~~~~~~~~~~~~~ 107 | 108 | * Warn on connection failure. 109 | * Add extra requires for snappy. 110 | * Add support for nsq auth protocol. 111 | 112 | 113 | 0.1.4 (2014-07-24) 114 | ~~~~~~~~~~~~~~~~~~ 115 | 116 | * Preemptively update ready count. 117 | * Dependency and contributing documentation. 118 | * Support for nsq back to 0.2.24. 119 | 120 | 121 | 0.1.3 (2014-07-08) 122 | ~~~~~~~~~~~~~~~~~~ 123 | 124 | * Block as expected on start, even if already started. 125 | * Raise runtime error if starting the reader without a message handler. 126 | * Add on_close signal to the reader. 127 | * Allow upgrading to tls+snappy or tls+deflate. 
128 | 129 | 130 | 0.1.2 (2014-07-08) 131 | ~~~~~~~~~~~~~~~~~~ 132 | 133 | * Flush delfate buffer for each message. 134 | 135 | 136 | 0.1.1 (2014-07-07) 137 | ~~~~~~~~~~~~~~~~~~ 138 | 139 | * Fix packaging stream submodule. 140 | * Send queued messages before closing socket. 141 | * Continue to read from socket on EAGAIN 142 | 143 | 144 | 0.1.0 (2014-07-07) 145 | ~~~~~~~~~~~~~~~~~~ 146 | 147 | * First release on PyPI. 148 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Trevor Olson 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 5 | 6 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 7 | 8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 9 | 10 | * Neither the name of gnsq nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 11 | 12 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include AUTHORS.rst 2 | include CONTRIBUTING.rst 3 | include HISTORY.rst 4 | include LICENSE 5 | include README.rst 6 | 7 | recursive-include tests * 8 | recursive-exclude * __pycache__ 9 | recursive-exclude * *.py[co] 10 | 11 | recursive-include docs *.rst conf.py Makefile make.bat -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean-pyc clean-build clean-pyc clean-docs clean-coverage clean-tests docs clean 2 | 3 | help: 4 | @echo "clean-build - remove build artifacts" 5 | @echo "clean-pyc - remove Python file artifacts" 6 | @echo "lint - check style with flake8" 7 | @echo "test - run tests quickly with the default Python" 8 | @echo "test-all - run tests on every Python version with tox" 9 | @echo "coverage - check code coverage quickly with the default Python" 10 | @echo "docs - generate Sphinx HTML documentation, including API docs" 11 | @echo "release - package and upload a release" 12 | @echo "dist - package" 13 | 14 | clean: clean-build clean-pyc clean-docs clean-coverage clean-tests 15 | 16 | clean-build: 17 | rm -fr build/ 18 | rm -fr dist/ 19 | rm -fr *.egg-info 20 | 21 | clean-pyc: 22 | find . -type f -name "*.py[co]" -delete 23 | find . 
-type f -name '*~' -delete 24 | find . -type d -name "__pycache__" -delete 25 | 26 | clean-docs: 27 | rm -f docs/gnsq.rst 28 | rm -f docs/gnsq.contrib.rst 29 | rm -f docs/gnsq.stream.rst 30 | rm -f docs/modules.rst 31 | $(MAKE) -C docs clean 32 | 33 | clean-coverage: 34 | rm -f .coverage 35 | rm -fr htmlcov/ 36 | 37 | clean-tests: 38 | rm -fr .tox 39 | rm -fr .cache 40 | rm -fr .pytest_cache 41 | 42 | lint: 43 | flake8 gnsq tests 44 | 45 | test: 46 | pytest tests 47 | 48 | test-fast: 49 | pytest tests --fast 50 | 51 | test-all: 52 | tox 53 | 54 | coverage: 55 | pytest --cov gnsq --cov-report html tests 56 | open htmlcov/index.html 57 | 58 | docs: clean-docs 59 | sphinx-apidoc -o docs/ gnsq 60 | $(MAKE) -C docs html 61 | open docs/_build/html/index.html 62 | 63 | release: dist 64 | twine check dist/* 65 | twine upload dist/* 66 | 67 | dist: clean 68 | python setup.py sdist 69 | python setup.py bdist_wheel 70 | ls -l dist 71 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | =============================== 2 | gnsq 3 | =============================== 4 | 5 | .. image:: https://img.shields.io/pypi/v/gnsq.svg 6 | :target: https://pypi.python.org/pypi/gnsq 7 | 8 | .. image:: https://img.shields.io/travis/wtolson/gnsq.svg 9 | :target: https://travis-ci.org/wtolson/gnsq 10 | 11 | .. image:: https://readthedocs.org/projects/gnsq/badge/?version=latest 12 | :target: https://gnsq.readthedocs.io/en/latest/?badge=latest 13 | :alt: Documentation Status 14 | 15 | 16 | A `gevent`_ based python client for `NSQ`_ distributed messaging platform. 
17 | 18 | Features include: 19 | 20 | * Free software: BSD license 21 | * Documentation: https://gnsq.readthedocs.org 22 | * Battle tested on billions and billions of messages `` 23 | * Based on `gevent`_ for fast concurrent networking 24 | * Fast and flexible signals with `Blinker`_ 25 | * Automatic nsqlookupd discovery and back-off 26 | * Support for TLS, DEFLATE, and Snappy 27 | * Full HTTP clients for both nsqd and nsqlookupd 28 | 29 | Installation 30 | ------------ 31 | 32 | At the command line:: 33 | 34 | $ easy_install gnsq 35 | 36 | Or even better, if you have virtualenvwrapper installed:: 37 | 38 | $ mkvirtualenv gnsq 39 | $ pip install gnsq 40 | 41 | Currently there is support for Python 2.7+, Python 3.4+ and PyPy. 42 | 43 | Usage 44 | ----- 45 | 46 | First make sure nsq is `installed and running`_. Next create a producer and 47 | publish some messages to your topic:: 48 | 49 | import gnsq 50 | 51 | producer = gnsq.Producer('localhost:4150') 52 | producer.start() 53 | 54 | producer.publish('topic', 'hello gevent!') 55 | producer.publish('topic', 'hello nsq!') 56 | 57 | Then create a Consumer to consume messages from your topic:: 58 | 59 | consumer = gnsq.Consumer('topic', 'channel', 'localhost:4150') 60 | 61 | @consumer.on_message.connect 62 | def handler(consumer, message): 63 | print 'got message:', message.body 64 | 65 | consumer.start() 66 | 67 | Compatibility 68 | ------------- 69 | 70 | For **NSQ 1.0** and later, use the major version 1 (``1.x.y``) of gnsq. 71 | 72 | For **NSQ 0.3.8** and earlier, use the major version 0 (``0.x.y``) of the 73 | library. 
74 | 75 | The recommended way to set your requirements in your `setup.py` or 76 | `requirements.txt` is:: 77 | 78 | # NSQ 1.x.y 79 | gnsq>=1.0.0 80 | 81 | # NSQ 0.x.y 82 | gnsq<1.0.0 83 | 84 | Dependencies 85 | ------------ 86 | 87 | Optional snappy support depends on the `python-snappy` package which in turn 88 | depends on libsnappy:: 89 | 90 | # Debian 91 | $ sudo apt-get install libsnappy-dev 92 | 93 | # Or OS X 94 | $ brew install snappy 95 | 96 | # And then install python-snappy 97 | $ pip install python-snappy 98 | 99 | Contributing 100 | ------------ 101 | 102 | Feedback, issues, and contributions are always gratefully welcomed. See the 103 | `contributing guide`_ for details on how to help and setup a development 104 | environment. 105 | 106 | 107 | .. _gevent: http://gevent.org/ 108 | .. _NSQ: http://nsq.io/ 109 | .. _Blinker: http://pythonhosted.org/blinker/ 110 | .. _installed and running: http://nsq.io/overview/quick_start.html 111 | .. _contributing guide: https://github.com/wtolson/gnsq/blob/master/CONTRIBUTING.rst 112 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = gnsq 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../AUTHORS.rst -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 14 | # 15 | import os 16 | import sys 17 | sys.path.insert(0, os.path.abspath('.')) # noqa: E402 18 | 19 | from gnsq import __version__ 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = 'gnsq' 24 | copyright = '2018, Trevor Olson' 25 | author = 'Trevor Olson' 26 | 27 | # The short X.Y version 28 | version = __version__ 29 | # The full version, including alpha/beta/rc tags 30 | release = __version__ 31 | 32 | 33 | # -- General configuration --------------------------------------------------- 34 | 35 | # If your documentation needs a minimal Sphinx version, state it here. 36 | # 37 | # needs_sphinx = '1.0' 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 
42 | extensions = [ 43 | 'sphinx.ext.autodoc', 44 | 'sphinx.ext.viewcode', 45 | ] 46 | 47 | # Add any paths that contain templates here, relative to this directory. 48 | templates_path = ['_templates'] 49 | 50 | # The suffix(es) of source filenames. 51 | # You can specify multiple suffix as a list of string: 52 | # 53 | # source_suffix = ['.rst', '.md'] 54 | source_suffix = '.rst' 55 | 56 | # The master toctree document. 57 | master_doc = 'index' 58 | 59 | # The language for content autogenerated by Sphinx. Refer to documentation 60 | # for a list of supported languages. 61 | # 62 | # This is also used if you do content translation via gettext catalogs. 63 | # Usually you set "language" from the command line for these cases. 64 | language = None 65 | 66 | # List of patterns, relative to source directory, that match files and 67 | # directories to ignore when looking for source files. 68 | # This pattern also affects html_static_path and html_extra_path . 69 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 70 | 71 | # The name of the Pygments (syntax highlighting) style to use. 72 | pygments_style = 'sphinx' 73 | 74 | 75 | # -- Options for HTML output ------------------------------------------------- 76 | 77 | # The theme to use for HTML and HTML Help pages. See the documentation for 78 | # a list of builtin themes. 79 | # 80 | html_theme = 'sphinx_rtd_theme' 81 | 82 | # Theme options are theme-specific and customize the look and feel of a theme 83 | # further. For a list of options available for each theme, see the 84 | # documentation. 85 | # 86 | # html_theme_options = {} 87 | 88 | # Add any paths that contain custom static files (such as style sheets) here, 89 | # relative to this directory. They are copied after the builtin static files, 90 | # so a file named "default.css" will overwrite the builtin "default.css". 
91 | html_static_path = ['_static'] 92 | 93 | # Custom sidebar templates, must be a dictionary that maps document names 94 | # to template names. 95 | # 96 | # The default sidebars (for documents that don't match any pattern) are 97 | # defined by theme itself. Builtin themes are using these templates by 98 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 99 | # 'searchbox.html']``. 100 | # 101 | # html_sidebars = {} 102 | 103 | html_use_index = False 104 | 105 | 106 | # -- Options for HTMLHelp output --------------------------------------------- 107 | 108 | # Output file base name for HTML help builder. 109 | htmlhelp_basename = 'gnsqdoc' 110 | 111 | 112 | # -- Options for LaTeX output ------------------------------------------------ 113 | 114 | latex_elements = { 115 | # The paper size ('letterpaper' or 'a4paper'). 116 | # 117 | # 'papersize': 'letterpaper', 118 | 119 | # The font size ('10pt', '11pt' or '12pt'). 120 | # 121 | # 'pointsize': '10pt', 122 | 123 | # Additional stuff for the LaTeX preamble. 124 | # 125 | # 'preamble': '', 126 | 127 | # Latex figure (float) alignment 128 | # 129 | # 'figure_align': 'htbp', 130 | } 131 | 132 | # Grouping the document tree into LaTeX files. List of tuples 133 | # (source start file, target name, title, 134 | # author, documentclass [howto, manual, or own class]). 135 | latex_documents = [ 136 | (master_doc, 'gnsq.tex', 'gnsq Documentation', 137 | 'Trevor Olson', 'manual'), 138 | ] 139 | 140 | 141 | # -- Options for manual page output ------------------------------------------ 142 | 143 | # One entry per manual page. List of tuples 144 | # (source start file, name, description, authors, manual section). 145 | man_pages = [ 146 | (master_doc, 'gnsq', 'gnsq Documentation', 147 | [author], 1) 148 | ] 149 | 150 | 151 | # -- Options for Texinfo output ---------------------------------------------- 152 | 153 | # Grouping the document tree into Texinfo files. 
List of tuples 154 | # (source start file, target name, title, author, 155 | # dir menu entry, description, category) 156 | texinfo_documents = [ 157 | (master_doc, 'gnsq', 'gnsq Documentation', 158 | author, 'gnsq', 'One line description of project.', 159 | 'Miscellaneous'), 160 | ] 161 | 162 | 163 | # -- Extension configuration ------------------------------------------------- 164 | -------------------------------------------------------------------------------- /docs/consumer.rst: -------------------------------------------------------------------------------- 1 | Consumer: high-level message reader 2 | ----------------------------------- 3 | 4 | .. autoclass:: gnsq.Consumer 5 | :members: 6 | :inherited-members: 7 | -------------------------------------------------------------------------------- /docs/contrib.rst: -------------------------------------------------------------------------------- 1 | Contrib modules 2 | --------------- 3 | 4 | Patterns and best practices for gnsq made code. 5 | 6 | 7 | Batching messages 8 | ~~~~~~~~~~~~~~~~~ 9 | 10 | 11 | .. autoclass:: gnsq.contrib.batch.BatchHandler 12 | :members: 13 | :inherited-members: 14 | 15 | 16 | Giveup handlers 17 | ~~~~~~~~~~~~~~~ 18 | 19 | 20 | .. autoclass:: gnsq.contrib.giveup.LogGiveupHandler 21 | :members: 22 | :inherited-members: 23 | 24 | 25 | .. autoclass:: gnsq.contrib.giveup.JSONLogGiveupHandler 26 | :members: 27 | :inherited-members: 28 | 29 | 30 | .. autoclass:: gnsq.contrib.giveup.NsqdGiveupHandler 31 | :members: 32 | :inherited-members: 33 | 34 | 35 | Concurrency 36 | ~~~~~~~~~~~ 37 | 38 | 39 | .. autoclass:: gnsq.contrib.queue.QueueHandler 40 | :members: 41 | :inherited-members: 42 | :exclude-members: copy, put, put_nowait 43 | 44 | 45 | .. autoclass:: gnsq.contrib.queue.ChannelHandler 46 | 47 | 48 | Error logging 49 | ~~~~~~~~~~~~~ 50 | 51 | 52 | .. 
autoclass:: gnsq.contrib.sentry.SentryExceptionHandler 53 | :members: 54 | :inherited-members: 55 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/history.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../HISTORY.rst 2 | 3 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. complexity documentation master file, created by 2 | sphinx-quickstart on Tue Jul 9 22:26:36 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to gnsq's documentation! 7 | ================================ 8 | 9 | .. include:: ../README.rst 10 | 11 | Contents 12 | ======== 13 | 14 | .. toctree:: 15 | :maxdepth: 2 16 | 17 | consumer 18 | producer 19 | nsqd 20 | lookupd 21 | message 22 | signals 23 | contrib 24 | contributing 25 | authors 26 | upgrading 27 | history 28 | 29 | Indices and tables 30 | ================== 31 | 32 | * :ref:`genindex` 33 | * :ref:`modindex` 34 | * :ref:`search` 35 | -------------------------------------------------------------------------------- /docs/lookupd.rst: -------------------------------------------------------------------------------- 1 | Nsqlookupd client 2 | ----------------- 3 | 4 | .. autoclass:: gnsq.LookupdClient 5 | :members: 6 | :inherited-members: 7 | 8 | 9 | .. 
autoclass:: gnsq.Lookupd 10 | :members: 11 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=gnsq 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/message.rst: -------------------------------------------------------------------------------- 1 | NSQ Message 2 | ----------- 3 | 4 | .. autoclass:: gnsq.Message 5 | :members: 6 | :inherited-members: 7 | -------------------------------------------------------------------------------- /docs/nsqd.rst: -------------------------------------------------------------------------------- 1 | Nsqd clients 2 | ------------ 3 | 4 | .. autoclass:: gnsq.NsqdHTTPClient 5 | :members: 6 | :inherited-members: 7 | 8 | 9 | .. autoclass:: gnsq.NsqdTCPClient 10 | :members: 11 | :inherited-members: 12 | 13 | 14 | .. 
autoclass:: gnsq.Nsqd 15 | :members: 16 | -------------------------------------------------------------------------------- /docs/producer.rst: -------------------------------------------------------------------------------- 1 | Producer: high-level message writer 2 | ----------------------------------- 3 | 4 | .. autoclass:: gnsq.Producer 5 | :members: 6 | :inherited-members: 7 | -------------------------------------------------------------------------------- /docs/signals.rst: -------------------------------------------------------------------------------- 1 | Signals 2 | ------- 3 | 4 | Both :doc:`Consumer ` and :doc:`NsqdTCPClient ` classes expose 5 | various signals provided by the `Blinker`_ library. 6 | 7 | Subscribing to signals 8 | ~~~~~~~~~~~~~~~~~~~~~~ 9 | 10 | To subscribe to a signal, you can use the 11 | :meth:`~blinker.base.Signal.connect` method of a signal. The first 12 | argument is the function that should be called when the signal is emitted, 13 | the optional second argument specifies a sender. To unsubscribe from a 14 | signal, you can use the :meth:`~blinker.base.Signal.disconnect` method. :: 15 | 16 | def error_handler(consumer, error): 17 | print 'Got an error:', error 18 | 19 | consumer.on_error.connect(error_handler) 20 | 21 | You can also easily subscribe to signals by using 22 | :meth:`~blinker.base.NamedSignal.connect` as a decorator:: 23 | 24 | @consumer.on_giving_up.connect 25 | def handle_giving_up(consumer, message): 26 | print 'Giving up on:', message.id 27 | 28 | .. 
_Blinker: https://pypi.python.org/pypi/blinker 29 | -------------------------------------------------------------------------------- /docs/upgrading.rst: -------------------------------------------------------------------------------- 1 | Upgrading to Newer Releases 2 | =========================== 3 | 4 | This section of the documentation enumerates all the changes in gnsq from 5 | release to release and how you can change your code to have a painless 6 | updating experience. 7 | 8 | Use the :command:`pip` command to upgrade your existing Flask installation by 9 | providing the ``--upgrade`` parameter:: 10 | 11 | $ pip install --upgrade gnsq 12 | 13 | 14 | .. _upgrading-to-100: 15 | 16 | Version 1.0.0 17 | ------------- 18 | 19 | While there are no breaking changes in version 1.0.0, much of the interface has 20 | been deprecated to both simplify the api and bring it into better compliance 21 | with the recommended naming schemes for nsq clients. Existing code should work 22 | as is and deprecation warnings will be emitted for any code paths that need to 23 | be changed. 24 | 25 | Deprecated Reader 26 | ~~~~~~~~~~~~~~~~~ 27 | 28 | The main interface has been renamed from :class:`~gnsq.Reader` to 29 | :class:`~gnsq.Consumer`. The api remains largely the same and can be swapped out 30 | directly in most cases. 31 | 32 | Async messages 33 | `````````````` 34 | 35 | The ``async`` flag has been removed from the :class:`~gnsq.Consumer`. Instead 36 | :class:`messages ` has a 37 | :meth:`message.enable_async() ` 38 | method that may be used to indicate that a message will be handled 39 | asynchronous. 40 | 41 | Max concurrency 42 | ``````````````` 43 | 44 | The ``max_concurrency`` parameter has been removed from 45 | :class:`~gnsq.Consumer`. 
If you wish to replicate this behavior, you should use 46 | the :class:`gnsq.contrib.QueueHandler` in conjunction with a worker pool:: 47 | 48 | from gevent.pool import Pool 49 | from gnsq import Consumer 50 | from gnsq.contrib.queue import QueueHandler 51 | 52 | MAX_CONCURRENCY = 4 53 | 54 | # Create your consumer as usual 55 | consumer = Consumer( 56 | 'topic', 'worker', 'localhost:4150', max_in_flight=16) 57 | 58 | # Connect a queue handler to the on message signal 59 | queue = QueueHandler() 60 | consumer.on_message.connect(queue) 61 | 62 | # Start your consumer without blocking or in a separate greenlet 63 | consumer.start(block=False) 64 | 65 | # If you want to limit your concurrency to a single greenlet, simply loop 66 | # over the queue in a for loop, or you can use a worker pool to distribute 67 | # the work. 68 | pool = Pool(MAX_CONCURRENCY) 69 | results = pool.imap_unordered(queue, my_handler) 70 | 71 | # Consume the results from the pool 72 | for result in results: 73 | pass 74 | 75 | Deprecated Nsqd 76 | ~~~~~~~~~~~~~~~ 77 | 78 | The :class:`~gnsq.Nsqd` client has been split into two classes, corresponding 79 | to the tcp and http APIs. The new classes are :class:`~gnsq.NsqdTCPClient` and 80 | :class:`~gnsq.NsqdHTTPClient` respectively. 81 | 82 | The methods `publish_tcp`, `publish_http`, `multipublish_tcp`, and 83 | `multipublish_http` have been removed from the new classes. 84 | 85 | Deprecated Lookupd 86 | ~~~~~~~~~~~~~~~~~~ 87 | 88 | The :class:`~gnsq.Lookupd` class has been replaced by 89 | :class:`~gnsq.LookupdClient`. :class:`~gnsq.LookupdClient` can be constructed 90 | using the ``host`` and ``port`` or by passing the url to 91 | :meth:`LookupdClient.from_url() ` instead. 92 | 93 | The method :meth:`~gnsq.Lookupd.tombstone_topic_producer` 94 | has been renamed to :func:`~gnsq.LookupdClient.tombstone_topic`. 
class BackoffTimer(object):
    """Exponential backoff timer with randomized jitter.

    Tracks a consecutive-failure counter and produces wait intervals drawn
    uniformly from a window that doubles with each recorded failure,
    optionally clamped to ``[min_interval, max_interval]``.
    """

    def __init__(self, ratio=1, max_interval=None, min_interval=None):
        # Consecutive failure count; drives the exponential window size.
        self.c = 0
        self.ratio = ratio

        self.max_interval = max_interval
        self.min_interval = min_interval

    def is_reset(self):
        """Return ``True`` when no failures are currently recorded."""
        return not self.c

    def reset(self):
        """Clear the failure count and return ``self`` for chaining."""
        self.c = 0
        return self

    def success(self):
        """Record a success, stepping the failure count back toward zero."""
        if self.c > 0:
            self.c -= 1
        return self

    def failure(self):
        """Record a failure, widening the next backoff window."""
        self.c += 1
        return self

    def get_interval(self):
        """Return a randomized backoff interval in seconds.

        The raw interval is uniform over ``[0, (2**c - 1) * ratio)`` and is
        then clamped to the configured bounds when they are set.
        """
        interval = random.random() * ((1 << self.c) - 1) * self.ratio

        if self.max_interval is not None and interval > self.max_interval:
            interval = self.max_interval

        if self.min_interval is not None and interval < self.min_interval:
            interval = self.min_interval

        return interval
-------------------------------------------------------------------------------- /gnsq/consumer.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import, division 3 | 4 | import logging 5 | import random 6 | import time 7 | 8 | from collections import defaultdict 9 | from itertools import cycle 10 | 11 | import blinker 12 | import gevent 13 | 14 | from gevent.event import Event 15 | from gevent.pool import Group 16 | 17 | from .backofftimer import BackoffTimer 18 | from .decorators import cached_property 19 | from .errors import NSQException, NSQRequeueMessage, NSQSocketError 20 | from .nsqd import NsqdTCPClient 21 | from .states import INIT, RUNNING, BACKOFF, THROTTLED, CLOSED 22 | from .util import parse_nsqds, parse_lookupds 23 | 24 | 25 | class Consumer(object): 26 | """High level NSQ consumer. 27 | 28 | A Consumer will connect to the nsqd tcp addresses or poll the provided 29 | nsqlookupd http addresses for the configured topic and send signals to 30 | message handlers connected to the :attr:`on_message` signal or provided by 31 | ``message_handler``. 32 | 33 | Messages will automatically be finished when the message handle returns 34 | unless :meth:`message.enable_async() ` is called. 35 | If an exception occurs or :class:`~gnsq.errors.NSQRequeueMessage` is raised, 36 | the message will be requeued. 37 | 38 | The Consumer will handle backing off of failed messages up to a configurable 39 | ``max_interval`` as well as automatically reconnecting to dropped 40 | connections. 
41 | 42 | Example usage:: 43 | 44 | from gnsq import Consumer 45 | 46 | consumer = gnsq.Consumer('topic', 'channel', 'localhost:4150') 47 | 48 | @consumer.on_message.connect 49 | def handler(consumer, message): 50 | print 'got message:', message.body 51 | 52 | consumer.start() 53 | 54 | :param topic: specifies the desired NSQ topic 55 | 56 | :param channel: specifies the desired NSQ channel 57 | 58 | :param nsqd_tcp_addresses: a sequence of string addresses of the nsqd 59 | instances this consumer should connect to 60 | 61 | :param lookupd_http_addresses: a sequence of string addresses of the 62 | nsqlookupd instances this consumer should query for producers of the 63 | specified topic 64 | 65 | :param name: a string that is used for logging messages (defaults to 66 | ``'gnsq.consumer.{topic}.{channel}'``) 67 | 68 | :param message_handler: the callable that will be executed for each message 69 | received 70 | 71 | :param max_tries: the maximum number of attempts the consumer will make to 72 | process a message after which messages will be automatically discarded 73 | 74 | :param max_in_flight: the maximum number of messages this consumer will 75 | pipeline for processing. this value will be divided evenly amongst the 76 | configured/discovered nsqd producers 77 | 78 | :param requeue_delay: the default delay to use when requeueing a failed 79 | message 80 | 81 | :param lookupd_poll_interval: the amount of time in seconds between querying 82 | all of the supplied nsqlookupd instances. A random amount of time based 83 | on this value will be initially introduced in order to add jitter when 84 | multiple consumers are running 85 | 86 | :param lookupd_poll_jitter: the maximum fractional amount of jitter to add 87 | to the lookupd poll loop. This helps evenly distribute requests even if 88 | multiple consumers restart at the same time. 
89 | 90 | :param low_ready_idle_timeout: the amount of time in seconds to wait for a 91 | message from a producer when in a state where RDY counts are 92 | re-distributed (ie. `max_in_flight` < `num_producers`) 93 | 94 | :param max_backoff_duration: the maximum time we will allow a backoff state 95 | to last in seconds. If zero, backoff wil not occur 96 | 97 | :param backoff_on_requeue: if ``False``, backoff will only occur on 98 | exception 99 | 100 | :param **kwargs: passed to :class:`~gnsq.NsqdTCPClient` initialization 101 | """ 102 | def __init__(self, topic, channel, nsqd_tcp_addresses=[], 103 | lookupd_http_addresses=[], name=None, message_handler=None, 104 | max_tries=5, max_in_flight=1, requeue_delay=0, 105 | lookupd_poll_interval=60, lookupd_poll_jitter=0.3, 106 | low_ready_idle_timeout=10, max_backoff_duration=128, 107 | backoff_on_requeue=True, **kwargs): 108 | if not nsqd_tcp_addresses and not lookupd_http_addresses: 109 | raise ValueError('must specify at least one nsqd or lookupd') 110 | 111 | self.nsqd_tcp_addresses = parse_nsqds(nsqd_tcp_addresses) 112 | self.lookupds = parse_lookupds(lookupd_http_addresses) 113 | self.iterlookupds = cycle(self.lookupds) 114 | 115 | self.topic = topic 116 | self.channel = channel 117 | self.max_tries = max_tries 118 | self.max_in_flight = max_in_flight 119 | self.requeue_delay = requeue_delay 120 | self.lookupd_poll_interval = lookupd_poll_interval 121 | self.lookupd_poll_jitter = lookupd_poll_jitter 122 | self.low_ready_idle_timeout = low_ready_idle_timeout 123 | self.backoff_on_requeue = backoff_on_requeue 124 | self.max_backoff_duration = max_backoff_duration 125 | self.conn_kwargs = kwargs 126 | 127 | if name: 128 | self.name = name 129 | else: 130 | self.name = '%s.%s.%s' % (__name__, self.topic, self.channel) 131 | 132 | if message_handler is not None: 133 | self.on_message.connect(message_handler, weak=False) 134 | 135 | self.logger = logging.getLogger(self.name) 136 | 137 | self._state = INIT 138 | 
self._redistributed_ready_event = Event() 139 | self._connection_backoffs = defaultdict(self._create_backoff) 140 | self._message_backoffs = defaultdict(self._create_backoff) 141 | 142 | self._connections = {} 143 | self._workers = Group() 144 | self._killables = Group() 145 | 146 | @cached_property 147 | def on_message(self): 148 | """Emitted when a message is received. 149 | 150 | The signal sender is the consumer and the ``message`` is sent as an 151 | argument. The ``message_handler`` param is connected to this signal. 152 | """ 153 | return blinker.Signal(doc='Emitted when a message is received.') 154 | 155 | @cached_property 156 | def on_response(self): 157 | """Emitted when a response is received. 158 | 159 | The signal sender is the consumer and the ``response`` is sent as an 160 | argument. 161 | """ 162 | return blinker.Signal(doc='Emitted when a response is received.') 163 | 164 | @cached_property 165 | def on_error(self): 166 | """Emitted when an error is received. 167 | 168 | The signal sender is the consumer and the ``error`` is sent as an 169 | argument. 170 | """ 171 | return blinker.Signal(doc='Emitted when a error is received.') 172 | 173 | @cached_property 174 | def on_finish(self): 175 | """Emitted after a message is successfully finished. 176 | 177 | The signal sender is the consumer and the ``message_id`` is sent as an 178 | argument. 179 | """ 180 | return blinker.Signal(doc='Emitted after the a message is finished.') 181 | 182 | @cached_property 183 | def on_requeue(self): 184 | """Emitted after a message is requeued. 185 | 186 | The signal sender is the consumer and the ``message_id`` and ``timeout`` 187 | are sent as arguments. 188 | """ 189 | return blinker.Signal(doc='Emitted after the a message is requeued.') 190 | 191 | @cached_property 192 | def on_giving_up(self): 193 | """Emitted after a giving up on a message. 
194 | 195 | Emitted when a message has exceeded the maximum number of attempts 196 | (``max_tries``) and will no longer be requeued. This is useful to 197 | perform tasks such as writing to disk, collecting statistics etc. The 198 | signal sender is the consumer and the ``message`` is sent as an 199 | argument. 200 | """ 201 | return blinker.Signal(doc='Sent after a giving up on a message.') 202 | 203 | @cached_property 204 | def on_auth(self): 205 | """Emitted after a connection is successfully authenticated. 206 | 207 | The signal sender is the consumer and the ``conn`` and parsed 208 | ``response`` are sent as arguments. 209 | """ 210 | return blinker.Signal(doc='Emitted when a response is received.') 211 | 212 | @cached_property 213 | def on_exception(self): 214 | """Emitted when an exception is caught while handling a message. 215 | 216 | The signal sender is the consumer and the ``message`` and ``error`` are 217 | sent as arguments. 218 | """ 219 | return blinker.Signal(doc='Emitted when an exception is caught.') 220 | 221 | @cached_property 222 | def on_close(self): 223 | """Emitted after :meth:`close`. 224 | 225 | The signal sender is the consumer. 
226 | """ 227 | return blinker.Signal(doc='Emitted after the consumer is closed.') 228 | 229 | def start(self, block=True): 230 | """Start discovering and listing to connections.""" 231 | if self._state == INIT: 232 | if not any(self.on_message.receivers_for(blinker.ANY)): 233 | raise RuntimeError('no receivers connected to on_message') 234 | 235 | self.logger.debug('starting %s...', self.name) 236 | self._state = RUNNING 237 | self.query_nsqd() 238 | 239 | if self.lookupds: 240 | self.query_lookupd() 241 | self._killables.add(self._workers.spawn(self._poll_lookupd)) 242 | 243 | self._killables.add(self._workers.spawn(self._poll_ready)) 244 | 245 | else: 246 | self.logger.warning('%s already started', self.name) 247 | 248 | if block: 249 | self.join() 250 | 251 | def close(self): 252 | """Immediately close all connections and stop workers.""" 253 | if not self.is_running: 254 | return 255 | 256 | self._state = CLOSED 257 | 258 | self.logger.debug('killing %d worker(s)', len(self._killables)) 259 | self._killables.kill(block=False) 260 | 261 | self.logger.debug('closing %d connection(s)', len(self._connections)) 262 | for conn in self._connections: 263 | conn.close_stream() 264 | 265 | self.on_close.send(self) 266 | 267 | def join(self, timeout=None, raise_error=False): 268 | """Block until all connections have closed and workers stopped.""" 269 | self._workers.join(timeout, raise_error) 270 | 271 | @property 272 | def is_running(self): 273 | """Check if consumer is currently running.""" 274 | return self._state == RUNNING 275 | 276 | @property 277 | def is_starved(self): 278 | """Evaluate whether any of the connections are starved. 279 | 280 | This property should be used by message handlers to reliably identify 281 | when to process a batch of messages. 
282 | """ 283 | return any(conn.is_starved for conn in self._connections) 284 | 285 | @property 286 | def total_ready_count(self): 287 | return sum(c.ready_count for c in self._connections) 288 | 289 | @property 290 | def total_in_flight(self): 291 | return sum(c.in_flight for c in self._connections) 292 | 293 | def query_nsqd(self): 294 | self.logger.debug('querying nsqd...') 295 | for address in self.nsqd_tcp_addresses: 296 | address, port = address.split(':') 297 | self.connect_to_nsqd(address, int(port)) 298 | 299 | def query_lookupd(self): 300 | self.logger.debug('querying lookupd...') 301 | lookupd = next(self.iterlookupds) 302 | 303 | try: 304 | producers = lookupd.lookup(self.topic)['producers'] 305 | self.logger.debug('found %d producers', len(producers)) 306 | 307 | except Exception as error: 308 | self.logger.warning( 309 | 'Failed to lookup %s on %s (%s)', 310 | self.topic, lookupd.address, error) 311 | return 312 | 313 | for producer in producers: 314 | self.connect_to_nsqd( 315 | producer['broadcast_address'], producer['tcp_port']) 316 | 317 | def _poll_lookupd(self): 318 | try: 319 | delay = self.lookupd_poll_interval * self.lookupd_poll_jitter 320 | gevent.sleep(random.random() * delay) 321 | 322 | while True: 323 | gevent.sleep(self.lookupd_poll_interval) 324 | self.query_lookupd() 325 | 326 | except gevent.GreenletExit: 327 | pass 328 | 329 | def _poll_ready(self): 330 | try: 331 | while True: 332 | if self._redistributed_ready_event.wait(5): 333 | self._redistributed_ready_event.clear() 334 | self._redistribute_ready_state() 335 | 336 | except gevent.GreenletExit: 337 | pass 338 | 339 | def _redistribute_ready_state(self): 340 | if not self.is_running: 341 | return 342 | 343 | if len(self._connections) > self.max_in_flight: 344 | ready_state = self._get_unsaturated_ready_state() 345 | else: 346 | ready_state = self._get_saturated_ready_state() 347 | 348 | for conn, count in ready_state.items(): 349 | if conn.ready_count == count: 350 | 
self.logger.debug('[%s] RDY count already %d', conn, count) 351 | continue 352 | 353 | self.logger.debug('[%s] sending RDY %d', conn, count) 354 | 355 | try: 356 | conn.ready(count) 357 | except NSQSocketError as error: 358 | self.logger.warning( 359 | '[%s] RDY %d failed (%r)', conn, count, error) 360 | 361 | def _get_unsaturated_ready_state(self): 362 | ready_state = {} 363 | active = [] 364 | 365 | for conn, state in self._connections.items(): 366 | if state == BACKOFF: 367 | ready_state[conn] = 0 368 | 369 | elif state in (RUNNING, THROTTLED): 370 | active.append(conn) 371 | 372 | random.shuffle(active) 373 | 374 | for conn in active[self.max_in_flight:]: 375 | ready_state[conn] = 0 376 | 377 | for conn in active[:self.max_in_flight]: 378 | ready_state[conn] = 1 379 | 380 | return ready_state 381 | 382 | def _get_saturated_ready_state(self): 383 | ready_state = {} 384 | active = [] 385 | now = time.time() 386 | 387 | for conn, state in self._connections.items(): 388 | if state == BACKOFF: 389 | ready_state[conn] = 0 390 | 391 | elif state == THROTTLED: 392 | ready_state[conn] = 1 393 | 394 | elif state == RUNNING: 395 | if (now - conn.last_message) > self.low_ready_idle_timeout: 396 | self.logger.info( 397 | '[%s] idle connection, giving up RDY count', conn) 398 | ready_state[conn] = 1 399 | 400 | else: 401 | active.append(conn) 402 | 403 | if not active: 404 | return ready_state 405 | 406 | ready_available = self.max_in_flight - sum(ready_state.values()) 407 | connection_max_in_flight = ready_available // len(active) 408 | 409 | for conn in active: 410 | ready_state[conn] = connection_max_in_flight 411 | 412 | for conn in random.sample(active, ready_available % len(active)): 413 | ready_state[conn] += 1 414 | 415 | return ready_state 416 | 417 | def redistribute_ready_state(self): 418 | self._redistributed_ready_event.set() 419 | 420 | def connect_to_nsqd(self, address, port): 421 | if not self.is_running: 422 | return 423 | 424 | conn = NsqdTCPClient(address, 
port, **self.conn_kwargs) 425 | if conn in self._connections: 426 | self.logger.debug('[%s] already connected', conn) 427 | return 428 | 429 | self._connections[conn] = INIT 430 | self.logger.debug('[%s] connecting...', conn) 431 | 432 | conn.on_message.connect(self.handle_message) 433 | conn.on_response.connect(self.handle_response) 434 | conn.on_error.connect(self.handle_error) 435 | conn.on_finish.connect(self.handle_finish) 436 | conn.on_requeue.connect(self.handle_requeue) 437 | conn.on_auth.connect(self.handle_auth) 438 | 439 | try: 440 | conn.connect() 441 | conn.identify() 442 | 443 | if conn.max_ready_count < self.max_in_flight: 444 | msg = ( 445 | '[%s] max RDY count %d < consumer max in flight %d, ' 446 | 'truncation possible') 447 | 448 | self.logger.warning( 449 | msg, conn, conn.max_ready_count, self.max_in_flight) 450 | 451 | conn.subscribe(self.topic, self.channel) 452 | 453 | except NSQException as error: 454 | self.logger.warning('[%s] connection failed (%r)', conn, error) 455 | self.handle_connection_failure(conn) 456 | return 457 | 458 | # Check if we've closed since we started 459 | if not self.is_running: 460 | self.handle_connection_failure(conn) 461 | return 462 | 463 | self.logger.info('[%s] connection successful', conn) 464 | self.handle_connection_success(conn) 465 | 466 | def _listen(self, conn): 467 | try: 468 | conn.listen() 469 | except NSQException as error: 470 | self.logger.warning('[%s] connection lost (%r)', conn, error) 471 | 472 | self.handle_connection_failure(conn) 473 | 474 | def handle_connection_success(self, conn): 475 | self._connections[conn] = THROTTLED 476 | self._workers.spawn(self._listen, conn) 477 | self.redistribute_ready_state() 478 | 479 | if str(conn) not in self.nsqd_tcp_addresses: 480 | return 481 | 482 | self._connection_backoffs[conn].success() 483 | 484 | def handle_connection_failure(self, conn): 485 | del self._connections[conn] 486 | conn.close_stream() 487 | 488 | if not self.is_running: 489 | return 
490 | 491 | self.redistribute_ready_state() 492 | 493 | if str(conn) not in self.nsqd_tcp_addresses: 494 | return 495 | 496 | seconds = self._connection_backoffs[conn].failure().get_interval() 497 | self.logger.debug('[%s] retrying in %ss', conn, seconds) 498 | 499 | gevent.spawn_later( 500 | seconds, self.connect_to_nsqd, conn.address, conn.port) 501 | 502 | def handle_auth(self, conn, response): 503 | metadata = [] 504 | if response.get('identity'): 505 | metadata.append("Identity: %r" % response['identity']) 506 | 507 | if response.get('permission_count'): 508 | metadata.append("Permissions: %d" % response['permission_count']) 509 | 510 | if response.get('identity_url'): 511 | metadata.append(response['identity_url']) 512 | 513 | self.logger.info('[%s] AUTH accepted %s', conn, ' '.join(metadata)) 514 | self.on_auth.send(self, conn=conn, response=response) 515 | 516 | def handle_response(self, conn, response): 517 | self.logger.debug('[%s] response: %s', conn, response) 518 | self.on_response.send(self, response=response) 519 | 520 | def handle_error(self, conn, error): 521 | self.logger.debug('[%s] error: %s', conn, error) 522 | self.on_error.send(self, error=error) 523 | 524 | def _handle_message(self, message): 525 | if self.max_tries and message.attempts > self.max_tries: 526 | self.logger.warning( 527 | "giving up on message '%s' after max tries %d", 528 | message.id, self.max_tries) 529 | self.on_giving_up.send(self, message=message) 530 | return message.finish() 531 | 532 | self.on_message.send(self, message=message) 533 | 534 | if not self.is_running: 535 | return 536 | 537 | if message.is_async(): 538 | return 539 | 540 | if message.has_responded(): 541 | return 542 | 543 | message.finish() 544 | 545 | def handle_message(self, conn, message): 546 | self.logger.debug('[%s] got message: %s', conn, message.id) 547 | 548 | try: 549 | return self._handle_message(message) 550 | 551 | except NSQRequeueMessage as error: 552 | if error.backoff is None: 553 | 
backoff = self.backoff_on_requeue 554 | else: 555 | backoff = error.backoff 556 | 557 | except Exception as error: 558 | backoff = True 559 | self.logger.exception( 560 | '[%s] caught exception while handling message', conn) 561 | self.on_exception.send(self, message=message, error=error) 562 | 563 | if not self.is_running: 564 | return 565 | 566 | if message.has_responded(): 567 | return 568 | 569 | try: 570 | message.requeue(self.requeue_delay, backoff) 571 | except NSQException as error: 572 | self.logger.warning( 573 | '[%s] error requeueing message (%r)', conn, error) 574 | 575 | def _create_backoff(self): 576 | return BackoffTimer(max_interval=self.max_backoff_duration) 577 | 578 | def _start_backoff(self, conn): 579 | self._connections[conn] = BACKOFF 580 | 581 | interval = self._message_backoffs[conn].get_interval() 582 | gevent.spawn_later(interval, self._start_throttled, conn) 583 | 584 | self.logger.info('[%s] backing off for %s seconds', conn, interval) 585 | self.redistribute_ready_state() 586 | 587 | def _start_throttled(self, conn): 588 | if self._connections.get(conn) != BACKOFF: 589 | return 590 | 591 | self._connections[conn] = THROTTLED 592 | self.logger.info('[%s] testing backoff state with RDY 1', conn) 593 | self.redistribute_ready_state() 594 | 595 | def _complete_backoff(self, conn): 596 | if self._message_backoffs[conn].is_reset(): 597 | self._connections[conn] = RUNNING 598 | self.logger.info('throttle complete, resuming normal operation') 599 | self.redistribute_ready_state() 600 | else: 601 | self._start_backoff(conn) 602 | 603 | def _finish_message(self, conn, backoff): 604 | if not self.max_backoff_duration: 605 | return 606 | 607 | try: 608 | state = self._connections[conn] 609 | except KeyError: 610 | return 611 | 612 | if state == BACKOFF: 613 | return 614 | 615 | if backoff: 616 | self._message_backoffs[conn].failure() 617 | self._start_backoff(conn) 618 | 619 | elif state == THROTTLED: 620 | self._message_backoffs[conn].success() 
621 | self._complete_backoff(conn) 622 | 623 | def handle_finish(self, conn, message_id): 624 | self.logger.debug('[%s] finished message: %s', conn, message_id) 625 | self._finish_message(conn, backoff=False) 626 | self.on_finish.send(self, message_id=message_id) 627 | 628 | def handle_requeue(self, conn, message_id, timeout, backoff): 629 | self.logger.debug( 630 | '[%s] requeued message: %s (%s)', conn, message_id, timeout) 631 | self._finish_message(conn, backoff=backoff) 632 | self.on_requeue.send(self, message_id=message_id, timeout=timeout) 633 | -------------------------------------------------------------------------------- /gnsq/contrib/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """gnsq.contrib 3 | 4 | Patterns and best practices for gnsq made code. 5 | """ 6 | -------------------------------------------------------------------------------- /gnsq/contrib/batch.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import logging 3 | import warnings 4 | 5 | import gevent.queue 6 | import gevent.pool 7 | 8 | from gnsq.errors import NSQException 9 | 10 | 11 | TIMEOUT_WARNING = 'batching timed out. batch size may be to large' 12 | STARVED_WARNING = 'consumer is starved. batch size may be to large' 13 | 14 | 15 | class BatchHandler(object): 16 | """Batch message handler for gnsq. 17 | 18 | It is recommended to use a max inflight greater than the batch size. 
19 | 20 | Example usage:: 21 | 22 | >>> consumer = Consumer('topic', 'worker', max_in_flight=16) 23 | >>> consumer.on_message.connect(BatchHandler(8, my_handler), weak=False) 24 | """ 25 | def __init__(self, batch_size, handle_batch=None, handle_message=None, 26 | handle_batch_error=None, handle_message_error=None, 27 | timeout=10, spawn=gevent.spawn): 28 | self.logger = logging.getLogger(__name__) 29 | self.message_channel = gevent.queue.Channel() 30 | self.batch_size = batch_size 31 | self.timeout = timeout 32 | 33 | if isinstance(spawn, int): 34 | spawn = gevent.pool.Pool(spawn).spawn 35 | 36 | self.spawn = spawn 37 | 38 | if handle_batch is not None: 39 | self.handle_batch = handle_batch 40 | 41 | if handle_message is not None: 42 | self.handle_message = handle_message 43 | 44 | if handle_batch_error is not None: 45 | self.handle_batch_error = handle_batch_error 46 | 47 | if handle_message_error is not None: 48 | self.handle_message_error = handle_message_error 49 | 50 | self.worker = gevent.spawn(self._run) 51 | 52 | def __call__(self, consumer, message): 53 | message.enable_async() 54 | self.message_channel.put(message) 55 | 56 | if consumer.is_starved: 57 | self.message_channel.put(StopIteration) 58 | 59 | def _run(self): 60 | while True: 61 | messages = [] 62 | 63 | while len(messages) < self.batch_size: 64 | try: 65 | message = self.message_channel.get(timeout=self.timeout) 66 | except gevent.queue.Empty: 67 | warnings.warn(TIMEOUT_WARNING, RuntimeWarning) 68 | break 69 | 70 | if message is StopIteration: 71 | warnings.warn(STARVED_WARNING, RuntimeWarning) 72 | break 73 | 74 | messages.append(message) 75 | 76 | if messages: 77 | self.spawn(self.run_batch, messages) 78 | 79 | def finish_message(self, message): 80 | if message.has_responded(): 81 | return 82 | try: 83 | message.finish() 84 | except NSQException as error: 85 | self.logger.warning('error finishing message (%r)', error) 86 | 87 | def finish_messages(self, messages): 88 | for message in 
messages: 89 | self.finish_message(message) 90 | 91 | def requeue_message(self, message): 92 | if message.has_responded(): 93 | return 94 | try: 95 | message.requeue() 96 | except NSQException as error: 97 | self.logger.warning('error requeueing message (%r)', error) 98 | 99 | def requeue_messages(self, messages): 100 | for message in messages: 101 | if message.has_responded(): 102 | continue 103 | self.requeue_message(message) 104 | 105 | def run_batch(self, messages): 106 | batch = [] 107 | 108 | for message in messages: 109 | try: 110 | batch.append(self.handle_message(message)) 111 | except Exception as error: 112 | self.logger.exception('caught exception while handling message') 113 | self.handle_message_error(error, message) 114 | self.requeue_message(message) 115 | 116 | if batch: 117 | try: 118 | self.handle_batch(batch) 119 | except Exception as error: 120 | self.logger.exception('caught exception while handling batch') 121 | self.handle_batch_error(error, messages, batch) 122 | self.requeue_messages(messages) 123 | return 124 | 125 | self.finish_messages(messages) 126 | 127 | def handle_message(self, message): 128 | """Handle a single message. 129 | 130 | Over ride this to provide some processing and an individual message. 131 | The result of this function is what is passed to :meth:`handle_batch`. 132 | This may be overridden or passed into the constructor. By default it 133 | simply returns the message. 134 | 135 | Raising an exception in :meth:`handle_message` will cause that message 136 | to be requeued and excluded from the batch. 137 | """ 138 | return message 139 | 140 | def handle_batch(self, messages): 141 | """Handle a batch message. 142 | 143 | Processes a batch of messages. You must provide a :meth:`handle_batch` 144 | function to the constructor or override this method. 145 | 146 | Raising an exception in :meth:`handle_batch` will cause all messages in 147 | the batch to be requeued. 
class LogGiveupHandler(object):
    """Write the body of given-up messages to a log.

    Each message the consumer gives up on is rendered via
    :meth:`format_message` and appended, followed by ``newline``, using the
    ``log`` callable (``sys.stdout.write`` by default). Subclass and override
    :meth:`format_message` to customize the output. Assuming messages do not
    requeued using the `to_nsq` utility.

    Example usage::

        >>> fp = open('topic.__BURY__.log', 'w')
        >>> consumer.on_giving_up.connect(
        ...     LogGiveupHandler(fp.write), weak=False)
    """
    def __init__(self, log=sys.stdout.write, newline='\n'):
        self.log = log
        self.newline = newline

    def format_message(self, message):
        """Return the text to log for *message* (the raw body by default)."""
        return message.body

    def __call__(self, consumer, message):
        # Signal handler signature: (sender, message).
        entry = self.format_message(message)
        self.log(entry + self.newline)
class NsqdGiveupHandler(object):
    """Send messages to nsq on giveup.

    Forwards the message body to the given topic where it can be inspected and
    requeued. This can be customized by subclassing and implementing
    :meth:`format_message`. Messages can be requeued with the `nsq_to_nsq`
    utility.

    Example usage::

        >>> giveup_handler = NsqdGiveupHandler('topic.__BURY__')
        >>> consumer.on_giving_up.connect(giveup_handler)
    """

    # Note: the default is a tuple rather than a list to avoid the shared
    # mutable-default-argument pitfall; any iterable of hosts is accepted.
    def __init__(self, topic, nsqd_hosts=('localhost',),
                 nsqd_class=gnsq.NsqdHTTPClient):
        if not nsqd_hosts:
            raise ValueError('at least one nsqd host is required')
        self.topic = topic
        # Round-robin over the configured nsqd instances.
        self.nsqds = itertools.cycle([nsqd_class(host) for host in nsqd_hosts])

    def format_message(self, message):
        """Return the payload published to nsq for *message*."""
        return message.body

    def __call__(self, consumer, message):
        nsq = next(self.nsqds)
        nsq.publish(self.topic, self.format_message(message))


# -------------------------------------------------------------------------
# gnsq/contrib/queue.py
# -------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import gevent.queue


class QueueHandler(gevent.queue.Queue):
    """Iterator like api for gnsq.

    Example usage::

        >>> queue = QueueHandler()
        >>> consumer = Consumer('topic', 'worker', max_in_flight=16)
        >>> consumer.on_message.connect(queue)
        >>> consumer.start(block=False)
        >>> for message in queue:
        ...     print(message.body)
        ...     message.finish()

    Or give it to a pool::

        >>> gevent.pool.Pool().map(queue, my_handler)

    :param maxsize: maximum number of messages that can be queued. If less than
        or equal to zero or None, the queue size is infinite.
    """

    def __call__(self, consumer, message):
        # Take ownership of the response; the consumer will no longer
        # auto-finish the message when the handler returns.
        message.enable_async()
        self.put(message)


class ChannelHandler(gevent.queue.Channel):
    """Iterator like api for gnsq.

    Like :class:`QueueHandler` with a ``maxsize`` of ``1``.
    """

    def __call__(self, consumer, message):
        message.enable_async()
        self.put(message)
class SentryExceptionHandler(object):
    """Log gnsq exceptions to sentry.

    Example usage::

        >>> from raven import Sentry
        >>> sentry = Sentry()
        >>> consumer.on_exception.connect(
        ...     SentryExceptionHandler(sentry), weak=False)
    """

    def __init__(self, client):
        # ``client`` is expected to expose raven's ``captureException`` API.
        self.client = client

    def message_extra(self, message):
        """Build the ``extra`` payload describing the offending message."""
        return {
            'id': message.id,
            'timestamp': message.timestamp,
            'attempts': message.attempts,
            'body': message.body,
        }

    def __call__(self, consumer, message, error):
        extra = {}

        if message:
            extra['message'] = self.message_extra(message)

        self.client.captureException(extra=extra)


# -------------------------------------------------------------------------
# gnsq/decorators.py
# -------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import functools
import warnings


class cached_property(object):
    """A decorator that converts a function into a lazy property.

    The wrapped function is invoked once on first access and the result is
    stored in the instance ``__dict__`` under the same name, so subsequent
    accesses bypass this descriptor entirely.
    """

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self

        if self.__name__ in obj.__dict__:
            return obj.__dict__[self.__name__]

        value = obj.__dict__[self.__name__] = self.func(obj)
        return value


def deprecated(fn):
    """Mark a function as deprecated and warn the user on use."""
    # Compute the warning text once, and fall back to the function name so
    # that decorating an undocumented function cannot raise AttributeError
    # at call time (``None.split`` previously would).
    message = (fn.__doc__ or fn.__name__).split('\n')[0]

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        warnings.warn(message, category=DeprecationWarning, stacklevel=2)
        return fn(*args, **kwargs)

    return wrapper
NSQRequeueMessage(NSQException): 11 | def __init__(self, backoff=None): 12 | self.backoff = backoff 13 | super(NSQRequeueMessage, self).__init__() 14 | 15 | 16 | class NSQNoConnections(NSQException): 17 | pass 18 | 19 | 20 | class NSQHttpError(NSQException): 21 | pass 22 | 23 | 24 | class NSQSocketError(socket.error, NSQException): 25 | pass 26 | 27 | 28 | class NSQFrameError(NSQException): 29 | pass 30 | 31 | 32 | class NSQErrorCode(NSQException): 33 | fatal = True 34 | 35 | 36 | class NSQInvalid(NSQErrorCode): 37 | """E_INVALID""" 38 | pass 39 | 40 | 41 | class NSQBadBody(NSQErrorCode): 42 | """E_BAD_BODY""" 43 | pass 44 | 45 | 46 | class NSQBadTopic(NSQErrorCode): 47 | """E_BAD_TOPIC""" 48 | pass 49 | 50 | 51 | class NSQBadChannel(NSQErrorCode): 52 | """E_BAD_CHANNEL""" 53 | pass 54 | 55 | 56 | class NSQBadMessage(NSQErrorCode): 57 | """E_BAD_MESSAGE""" 58 | pass 59 | 60 | 61 | class NSQPutFailed(NSQErrorCode): 62 | """E_PUT_FAILED""" 63 | pass 64 | 65 | 66 | class NSQPubFailed(NSQErrorCode): 67 | """E_PUB_FAILED""" 68 | 69 | 70 | class NSQMPubFailed(NSQErrorCode): 71 | """E_MPUB_FAILED""" 72 | 73 | 74 | class NSQAuthDisabled(NSQErrorCode): 75 | """E_AUTH_DISABLED""" 76 | 77 | 78 | class NSQAuthFailed(NSQErrorCode): 79 | """E_AUTH_FAILED""" 80 | 81 | 82 | class NSQUnauthorized(NSQErrorCode): 83 | """E_UNAUTHORIZED""" 84 | 85 | 86 | class NSQFinishFailed(NSQErrorCode): 87 | """E_FIN_FAILED""" 88 | fatal = False 89 | 90 | 91 | class NSQRequeueFailed(NSQErrorCode): 92 | """E_REQ_FAILED""" 93 | fatal = False 94 | 95 | 96 | class NSQTouchFailed(NSQErrorCode): 97 | """E_TOUCH_FAILED""" 98 | fatal = False 99 | 100 | 101 | ERROR_CODES = { 102 | b'E_INVALID': NSQInvalid, 103 | b'E_BAD_BODY': NSQBadBody, 104 | b'E_BAD_TOPIC': NSQBadTopic, 105 | b'E_BAD_CHANNEL': NSQBadChannel, 106 | b'E_BAD_MESSAGE': NSQBadMessage, 107 | b'E_PUT_FAILED': NSQPutFailed, 108 | b'E_PUB_FAILED': NSQPubFailed, 109 | b'E_MPUB_FAILED': NSQMPubFailed, 110 | b'E_FINISH_FAILED': NSQFinishFailed, 111 
| b'E_AUTH_DISABLED': NSQAuthDisabled, 112 | b'E_AUTH_FAILED': NSQAuthFailed, 113 | b'E_UNAUTHORIZED': NSQUnauthorized, 114 | b'E_FIN_FAILED': NSQFinishFailed, 115 | b'E_REQUEUE_FAILED': NSQRequeueFailed, 116 | b'E_REQ_FAILED': NSQRequeueFailed, 117 | b'E_TOUCH_FAILED': NSQTouchFailed 118 | } 119 | 120 | 121 | def make_error(error_code): 122 | parts = error_code.split(None, 1) 123 | return ERROR_CODES.get(parts[0], NSQErrorCode)(parts[-1]) 124 | -------------------------------------------------------------------------------- /gnsq/httpclient.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import 3 | 4 | import json 5 | 6 | import urllib3 7 | 8 | from .errors import NSQHttpError 9 | from .version import __version__ 10 | 11 | USERAGENT = 'gnsq/{}'.format(__version__) 12 | 13 | 14 | def _encode(value): 15 | if isinstance(value, bytes): 16 | return value 17 | return value.encode('utf-8') 18 | 19 | 20 | def _encode_dict(value): 21 | if value is None: 22 | return None 23 | return {_encode(k): _encode(v) for k, v in value.items()} 24 | 25 | 26 | class HTTPClient(object): 27 | @classmethod 28 | def from_url(cls, url, **kwargs): 29 | """Create a client from a url.""" 30 | url = urllib3.util.parse_url(url) 31 | if url.host: 32 | kwargs.setdefault('host', url.host) 33 | 34 | if url.port: 35 | kwargs.setdefault('port', url.port) 36 | 37 | if url.scheme == 'https': 38 | kwargs.setdefault('connection_class', urllib3.HTTPSConnectionPool) 39 | 40 | return cls(**kwargs) 41 | 42 | def __init__(self, host, port, useragent=USERAGENT, 43 | connection_class=urllib3.HTTPConnectionPool, **kwargs): 44 | self.useragent = useragent 45 | self._connection = connection_class(host, port, **kwargs) 46 | 47 | @property 48 | def scheme(self): 49 | return self._connection.scheme 50 | 51 | @property 52 | def host(self): 53 | return self._connection.host 54 | 55 | @property 56 | def port(self): 57 | 
return self._connection.port 58 | 59 | @property 60 | def address(self): 61 | return '{}://{}:{}/'.format(self.scheme, self.host, self.port) 62 | 63 | def _request(self, method, url, headers={}, fields=None, **kwargs): 64 | headers = dict(headers) 65 | headers.setdefault('Accept', 'application/vnd.nsq version=1.0') 66 | headers.setdefault('User-Agent', self.useragent) 67 | 68 | response = self._connection.request_encode_url( 69 | method, url, headers=_encode_dict(headers), 70 | fields=_encode_dict(fields), **kwargs) 71 | 72 | if 'application/json' in response.getheader('content-type', ''): 73 | return self._http_check_json(response) 74 | 75 | return self._http_check(response) 76 | 77 | def _http_check(self, response): 78 | if response.status != 200: 79 | raise NSQHttpError('http error <{}>'.format(response.status)) 80 | return response.data 81 | 82 | def _http_check_json(self, response): 83 | try: 84 | data = json.loads(response.data.decode('utf-8')) 85 | except ValueError: 86 | return self._http_check(response) 87 | 88 | if response.status != 200: 89 | status_txt = data.get('status_txt', 'http error') 90 | raise NSQHttpError('{} <{}>'.format(status_txt, response.status)) 91 | 92 | # Handle 1.0.0-compat vs 0.x versions 93 | try: 94 | return data['data'] 95 | except KeyError: 96 | return data 97 | 98 | def __repr__(self): 99 | return '<{!s} {!r}>'.format(type(self).__name__, self.address) 100 | -------------------------------------------------------------------------------- /gnsq/lookupd.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import 3 | 4 | import urllib3 5 | 6 | from . import protocol as nsq 7 | from .decorators import deprecated 8 | from .httpclient import HTTPClient 9 | 10 | 11 | class LookupdClient(HTTPClient): 12 | """Low level http client for nsqlookupd. 
class LookupdClient(HTTPClient):
    """Low level http client for nsqlookupd.

    :param host: nsqlookupd host address (default: localhost)

    :param port: nsqlookupd http port (default: 4161)

    :param useragent: useragent sent to nsqlookupd (default:
        ``gnsq/<version>``)

    :param connection_class: override the http connection class
    """
    def __init__(self, host='localhost', port=4161, **kwargs):
        super(LookupdClient, self).__init__(host, port, **kwargs)

    def lookup(self, topic):
        """Returns producers for a topic."""
        nsq.assert_valid_topic_name(topic)
        return self._request('GET', '/lookup', fields={'topic': topic})

    def topics(self):
        """Returns all known topics."""
        return self._request('GET', '/topics')

    def channels(self, topic):
        """Returns all known channels of a topic."""
        nsq.assert_valid_topic_name(topic)
        return self._request('GET', '/channels', fields={'topic': topic})

    def nodes(self):
        """Returns all known nsqd."""
        return self._request('GET', '/nodes')

    def create_topic(self, topic):
        """Add a topic to nsqlookupd's registry."""
        nsq.assert_valid_topic_name(topic)
        return self._request('POST', '/topic/create', fields={'topic': topic})

    def delete_topic(self, topic):
        """Deletes an existing topic."""
        nsq.assert_valid_topic_name(topic)
        return self._request('POST', '/topic/delete', fields={'topic': topic})

    def create_channel(self, topic, channel):
        """Add a channel to nsqlookupd's registry."""
        nsq.assert_valid_topic_name(topic)
        nsq.assert_valid_channel_name(channel)
        return self._request('POST', '/channel/create',
                             fields={'topic': topic, 'channel': channel})

    def delete_channel(self, topic, channel):
        """Deletes an existing channel of an existing topic."""
        nsq.assert_valid_topic_name(topic)
        nsq.assert_valid_channel_name(channel)
        return self._request('POST', '/channel/delete',
                             fields={'topic': topic, 'channel': channel})

    def tombstone_topic(self, topic, node):
        """Tombstones a specific producer of an existing topic."""
        nsq.assert_valid_topic_name(topic)
        return self._request('POST', '/topic/tombstone',
                             fields={'topic': topic, 'node': node})

    def ping(self):
        """Monitoring endpoint.

        :returns: should return `"OK"`, otherwise raises an exception.
        """
        return self._request('GET', '/ping')

    def info(self):
        """Returns version information."""
        return self._request('GET', '/info')


class Lookupd(LookupdClient):
    """Use :class:`LookupdClient` instead.

    .. deprecated:: 1.0.0
    """

    @deprecated
    def __init__(self, address='http://localhost:4161/', **kwargs):
        """Use :meth:`LookupdClient.from_url` instead.

        .. deprecated:: 1.0.0
        """
        self.address = address

        url = urllib3.util.parse_url(address)
        if url.host:
            kwargs.setdefault('host', url.host)

        if url.port:
            kwargs.setdefault('port', url.port)

        if url.scheme == 'https':
            kwargs.setdefault('connection_class', urllib3.HTTPSConnectionPool)

        # ``__init__`` should not return a value; the previous
        # ``return super(...)`` only worked because __init__ returns None.
        super(Lookupd, self).__init__(**kwargs)

    @property
    @deprecated
    def base_url(self):
        """Use :attr:`LookupdClient.address` instead.

        .. deprecated:: 1.0.0
        """
        return self.address

    @deprecated
    def tombstone_topic_producer(self, topic, node):
        """Use :meth:`LookupdClient.tombstone_topic` instead.

        .. deprecated:: 1.0.0
        """
        return self.tombstone_topic(topic, node)
deprecated:: 1.0.0 126 | """ 127 | return self.tombstone_topic(topic, node) 128 | -------------------------------------------------------------------------------- /gnsq/message.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import 3 | import blinker 4 | from .decorators import cached_property 5 | from .errors import NSQException 6 | 7 | 8 | class Message(object): 9 | """A class representing a message received from nsqd.""" 10 | def __init__(self, timestamp, attempts, id, body): 11 | self.timestamp = timestamp 12 | self.attempts = attempts 13 | self.id = id 14 | self.body = body 15 | self._has_responded = False 16 | self._is_async = False 17 | 18 | @cached_property 19 | def on_finish(self): 20 | """Emitted after :meth:`finish`. 21 | 22 | The signal sender is the message instance. 23 | """ 24 | return blinker.Signal(doc='Emitted after message is finished.') 25 | 26 | @cached_property 27 | def on_requeue(self): 28 | """Emitted after :meth:`requeue`. 29 | 30 | The signal sender is the message instance and sends the ``timeout`` and 31 | a ``backoff`` flag as arguments. 32 | """ 33 | return blinker.Signal(doc='Emitted after message is requeued.') 34 | 35 | @cached_property 36 | def on_touch(self): 37 | """Emitted after :meth:`touch`. 38 | 39 | The signal sender is the message instance. 40 | """ 41 | return blinker.Signal(doc='Emitted after message is touched.') 42 | 43 | def enable_async(self): 44 | """Enables asynchronous processing for this message. 45 | 46 | :class:`~gnsq.Consumer` will not automatically respond to the message 47 | upon return of :meth:`~gnsq.Consumer.handle_message`. 
48 | """ 49 | self._is_async = True 50 | 51 | def is_async(self): 52 | """Returns whether or not asynchronous processing has been enabled.""" 53 | return self._is_async 54 | 55 | def has_responded(self): 56 | """Returns whether or not this message has been responded to.""" 57 | return self._has_responded 58 | 59 | def finish(self): 60 | """ 61 | Respond to nsqd that you’ve processed this message successfully 62 | (or would like to silently discard it). 63 | """ 64 | if self._has_responded: 65 | raise NSQException('already responded') 66 | self._has_responded = True 67 | self.on_finish.send(self) 68 | 69 | def requeue(self, time_ms=0, backoff=True): 70 | """ 71 | Respond to nsqd that you’ve failed to process this message successfully 72 | (and would like it to be requeued). 73 | """ 74 | if self._has_responded: 75 | raise NSQException('already responded') 76 | self._has_responded = True 77 | self.on_requeue.send(self, timeout=time_ms, backoff=backoff) 78 | 79 | def touch(self): 80 | """Respond to nsqd that you need more time to process the message.""" 81 | if self._has_responded: 82 | raise NSQException('already responded') 83 | self.on_touch.send(self) 84 | -------------------------------------------------------------------------------- /gnsq/nsqd.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import 3 | 4 | import json 5 | import time 6 | 7 | import blinker 8 | 9 | from gevent import socket 10 | 11 | from . import protocol as nsq 12 | from . 
import errors 13 | 14 | from .decorators import cached_property, deprecated 15 | from .httpclient import HTTPClient, USERAGENT 16 | from .message import Message 17 | from .states import CONNECTED, DISCONNECTED, INIT 18 | from .stream import Stream 19 | 20 | HOSTNAME = socket.gethostname() 21 | SHORTNAME = HOSTNAME.split('.')[0] 22 | 23 | 24 | class NsqdTCPClient(object): 25 | """Low level object representing a TCP connection to nsqd. 26 | 27 | :param address: the host or ip address of the nsqd 28 | 29 | :param port: the nsqd tcp port to connect to 30 | 31 | :param timeout: the timeout for read/write operations (in seconds) 32 | 33 | :param client_id: an identifier used to disambiguate this client (defaults 34 | to the first part of the hostname) 35 | 36 | :param hostname: the hostname where the client is deployed (defaults to the 37 | clients hostname) 38 | 39 | :param heartbeat_interval: the amount of time in seconds to negotiate with 40 | the connected producers to send heartbeats (requires nsqd 0.2.19+) 41 | 42 | :param output_buffer_size: size of the buffer (in bytes) used by nsqd for 43 | buffering writes to this connection 44 | 45 | :param output_buffer_timeout: timeout (in ms) used by nsqd before flushing 46 | buffered writes (set to 0 to disable). Warning: configuring clients with 47 | an extremely low (< 25ms) output_buffer_timeout has a significant effect 48 | on nsqd CPU usage (particularly with > 50 clients connected). 
49 | 50 | :param tls_v1: enable TLS v1 encryption (requires nsqd 0.2.22+) 51 | 52 | :param tls_options: dictionary of options to pass to `ssl.wrap_socket() 53 | `_ 54 | 55 | :param snappy: enable Snappy stream compression (requires nsqd 0.2.23+) 56 | 57 | :param deflate: enable deflate stream compression (requires nsqd 0.2.23+) 58 | 59 | :param deflate_level: configure the deflate compression level for this 60 | connection (requires nsqd 0.2.23+) 61 | 62 | :param sample_rate: take only a sample of the messages being sent to the 63 | client. Not setting this or setting it to 0 will ensure you get all the 64 | messages destined for the client. Sample rate can be greater than 0 or 65 | less than 100 and the client will receive that percentage of the message 66 | traffic. (requires nsqd 0.2.25+) 67 | 68 | :param auth_secret: a string passed when using nsq auth (requires 69 | nsqd 0.2.29+) 70 | 71 | :param user_agent: a string identifying the agent for this client in the 72 | spirit of HTTP (default: ``/``) (requires 73 | nsqd 0.2.25+) 74 | """ 75 | def __init__( 76 | self, 77 | address='127.0.0.1', 78 | port=4150, 79 | timeout=60.0, 80 | client_id=None, 81 | hostname=None, 82 | heartbeat_interval=30, 83 | output_buffer_size=16 * 1024, 84 | output_buffer_timeout=250, 85 | tls_v1=False, 86 | tls_options=None, 87 | snappy=False, 88 | deflate=False, 89 | deflate_level=6, 90 | sample_rate=0, 91 | auth_secret=None, 92 | user_agent=USERAGENT, 93 | ): 94 | self.address = address 95 | self.port = port 96 | self.timeout = timeout 97 | 98 | self.client_id = client_id or SHORTNAME 99 | self.hostname = hostname or HOSTNAME 100 | self.heartbeat_interval = 1000 * heartbeat_interval 101 | self.output_buffer_size = output_buffer_size 102 | self.output_buffer_timeout = output_buffer_timeout 103 | self.tls_v1 = tls_v1 104 | self.tls_options = tls_options 105 | self.snappy = snappy 106 | self.deflate = deflate 107 | self.deflate_level = deflate_level 108 | self.sample_rate = sample_rate 
109 | self.auth_secret = auth_secret 110 | self.user_agent = user_agent 111 | 112 | self.state = INIT 113 | self.last_response = time.time() 114 | self.last_message = time.time() 115 | self.ready_count = 0 116 | self.in_flight = 0 117 | self.max_ready_count = 2500 118 | 119 | self._frame_handlers = { 120 | nsq.FRAME_TYPE_RESPONSE: self.handle_response, 121 | nsq.FRAME_TYPE_ERROR: self.handle_error, 122 | nsq.FRAME_TYPE_MESSAGE: self.handle_message 123 | } 124 | 125 | @cached_property 126 | def on_message(self): 127 | """Emitted when a message frame is received. 128 | 129 | The signal sender is the connection and the ``message`` is sent as an 130 | argument. 131 | """ 132 | return blinker.Signal(doc='Emitted when a message frame is received.') 133 | 134 | @cached_property 135 | def on_response(self): 136 | """Emitted when a response frame is received. 137 | 138 | The signal sender is the connection and the ``response`` is sent as an 139 | argument. 140 | """ 141 | return blinker.Signal(doc='Emitted when a response frame is received.') 142 | 143 | @cached_property 144 | def on_error(self): 145 | """Emitted when an error frame is received. 146 | 147 | The signal sender is the connection and the ``error`` is sent as an 148 | argument. 149 | """ 150 | return blinker.Signal(doc='Emitted when a error frame is received.') 151 | 152 | @cached_property 153 | def on_finish(self): 154 | """Emitted after :meth:`finish`. 155 | 156 | Sent after a message owned by this connection is successfully finished. 157 | The signal sender is the connection and the ``message_id`` is sent as an 158 | argument. 159 | """ 160 | return blinker.Signal(doc='Emitted after the a message is finished.') 161 | 162 | @cached_property 163 | def on_requeue(self): 164 | """Emitted after :meth:`requeue`. 165 | 166 | Sent after a message owned by this connection is requeued. The signal 167 | sender is the connection and the ``message_id``, ``timeout`` and 168 | ``backoff`` flag are sent as arguments. 
169 | """ 170 | return blinker.Signal(doc='Emitted after the a message is requeued.') 171 | 172 | @cached_property 173 | def on_auth(self): 174 | """Emitted after the connection is successfully authenticated. 175 | 176 | The signal sender is the connection and the parsed ``response`` is sent 177 | as arguments. 178 | """ 179 | return blinker.Signal( 180 | doc='Emitted after the connection is successfully authenticated.') 181 | 182 | @cached_property 183 | def on_close(self): 184 | """Emitted after :meth:`close_stream`. 185 | 186 | Sent after the connection socket has closed. The signal sender is the 187 | connection. 188 | """ 189 | return blinker.Signal(doc='Emitted after the connection is closed.') 190 | 191 | @property 192 | def is_connected(self): 193 | """Check if the client is currently connected.""" 194 | return self.state == CONNECTED 195 | 196 | @property 197 | def is_starved(self): 198 | """Evaluate whether the connection is starved. 199 | 200 | This property should be used by message handlers to reliably identify 201 | when to process a batch of messages. 
202 | """ 203 | return self.in_flight >= max(self.ready_count * 0.85, 1) 204 | 205 | def connect(self): 206 | """Initialize connection to the nsqd.""" 207 | if self.state == DISCONNECTED: 208 | raise errors.NSQException('connection already closed') 209 | 210 | if self.is_connected: 211 | return 212 | 213 | stream = Stream(self.address, self.port, self.timeout) 214 | stream.connect() 215 | 216 | self.stream = stream 217 | self.state = CONNECTED 218 | self.send(nsq.MAGIC_V2) 219 | 220 | def close_stream(self): 221 | """Close the underlying socket.""" 222 | if not self.is_connected: 223 | return 224 | 225 | self.stream.close() 226 | self.state = DISCONNECTED 227 | self.on_close.send(self) 228 | 229 | def send(self, data): 230 | try: 231 | return self.stream.send(data) 232 | except Exception: 233 | self.close_stream() 234 | raise 235 | 236 | def _read_response(self): 237 | try: 238 | size = nsq.unpack_size(self.stream.read(4)) 239 | return self.stream.read(size) 240 | except Exception: 241 | self.close_stream() 242 | raise 243 | 244 | def read_response(self): 245 | """Read an individual response from nsqd. 246 | 247 | :returns: tuple of the frame type and the processed data. 
248 | """ 249 | response = self._read_response() 250 | frame, data = nsq.unpack_response(response) 251 | self.last_response = time.time() 252 | 253 | if frame not in self._frame_handlers: 254 | raise errors.NSQFrameError('unknown frame {}'.format(frame)) 255 | 256 | frame_handler = self._frame_handlers[frame] 257 | processed_data = frame_handler(data) 258 | 259 | return frame, processed_data 260 | 261 | def handle_response(self, data): 262 | if data == nsq.HEARTBEAT: 263 | self.nop() 264 | 265 | self.on_response.send(self, response=data) 266 | return data 267 | 268 | def handle_error(self, data): 269 | error = errors.make_error(data) 270 | self.on_error.send(self, error=error) 271 | 272 | if error.fatal: 273 | self.close_stream() 274 | 275 | return error 276 | 277 | def handle_message(self, data): 278 | self.last_message = time.time() 279 | self.in_flight += 1 280 | 281 | message = Message(*nsq.unpack_message(data)) 282 | message.on_finish.connect(self.handle_finish) 283 | message.on_requeue.connect(self.handle_requeue) 284 | message.on_touch.connect(self.handle_touch) 285 | 286 | self.on_message.send(self, message=message) 287 | return message 288 | 289 | def handle_finish(self, message): 290 | self.finish(message.id) 291 | 292 | def handle_requeue(self, message, timeout, backoff): 293 | self.requeue(message.id, timeout, backoff) 294 | 295 | def handle_touch(self, message): 296 | self.touch(message.id) 297 | 298 | def finish_inflight(self): 299 | self.in_flight -= 1 300 | 301 | def listen(self): 302 | """Listen to incoming responses until the connection closes.""" 303 | while self.is_connected: 304 | self.read_response() 305 | 306 | def check_ok(self, expected=nsq.OK): 307 | frame, data = self.read_response() 308 | if frame == nsq.FRAME_TYPE_ERROR: 309 | raise data 310 | 311 | if frame != nsq.FRAME_TYPE_RESPONSE: 312 | raise errors.NSQException('expected response frame') 313 | 314 | if data != expected: 315 | raise errors.NSQException('unexpected response 
{!r}'.format(data)) 316 | 317 | def upgrade_to_tls(self): 318 | self.stream.upgrade_to_tls(**self.tls_options) 319 | self.check_ok() 320 | 321 | def upgrade_to_snappy(self): 322 | self.stream.upgrade_to_snappy() 323 | self.check_ok() 324 | 325 | def upgrade_to_defalte(self): 326 | self.stream.upgrade_to_defalte(self.deflate_level) 327 | self.check_ok() 328 | 329 | def identify(self): 330 | """Update client metadata on the server and negotiate features. 331 | 332 | :returns: nsqd response data if there was feature negotiation, 333 | otherwise ``None`` 334 | """ 335 | self.send(nsq.identify({ 336 | # nsqd 0.2.28+ 337 | 'client_id': self.client_id, 338 | 'hostname': self.hostname, 339 | 340 | # nsqd 0.2.19+ 341 | 'feature_negotiation': True, 342 | 'heartbeat_interval': self.heartbeat_interval, 343 | 344 | # nsqd 0.2.21+ 345 | 'output_buffer_size': self.output_buffer_size, 346 | 'output_buffer_timeout': self.output_buffer_timeout, 347 | 348 | # nsqd 0.2.22+ 349 | 'tls_v1': self.tls_v1, 350 | 351 | # nsqd 0.2.23+ 352 | 'snappy': self.snappy, 353 | 'deflate': self.deflate, 354 | 'deflate_level': self.deflate_level, 355 | 356 | # nsqd nsqd 0.2.25+ 357 | 'sample_rate': self.sample_rate, 358 | 'user_agent': self.user_agent, 359 | })) 360 | 361 | frame, data = self.read_response() 362 | 363 | if frame == nsq.FRAME_TYPE_ERROR: 364 | raise data 365 | 366 | if data == nsq.OK: 367 | return 368 | 369 | try: 370 | data = json.loads(data.decode('utf-8')) 371 | 372 | except ValueError: 373 | self.close_stream() 374 | raise errors.NSQException( 375 | 'failed to parse IDENTIFY response JSON from nsqd: ' 376 | '{!r}'.format(data)) 377 | 378 | self.max_ready_count = data.get('max_rdy_count', self.max_ready_count) 379 | 380 | if self.tls_v1 and data.get('tls_v1'): 381 | self.upgrade_to_tls() 382 | 383 | if self.snappy and data.get('snappy'): 384 | self.upgrade_to_snappy() 385 | 386 | elif self.deflate and data.get('deflate'): 387 | self.deflate_level = data.get('deflate_level', 
self.deflate_level) 388 | self.upgrade_to_defalte() 389 | 390 | if self.auth_secret and data.get('auth_required'): 391 | self.auth() 392 | 393 | return data 394 | 395 | def auth(self): 396 | """Send authorization secret to nsqd.""" 397 | self.send(nsq.auth(self.auth_secret)) 398 | frame, data = self.read_response() 399 | 400 | if frame == nsq.FRAME_TYPE_ERROR: 401 | raise data 402 | 403 | try: 404 | response = json.loads(data.decode('utf-8')) 405 | except ValueError: 406 | self.close_stream() 407 | raise errors.NSQException( 408 | 'failed to parse AUTH response JSON from nsqd: ' 409 | '{!r}'.format(data)) 410 | 411 | self.on_auth.send(self, response=response) 412 | return response 413 | 414 | def subscribe(self, topic, channel): 415 | """Subscribe to a nsq `topic` and `channel`.""" 416 | self.send(nsq.subscribe(topic, channel)) 417 | 418 | def publish(self, topic, data, defer=None): 419 | """Publish a message to the given topic over tcp. 420 | 421 | :param topic: the topic to publish to 422 | 423 | :param data: bytestring data to publish 424 | 425 | :param defer: duration in milliseconds to defer before publishing 426 | (requires nsq 0.3.6) 427 | """ 428 | if defer is None: 429 | self.send(nsq.publish(topic, data)) 430 | else: 431 | self.send(nsq.deferpublish(topic, data, defer)) 432 | 433 | def multipublish(self, topic, messages): 434 | """Publish an iterable of messages to the given topic over http. 
435 | 436 | :param topic: the topic to publish to 437 | 438 | :param messages: iterable of bytestrings to publish 439 | """ 440 | self.send(nsq.multipublish(topic, messages)) 441 | 442 | def ready(self, count): 443 | """Indicate you are ready to receive ``count`` messages.""" 444 | self.ready_count = count 445 | self.send(nsq.ready(count)) 446 | 447 | def finish(self, message_id): 448 | """Finish a message (indicate successful processing).""" 449 | self.send(nsq.finish(message_id)) 450 | self.finish_inflight() 451 | self.on_finish.send(self, message_id=message_id) 452 | 453 | def requeue(self, message_id, timeout=0, backoff=True): 454 | """Re-queue a message (indicate failure to process).""" 455 | self.send(nsq.requeue(message_id, timeout)) 456 | self.finish_inflight() 457 | self.on_requeue.send( 458 | self, 459 | message_id=message_id, 460 | timeout=timeout, 461 | backoff=backoff 462 | ) 463 | 464 | def touch(self, message_id): 465 | """Reset the timeout for an in-flight message.""" 466 | self.send(nsq.touch(message_id)) 467 | 468 | def close(self): 469 | """Indicate no more messages should be sent.""" 470 | self.send(nsq.close()) 471 | 472 | def nop(self): 473 | """Send no-op to nsqd. Used to keep connection alive.""" 474 | self.send(nsq.nop()) 475 | 476 | def __str__(self): 477 | return '{}:{}'.format(self.address, self.port) 478 | 479 | def __hash__(self): 480 | return hash(str(self)) 481 | 482 | def __eq__(self, other): 483 | return isinstance(other, type(self)) and str(self) == str(other) 484 | 485 | def __cmp__(self, other): 486 | return hash(self) - hash(other) 487 | 488 | def __lt__(self, other): 489 | return hash(self) < hash(other) 490 | 491 | 492 | class NsqdHTTPClient(HTTPClient): 493 | """Low level http client for nsqd. 
494 | 495 | :param host: nsqd host address (default: localhost) 496 | 497 | :param port: nsqd http port (default: 4151) 498 | 499 | :param useragent: useragent sent to nsqd (default: 500 | ``/``) 501 | 502 | :param connection_class: override the http connection class 503 | """ 504 | def __init__(self, host='localhost', port=4151, **kwargs): 505 | super(NsqdHTTPClient, self).__init__(host, port, **kwargs) 506 | 507 | def publish(self, topic, data, defer=None): 508 | """Publish a message to the given topic over http. 509 | 510 | :param topic: the topic to publish to 511 | 512 | :param data: bytestring data to publish 513 | 514 | :param defer: duration in millisconds to defer before publishing 515 | (requires nsq 0.3.6) 516 | """ 517 | nsq.assert_valid_topic_name(topic) 518 | fields = {'topic': topic} 519 | 520 | if defer is not None: 521 | fields['defer'] = '{}'.format(defer) 522 | 523 | return self._request('POST', '/pub', fields=fields, body=data) 524 | 525 | def _validate_mpub_message(self, message): 526 | if b'\n' not in message: 527 | return message 528 | raise errors.NSQException( 529 | 'newlines are not allowed in http multipublish') 530 | 531 | def multipublish(self, topic, messages, binary=False): 532 | """Publish an iterable of messages to the given topic over http. 533 | 534 | :param topic: the topic to publish to 535 | 536 | :param messages: iterable of bytestrings to publish 537 | 538 | :param binary: enable binary mode. defaults to False 539 | (requires nsq 1.0.0) 540 | 541 | By default multipublish expects messages to be delimited by ``"\\n"``, 542 | use the binary flag to enable binary mode where the POST body is 543 | expected to be in the following wire protocol format. 
544 | """ 545 | nsq.assert_valid_topic_name(topic) 546 | fields = {'topic': topic} 547 | 548 | if binary: 549 | fields['binary'] = 'true' 550 | body = nsq.multipublish_body(messages) 551 | else: 552 | body = b'\n'.join(self._validate_mpub_message(m) for m in messages) 553 | 554 | return self._request('POST', '/mpub', fields=fields, body=body) 555 | 556 | def create_topic(self, topic): 557 | """Create a topic.""" 558 | nsq.assert_valid_topic_name(topic) 559 | return self._request('POST', '/topic/create', fields={'topic': topic}) 560 | 561 | def delete_topic(self, topic): 562 | """Delete a topic.""" 563 | nsq.assert_valid_topic_name(topic) 564 | return self._request('POST', '/topic/delete', fields={'topic': topic}) 565 | 566 | def create_channel(self, topic, channel): 567 | """Create a channel for an existing topic.""" 568 | nsq.assert_valid_topic_name(topic) 569 | nsq.assert_valid_channel_name(channel) 570 | return self._request('POST', '/channel/create', 571 | fields={'topic': topic, 'channel': channel}) 572 | 573 | def delete_channel(self, topic, channel): 574 | """Delete an existing channel for an existing topic.""" 575 | nsq.assert_valid_topic_name(topic) 576 | nsq.assert_valid_channel_name(channel) 577 | return self._request('POST', '/channel/delete', 578 | fields={'topic': topic, 'channel': channel}) 579 | 580 | def empty_topic(self, topic): 581 | """Empty all the queued messages for an existing topic.""" 582 | nsq.assert_valid_topic_name(topic) 583 | return self._request('POST', '/topic/empty', fields={'topic': topic}) 584 | 585 | def empty_channel(self, topic, channel): 586 | """Empty all the queued messages for an existing channel.""" 587 | nsq.assert_valid_topic_name(topic) 588 | nsq.assert_valid_channel_name(channel) 589 | return self._request('POST', '/channel/empty', 590 | fields={'topic': topic, 'channel': channel}) 591 | 592 | def pause_topic(self, topic): 593 | """Pause message flow to all channels on an existing topic. 
594 | 595 | Messages will queue at topic. 596 | """ 597 | nsq.assert_valid_topic_name(topic) 598 | return self._request('POST', '/topic/pause', fields={'topic': topic}) 599 | 600 | def unpause_topic(self, topic): 601 | """Resume message flow to channels of an existing, paused, topic.""" 602 | nsq.assert_valid_topic_name(topic) 603 | return self._request('POST', '/topic/unpause', fields={'topic': topic}) 604 | 605 | def pause_channel(self, topic, channel): 606 | """Pause message flow to consumers of an existing channel. 607 | 608 | Messages will queue at channel. 609 | """ 610 | nsq.assert_valid_topic_name(topic) 611 | nsq.assert_valid_channel_name(channel) 612 | return self._request('POST', '/channel/pause', 613 | fields={'topic': topic, 'channel': channel}) 614 | 615 | def unpause_channel(self, topic, channel): 616 | """Resume message flow to consumers of an existing, paused, channel.""" 617 | nsq.assert_valid_topic_name(topic) 618 | nsq.assert_valid_channel_name(channel) 619 | return self._request('POST', '/channel/unpause', 620 | fields={'topic': topic, 'channel': channel}) 621 | 622 | def stats(self, topic=None, channel=None, text=False): 623 | """Return internal instrumented statistics. 624 | 625 | :param topic: (optional) filter to topic 626 | 627 | :param channel: (optional) filter to channel 628 | 629 | :param text: return the stats as a string (default: ``False``) 630 | """ 631 | if text: 632 | fields = {'format': 'text'} 633 | else: 634 | fields = {'format': 'json'} 635 | 636 | if topic: 637 | nsq.assert_valid_topic_name(topic) 638 | fields['topic'] = topic 639 | 640 | if channel: 641 | nsq.assert_valid_channel_name(channel) 642 | fields['channel'] = channel 643 | 644 | return self._request('GET', '/stats', fields=fields) 645 | 646 | def ping(self): 647 | """Monitoring endpoint. 648 | 649 | :returns: should return ``"OK"``, otherwise raises an exception. 
class Nsqd(object):
    """Use :class:`NsqdTCPClient` or :class:`NsqdHTTPClient` instead.

    Deprecated facade that proxies to a tcp client and an http client,
    preferring the tcp connection for publishing when it is connected.

    .. deprecated:: 1.0.0
    """
    @deprecated
    def __init__(self, address='127.0.0.1', tcp_port=4150, http_port=4151,
                 **kwargs):
        """Use :class:`NsqdTCPClient` or :class:`NsqdHTTPClient` instead.

        .. deprecated:: 1.0.0
        """
        self.address = address
        self.tcp_port = tcp_port
        self.http_port = http_port

        self.__tcp_client = NsqdTCPClient(address, tcp_port, **kwargs)
        self.__http_client = NsqdHTTPClient(address, http_port)

    @property
    def base_url(self):
        return 'http://{}:{}/'.format(self.address, self.http_port)

    @deprecated
    def publish_tcp(self, topic, data, **kwargs):
        """Use :meth:`NsqdTCPClient.publish` instead.

        .. deprecated:: 1.0.0
        """
        return self.__tcp_client.publish(topic, data, **kwargs)

    @deprecated
    def publish_http(self, topic, data, **kwargs):
        """Use :meth:`NsqdHTTPClient.publish` instead.

        .. deprecated:: 1.0.0
        """
        # BUG FIX: the return statement was missing, so callers got None
        # instead of the http response (multipublish_http does return).
        return self.__http_client.publish(topic, data, **kwargs)

    def publish(self, topic, data, *args, **kwargs):
        # Prefer the tcp connection when it is already established.
        if self.__tcp_client.is_connected:
            return self.__tcp_client.publish(topic, data, *args, **kwargs)
        else:
            return self.__http_client.publish(topic, data, *args, **kwargs)

    @deprecated
    def multipublish_tcp(self, topic, messages, **kwargs):
        """Use :meth:`NsqdTCPClient.multipublish` instead.

        .. deprecated:: 1.0.0
        """
        return self.__tcp_client.multipublish(topic, messages, **kwargs)

    @deprecated
    def multipublish_http(self, topic, messages, **kwargs):
        """Use :meth:`NsqdHTTPClient.multipublish` instead.

        .. deprecated:: 1.0.0
        """
        return self.__http_client.multipublish(topic, messages, **kwargs)

    def multipublish(self, topic, messages, *args, **kwargs):
        if self.__tcp_client.is_connected:
            return self.__tcp_client.multipublish(
                topic, messages, *args, **kwargs)
        else:
            return self.__http_client.multipublish(
                topic, messages, *args, **kwargs)

    def __getattr__(self, name):
        # Fall back to the wrapped clients for any unknown attribute.
        for client in (self.__tcp_client, self.__http_client):
            try:
                return getattr(client, name)
            except AttributeError:
                pass

        # BUG FIX: ``object`` has no __getattr__, so the previous
        # ``super(Nsqd, self).__getattr__(name)`` raised a confusing
        # "'super' object has no attribute '__getattr__'" error. Raise
        # the conventional AttributeError for the missing name instead.
        raise AttributeError(name)

    def __str__(self):
        return '{}:{}'.format(self.address, self.tcp_port)

    def __hash__(self):
        # Hash on the "address:tcp_port" string so equal instances collide.
        return hash(str(self))

    def __eq__(self, other):
        return isinstance(other, type(self)) and str(self) == str(other)

    def __cmp__(self, other):
        # Python 2 only; ordering is hash based (arbitrary but stable).
        return hash(self) - hash(other)

    def __lt__(self, other):
        return hash(self) < hash(other)
class Producer(object):
    """High level NSQ producer.

    A Producer will connect to the nsqd tcp addresses and support async
    publishing (``PUB`` & ``MPUB`` & ``DPUB``) of messages to `nsqd` over the
    TCP protocol.

    Example publishing a message::

        from gnsq import Producer

        producer = Producer('localhost:4150')
        producer.start()
        producer.publish('topic', b'hello world')

    :param nsqd_tcp_addresses: a sequence of string addresses of the nsqd
        instances this consumer should connect to

    :param max_backoff_duration: the maximum time we will allow a backoff
        state to last in seconds. If zero, backoff will not occur

    :param **kwargs: passed to :class:`~gnsq.NsqdTCPClient` initialization
    """
    def __init__(self, nsqd_tcp_addresses=None, max_backoff_duration=128,
                 **kwargs):
        # ``None`` instead of a mutable ``[]`` default; both are rejected
        # below, so behavior for callers is unchanged.
        if not nsqd_tcp_addresses:
            raise ValueError('must specify at least one nsqd or lookupd')

        self.nsqd_tcp_addresses = parse_nsqds(nsqd_tcp_addresses)
        self.max_backoff_duration = max_backoff_duration
        self.conn_kwargs = kwargs
        self.logger = logging.getLogger(__name__)

        self._state = INIT
        self._connections = Queue()
        self._connection_backoffs = defaultdict(self._create_backoff)
        self._response_queues = {}
        self._workers = Group()

    @cached_property
    def on_response(self):
        """Emitted when a response is received.

        The signal sender is the producer and the ``response`` is sent as an
        argument.
        """
        return blinker.Signal(doc='Emitted when a response is received.')

    @cached_property
    def on_error(self):
        """Emitted when an error is received.

        The signal sender is the producer and the ``error`` is sent as an
        argument.
        """
        return blinker.Signal(doc='Emitted when an error is received.')

    @cached_property
    def on_auth(self):
        """Emitted after a connection is successfully authenticated.

        The signal sender is the producer and the ``conn`` and parsed
        ``response`` are sent as arguments.
        """
        return blinker.Signal(
            doc='Emitted after a connection is successfully authenticated.')

    @cached_property
    def on_close(self):
        """Emitted after :meth:`close`.

        The signal sender is the producer.
        """
        return blinker.Signal(doc='Emitted after the producer is closed.')

    def start(self):
        """Start discovering and listening to connections."""
        if self._state == CLOSED:
            raise NSQException('producer already closed')

        if self.is_running:
            self.logger.warning('producer already started')
            return

        self.logger.debug('starting producer...')
        self._state = RUNNING

        for address in self.nsqd_tcp_addresses:
            address, port = address.split(':')
            self.connect_to_nsqd(address, int(port))

    def close(self):
        """Immediately close all connections and stop workers."""
        if not self.is_running:
            return

        self._state = CLOSED
        self.logger.debug('closing connection(s)')

        while True:
            try:
                conn = self._connections.get(block=False)
            except Empty:
                break

            conn.close_stream()

        self.on_close.send(self)

    def join(self, timeout=None, raise_error=False):
        """Block until all connections have closed and workers stopped."""
        self._workers.join(timeout, raise_error)

    @property
    def is_running(self):
        """Check if the producer is currently running."""
        return self._state == RUNNING

    def connect_to_nsqd(self, address, port):
        if not self.is_running:
            return

        conn = NsqdTCPClient(address, port, **self.conn_kwargs)
        self.logger.debug('[%s] connecting...', conn)

        conn.on_response.connect(self.handle_response)
        conn.on_error.connect(self.handle_error)
        conn.on_auth.connect(self.handle_auth)

        try:
            conn.connect()
            conn.identify()

        except NSQException as error:
            self.logger.warning('[%s] connection failed (%r)', conn, error)
            self.handle_connection_failure(conn)
            return

        # Check if we've closed since we started
        if not self.is_running:
            self.handle_connection_failure(conn)
            return

        self.logger.info('[%s] connection successful', conn)
        self.handle_connection_success(conn)

    def _listen(self, conn):
        try:
            conn.listen()
        except NSQException as error:
            self.logger.warning('[%s] connection lost (%r)', conn, error)

        self.handle_connection_failure(conn)

    def handle_connection_success(self, conn):
        self._response_queues[conn] = deque()
        self._put_connection(conn)
        self._workers.spawn(self._listen, conn)
        self._connection_backoffs[conn].success()

    def handle_connection_failure(self, conn):
        conn.close_stream()
        self._clear_responses(conn, NSQException('connection closed'))

        if not self.is_running:
            return

        seconds = self._connection_backoffs[conn].failure().get_interval()
        self.logger.debug('[%s] retrying in %ss', conn, seconds)

        gevent.spawn_later(
            seconds, self.connect_to_nsqd, conn.address, conn.port)

    def handle_auth(self, conn, response):
        metadata = []
        if response.get('identity'):
            metadata.append("Identity: %r" % response['identity'])

        if response.get('permission_count'):
            metadata.append("Permissions: %d" % response['permission_count'])

        if response.get('identity_url'):
            metadata.append(response['identity_url'])

        self.logger.info('[%s] AUTH accepted %s', conn, ' '.join(metadata))
        self.on_auth.send(self, conn=conn, response=response)

    def handle_response(self, conn, response):
        self.logger.debug('[%s] response: %s', conn, response)

        if response == nsq.OK:
            result = self._response_queues[conn].popleft()
            result.set(response)

        self.on_response.send(self, response=response)

    def handle_error(self, conn, error):
        self.logger.debug('[%s] error: %s', conn, error)
        self._clear_responses(conn, error)
        self.on_error.send(self, error=error)

    def _create_backoff(self):
        return BackoffTimer(max_interval=self.max_backoff_duration)

    def _clear_responses(self, conn, error):
        # All relevant errors are fatal
        for result in self._response_queues.pop(conn, []):
            result.set_exception(error)

    def _get_connection(self, block=True, timeout=None):
        if not self.is_running:
            raise NSQException('producer not running')

        while True:
            try:
                conn = self._connections.get(block=block, timeout=timeout)
            except Empty:
                raise NSQNoConnections

            if conn.is_connected:
                return conn

            # Discard closed connections

    def _put_connection(self, conn):
        if not self.is_running:
            return
        self._connections.put(conn)

    def publish(self, topic, data, defer=None, block=True, timeout=None,
                raise_error=True):
        """Publish a message to the given topic.

        :param topic: the topic to publish to

        :param data: bytestring data to publish

        :param defer: duration in milliseconds to defer before publishing
            (requires nsq 0.3.6)

        :param block: wait for a connection to become available before
            publishing the message. If block is `False` and no connections
            are available, :class:`~gnsq.errors.NSQNoConnections` is raised

        :param timeout: if timeout is a positive number, it blocks at most
            ``timeout`` seconds before raising
            :class:`~gnsq.errors.NSQNoConnections`

        :param raise_error: if ``True``, it blocks until a response is
            received from the nsqd server, and any error response is raised.
            Otherwise an :class:`~gevent.event.AsyncResult` is returned
        """
        result = AsyncResult()
        conn = self._get_connection(block=block, timeout=timeout)

        try:
            self._response_queues[conn].append(result)
            conn.publish(topic, data, defer=defer)
        finally:
            self._put_connection(conn)

        if raise_error:
            return result.get()

        return result

    def multipublish(self, topic, messages, block=True, timeout=None,
                     raise_error=True):
        """Publish an iterable of messages to the given topic.

        :param topic: the topic to publish to

        :param messages: iterable of bytestrings to publish

        :param block: wait for a connection to become available before
            publishing the message. If block is `False` and no connections
            are available, :class:`~gnsq.errors.NSQNoConnections` is raised

        :param timeout: if timeout is a positive number, it blocks at most
            ``timeout`` seconds before raising
            :class:`~gnsq.errors.NSQNoConnections`

        :param raise_error: if ``True``, it blocks until a response is
            received from the nsqd server, and any error response is raised.
            Otherwise an :class:`~gevent.event.AsyncResult` is returned
        """
        result = AsyncResult()
        conn = self._get_connection(block=block, timeout=timeout)

        try:
            self._response_queues[conn].append(result)
            conn.multipublish(topic, messages)
        finally:
            self._put_connection(conn)

        if raise_error:
            return result.get()

        return result
Otherwise 307 | an :class:`~gevent.event.AsyncResult` is returned 308 | """ 309 | result = AsyncResult() 310 | conn = self._get_connection(block=block, timeout=timeout) 311 | 312 | try: 313 | self._response_queues[conn].append(result) 314 | conn.multipublish(topic, messages) 315 | finally: 316 | self._put_connection(conn) 317 | 318 | if raise_error: 319 | return result.get() 320 | 321 | return result 322 | -------------------------------------------------------------------------------- /gnsq/protocol.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import 3 | 4 | import json 5 | import re 6 | import struct 7 | 8 | import six 9 | 10 | __all__ = [ 11 | 'MAGIC_V2', 12 | 'FRAME_TYPE_RESPONSE', 13 | 'FRAME_TYPE_ERROR', 14 | 'FRAME_TYPE_MESSAGE', 15 | 'unpack_size', 16 | 'unpack_response', 17 | 'unpack_message', 18 | 'subscribe', 19 | 'publish', 20 | 'ready', 21 | 'finish', 22 | 'requeue', 23 | 'close', 24 | 'nop', 25 | ] 26 | 27 | MAGIC_V2 = b' V2' 28 | NEWLINE = b'\n' 29 | SPACE = b' ' 30 | EMPTY = b'' 31 | HEARTBEAT = b'_heartbeat_' 32 | OK = b'OK' 33 | 34 | IDENTIFY = b'IDENTIFY' 35 | AUTH = b'AUTH' 36 | SUB = b'SUB' 37 | PUB = b'PUB' 38 | MPUB = b'MPUB' 39 | DPUB = b'DPUB' 40 | RDY = b'RDY' 41 | FIN = b'FIN' 42 | REQ = b'REQ' 43 | TOUCH = b'TOUCH' 44 | CLS = b'CLS' 45 | NOP = b'NOP' 46 | 47 | FRAME_TYPE_RESPONSE = 0 48 | FRAME_TYPE_ERROR = 1 49 | FRAME_TYPE_MESSAGE = 2 50 | 51 | 52 | # 53 | # Helpers 54 | # 55 | VALID_NAME_RE = re.compile(r'^[\.a-zA-Z0-9_-]+(#ephemeral)?$') 56 | 57 | 58 | def _valid_name(name): 59 | if not 0 < len(name) < 65: 60 | return False 61 | return bool(VALID_NAME_RE.match(name)) 62 | 63 | 64 | def valid_topic_name(topic): 65 | return _valid_name(topic) 66 | 67 | 68 | def valid_channel_name(channel): 69 | return _valid_name(channel) 70 | 71 | 72 | def assert_valid_topic_name(topic): 73 | if valid_topic_name(topic): 74 | return 75 | raise 
ValueError('invalid topic name') 76 | 77 | 78 | def assert_valid_channel_name(channel): 79 | if valid_channel_name(channel): 80 | return 81 | raise ValueError('invalid channel name') 82 | 83 | 84 | # 85 | # Responses 86 | # 87 | def unpack_size(data): 88 | return struct.unpack('>l', data)[0] 89 | 90 | 91 | def unpack_response(data): 92 | return unpack_size(data[:4]), data[4:] 93 | 94 | 95 | def unpack_message(data): 96 | timestamp = struct.unpack('>q', data[:8])[0] 97 | attempts = struct.unpack('>h', data[8:10])[0] 98 | message_id = data[10:26] 99 | body = data[26:] 100 | return timestamp, attempts, message_id, body 101 | 102 | 103 | # 104 | # Commands 105 | # 106 | def _packsize(data): 107 | return struct.pack('>l', len(data)) 108 | 109 | 110 | def _packbody(body): 111 | if body is None: 112 | return EMPTY 113 | if not isinstance(body, bytes): 114 | raise TypeError('message body must be a byte string') 115 | return _packsize(body) + body 116 | 117 | 118 | def _encode_param(data): 119 | if isinstance(data, bytes): 120 | return data 121 | return data.encode('utf-8') 122 | 123 | 124 | def _command(cmd, body, *params): 125 | params = tuple(_encode_param(p) for p in params) 126 | return EMPTY.join((SPACE.join((cmd,) + params), NEWLINE, _packbody(body))) 127 | 128 | 129 | def identify(data): 130 | return _command(IDENTIFY, six.b(json.dumps(data))) 131 | 132 | 133 | def auth(secret): 134 | return _command(AUTH, secret) 135 | 136 | 137 | def subscribe(topic_name, channel_name): 138 | assert_valid_topic_name(topic_name) 139 | assert_valid_channel_name(channel_name) 140 | return _command(SUB, None, topic_name, channel_name) 141 | 142 | 143 | def publish(topic_name, data): 144 | assert_valid_topic_name(topic_name) 145 | return _command(PUB, data, topic_name) 146 | 147 | 148 | def multipublish_body(messages): 149 | data = EMPTY.join(_packbody(m) for m in messages) 150 | return _packsize(messages) + data 151 | 152 | 153 | def multipublish(topic_name, messages): 154 | 
assert_valid_topic_name(topic_name) 155 | return _command(MPUB, multipublish_body(messages), topic_name) 156 | 157 | 158 | def deferpublish(topic_name, data, delay_ms): 159 | assert_valid_topic_name(topic_name) 160 | return _command(DPUB, data, topic_name, six.b('{}'.format(delay_ms))) 161 | 162 | 163 | def ready(count): 164 | if not isinstance(count, int): 165 | raise TypeError('ready count must be an integer') 166 | 167 | if count < 0: 168 | raise ValueError('ready count cannot be negative') 169 | 170 | return _command(RDY, None, six.b('{}'.format(count))) 171 | 172 | 173 | def finish(message_id): 174 | return _command(FIN, None, message_id) 175 | 176 | 177 | def requeue(message_id, timeout=0): 178 | if not isinstance(timeout, int): 179 | raise TypeError('requeue timeout must be an integer') 180 | return _command(REQ, None, message_id, six.b('{}'.format(timeout))) 181 | 182 | 183 | def touch(message_id): 184 | return _command(TOUCH, None, message_id) 185 | 186 | 187 | def close(): 188 | return _command(CLS, None) 189 | 190 | 191 | def nop(): 192 | return _command(NOP, None) 193 | -------------------------------------------------------------------------------- /gnsq/reader.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import, division 3 | 4 | import random 5 | from multiprocessing import cpu_count 6 | 7 | from gevent.queue import Queue 8 | 9 | from .consumer import Consumer 10 | from .decorators import deprecated 11 | from .errors import NSQNoConnections 12 | from .states import INIT 13 | 14 | 15 | class Reader(Consumer): 16 | """Use :class:`~gnsq.Consumer` instead. 17 | 18 | .. deprecated:: 1.0.0 19 | """ 20 | @deprecated 21 | def __init__(self, *args, **kwargs): 22 | """Use :class:`~gnsq.Consumer` instead. 23 | 24 | .. 
deprecated:: 1.0.0 25 | """ 26 | setattr(self, 'async', kwargs.pop('async', False)) 27 | 28 | max_concurrency = kwargs.pop('max_concurrency', 0) 29 | 30 | if max_concurrency < 0: 31 | self.max_concurrency = cpu_count() 32 | else: 33 | self.max_concurrency = max_concurrency 34 | 35 | if self.max_concurrency: 36 | self.queue = Queue() 37 | else: 38 | self.queue = None 39 | 40 | super(Reader, self).__init__(*args, **kwargs) 41 | 42 | def start(self, *args, **kwargs): 43 | if self._state == INIT: 44 | for _ in range(self.max_concurrency): 45 | self._killables.add(self._workers.spawn(self._run)) 46 | 47 | return super(Reader, self).start(*args, **kwargs) 48 | 49 | def handle_message(self, conn, message): 50 | if self.max_concurrency: 51 | self.logger.debug('[%s] queueing message: %s' % (conn, message.id)) 52 | self.queue.put((conn, message)) 53 | else: 54 | super(Reader, self).handle_message(conn, message) 55 | 56 | @deprecated 57 | def publish(self, topic, message): 58 | """Use :class:`~gnsq.Producer` instead. 59 | 60 | .. 
class CompressionSocket(object):
    """Socket wrapper that transparently (de)compresses the byte stream.

    Subclasses provide ``compress`` and ``decompress``; everything else is
    delegated to the wrapped socket.
    """

    def __init__(self, socket):
        self._socket = socket
        self._bootstrapped = None

    def __getattr__(self, name):
        # Delegate any unknown attribute to the wrapped socket.
        return getattr(self._socket, name)

    def bootstrap(self, data):
        """Prime the decompressor with bytes read before the upgrade."""
        if not data:
            return
        self._bootstrapped = self.decompress(data)

    def recv(self, size):
        pending = self._bootstrapped
        if pending:
            # Hand back the bootstrapped bytes before touching the socket.
            self._bootstrapped = None
            return pending

        chunk = self._socket.recv(size)
        if not chunk:
            return chunk

        plain = self.decompress(chunk)
        if not plain:
            # Compressed bytes arrived but no full block decoded yet.
            raise NSQSocketError(EWOULDBLOCK, 'Operation would block')

        return plain

    def sendall(self, data):
        self._socket.sendall(self.compress(data))


class DefalteSocket(CompressionSocket):
    """Deflate (raw zlib stream) compression for nsq connections."""

    def __init__(self, socket, level):
        window_bits = -zlib.MAX_WBITS  # negative: raw deflate, no header
        self._decompressor = zlib.decompressobj(window_bits)
        self._compressor = zlib.compressobj(level, zlib.DEFLATED, window_bits)
        super(DefalteSocket, self).__init__(socket)

    def compress(self, data):
        # Sync-flush so the peer can decode what we have sent so far.
        head = self._compressor.compress(data)
        return head + self._compressor.flush(zlib.Z_SYNC_FLUSH)

    def decompress(self, data):
        return self._decompressor.decompress(data)

    def close(self):
        self._socket.sendall(self._compressor.flush(zlib.Z_FINISH))
        self._socket.close()
class Stream(object):
    """Buffered gevent socket used for an nsqd tcp connection.

    Handles connecting, buffered exact-size reads, write serialization and
    in-place upgrades of the underlying socket to TLS, snappy or deflate.
    """

    def __init__(self, address, port, timeout, buffer_size=PAGESIZE,
                 lock_class=Semaphore):
        self.address = address
        self.port = port
        self.timeout = timeout

        # Bytes received from the socket but not yet consumed by read().
        self.buffer = b''
        self.buffer_size = buffer_size

        self.socket = None
        # Serializes concurrent writers in send(); gevent Semaphore default.
        self.lock = lock_class()

    @property
    def is_connected(self):
        # A live socket object is the sole marker of connectedness.
        return self.socket is not None

    def ensure_connection(self):
        """Raise ``NSQSocketError(ENOTCONN)`` unless connected."""
        if self.is_connected:
            return
        raise NSQSocketError(ENOTCONN, 'Socket is not connected')

    def connect(self):
        """Open the tcp connection (no-op if already connected)."""
        if self.is_connected:
            return

        try:
            self.socket = socket.create_connection(
                address=(self.address, self.port),
                timeout=self.timeout,
            )

        except socket.error as error:
            six.raise_from(NSQSocketError(*error.args), error)

    def read(self, size):
        """Read exactly ``size`` bytes, buffering any surplus.

        Raises :class:`NSQSocketError` if the connection is lost before
        ``size`` bytes are available.
        """
        while len(self.buffer) < size:
            self.ensure_connection()

            try:
                packet = self.socket.recv(self.buffer_size)
            except socket.error as error:
                if error.errno in (EDEADLK, EAGAIN, EWOULDBLOCK):
                    # Transient; yield to the gevent hub and retry.
                    gevent.sleep()
                    continue
                six.raise_from(NSQSocketError(*error.args), error)

            if not packet:
                # Peer closed the connection. close() clears the buffer, so
                # the next loop iteration raises ENOTCONN above.
                self.close()

            self.buffer += packet

        data = self.buffer[:size]
        self.buffer = self.buffer[size:]

        return data

    def send(self, data):
        """Send all of ``data``, serialized against concurrent writers."""
        self.ensure_connection()

        with self.lock:
            try:
                return self.socket.sendall(data)
            except socket.error as error:
                six.raise_from(NSQSocketError(*error.args), error)

    def consume_buffer(self):
        """Return and clear any buffered bytes (used by upgrades)."""
        data = self.buffer
        self.buffer = b''
        return data

    def close(self):
        """Close the socket and discard any buffered bytes."""
        if not self.is_connected:
            return

        # Null the attribute first so is_connected is False even if the
        # close below raises.
        socket = self.socket
        self.socket = None
        self.buffer = b''

        socket.close()

    def upgrade_to_tls(
        self,
        keyfile=None,
        certfile=None,
        cert_reqs=CERT_NONE,
        ca_certs=None,
        ssl_version=PROTOCOL_TLSv1_2
    ):
        """Wrap the existing socket with TLS in place."""
        self.ensure_connection()

        try:
            self.socket = SSLSocket(
                self.socket,
                keyfile=keyfile,
                certfile=certfile,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                ssl_version=ssl_version,
            )
        except socket.error as error:
            six.raise_from(NSQSocketError(*error.args), error)

    def upgrade_to_snappy(self):
        """Wrap the existing socket with snappy compression in place."""
        if SnappySocket is None:
            raise RuntimeError('snappy requires the python-snappy package')

        self.ensure_connection()
        self.socket = SnappySocket(self.socket)
        # Feed back any bytes already buffered; they are compressed data.
        self.socket.bootstrap(self.consume_buffer())

    def upgrade_to_defalte(self, level):
        """Wrap the existing socket with deflate compression in place."""
        self.ensure_connection()
        self.socket = DefalteSocket(self.socket, level)
        # Feed back any bytes already buffered; they are compressed data.
        self.socket.bootstrap(self.consume_buffer())
def parse_nsqds(nsqd_tcp_addresses):
    """Coerce a single address or a collection of addresses into a set of
    normalized ``host:port`` nsqd TCP address strings.
    """
    if isinstance(nsqd_tcp_addresses, six.string_types):
        # A lone address string becomes a one-element set.
        return {normalize_nsqd_address(nsqd_tcp_addresses)}

    if isinstance(nsqd_tcp_addresses, (list, tuple, set)):
        return {normalize_nsqd_address(addr) for addr in nsqd_tcp_addresses}

    raise TypeError('nsqd_tcp_addresses must be a list, set or tuple')
3 | 4 | # Install our test and docs requirements 5 | -r requirements.docs.txt 6 | -r requirements.test.txt 7 | 8 | # Install our development requirements 9 | flake8 10 | tox 11 | twine 12 | pip>=10.0.1 13 | wheel>=0.31.1 14 | -------------------------------------------------------------------------------- /requirements.docs.txt: -------------------------------------------------------------------------------- 1 | doc8 2 | sphinx 3 | sphinx_rtd_theme 4 | -------------------------------------------------------------------------------- /requirements.test.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-cov 3 | pytest-timeout 4 | python-snappy 5 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal = 1 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | from setuptools import setup 4 | 5 | 6 | with open('README.rst') as readme_file: 7 | readme = readme_file.read() 8 | 9 | 10 | setup( 11 | name='gnsq', 12 | version='1.0.2', 13 | description='A gevent based python client for NSQ.', 14 | long_description=readme, 15 | long_description_content_type='text/x-rst', 16 | author='Trevor Olson', 17 | author_email='trevor@heytrevor.com', 18 | url='https://github.com/wtolson/gnsq', 19 | packages=[ 20 | 'gnsq', 21 | 'gnsq.contrib', 22 | 'gnsq.stream', 23 | ], 24 | package_dir={'gnsq': 'gnsq'}, 25 | include_package_data=True, 26 | install_requires=[ 27 | 'blinker', 28 | 'gevent', 29 | 'six', 30 | 'urllib3', 31 | ], 32 | extras_require={ 33 | 'snappy': ['python-snappy'], 34 | }, 35 | license="BSD", 36 | zip_safe=False, 37 | keywords='gnsq', 38 | classifiers=[ 39 | 'Development Status :: 5 - 
Production/Stable', 40 | 'Intended Audience :: Developers', 41 | 'License :: OSI Approved :: BSD License', 42 | 'Natural Language :: English', 43 | "Programming Language :: Python :: 2", 44 | 'Programming Language :: Python :: 2.7', 45 | 'Programming Language :: Python :: 3', 46 | 'Programming Language :: Python :: 3.4', 47 | 'Programming Language :: Python :: 3.5', 48 | 'Programming Language :: Python :: 3.6', 49 | 'Programming Language :: Python :: 3.7', 50 | 'Programming Language :: Python :: 3.8', 51 | ] 52 | ) 53 | -------------------------------------------------------------------------------- /tests/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEbjCCA1agAwIBAgIJAK6x7y6AwBmLMA0GCSqGSIb3DQEBBQUAMIGAMQswCQYD 3 | VQQGEwJVUzERMA8GA1UECBMITmV3IFlvcmsxFjAUBgNVBAcTDU5ldyBZb3JrIENp 4 | dHkxDDAKBgNVBAoTA05TUTETMBEGA1UEAxMKdGVzdC5sb2NhbDEjMCEGCSqGSIb3 5 | DQEJARYUbXJlaWZlcnNvbkBnbWFpbC5jb20wHhcNMTMwNjI4MDA0MzQ4WhcNMTYw 6 | NDE3MDA0MzQ4WjCBgDELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE5ldyBZb3JrMRYw 7 | FAYDVQQHEw1OZXcgWW9yayBDaXR5MQwwCgYDVQQKEwNOU1ExEzARBgNVBAMTCnRl 8 | c3QubG9jYWwxIzAhBgkqhkiG9w0BCQEWFG1yZWlmZXJzb25AZ21haWwuY29tMIIB 9 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnX0KB+svwy+yHU2qggz/EaGg 10 | craKShagKo+9M9y5HLM852ngk5c+t+tJJbx3N954Wr1FXBuGIv1ltU05rU4zhvBS 11 | 25tVP1UIEnT5pBt2TeetLkl199Y7fxh1hKmnwJMG3fy3VZdNXEndBombXMmtXpQY 12 | shuEJHKeUNDbQKz5X+GjEdkTPO/HY/VMHsxS23pbSimQozMg3hvLIdgv0aS3QECz 13 | ydZBgTPThy3uDtHIuCpxCwXd/vDF68ATlYgo3h3lh2vxNwM/pjklIUhzMh4XaKQF 14 | 7m3/0KbtUcXfy0QHueeuMr11E9MAFNyRN4xf9Fk1yB97KJ3PJBTC5WD/m1nW+QID 15 | AQABo4HoMIHlMB0GA1UdDgQWBBR3HMBws4lmYYSIgwoZsfW+bbgaMjCBtQYDVR0j 16 | BIGtMIGqgBR3HMBws4lmYYSIgwoZsfW+bbgaMqGBhqSBgzCBgDELMAkGA1UEBhMC 17 | VVMxETAPBgNVBAgTCE5ldyBZb3JrMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MQww 18 | CgYDVQQKEwNOU1ExEzARBgNVBAMTCnRlc3QubG9jYWwxIzAhBgkqhkiG9w0BCQEW 19 | FG1yZWlmZXJzb25AZ21haWwuY29tggkArrHvLoDAGYswDAYDVR0TBAUwAwEB/zAN 20 | 
class BaseIntegrationServer(object):
    """Launch an nsq binary as a subprocess for integration tests.

    Used as a context manager: starts the process on __enter__, scrapes the
    listening ports from its stderr, and tears everything down on __exit__.
    Subclasses supply ``executable`` and a ``cmd`` property.
    """

    protocols = ('TCP', 'HTTP')

    # Matches e.g. "TCP: listening on 127.0.0.1:4150" in the server's stderr.
    protocol_re = re.compile(' '.join([
        r'(?P<protocol>[A-Z]+):',
        r'listening on',
        r'(?P<address>(?:[0-9]{1,3}\.){3}[0-9]{1,3}):(?P<port>[0-9]+)',
    ]))

    # Matches e.g. "nsqd v1.1.0 (built w/go1.10.3)".
    version_re = re.compile(' '.join([
        r'(?P<name>[a-z]+)',
        r'v(?P<version>[0-9]+\.[0-9]+\.[0-9]+)(?:-[a-z]+)?',
        r'\(built w\/(?P<go_version>[a-z0-9.-]+)\)'
    ]))

    def __init__(self, address='127.0.0.1'):
        self.address = address
        self.protocol_ports = {}
        # Per-instance scratch dir for the server's data files.
        self.data_path = tempfile.mkdtemp()
        self.name, self.version, self.go_version = self._parse_version()

    def _parse_version(self):
        """Run ``<executable> --version`` and parse name/version/go version."""
        output = subprocess.check_output([self.executable, '--version'])
        # NOTE(review): assumes the banner matches version_re; a None match
        # here would raise AttributeError.
        match = self.version_re.match(output.decode('utf-8'))
        name = match.group('name')
        version = tuple(int(v) for v in match.group('version').split('.'))
        go_version = match.group('go_version')
        return name, version, go_version

    @property
    def tcp_port(self):
        return self.protocol_ports['TCP']

    @property
    def tcp_address(self):
        return '%s:%d' % (self.address, self.tcp_port)

    @property
    def http_port(self):
        return self.protocol_ports['HTTP']

    @property
    def http_address(self):
        return '%s:%d' % (self.address, self.http_port)

    def _random_port(self):
        # Old nsq releases cannot bind port 0 ("pick any"), so choose one.
        if self.version < (0, 3, 5):
            return random.randint(10000, 65535)
        return 0

    def _random_address(self):
        return '%s:%d' % (self.address, self._random_port())

    def _parse_protocol_ports(self):
        """Read stderr until every expected protocol has reported its port."""
        while len(self.protocol_ports) < len(self.protocols):
            line = self.child.stderr.readline()
            sys.stderr.write(line)

            if not line:
                raise Exception('server exited prematurely')

            if 'listening on' not in line:
                continue

            match = self.protocol_re.search(line)
            if not match:
                raise Exception('unexpected line: %r' % line)

            protocol = match.group('protocol')
            if protocol not in self.protocols:
                continue

            port = int(match.group('port'), 10)
            self.protocol_ports[protocol] = port

    def __enter__(self):
        sys.stderr.write('running: %s\n' % ' '.join(self.cmd))
        self.child = subprocess.Popen(self.cmd, stderr=subprocess.PIPE)
        if sys.version_info[0] == 3:
            # readline() below expects text, not bytes, on py3.
            self.child.stderr = io.TextIOWrapper(self.child.stderr, 'utf-8')
        self._parse_protocol_ports()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Terminate the server, drain its stderr, and remove the data dir."""
        try:
            try:
                self.child.terminate()
            except OSError as error:
                # ESRCH: process already exited; nothing to drain or wait on.
                if error.errno != errno.ESRCH:
                    raise
            else:
                # Echo remaining output so test logs include the shutdown.
                while True:
                    line = self.child.stderr.readline()
                    if not line:
                        break
                    sys.stderr.write(line)

                self.child.wait()
        finally:
            # Always remove the temp data dir. The original returned early
            # when the process was already gone (ESRCH), leaking it.
            shutil.rmtree(self.data_path)
class LookupdIntegrationServer(BaseIntegrationServer):
    """Context-managed nsqlookupd process for integration tests."""

    executable = 'nsqlookupd'

    @property
    def cmd(self):
        """Command line used to launch nsqlookupd on random ports."""
        args = [self.executable, '--broadcast-address', self.address]
        for flag in ('--tcp-address', '--http-address'):
            args.extend([flag, self._random_address()])
        return args
class mock_server(object):
    """Decorate a handler into a one-shot TCP server context manager.

    The handler runs once per accepted connection; its return value (or
    exception) is captured and surfaced when the context exits cleanly.
    """

    def __init__(self, handler):
        self.handler = handler
        self.result = AsyncResult()
        # Port 0: let the OS pick a free port; exposed via server.server_port.
        self.server = StreamServer(('127.0.0.1', 0), self)

    def __call__(self, socket, address):
        try:
            self.result.set(self.handler(socket, address))
        except Exception as exc:
            # Stash the failure so __exit__ can re-raise it in the test.
            self.result.set_exception(exc)
        finally:
            socket.close()

    def __enter__(self):
        self.server.start()
        return self.server

    def __exit__(self, exc_type, exc_value, traceback):
        clean_exit = exc_type is None
        if clean_exit:
            # Propagate any exception raised inside the handler.
            self.result.get()
        self.server.stop()
def test_invalid_commands():
    """Protocol commands reject arguments of the wrong type or range."""
    cases = [
        (TypeError, lambda: nsq.requeue('1234', None)),
        (TypeError, lambda: nsq.ready(None)),
        (ValueError, lambda: nsq.ready(-1)),
    ]
    for expected_error, invoke in cases:
        with pytest.raises(expected_error):
            invoke()
# Fixtures shared by the command-serialization tests below.
IDENTIFY_DICT_ASCII = {'a': 1, 'b': 2}
IDENTIFY_DICT_UNICODE = {'c': u'w\xc3\xa5\xe2\x80\xa0'}
# json.dumps emits pure ASCII (ensure_ascii=True), so encoding as utf-8 is
# lossless on both py2 and py3 and replaces the six.b() indirection.
IDENTIFY_BODY_ASCII = json.dumps(IDENTIFY_DICT_ASCII).encode('utf-8')
IDENTIFY_BODY_UNICODE = json.dumps(IDENTIFY_DICT_UNICODE).encode('utf-8')

MSGS = [b'asdf', b'ghjk', b'abcd']
# MPUB wire body: 4-byte big-endian message count, then each message as a
# 4-byte big-endian length prefix followed by its payload.
MPUB_BODY = struct.pack('>l', len(MSGS)) + b''.join(
    struct.pack('>l', len(m)) + m for m in MSGS)
def test_unicode_body():
    """publish() must reject text bodies -- only bytes are valid payloads."""
    with pytest.raises(TypeError):
        nsq.publish('topic', u'unicode body')
@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_messages():
    """End-to-end: publish 500 messages over HTTP, consume them all over TCP."""
    with NsqdIntegrationServer() as server:

        # Mutable counters shared with the signal handlers below (class
        # attributes sidestep py2's lack of `nonlocal`).
        class Accounting(object):
            count = 0
            total = 500
            error = None

        conn = NsqdHTTPClient(server.address, server.http_port)
        for _ in range(Accounting.total):
            conn.publish('test', b'danger zone!')

        consumer = Consumer(
            topic='test',
            channel='test',
            nsqd_tcp_addresses=[server.tcp_address],
            max_in_flight=100,
        )

        @consumer.on_exception.connect
        def error_handler(consumer, message, error):
            # Socket errors are expected while shutting down; anything
            # else is a real failure to re-raise after the run.
            if isinstance(error, NSQSocketError):
                return
            Accounting.error = error
            consumer.close()

        @consumer.on_message.connect
        def handler(consumer, message):
            assert message.body == b'danger zone!'

            Accounting.count += 1
            if Accounting.count == Accounting.total:
                consumer.close()

        # Blocks until one of the handlers above calls consumer.close().
        consumer.start()

        if Accounting.error:
            raise Accounting.error

        assert Accounting.count == Accounting.total
@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_backoff():
    """Drive a consumer connection through BACKOFF -> THROTTLED -> RUNNING."""
    with NsqdIntegrationServer() as server:
        conn = NsqdHTTPClient(server.address, server.http_port)

        for _ in range(500):
            # Publish bytes, consistent with the sibling integration tests
            # (was a text literal, which relied on implicit encoding).
            conn.publish('test', b'danger zone!')

        consumer = Consumer(
            topic='test',
            channel='test',
            nsqd_tcp_addresses=[server.tcp_address],
            max_in_flight=100,
            message_handler=lambda consumer, message: None
        )

        consumer.start(block=False)
        consumer._redistributed_ready_event.wait()

        # `conn` is rebound here from the HTTP client to the single TCP
        # connection the consumer opened.
        conn = next(iter(consumer._connections))
        consumer._message_backoffs[conn].failure()
        consumer._message_backoffs[conn].failure()
        consumer._start_backoff(conn)
        consumer._redistribute_ready_state()

        # Two failures put the connection into backoff with RDY 0.
        assert consumer._connections[conn] == states.BACKOFF
        assert consumer.total_ready_count == 0

        consumer._start_throttled(conn)
        consumer._redistribute_ready_state()
        consumer._redistribute_ready_state()

        # Throttled probes with a single in-flight message (RDY 1).
        assert consumer._connections[conn] == states.THROTTLED
        assert consumer.total_ready_count == 1

        consumer._message_backoffs[conn].success()
        consumer._complete_backoff(conn)
        consumer._redistribute_ready_state()

        # One success is not enough to clear two failures.
        assert consumer._connections[conn] == states.BACKOFF
        assert consumer.total_ready_count == 0

        consumer._start_throttled(conn)
        consumer._redistribute_ready_state()

        assert consumer._connections[conn] == states.THROTTLED
        assert consumer.total_ready_count == 1

        consumer._message_backoffs[conn].success()
        consumer._complete_backoff(conn)
        consumer._redistribute_ready_state()

        # Backoff fully cleared: full max_in_flight restored.
        assert consumer._connections[conn] == states.RUNNING
        assert consumer.total_ready_count == 100
class MockConnection(object):
    """Scripted stand-in for a connection bound to a Message's signals.

    Takes an ordered list of ``(name, args)`` operations and asserts the
    message emits exactly those signals, in that order.
    """

    def __init__(self, message, operations):
        message.on_finish.connect(self.finish)
        message.on_requeue.connect(self.requeue)
        message.on_touch.connect(self.touch)
        self.operations = iter(operations)

    def _expect(self, name, args):
        # Pop the next scripted operation and compare it to what happened.
        expected_name, expected_args = next(self.operations)
        assert expected_name == name
        assert expected_args == args

    def finish(self, message):
        self._expect('finish', (message,))

    def requeue(self, message, timeout, backoff):
        self._expect('requeue', (message, timeout, backoff))

    def touch(self, message):
        self._expect('touch', (message,))

    def assert_finished(self):
        # Every scripted operation must have been consumed.
        with pytest.raises(StopIteration):
            next(self.operations)
def mock_response(frame_type, data):
    """Frame *data* as an nsqd response: [size][frame type][payload].

    The big-endian size word counts the 4-byte frame-type word plus the
    payload, matching the nsqd wire format.
    """
    header = struct.pack('>ll', 4 + len(data), frame_type)
    return header + data
def test_connection():
    @mock_server
    def handle(socket, address):
        # Expect only the protocol magic, then a clean client disconnect.
        assert socket.recv(4) == b'  V2'
        assert socket.recv(1) == b''

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        assert conn.state == states.INIT

        # connect() must be idempotent: calling it twice is a no-op.
        for _ in range(2):
            conn.connect()
            assert conn.state == states.CONNECTED

        conn.close_stream()
        assert conn.state == states.DISCONNECTED


def test_disconnected():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(1) == b''

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        conn.close_stream()
        assert conn.state == states.DISCONNECTED

        # Any further use of the closed stream must raise a socket error.
        for operation in (conn.nop, conn.read_response):
            with pytest.raises(errors.NSQSocketError):
                operation()


@pytest.mark.parametrize('body', [
    b'hello world',
    b'',
    b'{"some": "json data"}',
])
def test_read(body):
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        # Send a size-prefixed payload, then wait for the client to hang up.
        socket.sendall(struct.pack('>l', len(body)))
        socket.sendall(body)
        assert socket.recv(1) == b''

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        assert conn._read_response() == body
        conn.close_stream()


def test_identify():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(9) == b'IDENTIFY\n'

        size = nsq.unpack_size(socket.recv(4))
        data = json.loads(socket.recv(size).decode('utf-8'))
        assert 'gnsq' in data['user_agent']

        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, b'OK'))

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        # A bare OK means the server skipped negotiation: no payload back.
        assert conn.identify() is None
def test_negotiation():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(9) == b'IDENTIFY\n'

        size = nsq.unpack_size(socket.recv(4))
        data = json.loads(socket.recv(size).decode('utf-8'))
        assert 'gnsq' in data['user_agent']

        # Answer with a JSON payload to exercise feature negotiation.
        payload = json.dumps({'test': 42}).encode('utf-8')
        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, payload))

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        assert conn.identify()['test'] == 42


@pytest.mark.parametrize('command,args,resp', [
    ('subscribe', ('topic', 'channel'), b'SUB topic channel\n'),
    ('subscribe', ('foo', 'bar'), b'SUB foo bar\n'),
    ('ready', (0,), b'RDY 0\n'),
    ('ready', (1,), b'RDY 1\n'),
    ('ready', (42,), b'RDY 42\n'),
    ('finish', ('0000000000000000',), b'FIN 0000000000000000\n'),
    ('finish', ('deadbeafdeadbeaf',), b'FIN deadbeafdeadbeaf\n'),
    ('requeue', ('0000000000000000',), b'REQ 0000000000000000 0\n'),
    ('requeue', ('deadbeafdeadbeaf', 0), b'REQ deadbeafdeadbeaf 0\n'),
    ('requeue', ('deadbeafdeadbeaf', 42), b'REQ deadbeafdeadbeaf 42\n'),
    ('touch', ('0000000000000000',), b'TOUCH 0000000000000000\n'),
    ('touch', ('deadbeafdeadbeaf',), b'TOUCH deadbeafdeadbeaf\n'),
    ('close', (), b'CLS\n'),
    ('nop', (), b'NOP\n'),
])
def test_command(command, args, resp):
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(len(resp)) == resp

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        # Dispatch by name so one test body covers every simple command.
        method = getattr(conn, command)
        method(*args)
def test_publish():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(10) == b'PUB topic\n'

        # Body is size-prefixed just like responses.
        assert nsq.unpack_size(socket.recv(4)) == 3
        assert socket.recv(3) == b'sup'

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        conn.publish('topic', b'sup')


def test_multipublish():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(11) == b'MPUB topic\n'

        size = nsq.unpack_size(socket.recv(4))
        data = socket.recv(size)

        # MPUB body layout: [message count][size + body] per message.
        assert nsq.unpack_size(data[:4]) == 2
        data = data[4:]

        for _ in range(2):
            assert nsq.unpack_size(data[:4]) == 3
            assert data[4:7] == b'sup'
            data = data[7:]

        assert data == b''

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        conn.multipublish('topic', [b'sup', b'sup'])


def test_deferpublish():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        # defer=42 switches the command to DPUB with the delay inline.
        assert socket.recv(14) == b'DPUB topic 42\n'

        assert nsq.unpack_size(socket.recv(4)) == 3
        assert socket.recv(3) == b'sup'

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        conn.publish('topic', b'sup', defer=42)
@pytest.mark.parametrize('error_msg,error,fatal', [
    (b'E_INVALID cannot SUB in current state', 'NSQInvalid', True),
    (b'E_BAD_BODY MPUB failed to read body size', 'NSQBadBody', True),
    (b'E_BAD_TOPIC SUB topic name oh my god is not valid', 'NSQBadTopic', True),
    (b'E_BAD_CHANNEL SUB channel name !! is not valid', 'NSQBadChannel', True),
    (b'E_BAD_MESSAGE PUB failed to read message body', 'NSQBadMessage', True),
    (b'E_PUT_FAILED PUT failed', 'NSQPutFailed', True),
    (b'E_PUB_FAILED PUB failed', 'NSQPubFailed', True),
    (b'E_MPUB_FAILED MPUB failed', 'NSQMPubFailed', True),
    (b'E_AUTH_DISABLED AUTH Disabled', 'NSQAuthDisabled', True),
    (b'E_AUTH_FAILED AUTH failed', 'NSQAuthFailed', True),
    (b'E_UNAUTHORIZED AUTH No authorizations found', 'NSQUnauthorized', True),
    (b'E_FIN_FAILED FIN failed', 'NSQFinishFailed', False),
    (b'E_REQ_FAILED REQ failed', 'NSQRequeueFailed', False),
    (b'E_TOUCH_FAILED TOUCH failed', 'NSQTouchFailed', False),
    (b'some unknown error', 'NSQException', True),
])
def test_error(error_msg, error, fatal):
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        socket.sendall(mock_response(nsq.FRAME_TYPE_ERROR, error_msg))

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()

        frame, resp = conn.read_response()
        assert frame == nsq.FRAME_TYPE_ERROR
        # The raw error text is mapped onto the matching exception class.
        assert isinstance(resp, getattr(errors, error))
        # Fatal errors drop the connection; soft errors leave it open.
        assert conn.is_connected != fatal


def test_hashing():
    conn1 = NsqdTCPClient('localhost', 1337)
    conn2 = NsqdTCPClient('localhost', 1337)

    # Clients for the same endpoint compare equal and neither orders first.
    assert conn1 == conn2
    assert not (conn1 < conn2)
    assert not (conn2 < conn1)

    # Equal clients hash alike, so one can stand in for the other as a key.
    assert conn2 in {conn1: True}
def test_sync_receive_messages():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(9) == b'IDENTIFY\n'

        size = nsq.unpack_size(socket.recv(4))
        data = json.loads(socket.recv(size).decode('utf-8'))
        assert isinstance(data, dict)
        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, b'OK'))

        expected = b'SUB topic channel\n'
        assert socket.recv(len(expected)) == expected
        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, b'OK'))

        # Hand out one message per RDY 1, with distinguishable fields.
        for i in range(10):
            assert socket.recv(6) == b'RDY 1\n'
            body = six.b(json.dumps({'data': {'test_key': i}}))
            socket.sendall(mock_response_message(i * 1000 * 1000, i, i, body))

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()
        assert conn.identify() is None

        conn.subscribe('topic', 'channel')
        frame, data = conn.read_response()
        assert frame == nsq.FRAME_TYPE_RESPONSE
        assert data == b'OK'

        for i in range(10):
            conn.ready(1)
            frame, msg = conn.read_response()

            # Every field must round-trip through the wire format intact.
            assert frame == nsq.FRAME_TYPE_MESSAGE
            assert isinstance(msg, Message)
            assert msg.timestamp == i * 1000 * 1000
            assert msg.id == six.b('%016d' % i)
            assert msg.attempts == i
            decoded = json.loads(msg.body.decode('utf-8'))
            assert decoded['data']['test_key'] == i


def test_sync_heartbeat():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, b'_heartbeat_'))
        # The client must answer a heartbeat with NOP.
        assert socket.recv(4) == b'NOP\n'

    with handle as server:
        conn = NsqdTCPClient('127.0.0.1', server.server_port)
        conn.connect()

        frame, data = conn.read_response()
        assert frame == nsq.FRAME_TYPE_RESPONSE
        assert data == b'_heartbeat_'
def test_auth():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(5) == b'AUTH\n'

        # The secret arrives as a size-prefixed body.
        assert nsq.unpack_size(socket.recv(4)) == 6
        assert socket.recv(6) == b'secret'

        payload = json.dumps({'identity': 'awesome'}).encode('utf-8')
        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, payload))

    with handle as server:
        conn = NsqdTCPClient(
            '127.0.0.1', server.server_port, auth_secret=b'secret')

        conn.connect()
        resp = conn.auth()
        assert resp['identity'] == 'awesome'


def test_identify_auth():
    @mock_server
    def handle(socket, address):
        assert socket.recv(4) == b'  V2'
        assert socket.recv(9) == b'IDENTIFY\n'

        size = nsq.unpack_size(socket.recv(4))
        data = json.loads(socket.recv(size).decode('utf-8'))
        assert 'gnsq' in data['user_agent']

        # Tell the client auth is required; it should immediately AUTH.
        payload = json.dumps({'auth_required': True}).encode('utf-8')
        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, payload))

        assert socket.recv(5) == b'AUTH\n'
        assert nsq.unpack_size(socket.recv(4)) == 6
        assert socket.recv(6) == b'secret'

        payload = json.dumps({'identity': 'awesome'}).encode('utf-8')
        socket.sendall(mock_response(nsq.FRAME_TYPE_RESPONSE, payload))

    with handle as server:
        conn = NsqdTCPClient(
            '127.0.0.1', server.server_port, auth_secret=b'secret')

        @conn.on_auth.connect
        def assert_auth(conn, response):
            # The auth signal must fire exactly once with the server reply.
            assert assert_auth.was_called is False
            assert_auth.was_called = True
            assert response['identity'] == 'awesome'

        assert_auth.was_called = False
        conn.connect()
        resp = conn.identify()

        assert resp['auth_required']
        assert assert_auth.was_called
@pytest.mark.parametrize('tls,deflate,snappy', product((True, False), repeat=3))
@pytest.mark.slow
@pytest.mark.timeout(60)
def test_socket_upgrades(tls, deflate, snappy):
    """Exercise every combination of TLS/deflate/snappy stream upgrades."""
    with NsqdIntegrationServer() as server:
        options = {
            'address': server.address,
            'port': server.tcp_port,
            'deflate': deflate,
            'snappy': snappy,
        }

        if tls:
            options.update({
                'tls_v1': True,
                'tls_options': {
                    'keyfile': server.tls_key,
                    'certfile': server.tls_cert,
                },
            })

        conn = NsqdTCPClient(**options)
        conn.connect()
        assert conn.state == states.CONNECTED

        # Requesting both compression schemes at once is a protocol error.
        if deflate and snappy:
            with pytest.raises(errors.NSQErrorCode):
                conn.identify()
            return

        # Known-broken gevent releases cannot wrap the socket for TLS.
        if tls and BAD_GEVENT:
            with pytest.raises(AttributeError):
                conn.identify()
            return

        # nsqd before 0.2.28 cannot complete the TLS handshake here.
        if tls and server.version < (0, 2, 28):
            with pytest.raises(ssl.SSLError):
                conn.identify()
            return

        resp = conn.identify()
        assert isinstance(resp, dict)

        assert resp['tls_v1'] is tls
        assert resp['deflate'] is deflate
        assert resp['snappy'] is snappy

        # A compression layer wraps the TLS socket, which wraps the raw one.
        if tls and (deflate or snappy):
            assert isinstance(conn.stream.socket._socket, SSLSocket)
        elif tls:
            assert isinstance(conn.stream.socket, SSLSocket)

        if deflate:
            assert isinstance(conn.stream.socket, DefalteSocket)

        if snappy:
            assert isinstance(conn.stream.socket, SnappySocket)

        # End-to-end smoke test over the fully upgraded stream.
        conn.publish('topic', b'sup')
        frame, data = conn.read_response()
        assert frame == nsq.FRAME_TYPE_RESPONSE
        assert data == b'OK'

        conn.subscribe('topic', 'channel')
        frame, data = conn.read_response()
        assert frame == nsq.FRAME_TYPE_RESPONSE
        assert data == b'OK'

        conn.ready(1)
        frame, data = conn.read_response()
        assert frame == nsq.FRAME_TYPE_MESSAGE
        assert data.body == b'sup'

        conn.close_stream()


@pytest.mark.slow
@pytest.mark.timeout(60)
def test_cls_error():
    with NsqdIntegrationServer() as server:
        conn = NsqdTCPClient(server.address, server.tcp_port)

        conn.connect()
        assert conn.state == states.CONNECTED

        # CLS before SUB is invalid; nsqd answers with E_INVALID.
        conn.close()
        frame, error = conn.read_response()
        assert frame == nsq.FRAME_TYPE_ERROR
        assert isinstance(error, errors.NSQInvalid)
@pytest.mark.slow
@pytest.mark.timeout(60)
def test_basic():
    with NsqdIntegrationServer() as server:
        conn = gnsq.NsqdHTTPClient(server.address, server.http_port)
        # Sanity-check the three simplest read-only endpoints.
        assert conn.ping() == b'OK'
        assert 'topics' in conn.stats()
        assert 'version' in conn.info()


@pytest.mark.slow
@pytest.mark.timeout(60)
def test_topics_channels():
    with NsqdIntegrationServer() as server:
        conn = gnsq.NsqdHTTPClient(server.address, server.http_port)
        assert len(conn.stats()['topics']) == 0

        # Deleting a topic that does not exist is an HTTP error.
        with pytest.raises(gnsq.errors.NSQHttpError):
            conn.delete_topic('topic')

        conn.create_topic('topic')
        topics = conn.stats()['topics']
        assert len(topics) == 1
        assert topics[0]['topic_name'] == 'topic'

        conn.delete_topic('topic')
        assert len(conn.stats()['topics']) == 0

        # Channel operations require the parent topic to exist.
        with pytest.raises(gnsq.errors.NSQHttpError):
            conn.create_channel('topic', 'channel')

        with pytest.raises(gnsq.errors.NSQHttpError):
            conn.delete_channel('topic', 'channel')

        conn.create_topic('topic')
        assert len(conn.stats()['topics'][0]['channels']) == 0

        conn.create_channel('topic', 'channel')
        channels = conn.stats()['topics'][0]['channels']
        assert len(channels) == 1
        assert channels[0]['channel_name'] == 'channel'

        conn.delete_channel('topic', 'channel')
        assert len(conn.stats()['topics'][0]['channels']) == 0
@pytest.mark.slow
@pytest.mark.timeout(60)
def test_publish():
    with NsqdIntegrationServer() as server:
        conn = gnsq.NsqdHTTPClient(server.address, server.http_port)

        conn.publish('topic', b'sup')
        assert conn.stats()['topics'][0]['depth'] == 1

        # multipublish accepts both sequences and plain iterators.
        conn.multipublish('topic', [b'sup', b'sup'])
        assert conn.stats()['topics'][0]['depth'] == 3

        conn.multipublish('topic', iter([b'sup', b'sup', b'sup']))
        assert conn.stats()['topics'][0]['depth'] == 6

        conn.empty_topic('topic')
        assert conn.stats()['topics'][0]['depth'] == 0

        # With a channel attached, depth is tracked per channel.
        conn.create_topic('topic')
        conn.create_channel('topic', 'channel')
        conn.publish('topic', b'sup')
        assert conn.stats()['topics'][0]['channels'][0]['depth'] == 1

        conn.empty_channel('topic', 'channel')
        assert conn.stats()['topics'][0]['channels'][0]['depth'] == 0

        # Deferred publish over HTTP requires nsqd >= 0.3.6.
        if server.version < (0, 3, 6):
            return

        conn.publish('topic', b'sup', 60 * 1000)
        stats = conn.stats()
        assert stats['topics'][0]['channels'][0]['depth'] == 0
        assert stats['topics'][0]['channels'][0]['deferred_count'] == 1
@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_async_publish():
    with NsqdIntegrationServer() as server:
        producer = Producer(server.tcp_address)
        producer.start()

        # raise_error=False returns greenlets instead of raising inline.
        results = [
            producer.publish('test', b'hi', raise_error=False)
            for _ in range(100)
        ]

        gevent.joinall(results, raise_error=True)
        producer.close()
        producer.join()

        conn = NsqdHTTPClient(server.address, server.http_port)
        stats = conn.stats()

        assert stats['topics'][0]['depth'] == 100


@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_multipublish():
    with NsqdIntegrationServer() as server:
        producer = Producer(server.tcp_address)
        producer.start()

        # 10 batches of 10 messages each.
        for _ in range(10):
            producer.multipublish('test', 10 * [b'hi'])

        producer.close()
        producer.join()

        conn = NsqdHTTPClient(server.address, server.http_port)
        stats = conn.stats()

        assert stats['topics'][0]['depth'] == 100


@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_async_multipublish():
    with NsqdIntegrationServer() as server:
        producer = Producer(server.tcp_address)
        producer.start()

        results = [
            producer.multipublish('test', 10 * [b'hi'], raise_error=False)
            for _ in range(10)
        ]

        gevent.joinall(results, raise_error=True)
        producer.close()
        producer.join()

        conn = NsqdHTTPClient(server.address, server.http_port)
        stats = conn.stats()

        assert stats['topics'][0]['depth'] == 100


@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_publish_error():
    with NsqdIntegrationServer() as server:
        producer = Producer(server.tcp_address)
        producer.start()

        # A negative defer is rejected server-side with E_INVALID.
        with pytest.raises(NSQInvalid):
            producer.publish('test', b'hi', defer=-1000)

        producer.close()
        producer.join()
def test_not_running():
    # 192.0.2.1 is TEST-NET (RFC 5737); nothing ever listens there.
    producer = Producer('192.0.2.1:4150')

    # Publishing before start() must fail regardless of connectivity.
    with pytest.raises(NSQException):
        producer.publish('topic', b'hi')


def test_no_connections():
    producer = Producer('192.0.2.1:4150', timeout=0.01)
    producer.start()

    # With block=False the missing connection surfaces immediately.
    with pytest.raises(NSQNoConnections):
        producer.publish('topic', b'hi', block=False)


def test_basic():
    # At least one address source is required.
    with pytest.raises(ValueError):
        Reader('test', 'test')

    # Address arguments must be strings or lists, never None.
    with pytest.raises(TypeError):
        Reader(
            topic='test',
            channel='test',
            nsqd_tcp_addresses=None,
            lookupd_http_addresses='http://localhost:4161/',
        )

    with pytest.raises(TypeError):
        Reader(
            topic='test',
            channel='test',
            nsqd_tcp_addresses='localhost:4150',
            lookupd_http_addresses=None,
        )

    def message_handler(reader, message):
        pass

    reader = Reader(
        topic='test',
        channel='test',
        name='test',
        max_concurrency=-1,
        nsqd_tcp_addresses='localhost:4150',
        lookupd_http_addresses='http://localhost:4161/',
        message_handler=message_handler
    )

    assert reader.name == 'test'
    # max_concurrency=-1 expands to the machine's CPU count.
    assert reader.max_concurrency == multiprocessing.cpu_count()
    assert len(reader.on_message.receivers) == 1

    # Single address strings are normalized into collections.
    assert isinstance(reader.nsqd_tcp_addresses, set)
    assert len(reader.nsqd_tcp_addresses) == 1

    assert isinstance(reader.lookupds, list)
    assert len(reader.lookupds) == 1
@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_max_concurrency():
    server1 = NsqdIntegrationServer()
    server2 = NsqdIntegrationServer()

    with server1, server2:
        class Accounting(object):
            # Shared mutable state for the nested signal handlers.
            count = 0
            total = 100
            concurrency = 0
            error = None

        # Seed each server with half the messages.
        for server in (server1, server2):
            conn = NsqdHTTPClient(server.address, server.http_port)

            for _ in range(Accounting.total // 2):
                conn.publish('test', b'danger zone!')

        reader = Reader(
            topic='test',
            channel='test',
            nsqd_tcp_addresses=[
                server1.tcp_address,
                server2.tcp_address,
            ],
            max_in_flight=5,
            max_concurrency=1,
        )

        @reader.on_exception.connect
        def error_handler(reader, message, error):
            # Socket errors are expected during shutdown; anything else is
            # recorded so it can be re-raised after the reader stops.
            if isinstance(error, NSQSocketError):
                return
            Accounting.error = error
            reader.close()

        @reader.on_message.connect
        def handler(reader, message):
            assert message.body == b'danger zone!'
            # With max_concurrency=1 no other handler may be in flight,
            # even across a gevent yield point.
            assert Accounting.concurrency == 0

            Accounting.concurrency += 1
            gevent.sleep()
            Accounting.concurrency -= 1

            Accounting.count += 1
            if Accounting.count == Accounting.total:
                reader.close()

        reader.start()

        if Accounting.error:
            raise Accounting.error

        assert Accounting.count == Accounting.total