├── .gitignore
├── .travis.yml
├── CHANGES.rst
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.rst
├── docs
    ├── Makefile
    ├── api.rst
    ├── changelog.rst
    ├── conf.py
    ├── configuration.rst
    ├── index.rst
    ├── installation.rst
    └── make.bat
├── etc
    ├── cassandra
    │   ├── cassandra-cluster-template.yaml
    │   ├── cassandra.yaml
    │   └── log4j-server.properties
    ├── production.ini
    ├── queuey-dev.ini
    ├── queuey-test.ini
    ├── queuey.nginx.conf
    └── supervisord.conf
├── load_tester.py
├── queuey.spec
├── queuey
    ├── __init__.py
    ├── exceptions.py
    ├── resources.py
    ├── run.py
    ├── security.py
    ├── storage
    │   ├── __init__.py
    │   ├── cassandra.py
    │   ├── memory.py
    │   └── util.py
    ├── testing.py
    ├── tests
    │   ├── __init__.py
    │   ├── storage.py
    │   ├── test_cassandra.ini
    │   ├── test_cassandra.py
    │   ├── test_integrated.py
    │   ├── test_memory.ini
    │   └── test_memory.py
    ├── validators.py
    └── views.py
├── requirements.txt
├── runtests.py
├── setup.py
├── start.sh
├── sw
    └── virtualenv.py
└── var
    └── README.txt

/.gitignore:
--------------------------------------------------------------------------------
1 | *.egg
2 | *.egg-info
3 | *.log
4 | *.pyc
5 | *.pid
6 | *$py.class
7 | *.pt.py
8 | *.sock
9 | *.txt.py
10 | *.ropeproject
11 | .Python
12 | .channel
13 | src/
14 | zookeeper/
15 | *~
16 | .coverage
17 | .tox/
18 | nosetests.xml
19 | build/
20 | cassandra
21 | dist/
22 | bin/
23 | lib/
24 | man/
25 | include/
26 | share/
27 | deps
28 | docs/_build
29 | zookeeper.out
30 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 |   - "2.6"
4 |   - "2.7"
5 |
6 | branches:
7 |   only:
8 |     - master
9 |
10 | notifications:
11 |   email:
12 |     - ben@groovie.org
13 |     - hschlichting@mozilla.com
14 |   irc: "irc.mozilla.org#services-dev"
15 |
16 | before_install:
17 |   - sudo apt-get update >/dev/null 2>&1
18 |   - sudo apt-get install libevent-dev libpcre3-dev >/dev/null 2>&1
19 |   - sudo aptitude -y remove cassandra
20 |
21 | install:
22 |   - make build
23 |   - bin/pip install gevent >/dev/null 2>&1
24 |   - make cassandra
25 |
26 | script: make test
27 |
--------------------------------------------------------------------------------
/CHANGES.rst:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 |
4 | 0.9 (unreleased)
5 | ----------------
6 |
7 |
8 | 0.8 (2012-08-28)
9 | ----------------
10 |
11 | Features
12 | ********
13 |
14 | - Compatibility with Cassandra 1.1.
15 | - Add new APIs to get, post and update messages by their message id.
16 | - Add new memory storage backend for testing purposes.
17 | - Add metlog-based metrics logging.
18 | - Use pycassa's system manager support to programmatically create the
19 |   Cassandra schema during startup.
20 |
21 | Bug fixes
22 | *********
23 |
24 | - Fix precision errors in server side message id to timestamp conversion.
25 | - Enforce message keys to be valid UUID1 instead of just any UUID.
26 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This Source Code Form is subject to the terms of the Mozilla Public
2 | License, v. 2.0. If a copy of the MPL was not distributed with this file,
3 | You can obtain one at http://mozilla.org/MPL/2.0/.
4 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.rst
2 | include *.sh
3 | include *.txt
4 | include MANIFEST.in
5 | include queuey.spec
6 | recursive-include etc *
7 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | APPNAME = queuey
2 | DEPS =
3 | HERE = $(shell pwd)
4 | BIN = $(HERE)/bin
5 | VIRTUALENV = virtualenv
6 | NOSE = bin/nosetests -s --with-xunit
7 | TESTS = $(APPNAME)/tests
8 | PYTHON = $(HERE)/bin/python
9 | BUILDAPP = $(HERE)/bin/buildapp
10 | BUILDRPMS = $(HERE)/bin/buildrpms
11 | PYPI = http://pypi.python.org/simple
12 | PYPIOPTIONS = -i $(PYPI)
13 | DOTCHANNEL := $(wildcard .channel)
14 | ifeq ($(strip $(DOTCHANNEL)),)
15 | CHANNEL = dev
16 | RPM_CHANNEL = prod
17 | else
18 | CHANNEL = `cat .channel`
19 | RPM_CHANNEL = `cat .channel`
20 | endif
21 | INSTALL = $(HERE)/bin/pip install
22 | PIP_DOWNLOAD_CACHE ?= /tmp/pip_cache
23 | INSTALLOPTIONS = --download-cache $(PIP_DOWNLOAD_CACHE) -U -i $(PYPI) \
24 |     --use-mirrors
25 |
26 | CASS_SERVER = localhost
27 | CASSANDRA_VERSION = 1.1.5
28 |
29 | ifdef PYPIEXTRAS
30 | PYPIOPTIONS += -e $(PYPIEXTRAS)
31 | INSTALLOPTIONS += -f $(PYPIEXTRAS)
32 | endif
33 |
34 | ifdef PYPISTRICT
35 | PYPIOPTIONS += -s
36 | ifdef PYPIEXTRAS
37 | HOST = `python -c "import urlparse; print urlparse.urlparse('$(PYPI)')[1] + ',' + urlparse.urlparse('$(PYPIEXTRAS)')[1]"`
38 |
39 | else
40 | HOST = `python -c "import urlparse; print urlparse.urlparse('$(PYPI)')[1]"`
41 | endif
42 |
43 | endif
44 |
45 | INSTALL += $(INSTALLOPTIONS)
46 |
47 | SW = sw
48 | CASSANDRA = $(BIN)/cassandra/bin/cassandra
49 | BUILD_DIRS = bin build deps include lib lib64
50 |
51 |
52 | .PHONY: all build test build_rpms mach
53 | .SILENT: lib python pip $(CASSANDRA) cassandra
54 |
55 | all: build
56 |
57 | $(BIN)/python:
58 | 	python $(SW)/virtualenv.py --distribute . >/dev/null 2>&1
59 |
60 | $(BIN)/pip: $(BIN)/python
61 |
62 | lib: $(BIN)/pip
63 | 	echo "Installing package pre-requisites..."
64 | 	$(INSTALL) -r requirements.txt
65 | 	echo "Running setup.py develop"
66 | 	$(PYTHON) setup.py develop
67 |
68 | $(CASSANDRA):
69 | 	@echo "Installing Cassandra"
70 | 	mkdir -p bin
71 | 	cd bin && \
72 | 	curl --progress-bar http://downloads.datastax.com/community/dsc-cassandra-$(CASSANDRA_VERSION)-bin.tar.gz | tar -zx
73 | 	mv bin/dsc-cassandra-$(CASSANDRA_VERSION) bin/cassandra
74 | 	cp etc/cassandra/cassandra.yaml bin/cassandra/conf/cassandra.yaml
75 | 	cp etc/cassandra/log4j-server.properties bin/cassandra/conf/log4j-server.properties
76 | 	cd bin/cassandra/lib && \
77 | 	curl --progress-bar -O http://java.net/projects/jna/sources/svn/content/trunk/jnalib/dist/jna.jar
78 | 	@echo "Finished installing Cassandra"
79 |
80 | cassandra: $(CASSANDRA)
81 |
82 | clean-env:
83 | 	rm -rf $(BUILD_DIRS)
84 |
85 | clean-cassandra:
86 | 	rm -rf cassandra bin/cassandra
87 |
88 | clean: clean-env
89 |
90 | build: lib
91 | 	$(BUILDAPP) -c $(CHANNEL) $(PYPIOPTIONS) $(DEPS)
92 |
93 | test:
94 | 	$(PYTHON) runtests.py
95 |
96 | test-python:
97 | 	$(NOSE) --with-coverage --cover-package=queuey --cover-erase \
98 | 	--cover-inclusive $(APPNAME)
99 |
100 | build_rpms:
101 | 	rm -rf rpms/
102 | 	$(BUILDRPMS) -c $(RPM_CHANNEL) $(DEPS)
103 |
104 | mach: build build_rpms
105 | 	mach clean
106 | 	mach yum install python26 python26-setuptools
107 | 	cd rpms; wget https://mrepo.mozilla.org/mrepo/5-x86_64/RPMS.mozilla-services/gunicorn-0.11.2-1moz.x86_64.rpm
108 | 	cd rpms; wget https://mrepo.mozilla.org/mrepo/5-x86_64/RPMS.mozilla/nginx-0.7.65-4.x86_64.rpm
109 | 	mach yum install rpms/*
110 | 	mach chroot python2.6 -m demoapp.run
111 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ======
2 | Queuey
3 | ======
4 |
5 | .. image:: https://secure.travis-ci.org/mozilla-services/queuey.png?branch=master
6 |    :width: 82px
7 |    :height: 13px
8 |    :alt: Travis CI build report
9 |    :target: https://secure.travis-ci.org/#!/mozilla-services/queuey
10 |
11 | Read the full documentation at http://queuey.readthedocs.org/
12 |
13 | Wat? Another message queue?
14 |
15 | Given the proliferation of message queues, one could be inclined to believe
16 | that inventing more is not the answer. Using an existing solution was
17 | attempted multiple times, with nearly every existing message queue product.
18 |
19 | The others failed (for our use-cases).
20 |
21 | Queuey is meant to handle some unique conditions that most other message
22 | queue solutions either don't handle, or handle very poorly. Many of them, for
23 | example, are written for work queues or pub/sub situations that don't require
24 | longer-term (multiple days) storage of not just many messages but
25 | huge quantities of queues.
26 |
27 | Queuey Assumptions and Features:
28 |
29 | - Messages may persist for upwards of 3 days
30 | - Range scans with timestamps to rewind and re-read messages in a queue
31 | - Millions of queues may be created
32 | - Message delivery characteristics need to be tweakable based on the
33 |   specific cost-benefit for a Queuey deployment
34 | - HTTP API for easy access by a variety of clients, including AJAX
35 | - Authentication system to support multiple 'Application' access to Queuey
36 |   with optional Browser-ID client authentication
37 | - A single deployment may support multiple Applications with varying
38 |   message delivery characteristics, and authentication restricted queue
39 |   access
40 |
41 | Queuey can be configured with varying message guarantees, such as:
42 |
43 | - Deliver once, and exactly once
44 | - Deliver at least once (and under rare conditions, maybe more)
45 | - Deliver no more than once (and under rare conditions, possibly not deliver)
46 |
47 | Changing the storage back-end and deployment strategies directly affects
48 | the message guarantees. This enables a Queuey deployment to meet different
49 | requirements and performance thresholds.
50 |
51 | For more background on Queuey, see `the Mozilla wiki section on queuey `_.
52 |
53 | Requirements
54 | ============
55 |
56 | Make sure you have the following software already
57 | installed before proceeding:
58 |
59 | - Java 1.6
60 | - Ant
61 | - Make
62 | - Python 2.7 (with virtualenv installed)
63 |
64 |
65 | Installation
66 | ============
67 |
68 | After downloading the repository for the first time,
69 | cd into the directory and run::
70 |
71 |     $ make
72 |
73 | This will do the following:
74 |
75 | - Create a virtual python environment
76 | - Install required python packages into this environment
77 |
78 | Cassandra
79 | ---------
80 |
81 | To run Queuey, you need a storage back-end for the queues. The default
82 | storage back-end is Cassandra. Its installation has been automated in
83 | Queuey's Makefile, which installs Cassandra in the same directory as
84 | Queuey::
85 |
86 |     make cassandra
87 |
88 | This will fetch the Cassandra server and set up its configuration.
89 |
90 | The default (Cassandra) stores its data and files inside the local Cassandra
91 | directory so as not to interfere with any existing Cassandra installations on
92 | the system.
93 |
94 | Running
95 | =======
96 |
97 | Running the Cassandra Server:
98 | -----------------------------
99 |
100 | The message store (used by the server to route messages)
101 | and the HTTP server must be started separately. The steps
102 | are (starting from the root project directory)
103 |
104 | ::
105 |
106 |     ./bin/cassandra/bin/cassandra -p cassandra.pid
107 |
108 | To shut it down at any point in the future::
109 |
110 |     kill -2 `cat cassandra.pid`
111 |
112 | Running the Queuey Application:
113 | -------------------------------
114 |
115 | It is recommended that you copy the `etc/queuey-dev.ini` file to
116 | `/etc/queuey.ini`. This will prevent accidental loss of configuration
117 | during an update.
118 |
119 | ::
120 |
121 |     bin/pserve etc/queuey.ini
122 |
123 | Troubleshooting:
124 | ----------------
125 |
126 | "Upgrading" Queuey may require reinitializing the schema. To reinitialize the
127 | schema, remove all data files. The correct schema will then be automatically
128 | created during the next Queuey startup.
129 |
130 | 1. Stop Cassandra::
131 |
132 |     kill -2 `cat cassandra.pid`
133 |
134 | 2. Remove the Cassandra data directory (not the Cassandra binary directory)::
135 |
136 |     rm -rf ./cassandra
137 |
138 | 3. Start Cassandra::
139 |
140 |     ./bin/cassandra/bin/cassandra -p cassandra.pid
141 |
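Quick end-to-end check:
-----------------------

Once Cassandra and Queuey are both running, a quick way to verify the setup
is to create a queue, post a message, and read it back over the HTTP API.
The following is a sketch using only the Python standard library; the
host/port and the application name and key are assumptions that must match
your ini file (the key shown is the example key from the configuration
docs)::

    import json
    import urllib2

    # Assumed deployment details; adjust to your queuey.ini.
    base = 'http://localhost:5000/v1/app_1'
    headers = {'Authorization': 'Application f25bfb8fe200475c8a0532a9cbe7651e'}

    def call(url, data=None):
        req = urllib2.Request(url, data, headers)
        return json.loads(urllib2.urlopen(req).read())

    # Create a queue (the server picks a UUID4 name), post one message
    # to it, then read the message back.
    queue_name = call(base, data='')['queue_name']
    call('%s/%s' % (base, queue_name), data='hello queuey')
    print call('%s/%s' % (base, queue_name))['messages']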
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS    =
6 | SPHINXBUILD   = sphinx-build
7 | PAPER         =
8 | BUILDDIR      = _build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4     = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 |
17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
18 |
19 | help:
20 | 	@echo "Please use \`make <target>' where <target> is one of"
21 | 	@echo "  html       to make standalone HTML files"
22 | 	@echo "  dirhtml    to make HTML files named index.html in directories"
23 | 	@echo "  singlehtml to make a single large HTML file"
24 | 	@echo "  pickle     to make pickle files"
25 | 	@echo "  json       to make JSON files"
26 | 	@echo "  htmlhelp   to make HTML files and a HTML help project"
27 | 	@echo "  qthelp     to make HTML files and a qthelp project"
28 | 	@echo "  devhelp    to make HTML files and a Devhelp project"
29 | 	@echo "  epub       to make an epub"
30 | 	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
31 | 	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
32 | 	@echo "  text       to make text files"
33 | 	@echo "  man        to make manual pages"
34 | 	@echo "  texinfo    to make Texinfo files"
35 | 	@echo "  info       to make Texinfo files and run them through makeinfo"
36 | 	@echo "  gettext    to make PO message catalogs"
37 | 	@echo "  changes    to make an overview of all changed/added/deprecated items"
38 | 	@echo "  linkcheck  to check all external links for integrity"
39 | 	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
40 |
41 | clean:
42 | 	-rm -rf $(BUILDDIR)/*
43 |
44 | html:
45 | 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
46 | 	@echo
47 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
48 |
49 | dirhtml:
50 | 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
51 | 	@echo
52 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
53 |
54 | singlehtml:
55 | 	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
56 | 	@echo
57 | 	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 |
59 | pickle:
60 | 	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
61 | 	@echo
62 | 	@echo "Build finished; now you can process the pickle files."
63 |
64 | json:
65 | 	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
66 | 	@echo
67 | 	@echo "Build finished; now you can process the JSON files."
68 |
69 | htmlhelp:
70 | 	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
71 | 	@echo
72 | 	@echo "Build finished; now you can run HTML Help Workshop with the" \
73 | 	      ".hhp project file in $(BUILDDIR)/htmlhelp."
74 |
75 | qthelp:
76 | 	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
77 | 	@echo
78 | 	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
79 | 	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
80 | 	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/queuey.qhcp"
81 | 	@echo "To view the help file:"
82 | 	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/queuey.qhc"
83 |
84 | devhelp:
85 | 	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
86 | 	@echo
87 | 	@echo "Build finished."
88 | 	@echo "To view the help file:"
89 | 	@echo "# mkdir -p $$HOME/.local/share/devhelp/queuey"
90 | 	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/queuey"
91 | 	@echo "# devhelp"
92 |
93 | epub:
94 | 	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
95 | 	@echo
96 | 	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
97 |
98 | latex:
99 | 	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
100 | 	@echo
101 | 	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
102 | 	@echo "Run \`make' in that directory to run these through (pdf)latex" \
103 | 	      "(use \`make latexpdf' here to do that automatically)."
104 |
105 | latexpdf:
106 | 	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
107 | 	@echo "Running LaTeX files through pdflatex..."
108 | 	$(MAKE) -C $(BUILDDIR)/latex all-pdf
109 | 	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
110 |
111 | text:
112 | 	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
113 | 	@echo
114 | 	@echo "Build finished. The text files are in $(BUILDDIR)/text."
115 |
116 | man:
117 | 	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
118 | 	@echo
119 | 	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
120 |
121 | texinfo:
122 | 	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
123 | 	@echo
124 | 	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
125 | 	@echo "Run \`make' in that directory to run these through makeinfo" \
126 | 	      "(use \`make info' here to do that automatically)."
127 |
128 | info:
129 | 	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
130 | 	@echo "Running Texinfo files through makeinfo..."
131 | 	make -C $(BUILDDIR)/texinfo info
132 | 	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
133 |
134 | gettext:
135 | 	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
136 | 	@echo
137 | 	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
138 |
139 | changes:
140 | 	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
141 | 	@echo
142 | 	@echo "The overview file is in $(BUILDDIR)/changes."
143 |
144 | linkcheck:
145 | 	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
146 | 	@echo
147 | 	@echo "Link check complete; look for any errors in the above output " \
148 | 	      "or in $(BUILDDIR)/linkcheck/output.txt."
149 |
150 | doctest:
151 | 	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
152 | 	@echo "Testing of doctests in the sources finished, look at the " \
153 | 	      "results in $(BUILDDIR)/doctest/output.txt."
154 |
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | .. _queuey_api:
2 |
3 | ==========
4 | Queuey API
5 | ==========
6 |
7 | The Queuey API is documented by URL and valid HTTP methods for each Queuey
8 | resource. Curly braces around a name indicate variables in the URL.
9 |
10 | All calls return JSON, and unless otherwise indicated methods that take
11 | input in the body expect form-encoded variables.
12 |
13 | Queue Management
14 | ================
15 |
16 | Access queue information, create, update, and delete queues. All calls to these
17 | methods must include an Authorization header with the application key::
18 |
19 |     Authorization: Application <application key>
20 |
21 | Calls missing a valid Authorization header or valid Application key will be
22 | rejected.
23 |
24 | .. http:method:: GET /v1/{application}
25 |
26 |    :arg application: Application name
27 |    :optparam integer limit: Number of queues to list.
28 |    :optparam string offset: A queue name to start with for paginating through the
29 |                             result
30 |    :optparam boolean details: Whether additional queue details such as the
31 |                               consistency, type, partitions, and principles
32 |                               for the queue should also be returned. Defaults
33 |                               to false.
34 |    :optparam boolean include_count: When including details, should the total
35 |                                     message count be included? Defaults to
36 |                                     false.
37 |
38 |    Returns a list of queues for the application. No sorting or ordering is
39 |    available for this operation; queues are not in any specific order, but
40 |    their order is consistent for proper pagination.
41 |
42 |    Example response::
43 |
44 |        {
45 |            'status': 'ok',
46 |            'queues': [
47 |                {'queue_name': 'a queue'},
48 |                {'queue_name': 'another queue'}
49 |            ]
50 |        }
51 |
52 |    Example response with details::
53 |
54 |        {
55 |            'status': 'ok',
56 |            'queues': [
57 |                {
58 |                    'queue_name': 'ea2f39c0de9a4b9db6463123641631de',
59 |                    'partitions': 1,
60 |                    'created': 1322521547,
61 |                    'type': 'user',
62 |                    'count': 932
63 |                },
64 |                {
65 |                    'queue_name': 'another queue',
66 |                    'partitions': 4,
67 |                    'created': 1325243233,
68 |                    'type': 'user',
69 |                    'count': 232
70 |                },
71 |            ]
72 |        }
73 |
74 | .. http:method:: POST /v1/{application}
75 |
76 |    :arg application: Application name
77 |    :optparam queue_name: Name of the queue to create
78 |    :optparam integer partitions: How many partitions the queue should have.
79 |                                  Defaults to 1.
80 |    :optparam type: Type of queue to create, defaults to ``user`` which
81 |                    requires authentication to access messages.
82 |    :optparam consistency: Level of consistency for the queue, defaults to
83 |                           ``strong``.
84 |    :optparam principles: List of App or Browser IDs, separated
85 |                          with a comma if there's more than one
86 |
87 |    Create a new queue for the application. Returns a JSON response indicating
88 |    the status, the UUID4 hex string of the queue name (if a queue_name was not
89 |    supplied), and the partitions created.
90 |
91 |    Calling this method with no parameters at all will yield a response like
92 |    the one below.
93 |
94 |    Example response::
95 |
96 |        {
97 |            'status': 'ok',
98 |            'queue_name': 'ea2f39c0de9a4b9db6463123641631de',
99 |            'partitions': 1,
100 |            'type': 'user',
101 |            'consistency': 'strong'
102 |        }
103 |
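As an illustration, here is the queue listing and creation API driven from
Python. This is a sketch, not part of Queuey itself: the host, application
name, and application key are placeholders for your own deployment::

    import json
    import urllib
    import urllib2

    base = 'http://localhost:5000/v1/app_1'  # assumed host and application
    headers = {'Authorization': 'Application f25bfb8fe200475c8a0532a9cbe7651e'}

    def call(url, data=None):
        req = urllib2.Request(url, data, headers)
        return json.loads(urllib2.urlopen(req).read())

    # POST /v1/{application}: create a queue with four partitions.
    created = call(base, urllib.urlencode({'partitions': 4}))
    print created['queue_name']

    # GET /v1/{application}: list queues with details, 50 per page.
    page = call(base + '?' + urllib.urlencode({'limit': 50, 'details': 'true'}))
    for queue in page['queues']:
        print queue['queue_name'], queue['partitions']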
104 | .. http:method:: PUT /v1/{application}/{queue_name}
105 |
106 |    :arg application: Application name
107 |    :arg queue_name: Queue name to access
108 |
109 |    :optparam integer partitions: How many partitions the queue should have.
110 |    :optparam type: Type of queue to create, 'user' or 'public'.
111 |    :optparam consistency: Level of consistency for the queue.
112 |    :optparam principles: List of App or Browser IDs, separated
113 |                          with a comma if there's more than one
114 |
115 |    Update queue parameters. Partitions may only be increased, not decreased.
116 |    Other settings overwrite existing parameters for the queue. To modify the
117 |    principles, first fetch the existing ones, change them as
118 |    appropriate, and PUT the new ones.
119 |
120 |    Example response::
121 |
122 |        {
123 |            'status': 'ok',
124 |            'queue_name': 'ea2f39c0de9a4b9db6463123641631de',
125 |            'partitions': 1,
126 |            'type': 'user',
127 |            'consistency': 'strong'
128 |        }
129 |
130 | .. http:method:: DELETE /v1/{application}/{queue_name}
131 |
132 |    :arg application: Application name
133 |    :arg queue_name: Queue name to access
134 |
135 |    Delete a queue and all its partitions and messages.
136 |
137 |    Example success response::
138 |
139 |        {'status': 'ok'}
140 |
141 | Message Management
142 | ==================
143 |
144 | Create messages on a queue, get messages, and delete messages. Access varies
145 | depending on the queue: queues with a type of ``public``
146 | may have messages viewed without any authentication. All other queues require
147 | an Application key to create messages, and viewing messages varies depending
148 | on queue principles. By default an Application may create/view messages it
149 | creates, unless a set of principles was registered for the queue.
150 |
151 | Responses containing messages include a message `timestamp`. The value is in
152 | seconds since the epoch in GMT. It uses a precision down to multiples of 100
153 | nanoseconds. The precision matches that of UUID1s used for the `message_id`.
154 | Note that double precision floating point numbers don't guarantee enough
155 | significant decimal digits to represent those numbers accurately.
156 |
157 | .. http:method:: GET /v1/{application}/{queue_name}
158 |
159 |    :arg application: Application name
160 |    :arg queue_name: Queue name to access
161 |    :optparam since: All messages newer than this timestamp *or* message id.
162 |                     Should be formatted as seconds since epoch in GMT, or the
163 |                     hexadecimal message id. For exact results with single
164 |                     message accuracy use the hexadecimal message id.
165 |    :optparam limit: Return at most N messages.
166 |    :optparam order: Order of messages, can be set to either `ascending` or
167 |                     `descending`. Defaults to `ascending`.
168 |    :optparam partitions: A specific partition number to retrieve messages from
169 |                          or a comma separated list of partitions. Defaults to
170 |                          retrieving messages from partition 1.
171 |
172 |    Get messages from a queue. By default messages are returned oldest to
173 |    newest; pass ``order=descending`` for newest to oldest.
174 |
175 |    Example response::
176 |
177 |        {
178 |            'status': 'ok',
179 |            'messages': [
180 |                {
181 |                    'message_id': '3a6592301e0911e190b1002500f0fa7c',
182 |                    'timestamp': 1323973966.282637,
183 |                    'body': 'jlaijwiel2432532jilj',
184 |                    'partition': 1
185 |                },
186 |                {
187 |                    'message_id': '3a8553d71e0911e19262002500f0fa7c',
188 |                    'timestamp': 1323973966.918241,
189 |                    'body': 'ion12oibasdfjioawneilnf',
190 |                    'partition': 2
191 |                }
192 |            ]
193 |        }
194 |
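A common consumer pattern with this call is to poll using ``since``. Because
float timestamps lose precision (see the note above), the sketch below tracks
the last seen hexadecimal ``message_id`` instead. The host, application, and
key are the same assumptions as in the earlier sketch, and the queue name is
hypothetical::

    import json
    import time
    import urllib
    import urllib2

    base = 'http://localhost:5000/v1/app_1'
    headers = {'Authorization': 'Application f25bfb8fe200475c8a0532a9cbe7651e'}
    queue = 'ea2f39c0de9a4b9db6463123641631de'  # hypothetical queue name

    last_id = None
    while True:
        params = {'limit': 100, 'partitions': '1,2'}
        if last_id:
            # Exact, single-message accuracy; 'since' is exclusive.
            params['since'] = last_id
        url = '%s/%s?%s' % (base, queue, urllib.urlencode(params))
        req = urllib2.Request(url, None, headers)
        messages = json.loads(urllib2.urlopen(req).read())['messages']
        for message in messages:
            print message['message_id'], message['body']
        if messages:
            last_id = messages[-1]['message_id']
        time.sleep(1)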
195 | .. http:method:: POST /v1/{application}/{queue_name}
196 |
197 |    :arg application: Application name
198 |    :arg queue_name: Queue name to access
199 |
200 |    A body containing a single message and optional partition
201 |    value, or a set of message and partition pairs by number.
202 |
203 |    When the partition is not specified, the message will be randomly
204 |    assigned to one of the partitions for a queue (or just the first
205 |    one if the queue has only one partition).
206 |
207 |    A TTL can be specified per message, in seconds until it expires
208 |    and becomes unavailable.
209 |
210 |    **Posting a batch of messages (Using JSON)**
211 |
212 |    Include a ``Content-Type`` HTTP header set to ``application/json`` with
213 |    a body like the following::
214 |
215 |        {'messages': [
216 |            {
217 |                'body': 'this is message 1',
218 |                'ttl': 3600
219 |            },
220 |            {
221 |                'body': 'this is message 2',
222 |                'partition': 3
223 |            }
224 |        ]}
225 |
226 |    **Post an individual message**
227 |
228 |    Any ``Content-Type`` header will be recorded with the message. The body
229 |    is assumed to be the entirety of the POST body. The TTL or Partition can
230 |    be set by including the appropriate value with either ``X-TTL`` or
231 |    ``X-Partition`` HTTP headers in the request.
232 |
233 |    Example POST as seen by server including both *optional* HTTP headers::
234 |
235 |        POST /v1/notifications/ea2f39c0de9a4b9db6463123641631de HTTP/1.1
236 |        Host: site.running.queuey
237 |        User-Agent: AwesomeClient
238 |        Content-Length: 36
239 |        Content-Type: application/text
240 |        X-TTL: 3600
241 |        X-Partition: 2
242 |
243 |        A really cool message body to store.
244 |
245 |    Example success response::
246 |
247 |        {
248 |            'status': 'ok',
249 |            'messages': [
250 |                {
251 |                    'key': '3a6592301e0911e190b1002500f0fa7c',
252 |                    'timestamp': 1323976306.988889,
253 |                    'partition': 1
254 |                },
255 |            ]
256 |        }
257 |
258 | .. http:method:: GET /v1/{application}/{queue_name}/{messages}
259 |
260 |    :arg application: Application name
261 |    :arg queue_name: Queue name to access
262 |    :arg messages: A single hex message id, or comma separated list of hex
263 |                   message ids. To indicate partitions for the messages,
264 |                   prefix the hex message with the partition number and a
265 |                   colon.
266 |
267 |    Get messages by their message ids from a queue. This API acts as a search,
268 |    so any message that cannot be found is omitted from the result.
269 |
270 |    Example response::
271 |
272 |        {
273 |            'status': 'ok',
274 |            'messages': [
275 |                {
276 |                    'message_id': '3a6592301e0911e190b1002500f0fa7c',
277 |                    'timestamp': 1323973966.282637,
278 |                    'body': 'jlaijwiel2432532jilj',
279 |                    'partition': 1
280 |                },
281 |                {
282 |                    'message_id': '3a8553d71e0911e19262002500f0fa7c',
283 |                    'timestamp': 1323973966.918241,
284 |                    'body': 'ion12oibasdfjioawneilnf',
285 |                    'partition': 2
286 |                }
287 |            ]
288 |        }
289 |
290 | .. http:method:: PUT /v1/{application}/{queue_name}/{messages}
291 |
292 |    :arg application: Application name
293 |    :arg queue_name: Queue name to access
294 |    :arg messages: A single hex message id, or comma separated list of hex
295 |                   message ids. To indicate partitions for the messages,
296 |                   prefix the hex message with the partition number and a
297 |                   colon.
298 |    :optparam X-TTL: The message's TTL, defaults to three days.
299 |
300 |    Overwrite existing messages with new data or create new messages. The body
301 |    is assumed to be the entirety of the PUT body for all messages. There's no
302 |    support for doing a batch update with different body or TTL values.
303 |
304 |    Example PUT as seen by server::
305 |
306 |        PUT /v1/my_application/somequeuename/2%3A8cc967e0cf1e45e3b0d4926c90057caf HTTP/1.1
307 |        Host: site.running.queuey
308 |        User-Agent: AwesomeClient
309 |        Content-Length: 9
310 |        Content-Type: application/text
311 |        X-TTL: 3600
312 |
313 |        New text.
314 |
315 |    Example success response::
316 |
317 |        {'status': 'ok'}
318 |
319 |
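The two message-id based calls above compose naturally: a client can fetch a
message by id and then overwrite it in place. Again a sketch with the same
assumed host, application, and key; the queue and message id are hypothetical,
and the partition prefix ``2:`` is URL-encoded as ``2%3A``::

    import json
    import urllib2

    base = 'http://localhost:5000/v1/app_1'
    headers = {'Authorization': 'Application f25bfb8fe200475c8a0532a9cbe7651e'}
    msg = 'somequeuename/2%3A8cc967e0cf1e45e3b0d4926c90057caf'  # hypothetical

    # GET by message id (acts as a search; missing ids are omitted).
    req = urllib2.Request('%s/%s' % (base, msg), None, headers)
    print json.loads(urllib2.urlopen(req).read())['messages']

    # PUT new content for the same message, with a one hour TTL.
    req = urllib2.Request('%s/%s' % (base, msg), 'New text.', headers)
    req.add_header('X-TTL', '3600')
    req.get_method = lambda: 'PUT'
    print json.loads(urllib2.urlopen(req).read())['status']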
320 | .. http:method:: DELETE /v1/{application}/{queue_name}/{messages}
321 |
322 |    :arg application: Application name
323 |    :arg queue_name: Queue name to access
324 |    :arg messages: A single hex message id, or comma separated list of hex
325 |                   message ids. To indicate partitions for the messages,
326 |                   prefix the hex message with the partition number and a
327 |                   colon.
328 |
329 |    Delete a message, or multiple messages, from a queue. The message ID must
330 |    be prefixed with the partition number and a colon if the queue has multiple
331 |    partitions, to indicate which one contains the message.
332 |
333 |    Example of deleting a message from partition 2::
334 |
335 |        # The %3A is a URL-encoded colon
336 |        DELETE /v1/my_application/somequeuename/2%3A8cc967e0cf1e45e3b0d4926c90057caf
337 |
338 |    Example success response::
339 |
340 |        {'status': 'ok'}
341 |
--------------------------------------------------------------------------------
/docs/changelog.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../CHANGES.rst
2 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # queuey documentation build configuration file, created by
4 | # sphinx-quickstart on Fri Feb  3 14:11:05 2012.
5 | #
6 | # This file is execfile()d with the current directory set to its containing dir.
7 | #
8 | # Note that not all possible configuration values are present in this
9 | # autogenerated file.
10 | #
11 | # All configuration values have a default; values that are commented out
12 | # serve to show the default.
13 |
14 | import sys, os
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #sys.path.insert(0, os.path.abspath('.'))
20 |
21 | # -- General configuration -----------------------------------------------------
22 |
23 | # If your documentation needs a minimal Sphinx version, state it here.
24 | #needs_sphinx = '1.0'
25 |
26 | # Add any Sphinx extension module names here, as strings. They can be extensions
27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
28 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
29 |     'sphinx.ext.viewcode', 'sphinx_http_domain']
30 |
31 | # Add any paths that contain templates here, relative to this directory.
32 | templates_path = ['_templates']
33 |
34 | # The suffix of source filenames.
35 | source_suffix = '.rst'
36 |
37 | # The encoding of source files.
38 | #source_encoding = 'utf-8-sig'
39 |
40 | # The master toctree document.
41 | master_doc = 'index'
42 |
43 | # General information about the project.
44 | project = u'queuey'
45 | copyright = u'2012, Mozilla Foundation'
46 |
47 | # The version info for the project you're documenting, acts as replacement for
48 | # |version| and |release|, also used in various other places throughout the
49 | # built documents.
50 | #
51 | # The short X.Y version.
52 | version = '0.9'
53 | # The full version, including alpha/beta/rc tags.
54 | release = '0.9'
55 |
56 | # The language for content autogenerated by Sphinx. Refer to documentation
57 | # for a list of supported languages.
58 | #language = None
59 |
60 | # There are two options for replacing |today|: either, you set today to some
61 | # non-false value, then it is used:
62 | #today = ''
63 | # Else, today_fmt is used as the format for a strftime call.
64 | #today_fmt = '%B %d, %Y'
65 |
66 | # List of patterns, relative to source directory, that match files and
67 | # directories to ignore when looking for source files.
68 | exclude_patterns = ['_build']
69 |
70 | # The reST default role (used for this markup: `text`) to use for all documents.
71 | #default_role = None
72 |
73 | # If true, '()' will be appended to :func: etc. cross-reference text.
74 | #add_function_parentheses = True
75 |
76 | # If true, the current module name will be prepended to all description
77 | # unit titles (such as .. function::).
78 | #add_module_names = True
79 |
80 | # If true, sectionauthor and moduleauthor directives will be shown in the
81 | # output. They are ignored by default.
82 | #show_authors = False
83 |
84 | # The name of the Pygments (syntax highlighting) style to use.
85 | pygments_style = 'sphinx'
86 |
87 | # A list of ignored prefixes for module index sorting.
88 | #modindex_common_prefix = []
89 |
90 |
91 | # -- Options for HTML output ---------------------------------------------------
92 |
93 | # The theme to use for HTML and HTML Help pages.  See the documentation for
94 | # a list of builtin themes.
95 | html_theme = 'default'
96 |
97 | # Theme options are theme-specific and customize the look and feel of a theme
98 | # further.  For a list of options available for each theme, see the
99 | # documentation.
100 | #html_theme_options = {}
101 |
102 | # Add any paths that contain custom themes here, relative to this directory.
103 | #html_theme_path = []
104 |
105 | # The name for this set of Sphinx documents.  If None, it defaults to
106 | # "<project> v<release> documentation".
107 | #html_title = None
108 |
109 | # A shorter title for the navigation bar.  Default is the same as html_title.
110 | #html_short_title = None
111 |
112 | # The name of an image file (relative to this directory) to place at the top
113 | # of the sidebar.
114 | #html_logo = None
115 |
116 | # The name of an image file (within the static path) to use as favicon of the
117 | # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
118 | # pixels large.
119 | #html_favicon = None
120 |
121 | # Add any paths that contain custom static files (such as style sheets) here,
122 | # relative to this directory. They are copied after the builtin static files,
123 | # so a file named "default.css" will overwrite the builtin "default.css".
124 | html_static_path = ['_static']
125 |
126 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
127 | # using the given strftime format.
128 | #html_last_updated_fmt = '%b %d, %Y'
129 |
130 | # If true, SmartyPants will be used to convert quotes and dashes to
131 | # typographically correct entities.
132 | #html_use_smartypants = True
133 |
134 | # Custom sidebar templates, maps document names to template names.
135 | #html_sidebars = {}
136 |
137 | # Additional templates that should be rendered to pages, maps page names to
138 | # template names.
139 | #html_additional_pages = {}
140 |
141 | # If false, no module index is generated.
142 | #html_domain_indices = True
143 |
144 | # If false, no index is generated.
145 | #html_use_index = True
146 |
147 | # If true, the index is split into individual pages for each letter.
148 | #html_split_index = False
149 |
150 | # If true, links to the reST sources are added to the pages.
151 | #html_show_sourcelink = True
152 |
153 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
154 | #html_show_sphinx = True
155 |
156 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
157 | #html_show_copyright = True
158 |
159 | # If true, an OpenSearch description file will be output, and all pages will
160 | # contain a <link> tag referring to it.  The value of this option must be the
161 | # base URL from which the finished HTML is served.
162 | #html_use_opensearch = ''
163 |
164 | # This is the file name suffix for HTML files (e.g. ".xhtml").
165 | #html_file_suffix = None
166 |
167 | # Output file base name for HTML help builder.
168 | htmlhelp_basename = 'queueydoc'
169 |
170 |
171 | # -- Options for LaTeX output --------------------------------------------------
172 |
173 | latex_elements = {
174 | # The paper size ('letterpaper' or 'a4paper').
175 | #'papersize': 'letterpaper',
176 |
177 | # The font size ('10pt', '11pt' or '12pt').
178 | #'pointsize': '10pt',
179 |
180 | # Additional stuff for the LaTeX preamble.
181 | #'preamble': '',
182 | }
183 |
184 | # Grouping the document tree into LaTeX files. List of tuples
185 | # (source start file, target name, title, author, documentclass [howto/manual]).
186 | latex_documents = [
187 |   ('index', 'queuey.tex', u'queuey Documentation',
188 |    u'Mozilla Foundation', 'manual'),
189 | ]
190 |
191 | # The name of an image file (relative to this directory) to place at the top of
192 | # the title page.
193 | #latex_logo = None
194 |
195 | # For "manual" documents, if this is true, then toplevel headings are parts,
196 | # not chapters.
197 | #latex_use_parts = False
198 |
199 | # If true, show page references after internal links.
200 | #latex_show_pagerefs = False
201 |
202 | # If true, show URL addresses after external links.
203 | #latex_show_urls = False
204 |
205 | # Documents to append as an appendix to all manuals.
206 | #latex_appendices = []
207 |
208 | # If false, no module index is generated.
209 | #latex_domain_indices = True
210 |
211 |
212 | # -- Options for manual page output --------------------------------------------
213 |
214 | # One entry per manual page. List of tuples
215 | # (source start file, name, description, authors, manual section).
216 | man_pages = [
217 |     ('index', 'queuey', u'queuey Documentation',
218 |      [u'Mozilla Foundation'], 1)
219 | ]
220 |
221 | # If true, show URL addresses after external links.
222 | #man_show_urls = False
223 |
224 |
225 | # -- Options for Texinfo output ------------------------------------------------
226 |
227 | # Grouping the document tree into Texinfo files. List of tuples
228 | # (source start file, target name, title, author,
229 | #  dir menu entry, description, category)
230 | texinfo_documents = [
231 |   ('index', 'queuey', u'queuey Documentation',
232 |    u'Mozilla Foundation', 'queuey', 'One line description of project.',
233 |    'Miscellaneous'),
234 | ]
235 |
236 | # Documents to append as an appendix to all manuals.
237 | #texinfo_appendices = []
238 |
239 | # If false, no module index is generated.
240 | #texinfo_domain_indices = True
241 |
242 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
243 | #texinfo_show_urls = 'footnote'
244 |
245 |
246 | # Example configuration for intersphinx: refer to the Python standard library.
247 | intersphinx_mapping = {'http://docs.python.org/': None}
248 |
--------------------------------------------------------------------------------
/docs/configuration.rst:
--------------------------------------------------------------------------------
1 | .. _configuration:
2 |
3 | =============
4 | Configuration
5 | =============
6 |
7 | Cassandra
8 | =========
9 |
10 | Cassandra is configured via `cassandra.yaml` and `log4j-server.properties`
11 | files. Queuey doesn't have any specific configuration requirements for
12 | Cassandra, though availability and durability guarantees depend on
13 | appropriate Cassandra settings.
14 |
15 | Please refer to the `Datastax community edition documentation `_
16 | for further details.
17 |
18 | Pyramid
19 | =======
20 |
21 | Queuey is implemented on top of the `Pyramid web framework `_.
22 | Documentation for configuring WSGI servers and general deployment techniques
23 | therefore also applies to Queuey. The
24 | `Pyramid cookbook `_
25 | contains some advice on a variety of web servers.
26 |
27 | The simplest example of a Pyramid pipeline consists of the following::
28 |
29 |     [app:pyramidapp]
30 |     use = egg:queuey
31 |
32 |     [filter:catcherror]
33 |     paste.filter_app_factory = mozsvc.middlewares:make_err_mdw
34 |
35 |     [pipeline:main]
36 |     pipeline = catcherror
37 |                pyramidapp
38 |
39 | Queuey
40 | ======
41 |
42 | Queuey is configured via an ini-style file, which is also used to configure
43 | general Pyramid settings. This ini file contains a number of sections. The
44 | following sections contain Queuey-specific settings.
45 |
46 | [application_keys]
47 | ------------------
48 |
49 | Contains a mapping of application name to application key. The application
50 | key acts as a shared secret between server and client. For example::
51 |
52 |     [application_keys]
53 |     app_1 = f25bfb8fe200475c8a0532a9cbe7651e
54 |
55 | [storage]
56 | ---------
57 |
58 | Configures the storage for message data.
59 |
60 | backend
61 |     The type of storage, for Cassandra use:
62 |     `queuey.storage.cassandra.CassandraQueueBackend`
63 |
64 | Further settings are dependent on the storage.
65 |
66 | [metadata]
67 | ----------
68 |
69 | Configures the storage for message metadata.
70 |
71 | backend
72 |     The type of storage, for Cassandra use:
73 |     `queuey.storage.cassandra.CassandraMetadata`
74 |
75 | Further settings are dependent on the storage.
76 |
77 | Cassandra storage options
78 | -------------------------
79 |
80 | The Cassandra storages support the following additional settings:
81 |
82 | host
83 |     A comma separated list of either `host` or `host:port` values specifying
84 |     the Cassandra servers. Defaults to `localhost:9160`.
85 |
86 | username
87 |     A username used for connecting to Cassandra's Thrift interface.
88 |
89 | password
90 |     A password used for connecting to Cassandra's Thrift interface.
91 |
92 | multi_dc
93 |     A boolean indicating whether or not Cassandra runs in a multi-datacenter
94 |     environment, defaults to `False`. If enabled, read and write operations
95 |     default to `LOCAL_QUORUM` instead of `QUORUM`.
96 |
97 | create_schema
98 |     A boolean value indicating if the required Cassandra schema should be
99 |     automatically created during startup. Defaults to `True`. If enabled the
100 |     first server in the host list is used to create the schema.
101 |
102 | database
103 |     The name of the keyspace, defaults to `MessageStore` for the storage and
104 |     `MetadataStore` for the metadata section.
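Putting these together, the storage portion of a `queuey.ini` for a small
Cassandra cluster could look like the following sketch. The host names are
placeholders; the other values are the documented defaults spelled out
explicitly::

    [storage]
    backend = queuey.storage.cassandra.CassandraQueueBackend
    host = cass1.example.com:9160,cass2.example.com:9160
    database = MessageStore
    multi_dc = False
    create_schema = True

    [metadata]
    backend = queuey.storage.cassandra.CassandraMetadata
    host = cass1.example.com:9160,cass2.example.com:9160
    database = MetadataStore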
105 |
106 | [metlog]
107 | --------
108 |
109 | Queuey uses metlog for logging metrics. For detailed information see the
110 | `metlog docs `_.
111 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../README.rst
2 |    :start-line: 0
3 |    :end-line: 52
4 |
5 | Reference Material
6 | ==================
7 |
8 | Reference material includes documentation for the `queuey` HTTP API.
9 |
10 | .. toctree::
11 |    :maxdepth: 2
12 |
13 |    installation
14 |    configuration
15 |    using
16 |    api
17 |    Changelog <changelog>
18 |
19 | Source Code
20 | ===========
21 |
22 | All source code is available on `github under queuey `_.
23 |
24 | Indices and tables
25 | ==================
26 |
27 | * :ref:`genindex`
28 | * :ref:`modindex`
29 | * :ref:`glossary`
30 |
31 | License
32 | =======
33 |
34 | ``queuey`` is offered under the MPL 2.0 license.
35 |
36 | Authors
37 | =======
38 |
39 | ``queuey`` is made available by the `Mozilla Foundation`.
40 |
--------------------------------------------------------------------------------
/docs/installation.rst:
--------------------------------------------------------------------------------
1 | .. _installation:
2 |
3 | ============
4 | Installation
5 | ============
6 |
7 | Queuey is composed of two main parts: the Queuey Python web application and
8 | the back-end storage layer used by the web application, which defaults to
9 | Cassandra_.
10 |
11 | Cassandra_ can be set up either as a single node, or preferably as a cluster
12 | for higher throughput, availability, and durability. The Queuey web application
13 | should ideally be installed on separate web nodes to avoid sharing resources
14 | with Cassandra_, but in smaller setups should be fine on the same node.
15 |
16 | Installing the Queuey web application
17 | =====================================
18 |
19 | Using the source
20 | ----------------
21 |
22 | Pre-requisites:
23 |
24 | - Python >=2.6
25 | - Make
26 |
27 | Check out the source with git from GitHub and build:
28 |
29 | .. code-block:: bash
30 |
31 |     git clone git://github.com/mozilla-services/queuey.git
32 |     cd queuey && make
33 |
34 | This will set up a new virtualenv in this directory with all the other tools
35 | installed that Queuey needs to run.
36 |
37 | Installing Cassandra
38 | ====================
39 |
40 | It's recommended that Cassandra be installed using the Datastax_
41 | `Cassandra Community Edition`_ as it goes through more testing than the latest
42 | open-source version and provides a smooth upgrade path for the Enterprise
43 | Edition should one wish to upgrade later. It also comes with support for the
44 | Datastax_ Opscenter_ to help manage the Cassandra_ cluster.
45 |
46 | There is complete documentation on the Datastax_ site that explains the
47 | installation in more detail; a quick-start is provided here based on those
48 | docs.
49 |
50 | Before continuing to install Cassandra_, you should make sure the machine
51 | you're installing to has the necessary pre-requisites::
52 |
53 |     Sun Java Runtime Environment 1.6.0_19 or later
54 |
55 | You can check to see what version of Java is available by running:
56 |
57 | .. code-block:: bash
58 |
59 |     java -version
60 |
61 | Which should print something like:
62 |
63 | .. code-block:: bash
64 |
65 |     Java(TM) SE Runtime Environment (build 1.6.0_29-b11-402-11D50b)
66 |
67 | If you're using an OpenJDK Java version, see the Datastax_ site for
68 | `Installing Sun JRE on Redhat Systems `_ or
69 | `Installing Sun JRE on Ubuntu systems `_.
70 |
71 | These directions include installing the opscenter agent, which reports cluster
72 | and node information for the opscenter dashboard.
73 |
74 | Using the source
75 | ----------------
76 |
77 | If you installed Queuey using ``make`` above and Cassandra_ is being installed
78 | on the Queuey node, the Makefile includes Cassandra_ setup:
79 |
80 | .. code-block:: bash
81 |
82 |     make cassandra
83 |
84 | If setting up a cluster, or not installing Cassandra_ on the same node as the
85 | Queuey webapp, the following directions should be used. Also note that this
86 | step does not install opscenter, which also requires the following.
87 |
88 | Download the source tarball to the desired directory (first check for newer
89 | versions):
90 |
91 | .. code-block:: bash
92 |
93 |     cd $HOME
94 |     mkdir datastax
95 |     cd datastax
96 |
97 |     wget http://downloads.datastax.com/community/dsc-cassandra-1.1.2-bin.tar.gz
98 |
99 |     # On your first node *only*, get opscenter
100 |     wget http://downloads.datastax.com/community/opscenter-2.1.2-free.tar.gz
101 |
102 |     # Untar the distributions
103 |     tar -xzvf dsc-cassandra-1.1.2-bin.tar.gz
104 |     tar -xzvf opscenter-2.1.2-free.tar.gz
105 |
106 |     # Remove the tarballs
107 |     rm *.tar.gz
108 |
109 |     # Create a data/logging directory
110 |     mkdir $HOME/datastax/cassandra-data
111 |
112 | The opscenter package only needs to be installed on a single node, as the
113 | opscenter agent for the other nodes will be configured and tar'd up after
114 | the setup is run on the main node. This is because the agent.tar.gz that will
115 | be created contains SSL authentication information to protect the agent's
116 | communication.
117 |
118 | For more efficient performance, it's recommended that JNA be installed to
119 | improve memory performance.
120 |
121 | 1. Download jna.jar from the `JNA project site `_.
122 | 2. Add jna.jar to $CASSANDRA_HOME/lib/ or otherwise place it on the CLASSPATH.
123 | 3. Edit the file /etc/security/limits.conf, adding the following entries for
124 |    the user or group that runs Cassandra::
125 |
126 |     $USER soft memlock unlimited
127 |     $USER hard memlock unlimited
128 |
129 | Via RPMs
130 | --------
131 |
132 | See the `Datastax RPM installation instructions `_.
133 |
134 |
135 | .. _Cassandra: http://cassandra.apache.org/
136 | .. _Cassandra Community Edition: http://www.datastax.com/products/community
137 | .. _Opscenter: http://www.datastax.com/products/opscenter
138 | .. _Datastax: http://www.datastax.com/
139 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | REM Command file for Sphinx documentation
4 |
5 | if "%SPHINXBUILD%" == "" (
6 | 	set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=_build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
10 | set I18NSPHINXOPTS=%SPHINXOPTS% .
11 | if NOT "%PAPER%" == "" (
12 | 	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
13 | 	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
14 | )
15 |
16 | if "%1" == "" goto help
17 |
18 | if "%1" == "help" (
19 | 	:help
20 | 	echo.Please use `make ^<target^>` where ^<target^> is one of
21 | 	echo.  html       to make standalone HTML files
22 | 	echo.  dirhtml    to make HTML files named index.html in directories
23 | 	echo.  singlehtml to make a single large HTML file
24 | 	echo.  pickle     to make pickle files
25 | 	echo.  json       to make JSON files
26 | 	echo.  htmlhelp   to make HTML files and a HTML help project
27 | 	echo.  qthelp     to make HTML files and a qthelp project
28 | 	echo.  devhelp    to make HTML files and a Devhelp project
29 | 	echo.  epub       to make an epub
30 | 	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
31 | 	echo.  text       to make text files
32 | 	echo.  man        to make manual pages
33 | 	echo.  texinfo    to make Texinfo files
34 | 	echo.  gettext    to make PO message catalogs
35 | 	echo.  changes    to make an overview over all changed/added/deprecated items
36 | 	echo.  linkcheck  to check all external links for integrity
37 | 	echo.  doctest    to run all doctests embedded in the documentation if enabled
38 | 	goto end
39 | )
40 |
41 | if "%1" == "clean" (
42 | 	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
43 | 	del /q /s %BUILDDIR%\*
44 | 	goto end
45 | )
46 |
47 | if "%1" == "html" (
48 | 	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
49 | 	if errorlevel 1 exit /b 1
50 | 	echo.
51 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
52 | 	goto end
53 | )
54 |
55 | if "%1" == "dirhtml" (
56 | 	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
57 | 	if errorlevel 1 exit /b 1
58 | 	echo.
59 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
60 | 	goto end
61 | )
62 |
63 | if "%1" == "singlehtml" (
64 | 	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
65 | 	if errorlevel 1 exit /b 1
66 | 	echo.
67 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
68 | 	goto end
69 | )
70 |
71 | if "%1" == "pickle" (
72 | 	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
73 | 	if errorlevel 1 exit /b 1
74 | 	echo.
75 | 	echo.Build finished; now you can process the pickle files.
76 | 	goto end
77 | )
78 |
79 | if "%1" == "json" (
80 | 	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
81 | 	if errorlevel 1 exit /b 1
82 | 	echo.
83 | 	echo.Build finished; now you can process the JSON files.
84 | 	goto end
85 | )
86 |
87 | if "%1" == "htmlhelp" (
88 | 	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
89 | 	if errorlevel 1 exit /b 1
90 | 	echo.
91 | 	echo.Build finished; now you can run HTML Help Workshop with the ^
92 | .hhp project file in %BUILDDIR%/htmlhelp.
93 | 	goto end
94 | )
95 |
96 | if "%1" == "qthelp" (
97 | 	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
98 | 	if errorlevel 1 exit /b 1
99 | 	echo.
100 | 	echo.Build finished; now you can run "qcollectiongenerator" with the ^
101 | .qhcp project file in %BUILDDIR%/qthelp, like this:
102 | 	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\queuey.qhcp
103 | 	echo.To view the help file:
104 | 	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\queuey.qhc
105 | 	goto end
106 | )
107 |
108 | if "%1" == "devhelp" (
109 | 	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
110 | 	if errorlevel 1 exit /b 1
111 | 	echo.
112 | 	echo.Build finished.
113 | 	goto end
114 | )
115 |
116 | if "%1" == "epub" (
117 | 	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
118 | 	if errorlevel 1 exit /b 1
119 | 	echo.
120 | 	echo.Build finished. The epub file is in %BUILDDIR%/epub.
121 | 	goto end
122 | )
123 |
124 | if "%1" == "latex" (
125 | 	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
126 | 	if errorlevel 1 exit /b 1
127 | 	echo.
128 | 	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
129 | 	goto end
130 | )
131 |
132 | if "%1" == "text" (
133 | 	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
134 | 	if errorlevel 1 exit /b 1
135 | 	echo.
136 | 	echo.Build finished. The text files are in %BUILDDIR%/text.
137 | 	goto end
138 | )
139 |
140 | if "%1" == "man" (
141 | 	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
142 | 	if errorlevel 1 exit /b 1
143 | 	echo.
144 | 	echo.Build finished. The manual pages are in %BUILDDIR%/man.
145 | 	goto end
146 | )
147 |
148 | if "%1" == "texinfo" (
149 | 	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
150 | 	if errorlevel 1 exit /b 1
151 | 	echo.
152 | 	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
153 | 	goto end
154 | )
155 |
156 | if "%1" == "gettext" (
157 | 	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
158 | 	if errorlevel 1 exit /b 1
159 | 	echo.
160 | 	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
161 | 	goto end
162 | )
163 |
164 | if "%1" == "changes" (
165 | 	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
166 | 	if errorlevel 1 exit /b 1
167 | 	echo.
168 | 	echo.The overview file is in %BUILDDIR%/changes.
169 | 	goto end
170 | )
171 |
172 | if "%1" == "linkcheck" (
173 | 	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
174 | 	if errorlevel 1 exit /b 1
175 | 	echo.
176 | 	echo.Link check complete; look for any errors in the above output ^
177 | or in %BUILDDIR%/linkcheck/output.txt.
178 | 	goto end
179 | )
180 |
181 | if "%1" == "doctest" (
182 | 	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
183 | 	if errorlevel 1 exit /b 1
184 | 	echo.
185 | 	echo.Testing of doctests in the sources finished, look at the ^
186 | results in %BUILDDIR%/doctest/output.txt.
187 | 	goto end
188 | )
189 |
190 | :end
191 |
--------------------------------------------------------------------------------
/etc/cassandra/cassandra-cluster-template.yaml:
--------------------------------------------------------------------------------
1 | ## SET THESE AS APPROPRIATE
2 | cluster_name: 'NAME OF CLUSTER'
3 | initial_token: 0
4 | listen_address: IP.ADDRESS.HERE
5 | rpc_address: IP.ADDRESS.HERE
6 |
7 | seed_provider:
8 |     - class_name: org.apache.cassandra.locator.SimpleSeedProvider
9 |       parameters:
10 |           - seeds: "IP.ADDRESSES.HERE,LIKE.SO."
11 |
12 | # Cassandra directories
13 | data_file_directories:
14 |     - cassandra/data
15 | commitlog_directory: cassandra/commitlog
16 | saved_caches_directory: cassandra/saved_caches
17 |
18 | # For workloads with more data than can fit in memory, Cassandra's
19 | # bottleneck will be reads that need to fetch data from
20 | # disk. "concurrent_reads" should be set to (16 * number_of_drives) in
21 | # order to allow the operations to enqueue low enough in the stack
22 | # that the OS and drives can reorder them.
23 | #
24 | # On the other hand, since writes are almost never IO bound, the ideal
25 | # number of "concurrent_writes" is dependent on the number of cores in
26 | # your system; (8 * number_of_cores) is a good rule of thumb.
27 | concurrent_reads: 32
28 | concurrent_writes: 32
29 |
30 |
31 | ## TUNE IF NEEDED
32 | # Default cluster config
33 | hinted_handoff_enabled: false
34 | authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
35 | authority: org.apache.cassandra.auth.AllowAllAuthority
36 | partitioner: org.apache.cassandra.dht.RandomPartitioner
37 | commitlog_sync: periodic
38 | commitlog_sync_period_in_ms: 10000
39 | flush_largest_memtables_at: 0.75
40 | reduce_cache_sizes_at: 0.85
41 | reduce_cache_capacity_to: 0.6
42 | memtable_flush_queue_size: 4
43 | storage_port: 7000
44 |
45 | rpc_port: 9160
46 | rpc_keepalive: true
47 | rpc_server_type: hsha
48 | rpc_timeout_in_ms: 10000
49 |
50 | thrift_framed_transport_size_in_mb: 15
51 | thrift_max_message_length_in_mb: 16
52 |
53 | incremental_backups: false
54 | snapshot_before_compaction: false
55 | column_index_size_in_kb: 64
56 | in_memory_compaction_limit_in_mb: 64
57 |
58 | multithreaded_compaction: false
59 | compaction_throughput_mb_per_sec: 16
60 | compaction_preheat_key_cache: true
61 |
62 | endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
63 | dynamic_snitch_update_interval_in_ms: 100
64 | dynamic_snitch_reset_interval_in_ms: 600000
65 | dynamic_snitch_badness_threshold: 0.1
66 |
67 | # request_scheduler -- Set this to a class that implements
68 | # RequestScheduler, which will schedule incoming client requests
69 | # according to the specific policy. This is useful for multi-tenancy
70 | # with a single Cassandra cluster.
71 | # NOTE: This is specifically for requests from the client and does
72 | # not affect inter node communication.
73 | # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
74 | # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
75 | # client requests to a node with a separate queue for each
76 | # request_scheduler_id. The scheduler is further customized by
77 | # request_scheduler_options as described below.
78 | request_scheduler: org.apache.cassandra.scheduler.NoScheduler
79 |
80 | # Scheduler Options vary based on the type of scheduler
81 | # NoScheduler - Has no options
82 | # RoundRobin
83 | #  - throttle_limit -- The throttle_limit is the number of in-flight
84 | #                      requests per client. Requests beyond
85 | #                      that limit are queued up until
86 | #                      running requests can complete.
87 | #                      The value of 80 here is twice the number of
88 | #                      concurrent_reads + concurrent_writes.
89 | #  - default_weight -- default_weight is optional and allows for
90 | #                      overriding the default which is 1.
91 | #  - weights -- Weights are optional and will default to 1 or the
92 | #               overridden default_weight. The weight translates into how
93 | #               many requests are handled during each turn of the
94 | #               RoundRobin, based on the scheduler id.
95 | #
96 | # request_scheduler_options:
97 | #    throttle_limit: 80
98 | #    default_weight: 5
99 | #    weights:
100 | #      Keyspace1: 1
101 | #      Keyspace2: 5
102 |
103 | # request_scheduler_id -- An identifier based on which to perform
104 | # the request scheduling. Currently the only valid option is keyspace.
105 | # request_scheduler_id: keyspace 106 | 107 | index_interval: 128 108 | 109 | encryption_options: 110 | internode_encryption: none 111 | keystore: conf/.keystore 112 | keystore_password: cassandra 113 | truststore: conf/.truststore 114 | truststore_password: cassandra 115 | -------------------------------------------------------------------------------- /etc/cassandra/cassandra.yaml: -------------------------------------------------------------------------------- 1 | # Cassandra storage config YAML 2 | 3 | # NOTE: 4 | # See http://wiki.apache.org/cassandra/StorageConfiguration for 5 | # full explanations of configuration directives 6 | # /NOTE 7 | 8 | # The name of the cluster. This is mainly used to prevent machines in 9 | # one logical cluster from joining another. 10 | cluster_name: 'Test Cluster' 11 | 12 | # You should always specify InitialToken when setting up a production 13 | # cluster for the first time, and often when adding capacity later. 14 | # The principle is that each node should be given an equal slice of 15 | # the token ring; see http://wiki.apache.org/cassandra/Operations 16 | # for more details. 17 | # 18 | # If blank, Cassandra will request a token bisecting the range of 19 | # the heaviest-loaded existing node. If there is no load information 20 | # available, such as is the case with a new cluster, it will pick 21 | # a random token, which will lead to hot spots. 22 | initial_token: 23 | 24 | # See http://wiki.apache.org/cassandra/HintedHandoff 25 | hinted_handoff_enabled: true 26 | # this defines the maximum amount of time a dead host will have hints 27 | # generated. After it has been dead this long, hints will be dropped. 28 | max_hint_window_in_ms: 3600000 # one hour 29 | # Sleep this long after delivering each hint 30 | hinted_handoff_throttle_delay_in_ms: 1 31 | 32 | # The following setting populates the page cache on memtable flush and compaction 33 | # WARNING: Enable this setting only when the whole node's data fits in memory. 34 | # Defaults to: false 35 | # populate_io_cache_on_flush: false 36 | 37 | # authentication backend, implementing IAuthenticator; used to identify users 38 | authenticator: org.apache.cassandra.auth.AllowAllAuthenticator 39 | 40 | # authorization backend, implementing IAuthority; used to limit access/provide permissions 41 | authority: org.apache.cassandra.auth.AllowAllAuthority 42 | 43 | # The partitioner is responsible for distributing rows (by key) across 44 | # nodes in the cluster. Any IPartitioner may be used, including your 45 | # own as long as it is on the classpath. Out of the box, Cassandra 46 | # provides org.apache.cassandra.dht.RandomPartitioner 47 | # org.apache.cassandra.dht.ByteOrderedPartitioner, 48 | # org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated), 49 | # and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner 50 | # (deprecated). 51 | # 52 | # - RandomPartitioner distributes rows across the cluster evenly by md5. 53 | # When in doubt, this is the best option. 54 | # - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows 55 | # scanning rows in key order, but the ordering can generate hot spots 56 | # for sequential insertion workloads. 57 | # - OrderPreservingPartitioner is an obsolete form of BOP, that stores 58 | #   keys in a less-efficient format and only works with keys that are 59 | # UTF8-encoded Strings. 60 | # - CollatingOPP collates according to EN,US rules rather than lexical byte 61 | # ordering. Use this as an example if you need custom collation.
62 | # 63 | # See http://wiki.apache.org/cassandra/Operations for more on 64 | # partitioners and token selection. 65 | partitioner: org.apache.cassandra.dht.RandomPartitioner 66 | 67 | # directories where Cassandra should store data on disk. 68 | data_file_directories: 69 | - cassandra/data 70 | 71 | # commit log 72 | commitlog_directory: cassandra/commitlog 73 | 74 | # Maximum size of the key cache in memory. 75 | # 76 | # Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the 77 | # minimum, sometimes more. The key cache is fairly tiny for the amount of 78 | # time it saves, so it's worthwhile to use it at large numbers. 79 | # The row cache saves even more time, but must store the whole values of 80 | # its rows, so it is extremely space-intensive. It's best to only use the 81 | # row cache if you have hot rows or static rows. 82 | # 83 | # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. 84 | # 85 | # Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. 86 | key_cache_size_in_mb: 87 | 88 | # Duration in seconds after which Cassandra should 89 | # save the keys cache. Caches are saved to saved_caches_directory as 90 | # specified in this configuration file. 91 | # 92 | # Saved caches greatly improve cold-start speeds, and are relatively cheap in 93 | # terms of I/O for the key cache. Row cache saving is much more expensive and 94 | # has limited use. 95 | # 96 | # Default is 14400 or 4 hours. 97 | key_cache_save_period: 14400 98 | 99 | # Number of keys from the key cache to save 100 | # Disabled by default, meaning all keys are going to be saved 101 | # key_cache_keys_to_save: 100 102 | 103 | # Maximum size of the row cache in memory. 104 | # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. 105 | # 106 | # Default value is 0, to disable row caching. 107 | row_cache_size_in_mb: 32 108 | 109 | # Duration in seconds after which Cassandra should 110 | # save the row cache. Caches are saved to saved_caches_directory as specified 111 | # in this configuration file. 112 | # 113 | # Saved caches greatly improve cold-start speeds, and are relatively cheap in 114 | # terms of I/O for the key cache. Row cache saving is much more expensive and 115 | # has limited use. 116 | # 117 | # Default is 0 to disable saving the row cache. 118 | row_cache_save_period: 0 119 | 120 | # Number of keys from the row cache to save 121 | # Disabled by default, meaning all keys are going to be saved 122 | # row_cache_keys_to_save: 100 123 | 124 | # The provider for the row cache to use. 125 | # 126 | # Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider 127 | # 128 | # SerializingCacheProvider serialises the contents of the row and stores 129 | # it in native memory, i.e., off the JVM Heap. Serialized rows take 130 | # significantly less memory than "live" rows in the JVM, so you can cache 131 | # more rows in a given memory footprint. And storing the cache off-heap 132 | # means you can use smaller heap sizes, reducing the impact of GC pauses. 133 | # 134 | # It is also valid to specify the fully-qualified class name to a class 135 | # that implements org.apache.cassandra.cache.IRowCacheProvider. 136 | # 137 | # Defaults to SerializingCacheProvider 138 | row_cache_provider: SerializingCacheProvider 139 | 140 | # saved caches 141 | saved_caches_directory: cassandra/saved_caches 142 | 143 | # commitlog_sync may be either "periodic" or "batch."
144 | # When in batch mode, Cassandra won't ack writes until the commit log 145 | # has been fsynced to disk. It will wait up to 146 | # commitlog_sync_batch_window_in_ms milliseconds for other writes, before 147 | # performing the sync. 148 | # 149 | # commitlog_sync: batch 150 | # commitlog_sync_batch_window_in_ms: 50 151 | # 152 | # the other option is "periodic" where writes may be acked immediately 153 | # and the CommitLog is simply synced every commitlog_sync_period_in_ms 154 | # milliseconds. 155 | commitlog_sync: periodic 156 | commitlog_sync_period_in_ms: 10000 157 | 158 | # Configure the size of the individual Commitlog file. The 159 | # default is 128 MB, which is almost always fine, but if you are 160 | # archiving commitlog segments (see commitlog_archiving.properties), 161 | # then you probably want a finer granularity of archiving; 16 MB 162 | # is reasonable. 163 | # 164 | # commitlog_segment_size_in_mb: 128 165 | 166 | # any class that implements the SeedProvider interface and has a 167 | # constructor that takes a Map of parameters will do. 168 | seed_provider: 169 | # Addresses of hosts that are deemed contact points. 170 | # Cassandra nodes use this list of hosts to find each other and learn 171 | # the topology of the ring. You must change this if you are running 172 | # multiple nodes! 173 | - class_name: org.apache.cassandra.locator.SimpleSeedProvider 174 | parameters: 175 | # seeds is actually a comma-delimited list of addresses. 176 | # Ex: "<ip1>,<ip2>,<ip3>" 177 | - seeds: "127.0.0.1" 178 | 179 | # emergency pressure valve: each time heap usage after a full (CMS) 180 | # garbage collection is above this fraction of the max, Cassandra will 181 | # flush the largest memtables. 182 | # 183 | # Set to 1.0 to disable. Setting this lower than 184 | # CMSInitiatingOccupancyFraction is not likely to be useful. 185 | # 186 | # RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: 187 | # it is most effective under light to moderate load, or read-heavy 188 | # workloads; under truly massive write load, it will often be too 189 | # little, too late. 190 | flush_largest_memtables_at: 0.75 191 | 192 | # emergency pressure valve #2: the first time heap usage after a full 193 | # (CMS) garbage collection is above this fraction of the max, 194 | # Cassandra will reduce cache maximum _capacity_ to the given fraction 195 | # of the current _size_. Should usually be set substantially above 196 | # flush_largest_memtables_at, since that will have less long-term 197 | # impact on the system. 198 | # 199 | # Set to 1.0 to disable. Setting this lower than 200 | # CMSInitiatingOccupancyFraction is not likely to be useful. 201 | reduce_cache_sizes_at: 0.85 202 | reduce_cache_capacity_to: 0.6 203 | 204 | # For workloads with more data than can fit in memory, Cassandra's 205 | # bottleneck will be reads that need to fetch data from 206 | # disk. "concurrent_reads" should be set to (16 * number_of_drives) in 207 | # order to allow the operations to enqueue low enough in the stack 208 | # that the OS and drives can reorder them. 209 | # 210 | # On the other hand, since writes are almost never IO bound, the ideal 211 | # number of "concurrent_writes" is dependent on the number of cores in 212 | # your system; (8 * number_of_cores) is a good rule of thumb. 213 | concurrent_reads: 32 214 | concurrent_writes: 32 215 | 216 | # Total memory to use for memtables. Cassandra will flush the largest 217 | # memtable when this much memory is used.
218 | # If omitted, Cassandra will set it to 1/3 of the heap. 219 | # memtable_total_space_in_mb: 2048 220 | 221 | # Total space to use for commitlogs. 222 | # If space gets above this value (it will round up to the next nearest 223 | # segment multiple), Cassandra will flush every dirty CF in the oldest 224 | # segment and remove it. 225 | # commitlog_total_space_in_mb: 4096 226 | 227 | # This sets the number of memtable flush writer threads. These will 228 | # be blocked by disk io, and each one will hold a memtable in memory 229 | # while blocked. If you have a large heap and many data directories, 230 | # you can increase this value for better flush performance. 231 | # By default this will be set to the number of data directories defined. 232 | #memtable_flush_writers: 1 233 | 234 | # the number of full memtables to allow pending flush, that is, 235 | # waiting for a writer thread. At a minimum, this should be set to 236 | # the maximum number of secondary indexes created on a single CF. 237 | memtable_flush_queue_size: 4 238 | 239 | # Whether to, when doing sequential writing, fsync() at intervals in 240 | # order to force the operating system to flush the dirty 241 | # buffers. Enable this to avoid sudden dirty buffer flushing from 242 | # impacting read latencies. Almost always a good idea on SSDs; not 243 | # necessarily on platters. 244 | trickle_fsync: false 245 | trickle_fsync_interval_in_kb: 10240 246 | 247 | # TCP port, for commands and data 248 | storage_port: 7000 249 | 250 | # SSL port, for encrypted communication. Unused unless enabled in 251 | # encryption_options 252 | ssl_storage_port: 7001 253 | 254 | # Address to bind to and tell other Cassandra nodes to connect to. You 255 | # _must_ change this if you want multiple nodes to be able to 256 | # communicate! 257 | # 258 | # Leaving it blank leaves it up to InetAddress.getLocalHost(). This 259 | # will always do the Right Thing *if* the node is properly configured 260 | # (hostname, name resolution, etc), and the Right Thing is to use the 261 | # address associated with the hostname (it might not be). 262 | # 263 | # Setting this to 0.0.0.0 is always wrong. 264 | listen_address: localhost 265 | 266 | # Address to broadcast to other Cassandra nodes 267 | # Leaving this blank will set it to the same value as listen_address 268 | # broadcast_address: 1.2.3.4 269 | 270 | # The address to bind the Thrift RPC service to -- clients connect 271 | # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if 272 | # you want Thrift to listen on all interfaces. 273 | # 274 | # Leaving this blank has the same effect it does for ListenAddress, 275 | # (i.e. it will be based on the configured hostname of the node). 276 | rpc_address: localhost 277 | # port for Thrift to listen for clients on 278 | rpc_port: 9160 279 | 280 | # enable or disable keepalive on rpc connections 281 | rpc_keepalive: true 282 | 283 | # Cassandra provides three options for the RPC Server: 284 | # 285 | # sync -> One connection per thread in the rpc pool (see below). 286 | # For a very large number of clients, memory will be your limiting 287 | # factor; on a 64 bit JVM, 128KB is the minimum stack size per thread. 288 | # Connection pooling is very, very strongly recommended. 289 | # 290 | # async -> Nonblocking server implementation with one thread to serve 291 | # rpc connections. This is not recommended for high throughput use 292 | # cases.
Async has been tested to be about 50% slower than sync 293 | # or hsha and is deprecated: it will be removed in the next major release. 294 | # 295 | # hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool 296 | # (see below) is used to manage requests, but the threads are multiplexed 297 | # across the different clients. 298 | # 299 | # The default is sync because on Windows hsha is about 30% slower. On Linux, 300 | # sync/hsha performance is about the same, with hsha of course using less memory. 301 | rpc_server_type: sync 302 | 303 | # Uncomment rpc_min|max_threads to set request pool size. 304 | # You would primarily set max for the sync server to safeguard against 305 | # misbehaved clients; if you do hit the max, Cassandra will block until one 306 | # disconnects before accepting more. The defaults for sync are min of 16 and max 307 | # unlimited. 308 | # 309 | # For the Hsha server, the min and max both default to quadruple the number of 310 | # CPU cores. 311 | # 312 | # This configuration is ignored by the async server. 313 | # 314 | # rpc_min_threads: 16 315 | # rpc_max_threads: 2048 316 | 317 | # uncomment to set socket buffer sizes on rpc connections 318 | # rpc_send_buff_size_in_bytes: 319 | # rpc_recv_buff_size_in_bytes: 320 | 321 | # Frame size for thrift (maximum field length). 322 | # 0 disables TFramedTransport in favor of TSocket. This option 323 | # is deprecated; we strongly recommend using Framed mode. 324 | thrift_framed_transport_size_in_mb: 15 325 | 326 | # The max length of a thrift message, including all fields and 327 | # internal thrift overhead. 328 | thrift_max_message_length_in_mb: 16 329 | 330 | # Set to true to have Cassandra create a hard link to each sstable 331 | # flushed or streamed locally in a backups/ subdirectory of the 332 | # Keyspace data. Removing these links is the operator's 333 | # responsibility. 334 | incremental_backups: false 335 | 336 | # Whether or not to take a snapshot before each compaction. Be 337 | # careful using this option, since Cassandra won't clean up the 338 | # snapshots for you. Mostly useful if you're paranoid when there 339 | # is a data format change. 340 | snapshot_before_compaction: false 341 | 342 | # Whether or not a snapshot is taken of the data before keyspace truncation 343 | # or dropping of column families. The STRONGLY advised default of true 344 | # should be used to provide data safety. If you set this flag to false, you will 345 | # lose data on truncation or drop. 346 | auto_snapshot: true 347 | 348 | # Add column indexes to a row after its contents reach this size. 349 | # Increase if your column values are large, or if you have a very large 350 | # number of columns. The competing concerns are: Cassandra has to 351 | # deserialize this much of the row to read a single column, so you want 352 | # it to be small - at least if you do many partial-row reads - but all 353 | # the index data is read for each access, so you don't want to generate 354 | # that wastefully either. 355 | column_index_size_in_kb: 64 356 | 357 | # Size limit for rows being compacted in memory. Larger rows will spill 358 | # over to disk and use a slower two-pass compaction process. A message 359 | # will be logged specifying the row key. 360 | in_memory_compaction_limit_in_mb: 64 361 | 362 | # Number of simultaneous compactions to allow, NOT including 363 | # validation "compactions" for anti-entropy repair.
Simultaneous 364 | # compactions can help preserve read performance in a mixed read/write 365 | # workload, by mitigating the tendency of small sstables to accumulate 366 | # during a single long-running compaction. The default is usually 367 | # fine and if you experience problems with compaction running too 368 | # slowly or too fast, you should look at 369 | # compaction_throughput_mb_per_sec first. 370 | # 371 | # This setting has no effect on LeveledCompactionStrategy. 372 | # 373 | # concurrent_compactors defaults to the number of cores. 374 | # Uncomment to make compaction mono-threaded, the pre-0.8 default. 375 | #concurrent_compactors: 1 376 | 377 | # Multi-threaded compaction. When enabled, each compaction will use 378 | # up to one thread per core, plus one thread per sstable being merged. 379 | # This is usually only useful for SSD-based hardware: otherwise, 380 | # your concern is usually to get compaction to do LESS i/o (see: 381 | # compaction_throughput_mb_per_sec), not more. 382 | multithreaded_compaction: false 383 | 384 | # Throttles compaction to the given total throughput across the entire 385 | # system. The faster you insert data, the faster you need to compact in 386 | # order to keep the sstable count down, but in general, setting this to 387 | # 16 to 32 times the rate you are inserting data is more than sufficient. 388 | # Setting this to 0 disables throttling. Note that this accounts for all types 389 | # of compaction, including validation compaction. 390 | compaction_throughput_mb_per_sec: 16 391 | 392 | # Track cached row keys during compaction, and re-cache their new 393 | # positions in the compacted sstable. Disable if you use really large 394 | # key caches. 395 | compaction_preheat_key_cache: true 396 | 397 | # Throttles all outbound streaming file transfers on this node to the 398 | # given total throughput in Mbps. This is necessary because Cassandra does 399 | # mostly sequential IO when streaming data during bootstrap or repair, which 400 | # can lead to saturating the network connection and degrading rpc performance. 401 | # When unset, the default is 400 Mbps or 50 MB/s. 402 | # stream_throughput_outbound_megabits_per_sec: 400 403 | 404 | # Time to wait for a reply from other nodes before failing the command 405 | rpc_timeout_in_ms: 10000 406 | 407 | # Enable socket timeout for streaming operation. 408 | # When a timeout occurs during streaming, streaming is retried from the start 409 | # of the current file. This *can* involve re-streaming a significant amount of 410 | # data, so you should avoid setting the value too low. 411 | # Default value is 0, which never times out streams. 412 | # streaming_socket_timeout_in_ms: 0 413 | 414 | # phi value that must be reached for a host to be marked down. 415 | # most users should never need to adjust this. 416 | # phi_convict_threshold: 8 417 | 418 | # endpoint_snitch -- Set this to a class that implements 419 | # IEndpointSnitch. The snitch has two functions: 420 | # - it teaches Cassandra enough about your network topology to route 421 | # requests efficiently 422 | # - it allows Cassandra to spread replicas around your cluster to avoid 423 | # correlated failures. It does this by grouping machines into 424 | # "datacenters" and "racks."
Cassandra will do its best not to have 425 | # more than one replica on the same "rack" (which may not actually 426 | # be a physical location) 427 | # 428 | # IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, 429 | # YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS 430 | # ARE PLACED. 431 | # 432 | # Out of the box, Cassandra provides 433 | # - SimpleSnitch: 434 | # Treats Strategy order as proximity. This improves cache locality 435 | # when disabling read repair, which can further improve throughput. 436 | # Only appropriate for single-datacenter deployments. 437 | # - PropertyFileSnitch: 438 | # Proximity is determined by rack and data center, which are 439 | # explicitly configured in cassandra-topology.properties. 440 | # - GossipingPropertyFileSnitch 441 | # The rack and datacenter for the local node are defined in 442 | # cassandra-rackdc.properties and propagated to other nodes via gossip. If 443 | # cassandra-topology.properties exists, it is used as a fallback, allowing 444 | # migration from the PropertyFileSnitch. 445 | # - RackInferringSnitch: 446 | # Proximity is determined by rack and data center, which are 447 | # assumed to correspond to the 3rd and 2nd octet of each node's 448 | # IP address, respectively. Unless this happens to match your 449 | # deployment conventions (as it did Facebook's), this is best used 450 | # as an example of writing a custom Snitch class. 451 | # - Ec2Snitch: 452 | # Appropriate for EC2 deployments in a single Region. Loads Region 453 | # and Availability Zone information from the EC2 API. The Region is 454 | # treated as the Datacenter, and the Availability Zone as the rack. 455 | # Only private IPs are used, so this will not work across multiple 456 | # Regions. 457 | # - Ec2MultiRegionSnitch: 458 | # Uses public IPs as broadcast_address to allow cross-region 459 | # connectivity. (Thus, you should set seed addresses to the public 460 | # IP as well.) You will need to open the storage_port or 461 | # ssl_storage_port on the public IP firewall. (For intra-Region 462 | # traffic, Cassandra will switch to the private IP after 463 | # establishing a connection.) 464 | # 465 | # You can use a custom Snitch by setting this to the full class name 466 | # of the snitch, which will be assumed to be on your classpath. 467 | endpoint_snitch: SimpleSnitch 468 | 469 | # controls how often to perform the more expensive part of host score 470 | # calculation 471 | dynamic_snitch_update_interval_in_ms: 100 472 | # controls how often to reset all host scores, allowing a bad host to 473 | # possibly recover 474 | dynamic_snitch_reset_interval_in_ms: 600000 475 | # if set greater than zero and read_repair_chance is < 1.0, this will allow 476 | # 'pinning' of replicas to hosts in order to increase cache capacity. 477 | # The badness threshold will control how much worse the pinned host has to be 478 | # before the dynamic snitch will prefer other replicas over it. This is 479 | # expressed as a double which represents a percentage. Thus, a value of 480 | # 0.2 means Cassandra would continue to prefer the static snitch values 481 | # until the pinned host was 20% worse than the fastest. 482 | dynamic_snitch_badness_threshold: 0.1 483 | 484 | # request_scheduler -- Set this to a class that implements 485 | # RequestScheduler, which will schedule incoming client requests 486 | # according to the specific policy. This is useful for multi-tenancy 487 | # with a single Cassandra cluster. 
488 | # NOTE: This is specifically for requests from the client and does 489 | # not affect inter node communication. 490 | # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place 491 | # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of 492 | # client requests to a node with a separate queue for each 493 | # request_scheduler_id. The scheduler is further customized by 494 | # request_scheduler_options as described below. 495 | request_scheduler: org.apache.cassandra.scheduler.NoScheduler 496 | 497 | # Scheduler Options vary based on the type of scheduler 498 | # NoScheduler - Has no options 499 | # RoundRobin 500 | # - throttle_limit -- The throttle_limit is the number of in-flight 501 | # requests per client. Requests beyond 502 | # that limit are queued up until 503 | # running requests can complete. 504 | # The value of 80 here is twice the number of 505 | # concurrent_reads + concurrent_writes. 506 | # - default_weight -- default_weight is optional and allows for 507 | # overriding the default which is 1. 508 | # - weights -- Weights are optional and will default to 1 or the 509 | # overridden default_weight. The weight translates into how 510 | # many requests are handled during each turn of the 511 | # RoundRobin, based on the scheduler id. 512 | # 513 | # request_scheduler_options: 514 | # throttle_limit: 80 515 | # default_weight: 5 516 | # weights: 517 | # Keyspace1: 1 518 | # Keyspace2: 5 519 | 520 | # request_scheduler_id -- An identifier based on which to perform 521 | # the request scheduling. Currently the only valid option is keyspace. 522 | # request_scheduler_id: keyspace 523 | 524 | # index_interval controls the sampling of entries from the primary 525 | # row index in terms of space versus time. The larger the interval, 526 | # the smaller and less effective the sampling will be. In technical 527 | # terms, the interval corresponds to the number of index entries that 528 | # are skipped between taking each sample. All the sampled entries 529 | # must fit in memory. Generally, a value between 128 and 512 here 530 | # coupled with a large key cache size on CFs results in the best trade 531 | # offs. This value is not often changed; however, if you have many 532 | # very small rows (many to an OS page), then increasing this will 533 | # often lower memory usage without an impact on performance. 534 | index_interval: 128 535 | 536 | # Enable or disable inter-node encryption 537 | # Default settings are TLS v1, RSA 1024-bit keys (it is imperative that 538 | # users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher 539 | # suite for authentication, key exchange and encryption of the actual data transfers. 540 | # NOTE: No custom encryption options are enabled at the moment 541 | # The available internode options are: all, none, dc, rack 542 | # 543 | # If set to dc cassandra will encrypt the traffic between the DCs 544 | # If set to rack cassandra will encrypt the traffic between the racks 545 | # 546 | # The passwords used in these options must match the passwords used when generating 547 | # the keystore and truststore.
For instructions on generating these files, see: 548 | # http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore 549 | # 550 | encryption_options: 551 | internode_encryption: none 552 | keystore: conf/.keystore 553 | keystore_password: cassandra 554 | truststore: conf/.truststore 555 | truststore_password: cassandra 556 | # More advanced defaults below: 557 | # protocol: TLS 558 | # algorithm: SunX509 559 | # store_type: JKS 560 | # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] 561 | -------------------------------------------------------------------------------- /etc/cassandra/log4j-server.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # for production, you should probably set pattern to %c instead of %l. 18 | # (%l is slower.) 19 | 20 | # output messages into a rolling log file as well as stdout 21 | log4j.rootLogger=INFO,stdout,R 22 | 23 | # stdout 24 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 25 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 26 | log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n 27 | 28 | # rolling log file 29 | log4j.appender.R=org.apache.log4j.RollingFileAppender 30 | log4j.appender.R.maxFileSize=20MB 31 | log4j.appender.R.maxBackupIndex=50 32 | log4j.appender.R.layout=org.apache.log4j.PatternLayout 33 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n 34 | # Edit the next line to point to your logs directory 35 | log4j.appender.R.File=cassandra/logs/system.log 36 | 37 | # Application logging options 38 | #log4j.logger.org.apache.cassandra=DEBUG 39 | #log4j.logger.org.apache.cassandra.db=DEBUG 40 | #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG 41 | 42 | # Adding this to avoid thrift logging disconnect errors. 
43 | log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR 44 | 45 | -------------------------------------------------------------------------------- /etc/production.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = queuey 4 | debug = false 5 | 6 | [metlog] 7 | backend = mozsvc.metrics.MetlogPlugin 8 | enabled = true 9 | sender_backend = metlog.senders.ZmqPubSender 10 | sender_bindstrs = tcp://127.0.0.1:5565 11 | 12 | [storage] 13 | backend = queuey.storage.cassandra.CassandraQueueBackend 14 | host = localhost 15 | database = MessageStore 16 | 17 | [metadata] 18 | backend = queuey.storage.cassandra.CassandraMetadata 19 | host = localhost 20 | database = MetadataStore 21 | 22 | [application_keys] 23 | queuey = f25bfb8fe200475c8a0532a9cbe7651e 24 | 25 | [filter:catcherror] 26 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 27 | 28 | [pipeline:main] 29 | pipeline = catcherror 30 | pyramidapp 31 | 32 | [app:pyramidapp] 33 | use = egg:queuey 34 | 35 | [server:main] 36 | use = egg:Paste#http 37 | host = 0.0.0.0 38 | port = 5000 39 | use_threadpool = True 40 | threadpool_workers = 60 41 | 42 | # Begin logging configuration 43 | 44 | [loggers] 45 | keys = root,queuey 46 | 47 | [handlers] 48 | keys = global,error 49 | 50 | [formatters] 51 | keys = generic 52 | 53 | [logger_root] 54 | level = WARNING 55 | handlers = global 56 | 57 | [logger_queuey] 58 | qualname= queuey 59 | level = DEBUG 60 | handlers = global,error 61 | propagate = 0 62 | 63 | [handler_global] 64 | class = StreamHandler 65 | args = (sys.stderr,) 66 | level = DEBUG 67 | formatter = generic 68 | 69 | [handler_error] 70 | class = handlers.RotatingFileHandler 71 | args = ('/var/log/queuey-error.log',) 72 | level = ERROR 73 | formatter = generic 74 | 75 | [formatter_generic] 76 | format = %(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s 77 | datefmt = %Y-%m-%d %H:%M:%S 78 | 79 | # End logging configuration 80 | -------------------------------------------------------------------------------- /etc/queuey-dev.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = queuey 4 | debug = true 5 | 6 | [metlog] 7 | logger = queuey 8 | backend = mozsvc.metrics.MetlogPlugin 9 | sender_class = metlog.senders.dev.StdOutSender 10 | 11 | [storage] 12 | backend = queuey.storage.cassandra.CassandraQueueBackend 13 | host = localhost 14 | 15 | [metadata] 16 | backend = queuey.storage.cassandra.CassandraMetadata 17 | host = localhost 18 | 19 | [application_keys] 20 | queuey = f25bfb8fe200475c8a0532a9cbe7651e 21 | 22 | [filter:catcherror] 23 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 24 | 25 | [pipeline:main] 26 | pipeline = catcherror 27 | pyramidapp 28 | 29 | [app:pyramidapp] 30 | use = egg:queuey 31 | 32 | [server:main] 33 | use = egg:Paste#http 34 | host = 0.0.0.0 35 | port = 5000 36 | 37 | # Begin logging configuration 38 | 39 | [loggers] 40 | keys = root, queuey 41 | 42 | [handlers] 43 | keys = console 44 | 45 | [formatters] 46 | keys = generic 47 | 48 | [logger_root] 49 | level = INFO 50 | handlers = console 51 | 52 | [logger_queuey] 53 | level = DEBUG 54 | handlers = 55 | qualname = queuey 56 | 57 | [handler_console] 58 | class = StreamHandler 59 | args = (sys.stderr,) 60 | level = NOTSET 61 | formatter = generic 62 | 63 | [formatter_generic] 64 | format = %(asctime)s %(levelname)-5.5s 
[%(name)s][%(threadName)s] %(message)s 65 | 66 | # End logging configuration 67 | -------------------------------------------------------------------------------- /etc/queuey-test.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = queuey 4 | debug = false 5 | 6 | [metlog] 7 | logger = queuey 8 | backend = mozsvc.metrics.MetlogPlugin 9 | sender_backend = metlog.senders.ZmqPubSender 10 | sender_bindstrs = tcp://127.0.0.1:5565 11 | 12 | [storage] 13 | backend = queuey.storage.cassandra.CassandraQueueBackend 14 | host = 192.168.2.20,192.168.2.23 15 | database = MessageStore 16 | 17 | [metadata] 18 | backend = queuey.storage.cassandra.CassandraMetadata 19 | host = 192.168.2.20,192.168.2.23 20 | database = MetadataStore 21 | 22 | [application_keys] 23 | queuey = f25bfb8fe200475c8a0532a9cbe7651e 24 | 25 | [filter:catcherror] 26 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 27 | 28 | [pipeline:main] 29 | pipeline = catcherror 30 | pyramidapp 31 | 32 | [app:pyramidapp] 33 | use = egg:queuey 34 | 35 | [server:main] 36 | use = egg:gunicorn#main 37 | host = 0.0.0.0 38 | port = 5000 39 | workers = 5 40 | worker_class = gevent 41 | proc_name = queuey 42 | 43 | # Begin logging configuration 44 | 45 | [loggers] 46 | keys = root, queuey 47 | 48 | [handlers] 49 | keys = console 50 | 51 | [formatters] 52 | keys = generic 53 | 54 | [logger_root] 55 | level = INFO 56 | handlers = console 57 | 58 | [logger_queuey] 59 | level = DEBUG 60 | handlers = 61 | qualname = queuey 62 | 63 | [handler_console] 64 | class = StreamHandler 65 | args = (sys.stderr,) 66 | level = NOTSET 67 | formatter = generic 68 | 69 | [formatter_generic] 70 | format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s 71 | 72 | # End logging configuration 73 | -------------------------------------------------------------------------------- /etc/queuey.nginx.conf: -------------------------------------------------------------------------------- 1 | location / { 2 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 3 | proxy_set_header Host $http_host; 4 | proxy_redirect off; 5 | proxy_pass http://unix:/tmp/gunicorn-queuey.sock; 6 | } 7 | -------------------------------------------------------------------------------- /etc/supervisord.conf: -------------------------------------------------------------------------------- 1 | [unix_http_server] 2 | file=%(here)s/../var/supervisor.sock 3 | chmod=0600 4 | 5 | [inet_http_server] 6 | port = 127.0.0.1:4999 7 | 8 | [supervisorctl] 9 | serverurl=unix://%(here)s/../var/supervisor.sock 10 | 11 | [rpcinterface:supervisor] 12 | supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface 13 | 14 | [supervisord] 15 | logfile=%(here)s/../var/supervisord.log 16 | pidfile=%(here)s/../var/supervisord.pid 17 | childlogdir=%(here)s/../var 18 | directory=%(here)s/../ 19 | 20 | [program:cassandra] 21 | command = %(here)s/../bin/cassandra/bin/cassandra -f 22 | environment=JVM_EXTRA_OPTS="-Dcom.sun.management.jmxremote.port=8199" 23 | -------------------------------------------------------------------------------- /load_tester.py: -------------------------------------------------------------------------------- 1 | import os 2 | import base64 3 | import time 4 | from optparse import OptionParser 5 | 6 | from gevent import monkey 7 | from gevent.pool import Pool 8 | monkey.patch_all(thread=False) 9 | 10 | import requests 11 | 12 | auth = {} 13 | qurl = [] 14 | 15 
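# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): auth and qurl above are
# module-level so the gevent Pool workers spawned in __main__ can share them;
# __main__ fills them in from argv. A hypothetical invocation, reusing the
# dev application key from etc/queuey-dev.ini, would look like:
#
#   python load_tester.py http://localhost:5000/v1/queuey/my_queue \
#       f25bfb8fe200475c8a0532a9cbe7651e --concurrency 20 --messages 5000
# ---------------------------------------------------------------------------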
| 16 | def files(filedir): 17 | files = os.listdir(filedir) 18 | file_map = [] 19 | for filename in files: 20 | if filename.endswith('dump'): 21 | continue 22 | with open(os.path.join(filedir, filename)) as f: 23 | data = f.read() 24 | 25 | dumpname = filename[:-len('.json')] + '.dump'  # rstrip('.json') would strip a char set, not the suffix 26 | with open(os.path.join(filedir, dumpname)) as f: 27 | data += f.read() 28 | file_map.append(base64.b64encode(data.encode('zlib'))) 29 | return file_map 30 | 31 | 32 | def send_msg(msg): 33 | requests.post(qurl[0], msg, headers=auth, config={'safe_mode': True}) 34 | 35 | 36 | if __name__ == '__main__': 37 | usage = "usage: %prog url_to_queue application_key" 38 | parser = OptionParser(usage=usage) 39 | parser.add_option("--concurrency", dest="concurrency", type="int", 40 | default=10, help="Concurrent requests") 41 | parser.add_option("--messages", dest="messages", type="int", 42 | default=10000, help="Number of messages to send") 43 | parser.add_option("--message_size", dest="message_size", type="int", 44 | default=140, help="Message size (in bytes)") 45 | (options, args) = parser.parse_args() 46 | 47 | # Setup globals 48 | auth['Authorization'] = 'Application %s' % args[1] 49 | qurl.append(args[0]) 50 | 51 | print "Constructing %s messages of size %s..." % (options.messages, 52 | options.message_size) 53 | messages = [base64.b64encode(os.urandom(options.message_size)) for x in 54 | range(options.messages)] 55 | 56 | p = Pool(options.concurrency) 57 | start = time.time() 58 | p.map(send_msg, messages) 59 | total = time.time() - start 60 | print "Completed in %s seconds" % total 61 | print "Requests per second: %s" % (len(messages) / total) 62 | -------------------------------------------------------------------------------- /queuey.spec: -------------------------------------------------------------------------------- 1 | %define name python26-queuey 2 | %define pythonname queuey 3 | %define version 0.8 4 | %define release 2 5 | 6 | Summary: A Services app 7 | Name: %{name} 8 | Version: %{version} 9 | Release: %{release} 10 | Source0: %{pythonname}-%{version}.tar.gz 11 | License: MPL 12 | Group: Development/Libraries 13 | BuildRoot: %{_tmppath}/%{pythonname}-%{version}-%{release}-buildroot 14 | Prefix: %{_prefix} 15 | BuildArch: noarch 16 | Vendor: Services team 17 | Requires: nginx gunicorn pyzmq python26 python26-argparse python26-cef python26-chameleon python26-colander python26-mako python26-markupsafe python26-meld3 python26-mozsvc python26-ordereddict python26-paste python26-pastedeploy python26-pastescript python26-pycassa python26-pygments python26-pyramid python26-setuptools python26-repoze.lru python26-simplejson python26-thrift python26-translationstring python26-venusian python26-webob python26-wsgiref python26-zope.component python26-zope.deprecation python26-zope.event python26-zope.interface python26-ujson python26-metlog-py 18 | 19 | Url: ${url} 20 | 21 | %description 22 | ====== 23 | Queuey 24 | ====== 25 | 26 | This is the Python implementation of the Queuey Message Queue Service.
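Editor's note (illustrative, not part of the spec): with the queuey-0.8.tar.gz source tarball in place under rpmbuild/SOURCES, a local build of this package is typically driven by

    rpmbuild -ba queuey.spec

which runs the %prep, %build and %install sections below and emits the noarch RPM described above.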
27 | 28 | 29 | %prep 30 | %setup -n %{pythonname}-%{version} -n %{pythonname}-%{version} 31 | 32 | %build 33 | python2.6 setup.py build 34 | 35 | %install 36 | 37 | # the config files for Queuey apps 38 | mkdir -p %{buildroot}%{_sysconfdir}/queuey 39 | install -m 0644 etc/production.ini %{buildroot}%{_sysconfdir}/queuey/production.ini 40 | 41 | # nginx config 42 | mkdir -p %{buildroot}%{_sysconfdir}/nginx 43 | mkdir -p %{buildroot}%{_sysconfdir}/nginx/conf.d 44 | install -m 0644 etc/queuey.nginx.conf %{buildroot}%{_sysconfdir}/nginx/conf.d/queuey.conf 45 | 46 | # logging 47 | mkdir -p %{buildroot}%{_localstatedir}/log 48 | touch %{buildroot}%{_localstatedir}/log/queuey.log 49 | 50 | # the app 51 | python2.6 setup.py install --single-version-externally-managed --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES 52 | 53 | %clean 54 | rm -rf $RPM_BUILD_ROOT 55 | 56 | %post 57 | touch %{_localstatedir}/log/queuey.log 58 | chown nginx:nginx %{_localstatedir}/log/queuey.log 59 | chmod 640 %{_localstatedir}/log/queuey.log 60 | 61 | %files -f INSTALLED_FILES 62 | 63 | %attr(640, nginx, nginx) %ghost %{_localstatedir}/log/queuey.log 64 | 65 | %dir %{_sysconfdir}/queuey/ 66 | 67 | %config(noreplace) %{_sysconfdir}/queuey/* 68 | %config(noreplace) %{_sysconfdir}/nginx/conf.d/queuey.conf 69 | 70 | %defattr(-,root,root) 71 | -------------------------------------------------------------------------------- /queuey/__init__.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | import os 5 | 6 | from pyramid.authorization import ACLAuthorizationPolicy 7 | from pyramid.config import Configurator 8 | 9 | from metlog.config import client_from_dict_config 10 | from mozsvc.config import Config 11 | 12 | from queuey.resources import Root 13 | from queuey.security import QueueyAuthenticationPolicy 14 | from queuey.storage import configure_from_settings 15 | 16 | 17 | def main(global_config, **settings): 18 | config_file = global_config['__file__'] 19 | config_file = os.path.abspath( 20 | os.path.normpath( 21 | os.path.expandvars( 22 | os.path.expanduser( 23 | config_file)))) 24 | 25 | settings['config'] = config = Config(config_file) 26 | 27 | # Put values from the config file into the pyramid settings dict. 28 | for section in config.sections(): 29 | setting_prefix = section.replace(":", ".") 30 | for name, value in config.get_map(section).iteritems(): 31 | settings[setting_prefix + "." 
+ name] = value 32 | 33 | config = Configurator( 34 | root_factory=Root, 35 | settings=settings, 36 | authentication_policy=QueueyAuthenticationPolicy(), 37 | authorization_policy=ACLAuthorizationPolicy() 38 | ) 39 | 40 | config.registry['backend_storage'] = configure_from_settings( 41 | 'storage', settings['config'].get_map('storage')) 42 | config.registry['backend_metadata'] = configure_from_settings( 43 | 'metadata', settings['config'].get_map('metadata')) 44 | 45 | # Load the Metlog Client instance 46 | config.registry['metlog_client'] = client_from_dict_config( 47 | settings['config'].get_map('metlog') 48 | ) 49 | 50 | # Load the application keys 51 | app_vals = settings['config'].get_map('application_keys') 52 | app_keys = {} 53 | for k, v in app_vals.items(): 54 | for item in v: 55 | app_keys[item] = k 56 | config.registry['app_keys'] = app_keys 57 | config.registry['app_names'] = app_vals.keys() 58 | 59 | # adds Mozilla default views 60 | config.include("mozsvc") 61 | 62 | config.scan('queuey.views') 63 | 64 | # Replace default renderer with ujson rendering 65 | config.add_renderer(None, 'queuey.views.UJSONRendererFactory') 66 | return config.make_wsgi_app() 67 | -------------------------------------------------------------------------------- /queuey/exceptions.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """Message Queue Exceptions""" 5 | 6 | 7 | class MessageQueueException(BaseException): 8 | """Base MessageQueue Exception""" 9 | 10 | 11 | class StorageException(MessageQueueException): 12 | """All exceptions from storage backends""" 13 | 14 | 15 | class ApplicationExists(StorageException): 16 | """Raised when an application of a given name already exists""" 17 | 18 | 19 | class ApplicationNotRegistered(StorageException): 20 | """Raised when an application is not registered for an action 21 | requiring registration""" 22 | 23 | 24 | class QueueAlreadyExists(StorageException): 25 | """Raised when a queue already exists and an action tries to 26 | create it""" 27 | 28 | 29 | class QueueDoesNotExist(StorageException): 30 | """Raised when a queue does not exist and an action tries to 31 | act on it""" 32 | -------------------------------------------------------------------------------- /queuey/resources.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | import collections 5 | from cdecimal import Decimal 6 | import re 7 | 8 | from pyramid.security import Allow 9 | from pyramid.security import Everyone 10 | 11 | 12 | DECIMAL_REGEX = re.compile(r'^\d+(\.\d+)?$') 13 | MESSAGE_REGEX = re.compile( 14 | r'(?:\d{1,3}\:)?[a-zA-Z0-9]{32}(?:\,(?:\d{1,3}\:)?[a-zA-Z0-9]{32}){0,}'  # allow 1-3 digit partition prefixes consistently 15 | ) 16 | 17 | 18 | class InvalidQueueName(Exception): 19 | """Raised when a queue name is invalid""" 20 | status = 404 21 | 22 | 23 | class InvalidUpdate(Exception): 24 | """Raised when an update to existing data fails""" 25 | status = 400 26 | 27 | 28 | class InvalidMessageID(Exception): 29 | """Raised for invalid message IDs""" 30 | status = 400 31 | 32 | 33 | def transform_stored_message(message): 34 | del message['metadata'] 35 | message['partition'] = int(message['queue_name'].split(':')[-1]) 36 | del message['queue_name'] 37 | message['timestamp'] = str(message['timestamp']) 38 | 39 | 40 | class Root(object): 41 | __acl__ = [] 42 | 43 | def __init__(self, request): 44 | self.request = request 45 | 46 | def __getitem__(self, name): 47 | if name == 'v1': 48 | return QueueyVersion1API(self.request) 49 | else: 50 | raise KeyError("No key %s found" % name) 51 | 52 | 53 | class QueueyVersion1API(object): 54 | def __init__(self, request): 55 | self.request = request 56 | 57 | def __getitem__(self, name): 58 | # See if the application name is valid 59 | if name in self.request.registry['app_names']: 60 | return Application(self.request, name) 61 | else: 62 | raise KeyError("No key %s found" % name) 63 | 64 | 65 | class Application(object): 66 | """Application resource""" 67 | def __init__(self, request, application_name): 68 | self.request = request 69 | self.application_name = application_name 70 | self.metadata = request.registry['backend_metadata'] 71 | self.storage = request.registry['backend_storage'] 72 | app_id = 'app:%s' % self.application_name 73 | 74 | # Applications can create queues and view existing queues 75 | self.__acl__ = [ 76 | (Allow, app_id, 'create_queue'), 77 | (Allow, app_id, 'view_queues') 78 | ] 79 | 80 | def __getitem__(self, name): 81 | if len(name) > 50: 82 | raise InvalidQueueName("Queue name longer than 50 characters.") 83 | data = self.metadata.queue_information(self.application_name, [name]) 84 | if not data or not data[0]: 85 | raise InvalidQueueName("Queue of that name was not found.") 86 | return Queue(self.request, name, data[0]) 87 | 88 | def register_queue(self, queue_name, **metadata): 89 | """Register a queue for this application""" 90 | if not metadata.get('principles'): 91 | metadata.pop('principles', None)  # 'del' would raise KeyError when the key is absent 92 | return self.metadata.register_queue( 93 | self.application_name, 94 | queue_name, 95 | **metadata 96 | ) 97 | 98 | def queue_list(self, details=False, include_count=False, limit=None, 99 | offset=None): 100 | queues = self.metadata.queue_list(self.application_name, limit=limit, 101 | offset=offset) 102 | queue_list = [] 103 | queue_data = [] 104 | if details or include_count: 105 | queue_data = self.metadata.queue_information(self.application_name, 106 | queues) 107 | for index, queue_name in enumerate(queues): 108 | qd = { 109 | 'queue_name': queue_name, 110 | } 111 | if details or include_count: 112 | qd.update(queue_data[index]) 113 | if include_count: 114 | total = 0 115 | for num in range(queue_data[index]['partitions']): 116 | qn = '%s:%s' % (queue_name, num + 1) 117 | total += self.storage.count('weak', self.application_name, 118 | qn) 119 | qd['count'] = total 120 | queue_list.append(qd) 121 | return queue_list 122 | 123 |
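# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original resources.py): Pyramid resolves a
# URL such as /v1/<app_name>/<queue_name> through the chained __getitem__
# calls above, roughly:
#
#     root = Root(request)
#     api = root['v1']            # QueueyVersion1API
#     app = api['queuey']         # Application; KeyError -> 404 for unknown apps
#     queue = app['some_queue']   # Queue (below), or InvalidQueueName (404)
#
# Each level contributes its own __acl__, so the permissions available to a
# request narrow as traversal descends. 'queuey' and 'some_queue' here are
# hypothetical names.
# ---------------------------------------------------------------------------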
124 | class Queue(object): 125 | """Queue Resource""" 126 | def __init__(self, request, queue_name, queue_data): 127 | self.request = request 128 | self.metadata = request.registry['backend_metadata'] 129 | self.storage = request.registry['backend_storage'] 130 | self.queue_name = queue_name 131 | self.metlog = request.registry['metlog_client'] 132 | principles = queue_data.pop('principles', '').split(',') 133 | self.principles = [x.strip() for x in principles if x] 134 | 135 | for name, value in queue_data.items(): 136 | setattr(self, name, value) 137 | 138 | # Applications are always allowed to create messages in queues 139 | # they made 140 | app_id = 'app:%s' % self.application 141 | self.__acl__ = acl = [ 142 | (Allow, app_id, 'create'), 143 | (Allow, app_id, 'create_queue'), 144 | (Allow, app_id, 'delete_queue') 145 | ] 146 | 147 | # If there are additional principles, view/delete permissions will 148 | # be granted to them 149 | if self.principles: 150 | for principle in self.principles: 151 | for permission in ['view', 'delete']: 152 | acl.append((Allow, principle, permission)) 153 | else: 154 | # If there are no additional principles, the application 155 | # may also view and delete messages in the queue 156 | acl.append((Allow, app_id, 'view')) 157 | acl.append((Allow, app_id, 'delete')) 158 | 159 | # Everyone is allowed to view public queues 160 | if queue_data['type'] == 'public': 161 | acl.append((Allow, Everyone, 'view')) 162 | 163 | def __getitem__(self, name): 164 | """Determine if this is a multiple message context""" 165 | if not MESSAGE_REGEX.match(name): 166 | raise InvalidMessageID("Invalid message ids.") 167 | return MessageBatch(self.request, self, name) 168 | 169 | def update_metadata(self, **metadata): 170 | # Strip out data not being updated 171 | metadata = dict((k, v) for k, v in metadata.items() if v) 172 | if 'partitions' in metadata: 173 | if metadata['partitions'] < self.partitions: 174 | raise InvalidUpdate("Partitions can only be increased.") 175 | 176 | self.metadata.register_queue(self.application, self.queue_name, 177 | **metadata) 178 | for k, v in metadata.items(): 179 | setattr(self, k, v) 180 | if 'principles' in metadata: 181 | self.principles = [x.strip() for x in 182 | metadata['principles'].split(',') if x] 183 | 184 | def push_batch(self, messages): 185 | """Push a batch of messages to the storage""" 186 | msgs = [('%s:%s' % (self.queue_name, x['partition']), x['body'], 187 | x['ttl'], x.get('metadata', {})) for x in messages] 188 | results = self.storage.push_batch(self.consistency, self.application, 189 | msgs) 190 | rl = [] 191 | for i, msg in enumerate(results): 192 | rl.append({'key': msg[0], 'timestamp': str(msg[1]), 193 | 'partition': messages[i]['partition']}) 194 | self.metlog.incr('%s.new_message' % self.application, 195 | count=len(results)) 196 | return rl 197 | 198 | def get_messages(self, since=None, limit=None, order=None, partitions=None): 199 | queue_names = [] 200 | for part in partitions: 201 | queue_names.append('%s:%s' % (self.queue_name, part)) 202 | if since and DECIMAL_REGEX.match(since): 203 | since = Decimal(since) 204 | results = self.storage.retrieve_batch( 205 | self.consistency, self.application, queue_names, start_at=since, 206 | limit=limit, order=order) 207 | for res in results: 208 | transform_stored_message(res) 209 | self.metlog.incr('%s.get_message' % self.application, 210 | count=len(results)) 211 | return results 212 | 213 | def delete(self): 214 | partitions = range(1, self.partitions + 1) 215 | for
partition in partitions: 216 | self.storage.truncate(self.consistency, self.application, '%s:%s' % 217 | (self.queue_name, partition)) 218 | self.metadata.remove_queue(self.application, self.queue_name) 219 | return True 220 | 221 | 222 | class MessageBatch(object): 223 | def __init__(self, request, queue, message_ids): 224 | self.request, self.queue = request, queue 225 | self.message_ids = [x.strip() for x in message_ids.split(',')] 226 | 227 | # Copy parent ACL 228 | self.__acl__ = queue.__acl__[:] 229 | 230 | def _messages(self): 231 | partition_hash = collections.defaultdict(lambda: []) 232 | for msg_id in self.message_ids: 233 | if ':' in msg_id: 234 | partition, msg_id = msg_id.split(':') 235 | else: 236 | partition = 1 237 | qn = '%s:%s' % (self.queue.queue_name, partition) 238 | partition_hash[qn].append(msg_id) 239 | return partition_hash 240 | 241 | def delete(self): 242 | for queue, msgs in self._messages().iteritems(): 243 | self.queue.storage.delete( 244 | self.queue.consistency, 245 | self.queue.application, 246 | queue, *msgs) 247 | return 248 | 249 | def get(self): 250 | results = [] 251 | for queue, msgs in self._messages().iteritems(): 252 | for msg_id in msgs: 253 | res = self.queue.storage.retrieve(self.queue.consistency, 254 | self.queue.application, queue, str(msg_id)) 255 | if res: 256 | transform_stored_message(res) 257 | results.append(res) 258 | self.queue.metlog.incr('%s.get_message' % self.queue.application, 259 | count=len(results)) 260 | return results 261 | 262 | def update(self, params): 263 | for queue, msgs in self._messages().iteritems(): 264 | for msg in msgs: 265 | self.queue.storage.push(self.queue.consistency, 266 | self.queue.application, queue, 267 | params['body'], ttl=params['ttl'], timestamp=msg) 268 | return 269 | -------------------------------------------------------------------------------- /queuey/run.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | Runs the Application. 
This script can be called by any wsgi runner that looks 6 | for an 'application' variable 7 | """ 8 | import os 9 | from logging.config import fileConfig 10 | from ConfigParser import NoSectionError 11 | 12 | # setting up the egg cache to a place where apache can write 13 | os.environ['PYTHON_EGG_CACHE'] = '/tmp/python-eggs' 14 | 15 | # the ini file is grabbed at its production place 16 | # unless forced via an environment variable 17 | ini_file = os.path.join('/etc', 'queuey', 'production.ini') 18 | ini_file = os.path.abspath(os.environ.get('INI_FILE', ini_file)) 19 | 20 | # running the app using Paste 21 | if __name__ == '__main__': # pragma: no cover 22 | # setting up logging 23 | try: 24 | fileConfig(ini_file) 25 | except NoSectionError: 26 | pass 27 | 28 | from paste.deploy import loadapp 29 | application = loadapp('config:%s' % ini_file) 30 | -------------------------------------------------------------------------------- /queuey/security.py: -------------------------------------------------------------------------------- 1 | from pyramid.security import Authenticated 2 | from pyramid.security import Everyone 3 | 4 | 5 | class InvalidApplicationKey(Exception): 6 | """Raised when an application key is invalid""" 7 | 8 | 9 | class InvalidBrowserID(Exception): 10 | """Raised when a browser id assertion is invalid""" 11 | 12 | 13 | class QueueyAuthenticationPolicy(object): 14 | def effective_principals(self, request): 15 | effective_principals = [Everyone] 16 | auth_header = request.headers.get('Authorization', []) 17 | if auth_header: 18 | auth_header = [x.strip() for x in auth_header.split(';')] 19 | for auth_line in auth_header: 20 | if auth_line.startswith('Application '): 21 | app_key = auth_line[len('Application '):].strip()  # slice off the prefix; str.strip() would remove a char set 22 | app_name = request.registry['app_keys'].get(app_key) 23 | if app_name: 24 | effective_principals.append('app:%s' % app_name) 25 | request.application_name = app_name 26 | if 'application' not in effective_principals: 27 | effective_principals.append('application') 28 | else: 29 | raise InvalidApplicationKey("Invalid application key") 30 | # TODO: Whenever the MAC/BID stuff is determined, pull it out here 31 | # elif auth_line.startswith('BrowserID '): 32 | # assertion = auth_line.strip('BrowserID ').strip() 33 | # try: 34 | # data = vep.verify(assertion, request.host) 35 | # except Exception: 36 | # raise InvalidBrowserID("Invalid browser ID assertion") 37 | # effective_principals.append('bid:%s' % data['email']) 38 | if len(effective_principals) > 1: 39 | effective_principals.append(Authenticated) 40 | return effective_principals 41 | -------------------------------------------------------------------------------- /queuey/storage/__init__.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """MessageQueue Storage Interface 5 | 6 | .. note:: 7 | 8 | The MetadataBackend and MessageQueueBackend are separate in the event 9 | that the message queuing backend is suitable only for messages and 10 | not storing the additional application and queue metadata.
11 | 12 | """ 13 | from pyramid.util import DottedNameResolver 14 | from zope.interface import Interface 15 | 16 | dotted_resolver = DottedNameResolver(None) 17 | 18 | 19 | def configure_from_settings(object_name, settings): 20 | """Given a settings dict, create the storage instance and return it""" 21 | config = {} 22 | prefix = object_name + '.' 23 | for name, value in settings.iteritems(): 24 | name = config[name[len(prefix):]] if name.startswith(prefix) else name 25 | config[name] = value 26 | klass = dotted_resolver.resolve(config.pop('backend')) 27 | return klass(**config) 28 | 29 | 30 | class StorageUnavailable(Exception): 31 | """Raised when the storage backend is unavailable""" 32 | 33 | 34 | class MessageQueueBackend(Interface): 35 | """A MessageQueue Backend""" 36 | def __init__(username=None, password=None, database='MessageQueue', 37 | host='localhost'): 38 | """Initialize the backend 39 | 40 | If any operation fails due to servers being unavailable, the 41 | :exc:`StorageUnavailable` exception should be raised. 42 | 43 | """ 44 | 45 | def retrieve_batch(consistency, application_name, queue_names, 46 | limit=None, include_metadata=False, start_at=None, 47 | order="ascending"): 48 | """Retrieve a batch of messages from a queue 49 | 50 | :param consistency: Desired consistency of the read operation 51 | :param application_name: Name of the application 52 | :param queue_names: List of queue names to retrieve from 53 | :param limit: Amount of messages to retrieve 54 | :param include_metadata: Whether to include message metadata 55 | :param start_at: Either a timestamp or message id to start from 56 | :param order: Which order to traverse the messages. Defaults to 57 | ascending order. 58 | :type order: `ascending` or `descending` 59 | 60 | :returns: A list of dicts, empty if no messages meet the criteria 61 | :rtype: list 62 | 63 | Example response:: 64 | 65 | [ 66 | { 67 | 'message_id': 'aebb663d1d4311e1a65f002500f0fa7c', 68 | 'timestamp': 1323973966282.637, 69 | 'body': 'jiawefjilawe', 70 | 'metadata': {}, 71 | 'queue_name': 'a queue' 72 | }, 73 | { 74 | 'message_id': 'ae45017a1d4311e19562002500f0fa7c', 75 | 'timestamp': 1323973966918.241, 76 | 'body': 'auwiofuweni3', 77 | 'metadata': {}, 78 | 'queue_name': 'other queue' 79 | }, 80 | ] 81 | 82 | The messages will be ordered based on the ``order`` param using 83 | the timestamp. 84 | 85 | .. note:: 86 | 87 | The limit is applied per queue_name, so a limit of 10 with 3 88 | queue names supplied could return up to 30 messages. 89 | 90 | """ 91 | 92 | def retrieve(consistency, application_name, queue_name, message_id, 93 | include_metadata=False): 94 | """Retrieve a single message 95 | 96 | :param consistency: Desired consistency of the read operation 97 | :param application_name: Name of the application 98 | :param queue_name: Queue name 99 | :param message_id: Message id to retrieve 100 | :param include_metadata: Whether to include message metadata 101 | 102 | :returns: A dict 103 | :rtype: dict 104 | 105 | Example response:: 106 | 107 | { 108 | 'message_id': 'ae45017a1d4311e19562002500f0fa7c', 109 | 'timestamp': 1323973966918.241, 110 | 'body': 'auwiofuweni3', 111 | 'metadata': {} 112 | } 113 | 114 | """ 115 | 116 | def push(consistency, application_name, queue_name, message, 117 | metadata=None, ttl=3600 * 24 * 3, timestamp=None): 118 | """Push a message onto the given queue 119 | 120 | The queue is assumed to exist, and will be created if it does not 121 | exist. 
122 | 123 | :param consistency: Desired consistency of the write operation 124 | :param application_name: Name of the application 125 | :param queue_name: Queue name 126 | :param message: Message to add to the queue 127 | :param metadata: Additional metadata to record for the message 128 | :type metadata: dict 129 | :param ttl: Time to Live in seconds for the message, after this 130 | period the message should be unavailable 131 | :param timestamp: The timestamp to use for the message, should be 132 | either a `uuid.uuid1` or a decimal/float of seconds 133 | since the epoch as time.time() would return. 134 | Defaults to the current time. 135 | 136 | :returns: The message id and timestamp as a tuple 137 | :rtype: tuple 138 | 139 | Example response:: 140 | 141 | ('ae45017a1d4311e19562002500f0fa7c', 1323973966918.241) 142 | 143 | """ 144 | 145 | def push_batch(consistency, application_name, message_data): 146 | """Push a batch of messages to queues 147 | 148 | The queue(s) do not need to exist in advance; they will be 149 | created if they do not exist. 150 | 151 | :param consistency: Desired consistency of the write operation 152 | :param application_name: Name of the application 153 | :param message_data: A list of messages to insert into queues 154 | :type message_data: List of tuples, where each tuple is the 155 | queue_name, message body, TTL, and a dict of 156 | message metadata. 157 | 158 | :returns: The message ids and timestamps as a list of tuples in the 159 | order they were sent 160 | :rtype: list of tuples 161 | 162 | Example message_data content:: 163 | 164 | [ 165 | ('my_queue', 'some message body', 3600, {}), 166 | ('other_queue', 'other body', 7200, {}) 167 | ] 168 | 169 | Example response:: 170 | 171 | [ 172 | ('ae45017a1d4311e19562002500f0fa7c', 1323973966282.637), 173 | ('aebb663d1d4311e1a65f002500f0fa7c', 1323973966918.241) 174 | ] 175 | 176 | """ 177 | 178 | def truncate(consistency, application_name, queue_name): 179 | """Remove all contents of the queue 180 | 181 | :param consistency: Desired consistency of the truncate operation 182 | :param application_name: Name of the application 183 | :param queue_name: Queue name 184 | 185 | :returns: Whether the queue was truncated. 186 | :rtype: bool 187 | 188 | """ 189 | 190 | def delete(consistency, application_name, queue_name, *ids): 191 | """Delete all the given message ids from the queue 192 | 193 | :param consistency: Desired consistency of the delete operation 194 | :param application_name: Name of the application 195 | :param queue_name: Queue name 196 | :param ids: Message ids that should be removed 197 | 198 | :returns: Whether the delete executed successfully 199 | :rtype: bool 200 | 201 | """ 202 | 203 | def count(consistency, application_name, queue_name): 204 | """Returns the number of messages in the queue 205 | 206 | :param consistency: Desired consistency of the read operation 207 | :param application_name: Name of the application 208 | :param queue_name: Queue name 209 | 210 | :returns: Message total 211 | :rtype: int 212 | 213 | """ 214 | 215 | 216 | class MetadataBackend(Interface): 217 | """A Metadata Backend 218 | 219 | Stores associated metadata for the message queue system, such as the 220 | active applications registered, and the queues that have been 221 | allocated for each application.
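    An illustrative round trip against any concrete implementation of this
    interface (a sketch only; ``backend`` is a stand-in instance, and the
    application and queue names are invented)::

        backend.register_queue('notes_app', 'reminders', partitions=2)
        backend.queue_list('notes_app')                       # ['reminders']
        backend.queue_information('notes_app', ['reminders'])
        # [{'application': 'notes_app', 'partitions': 2, 'created': ...}]
        backend.remove_queue('notes_app', 'reminders')        # True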
222 | 223 | """ 224 | def __init__(username=None, password=None, database='MetaData', 225 | host='localhost'): 226 | """Initialize the backend""" 227 | 228 | def register_queue(application_name, queue_name, **metadata): 229 | """Register a queue for the given application 230 | 231 | Registers a queue for the application and when it was 232 | created in seconds since the epoch, and additional metadata. 233 | 234 | This function should record all data needed to lookup queues 235 | by application name, along with the metadata. 236 | 237 | :param application_name: Name of the application 238 | :param queue_name: Queue name 239 | :param metadata: Queue metadata 240 | 241 | :returns: Whether the queue was registered 242 | :rtype: bool 243 | 244 | """ 245 | 246 | def remove_queue(application_name, queue_name): 247 | """Remove a queue registration for the given application 248 | 249 | :param application_name: Name of the application 250 | :param queue_name: Queue name 251 | 252 | :returns: Whether the queue was removed. 253 | :rtype: bool 254 | 255 | """ 256 | 257 | def queue_list(application_name, limit=100, offset=None): 258 | """Return a list of queues registered for the application 259 | 260 | :param application_name: Name of the application 261 | :param limit: How many queue names to return at once 262 | :param offset: Start at a specific queue_name offset 263 | 264 | :returns: List of queues registered for the application 265 | :rtype: list 266 | 267 | """ 268 | 269 | def queue_information(application_name, queue_names): 270 | """Return information regarding the queue for the application 271 | 272 | This is a mix of basic queue information as well as the 273 | queue metadata. 274 | 275 | :param application_name: Name of the application 276 | :param queue_names: Queue names to retreive information from 277 | 278 | :returns: Queue information, an empty dict if the queue doesn't 279 | exist 280 | :rtype: dict 281 | 282 | Example response:: 283 | 284 | [ 285 | { 286 | 'created': 82989382, 287 | 'partitions': 20, 288 | 'application': 'your app name', 289 | 'type': 'user', 290 | 'consistency': 'strong', 291 | 'principles': 'bid:fred@browserid.org,bid:george@home.com' 292 | } 293 | ] 294 | 295 | """ 296 | -------------------------------------------------------------------------------- /queuey/storage/cassandra.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
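# Editorial note on the timestamp math used throughout this module: UUID1
# timestamps count 100-nanosecond intervals since 1582-10-15, while queuey
# exposes Unix-epoch seconds. 0x01b21dd213814000 is the number of 100-ns
# intervals between the two epochs (see queuey/storage/util.py), so for a
# version 1 ``uuid.UUID`` value ``u`` the conversion applied to message
# ids below is:
#
#     unix_seconds = Decimal(u.time - 0x01b21dd213814000L) / Decimal('1e7')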
4 | from cdecimal import Decimal 5 | import inspect 6 | import uuid 7 | import time 8 | 9 | import pycassa 10 | from pycassa.index import create_index_expression 11 | from pycassa.index import create_index_clause 12 | from pycassa import system_manager 13 | from thrift.Thrift import TException 14 | from zope.interface import implements 15 | 16 | from queuey.storage import MessageQueueBackend 17 | from queuey.storage import MetadataBackend 18 | from queuey.storage import StorageUnavailable 19 | from queuey.storage.util import convert_time_to_uuid 20 | 21 | ONE = pycassa.ConsistencyLevel.ONE 22 | QUORUM = pycassa.ConsistencyLevel.QUORUM 23 | LOCAL_QUORUM = pycassa.ConsistencyLevel.LOCAL_QUORUM 24 | EACH_QUORUM = pycassa.ConsistencyLevel.EACH_QUORUM 25 | DECIMAL_1E7 = Decimal('1e7') 26 | 27 | 28 | def parse_hosts(raw_hosts): 29 | """Parses out hosts into a list""" 30 | hosts = [] 31 | if ',' in raw_hosts: 32 | names = [x.strip() for x in raw_hosts.split(',')] 33 | else: 34 | names = [raw_hosts] 35 | for name in names: 36 | if ':' not in name: 37 | name += ':9160' 38 | hosts.append(name) 39 | return hosts 40 | 41 | 42 | def wrap_func(func): 43 | def wrapper(*args, **kwargs): 44 | try: 45 | return func(*args, **kwargs) 46 | except (pycassa.UnavailableException, pycassa.TimedOutException, 47 | pycassa.MaximumRetryException): 48 | raise StorageUnavailable("Unable to contact storage pool") 49 | for attr in "__module__", "__name__", "__doc__": 50 | setattr(wrapper, attr, getattr(func, attr)) 51 | return wrapper 52 | 53 | 54 | def raise_unavailable(cls): 55 | """Wrap public method calls to raise the appropriate exception in the 56 | event the cluster is unavailable or has insufficient nodes available 57 | for the operation""" 58 | for name, meth in inspect.getmembers(cls, inspect.ismethod): 59 | if name.startswith('_'): 60 | continue 61 | setattr(cls, name, wrap_func(meth)) 62 | return cls 63 | 64 | 65 | @raise_unavailable 66 | class CassandraQueueBackend(object): 67 | implements(MessageQueueBackend) 68 | 69 | def __init__(self, username=None, password=None, database='MessageStore', 70 | host='localhost', base_delay=None, multi_dc=False, 71 | create_schema=True): 72 | """Create a Cassandra backend for the Message Queue 73 | 74 | :param host: Hostname, accepts either an IP, hostname, hostname:port, 75 | or a comma separated list of 'hostname:port' 76 | 77 | """ 78 | hosts = parse_hosts(host) 79 | if create_schema: 80 | self._create_schema(hosts[0], database) 81 | credentials = None 82 | if username is not None and password is not None: 83 | credentials = dict(username=username, password=password) 84 | self.pool = pool = pycassa.ConnectionPool( 85 | keyspace=database, 86 | server_list=hosts, 87 | credentials=credentials, 88 | ) 89 | self.message_fam = pycassa.ColumnFamily(pool, 'Messages') 90 | self.meta_fam = pycassa.ColumnFamily(pool, 'MessageMetadata') 91 | self.delay = int(base_delay) if base_delay else 0 92 | self.cl = ONE if len(hosts) < 2 else None 93 | self.multi_dc = multi_dc 94 | 95 | def _create_schema(self, host, database): 96 | try: 97 | sm = Schema(host) 98 | sm.install_message(database) 99 | sm.close() 100 | except TException: 101 | pass 102 | 103 | def _get_cl(self, consistency): 104 | """Return the consistency operation to use""" 105 | if consistency == 'weak': 106 | return ONE 107 | elif not self.multi_dc: 108 | return QUORUM 109 | elif consistency == 'very_strong': 110 | return EACH_QUORUM 111 | else: 112 | return LOCAL_QUORUM 113 | 114 | def _get_delay(self, consistency): 115 | """Return the
delay value to use for the results""" 116 | if self.cl: 117 | return 0 118 | elif consistency == 'weak': 119 | return 1 + self.delay 120 | elif consistency == 'very_strong': 121 | return 600 + self.delay 122 | else: 123 | return 5 + self.delay 124 | 125 | def retrieve_batch(self, consistency, application_name, queue_names, 126 | limit=None, include_metadata=False, start_at=None, 127 | order="ascending"): 128 | """Retrieve a batch of messages off the queue""" 129 | if not isinstance(queue_names, list): 130 | raise Exception("queue_names must be a list") 131 | 132 | cl = self.cl or self._get_cl(consistency) 133 | delay = self._get_delay(consistency) 134 | 135 | kwargs = {'read_consistency_level': cl} 136 | if order == 'descending': 137 | kwargs['column_reversed'] = True 138 | 139 | if limit: 140 | kwargs['column_count'] = limit 141 | 142 | if start_at: 143 | if isinstance(start_at, basestring): 144 | # Assume it's a hex string, transform to a UUID 145 | start_at = uuid.UUID(hex=start_at) 146 | else: 147 | # Assume it's a float/decimal, convert to UUID 148 | start_at = convert_time_to_uuid(start_at) 149 | 150 | kwargs['column_start'] = start_at 151 | 152 | queue_names = ['%s:%s' % (application_name, x) for x in queue_names] 153 | results = self.message_fam.multiget(keys=queue_names, **kwargs) 154 | results = results.items() 155 | if delay: 156 | cut_off = time.time() - delay 157 | # Turn it into 100-ns ticks (UUID1 units) for efficient comparison 158 | cut_off = int(cut_off * 1e7) + 0x01b21dd213814000L 159 | 160 | result_list = [] 161 | msg_hash = {} 162 | for queue_name, messages in results: 163 | for msg_id, body in messages.items(): 164 | if delay and msg_id.time >= cut_off: 165 | continue 166 | obj = { 167 | 'message_id': msg_id.hex, 168 | 'timestamp': (Decimal(msg_id.time - 0x01b21dd213814000L) / 169 | DECIMAL_1E7), 170 | 'body': body, 171 | 'metadata': {}, 172 | 'queue_name': queue_name[queue_name.find(':'):] 173 | } 174 | result_list.append(obj) 175 | msg_hash[msg_id] = obj 176 | 177 | # Get metadata? 178 | if include_metadata: 179 | results = self.meta_fam.multiget(keys=msg_hash.keys()) 180 | for msg_id, metadata in results.items(): 181 | msg_hash[msg_id]['metadata'] = metadata 182 | return result_list 183 | 184 | def retrieve(self, consistency, application_name, queue_name, message_id, 185 | include_metadata=False): 186 | """Retrieve a single message""" 187 | cl = self.cl or self._get_cl(consistency) 188 | if isinstance(message_id, basestring): 189 | # Convert to uuid for lookup 190 | message_id = uuid.UUID(hex=message_id) 191 | else: 192 | # Assume it's a float/decimal, convert to UUID 193 | message_id = convert_time_to_uuid(message_id) 194 | 195 | kwargs = { 196 | 'read_consistency_level': cl, 197 | 'columns': [message_id]} 198 | queue_name = '%s:%s' % (application_name, queue_name) 199 | try: 200 | results = self.message_fam.get(key=queue_name, **kwargs) 201 | except (pycassa.NotFoundException, pycassa.InvalidRequestException): 202 | return {} 203 | msg_id, body = results.items()[0] 204 | 205 | obj = { 206 | 'message_id': msg_id.hex, 207 | 'timestamp': (Decimal(msg_id.time - 0x01b21dd213814000L) / 208 | DECIMAL_1E7), 209 | 'body': body, 210 | 'metadata': {}, 211 | 'queue_name': queue_name[queue_name.find(':'):] 212 | } 213 | 214 | # Get metadata?
215 | if include_metadata: 216 | try: 217 | results = self.meta_fam.get(key=msg_id) 218 | obj['metadata'] = results 219 | except pycassa.NotFoundException: 220 | pass 221 | return obj 222 | 223 | def push(self, consistency, application_name, queue_name, message, 224 | metadata=None, ttl=60 * 60 * 24 * 3, timestamp=None): 225 | """Push a message onto the queue""" 226 | cl = self.cl or self._get_cl(consistency) 227 | if not timestamp: 228 | now = uuid.uuid1() 229 | elif isinstance(timestamp, (float, Decimal)): 230 | now = convert_time_to_uuid(timestamp, randomize=True) 231 | else: 232 | now = uuid.UUID(hex=timestamp) 233 | queue_name = '%s:%s' % (application_name, queue_name) 234 | if metadata: 235 | batch = pycassa.batch.Mutator(self.pool, 236 | write_consistency_level=cl) 237 | batch.insert(self.message_fam, key=queue_name, 238 | columns={now: message}, ttl=ttl) 239 | batch.insert(self.meta_fam, key=now, columns=metadata, ttl=ttl) 240 | batch.send() 241 | else: 242 | self.message_fam.insert(key=queue_name, columns={now: message}, 243 | ttl=ttl, write_consistency_level=cl) 244 | timestamp = Decimal(now.time - 0x01b21dd213814000L) / DECIMAL_1E7 245 | return now.hex, timestamp 246 | 247 | def push_batch(self, consistency, application_name, message_data): 248 | """Push a batch of messages""" 249 | cl = self.cl or self._get_cl(consistency) 250 | batch = pycassa.batch.Mutator(self.pool, write_consistency_level=cl) 251 | msgs = [] 252 | for queue_name, body, ttl, metadata in message_data: 253 | qn = '%s:%s' % (application_name, queue_name) 254 | now = uuid.uuid1() 255 | batch.insert(self.message_fam, key=qn, columns={now: body}, 256 | ttl=ttl) 257 | if metadata: 258 | batch.insert(self.meta_fam, key=now, columns=metadata, ttl=ttl) 259 | timestamp = (Decimal(now.time - 0x01b21dd213814000L) / DECIMAL_1E7) 260 | msgs.append((now.hex, timestamp)) 261 | batch.send() 262 | return msgs 263 | 264 | def truncate(self, consistency, application_name, queue_name): 265 | """Remove all contents of the queue""" 266 | cl = self.cl or self._get_cl(consistency) 267 | queue_name = '%s:%s' % (application_name, queue_name) 268 | self.message_fam.remove(key=queue_name, write_consistency_level=cl) 269 | return True 270 | 271 | def delete(self, consistency, application_name, queue_name, *keys): 272 | """Delete a batch of keys""" 273 | cl = self.cl or self._get_cl(consistency) 274 | queue_name = '%s:%s' % (application_name, queue_name) 275 | self.message_fam.remove(key=queue_name, 276 | columns=[uuid.UUID(hex=x) for x in keys], 277 | write_consistency_level=cl) 278 | return True 279 | 280 | def count(self, consistency, application_name, queue_name): 281 | """Return a count of the items in this queue""" 282 | cl = self.cl or self._get_cl(consistency) 283 | queue_name = '%s:%s' % (application_name, queue_name) 284 | return self.message_fam.get_count(key=queue_name, 285 | read_consistency_level=cl) 286 | 287 | 288 | @raise_unavailable 289 | class CassandraMetadata(object): 290 | implements(MetadataBackend) 291 | 292 | def __init__(self, username=None, password=None, database='MetadataStore', 293 | host='localhost', multi_dc=False, create_schema=True): 294 | """Create a Cassandra backend for the queue metadata 295 | 296 | :param host: Hostname, accepts either an IP, hostname, hostname:port, 297 | or a comma separated list of 'hostname:port' 298 | 299 | """ 300 | hosts = parse_hosts(host) 301 | if create_schema: 302 | self._create_schema(hosts[0], database) 303 | credentials = None 304 | if username is not None and password is not None:
305 | credentials = dict(username=username, password=password) 306 | self.pool = pool = pycassa.ConnectionPool( 307 | keyspace=database, 308 | server_list=hosts, 309 | credentials=credentials, 310 | ) 311 | self.metric_fam = pycassa.ColumnFamily(pool, 'ApplicationQueueData') 312 | self.queue_fam = pycassa.ColumnFamily(pool, 'Queues') 313 | self.cl = ONE if len(hosts) < 2 else None 314 | self.multi_dc = multi_dc 315 | 316 | def _create_schema(self, host, database): 317 | try: 318 | sm = Schema(host) 319 | sm.install_metadata(database) 320 | sm.close() 321 | except TException: 322 | pass 323 | 324 | def register_queue(self, application_name, queue_name, **metadata): 325 | """Register a queue, optionally with metadata""" 326 | # Determine if it's registered already 327 | cl = self.cl or (LOCAL_QUORUM if self.multi_dc else QUORUM) 328 | queue_name = '%s:%s' % (application_name, queue_name) 329 | try: 330 | self.queue_fam.get(queue_name) 331 | if metadata: 332 | # Only update metadata 333 | self.queue_fam.insert(queue_name, columns=metadata) 334 | return True 335 | except pycassa.NotFoundException: 336 | pass 337 | 338 | metadata['application'] = application_name 339 | if 'created' not in metadata: 340 | metadata['created'] = time.time() 341 | self.queue_fam.insert(queue_name, columns=metadata, 342 | write_consistency_level=cl) 343 | self.metric_fam.add(application_name, column='queue_count', value=1, 344 | write_consistency_level=cl) 345 | return True 346 | 347 | def remove_queue(self, application_name, queue_name): 348 | """Remove a queue""" 349 | cl = self.cl or (LOCAL_QUORUM if self.multi_dc else QUORUM) 350 | queue_name = '%s:%s' % (application_name, queue_name) 351 | try: 352 | self.queue_fam.get(key=queue_name, 353 | read_consistency_level=cl) 354 | except pycassa.NotFoundException: 355 | return False 356 | self.queue_fam.remove(key=queue_name, 357 | write_consistency_level=cl) 358 | self.metric_fam.add(application_name, column='queue_count', value=-1, 359 | write_consistency_level=cl) 360 | return True 361 | 362 | def queue_list(self, application_name, limit=100, offset=None): 363 | """Return list of queues""" 364 | cl = self.cl or (LOCAL_QUORUM if self.multi_dc else QUORUM) 365 | app_expr = create_index_expression('application', application_name) 366 | if offset: 367 | offset = '%s:%s' % (application_name, offset) 368 | clause = create_index_clause([app_expr], start_key=offset, 369 | count=limit) 370 | else: 371 | clause = create_index_clause([app_expr], count=limit) 372 | results = self.queue_fam.get_indexed_slices( 373 | clause, columns=['application'], read_consistency_level=cl) 374 | # Pull off the application name in front 375 | app_len = len(application_name) + 1 376 | return [key[app_len:] for key, _ in results] 377 | 378 | def queue_information(self, application_name, queue_names): 379 | """Return information on a registered queue""" 380 | if not isinstance(queue_names, list): 381 | raise Exception("Queue names must be a list.") 382 | queue_names = ['%s:%s' % (application_name, queue_name) for 383 | queue_name in queue_names] 384 | queues = self.queue_fam.multiget(keys=queue_names, 385 | read_consistency_level=ONE) 386 | results = [] 387 | for queue in queue_names: 388 | results.append(queues.get(queue, {})) 389 | return results 390 | 391 | 392 | class Schema(object): 393 | 394 | COUNTER_COLUMN_TYPE = system_manager.COUNTER_COLUMN_TYPE 395 | INT_TYPE = system_manager.INT_TYPE 396 | FLOAT_TYPE = system_manager.FLOAT_TYPE 397 | KEYS_INDEX = system_manager.KEYS_INDEX 398 | LONG_TYPE =
system_manager.LONG_TYPE 399 | UTF8_TYPE = system_manager.UTF8_TYPE 400 | TIME_UUID_TYPE = system_manager.TIME_UUID_TYPE 401 | 402 | def __init__(self, host='localhost:9160'): 403 | self.host = host 404 | self.sm = system_manager.SystemManager(self.host) 405 | 406 | def install(self): 407 | self.install_message() 408 | self.install_metadata() 409 | self.close() 410 | 411 | def install_message(self, database='MessageStore'): 412 | sm = self.sm 413 | keyspaces = sm.list_keyspaces() 414 | if database not in keyspaces: 415 | sm.create_keyspace(database, 416 | system_manager.SIMPLE_STRATEGY, {'replication_factor': '1'}) 417 | 418 | cfs = sm.get_keyspace_column_families(database) 419 | if 'Messages' not in cfs: 420 | sm.create_column_family(database, 'Messages', 421 | comparator_type=self.TIME_UUID_TYPE, 422 | default_validation_class=self.UTF8_TYPE, 423 | key_validation_class=self.UTF8_TYPE, 424 | ) 425 | 426 | if 'MessageMetadata' not in cfs: 427 | sm.create_column_family(database, 'MessageMetadata', 428 | comparator_type=self.UTF8_TYPE, 429 | default_validation_class=self.UTF8_TYPE, 430 | key_validation_class=self.TIME_UUID_TYPE, 431 | column_validation_classes={ 432 | 'ContentType': self.UTF8_TYPE, 433 | 'ContentLength': self.LONG_TYPE, 434 | } 435 | ) 436 | 437 | def install_metadata(self, database='MetadataStore'): 438 | sm = self.sm 439 | keyspaces = sm.list_keyspaces() 440 | if database not in keyspaces: 441 | sm.create_keyspace(database, 442 | system_manager.SIMPLE_STRATEGY, {'replication_factor': '1'}) 443 | 444 | cfs = sm.get_keyspace_column_families(database) 445 | if 'ApplicationQueueData' not in cfs: 446 | sm.create_column_family(database, 'ApplicationQueueData', 447 | comparator_type=self.UTF8_TYPE, 448 | default_validation_class=self.COUNTER_COLUMN_TYPE, 449 | key_validation_class=self.UTF8_TYPE, 450 | caching='all', 451 | column_validation_classes={ 452 | 'queue_count': self.COUNTER_COLUMN_TYPE, 453 | } 454 | ) 455 | 456 | if 'Queues' not in cfs: 457 | sm.create_column_family(database, 'Queues', 458 | comparator_type=self.UTF8_TYPE, 459 | key_validation_class=self.UTF8_TYPE, 460 | caching='all', 461 | column_validation_classes={ 462 | 'partitions': self.INT_TYPE, 463 | 'application': self.UTF8_TYPE, 464 | 'created': self.FLOAT_TYPE, 465 | 'type': self.UTF8_TYPE, 466 | 'consistency': self.UTF8_TYPE, 467 | } 468 | ) 469 | sm.create_index(database, 'Queues', 'application', 470 | self.UTF8_TYPE, index_type=self.KEYS_INDEX) 471 | sm.create_index(database, 'Queues', 'type', 472 | self.UTF8_TYPE, index_type=self.KEYS_INDEX) 473 | 474 | def close(self): 475 | self.sm.close() 476 | -------------------------------------------------------------------------------- /queuey/storage/memory.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
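# A minimal usage sketch of this in-memory backend (an illustration added
# for clarity, not part of the module; the queue name and payload are
# invented):
#
#     backend = MemoryQueueBackend()
#     msg_id, ts = backend.push('weak', 'myapp', 'queue1', 'hello')
#     backend.retrieve('weak', 'myapp', 'queue1', msg_id)['body']  # 'hello'
#     backend.count('weak', 'myapp', 'queue1')                     # 1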
4 | from collections import defaultdict 5 | from cdecimal import Decimal 6 | import uuid 7 | import time 8 | 9 | from zope.interface import implements 10 | 11 | from queuey.storage import MessageQueueBackend 12 | from queuey.storage import MetadataBackend 13 | from queuey.storage.util import convert_time_to_uuid 14 | 15 | DECIMAL_1E7 = Decimal('1e7') 16 | 17 | # Queues keyed by application_name + queue_name 18 | # Queues are just a list of Message objects 19 | message_store = defaultdict(list) 20 | 21 | # Applications keyed by application name 22 | metadata_store = {} 23 | 24 | 25 | class Message(object): 26 | def __init__(self, id, body, ttl, **metadata): 27 | self.id = id 28 | self.body = body 29 | self.metadata = metadata 30 | self.ttl = ttl 31 | self.expiration = None 32 | if ttl: 33 | now = Decimal(self.id.time - 0x01b21dd213814000L) / DECIMAL_1E7 34 | self.expiration = now + ttl 35 | 36 | def __eq__(self, other): 37 | if isinstance(other, Message): 38 | return self.id == other.id 39 | return id(self) == id(other) 40 | 41 | 42 | class Application(object): 43 | def __init__(self, application_name): 44 | self.application_name = application_name 45 | self.queues = {} 46 | 47 | 48 | class QueueMetadata(object): 49 | def __init__(self, queue_name, **metadata): 50 | self.queue_name = queue_name 51 | self.metadata = metadata 52 | 53 | 54 | class MemoryQueueBackend(object): 55 | implements(MessageQueueBackend) 56 | 57 | def __init__(self): 58 | pass 59 | 60 | def retrieve_batch(self, consistency, application_name, queue_names, 61 | limit=None, include_metadata=False, start_at=None, 62 | order="ascending"): 63 | """Retrieve a batch of messages off the queue""" 64 | if not isinstance(queue_names, list): 65 | raise Exception("queue_names must be a list") 66 | 67 | order = -1 if order == 'descending' else 1 68 | 69 | if start_at: 70 | if isinstance(start_at, basestring): 71 | # Assume it's a hex string, transform to a UUID 72 | start_at = uuid.UUID(hex=start_at) 73 | else: 74 | # Assume it's a float/decimal, convert to UUID 75 | start_at = convert_time_to_uuid(start_at) 76 | 77 | queue_names = ['%s:%s' % (application_name, x) for x in queue_names] 78 | results = [] 79 | now = Decimal(repr(time.time())) 80 | for queue_name in queue_names: 81 | msgs = message_store[queue_name] 82 | if not msgs: 83 | continue 84 | msgs.sort(key=lambda k: (k.id.time, k.id.bytes)) 85 | if start_at: 86 | # Locate the index given the start_at 87 | point = (start_at.time, start_at.bytes) 88 | beg = msgs[0].id 89 | end = msgs[-1].id 90 | if point <= (beg.time, beg.bytes): 91 | # Is the start_at less than the beginning? Start at beginning 92 | start = 0 93 | elif point >= (end.time, end.bytes): 94 | # Is the start_at larger than the end?
Start at the end 95 | start = len(msgs) - 1 96 | else: 97 | # The start point is somewhere inside, skim through until 98 | # we hit a value equal to or greater than the start point 99 | start = 0 100 | msg_comp = (msgs[start].id.time, msgs[start].id.bytes) 101 | while point > msg_comp: 102 | start += 1 103 | msg_comp = (msgs[start].id.time, msgs[start].id.bytes) 104 | else: 105 | if order == -1: 106 | start = len(msgs) - 1 107 | else: 108 | start = 0 109 | count = 0 110 | 111 | for msg in msgs[start::order]: 112 | if msg.expiration and now > msg.expiration: 113 | msgs.remove(msg) 114 | continue 115 | count += 1 116 | if limit and count > limit: 117 | break 118 | obj = { 119 | 'message_id': msg.id.hex, 120 | 'timestamp': (Decimal(msg.id.time - 0x01b21dd213814000L) / 121 | DECIMAL_1E7), 122 | 'body': msg.body, 123 | 'metadata': {}, 124 | 'queue_name': queue_name[queue_name.find(':'):] 125 | } 126 | if include_metadata: 127 | obj['metadata'] = msg.metadata 128 | results.append(obj) 129 | return results 130 | 131 | def retrieve(self, consistency, application_name, queue_name, message_id, 132 | include_metadata=False): 133 | """Retrieve a single message""" 134 | if isinstance(message_id, basestring): 135 | # Convert to uuid for lookup 136 | message_id = uuid.UUID(hex=message_id) 137 | else: 138 | # Assume it's a float/decimal, convert to UUID 139 | message_id = convert_time_to_uuid(message_id) 140 | 141 | queue_name = '%s:%s' % (application_name, queue_name) 142 | queue = message_store[queue_name] 143 | found = None 144 | for msg in queue: 145 | if msg.id == message_id: 146 | found = msg 147 | break 148 | 149 | if not found: 150 | return {} 151 | 152 | now = Decimal(repr(time.time())) 153 | if found.expiration and now > found.expiration: 154 | queue.remove(found) 155 | return {} 156 | 157 | obj = { 158 | 'message_id': found.id.hex, 159 | 'timestamp': (Decimal(found.id.time - 0x01b21dd213814000L) / 160 | DECIMAL_1E7), 161 | 'body': found.body, 162 | 'metadata': {}, 163 | 'queue_name': queue_name[queue_name.find(':'):] 164 | } 165 | if include_metadata: 166 | obj['metadata'] = found.metadata 167 | return obj 168 | 169 | def push(self, consistency, application_name, queue_name, message, 170 | metadata=None, ttl=60 * 60 * 24 * 3, timestamp=None): 171 | """Push a message onto the queue""" 172 | if not timestamp: 173 | now = uuid.uuid1() 174 | elif isinstance(timestamp, (float, Decimal)): 175 | now = convert_time_to_uuid(timestamp, randomize=True) 176 | else: 177 | now = uuid.UUID(hex=timestamp) 178 | msg = Message(id=now, body=message, ttl=ttl) 179 | if metadata: 180 | msg.metadata = metadata 181 | timestamp = Decimal(msg.id.time - 0x01b21dd213814000L) / DECIMAL_1E7 182 | queue_name = '%s:%s' % (application_name, queue_name) 183 | if msg in message_store[queue_name]: 184 | message_store[queue_name].remove(msg) 185 | message_store[queue_name].append(msg) 186 | return msg.id.hex, timestamp 187 | 188 | def push_batch(self, consistency, application_name, message_data): 189 | """Push a batch of messages""" 190 | msgs = [] 191 | for queue_name, body, ttl, metadata in message_data: 192 | qn = '%s:%s' % (application_name, queue_name) 193 | msg = Message(id=uuid.uuid1(), body=body, ttl=ttl) 194 | if metadata: 195 | msg.metadata = metadata 196 | message_store[qn].append(msg) 197 | timestamp = (Decimal(msg.id.time - 0x01b21dd213814000L) / 198 | DECIMAL_1E7) 199 | msgs.append((msg.id.hex, timestamp)) 200 | return msgs 201 | 202 | def truncate(self, consistency, application_name, queue_name): 203 | """Remove all contents
of the queue""" 204 | queue_name = '%s:%s' % (application_name, queue_name) 205 | message_store[queue_name] = [] 206 | return True 207 | 208 | def delete(self, consistency, application_name, queue_name, *keys): 209 | """Delete a batch of keys""" 210 | queue_name = '%s:%s' % (application_name, queue_name) 211 | queue = message_store.get(queue_name) 212 | del_items = [] 213 | for index, msg in enumerate(queue): 214 | if msg.id.hex in keys: 215 | del_items.append(index) 216 | for index in sorted(del_items)[::-1]: 217 | del queue[index] 218 | return True 219 | 220 | def count(self, consistency, application_name, queue_name): 221 | """Return a count of the items in this queue""" 222 | queue_name = '%s:%s' % (application_name, queue_name) 223 | queue = message_store.get(queue_name) 224 | if not queue: 225 | return 0 226 | else: 227 | return len(queue) 228 | 229 | 230 | class MemoryMetadata(object): 231 | implements(MetadataBackend) 232 | 233 | def __init__(self): 234 | pass 235 | 236 | def register_queue(self, application_name, queue_name, **metadata): 237 | """Register a queue, optionally with metadata""" 238 | if application_name not in metadata_store: 239 | metadata_store[application_name] = app = Application(application_name) 240 | else: 241 | app = metadata_store[application_name] 242 | if queue_name in app.queues: 243 | app.queues[queue_name].metadata.update(metadata) 244 | else: 245 | metadata['application'] = application_name 246 | if 'created' not in metadata: 247 | metadata['created'] = time.time() 248 | app.queues[queue_name] = QueueMetadata(queue_name, **metadata) 249 | return True 250 | 251 | def remove_queue(self, application_name, queue_name): 252 | """Remove a queue""" 253 | app = metadata_store.get(application_name) 254 | if not app or queue_name not in app.queues: 255 | return False 256 | 257 | del app.queues[queue_name] 258 | return True 259 | 260 | def queue_list(self, application_name, limit=100, offset=None): 261 | """Return list of queues""" 262 | app = metadata_store.get(application_name, None) 263 | if app is None: 264 | return [] 265 | if offset: 266 | queues = filter(lambda x: x >= offset, 267 | sorted(app.queues.keys())) 268 | else: 269 | queues = sorted(app.queues.keys()) 270 | 271 | return queues[:limit] 272 | 273 | def queue_information(self, application_name, queue_names): 274 | """Return information on a registered queue""" 275 | if not isinstance(queue_names, list): 276 | raise Exception("Queue names must be a list.") 277 | app = metadata_store.get(application_name, 278 | Application(application_name)) 279 | results = [] 280 | for qn in queue_names: 281 | queue = app.queues.get(qn) 282 | if not queue: 283 | results.append({}) 284 | continue 285 | results.append(queue.metadata) 286 | return results 287 | -------------------------------------------------------------------------------- /queuey/storage/util.py: -------------------------------------------------------------------------------- 1 | """Storage utility functions""" 2 | from cdecimal import Decimal 3 | import random 4 | import uuid 5 | 6 | DECIMAL_1E7 = Decimal('1e7') 7 | 8 | # This function copied from pycassa, under MIT license 9 | # Copyright (c) 2009 Jonathan Hseu 10 | # 11 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this 12 | # software and associated documentation files (the "Software"), to deal in the Software 13 | # without restriction, including without limitation the rights to use, copy, modify, merge, 14 | # publish, distribute, sublicense, and/or sell 
copies of the Software, and to permit persons 15 | # to whom the Software is furnished to do so, subject to the following conditions: 16 | # 17 | # The above copyright notice and this permission notice shall be included in all copies or 18 | # substantial portions of the Software. 19 | # 20 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 21 | # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 22 | # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE 23 | # FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 | # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | # DEALINGS IN THE SOFTWARE. 26 | 27 | 28 | def convert_time_to_uuid(time_arg, lowest_val=True, randomize=False): 29 | """ 30 | Converts a timestamp to a type 1 :class:`uuid.UUID`. 31 | 32 | This is to assist with getting a time slice of columns or creating 33 | columns when column names are ``TimeUUIDType``. Note that this is done 34 | automatically in most cases if name packing and value packing are 35 | enabled. 36 | 37 | Also, be careful not to rely on this when specifying a discrete 38 | set of columns to fetch, as the non-timestamp portions of the 39 | UUID will be generated randomly. This problem does not matter 40 | with slice arguments, however, as the non-timestamp portions 41 | can be set to their lowest or highest possible values. 42 | 43 | :param time_arg: 44 | The time to use for the timestamp portion of the UUID. 45 | Expected inputs to this would either be a :class:`decimal` object or 46 | a timestamp with a precision of at most 100 nanoseconds. 47 | Sub-second precision should be below the decimal place. 48 | :type time_arg: :class:`decimal` or timestamp 49 | 50 | :param lowest_val: 51 | Whether the UUID produced should be the lowest possible value 52 | UUID with the same timestamp as time_arg or the highest possible 53 | value. 54 | :type lowest_val: bool 55 | 56 | :param randomize: 57 | Whether the clock and node bits of the UUID should be randomly 58 | generated. The `lowest_val` argument will be ignored if this 59 | is true. 60 | :type randomize: bool 61 | 62 | :rtype: :class:`uuid.UUID` 63 | 64 | """ 65 | if isinstance(time_arg, uuid.UUID): 66 | return time_arg 67 | if isinstance(time_arg, float): 68 | time_arg = Decimal.from_float(time_arg) 69 | 70 | ns_100 = int(time_arg * DECIMAL_1E7) 71 | 72 | # 0x01b21dd213814000 is the number of 100-ns intervals between the 73 | # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. 74 | timestamp = ns_100 + 0x01b21dd213814000L 75 | 76 | time_low = timestamp & 0xffffffffL 77 | time_mid = (timestamp >> 32L) & 0xffffL 78 | time_hi_version = (timestamp >> 48L) & 0x0fffL 79 | 80 | if randomize: 81 | rand_bits = random.getrandbits(8 + 8 + 48) 82 | clock_seq_low = rand_bits & 0xffL # 8 bits, no offset 83 | clock_seq_hi_variant = (rand_bits & 0xff00L) / 0x100 # 8 bits, 8 offset 84 | node = (rand_bits & 0xffffffffffff0000L) / 0x10000L # 48 bits, 16 offset 85 | else: 86 | # In the event of a timestamp tie, Cassandra compares the two 87 | # byte arrays directly. This is a *signed* comparison of each byte 88 | # in the two arrays. So, we have to make each byte -128 or +127 for 89 | # this to work correctly. 
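# (Illustrative aside, not in the original source: the slice bounds this
# enables look like
#
#     lo = convert_time_to_uuid(1323973966.282637, lowest_val=True)
#     hi = convert_time_to_uuid(1323973966.282637, lowest_val=False)
#
# where lo and hi carry the same timestamp but bound every UUID that could
# have been generated in that 100-ns tick.)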
90 | # 91 | # For the clock_seq_hi_variant, we don't get to pick the two most 92 | # significant bits (they're always 01), so we are dealing with a 93 | # positive byte range for this particular byte. 94 | if lowest_val: 95 | # Make the lowest value UUID with the same timestamp 96 | clock_seq_low = 0x80L 97 | clock_seq_hi_variant = 0 & 0x3fL # The two most significant bits 98 | # will be 0 and 1, no matter what 99 | node = 0x808080808080L # 48 bits 100 | else: # pragma: nocover 101 | # Make the highest value UUID with the same timestamp 102 | clock_seq_low = 0x7fL 103 | clock_seq_hi_variant = 0x3fL # The two most significant bits will 104 | # be 0 and 1, no matter what 105 | node = 0x7f7f7f7f7f7fL # 48 bits 106 | return uuid.UUID(fields=(time_low, time_mid, time_hi_version, 107 | clock_seq_hi_variant, clock_seq_low, node), version=1) 108 | -------------------------------------------------------------------------------- /queuey/testing.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # This Source Code Form is subject to the terms of the Mozilla Public 3 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 | # You can obtain one at http://mozilla.org/MPL/2.0/. 5 | import os 6 | import time 7 | import xmlrpclib 8 | 9 | processes = {} 10 | 11 | here = os.path.dirname(__file__) 12 | maindir = os.path.dirname(here) 13 | 14 | 15 | def ensure_process(name, timeout=10): 16 | srpc = processes['supervisor'] 17 | if srpc.getProcessInfo(name)['statename'] in ('STOPPED', 'EXITED'): 18 | print(u'Starting %s!\n' % name) 19 | srpc.startProcess(name) 20 | # wait for startup to succeed 21 | for i in range(1, timeout): 22 | state = srpc.getProcessInfo(name)['statename'] 23 | if state == 'RUNNING': 24 | break 25 | else: 26 | print(u'Waiting on %s for %s seconds.' % (name, i * 0.1)) 27 | time.sleep(i * 0.1) 28 | if srpc.getProcessInfo(name)['statename'] != 'RUNNING': 29 | vardir = os.path.join(maindir, 'var') 30 | for fname in os.listdir(vardir): 31 | if fname in ('README.txt', 'supervisor.sock'): 32 | continue 33 | print("\n\nFILE: %s" % fname) 34 | with open(os.path.join(vardir, fname)) as f: 35 | print(f.read()) 36 | raise RuntimeError('%s not running' % name) 37 | 38 | 39 | def setup_supervisor(): 40 | processes['supervisor'] = xmlrpclib.ServerProxy( 41 | 'http://127.0.0.1:4999').supervisor 42 | 43 | 44 | def setup(timeout=10): 45 | """Shared one-time test setup, called from tests/__init__.py""" 46 | setup_supervisor() 47 | ensure_process('cassandra', timeout) 48 | -------------------------------------------------------------------------------- /queuey/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | -------------------------------------------------------------------------------- /queuey/tests/storage.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/.
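# These base classes are wired to a concrete backend by subclassing and
# overriding _makeOne; as a sketch (mirroring what test_memory.py, which is
# not shown in this section, would do):
#
#     class TestMemoryStore(StorageTestMessageBase):
#         def _makeOne(self, **kwargs):
#             from queuey.storage.memory import MemoryQueueBackend
#             return MemoryQueueBackend(**kwargs)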
4 | import unittest 5 | import uuid 6 | 7 | from nose.tools import eq_ 8 | from nose.tools import raises 9 | 10 | 11 | class StorageTestMessageBase(unittest.TestCase): 12 | def _makeOne(self, **kwargs): 13 | raise NotImplementedError("You must implement _makeOne") 14 | 15 | def test_noqueue(self): 16 | backend = self._makeOne() 17 | queue_name = uuid.uuid4().hex 18 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 19 | eq_([], existing) 20 | 21 | def test_onemessage(self): 22 | backend = self._makeOne() 23 | payload = 'a rather boring payload' 24 | queue_name = uuid.uuid4().hex 25 | msg_id = backend.push('weak', 'myapp', queue_name, payload)[0] 26 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 27 | eq_(existing[0]['body'], payload) 28 | 29 | # Retrieve just one message 30 | one = backend.retrieve('weak', 'myapp', queue_name, msg_id) 31 | eq_(one['body'], payload) 32 | 33 | # Empty metadata 34 | one = backend.retrieve('weak', 'myapp', queue_name, msg_id, 35 | include_metadata=True) 36 | eq_(one['metadata'], {}) 37 | 38 | def test_push_batch(self): 39 | backend = self._makeOne() 40 | queue_name = uuid.uuid4().hex 41 | queue_name2 = uuid.uuid4().hex 42 | backend.push_batch('weak', 'myapp', [ 43 | (queue_name, 'first message', 3600, {}), 44 | (queue_name, 'second message', 3600, {'ContentType': 'application/json'}), 45 | (queue_name2, 'another first', 3600, {}), 46 | ]) 47 | batch = backend.retrieve_batch('weak', 'myapp', [queue_name], 48 | include_metadata=True) 49 | eq_(batch[0]['body'], 'first message') 50 | eq_(batch[1]['metadata'], {'ContentType': 'application/json'}) 51 | 52 | def test_must_use_list(self): 53 | @raises(Exception) 54 | def testit(): 55 | backend = self._makeOne() 56 | queue_name = uuid.uuid4().hex 57 | backend.retrieve_batch('weak', 'myapp', queue_name) 58 | testit() 59 | 60 | def test_no_message(self): 61 | backend = self._makeOne() 62 | queue_name = uuid.uuid4().hex 63 | existing = backend.retrieve('weak', 'myapp', queue_name, queue_name) 64 | eq_({}, existing) 65 | 66 | def test_message_ordering(self): 67 | backend = self._makeOne() 68 | payload = 'a rather boring payload' 69 | another = 'another payload' 70 | queue_name = uuid.uuid4().hex 71 | backend.push('weak', 'myapp', queue_name, payload) 72 | middle = backend.push('weak', 'myapp', queue_name, another) 73 | backend.push('weak', 'myapp', queue_name, "more stuff") 74 | 75 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name], 76 | order='descending') 77 | eq_(3, len(existing)) 78 | eq_(existing[1]['body'], another) 79 | 80 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 81 | eq_(3, len(existing)) 82 | eq_(existing[0]['body'], payload) 83 | 84 | # Add a limit 85 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name], 86 | limit=1, order='descending') 87 | eq_(existing[0]['body'], "more stuff") 88 | eq_(len(existing), 1) 89 | 90 | # Add the prior value 91 | existing = backend.retrieve_batch( 92 | 'weak', 'myapp', [queue_name], start_at=middle[0]) 93 | eq_(existing[0]['body'], another) 94 | eq_(len(existing), 2) 95 | 96 | def test_message_removal(self): 97 | backend = self._makeOne() 98 | payload = 'a rather boring payload' 99 | another = 'another payload' 100 | queue_name = uuid.uuid4().hex 101 | backend.push('weak', 'myapp', queue_name, payload) 102 | backend.push('weak', 'myapp', queue_name, another) 103 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 104 | eq_(2, len(existing)) 105 | 106 | backend.truncate('weak', 'myapp',
queue_name) 107 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 108 | eq_(0, len(existing)) 109 | 110 | def test_message_retrieve(self): 111 | backend = self._makeOne() 112 | payload = 'a rather boring payload' 113 | queue_name = uuid.uuid4().hex 114 | last = backend.push('weak', 'myapp', queue_name, payload)[0] 115 | last_uuid = uuid.UUID(hex=last) 116 | msg = backend.retrieve('weak', 'myapp', queue_name, last_uuid) 117 | eq_(msg['body'], payload) 118 | 119 | def test_message_retrieve_with_metadata(self): 120 | backend = self._makeOne() 121 | payload = 'a rather boring payload' 122 | queue_name = uuid.uuid4().hex 123 | last = backend.push('weak', 'myapp', queue_name, payload, 124 | {'ContentType': 'application/json'})[0] 125 | last_uuid = uuid.UUID(hex=last) 126 | msg = backend.retrieve('weak', 'myapp', queue_name, last_uuid, 127 | include_metadata=True) 128 | eq_(msg['body'], payload) 129 | eq_(msg['metadata']['ContentType'], 'application/json') 130 | 131 | def test_batch_message_with_metadata(self): 132 | backend = self._makeOne() 133 | payload = 'a rather boring payload' 134 | queue_name = uuid.uuid4().hex 135 | backend.push('weak', 'myapp', queue_name, payload, 136 | {'ContentType': 'application/json'}) 137 | msg = backend.retrieve_batch('weak', 'myapp', [queue_name], 138 | include_metadata=True) 139 | eq_(msg[0]['body'], payload) 140 | eq_(msg[0]['metadata']['ContentType'], 'application/json') 141 | 142 | def test_message_delete(self): 143 | backend = self._makeOne() 144 | payload = 'a rather boring payload' 145 | another = 'another payload' 146 | queue_name = uuid.uuid4().hex 147 | key1 = backend.push('weak', 'myapp', queue_name, payload)[0] 148 | key2 = backend.push('weak', 'myapp', queue_name, another)[0] 149 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 150 | eq_(2, len(existing)) 151 | 152 | backend.delete('weak', 'myapp', queue_name, key2) 153 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 154 | eq_(1, len(existing)) 155 | 156 | backend.delete('weak', 'myapp', queue_name, key1) 157 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 158 | eq_(0, len(existing)) 159 | 160 | def test_message_counting(self): 161 | backend = self._makeOne() 162 | payload = 'a rather boring payload' 163 | queue_name = uuid.uuid4().hex 164 | for x in range(4): 165 | backend.push('weak', 'myapp', queue_name, payload) 166 | eq_(x + 1, backend.count('weak', 'myapp', queue_name)) 167 | 168 | # Test non-existing row 169 | eq_(backend.count('weak', 'myapp', 'no row'), 0) 170 | 171 | def test_message_update(self): 172 | backend = self._makeOne() 173 | payload = 'a rather boring payload' 174 | queue_name = uuid.uuid4().hex 175 | key, timestamp = backend.push('weak', 'myapp', queue_name, payload) 176 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 177 | eq_(1, len(existing)) 178 | 179 | backend.push('weak', 'myapp', queue_name, payload, timestamp=key) 180 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 181 | eq_(1, len(existing)) 182 | 183 | # using just the message timestamp will generate a message with a new 184 | # random host part 185 | backend.push('weak', 'myapp', queue_name, payload, timestamp=timestamp) 186 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 187 | eq_(2, len(existing)) 188 | 189 | 190 | class StorageTestMetadataBase(unittest.TestCase): 191 | def _makeOne(self): 192 | """Create and return a MetaData backend""" 193 | raise NotImplementedError("You must implement
_makeOne") 194 | 195 | def setUp(self): 196 | backend = self._makeOne() 197 | backend.remove_queue('myapp', 'fredrick') 198 | backend.remove_queue('myapp', 'smith') 199 | backend.remove_queue('myapp', 'alpha') 200 | 201 | def test_register_queue(self): 202 | backend = self._makeOne() 203 | backend.register_queue('myapp', 'fredrick') 204 | eq_(1, len(backend.queue_list('myapp'))) 205 | 206 | def test_update_queue_with_metadata(self): 207 | backend = self._makeOne() 208 | backend.register_queue('myapp', 'fredrick') 209 | eq_(1, len(backend.queue_list('myapp'))) 210 | 211 | # Update metadata 212 | backend.register_queue('myapp', 'fredrick', partitions=5) 213 | info = backend.queue_information('myapp', ['fredrick']) 214 | eq_(5, info[0]['partitions']) 215 | 216 | def test_queue_paging(self): 217 | backend = self._makeOne() 218 | backend.register_queue('myapp', 'fredrick') 219 | backend.register_queue('myapp', 'smith') 220 | backend.register_queue('myapp', 'alpha') 221 | 222 | # See that we get it back in our list 223 | results = backend.queue_list('myapp') 224 | eq_(3, len(results)) 225 | 226 | # Page 1 in 227 | res = backend.queue_list('myapp', offset=results[1]) 228 | eq_(2, len(res)) 229 | eq_(results[2], res[1]) 230 | 231 | def test_remove_queue(self): 232 | backend = self._makeOne() 233 | backend.register_queue('myapp', 'fredrick') 234 | backend.remove_queue('myapp', 'fredrick') 235 | 236 | results = backend.remove_queue('myapp', 'fredrick') 237 | eq_(False, results) 238 | 239 | def test_queue_info(self): 240 | backend = self._makeOne() 241 | backend.register_queue('myapp', 'fredrick', partitions=3) 242 | 243 | info = backend.queue_information('myapp', ['fredrick']) 244 | eq_(info[0]['partitions'], 3) 245 | 246 | eq_([{}], backend.queue_information('myapp', ['asdfasdf'])) 247 | 248 | def test_must_use_list(self): 249 | @raises(Exception) 250 | def testit(): 251 | backend = self._makeOne() 252 | backend.register_queue('myapp', 'fredrick', partitions=3) 253 | queue_name = uuid.uuid4().hex 254 | backend.queue_information('myapp', queue_name) 255 | testit() 256 | -------------------------------------------------------------------------------- /queuey/tests/test_cassandra.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = queuey 4 | debug = false 5 | 6 | [metlog] 7 | logger = queuey 8 | backend = mozsvc.metrics.MetlogPlugin 9 | sender_class = metlog.senders.dev.DebugCaptureSender 10 | 11 | [storage] 12 | backend = queuey.storage.cassandra.CassandraQueueBackend 13 | host = localhost 14 | database = MessageStore 15 | 16 | [metadata] 17 | backend = queuey.storage.cassandra.CassandraMetadata 18 | host = localhost 19 | database = MetadataStore 20 | 21 | [ipauth] 22 | ipaddrs = 127.0.0.1 23 | 24 | [application_keys] 25 | queuey = 26 | f25bfb8fe200475c8a0532a9cbe7651e 27 | 28 | [smtp] 29 | host = localhost 30 | port = 25 31 | sender = queuey@mozilla.com 32 | 33 | [cef] 34 | use = true 35 | file = syslog 36 | vendor = mozilla 37 | version = 0 38 | device_version = 1.3 39 | product = queuey 40 | 41 | [host:localhost] 42 | storage.sqluri = sqlite:////tmp/test.db 43 | 44 | # Paster configuration for Pyramid 45 | [filter:catcherror] 46 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 47 | 48 | [pipeline:main] 49 | pipeline = catcherror 50 | pyramidapp 51 | 52 | [app:pyramidapp] 53 | use = egg:queuey 54 | 55 | pyramid.reload_templates = true 56 | pyramid.debug_authorization = false 57 | 
pyramid.debug_notfound = false 58 | pyramid.debug_routematch = false 59 | pyramid.debug_templates = true 60 | pyramid.default_locale_name = en 61 | 62 | # need to do this programmatically 63 | mako.directories = queuey:templates 64 | 65 | # services config file 66 | configuration = %(here)s/queuey.conf 67 | 68 | [server:main] 69 | use = egg:Paste#http 70 | host = 0.0.0.0 71 | port = 5000 72 | 73 | # Begin logging configuration 74 | 75 | [loggers] 76 | keys = root, queuey 77 | 78 | [handlers] 79 | keys = console 80 | 81 | [formatters] 82 | keys = generic 83 | 84 | [logger_root] 85 | level = INFO 86 | handlers = console 87 | 88 | [logger_queuey] 89 | level = DEBUG 90 | handlers = 91 | qualname = queuey 92 | 93 | [handler_console] 94 | class = StreamHandler 95 | args = (sys.stderr,) 96 | level = NOTSET 97 | formatter = generic 98 | 99 | [formatter_generic] 100 | format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s 101 | 102 | # End logging configuration 103 | -------------------------------------------------------------------------------- /queuey/tests/test_cassandra.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | import uuid 5 | import os 6 | 7 | from nose.tools import eq_ 8 | from nose.tools import raises 9 | from pycassa import ConsistencyLevel 10 | import pycassa 11 | 12 | import mock 13 | 14 | from queuey.tests.storage import StorageTestMessageBase 15 | from queuey.tests.storage import StorageTestMetadataBase 16 | 17 | 18 | class TestCassandraStore(StorageTestMessageBase): 19 | def _makeOne(self, **kwargs): 20 | from queuey.storage.cassandra import CassandraQueueBackend 21 | host = os.environ.get('TEST_CASSANDRA_HOST', 'localhost') 22 | return CassandraQueueBackend(host=host, **kwargs) 23 | 24 | def test_parsehosts(self): 25 | from queuey.storage.cassandra import parse_hosts 26 | hosts = parse_hosts('localhost') 27 | eq_(hosts, ['localhost:9160']) 28 | 29 | hosts = parse_hosts('192.168.2.1,192.168.2.3:9180 , 192.168.2.19') 30 | eq_(hosts, 31 | ['192.168.2.1:9160', '192.168.2.3:9180', '192.168.2.19:9160']) 32 | 33 | def test_credentials(self): 34 | creds = dict(username='foo', password='foo') 35 | backend = self._makeOne(**creds) 36 | eq_(backend.pool.credentials, creds) 37 | 38 | def test_cl(self): 39 | backend = self._makeOne() 40 | backend.cl = None 41 | eq_(ConsistencyLevel.ONE, backend._get_cl('weak')) 42 | eq_(ConsistencyLevel.QUORUM, backend._get_cl('very_strong')) 43 | eq_(ConsistencyLevel.QUORUM, backend._get_cl('medium')) 44 | 45 | def test_multidc_cl(self): 46 | backend = self._makeOne() 47 | backend.cl = None 48 | backend.multi_dc = True 49 | eq_(ConsistencyLevel.ONE, backend._get_cl('weak')) 50 | eq_(ConsistencyLevel.EACH_QUORUM, backend._get_cl('very_strong')) 51 | eq_(ConsistencyLevel.LOCAL_QUORUM, backend._get_cl('medium')) 52 | 53 | def test_delay(self): 54 | backend = self._makeOne() 55 | eq_(0, backend._get_delay('weak')) 56 | backend.cl = None 57 | eq_(1, backend._get_delay('weak')) 58 | eq_(600, backend._get_delay('very_strong')) 59 | eq_(5, backend._get_delay('medium')) 60 | 61 | def test_delayed_messages(self): 62 | backend = self._makeOne() 63 | payload = 'a rather boring payload' 64 | queue_name = uuid.uuid4().hex 65 | backend.push('weak', 'myapp', queue_name, payload) 66 | backend._get_delay 
= lambda x: 5 67 | existing = backend.retrieve_batch('very_strong', 'myapp', [queue_name]) 68 | eq_(0, len(existing)) 69 | 70 | def test_unavailable(self): 71 | from queuey.storage import StorageUnavailable 72 | mock_pool = mock.Mock(spec=pycassa.ColumnFamily) 73 | mock_cf = mock.Mock() 74 | mock_pool.return_value = mock_cf 75 | 76 | def explode(*args, **kwargs): 77 | raise pycassa.UnavailableException() 78 | 79 | mock_cf.get.side_effect = explode 80 | 81 | with mock.patch('pycassa.ColumnFamily', mock_pool): 82 | backend = self._makeOne() 83 | 84 | @raises(StorageUnavailable) 85 | def testit(): 86 | queue_name = uuid.uuid4().hex 87 | backend.retrieve('strong', 'myapp', queue_name, queue_name) 88 | testit() 89 | 90 | 91 | class TestCassandraMetadata(StorageTestMetadataBase): 92 | def _makeOne(self, **kwargs): 93 | from queuey.storage.cassandra import CassandraMetadata 94 | host = os.environ.get('TEST_CASSANDRA_HOST', 'localhost') 95 | return CassandraMetadata(host=host, **kwargs) 96 | 97 | def test_credentials(self): 98 | creds = dict(username='foo', password='foo') 99 | backend = self._makeOne(**creds) 100 | eq_(backend.pool.credentials, creds) 101 | 102 | 103 | del StorageTestMessageBase 104 | del StorageTestMetadataBase 105 | -------------------------------------------------------------------------------- /queuey/tests/test_integrated.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | import os 5 | import unittest 6 | import urllib 7 | import uuid 8 | import json 9 | 10 | from paste.deploy import loadapp 11 | from webtest import TestApp 12 | from nose.tools import eq_ 13 | 14 | auth_header = {'Authorization': 'Application f25bfb8fe200475c8a0532a9cbe7651e'} 15 | 16 | 17 | class TestQueueyBaseApp(unittest.TestCase): 18 | ini_file = 'test_memory.ini' 19 | 20 | def makeOne(self): 21 | try: 22 | return self.application 23 | except AttributeError: 24 | ini_file = os.path.abspath( 25 | os.path.join(os.path.dirname(__file__), self.ini_file)) 26 | self.application = application = TestApp(loadapp('config:%s' % ini_file)) 27 | return application 28 | 29 | def _get_queue_info(self, app, queue_name, include_count=False): 30 | resp = app.get('/v1/queuey', 31 | {'details': 'true', 32 | 'offset': queue_name, 33 | 'include_count': include_count, 34 | 'limit': 1}, headers=auth_header) 35 | return json.loads(resp.body)['queues'][0] 36 | 37 | def _make_app_queue(self, params=None): 38 | params = params or {} 39 | app = self.makeOne() 40 | resp = app.post('/v1/queuey', params, headers=auth_header) 41 | result = json.loads(resp.body) 42 | queue_name = str(result['queue_name']) 43 | return app, queue_name 44 | 45 | def test_app(self): 46 | app = self.makeOne() 47 | resp = app.post('/v1/queuey', status=403) 48 | assert "Access was denied" in resp.body 49 | 50 | # Must have a valid queue name 51 | app.get('/v1/fredrick', status=404) 52 | 53 | def test_queue_list(self): 54 | app = self.makeOne() 55 | resp = app.get('/v1/queuey', headers=auth_header) 56 | result = json.loads(resp.body) 57 | eq_('ok', result['status']) 58 | 59 | def test_queue_and_get_since_message_id(self): 60 | app, queue_name = self._make_app_queue() 61 | 62 | # Post a message 63 | app.post('/v1/queuey/' + queue_name, 64 | 'Hello there!', headers=auth_header) 65 | response = app.post('/v1/queuey/' + 
queue_name, 66 | 'Hello there 2!', headers=auth_header) 67 | result = json.loads(response.body) 68 | msg_id = result['messages'][0]['key'] 69 | msg_ts = result['messages'][0]['timestamp'] 70 | 71 | app.post('/v1/queuey/' + queue_name, 72 | 'Hello there! 3', headers=auth_header) 73 | 74 | # Fetch the messages 75 | resp = app.get('/v1/queuey/' + queue_name, {'since': msg_id}, 76 | headers=auth_header) 77 | result = json.loads(resp.body) 78 | if len(result['messages']) != 2: 79 | print msg_id 80 | print msg_ts 81 | print result 82 | 83 | eq_(2, len(result['messages'])) 84 | msg = result['messages'][0] 85 | eq_('Hello there 2!', msg['body']) 86 | eq_(1, msg['partition']) 87 | 88 | def test_queue_update(self): 89 | app, queue_name = self._make_app_queue({'principles': 'app:queuey'}) 90 | 91 | # Get the queue info 92 | queue = self._get_queue_info(app, queue_name, include_count=True) 93 | eq_(0, queue['count']) 94 | assert 'app:queuey' in queue['principles'] 95 | eq_(1, queue['partitions']) 96 | 97 | # Update the partitions 98 | resp = app.put('/v1/queuey/%s' % queue_name, {'partitions': 2}, 99 | headers=auth_header) 100 | result = json.loads(resp.body) 101 | assert 'app:queuey' in result['principles'] 102 | eq_(2, result['partitions']) 103 | 104 | # Add principles 105 | resp = app.put('/v1/queuey/%s' % queue_name, 106 | {'principles': 'app:queuey,app:notifications'}, 107 | headers=auth_header) 108 | result = json.loads(resp.body) 109 | assert 'app:queuey' in result['principles'] 110 | assert 'app:notifications' in result['principles'] 111 | 112 | # Bad partition update 113 | resp = app.put('/v1/queuey/%s' % queue_name, {'partitions': 1}, 114 | headers=auth_header, status=400) 115 | result = json.loads(resp.body) 116 | eq_('error', result['status']) 117 | 118 | def test_public_queue(self): 119 | app, queue_name = self._make_app_queue({'type': 'public'}) 120 | 121 | # Get the queue info 122 | queue = self._get_queue_info(app, queue_name) 123 | eq_('public', queue['type']) 124 | result = app.get('/v1/queuey/%s' % queue_name) 125 | result = json.loads(result.body) 126 | eq_('ok', result['status']) 127 | 128 | def test_make_queue_post_get_batches(self): 129 | app, queue_name = self._make_app_queue({'partitions': 3}) 130 | 131 | # Post several messages 132 | msgs = { 133 | 'messages': [ 134 | {'body': 'Hello msg 1', 'partition': 2}, 135 | {'body': 'Hello msg 2', 'partition': 2}, 136 | {'body': 'Hello msg 3', 'partition': 1, 'ttl': 3600} 137 | ] 138 | } 139 | msgs = json.dumps(msgs) 140 | json_header = {'Content-Type': 'application/json'} 141 | json_header.update(auth_header) 142 | resp = app.post('/v1/queuey/' + queue_name, msgs, headers=json_header) 143 | result = json.loads(resp.body) 144 | 145 | # Fetch the messages 146 | resp = app.get('/v1/queuey/' + queue_name, {'partitions': '1,2,3'}, 147 | headers=auth_header) 148 | result = json.loads(resp.body) 149 | eq_(3, len(result['messages'])) 150 | msg = result['messages'][0] 151 | eq_('Hello msg 3', msg['body']) 152 | eq_(1, msg['partition']) 153 | eq_(2, result['messages'][1]['partition']) 154 | 155 | # From a single partition 156 | resp = app.get('/v1/queuey/' + queue_name, {'partitions': '2'}, 157 | headers=auth_header) 158 | result = json.loads(resp.body) 159 | eq_(2, len(result['messages'])) 160 | msg = result['messages'][0] 161 | eq_('Hello msg 1', msg['body']) 162 | 163 | # Dump a message in a cluster without a partition 164 | msgs = { 165 | 'messages': [ 166 | {'body': 'Hello msg 1'}, 167 | ] 168 | } 169 | msgs = json.dumps(msgs) 170 | 
json_header = {'Content-Type': 'application/json'} 171 | json_header.update(auth_header) 172 | resp = app.post('/v1/queuey/' + queue_name, msgs, headers=json_header) 173 | result = json.loads(resp.body) 174 | eq_('ok', result['status']) 175 | 176 | def test_delete_queue(self): 177 | app, queue_name = self._make_app_queue({'partitions': 3}) 178 | 179 | queue = self._get_queue_info(app, queue_name) 180 | eq_('user', queue['type']) 181 | eq_(queue_name, queue['queue_name']) 182 | eq_(3, queue['partitions']) 183 | 184 | resp = app.delete('/v1/queuey/%s' % queue_name, 185 | headers=auth_header) 186 | result = json.loads(resp.body) 187 | eq_('ok', result['status']) 188 | 189 | resp = app.get('/v1/queuey/%s' % queue_name, headers=auth_header, 190 | status=404) 191 | result = json.loads(resp.body) 192 | eq_('error', result['status']) 193 | 194 | def test_delete_queue_messages(self): 195 | app, queue_name = self._make_app_queue({'partitions': 3}) 196 | 197 | # Post a few messages 198 | p2 = auth_header.copy() 199 | p2['X-Partition'] = '2' 200 | resp = app.post('/v1/queuey/' + queue_name, 201 | 'Hello there!', headers=p2) 202 | resp2 = app.post('/v1/queuey/' + queue_name, 203 | 'Hello there!', headers=p2) 204 | msg = json.loads(resp2.body)['messages'][0] 205 | msg2 = json.loads(resp.body)['messages'][0] 206 | p2['X-Partition'] = '1' 207 | resp = app.post('/v1/queuey/' + queue_name, 208 | 'Hello there!', headers=p2) 209 | msg3 = json.loads(resp.body)['messages'][0] 210 | resp = app.post('/v1/queuey/' + queue_name, 211 | 'Hello there!', headers=auth_header) 212 | 213 | # Fetch the messages 214 | resp = app.get('/v1/queuey/' + queue_name, {'partitions': '1,2,3'}, 215 | headers=auth_header) 216 | result = json.loads(resp.body) 217 | eq_(4, len(result['messages'])) 218 | 219 | # Delete 2 messages 220 | q = urllib.quote_plus('2:%s,2:%s' % (msg['key'], msg2['key'])) 221 | resp = app.delete('/v1/queuey/%s/%s' % (queue_name, q), headers=auth_header) 222 | 223 | resp = app.get('/v1/queuey/' + queue_name, {'partitions': '1,2,3'}, 224 | headers=auth_header) 225 | result = json.loads(resp.body) 226 | eq_(2, len(result['messages'])) 227 | 228 | # Delete 1 message 229 | q = str(urllib.quote_plus('%s' % msg3['key'])) 230 | resp = app.delete('/v1/queuey/%s/%s' % (queue_name, q), headers=auth_header) 231 | 232 | resp = app.get('/v1/queuey/' + queue_name, {'partitions': '1,2,3'}, 233 | headers=auth_header) 234 | result = json.loads(resp.body) 235 | eq_(1, len(result['messages'])) 236 | 237 | def test_get_messages_by_key(self): 238 | app, queue_name = self._make_app_queue({'partitions': 2}) 239 | 240 | # Post several messages 241 | test_msgs = { 242 | 'messages': [ 243 | {'body': 'Hello msg 1', 'partition': 1}, 244 | {'body': 'Hello msg 2', 'partition': 1}, 245 | {'body': 'Hello msg 3', 'partition': 2}, 246 | ] 247 | } 248 | msgs = json.dumps(test_msgs) 249 | json_header = {'Content-Type': 'application/json'} 250 | json_header.update(auth_header) 251 | resp = app.post('/v1/queuey/' + queue_name, msgs, headers=json_header) 252 | result = json.loads(resp.body) 253 | 254 | messages = [] 255 | for m in result['messages']: 256 | messages.append(urllib.quote_plus( 257 | str(m['partition']) + ':' + m['key'])) 258 | 259 | # Fetch a non-existing message 260 | fake = '3%3A' + uuid.uuid1().hex 261 | resp = app.get('/v1/queuey/%s/%s' % (queue_name, fake), 262 | headers=auth_header) 263 | result = json.loads(resp.body) 264 | eq_(0, len(result['messages'])) 265 | 266 | # Fetch first message 267 | resp = app.get('/v1/queuey/%s/%s' % 
(queue_name, messages[0]), 268 | headers=auth_header) 269 | result = json.loads(resp.body) 270 | eq_(1, len(result['messages'])) 271 | eq_(messages[0][4:], result['messages'][0]['message_id']) 272 | 273 | # Fetch one existing and one non-existing message 274 | resp = app.get('/v1/queuey/%s/%s,%s' % (queue_name, messages[0], fake), 275 | headers=auth_header) 276 | result = json.loads(resp.body) 277 | eq_(1, len(result['messages'])) 278 | eq_(messages[0][4:], result['messages'][0]['message_id']) 279 | 280 | # Fetch multiple messages 281 | resp = app.get('/v1/queuey/%s/%s' % (queue_name, ','.join(messages)), 282 | headers=auth_header) 283 | result = json.loads(resp.body) 284 | eq_(3, len(result['messages'])) 285 | bodies = set([m['body'] for m in result['messages']]) 286 | test_bodies = set([m['body'] for m in test_msgs['messages']]) 287 | eq_(test_bodies, bodies) 288 | 289 | def test_update_message(self): 290 | app, queue_name = self._make_app_queue() 291 | h = auth_header.copy() 292 | h['X-TTL'] = '300' 293 | app.post('/v1/queuey/' + queue_name, 'Hello there!', headers=h) 294 | resp = app.get('/v1/queuey/' + queue_name, headers=auth_header) 295 | result = json.loads(resp.body) 296 | eq_(1, len(result['messages'])) 297 | 298 | # update message 299 | message = result['messages'][0] 300 | message_id = message['message_id'] 301 | timestamp = message['timestamp'] 302 | q = urllib.quote_plus('1:' + message_id) 303 | h = auth_header.copy() 304 | h['X-TTL'] = '600' 305 | resp = app.put('/v1/queuey/%s/%s' % (queue_name, q), 'Good bye!', 306 | headers=h) 307 | eq_(200, resp.status_int) 308 | 309 | # check message 310 | resp = app.get('/v1/queuey/' + queue_name, headers=auth_header) 311 | result = json.loads(resp.body) 312 | eq_(1, len(result['messages'])) 313 | message = result['messages'][0] 314 | eq_('Good bye!', message['body']) 315 | eq_(timestamp, message['timestamp']) 316 | 317 | def test_update_messages(self): 318 | app, queue_name = self._make_app_queue() 319 | h = auth_header.copy() 320 | app.post('/v1/queuey/' + queue_name, 'Hello 1', headers=h) 321 | app.post('/v1/queuey/' + queue_name, 'Hello 2', headers=h) 322 | resp = app.get('/v1/queuey/' + queue_name, headers=h) 323 | result = json.loads(resp.body) 324 | eq_(2, len(result['messages'])) 325 | 326 | # update messages 327 | id0 = result['messages'][0]['message_id'] 328 | id1 = result['messages'][1]['message_id'] 329 | q = urllib.quote_plus('1:' + id0 + ',1:' + id1) 330 | resp = app.put('/v1/queuey/%s/%s' % (queue_name, q), 'Bye', headers=h) 331 | eq_(200, resp.status_int) 332 | 333 | # check messages 334 | resp = app.get('/v1/queuey/' + queue_name, headers=h) 335 | result = json.loads(resp.body) 336 | eq_(2, len(result['messages'])) 337 | eq_('Bye', result['messages'][0]['body']) 338 | eq_('Bye', result['messages'][1]['body']) 339 | 340 | def test_update_messages_implicit_create(self): 341 | app, queue_name = self._make_app_queue() 342 | h = auth_header.copy() 343 | 344 | # update non-existing messages 345 | id0 = uuid.uuid1().hex 346 | id1 = uuid.uuid1().hex 347 | q = urllib.quote_plus('1:' + id0 + ',1:' + id1) 348 | resp = app.put('/v1/queuey/%s/%s' % (queue_name, q), 'Yo', headers=h) 349 | eq_(200, resp.status_int) 350 | 351 | # check messages 352 | resp = app.get('/v1/queuey/' + queue_name, headers=h) 353 | result = json.loads(resp.body) 354 | eq_(2, len(result['messages'])) 355 | eq_('Yo', result['messages'][0]['body']) 356 | eq_('Yo', result['messages'][1]['body']) 357 | 358 | def test_high_ttl(self): 359 | app, queue_name = 
self._make_app_queue() 360 | h = auth_header.copy() 361 | h['X-TTL'] = str(2 ** 25) 362 | resp = app.post('/v1/queuey/' + queue_name, 'Hello there!', headers=h) 363 | result = json.loads(resp.body) 364 | eq_(201, resp.status_int) 365 | eq_('ok', result['status']) 366 | 367 | def test_bad_ttl(self): 368 | app, queue_name = self._make_app_queue() 369 | h = auth_header.copy() 370 | h['X-TTL'] = '0' 371 | resp = app.post('/v1/queuey/' + queue_name, 'Hello there!', headers=h, 372 | status=400) 373 | result = json.loads(resp.body) 374 | eq_('error', result['status']) 375 | eq_(['ttl'], result['error_msg'].keys()) 376 | 377 | def test_bad_partition(self): 378 | app, queue_name = self._make_app_queue() 379 | h = auth_header.copy() 380 | h['X-Partition'] = 'fred' 381 | resp = app.post('/v1/queuey/' + queue_name, 382 | 'Hello there!', headers=h, 383 | status=400) 384 | result = json.loads(resp.body) 385 | eq_('error', result['status']) 386 | eq_('"fred" is not a number', result['error_msg']['partition']) 387 | 388 | def test_no_body(self): 389 | app, queue_name = self._make_app_queue() 390 | resp = app.post('/v1/queuey/' + queue_name, 391 | '', headers=auth_header, 392 | status=400) 393 | result = json.loads(resp.body) 394 | eq_('error', result['status']) 395 | eq_(u'Required', result['error_msg']['body']) 396 | 397 | def test_invalid_partition(self): 398 | app, queue_name = self._make_app_queue() 399 | p2 = auth_header.copy() 400 | p2['X-Partition'] = '4' 401 | resp = app.post('/v1/queuey/' + queue_name, "Hi there", headers=p2, 402 | status=400) 403 | result = json.loads(resp.body) 404 | eq_('error', result['status']) 405 | eq_("4 is greater than maximum value 1", result['error_msg']['partition']) 406 | 407 | # Dump a message in a cluster with a bad partition 408 | msgs = { 409 | 'messages': [ 410 | {'body': 'Hello msg 1'}, 411 | {'body': 'Hello msg 2', 'partition': 4} 412 | ] 413 | } 414 | msgs = json.dumps(msgs) 415 | json_header = {'Content-Type': 'application/json'} 416 | json_header.update(auth_header) 417 | resp = app.post('/v1/queuey/' + queue_name, msgs, headers=json_header, 418 | status=400) 419 | result = json.loads(resp.body) 420 | eq_('error', result['status']) 421 | eq_("4 is greater than maximum value 1", result['error_msg']['1.partition']) 422 | 423 | def test_no_queuename(self): 424 | app, queue_name = self._make_app_queue() 425 | resp = app.post('/v1/queuey/' + queue_name + 'blip', 426 | 'Hello there!', headers=auth_header, status=404) 427 | eq_(404, resp.status_int) 428 | 429 | def test_queuename_too_long(self): 430 | app, queue_name = self._make_app_queue() 431 | resp = app.post('/v1/queuey/' + 'blip' * 30, 432 | {'.body': 'Hello there!'}, headers=auth_header, status=404) 433 | eq_(404, resp.status_int) 434 | 435 | def test_invalid_partition_type(self): 436 | app, queue_name = self._make_app_queue() 437 | resp = app.get('/v1/queuey/' + queue_name, {'partitions': '1,fred'}, 438 | headers=auth_header, status=400) 439 | result = json.loads(resp.body) 440 | eq_('error', result['status']) 441 | 442 | def test_bad_principle_name(self): 443 | app, queue_name = self._make_app_queue() 444 | resp = app.post('/v1/queuey', {'principles': 'app:queuey,apple:oranges'}, 445 | headers=auth_header, status=400) 446 | eq_(400, resp.status_int) 447 | resp = app.post('/v1/queuey', {'principles': 'apple:oranges'}, 448 | headers=auth_header, status=400) 449 | eq_(400, resp.status_int) 450 | 451 | def test_bad_hex(self): 452 | app, queue_name = self._make_app_queue() 453 | q = 
urllib.quote_plus('2:asdfasdfasdfadsfasdf') 454 | resp = app.delete('/v1/queuey/%s/%s' % (queue_name, q), headers=auth_header, 455 | status=400) 456 | result = json.loads(resp.body) 457 | eq_('error', result['status']) 458 | 459 | def test_bad_json(self): 460 | app, queue_name = self._make_app_queue() 461 | q = '[this isnt good}' 462 | h = auth_header.copy() 463 | h['Content-Type'] = 'application/json' 464 | resp = app.post('/v1/queuey/%s' % queue_name, q, headers=h, status=400) 465 | result = json.loads(resp.body) 466 | eq_('error', result['status']) 467 | 468 | def test_bad_appkey(self): 469 | app = self.makeOne() 470 | resp = app.post('/v1/queuey', headers={'Authorization': 'Application OOPS'}, 471 | status=401) 472 | result = json.loads(resp.body) 473 | 474 | eq_('error', result['status']) 475 | eq_('InvalidApplicationKey', result['error_msg'].keys()[0]) 476 | 477 | def test_no_path(self): 478 | app = self.makeOne() 479 | resp = app.post('/blah', status=404) 480 | eq_(404, resp.status_int) 481 | 482 | 483 | class TestCassandraQueueyApp(TestQueueyBaseApp): 484 | ini_file = 'test_cassandra.ini' 485 | -------------------------------------------------------------------------------- /queuey/tests/test_memory.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = queuey 4 | debug = false 5 | 6 | [metlog] 7 | logger = queuey 8 | backend = mozsvc.metrics.MetlogPlugin 9 | sender_class = metlog.senders.dev.DebugCaptureSender 10 | 11 | [storage] 12 | backend = queuey.storage.memory.MemoryQueueBackend 13 | 14 | [metadata] 15 | backend = queuey.storage.memory.MemoryMetadata 16 | 17 | [ipauth] 18 | ipaddrs = 127.0.0.1 19 | 20 | [application_keys] 21 | queuey = 22 | f25bfb8fe200475c8a0532a9cbe7651e 23 | 24 | [smtp] 25 | host = localhost 26 | port = 25 27 | sender = queuey@mozilla.com 28 | 29 | [cef] 30 | use = true 31 | file = syslog 32 | vendor = mozilla 33 | version = 0 34 | device_version = 1.3 35 | product = queuey 36 | 37 | [host:localhost] 38 | storage.sqluri = sqlite:////tmp/test.db 39 | 40 | # Paster configuration for Pyramid 41 | [filter:catcherror] 42 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 43 | 44 | [pipeline:main] 45 | pipeline = catcherror 46 | pyramidapp 47 | 48 | [app:pyramidapp] 49 | use = egg:queuey 50 | 51 | pyramid.reload_templates = true 52 | pyramid.debug_authorization = false 53 | pyramid.debug_notfound = false 54 | pyramid.debug_routematch = false 55 | pyramid.debug_templates = true 56 | pyramid.default_locale_name = en 57 | 58 | # need to do this programmatically 59 | mako.directories = queuey:templates 60 | 61 | # services config file 62 | configuration = %(here)s/queuey.conf 63 | 64 | [server:main] 65 | use = egg:Paste#http 66 | host = 0.0.0.0 67 | port = 5000 68 | 69 | # Begin logging configuration 70 | 71 | [loggers] 72 | keys = root, queuey 73 | 74 | [handlers] 75 | keys = console 76 | 77 | [formatters] 78 | keys = generic 79 | 80 | [logger_root] 81 | level = INFO 82 | handlers = console 83 | 84 | [logger_queuey] 85 | level = DEBUG 86 | handlers = 87 | qualname = queuey 88 | 89 | [handler_console] 90 | class = StreamHandler 91 | args = (sys.stderr,) 92 | level = NOTSET 93 | formatter = generic 94 | 95 | [formatter_generic] 96 | format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s 97 | 98 | # End logging configuration 99 | -------------------------------------------------------------------------------- /queuey/tests/test_memory.py: 
-------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | import unittest 5 | import uuid 6 | import time 7 | 8 | from nose.tools import eq_ 9 | 10 | from queuey.tests.storage import StorageTestMessageBase 11 | from queuey.tests.storage import StorageTestMetadataBase 12 | 13 | 14 | class TestMessage(unittest.TestCase): 15 | 16 | def _makeOne(self): 17 | from queuey.storage.memory import Message 18 | return Message(uuid.uuid1(), 'body', 300) 19 | 20 | def test_create(self): 21 | msg = self._makeOne() 22 | eq_('body', msg.body) 23 | 24 | def test_compare(self): 25 | msg1 = self._makeOne() 26 | msg2 = self._makeOne() 27 | eq_(msg1, msg1) 28 | eq_(msg2, msg2) 29 | self.assertNotEqual(msg1, msg2) 30 | self.assertNotEqual(msg1, object()) 31 | 32 | 33 | class TestMemoryStore(StorageTestMessageBase): 34 | def _makeOne(self, **kwargs): 35 | from queuey.storage.memory import MemoryQueueBackend 36 | return MemoryQueueBackend() 37 | 38 | def test_ttl_in_batch(self): 39 | backend = self._makeOne() 40 | payload = 'a rather boring payload' 41 | queue_name = uuid.uuid4().hex 42 | past = time.time() - 10 43 | backend.push('weak', 'myapp', queue_name, payload, ttl=5, 44 | timestamp=past)[0] 45 | existing = backend.retrieve_batch('weak', 'myapp', [queue_name]) 46 | eq_([], existing) 47 | 48 | def test_ttl_in_retrieve(self): 49 | backend = self._makeOne() 50 | payload = 'a rather boring payload' 51 | queue_name = uuid.uuid4().hex 52 | past = time.time() - 10 53 | msg = backend.push('weak', 'myapp', queue_name, payload, ttl=5, 54 | timestamp=past)[0] 55 | existing = backend.retrieve('weak', 'myapp', queue_name, msg) 56 | eq_({}, existing) 57 | 58 | 59 | class TestMemoryMetadata(StorageTestMetadataBase): 60 | def _makeOne(self): 61 | from queuey.storage.memory import MemoryMetadata 62 | return MemoryMetadata() 63 | 64 | del StorageTestMessageBase 65 | del StorageTestMetadataBase 66 | -------------------------------------------------------------------------------- /queuey/validators.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | import re 5 | import uuid 6 | 7 | import colander 8 | 9 | BID_REGEX = re.compile(r'^(bid:\w+@\w+\.\w+|app:\w+)$') 10 | INT_REGEX = re.compile(r'^\d+$') 11 | 12 | 13 | @colander.deferred 14 | def default_queuename(node, kw): 15 | queue_name = kw.get('default_queue_name') 16 | if queue_name is None: 17 | queue_name = uuid.uuid4().hex 18 | return queue_name 19 | 20 | 21 | @colander.deferred 22 | def max_queue_partition(node, kw): 23 | max_partition = kw['max_partition'] 24 | return colander.Range(1, max_partition) 25 | 26 | 27 | class CommaList(object): 28 | def deserialize(self, node, cstruct): 29 | if cstruct is colander.null: 30 | return colander.null 31 | return [x.strip() for x in cstruct.split(',') if x] 32 | 33 | 34 | def principle_validator(node, value): 35 | for value in [x.strip() for x in value.split(',') if x]: 36 | if not BID_REGEX.match(value): 37 | raise colander.Invalid(node, '%r is not a valid permission list.' 
% 38 | value) 39 | 40 | 41 | def comma_int_list(node, value): 42 | msg = ('%r is not a valid comma separated list of integers or a single ' 43 | 'integer.' % value) 44 | for val in value: 45 | if not INT_REGEX.match(val): 46 | raise colander.Invalid(node, msg) 47 | 48 | 49 | class GetMessages(colander.MappingSchema): 50 | since = colander.SchemaNode(colander.String(), missing=None) 51 | limit = colander.SchemaNode(colander.Int(), missing=None, 52 | validator=colander.Range(1, 1000)) 53 | order = colander.SchemaNode(colander.String(), missing="ascending", 54 | validator=colander.OneOf(['descending', 55 | 'ascending'])) 56 | partitions = colander.SchemaNode(CommaList(), missing=[1], 57 | validator=comma_int_list) 58 | 59 | 60 | class UpdateQueue(colander.MappingSchema): 61 | partitions = colander.SchemaNode(colander.Int(), missing=None, 62 | validator=colander.Range(1, 200)) 63 | type = colander.SchemaNode(colander.String(), missing='user', 64 | validator=colander.OneOf(['public', 'user'])) 65 | consistency = colander.SchemaNode( 66 | colander.String(), missing='strong', validator=colander.OneOf( 67 | ['weak', 'strong', 'very_strong'])) 68 | principles = colander.SchemaNode(colander.String(), missing=None, 69 | validator=principle_validator) 70 | 71 | 72 | class NewQueue(colander.MappingSchema): 73 | partitions = colander.SchemaNode(colander.Int(), missing=1, 74 | validator=colander.Range(1, 200)) 75 | queue_name = colander.SchemaNode(colander.String(), 76 | missing=default_queuename) 77 | type = colander.SchemaNode(colander.String(), missing='user', 78 | validator=colander.OneOf(['public', 'user'])) 79 | consistency = colander.SchemaNode( 80 | colander.String(), missing='strong', validator=colander.OneOf( 81 | ['weak', 'strong', 'very_strong'])) 82 | principles = colander.SchemaNode(colander.String(), missing=None, 83 | validator=principle_validator) 84 | 85 | 86 | class QueueList(colander.MappingSchema): 87 | limit = colander.SchemaNode(colander.Int(), missing=None) 88 | offset = colander.SchemaNode(colander.String(), missing=None) 89 | details = colander.SchemaNode(colander.Bool(), missing=False) 90 | include_count = colander.SchemaNode(colander.Bool(), missing=False) 91 | 92 | 93 | class Message(colander.MappingSchema): 94 | body = colander.SchemaNode(colander.String(), 95 | validator=colander.Length(min=1)) 96 | partition = colander.SchemaNode(colander.Int(), missing=None, 97 | validator=max_queue_partition) 98 | ttl = colander.SchemaNode(colander.Int(), missing=60 * 60 * 24 * 3, 99 | validator=colander.Range(1, 2 ** 25)) 100 | 101 | 102 | class MessageList(colander.SequenceSchema): 103 | message = Message() 104 | -------------------------------------------------------------------------------- /queuey/views.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
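# A rough sketch of how the schemas above get used (illustrative values,
# not code from this module): binding resolves the deferred nodes, and
# deserialize() coerces types and fills in defaults, e.g.
#
#     schema = validators.NewQueue().bind()
#     params = schema.deserialize({'partitions': '3'})
#     # -> {'partitions': 3, 'queue_name': <uuid4 hex>, 'type': 'user',
#     #     'consistency': 'strong', 'principles': None}
#
# A colander.Invalid raised during deserialization is rendered as a 400
# response by the bad_params catch-all below.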
4 | import random 5 | 6 | from pyramid.view import view_config 7 | import ujson 8 | 9 | from queuey import validators 10 | 11 | from queuey.resources import Application 12 | from queuey.resources import Queue 13 | from queuey.resources import MessageBatch 14 | 15 | 16 | class InvalidParameter(Exception): 17 | """Raised in views to flag a bad parameter""" 18 | status = 400 19 | 20 | 21 | class UJSONRendererFactory: 22 | def __init__(self, info): 23 | pass 24 | 25 | def __call__(self, value, system): 26 | return ujson.dumps(value) 27 | 28 | 29 | # Our invalid schema catch-all 30 | @view_config(context=InvalidParameter) 31 | @view_config(context='colander.Invalid') 32 | @view_config(context='queuey.security.InvalidBrowserID') 33 | @view_config(context='queuey.security.InvalidApplicationKey') 34 | @view_config(context='queuey.resources.InvalidQueueName') 35 | @view_config(context='queuey.resources.InvalidUpdate') 36 | @view_config(context='queuey.resources.InvalidMessageID') 37 | @view_config(context='queuey.storage.StorageUnavailable') 38 | def bad_params(context, request): 39 | exc = request.exception 40 | cls_name = exc.__class__.__name__ 41 | if cls_name == 'Invalid': 42 | errors = exc.asdict() 43 | request.response.status = 400 44 | elif cls_name == 'StorageUnavailable': 45 | request.response.status = 500 46 | errors = {'storage': 'Back-end storage unavailable. If this is a ' 47 | 'queue request that includes counts, try ' 48 | 'omitting the count.'} 49 | else: 50 | request.response.status = getattr(exc, 'status', 401) 51 | errors = {cls_name: str(exc)} 52 | return { 53 | 'status': 'error', 54 | 'error_msg': errors 55 | } 56 | 57 | 58 | @view_config(context=Application, request_method='POST', 59 | permission='create_queue') 60 | def create_queue(context, request): 61 | schema = validators.NewQueue().bind() 62 | params = schema.deserialize(request.POST) 63 | context.register_queue(**params) 64 | request.response.status = 201 65 | return dict(status='ok', **params) 66 | 67 | 68 | @view_config(context=Application, request_method='GET', 69 | permission='view_queues') 70 | def queue_list(context, request): 71 | params = validators.QueueList().deserialize(request.GET) 72 | return { 73 | 'status': 'ok', 74 | 'queues': context.queue_list(**params) 75 | } 76 | 77 | 78 | @view_config(context=Queue, request_method='PUT', permission='create_queue') 79 | def update_queue(context, request): 80 | params = validators.UpdateQueue().deserialize(request.POST) 81 | context.update_metadata(**params) 82 | return dict( 83 | status='ok', 84 | queue_name=context.queue_name, 85 | partitions=context.partitions, 86 | created=context.created, 87 | principles=context.principles, 88 | type=context.type 89 | ) 90 | 91 | 92 | @view_config(context=Queue, request_method='POST', permission='create', 93 | header="Content-Type:application/json") 94 | def new_messages(context, request): 95 | request.response.status = 201 96 | try: 97 | msgs = ujson.loads(request.body)['messages'] 98 | except Exception: 99 | # Any failure to parse or unpack the JSON body is rejected as a 400 100 | raise InvalidParameter("Unable to properly deserialize JSON body.") 101 | schema = validators.MessageList().bind(max_partition=context.partitions) 102 | msgs = schema.deserialize(msgs) 103 | for msg in msgs: 104 | if not msg['partition']: 105 | msg['partition'] = random.randint(1, context.partitions) 106 | return { 107 | 'status': 'ok', 108 | 'messages': context.push_batch(msgs) 109 | } 110 | 111 | 112 | @view_config(context=Queue, request_method='POST', 
permission='create') 113 | def new_message(context, request): 114 | request.response.status = 201 115 | msg = {'body': request.body, 116 | 'ttl': request.headers.get('X-TTL'), 117 | 'partition': request.headers.get('X-Partition')} 118 | schema = validators.Message().bind(max_partition=context.partitions) 119 | msg = schema.deserialize(msg) 120 | if not msg['partition']: 121 | msg['partition'] = random.randint(1, context.partitions) 122 | return { 123 | 'status': 'ok', 124 | 'messages': context.push_batch([msg]) 125 | } 126 | 127 | 128 | @view_config(context=Queue, request_method='GET', permission='view') 129 | def get_messages(context, request): 130 | params = validators.GetMessages().deserialize(request.GET) 131 | return { 132 | 'status': 'ok', 133 | 'messages': context.get_messages(**params) 134 | } 135 | 136 | 137 | @view_config(context=MessageBatch, request_method='GET', permission='view') 138 | def get_messages_by_key(context, request): 139 | return { 140 | 'status': 'ok', 141 | 'messages': context.get(), 142 | } 143 | 144 | 145 | @view_config(context=MessageBatch, request_method='PUT', permission='create') 146 | def update_messages(context, request): 147 | msg = {'body': request.body, 148 | 'ttl': request.headers.get('X-TTL', 60 * 60 * 24 * 3), 149 | 'partition': None} 150 | params = validators.Message().deserialize(msg) 151 | context.update(params) 152 | return {'status': 'ok'} 153 | 154 | 155 | @view_config(context=Queue, request_method='DELETE', permission='delete_queue') 156 | @view_config(context=MessageBatch, request_method='DELETE', 157 | permission='delete') 158 | def delete(context, request): 159 | context.delete() 160 | return {'status': 'ok'} 161 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Chameleon==2.9.2 2 | Distutils2==1.0a3 3 | Jinja2==2.6 4 | Mako==0.7.2 5 | MarkupSafe==0.15 6 | MoPyTools==3.3 7 | Paste==1.7.5.1 8 | PasteDeploy==1.5.0 9 | PasteScript==1.7.5 10 | Pygments==1.5 11 | Sphinx==1.1.3 12 | WebOb==1.2.2 13 | WebTest==1.3.4 14 | argparse==1.2.1 15 | cdecimal==2.3 16 | cef==0.3 17 | colander==0.9.8 18 | coverage==3.5.2 19 | distribute==0.6.28 20 | docutils==0.9.1 21 | flake8==1.4 22 | gunicorn==0.14.6 23 | meld3==0.6.8 24 | metlog-py==0.9.5 25 | mock==0.8.0 26 | mozsvc==0.6 27 | nose==1.1.2 28 | pycassa==1.7.0 29 | pypi2rpm==0.6.3 30 | pyramid==1.3.3 31 | repoze.lru==0.6 32 | simplejson==2.6.1 33 | supervisor==3.0a12 34 | thrift==0.8.0 35 | translationstring==1.1 36 | ujson==1.19 37 | venusian==1.0a6 38 | virtualenv==1.7.2 39 | wsgiref==0.1.2 40 | z3c.checkversions==0.4.1 41 | zope.deprecation==4.0.0 42 | zope.interface==4.0.1 43 | -------------------------------------------------------------------------------- /runtests.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # This Source Code Form is subject to the terms of the Mozilla Public 3 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
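# In outline: runtests.py starts supervisord unless its socket in var/
# already exists (supervisord is assumed to manage the local Cassandra
# built by `make cassandra`), polls until a pycassa connection to the
# MessageStore keyspace succeeds -- or fails with InvalidRequestException,
# which means the server is up but the schema is missing -- and then runs
# `make test-python`. A typical invocation might be:
#
#     TEST_CASSANDRA_HOST=127.0.0.1 python runtests.py
#
# where the environment variable is optional and 127.0.0.1 is the default.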
5 | 6 | import os 7 | import sys 8 | import time 9 | from contextlib import contextmanager 10 | 11 | import pycassa 12 | 13 | from queuey.storage.cassandra import parse_hosts 14 | from queuey.testing import setup 15 | 16 | 17 | @contextmanager 18 | def supervisor(): 19 | started_supervisor = False 20 | if not os.path.exists(os.path.join('var', 'supervisor.sock')): 21 | started_supervisor = True 22 | os.system('bin/supervisord') 23 | try: 24 | yield 25 | finally: 26 | if started_supervisor: 27 | os.system('bin/supervisorctl shutdown') 28 | 29 | 30 | def main(): 31 | ret = 1 32 | host = os.environ.get('TEST_CASSANDRA_HOST', '127.0.0.1') 33 | hosts = parse_hosts(host) 34 | with supervisor(): 35 | setup(40) 36 | while 1: 37 | try: 38 | pycassa.ConnectionPool( 39 | keyspace='MessageStore', server_list=hosts) 40 | break 41 | except pycassa.InvalidRequestException: 42 | # successful connection but missing schema 43 | break 44 | except pycassa.AllServersUnavailable: 45 | time.sleep(0.2) 46 | print(u'Waiting on connection pool for 0.2 seconds.') 47 | ret = os.system('make test-python') 48 | sys.exit(ret) 49 | 50 | if __name__ == '__main__': 51 | main() 52 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.9' 2 | 3 | import os 4 | 5 | from setuptools import setup, find_packages 6 | 7 | here = os.path.abspath(os.path.dirname(__file__)) 8 | with open(os.path.join(here, 'README.rst')) as f: 9 | README = f.read() 10 | with open(os.path.join(here, 'CHANGES.rst')) as f: 11 | CHANGES = f.read() 12 | 13 | reqs = [ 14 | 'distribute', 15 | 'nose', 16 | ] 17 | 18 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 19 | if not on_rtd: 20 | reqs.extend([ 21 | 'cdecimal', 22 | 'colander', 23 | 'gunicorn', 24 | 'metlog-py', 25 | 'mozsvc', 26 | 'pycassa', 27 | 'pyramid', 28 | 'thrift', 29 | 'ujson', 30 | 'webtest', 31 | 'zope.interface', 32 | ]) 33 | else: 34 | # Ensure if we *are* on RTD, we include the plugin we need 35 | reqs.extend([ 36 | 'sphinx_http_domain' 37 | ]) 38 | 39 | setup( 40 | name='queuey', 41 | description="RESTful Message Queue", 42 | version=__version__, 43 | long_description=README + '\n\n' + CHANGES, 44 | classifiers=[ 45 | "Intended Audience :: Developers", 46 | "Programming Language :: Python", 47 | "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", 48 | "Programming Language :: Python", 49 | "Programming Language :: Python :: 2.6", 50 | "Programming Language :: Python :: 2.7", 51 | ], 52 | keywords="message-queue notifications server messaging queue", 53 | author="Mozilla Foundation", 54 | author_email="bbangert@mozilla.com", 55 | url="http://queuey.readthedocs.org/", 56 | license="MPLv2.0", 57 | packages=find_packages(), 58 | test_suite="queuey.tests", 59 | include_package_data=True, 60 | zip_safe=False, 61 | tests_require=['pkginfo', 'Mock>=0.8rc2', 'nose', 'supervisor'], 62 | install_requires=reqs, 63 | entry_points=""" 64 | [paste.app_factory] 65 | main = queuey:main 66 | """, 67 | 68 | ) 69 | -------------------------------------------------------------------------------- /start.sh: -------------------------------------------------------------------------------- 1 | bin/gunicorn -w1 queuey.run -t 3000 --log-file - --log-level info 2 | -------------------------------------------------------------------------------- /var/README.txt: -------------------------------------------------------------------------------- 1 | # 
directory for data & socket files, etc. --------------------------------------------------------------------------------
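Taken together, the integration tests and views above double as a specification
of the HTTP API. The sketch below (an illustration, not part of the repository)
shows one way a client might exercise it, assuming a server started via
start.sh listening on the [server:main] port 5000 and the development
application key from queuey/tests/test_memory.ini; the queuey_request helper
is hypothetical:

import json
import urllib2

BASE = 'http://localhost:5000/v1/queuey'
HEADERS = {'Authorization': 'Application f25bfb8fe200475c8a0532a9cbe7651e'}

def queuey_request(url, data=None, extra=None):
    # Build a request carrying the application key; a non-None data
    # argument makes urllib2 issue a POST instead of a GET.
    req = urllib2.Request(url, data, dict(HEADERS, **(extra or {})))
    return json.loads(urllib2.urlopen(req).read())

# Create a queue; the server assigns a uuid4 hex name unless one is given.
queue_name = queuey_request(BASE, data='')['queue_name']

# Post a single message; X-TTL and X-Partition headers are optional.
queuey_request('%s/%s' % (BASE, queue_name), data='Hello there!',
               extra={'X-TTL': '300'})

# Read the messages back, oldest first.
for msg in queuey_request('%s/%s' % (BASE, queue_name))['messages']:
    print msg['body'], msg['timestamp']

The same message endpoint accepts a JSON batch when posted with
Content-Type: application/json, as test_make_queue_post_get_batches shows.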