├── .gitignore ├── .travis.yml ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── __init__.py ├── distributors ├── __init__.py ├── cache.py ├── clean.py ├── coffeescript.py ├── dist.py ├── downloads.py ├── settings.py ├── urlobject │ ├── __init__.py │ ├── netloc.py │ ├── path.py │ ├── ports.py │ ├── query_string.py │ └── urlobject.py └── xwhich.py ├── requirements.txt ├── setup.py └── signalqueue ├── __init__.py ├── admin.py ├── dispatcher.py ├── fixtures ├── TESTMODEL-DUMP.json └── TESTMODEL-ENQUEUED-SIGNALS.json ├── management ├── __init__.py └── commands │ ├── __init__.py │ ├── dequeue.py │ ├── dumpqueue.py │ ├── purgequeue.py │ └── runqueueserver.py ├── mappings.py ├── models.py ├── settings ├── __init__.py ├── redis-compatible.conf ├── redis.conf ├── test_async.py ├── test_sync.py └── urlconf.py ├── signals.py ├── static ├── signalqueue │ ├── coffee │ │ └── jquery.signalqueue.coffee │ └── js │ │ ├── jquery.queuestatus.js │ │ └── jquery.signalqueue.js └── socket.io-client │ ├── WebSocketMain.swf │ ├── WebSocketMainInsecure.swf │ ├── flashpolicy.xml │ ├── socket.io.js │ └── socket.io.min.js ├── templates ├── admin │ ├── app_index.html │ ├── index_with_queues.html │ └── sidebar_queue_module.html └── status.html ├── templatetags ├── __init__.py └── signalqueue_status.py ├── testrunner.py ├── tests.py ├── utils.py └── worker ├── __init__.py ├── backends.py ├── base.py ├── celeryqueue.py ├── poolqueue.py ├── supercell.py └── vortex.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | Icon* 3 | dist/ 4 | django_signalqueue.egg-info/ 5 | build/ 6 | MANIFEST 7 | 8 | signalqueue/migrations/ 9 | 10 | *.eprj 11 | *.svn 12 | *.pyc 13 | *~ 14 | \#*\# 15 | 16 | *.o 17 | *.so 18 | *.dmg 19 | *.jar 20 | 21 | .hg/ 22 | *.hg 23 | *.hgignore 24 | .hgignore 25 | *.dropbox 26 | .dropbox 27 | 28 | 29 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.7" 4 | # - "3.2" 5 | 6 | env: 7 | - DJANGO_SETTINGS_MODULE='settings' 8 | # command to install dependencies 9 | install: 10 | - "pip install -U . --use-mirrors" 11 | - "pip install django-signalqueue --use-mirrors" 12 | - "pip install -r requirements.txt --use-mirrors" 13 | 14 | # command to run tests 15 | script: 16 | # - python signalqueue/testrunner.py 17 | - "cd signalqueue" 18 | - "PYTHONPATH=${PYTHONPATH}:. python testrunner.py" -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2007-2008, Alexander Böhn 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, 7 | this list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright 10 | notice, this list of conditions and the following disclaimer in the 11 | documentation and/or other materials provided with the distribution. 12 | 13 | 3. Neither the name of django-imagekit nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 24 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | exclude *.pyc *~ .DS_Store 2 | include .gitignore 3 | include LICENSE.txt 4 | include README.md 5 | recursive-include signalqueue/fixtures * 6 | recursive-include signalqueue/static * 7 | recursive-include signalqueue/templates * 8 | recursive-include signalqueue/templates/admin * 9 | recursive-include signalqueue/settings * 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/fish2000/django-signalqueue.png?branch=master)](https://travis-ci.org/fish2000/django-signalqueue) 2 | 3 | django-signalqueue 4 | ================== 5 | 6 | After a certain amount of time anyone concerning themselves with the Django framework is going 7 | to ask the question: *I love Django's signals, indeed. But if only I could dispatch them asynchronously. 8 | Like, on some other thread or something, I don't really know.... Is that somehow possible?* 9 | 10 | Well, now you can easily do that! 
One contrived yet demonstrative example of such is this: 11 | you want to update an event log in your app when a user saves a form, but the update function you wrote does some gnarly aggregation so you can see each datum reflected in real-time. If you call it in a view it beachballs 12 | both the running app process and your users' patience. 13 | 14 | That's where django-signalqueue comes in. After you set it up, this is all you need to do: 15 | 16 | 17 | # yourapp/signals.py 18 | 19 | from signalqueue import dispatch 20 | from yourapp.logs import inefficient_log_update_function as log_update 21 | 22 | form_submit = dispatch.AsyncSignal( 23 | providing_args=['instance']) # define an asynchronous signal 24 | 25 | form_submit.connect(log_update) # doesn't have to be right here, as long 26 | # as it runs when the app starts up 27 | 28 | Now you can call the function in a view without blocking everything: 29 | 30 | # yourapp/views.py 31 | 32 | from yourapp import signals, models 33 | 34 | def process_form(request): 35 | pk = save_user_form(request) # your logic here 36 | obj = models.MyModl.objects.get(pk=pk) 37 | signals.form_submit.send(instance=obj) # returns quickly! 38 | return an_http_response_object # eventually return an HttpResponse 39 | 40 | 41 | Django-signalqueue sticks to Django's naming and calling conventions for signals. It gets out of your 42 | way and feels familiar, while granting you the power of async calls. 43 | 44 | 45 | With django-signalqueue, asynchronous dispatch is not even a thing — that's how easy it is. 46 | ================================================================================================= 47 | 48 | Setting It Up 49 | ------------- 50 | 51 | Watch, I'll show you. First, install django-signalqueue: 52 | 53 | $ pip install django-signalqueue # pulls in tornado and django-delegate, if need be 54 | 55 | ... 
you may also want some of these optional packages, if you don't have them already: 56 | 57 | $ brew install redis # s/brew/apt-get/ to taste 58 | $ pip install redis hiredis # recommended 59 | $ pip install ujson # recommended 60 | $ brew install yajl && pip install czjson yajl simplejson # these work too 61 | $ pip install nose rednose django-nose # for the tests 62 | 63 | Add django-signalqueue to your `INSTALLED_APPS`, and the settings for a queue, while you're in your `settings.py`: 64 | 65 | # settings.py 66 | 67 | INSTALLED_APPS = [ 68 | 'signalqueue', # ... 69 | ] 70 | 71 | SQ_QUEUES = { 72 | 'default': { # a 'default' queue in SQ_QUEUES is required 73 | 'ENGINE': 'signalqueue.worker.backends.RedisSetQueue', # also required - the queue's driver 74 | 'INTERVAL': 30, # required - the polling interval (30 <= ~1/3 sec) 75 | 'OPTIONS': dict(), 76 | }, 77 | } 78 | SQ_RUNMODE = 'SQ_ASYNC_REQUEST' # use async dispatch by default 79 | SQ_WORKER_PORT = 11231 # port to which the worker process binds 80 | 81 | Besides all that, you just need a call to `signalqueue.autodiscover()` in your root URLConf: 82 | 83 | # urls.py 84 | 85 | import signalqueue 86 | signalqueue.autodiscover() 87 | 88 | Now you can define async signals! 89 | --------------------------------- 90 | 91 | Asynchronous signals are instances of `signalqueue.dispatch.AsyncSignal` that you've defined in one of the following places: 92 | 93 | * `your_app/signals.py` (it's fine if you already use this file, as many do) 94 | * Modules named in a `settings.SQ_ADDITIONAL_SIGNALS` list or tuple 95 | * *Coming soon:* `signalqueue.register()` *-- so you can put them anywhere else.* 96 | 97 | AsyncSignals are subclasses of the familiar `django.dispatch.Signal` class. 
As such, you define AsyncSignals much like the Django signals you know and love: 98 | 99 | # yourapp/your_callbacks.py 100 | 101 | # the callback definition can go anywhere 102 | def callback(sender, **kwargs): 103 | print "I, %s, have been hereby dispatched asynchronously by %s, thanks to django-signalqueue." % ( 104 | str(kwargs['instance']), 105 | sender.__name__) 106 | 107 | 108 | # yourapp/signals.py 109 | 110 | from signalqueue.dispatch import AsyncSignal 111 | from yourapp.your_callbacks import callback 112 | 113 | my_signal = AsyncSignal(providing_args=['instance']) # the yuge. 114 | 115 | # while you can put your callbacks anywhere, be sure they're connect()-ed to your signals in 116 | # yourapp/signals.py or another module that loads when the app starts (e.g. models.py) 117 | 118 | my_signal.connect(callback) 119 | 120 | At the time of writing, arguments specified the providing_args list are assumed to be Django model instances. 121 | django-signalqueue serializes model instances by looking at: 122 | 123 | * the app name - `obj._meta.app_label`, 124 | * the model's class name - `obj.__class__.__name__.lower()`, 125 | * and the object's primary key value - `obj.pk`. 126 | 127 | You can define mappings for other object types (the curious can have a look in `signalqueue/mappings.py` for 128 | how that works) -- this part of the API is currently in flux as we're working towards the simplest, 129 | programmer-user-friendliest, most-dependency-unshackled method of implementation for the type stuff. 130 | 131 | BUT SO ANYWAY. 
To start up a worker, use the management command `runqueueserver`: 132 | 133 | $ python ./manage.py runqueueserver localhost:2345 134 | +++ django-signalqueue by Alexander Bohn -- http://objectsinspaceandtime.com/ 135 | 136 | Validating models...0 errors found 137 | 138 | Django version 1.4 pre-alpha SVN-16857, using settings 'settings' 139 | Tornado worker for queue "default" binding to http://127.0.0.1:11231/ 140 | Quit the server with CONTROL-C. 141 | 2011-09-30 15:25:21,098 [INFO] signalqueue: Dequeueing signal: None 142 | 2011-09-30 15:25:21,400 [INFO] signalqueue: Dequeueing signal: None 143 | 2011-09-30 15:25:21,701 [INFO] signalqueue: Dequeueing signal: None 144 | [... et cetera, ad nauseum] 145 | 146 | 147 | The `runqueueserver` process will sit in the foreground and blurt its output to stdout every time it polls 148 | the queue (in ANSI color!) which is handy for debugging your setup. 149 | 150 | Once you've got a worker process running, you can fire one of your signal asynchronously like so: 151 | 152 | >>> from yourapp.signals import my_signal 153 | >>> my_signal.send(sender=AModelClass, instance=a_model_instance) 154 | 155 | send() returns immediately after enqueueing the call, which is pushed onto a stack. The worker process, 156 | running in its own process, pops any available signals off the stack and executes them in its own instance 157 | of your Django app. 158 | 159 | You can fire async signals synchronously using send_now() -- the call will block until all of the connected 160 | callback handlers return (just like a call to a standard signals' send() method): 161 | 162 | >>> my_signal.send_now(sender=AModelClass, instance=a_model_instance) 163 | >>> my_signal.send_now(instance=a_model_instance) 164 | 165 | As with `django.dispatch.Signal.send()`, the sender kwarg is optional if your callback handlers don't expect it. 166 | 167 | *Tune in tomorrow for the astonishing conclusion of... 
from distutils.cmd import Command
import os

class build_js(Command):
    """ Top-level distutils command driving the JavaScript build:
        runs the CoffeeScript compile, library download, and uglify
        sub-commands, then moves the finished .js files into the
        package's output directory. """

    user_options = [
        ('inplace', 'i',
         'build JavaScript modules in-place (for development)')]

    description = 'Build and generate CoffeeScript/UglifyJS project code'

    def initialize_options(self):
        # command-specific options
        self.inplace = False
        self.js_package = None
        self.js_outdirs = {}
        # mirrored from the 'build' command (see finalize_options)
        self.build_lib = None
        self.build_temp = None
        self.debug = None
        self.force = None
        self.plat_name = None

    def finalize_options(self):
        self.js_outdirs.update(self.distribution.js_outdirs)
        self.set_undefined_options('build',
            ('build_lib', 'build_lib'),
            ('build_temp', 'build_temp'),
            ('debug', 'debug'),
            ('force', 'force'),
            ('plat_name', 'plat_name'))
        if self.js_package is None:
            self.js_package = self.distribution.js_package

    def get_js_outdir(self, package_name):
        """ Compute the directory finished JavaScript lands in, for
            the in-place, explicit-js-package, and fallback cases. """
        suffix = self.js_outdirs.get(package_name, 'js_out')

        if self.inplace:
            # develop mode: drop files inside the source package itself
            build_py = self.get_finalized_command('build_py')
            package_dir = os.path.abspath(build_py.get_package_dir(package_name))
            return os.path.join(package_dir, suffix)

        if self.js_package is not None:
            # dotted package path, resolved under the build tree
            segments = self.js_package.split('.')
            package_root = os.path.abspath(
                os.path.join(self.build_lib, *segments))
            return os.path.join(package_root, suffix)

        # fallback: directly under the build tree
        return os.path.abspath(os.path.join(self.build_lib, suffix))

    def run(self):
        if not self.inplace:
            self.run_command('build')
        for name in self.get_sub_commands():
            self.run_command(name)

        outdir = self.get_js_outdir(self.js_package)
        if not os.path.exists(outdir):
            raise IOError("JS output directory %s doesn't exist" % outdir)

        # harvest the uglified .js output into the final directory
        from distributors.coffeescript import uglification_build_out_dir
        for dirpath, _dirnames, filenames in os.walk(uglification_build_out_dir()):
            for filename in filenames:
                if filename.endswith('.js'):
                    self.move_file(os.path.join(dirpath, filename), outdir)

    sub_commands = [
        ('build_coffeescript', None),
        ('download_js_libs', None),
        ('uglify', None)]
'''
filecache -- http://code.google.com/p/filecache/

filecache is a decorator which saves the return value of functions even
after the interpreter dies. For example this is useful on functions that
download and parse webpages. All you need to do is specify how long
the return values should be cached (use seconds, like time.sleep).

USAGE:

    from filecache import filecache

    @filecache(24 * 60 * 60)
    def time_consuming_function(args):
        # etc

    @filecache(filecache.YEAR)
    def another_function(args):
        # etc

NOTE: All arguments of the decorated function and the return value need to be
    picklable for this to work.

NOTE: The cache isn't automatically cleaned, it is only overwritten. If your
    function can receive many different arguments that rarely repeat, your
    cache may forever grow. One day I might add a feature that once in every
    100 calls scans the db for outdated stuff and erases.

NOTE: This is less useful on methods of a class because the instance (self)
    is cached, and if the instance isn't the same, the cache isn't used. This
    makes sense because class methods are affected by changes in whatever
    is attached to self.

Tested on python 2.7 and 3.1

License: BSD, do what you wish with this. Could be awesome to hear if you found
it useful and/or you have suggestions. ubershmekel at gmail

A trick to invalidate a single value:

    @filecache.filecache
    def somefunc(x, y, z):
        return x * y * z

    del somefunc._db[filecache._args_key(somefunc, (1,2,3), {})]
    # or just iterate over somefunc._db (it's a shelve, like a dict) to find
    # the right key.
'''

import collections as _collections
import datetime as _datetime
import functools as _functools
import inspect as _inspect
import os as _os
import pickle as _pickle
import shelve as _shelve
import sys as _sys
import time as _time
import traceback as _traceback
import types

# A cache entry: the time it was stored plus the stored return value.
_retval = _collections.namedtuple('_retval', 'timesig data')
_SRC_DIR = _os.path.dirname(_os.path.abspath(__file__))

# Convenience durations (in seconds) for `seconds_of_validity`.
SECOND = 1
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
MONTH = 30 * DAY
YEAR = 365 * DAY
FOREVER = None

# Shelve handles already opened by this process, keyed by cache filename --
# the same shelve file must never be opened twice concurrently.
OPEN_DBS = dict()

def _get_cache_name(function):
    """
    Return a filename for the cache db of the module defining `function`.
    """
    module_name = _inspect.getfile(function)
    cache_name = module_name

    # fix for '<string>' or '<stdin>' in exec or interpreter usage --
    # angle brackets are not filesystem-safe on all platforms.
    cache_name = cache_name.replace('<', '_lt_')
    cache_name = cache_name.replace('>', '_gt_')

    cache_name += '.cache'
    return cache_name


def _log_error(error_str):
    """ Append `error_str` to filecache's own error log.
        Never raises: the cache must not break the host program. """
    try:
        error_log_fname = _os.path.join(_SRC_DIR, 'filecache.err.log')
        # mode 'a' creates the file when missing, so no isfile() check needed
        with open(error_log_fname, 'a') as fhand:
            fhand.write('[%s] %s\r\n' % (_datetime.datetime.now().isoformat(),
                                         error_str))
    except Exception:
        pass

def _args_key(function, args, kwargs):
    """ Build the shelve key for a call: function name + pickled arguments. """
    arguments = (args, kwargs)
    # Sadly this is python version dependant
    if _sys.version_info[0] == 2:
        arguments_pickle = _pickle.dumps(arguments)
    else:
        # NOTE: protocol=0 so it's ascii, this is crucial for py3k
        # because shelve only works with proper strings.
        # Otherwise, we'd get an exception because
        # function.__name__ is str but dumps returns bytes.
        arguments_pickle = _pickle.dumps(arguments, protocol=0).decode('ascii')

    key = function.__name__ + arguments_pickle
    return key

def filecache(seconds_of_validity=None, fail_silently=False):
    '''
    Decorator factory: cache the decorated function's return values in a
    per-module shelve file for `seconds_of_validity` seconds (forever when
    None). With `fail_silently`, cache-layer errors are logged and ignored
    instead of propagating.

    Also supports bare '@filecache' usage: when called directly with a
    function, decorates it with the default settings.
    '''
    def filecache_decorator(function):
        @_functools.wraps(function)
        def function_with_cache(*args, **kwargs):
            try:
                key = _args_key(function, args, kwargs)

                if key in function._db:
                    rv = function._db[key]
                    if seconds_of_validity is None or \
                       _time.time() - rv.timesig < seconds_of_validity:
                        return rv.data
            except Exception:
                # in any case of failure, don't let filecache break the program
                error_str = _traceback.format_exc()
                _log_error(error_str)
                if not fail_silently:
                    raise

            retval = function(*args, **kwargs)

            # store in cache
            # NOTE: _db.sync() matters -- without it the cache may not
            # survive an abrupt interpreter death (e.g. Ctrl-Break).
            try:
                function._db[key] = _retval(_time.time(), retval)
                function._db.sync()
            except Exception:
                # in any case of failure, don't let filecache break the program
                error_str = _traceback.format_exc()
                _log_error(error_str)
                if not fail_silently:
                    raise

            return retval

        # make sure the module's cache shelve is open (exactly once per file)
        if not hasattr(function, '_db'):
            cache_name = _get_cache_name(function)
            if cache_name in OPEN_DBS:
                function._db = OPEN_DBS[cache_name]
            else:
                function._db = _shelve.open(cache_name)
                OPEN_DBS[cache_name] = function._db

        function_with_cache._db = function._db

        return function_with_cache

    if isinstance(seconds_of_validity, types.FunctionType):
        # support for when people use '@filecache.filecache'
        # instead of '@filecache.filecache()'
        func = seconds_of_validity
        return filecache_decorator(func)

    return filecache_decorator
| shutil.rmtree(build_coffeecup, ignore_errors=True) 27 | 28 | print('removing', build_starbucks) 29 | shutil.rmtree(build_starbucks, ignore_errors=True) 30 | 31 | print('removing', build_uglies) 32 | shutil.rmtree(build_uglies, ignore_errors=True) 33 | 34 | # below this was stuff that was here before -- 35 | 36 | print('removing', self.build_base) 37 | shutil.rmtree(self.build_base, ignore_errors=True) 38 | 39 | print('removing', self.build_lib) 40 | shutil.rmtree(self.build_lib, ignore_errors=True) 41 | 42 | print('removing', self.build_scripts) 43 | shutil.rmtree(self.build_scripts, ignore_errors=True) 44 | 45 | print('removing', self.build_temp) 46 | shutil.rmtree(self.build_temp, ignore_errors=True) 47 | 48 | print('removing', self.bdist_base) 49 | shutil.rmtree(self.bdist_base, ignore_errors=True) -------------------------------------------------------------------------------- /distributors/coffeescript.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import print_function, with_statement 3 | 4 | from distutils.cmd import Command 5 | from distutils.spawn import spawn 6 | from os import environ, makedirs 7 | from os.path import isdir, join, exists, abspath, basename 8 | 9 | from xwhich import xwhich 10 | 11 | JAVASCRIPT_LIB_DLCACHE_DIR = join('build', 'starbucks') 12 | COFFEESCRIPT_BUILD_OUT_DIR = join('build', 'coffeecup') 13 | UGLIFICATION_BUILD_MID_DIR = join('build', 'cathostel') 14 | UGLIFICATION_BUILD_OUT_DIR = join('build', 'disgusting') 15 | 16 | def javascript_lib_dlcache_dir(): 17 | global JAVASCRIPT_LIB_DLCACHE_DIR 18 | return JAVASCRIPT_LIB_DLCACHE_DIR 19 | 20 | def coffeescript_build_out_dir(): 21 | global COFFEESCRIPT_BUILD_OUT_DIR 22 | return COFFEESCRIPT_BUILD_OUT_DIR 23 | 24 | def uglification_build_mezzo_dir(): 25 | global UGLIFICATION_BUILD_MID_DIR 26 | return UGLIFICATION_BUILD_MID_DIR 27 | 28 | def uglification_build_out_dir(): 29 | global UGLIFICATION_BUILD_OUT_DIR 30 | return 
def js_download_storage():
    """ Return a Django file-storage object rooted at the JS download cache. """
    from downloads import URLRetrievalStorage
    return URLRetrievalStorage(
        location=javascript_lib_dlcache_dir(),
        base_url="file://%s" % abspath(javascript_lib_dlcache_dir()))

def coffeescript_node_lib_cmds():
    """ Candidate 'coffee-script/bin' directories derived from $NODE_PATH. """
    return [join(pth, 'coffee-script', 'bin') \
        for pth in environ.get('NODE_PATH', '').split(':') \
        if bool(len(pth)) and isdir(pth)]

def uglification_node_lib_cmds():
    """ Candidate 'uglify-js/bin' directories derived from $NODE_PATH. """
    return [join(pth, 'uglify-js', 'bin') \
        for pth in environ.get('NODE_PATH', '').split(':') \
        if bool(len(pth)) and isdir(pth)]

def coffeescript_cmd():
    """ Absolute path of the `coffee` compiler, if one can be found. """
    return xwhich('coffee',
        also_look=coffeescript_node_lib_cmds())

def uglification_cmd():
    """ Absolute path of the `uglifyjs` command, if one can be found. """
    return xwhich('uglifyjs',
        also_look=uglification_node_lib_cmds())

class build_coffeescript(Command):
    """ Distutils command for CoffeeScript compilation.
        Based largely on the fine build-system architecture
        of Jep. See also:

        https://github.com/mrj0/jep/blob/master/commands/java.py

        ... for the orig. """

    outdir = None
    user_options = [
        ('coffee=', 'C',
         'use coffeescript command (default: {0})'.format(
            coffeescript_cmd()))]
    # typo fixed in user-visible text: "CoffeScript" -> "CoffeeScript"
    description = 'Compile CoffeeScript source to JavaScript'

    def initialize_options(self):
        build_coffeescript.outdir = coffeescript_build_out_dir()
        if not exists(build_coffeescript.outdir):
            makedirs(build_coffeescript.outdir)
        self.cs_files = []
        self.coffee = coffeescript_cmd()

    def finalize_options(self):
        # source list comes from the SQDistribution `cs_files` setup() kwarg
        self.cs_files = self.distribution.cs_files

    def demitasse(self, cs_file):
        """ Compile one CoffeeScript source file into the output directory. """
        spawn([self.coffee,
            '--nodejs', '--no-deprecation',
            '-o', build_coffeescript.outdir,
            '-c', cs_file])

    def run(self):
        print('')
        for cs_file in list(self.cs_files):
            self.demitasse(cs_file)


class download_js_libs(Command):
    """ Distutils command that fetches the JavaScript library files named
        in the `js_libs` setup() kwarg, caching them locally so repeat
        builds skip the download. """

    outdir = None
    user_options = []
    description = 'Fetch JavaScript library files'

    def initialize_options(self):
        download_js_libs.outdir = javascript_lib_dlcache_dir()
        if not exists(download_js_libs.outdir):
            makedirs(download_js_libs.outdir)
        self.js_libs = []
        self.js_storage = None

    def finalize_options(self):
        self.js_libs = self.distribution.js_libs
        self.js_storage = js_download_storage()

    def run(self):
        print('')
        i = 1

        for js_lib in list(self.js_libs):

            if not self.js_storage.downloaded(js_lib):

                print("retrieving %s" % js_lib)
                js_dl = self.js_storage.download(js_lib,
                    content_type='application/javascript')

                # prefix with a running index so later concatenation
                # preserves the order the libraries were declared in
                self.js_storage.safely_move(
                    js_dl,
                    "%s-%s" % (i, js_dl.name),
                    clobber=True)
                i += 1

            else:
                print("already downloaded %s" % js_lib)
                print("up-to-date copy in %s" % self.js_storage.downloaded(js_lib))
class uglify(Command):
    """ Concatenate the downloaded JS libraries with the JavaScript
        generated from CoffeeScript, then compress the result with
        UglifyJS. """

    outdir = None
    user_options = [
        ('uglifyjs=', 'U',
         'use uglifyjs command (default: {0})'.format(uglification_cmd())),
        ('pedantic', 'P',
         'emit uglifyjs debug-level trace messages during the uglification.')]
    description = 'Uglification: concatenate generated and library JavaScript, '
    description += 'and compress the remainder'

    def initialize_options(self):
        uglify.indir = coffeescript_build_out_dir()
        uglify.mezzodir = uglification_build_mezzo_dir()
        uglify.outdir = uglification_build_out_dir()

        if not exists(uglify.mezzodir):
            makedirs(uglify.mezzodir)
        if not exists(uglify.outdir):
            makedirs(uglify.outdir)

        # separator inserted between concatenated code blocks
        uglify.reeturn = "\n\n"

        self.pretty_files = []
        self.pretty_store = None
        self.pretty_libs = []
        self.catty_files = []
        self.uglifier = None

    def finalize_options(self):
        # NOTE: map() results are wrapped in list() throughout: on Python 3
        # map() returns a one-shot iterator, which would break the len()
        # calls and re-iteration in run(). On Python 2 this is a no-op.

        # `pretty_files`: JavaScript emitted by `build_coffeescript` --
        # same basenames as the CoffeeScript sources, `.js` suffix.
        self.pretty_files = list(map(
            lambda pn: join(uglify.indir, pn),
            map(basename,
                map(lambda fn: fn.replace('.coffee', '.js'),
                    self.distribution.cs_files))))

        # `pretty_libs`: uncompressed library files fetched by
        # `download_js_libs`. Don't feed precompressed libraries through
        # here -- compressing twice yields poor results.
        self.pretty_store = js_download_storage()
        self.pretty_libs = list(map(self.pretty_store.path,
            filter(lambda fn: fn.endswith('.js'),
                self.pretty_store.listdir('')[-1])))

        # `catty_files`: intermediate concatenation targets. Each contains
        # the libraries -- *in the order* given by the `js_libs` setup()
        # kwarg -- followed by one block of generated JS, joined by blank
        # lines. These files don't exist yet; names derive from the
        # CoffeeScript source filenames.
        self.catty_files = list(map(
            lambda pn: join(uglify.mezzodir, pn),
            map(basename,
                map(lambda fn: fn.replace('.coffee', '.libs.js'),
                    self.distribution.cs_files))))

        # `ugly_files`: the command's final outputs -- filenames only
        # (not absolute paths), since the files don't exist yet.
        self.ugly_files = list(map(basename,
            map(lambda fn: fn.replace('.coffee', '.libs.min.js'),
                self.distribution.cs_files)))

        # `uglifier`: path of the UglifyJS executable to invoke.
        self.uglifier = uglification_cmd()

    def run(self):
        print('')

        print("prepending libraries to generated code")
        print("\t- %1s post-CoffeeScript JS files" % len(self.pretty_files))
        print("\t- %1s downloaded JS libraries" % len(self.pretty_libs))

        print('')

        # Concatenate the libraries first, appending each block of
        # post-CoffeeScript generated JS onto the amalgamated libraries.
        for pretty, catty in zip(
            list(self.pretty_files),
            list(self.catty_files)):

            pretties = list(self.pretty_libs)
            pretties.append(pretty)
            catastrophe = self.catinate(pretties)

            self.cathole(catastrophe,
                catty, clobber=True)
            print("\t> %10sb wrote to %s" % (len(catastrophe), catty))

        print('')

        print('uglifying concatenated modules...')

        for catter, gross in zip(
            list(self.catty_files),
            list(self.ugly_files)):
            self.grossitate(catter, gross)

    def cathole(self, do_what, where_exactly, clobber=True):
        """ Write the string `do_what` to the file at `where_exactly`.
            Refuses to overwrite an existing file unless `clobber` is set,
            and refuses suspiciously small (<10 byte) payloads. """
        if exists(where_exactly) and not clobber:
            # BUGFIX: the target path is now interpolated into the message
            raise IOError(
                "*** can't concatinate into %s: file already exists" % where_exactly)

        if not bool(do_what) or len(do_what) < 10:
            # BUGFIX: the target path is now interpolated into the message
            raise ValueError(
                "*** can't write <10b into %s: not enough data" % where_exactly)

        # text mode: `do_what` is a str (catinate builds strings)
        with open(where_exactly, 'w') as cat:
            cat.write(do_what)
            cat.flush()
        return

    def catinate(self, *js_files):
        """ Return the contents of the given files concatenated in order,
            each followed by the blank-line separator. """
        catout = ""
        for catin in list(*js_files):
            with open(catin, 'r') as cat:
                catout += cat.read()
                catout += uglify.reeturn
        return catout

    def grossitate(self, in_file, out_filename):
        ''' cat %s | /usr/local/bin/uglifyjs > %s '''
        spawn([self.uglifier,
            '--verbose', '--no-copyright',
            '--unsafe', '--lift-vars',
            '-o', join(uglify.outdir, out_filename),
            '-c', in_file])


# -- distributors/dist.py --
from distutils.dist import Distribution

class SQDistribution(Distribution):
    """ Distribution subclass declaring the JS-build setup() keywords
        (js_package, cs_files, js_outdirs, js_libs) before the base
        class consumes `attrs`. """
    def __init__(self, attrs=None):
        self.js_package = None
        self.cs_files = None
        self.js_outdirs = None
        self.js_libs = None
        Distribution.__init__(self, attrs)
class SQDistribution(Distribution):
    """ Distribution subclass that teaches distutils about the extra
        JS/CoffeeScript build options this package's setup() passes. """

    def __init__(self, attrs=None):
        # Declare the custom options up front (all default to None) so that
        # Distribution.__init__ can populate them from `attrs` without
        # complaining about unknown keywords.
        for option in ('js_package', 'cs_files', 'js_outdirs', 'js_libs'):
            setattr(self, option, None)
        Distribution.__init__(self, attrs)
""" 37 | import requests 38 | 39 | self._source_url = url 40 | 41 | try: 42 | request = requests.get(url) 43 | except ( 44 | requests.exceptions.TooManyRedirects, 45 | requests.exceptions.ConnectionError, 46 | requests.exceptions.SSLError, 47 | requests.exceptions.Timeout), err: 48 | print("*** Couldn't save %s to a file" % url, 49 | file=sys.stderr) 50 | print("*** (%s)" % err, 51 | file=sys.stderr) 52 | content = '' 53 | else: 54 | content = request.ok and request.content or '' 55 | 56 | content_type = request.ok and \ 57 | request.headers.get('content-type') or \ 58 | kwargs.pop('content_type', 59 | URLRequestFile.DEFAULT_TYPE) 60 | 61 | self._source_content_type = content_type 62 | self._source_encoding = request.ok and request.encoding or None 63 | 64 | super(URLRequestFile, self).__init__( 65 | filename, content, content_type) 66 | self.charset = self._source_encoding 67 | 68 | @property 69 | def source_url(self): 70 | return getattr(self, '_source_url', None) 71 | 72 | @property 73 | def source_content_type(self): 74 | return getattr(self, '_source_content_type', None) 75 | 76 | @property 77 | def source_encoding(self): 78 | return getattr(self, '_source_encoding', None) 79 | 80 | @property 81 | def source_charset(self): 82 | return self.source_encoding 83 | 84 | 85 | class URLRetrievalStorage(FileSystemStorage): 86 | 87 | DEFAULT_EXT = '_noext.txt' 88 | MINIMUM_BYTE_SIZE = 10 89 | 90 | def _extension(self, mime_type=DEFAULT_EXT): 91 | """ Get the common-law file extension for a given MIME type.""" 92 | exts = mimetypes.guess_all_extensions(mime_type) 93 | if '.jpe' in exts: 94 | exts.remove('.jpe') # WHO USES THAT. 95 | ext = bool(exts) and \ 96 | exts[0] or \ 97 | URLRetrievalStorage.DEFAULT_EXT 98 | return ext 99 | 100 | def download(self, urlstr, **kwargs): 101 | """ Call url_rs.download('URL') to save that URL's contents 102 | into a new file within the storages' filesystem. 
103 | Optionally setting the 'clobber' keyword to False will raise 104 | an exception before overwriting existing data. 105 | Any other keyword args are passed wholesale to URLRequestFile's 106 | constructor when the new file is saved locally. """ 107 | import requests 108 | import socket 109 | 110 | url = URL(urlstr) 111 | clobber = bool(kwargs.pop('clobber', True)) 112 | 113 | try: 114 | headstat = requests.head(url) 115 | except ( 116 | requests.exceptions.TooManyRedirects, 117 | requests.exceptions.ConnectionError, 118 | requests.exceptions.SSLError, 119 | requests.exceptions.Timeout, 120 | socket.gaierror, 121 | socket.herror, 122 | socket.sslerror, 123 | socket.timeout), err: 124 | print("*** HTTP HEAD failed for %s" % url, 125 | file=sys.stderr) 126 | print("--- (%s)" % err, 127 | file=sys.stderr) 128 | return None 129 | 130 | ct = kwargs.pop('content_type', 131 | headstat.headers.get('content-type', 132 | URLRequestFile.DEFAULT_TYPE)) 133 | if ';' in ct: 134 | ct = ct.split(';')[0] 135 | 136 | ext = self._extension(ct) 137 | fn = "%s%s" % (url.hash, ext) 138 | ff = URLRequestFile(url, fn, **kwargs) 139 | 140 | if self.exists(fn) and not clobber: 141 | raise IOError( 142 | "*** Can't overwrite existing file %s (clobber=%s)" % (fn, clobber)) 143 | 144 | if ff.size < URLRetrievalStorage.MINIMUM_BYTE_SIZE: 145 | raise ValueError( 146 | "*** Bailing -- download's size smaller than MINIMUM_BYTE_SIZE: %sb" % 147 | URLRequestFile.MINIMUM_BYTE_SIZE) 148 | 149 | self.save(fn, ff) 150 | return ff 151 | 152 | def downloaded(self, urlstr, path=None): 153 | """ We say that a remote file has been 'downloaded' to a local directory 154 | if we can spot the SHA1 of its URL inside exactly one local filename. 
""" 155 | 156 | path = self.path(path or '') 157 | oneornone = filter( 158 | lambda fn: fn.find(URL(urlstr).hash) > -1, 159 | self.listdir(path)[-1]) 160 | 161 | if len(oneornone) is 1: 162 | one = oneornone[0] 163 | return bool(self.size(one)) and self.path(one) or None 164 | return None 165 | 166 | def local_content_type(self, urlstr, path=None): 167 | """ Guess an existant local file's mimetype from its 168 | corresponding remote URL... it sounds circuitous I know. """ 169 | if self.exists(self.downloaded(urlstr, path)): 170 | return mimetypes.guess_type(urlstr) 171 | 172 | def safely_rename(self, url_request_file, new_name, clobber=False): 173 | """ Pass a URLRequestFile, with a new filename, to move or rename. """ 174 | new_path = safe_join( 175 | dirname(self.path(url_request_file.name)), 176 | new_name) 177 | 178 | file_move_safe( 179 | self.path(url_request_file.name), 180 | new_path, 181 | allow_overwrite=clobber) 182 | 183 | url_request_file.name = new_name 184 | 185 | safely_move = safely_rename 186 | 187 | 188 | if __name__ == "__main__": 189 | from pprint import pformat 190 | import tempfile 191 | td = tempfile.mkdtemp() 192 | fs = URLRetrievalStorage( 193 | location=td, base_url='http://owls.com/discount') 194 | 195 | stuff_to_grab = ( 196 | 'http://objectsinspaceandtime.com/', 197 | 'http://objectsinspaceandtime.com/index.html', 198 | 'http://objectsinspaceandtime.com/css/fn_typography.css', 199 | 'http://scs.viceland.com/int/v17n11/htdocs/bright-lights-591/it-s-over.jpg', 200 | 'http://yo-dogggggg.com/i-dont-exist') 201 | 202 | print('> directory:', td) 203 | print('> storage:', fs) 204 | print('') 205 | 206 | for thing in stuff_to_grab: 207 | print('-' * 133) 208 | print('') 209 | 210 | print('\t +++ URL: %s' % thing) 211 | 212 | ff = fs.download(thing) 213 | success = bool(fs.downloaded(thing)) 214 | 215 | print('\t +++ success: %s' % str(success)) 216 | 217 | if success: 218 | print('\t +++ local content/type (guess): %s' % 
fs.local_content_type(thing)[0]) 219 | if ff is not None: 220 | print('\t +++ file object: %s' % ff) 221 | print('\t +++ path:', fs.path(ff.name)) 222 | print('\t +++ FS url:', fs.url(ff.name)) 223 | print('\t +++ orig URL:', ff.source_url) 224 | print('') 225 | print(pformat(ff.__dict__, 226 | indent=8)) 227 | 228 | print('') 229 | 230 | print('-' * 133) 231 | print('') 232 | 233 | yieldem = fs.listdir('')[-1] 234 | 235 | print('> fs.listdir(\'\')[-1] yields %s files:' % len(yieldem)) 236 | print('') 237 | print(pformat(yieldem, 238 | indent=8)) 239 | 240 | print('') 241 | print('') 242 | -------------------------------------------------------------------------------- /distributors/settings.py: -------------------------------------------------------------------------------- 1 | 2 | # blank Django settings file (irritatingly necessary to use its file API) -------------------------------------------------------------------------------- /distributors/urlobject/__init__.py: -------------------------------------------------------------------------------- 1 | from urlobject import URLObject 2 | 3 | # package path-extension snippet. 4 | from pkgutil import extend_path 5 | __path__ = extend_path(__path__, __name__) 6 | -------------------------------------------------------------------------------- /distributors/urlobject/netloc.py: -------------------------------------------------------------------------------- 1 | import urlparse 2 | 3 | 4 | class Netloc(unicode): 5 | 6 | """ 7 | A netloc string (``username:password@hostname:port``). 8 | 9 | Contains methods for accessing and (non-destructively) modifying those four 10 | components of the netloc. All methods return new instances. 
    """

    def __repr__(self):
        return 'Netloc(%r)' % (unicode(self),)

    @classmethod
    def __unsplit(cls, username, password, hostname, port):
        """Put together a :class:`Netloc` from its constituent parts."""
        # Name-mangled class-private constructor; reached via cls.__unsplit only.
        auth_string = u''
        if username:
            auth_string = username
            if password:
                auth_string += u':' + password
            auth_string += '@'
        port_string = u''
        if port is not None:
            port_string = u':%d' % port
        return cls(auth_string + hostname + port_string)

    @property
    def username(self):
        """The username portion of this netloc, or ``None``."""
        return self.__urlsplit.username

    def with_username(self, username):
        """Replace or add a username to this netloc."""
        return self.__replace(username=username)

    def without_username(self):
        """Remove any username (and password) from this netloc."""
        # A password cannot survive without a username, so drop it first.
        return self.without_password().with_username('')

    @property
    def password(self):
        """The password portion of this netloc, or ``None``."""
        return self.__urlsplit.password

    def with_password(self, password):

        """
        Replace or add a password to this netloc.

        Raises a ``ValueError`` if you attempt to add a password to a netloc
        with no username.
        """

        if password and not self.username:
            raise ValueError("Can't set a password on a netloc with no username")
        return self.__replace(password=password)

    def without_password(self):
        """Remove any password from this netloc."""
        return self.with_password('')

    @property
    def auth(self):
        """The username and password of this netloc as a 2-tuple."""
        return (self.username, self.password)

    def with_auth(self, username, *password):
        """Replace or add a username and password in one method call."""
        netloc = self.without_auth()
        if password:
            return netloc.with_username(username).with_password(*password)
        return netloc.with_username(username)

    def without_auth(self):
        # Strip both credentials; password first (see without_username).
        return self.without_password().without_username()

    @property
    def hostname(self):
        """The hostname portion of this netloc."""
        return self.__urlsplit.hostname

    def with_hostname(self, hostname):
        """Replace the hostname on this netloc."""
        return self.__replace(hostname=hostname)

    @property
    def port(self):
        """The port number on this netloc (as an ``int``), or ``None``."""
        return self.__urlsplit.port

    def with_port(self, port):
        """Replace or add a port number to this netloc."""
        return self.__replace(port=port)

    def without_port(self):
        """Remove any port number from this netloc."""
        return self.__replace(port=None)

    @property
    def __urlsplit(self):
        # Wrap this bare netloc in a SplitResult so urlparse does the
        # username/password/hostname/port parsing for us.
        return urlparse.SplitResult('', self, '', '', '')

    def __replace(self, **params):
        """Replace any number of components on this netloc."""
        unsplit_args = {'username': self.username,
                        'password': self.password,
                        'hostname': self.hostname,
                        'port': self.port}
        unsplit_args.update(params)
        return self.__unsplit(**unsplit_args)
# -*- coding: utf-8 -*-

import posixpath
import urllib
import urlparse


class Root(object):

    """A descriptor which always returns the root path."""

    def __get__(self, instance, cls):
        # `cls` is the owning class, so the root comes back as that type.
        return cls('/')


class URLPath(unicode):

    # Class-level descriptor: URLPath.root / instance.root is URLPath('/').
    root = Root()

    def __repr__(self):
        return 'URLPath(%r)' % (unicode(self),)

    @classmethod
    def join_segments(cls, segments, absolute=True):
        """Create a :class:`URLPath` from an iterable of segments."""
        # NOTE(review): `absolute` is accepted but never consulted -- the
        # result is always rooted at '/'. Confirm before relying on it.
        path = cls('/')
        for segment in segments:
            path = path.add_segment(segment)
        return path

    @property
    def segments(self):
        """
        Split this path into (decoded) segments.

        >>> URLPath(u'/a/b/c').segments
        (u'a', u'b', u'c')

        Non-leaf nodes will have a trailing empty string, and percent encodes
        will be decoded:

        >>> URLPath(u'/a%20b/c%20d/').segments
        (u'a b', u'c d', u'')
        """
        segments = tuple(map(path_decode, self.split('/')))
        if segments[0] == u'':
            return segments[1:]
        return segments

    @property
    def parent(self):
        """
        The parent of this node.

        >>> URLPath(u'/a/b/c').parent
        URLPath(u'/a/b/')
        >>> URLPath(u'/foo/bar/').parent
        URLPath(u'/foo/')
        """
        if self.is_leaf:
            return self.relative('.')
        return self.relative('..')

    @property
    def is_leaf(self):
        """
        Is this path a leaf node?

        >>> URLPath(u'/a/b/c').is_leaf
        True
        >>> URLPath(u'/a/b/').is_leaf
        False
        """
        # NOTE(review): an empty path short-circuits to u'' (falsy), not
        # a literal False, despite the doctest's bool flavor.
        return self and self.segments[-1] != u''

    @property
    def is_relative(self):
        """
        Is this path relative?

        >>> URLPath(u'a/b/c').is_relative
        True
        >>> URLPath(u'/a/b/c').is_relative
        False
        """
        return self[0] != u'/'

    @property
    def is_absolute(self):
        """
        Is this path absolute?

        >>> URLPath(u'a/b/c').is_absolute
        False
        >>> URLPath(u'/a/b/c').is_absolute
        True
        """
        return self[0] == u'/'

    def relative(self, rel_path):
        """
        Resolve a relative path against this one.

        >>> URLPath(u'/a/b/c').relative('.')
        URLPath(u'/a/b/')
        >>> URLPath(u'/a/b/c').relative('d')
        URLPath(u'/a/b/d')
        >>> URLPath(u'/a/b/c').relative('../d')
        URLPath(u'/a/d')
        """
        return type(self)(urlparse.urljoin(self, rel_path))

    def add_segment(self, segment):
        u"""
        Add a segment to this path.

        >>> URLPath(u'/a/b/').add_segment('c')
        URLPath(u'/a/b/c')

        Non-ASCII and reserved characters (including slashes) will be encoded:

        >>> URLPath(u'/a/b/').add_segment(u'dé/f')
        URLPath(u'/a/b/d%C3%A9%2Ff')
        """
        return type(self)(posixpath.join(self, path_encode(segment)))

    def add(self, path):
        u"""
        Add a partial path to this one.

        The only difference between this and :meth:`add_segment` is that slash
        characters will not be encoded, making it suitable for adding more than
        one path segment at a time:

        >>> URLPath(u'/a/b/').add(u'dé/f/g')
        URLPath(u'/a/b/d%C3%A9/f/g')
        """
        return type(self)(posixpath.join(self, path_encode(path, safe='/')))


def path_encode(string, safe=''):
    # Percent-encode as UTF-8; `safe` lists characters left unescaped.
    return urllib.quote(string.encode('utf-8'), safe=safe)

def path_decode(string):
    # Inverse of path_encode: unquote, then decode the UTF-8 bytes.
    return urllib.unquote(string).decode('utf-8')


# --- /distributors/urlobject/ports.py ---
"""Default port numbers for the URI schemes supported by urlparse."""

DEFAULT_PORTS = {
    'ftp': 21,
    'gopher': 70,
    'hdl': 2641,
    'http': 80,
    'https': 443,
    'imap': 143,
    'mms': 651,
    'news': 2009,
    'nntp': 119,
    'prospero': 191,
    'rsync': 873,
    'rtsp': 554,
    'rtspu': 554,
    'sftp': 115,
    'shttp': 80,
    'sip': 5060,
    'sips': 5061,
    'snews': 2009,
    'svn': 3690,
    'svn+ssh': 22,
    'telnet': 23,
}


# --- /distributors/urlobject/query_string.py (head) ---

import collections
import re
import urllib
import urlparse


class QueryString(unicode):

    def __repr__(self):
        return 'QueryString(%r)' % (unicode(self),)
22 | name_value = name_value_pair.split('=', 1) 23 | # 'param=' => ('param', None) 24 | if len(name_value) == 1: 25 | name, value = name_value + [None] 26 | # 'param=value' => ('param', 'value') 27 | # 'param=' => ('param', '') 28 | else: 29 | name, value = name_value 30 | 31 | name = qs_decode(name) 32 | if value is not None: 33 | value = qs_decode(value) 34 | 35 | result.append((name, value)) 36 | return result 37 | 38 | @property 39 | def dict(self): 40 | return dict(self.list) 41 | 42 | @property 43 | def multi_dict(self): 44 | result = collections.defaultdict(list) 45 | for name, value in self.list: 46 | result[name].append(value) 47 | return dict(result) 48 | 49 | def add_param(self, name, value): 50 | if value is None: 51 | parameter = qs_encode(name) 52 | else: 53 | parameter = qs_encode(name) + '=' + qs_encode(value) 54 | if self: 55 | return type(self)(self + '&' + parameter) 56 | return type(self)(parameter) 57 | 58 | def add_params(self, *args, **kwargs): 59 | params_list = get_params_list(*args, **kwargs) 60 | new = self 61 | for name, value in params_list: 62 | new = new.add_param(name, value) 63 | return new 64 | 65 | def del_param(self, name): 66 | params = [(n, v) for n, v in self.list if n != name] 67 | qs = type(self)('') 68 | for param in params: 69 | qs = qs.add_param(*param) 70 | return qs 71 | 72 | def set_param(self, name, value): 73 | return self.del_param(name).add_param(name, value) 74 | 75 | def set_params(self, *args, **kwargs): 76 | params_list = get_params_list(*args, **kwargs) 77 | new = self 78 | for name, value in params_list: 79 | new = new.set_param(name, value) 80 | return new 81 | 82 | def del_params(self, params): 83 | deleted = set(params) 84 | params = [(name, value) for name, value in self.list 85 | if name not in deleted] 86 | qs = type(self)('') 87 | for param in params: 88 | qs = qs.add_param(*param) 89 | return qs 90 | 91 | 92 | qs_encode = lambda s: urllib.quote(s.encode('utf-8')) 93 | qs_decode = lambda s: 
# Decode one name or value: '+' means space, then percent-decode as UTF-8.
qs_decode = lambda s: urllib.unquote(str(s).replace('+', ' ')).decode('utf-8')


def get_params_list(*args, **kwargs):
    """Turn dict-like arguments into an ordered list of pairs.

    Accepts at most one positional argument -- either a mapping (anything
    with .items()) or an iterable of (name, value) pairs -- and appends
    any keyword arguments after it. Raises TypeError for 2+ positionals.
    """
    params = []
    if args:
        if len(args) > 1:
            # BUGFIX: report the actual argument count (the message was
            # hard-coded to "got 2" regardless of how many were passed).
            raise TypeError("Expected at most 1 arguments, got %d" % len(args))
        arg = args[0]
        if hasattr(arg, 'items'):
            params.extend(arg.items())
        else:
            params.extend(list(arg))
    if kwargs:
        params.extend(kwargs.items())
    return params
22 | """ 23 | 24 | def __repr__(self): 25 | return 'URLObject(%r)' % (unicode(self),) 26 | 27 | @property 28 | def hash(self): 29 | return hashlib.sha1(self).hexdigest() 30 | 31 | @property 32 | def content_type(self): 33 | return mimetype.guess_type(self) 34 | 35 | @property 36 | def scheme(self): 37 | return urlparse.urlsplit(self).scheme 38 | def with_scheme(self, scheme): 39 | return self.__replace(scheme=scheme) 40 | 41 | @property 42 | def netloc(self): 43 | return Netloc(urlparse.urlsplit(self).netloc) 44 | def with_netloc(self, netloc): 45 | return self.__replace(netloc=netloc) 46 | 47 | @property 48 | def username(self): 49 | return self.netloc.username 50 | def with_username(self, username): 51 | return self.with_netloc(self.netloc.with_username(username)) 52 | def without_username(self): 53 | return self.with_netloc(self.netloc.without_username()) 54 | 55 | @property 56 | def password(self): 57 | return self.netloc.password 58 | def with_password(self, password): 59 | return self.with_netloc(self.netloc.with_password(password)) 60 | def without_password(self): 61 | return self.with_netloc(self.netloc.without_password()) 62 | 63 | @property 64 | def hostname(self): 65 | return self.netloc.hostname 66 | def with_hostname(self, hostname): 67 | return self.with_netloc(self.netloc.with_hostname(hostname)) 68 | 69 | @property 70 | def port(self): 71 | return self.netloc.port 72 | def with_port(self, port): 73 | return self.with_netloc(self.netloc.with_port(port)) 74 | def without_port(self): 75 | return self.with_netloc(self.netloc.without_port()) 76 | 77 | @property 78 | def auth(self): 79 | return self.netloc.auth 80 | def with_auth(self, *auth): 81 | return self.with_netloc(self.netloc.with_auth(*auth)) 82 | def without_auth(self): 83 | return self.with_netloc(self.netloc.without_auth()) 84 | 85 | @property 86 | def default_port(self): 87 | """ 88 | The destination port number for this URL. 
    @property
    def path(self):
        return URLPath(urlparse.urlsplit(self).path)
    def with_path(self, path):
        return self.__replace(path=path)

    @property
    def root(self):
        # The same URL with its path collapsed to '/'.
        return self.with_path('/')

    @property
    def parent(self):
        return self.with_path(self.path.parent)

    @property
    def is_leaf(self):
        return self.path.is_leaf

    def add_path_segment(self, segment):
        # Appends exactly one (percent-encoded) segment.
        return self.with_path(self.path.add_segment(segment))

    def add_path(self, partial_path):
        # Appends a multi-segment partial path; slashes stay unencoded.
        return self.with_path(self.path.add(partial_path))

    @property
    def query(self):
        return QueryString(urlparse.urlsplit(self).query)
    def with_query(self, query):
        return self.__replace(query=query)
    def without_query(self):
        return self.__replace(query='')

    @property
    def query_list(self):
        return self.query.list

    @property
    def query_dict(self):
        # Last value wins for duplicated parameter names.
        return self.query.dict

    @property
    def query_multi_dict(self):
        return self.query.multi_dict

    def add_query_param(self, name, value):
        return self.with_query(self.query.add_param(name, value))
    def add_query_params(self, *args, **kwargs):
        return self.with_query(self.query.add_params(*args, **kwargs))

    def set_query_param(self, name, value):
        return self.with_query(self.query.set_param(name, value))
    def set_query_params(self, *args, **kwargs):
        return self.with_query(self.query.set_params(*args, **kwargs))

    def del_query_param(self, name):
        return self.with_query(self.query.del_param(name))
    def del_query_params(self, params):
        return self.with_query(self.query.del_params(params))

    @property
    def fragment(self):
        return path_decode(urlparse.urlsplit(self).fragment)
    def with_fragment(self, fragment):
        return self.__replace(fragment=path_encode(fragment))
    def without_fragment(self):
        return self.__replace(fragment='')

    def relative(self, other):
        """Resolve another URL relative to this one."""
        # Relative URL resolution involves cascading through the properties
        # from left to right, replacing
        other = type(self)(other)
        if other.scheme:
            return other
        elif other.netloc:
            return other.with_scheme(self.scheme)
        elif other.path:
            return other.with_scheme(self.scheme).with_netloc(self.netloc) \
                    .with_path(self.path.relative(other.path))
        elif other.query:
            return other.with_scheme(self.scheme).with_netloc(self.netloc) \
                    .with_path(self.path)
        elif other.fragment:
            return other.with_scheme(self.scheme).with_netloc(self.netloc) \
                    .with_path(self.path).with_query(self.query)
        # Empty string just removes fragment; it's treated as a path meaning
        # 'the current location'.
        return self.without_fragment()

    def __replace(self, **replace):
        """Replace a field in the ``urlparse.SplitResult`` for this URL."""
        return type(self)(urlparse.urlunsplit(
            urlparse.urlsplit(self)._replace(**replace)))


# Pre-2.6 compatibility: those SplitResults have no _replace, so patch
# in an equivalent that rebuilds the tuple field by field.
if not hasattr(urlparse, 'ResultMixin'):
    def _replace(split_result, **replace):
        return urlparse.SplitResult(
            **dict((attr, replace.get(attr, getattr(split_result, attr)))
                for attr in ('scheme', 'netloc', 'path', 'query', 'fragment')))
    urlparse.BaseResult._replace = _replace
    del _replace


# --- /distributors/xwhich.py (head) ---

from os import environ, access, pathsep, X_OK
from os.path import exists, isdir, split, join

# True when `fpth` exists and is executable by the current user.
is_exe = lambda fpth: exists(fpth) and access(fpth, X_OK)
def xwhich(program, also_look=[]):
    """ UNIX `which` analogue. Derived from:
        https://github.com/amoffat/pbs/blob/master/pbs.py#L95)

        `program` may be a bare name (searched for on $PATH plus any extra
        directories in `also_look`) or a path containing a directory
        component (checked directly). Returns the full path of the first
        executable hit, or None. """
    def _is_exe(fpth):
        # A candidate must both exist and carry the execute bit.
        return exists(fpth) and access(fpth, X_OK)

    fpath, fname = split(program)
    if fpath:
        # An explicit path was given: no searching, just check it.
        if _is_exe(program):
            return program
    else:
        # BUGFIX: environ.get() -- a missing PATH shouldn't KeyError.
        paths = environ.get("PATH", "").split(pathsep)
        try:
            paths += list(also_look)
        except (TypeError, ValueError):
            # `also_look` wasn't iterable; ignore it (best-effort).
            pass
        for path in paths:
            exe_file = join(path, program)
            if _is_exe(exe_file):
                return exe_file
    return None

def which(program):
    """ Plain `which`: search $PATH only. """
    return xwhich(program)


if __name__ == '__main__':
    programs_to_try = (
        'python',
        'ls',
        'wget',
        'curl',
        'coffee',
        'lessc',
        'yo-dogg',
    )

    # BUGFIX: don't KeyError when NODE_PATH isn't set in the environment.
    node_path = environ.get('NODE_PATH', '')
    ali = [join(pth, 'coffee-script', 'bin')
           for pth in node_path.split(':')
           if bool(len(pth)) and isdir(pth)]

    for p in programs_to_try:
        # BUGFIX: print() as a function, so the demo also runs under
        # Python 3 (the rest of the package already uses print_function).
        print("\t %20s --> %s" % (("which('%s')" % p), xwhich(p, also_look=ali)))
"""

try:
    from setuptools import setup
except ImportError:
    # setuptools isn't available -- fall back to plain distutils.
    from distutils.core import setup

from distributors.dist import SQDistribution
from distributors.clean import really_clean
from distributors.coffeescript import build_coffeescript
from distributors.coffeescript import download_js_libs
from distributors.coffeescript import uglify
from distributors import build_js

__author__ = 'Alexander Bohn'
__version__ = (0, 5, 1)

import os

def get_coffeescript_files():
    """Collect every *.coffee file under signalqueue's static coffee tree.

    Paths are relative to the repo root; the build_coffeescript command
    compiles each of these to JS.
    """
    out = []
    pattern = '.coffee'
    for root, dirs, files in os.walk(os.path.join(
        'signalqueue', 'static', 'signalqueue', 'coffee')):
        for f in files:
            if f.endswith(pattern):
                out.append(os.path.join(root, f))
    return out

setup(
    name='django-signalqueue',
    version='%s.%s.%s' % __version__,
    description='Truly asynchronous signal dispatch for Django!',
    author=__author__,
    author_email='fish2000@gmail.com',
    maintainer=__author__,
    maintainer_email='fish2000@gmail.com',

    license='BSD',
    url='http://github.com/fish2000/django-signalqueue/',
    keywords=['django','signals','async','asynchronous','queue'],

    # Custom Distribution subclass that understands the js_*/cs_* options.
    distclass=SQDistribution,
    js_package='signalqueue',
    cs_files=get_coffeescript_files(),
    js_libs=[
        'https://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.js',
        'http://cdn.socket.io/stable/socket.io.js'],
    js_outdirs={
        'signalqueue': os.path.join('static', 'signalqueue', 'js') },

    cmdclass={
        'build_js': build_js,
        'clean': really_clean,

        'build_coffeescript': build_coffeescript,
        'download_js_libs': download_js_libs,
        'uglify': uglify },

    entry_points={
        'console_scripts': ['signalqueue-test = signalqueue.testrunner:main'] },

    include_package_data=True,
    package_data={
        'signalqueue': [
            'fixtures/*.json',
            'settings/*.conf',
            'static/signalqueue/js/*.js',
            'static/signalqueue/coffee/*.coffee',
            'static/socket.io-client/*',
            'templates/*.html',
            'templates/admin/*.html']},

    packages=[
        'distributors',
        'distributors.urlobject',
        'signalqueue',
        'signalqueue.management',
        'signalqueue.management.commands',
        'signalqueue.settings',
        'signalqueue.templatetags',
        'signalqueue.worker'],

    setup_requires=[
        'django>=1.4'],

    install_requires=[
        'django<1.5',
        'django-delegate>=0.2.2',
        'tornado', 'tornadio2',
        'redis', 'requests',
        'setproctitle'],

    tests_require=[
        'nose', 'rednose', 'django-nose'],

    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Topic :: Utilities'])
19 | from pkgutil import extend_path 20 | __path__ = extend_path(__path__, __name__) 21 | 22 | import threading 23 | from collections import defaultdict 24 | from signalqueue.dispatcher import AsyncSignal 25 | 26 | SQ_RUNMODES = { 27 | 'SQ_SYNC': 1, # synchronous operation -- fire signals concurrently with save() and cache pragma 28 | 'SQ_ASYNC_MGMT': 2, # async operation -- we are running from the command line, fire signals concurrently 29 | 'SQ_ASYNC_DAEMON': 3, # async operation -- deque images from cache, fire signals but don't save 30 | 'SQ_ASYNC_REQUEST': 4, # async operation -- queue up signals on save() and cache pragma 31 | } 32 | 33 | SQ_DMV = defaultdict(set) 34 | 35 | class SignalRegistryError(AttributeError): 36 | pass 37 | 38 | class SignalDispatchError(AttributeError): 39 | pass 40 | 41 | def register(signal, name, regkey=None): 42 | if regkey is None: 43 | if hasattr(signal, '__module__'): 44 | regkey = signal.__module__ 45 | else: 46 | raise SignalRegistryError("A regkey must be supplied to register a signal without a __module__ attribute: '%s'" % ( 47 | signal,)) 48 | 49 | if not isinstance(signal, AsyncSignal): 50 | raise SignalRegistryError("Can only register AsyncSignal or descendant types, not %s instance '%s'" % ( 51 | signal.__class__.__name__, signal)) 52 | 53 | #from signalqueue.utils import logg 54 | #logg.debug("*** %0s %14s '%s'" % (regkey, signal.__class__.__name__, name)) 55 | 56 | if not getattr(signal, 'name', None): 57 | signal.name = name 58 | if not getattr(signal, 'regkey', None): 59 | signal.regkey = regkey 60 | SQ_DMV[regkey].add(signal) 61 | 62 | def autodiscover(): 63 | """ 64 | Auto-discover signals.py modules in the apps in INSTALLED_APPS; 65 | and fail silently when not present. 66 | 67 | N.B. this autdiscover() implementation is based on dajaxice_autodiscover in the 68 | Dajaxice module: 69 | 70 | https://github.com/jorgebastida/django-dajaxice/blob/master/dajaxice/core/Dajaxice.py#L155 71 | 72 | ... 
which in turn was inspired/copied from django.contrib.admin.autodiscover(). 73 | One key modification is our use of threading.Lock instead of the global state variables 74 | used by Dajaxice. 75 | 76 | """ 77 | 78 | autodiscover.lock.acquire() 79 | 80 | try: 81 | import imp 82 | from django.conf import settings 83 | from signalqueue.dispatcher import AsyncSignal 84 | from signalqueue.utils import logg 85 | 86 | # Gather signals that any of the installed apps define in 87 | # their respective signals.py files: 88 | logg.debug("*** Registering signals in %s installed apps ..." % ( 89 | len(settings.INSTALLED_APPS),)) 90 | 91 | from signalqueue.utils import import_module 92 | 93 | for appstring in settings.INSTALLED_APPS: 94 | 95 | try: 96 | app = import_module(appstring) 97 | except AttributeError: 98 | continue 99 | 100 | try: 101 | imp.find_module('signals', app.__path__) 102 | except ImportError: 103 | continue 104 | 105 | modstring = "%s.signals" % appstring 106 | mod = import_module(modstring) 107 | 108 | logg.debug("*** Searching for signals in '%s' ..." % ( 109 | (modstring,))) 110 | 111 | for name, thing in mod.__dict__.items(): 112 | if isinstance(thing, AsyncSignal): 113 | logg.debug("*** Registering %s: %s.%s ..." % ( 114 | thing.__class__.__name__, modstring, name)) 115 | register(thing, name, modstring) 116 | 117 | if hasattr(settings, "SQ_ADDITIONAL_SIGNALS"): 118 | if isinstance(settings.SQ_ADDITIONAL_SIGNALS, (list, tuple)): 119 | 120 | logg.debug("*** Registering signals from %s SQ_ADDITIONAL_SIGNALS modules ..." 
% ( 121 | len(settings.SQ_ADDITIONAL_SIGNALS),)) 122 | 123 | for addendumstring in settings.SQ_ADDITIONAL_SIGNALS: 124 | 125 | try: 126 | addendum = import_module(addendumstring) 127 | except AttributeError, err: 128 | # TODO: log this in a reliably sane manner 129 | logg.warning("--- SQ_ADDITIONAL_SIGNALS module '%s' import failure: %s" % ( 130 | addendumstring, err)) 131 | continue 132 | 133 | logg.debug("*** Searching for signals in '%s' ..." % ( 134 | (addendumstring,))) 135 | 136 | for name, thing in addendum.__dict__.items(): 137 | if isinstance(thing, AsyncSignal): 138 | logg.debug("*** Registering %s: %s.%s ..." % ( 139 | thing.__class__.__name__, addendumstring, name)) 140 | register(thing, name, addendumstring) 141 | 142 | finally: 143 | autodiscover.lock.release() 144 | 145 | autodiscover.lock = threading.Lock() 146 | 147 | def clear(): 148 | """ Clear the signal registry. """ 149 | 150 | autodiscover.lock.acquire() 151 | 152 | try: 153 | SQ_DMV = defaultdict(set) 154 | 155 | finally: 156 | autodiscover.lock.release() 157 | 158 | def rediscover(): 159 | clear() 160 | autodiscover() -------------------------------------------------------------------------------- /signalqueue/admin.py: -------------------------------------------------------------------------------- 1 | import os 2 | from django.contrib import admin 3 | from signalqueue.utils import SQ_ROOT 4 | 5 | admin.site.index_template = os.path.join(SQ_ROOT, 'templates/admin/index_with_queues.html') 6 | admin.site.app_index_template = os.path.join(SQ_ROOT, 'templates/admin/app_index.html') 7 | 8 | import signalqueue.models 9 | admin.site.register(signalqueue.models.EnqueuedSignal) 10 | -------------------------------------------------------------------------------- /signalqueue/dispatcher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | dispatch.py 5 | 6 | Created by FI$H 2000 on 2011-09-09. 
7 | Copyright (c) 2011 Objects In Space And Time, LLC. All rights reserved. 8 | 9 | """ 10 | from django.dispatch import Signal 11 | 12 | class AsyncSignal(Signal): 13 | 14 | regkey = None 15 | name = None 16 | runmode = None 17 | 18 | queue_name = None 19 | mapping = None 20 | 21 | def __init__(self, providing_args=None, queue_name='default'): 22 | from signalqueue import mappings 23 | 24 | self.queue_name = queue_name 25 | self.mapping = mappings.MapperToPedigreeIndex() 26 | just_the_args = [] 27 | 28 | if isinstance(providing_args, dict): 29 | for providing_arg, MappingCls in providing_args.items(): 30 | just_the_args.append(providing_arg) 31 | self.mapping.update(providing_args) 32 | 33 | else: # list, iterable, whatev. 34 | just_the_args.extend(providing_args) 35 | 36 | super(AsyncSignal, self).__init__(providing_args=just_the_args) 37 | 38 | def send_now(self, sender, **named): 39 | return super(AsyncSignal, self).send(sender=sender, **named) 40 | 41 | def enqueue(self, sender, **named): 42 | from signalqueue import SQ_RUNMODES as runmodes 43 | if self.runmode == runmodes['SQ_SYNC']: 44 | from signalqueue import SignalDispatchError 45 | raise SignalDispatchError("WTF: enqueue() called in SQ_SYNC mode") 46 | 47 | from signalqueue.worker import queues 48 | return queues[self.queue_name].enqueue(self, sender=sender, **named) 49 | 50 | def send(self, sender, **named): 51 | from signalqueue import SQ_RUNMODES as runmodes 52 | from signalqueue.worker import queues 53 | from signalqueue.utils import logg 54 | 55 | self.runmode = int(named.pop('runmode', queues._runmode)) 56 | 57 | #logg.debug("--- send() called, runmode = %s" % self.runmode) 58 | 59 | if self.runmode: 60 | 61 | if self.runmode == runmodes['SQ_ASYNC_REQUEST']: 62 | # it's a web request -- enqueue it 63 | return self.enqueue(sender, **named) 64 | 65 | elif self.runmode == runmodes['SQ_ASYNC_DAEMON']: 66 | # signal sent in daemon mode -- enqueue it 67 | return self.enqueue(sender, **named) 68 | 69 | 
elif self.runmode == runmodes['SQ_ASYNC_MGMT']: 70 | # signal sent in command mode -- fire away 71 | return self.send_now(sender, **named) 72 | 73 | elif self.runmode == runmodes['SQ_SYNC']: 74 | # fire normally 75 | return self.send_now(sender, **named) 76 | 77 | else: 78 | # unknown runmode value -- fire normally 79 | logg.info( 80 | "*** send() called with an unknown runmode: '%s' -- firing sync signal." % self.runmode) 81 | return self.send_now(sender, **named) 82 | else: 83 | # fire normally 84 | logg.info("*** send() called and no runmode configured -- firing sync signal.") 85 | return self.send_now(sender, **named) 86 | 87 | -------------------------------------------------------------------------------- /signalqueue/fixtures/TESTMODEL-DUMP.json: -------------------------------------------------------------------------------- 1 | 2 | [ 3 | { 4 | "pk": 1, 5 | "model": "signalqueue.testmodel", 6 | "fields": 7 | { 8 | "name": "Yo dogg: 9cfb0ec6515b4ae690e786a7f7511d76" 9 | } 10 | }, 11 | { 12 | "pk": 2, 13 | "model": "signalqueue.testmodel", 14 | "fields": 15 | { 16 | "name": "Yo dogg: 386b1381c0be4d6a80d249eab749e86f" 17 | } 18 | }, 19 | { 20 | "pk": 3, 21 | "model": "signalqueue.testmodel", 22 | "fields": 23 | { 24 | "name": "Yo dogg: 16aef994247443ce92c0ff5a622c8c62" 25 | } 26 | }, 27 | { 28 | "pk": 4, 29 | "model": "signalqueue.testmodel", 30 | "fields": 31 | { 32 | "name": "Yo dogg: fbd9ddee0a9a4eb58b1948546abb4a8a" 33 | } 34 | }, 35 | { 36 | "pk": 5, 37 | "model": "signalqueue.testmodel", 38 | "fields": 39 | { 40 | "name": "Yo dogg: 5277cae36baa4a8a93f73906257922cd" 41 | } 42 | }, 43 | { 44 | "pk": 6, 45 | "model": "signalqueue.testmodel", 46 | "fields": 47 | { 48 | "name": "Yo dogg: 15b1b29b92eb4f16802069eb9d1f7454" 49 | } 50 | }, 51 | { 52 | "pk": 7, 53 | "model": "signalqueue.testmodel", 54 | "fields": 55 | { 56 | "name": "Yo dogg: 527d2b20ecee4f9ba1f8a366b3f0c48a" 57 | } 58 | }, 59 | { 60 | "pk": 8, 61 | "model": "signalqueue.testmodel", 62 | 
"fields": 63 | { 64 | "name": "Yo dogg: c27119738b43434eaaa83d7c817083b3" 65 | } 66 | }, 67 | { 68 | "pk": 9, 69 | "model": "signalqueue.testmodel", 70 | "fields": 71 | { 72 | "name": "Yo dogg: af978ab8cf384c98a3069743e3b67b4a" 73 | } 74 | }, 75 | { 76 | "pk": 10, 77 | "model": "signalqueue.testmodel", 78 | "fields": 79 | { 80 | "name": "Yo dogg: 3bf0fa40dbc743bfa956bc58794dc8f0" 81 | } 82 | }, 83 | { 84 | "pk": 11, 85 | "model": "signalqueue.testmodel", 86 | "fields": 87 | { 88 | "name": "Yo dogg: e57315a46c734b26a3345782f91d334a" 89 | } 90 | }, 91 | { 92 | "pk": 12, 93 | "model": "signalqueue.testmodel", 94 | "fields": 95 | { 96 | "name": "Yo dogg: 1637312cfb0446ab9b211fa9d70f9a83" 97 | } 98 | }, 99 | { 100 | "pk": 13, 101 | "model": "signalqueue.testmodel", 102 | "fields": 103 | { 104 | "name": "Yo dogg: 9ff3043f182d444c82c401d009ac8746" 105 | } 106 | }, 107 | { 108 | "pk": 14, 109 | "model": "signalqueue.testmodel", 110 | "fields": 111 | { 112 | "name": "Yo dogg: 525ce95d5e1e44d7b121fd2b41af47da" 113 | } 114 | }, 115 | { 116 | "pk": 15, 117 | "model": "signalqueue.testmodel", 118 | "fields": 119 | { 120 | "name": "Yo dogg: 3f60c9b055444233ace96768736e89c4" 121 | } 122 | }, 123 | { 124 | "pk": 16, 125 | "model": "signalqueue.testmodel", 126 | "fields": 127 | { 128 | "name": "Yo dogg: 52de4844d8124f349093cb091c3971f7" 129 | } 130 | } 131 | 132 | ] -------------------------------------------------------------------------------- /signalqueue/fixtures/TESTMODEL-ENQUEUED-SIGNALS.json: -------------------------------------------------------------------------------- 1 | 2 | [ 3 | { 4 | "pk": 1, 5 | "model": "signalqueue.enqueuedsignal", 6 | "fields": 7 | { 8 | "enqueued": true, 9 | "queue_name": "db", 10 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":1,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 11 | } 12 
| }, 13 | { 14 | "pk": 2, 15 | "model": "signalqueue.enqueuedsignal", 16 | "fields": 17 | { 18 | "enqueued": true, 19 | "queue_name": "db", 20 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":2,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 21 | } 22 | }, 23 | { 24 | "pk": 3, 25 | "model": "signalqueue.enqueuedsignal", 26 | "fields": 27 | { 28 | "enqueued": true, 29 | "queue_name": "db", 30 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":3,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 31 | } 32 | }, 33 | { 34 | "pk": 4, 35 | "model": "signalqueue.enqueuedsignal", 36 | "fields": 37 | { 38 | "enqueued": true, 39 | "queue_name": "db", 40 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":4,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 41 | } 42 | }, 43 | { 44 | "pk": 5, 45 | "model": "signalqueue.enqueuedsignal", 46 | "fields": 47 | { 48 | "enqueued": true, 49 | "queue_name": "db", 50 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":5,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 51 | } 52 | }, 53 | { 54 | "pk": 6, 55 | "model": "signalqueue.enqueuedsignal", 56 | "fields": 57 | { 58 | "enqueued": true, 59 | "queue_name": "db", 60 | "value": 
"{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":6,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 61 | } 62 | }, 63 | { 64 | "pk": 7, 65 | "model": "signalqueue.enqueuedsignal", 66 | "fields": 67 | { 68 | "enqueued": true, 69 | "queue_name": "db", 70 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":7,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 71 | } 72 | }, 73 | { 74 | "pk": 8, 75 | "model": "signalqueue.enqueuedsignal", 76 | "fields": 77 | { 78 | "enqueued": true, 79 | "queue_name": "db", 80 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":8,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 81 | } 82 | }, 83 | { 84 | "pk": 9, 85 | "model": "signalqueue.enqueuedsignal", 86 | "fields": 87 | { 88 | "enqueued": true, 89 | "queue_name": "db", 90 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":9,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 91 | } 92 | }, 93 | { 94 | "pk": 10, 95 | "model": "signalqueue.enqueuedsignal", 96 | "fields": 97 | { 98 | "enqueued": true, 99 | "queue_name": "db", 100 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":10,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 101 | } 102 | }, 103 | { 104 | "pk": 11, 105 | "model": "signalqueue.enqueuedsignal", 106 | 
"fields": 107 | { 108 | "enqueued": true, 109 | "queue_name": "db", 110 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":11,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 111 | } 112 | }, 113 | { 114 | "pk": 12, 115 | "model": "signalqueue.enqueuedsignal", 116 | "fields": 117 | { 118 | "enqueued": true, 119 | "queue_name": "db", 120 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":12,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 121 | } 122 | }, 123 | { 124 | "pk": 13, 125 | "model": "signalqueue.enqueuedsignal", 126 | "fields": 127 | { 128 | "enqueued": true, 129 | "queue_name": "db", 130 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":13,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 131 | } 132 | }, 133 | { 134 | "pk": 14, 135 | "model": "signalqueue.enqueuedsignal", 136 | "fields": 137 | { 138 | "enqueued": true, 139 | "queue_name": "db", 140 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":14,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 141 | } 142 | }, 143 | { 144 | "pk": 15, 145 | "model": "signalqueue.enqueuedsignal", 146 | "fields": 147 | { 148 | "enqueued": true, 149 | "queue_name": "db", 150 | "value": 
"{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":15,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 151 | } 152 | }, 153 | { 154 | "pk": 16, 155 | "model": "signalqueue.enqueuedsignal", 156 | "fields": 157 | { 158 | "enqueued": true, 159 | "queue_name": "db", 160 | "value": "{\"instance\":{\"modl_name\":\"testmodel\",\"obj_id\":16,\"app_label\":\"signalqueue\"},\"signal\":{\"signalqueue.tests\":\"test_sync_function_signal\"},\"sender\":{\"modl_name\":\"testmodel\",\"app_label\":\"signalqueue\"},\"enqueue_runmode\":4}" 161 | } 162 | } 163 | 164 | ] -------------------------------------------------------------------------------- /signalqueue/management/__init__.py: -------------------------------------------------------------------------------- 1 | # package path-extension snippet. 2 | from pkgutil import extend_path 3 | __path__ = extend_path(__path__, __name__) 4 | -------------------------------------------------------------------------------- /signalqueue/management/commands/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | # package path-extension snippet. 
5 | from pkgutil import extend_path 6 | __path__ = extend_path(__path__, __name__) 7 | 8 | 9 | def echo_banner(): 10 | print u"+++ django-signalqueue by Alexander Bohn -- http://objectsinspaceandtime.com/" 11 | print u"" 12 | 13 | -------------------------------------------------------------------------------- /signalqueue/management/commands/dequeue.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | import sys, os 4 | from django.core.management.base import BaseCommand, CommandError 5 | from django.core.exceptions import ImproperlyConfigured 6 | #from pprint import pformat 7 | from optparse import make_option 8 | 9 | from . import echo_banner 10 | 11 | class Command(BaseCommand): 12 | 13 | option_list = BaseCommand.option_list + ( 14 | make_option('--queuename', '-n', dest='queue_name', default='default', 15 | help="Name of queue, as specified in settings.py (defaults to 'default')", 16 | ), 17 | ) 18 | 19 | help = ('Flushes a signal queue, executing all enqueued signals.') 20 | requires_model_validation = True 21 | can_import_settings = True 22 | 23 | def handle(self, *args, **options): 24 | import signalqueue 25 | signalqueue.autodiscover() 26 | echo_banner() 27 | try: 28 | return self.flush_signal_queue(args, options) 29 | except ImproperlyConfigured, err: 30 | self.echo("*** ERROR in configuration: %s" % err) 31 | self.echo("*** Check the signalqueue-related options in your settings.py.") 32 | 33 | def echo(self, *args, **kwargs): 34 | """ Print in color to stdout. """ 35 | text = " ".join([str(item) for item in args]) 36 | DEBUG = False 37 | 38 | if DEBUG: 39 | color = kwargs.get("color",32) 40 | self.stdout.write("\033[0;%dm%s\033[0;m" % (color, text)) 41 | 42 | else: 43 | print text 44 | 45 | def flush_signal_queue(self, apps, options): 46 | """ 47 | Flushes the named signal queue, executing all enqueued signals. 
48 | 49 | """ 50 | from django.conf import settings 51 | from signalqueue import SQ_RUNMODES as runmodes 52 | from signalqueue.worker import backends 53 | 54 | queue_name = options.get('queue_name') 55 | queues = backends.ConnectionHandler(settings.SQ_QUEUES, runmodes['SQ_ASYNC_MGMT']) 56 | 57 | if not queue_name in queues: 58 | self.echo("\n--- No definition found for a queue named '%s'" % (queue_name,), color=16) 59 | self.echo("\n--- Your defined queues have these names: '%s'" % ("', '".join(queues.keys()),), color=16) 60 | self.echo("\n>>> Exiting ...\n\n", color=16) 61 | sys.exit(2) 62 | 63 | queue = queues[queue_name] 64 | 65 | try: 66 | queue_available = queue.ping() 67 | except: 68 | self.echo("\n--- Can't ping the backend for %s named '%s'" % (queue.__class__.__name__, queue_name), color=16) 69 | self.echo("\n--- Is the server running?", color=16) 70 | self.echo("\n>>> Exiting ...\n\n", color=16) 71 | sys.exit(2) 72 | 73 | if not queue_available: 74 | self.echo("\n--- Can't ping the backend for %s named '%s'" % (queue.__class__.__name__, queue_name), color=16) 75 | self.echo("\n--- Is the server running?", color=16) 76 | self.echo("\n>>> Exiting ...\n\n", color=16) 77 | sys.exit(2) 78 | 79 | self.echo("\n>>> Flushing signal queue '%s' -- %s enqueued signals total" % ( 80 | queue.queue_name, queue.count()), color=31) 81 | 82 | from django.db.models.loading import cache 83 | if queue.count() > 0: 84 | for signalblip in queue: 85 | #self.echo("\n>>> Signal: ", color=31) 86 | #self.echo("\n%s" % pformat(signalblip), color=31) 87 | 88 | sender_dict = signalblip.get('sender') 89 | sender = cache.get_model(str(sender_dict['app_label']), str(sender_dict['modl_name'])) 90 | signal = signalblip.get('signal') 91 | 92 | self.echo(">>> Processing signal sent by %s.%s: %s.%s" % ( 93 | sender._meta.app_label, sender.__name__, signal.keys()[0], signal.values()[0]), color=31) 94 | 95 | queue.dequeue(queued_signal=signalblip) 96 | 97 | self.echo(">>> Done flushing signal 
queue '%s' -- %s enqueued signals remaining" % ( 98 | queue.queue_name, queue.count()), color=31) 99 | self.echo("\n") 100 | 101 | -------------------------------------------------------------------------------- /signalqueue/management/commands/dumpqueue.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | import sys, os 4 | from django.core.management.base import BaseCommand, CommandError 5 | from django.core.exceptions import ImproperlyConfigured 6 | #from pprint import pformat 7 | from optparse import make_option 8 | 9 | from . import echo_banner 10 | 11 | class Command(BaseCommand): 12 | 13 | option_list = BaseCommand.option_list + ( 14 | make_option('--queuename', '-n', dest='queue_name', default='default', 15 | help="Name of queue, as specified in settings.py (defaults to 'default')", 16 | ), 17 | make_option('--indent', '-t', dest='indent', default='0', 18 | help="Levels to indent the output.", 19 | ), 20 | ) 21 | 22 | help = ('Dumps the contents of a signal queue to a serialized format.') 23 | requires_model_validation = True 24 | can_import_settings = True 25 | 26 | def handle(self, *args, **options): 27 | echo_banner() 28 | try: 29 | return self.dump_queue(args, options) 30 | except ImproperlyConfigured, err: 31 | self.echo("*** ERROR in configuration: %s" % err) 32 | self.echo("*** Check the signalqueue-related options in your settings.py.") 33 | 34 | def echo(self, *args, **kwargs): 35 | """ Print in color to stdout. 
""" 36 | text = " ".join([str(item) for item in args]) 37 | DEBUG = False 38 | 39 | if DEBUG: 40 | color = kwargs.get("color",32) 41 | self.stdout.write("\033[0;%dm%s\033[0;m" % (color, text)) 42 | 43 | else: 44 | print text 45 | 46 | def dump_queue(self, apps, options): 47 | from django.conf import settings 48 | from signalqueue import SQ_RUNMODES as runmodes 49 | from signalqueue.worker import backends 50 | import json as library_json 51 | 52 | queue_name = options.get('queue_name') 53 | indent = int(options.get('indent')) 54 | queues = backends.ConnectionHandler(settings.SQ_QUEUES, runmodes['SQ_ASYNC_MGMT']) 55 | 56 | if not queue_name in queues: 57 | self.echo("\n--- No definition found for a queue named '%s'" % queue_name, 58 | color=16) 59 | self.echo("\n--- Your defined queues have these names: '%s'" % ( 60 | "', '".join(queues.keys()),), 61 | color=16) 62 | self.echo("\n>>> Exiting ...\n\n", 63 | color=16) 64 | sys.exit(2) 65 | 66 | queue = queues[queue_name] 67 | 68 | try: 69 | queue_available = queue.ping() 70 | except: 71 | self.echo("\n--- Can't ping the backend for %s named '%s'" % ( 72 | queue.__class__.__name__, queue_name), 73 | color=16) 74 | self.echo("\n--- Is the server running?", 75 | color=16) 76 | self.echo("\n>>> Exiting ...\n\n", 77 | color=16) 78 | sys.exit(2) 79 | 80 | if not queue_available: 81 | self.echo("\n--- Can't ping the backend for %s named '%s'" % ( 82 | queue.__class__.__name__, queue_name), 83 | color=16) 84 | self.echo("\n--- Is the server running?", 85 | color=16) 86 | self.echo("\n>>> Exiting ...\n\n", 87 | color=16) 88 | sys.exit(2) 89 | 90 | queue_json = repr(queue) 91 | 92 | if indent > 0: 93 | queue_out = library_json.loads(queue_json) 94 | print library_json.dumps(queue_out, indent=indent) 95 | else: 96 | print queue_json -------------------------------------------------------------------------------- /signalqueue/management/commands/purgequeue.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | import sys 4 | from django.core.management.base import BaseCommand 5 | from django.core.exceptions import ImproperlyConfigured 6 | #from pprint import pformat 7 | from optparse import make_option 8 | 9 | from . import echo_banner 10 | 11 | class Command(BaseCommand): 12 | 13 | option_list = BaseCommand.option_list + ( 14 | make_option('--queuename', '-n', dest='queue_name', default='default', 15 | help="Name of queue, as specified in settings.py (defaults to 'default')", 16 | ), 17 | ) 18 | 19 | help = ('Purges everything from a queue, deleting all signals.') 20 | requires_model_validation = True 21 | can_import_settings = True 22 | 23 | def handle(self, *args, **options): 24 | import signalqueue 25 | signalqueue.autodiscover() 26 | echo_banner() 27 | try: 28 | return self.purge_signal_queue(args, options) 29 | except ImproperlyConfigured, err: 30 | self.echo("*** ERROR in configuration: %s" % err) 31 | self.echo("*** Check the signalqueue-related options in your settings.py.") 32 | 33 | def echo(self, *args, **kwargs): 34 | """ Print in color to stdout. """ 35 | text = " ".join([str(item) for item in args]) 36 | DEBUG = False 37 | 38 | if DEBUG: 39 | color = kwargs.get("color",32) 40 | self.stdout.write("\033[0;%dm%s\033[0;m" % (color, text)) 41 | 42 | else: 43 | print text 44 | 45 | def purge_signal_queue(self, apps, options): 46 | """ Purges all signals from the queue. 
""" 47 | from django.conf import settings 48 | from signalqueue import SQ_RUNMODES as runmodes 49 | from signalqueue.worker import backends 50 | 51 | queue_name = options.get('queue_name') 52 | queues = backends.ConnectionHandler(settings.SQ_QUEUES, runmodes['SQ_ASYNC_MGMT']) 53 | 54 | if not queue_name in queues: 55 | self.echo("\n--- No definition found for a queue named '%s'" % (queue_name,), color=16) 56 | self.echo("\n--- Your defined queues have these names: '%s'" % ("', '".join(queues.keys()),), color=16) 57 | self.echo("\n>>> Exiting ...\n\n", color=16) 58 | sys.exit(2) 59 | 60 | queue = queues[queue_name] 61 | 62 | try: 63 | queue_available = queue.ping() 64 | except: 65 | self.echo("\n--- Can't ping the backend for %s named '%s'" % (queue.__class__.__name__, queue_name), color=16) 66 | self.echo("\n--- Is the server running?", color=16) 67 | self.echo("\n>>> Exiting ...\n\n", color=16) 68 | sys.exit(2) 69 | 70 | if not queue_available: 71 | self.echo("\n--- Can't ping the backend for %s named '%s'" % (queue.__class__.__name__, queue_name), color=16) 72 | self.echo("\n--- Is the server running?", color=16) 73 | self.echo("\n>>> Exiting ...\n\n", color=16) 74 | sys.exit(2) 75 | 76 | self.echo("\n>>> Purging signals in queue '%s' -- %s enqueued signals total" % ( 77 | queue.queue_name, queue.count()), color=31) 78 | 79 | from django.db.models.loading import cache 80 | if queue.count() > 0: 81 | for signalblip in queue: 82 | #self.echo("\n>>> Signal: ", color=31) 83 | #self.echo("\n%s" % pformat(signalblip), color=31) 84 | 85 | sender_dict = signalblip.get('sender') 86 | sender = cache.get_model(str(sender_dict['app_label']), str(sender_dict['modl_name'])) 87 | signal = signalblip.get('signal') 88 | 89 | self.echo(">>> Purging signal sent by %s.%s: %s.%s" % ( 90 | sender._meta.app_label, sender.__name__, signal.keys()[0], signal.values()[0]), color=31) 91 | 92 | self.echo(">>> Done purging signals in queue '%s' -- %s enqueued signals remaining" % ( 93 | 
queue.queue_name, queue.count()), color=31) 94 | self.echo("\n") 95 | 96 | -------------------------------------------------------------------------------- /signalqueue/management/commands/runqueueserver.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | import sys, os 4 | from django.conf import settings 5 | from django.core.management.base import BaseCommand, CommandError 6 | from django.core.exceptions import ImproperlyConfigured 7 | from optparse import make_option 8 | 9 | from . import echo_banner 10 | 11 | 12 | class Command(BaseCommand): 13 | 14 | option_list = BaseCommand.option_list + ( 15 | make_option('--queuename', '-n', dest='queue_name', 16 | default='default', 17 | help="Name of the queue as defined in settings.py", 18 | ), 19 | make_option('--halt-when-exhausted', '-H', action='store_true', dest='halt_when_exhausted', 20 | default=False, 21 | help="Halt the queue worker once the queue has been exhausted", 22 | ), 23 | make_option('--no-exit', '-N', action='store_false', dest='exit', 24 | default=True, 25 | help="Don't call sys.exit() when halting", 26 | ), 27 | make_option('--disable-exception-logging', '-x', action='store_false', dest='log_exceptions', 28 | default=True, 29 | help="Disable the Sentry exception log.", 30 | ), 31 | ) 32 | 33 | help = ('Runs the Tornado-based queue worker.') 34 | args = '[optional port number, or ipaddr:port]' 35 | can_import_settings = True 36 | exit_when_halting = True 37 | 38 | def echo(self, *args, **kwargs): 39 | """ Print in color to stdout. """ 40 | text = " ".join([str(item) for item in args]) 41 | 42 | if settings.DEBUG: 43 | color = kwargs.get("color", 32) 44 | self.stdout.write("\033[0;%dm%s\033[0;m" % (color, text)) 45 | else: 46 | print text 47 | 48 | def exit(self, status=2): 49 | """ Exit when complete. 
""" 50 | self.echo("+++ Exiting ...\n", color=16) 51 | if self.exit_when_halting: 52 | sys.exit(status) 53 | 54 | def run_worker(self, args, options): 55 | """ Runs the Tornado-based queue worker. """ 56 | import tornado.options 57 | from tornado.httpserver import HTTPServer 58 | from tornado.ioloop import IOLoop 59 | from signalqueue.worker.vortex import Application 60 | from signalqueue.worker import backends 61 | import signalqueue 62 | 63 | queue_name = options.get('queue_name') 64 | queues = backends.ConnectionHandler(settings.SQ_QUEUES, signalqueue.SQ_RUNMODES['SQ_ASYNC_MGMT']) 65 | queue = queues[queue_name] 66 | 67 | try: 68 | queue_available = queue.ping() 69 | except: 70 | self.echo("\n--- Can't ping the backend for %s named '%s'" % (queue.__class__.__name__, queue_name), color=16) 71 | self.echo("\n--- Is the server running?", color=16) 72 | self.exit(2) 73 | 74 | if not queue_available: 75 | self.echo("\n--- Can't ping the backend for %s named '%s'" % (queue.__class__.__name__, queue_name), color=16) 76 | self.echo("\n--- Is the server running?", color=16) 77 | self.exit(2) 78 | 79 | http_server = HTTPServer(Application(queue_name=queue_name, 80 | halt_when_exhausted=options.get('halt_when_exhausted', False), 81 | log_exceptions=options.get('log_exceptions', True), 82 | )) 83 | 84 | http_server.listen(int(options.get('port')), address=options.get('addr')) 85 | 86 | try: 87 | IOLoop.instance().start() 88 | 89 | except KeyboardInterrupt: 90 | self.echo("Shutting down signal queue worker ...", color=31) 91 | 92 | def handle(self, addrport='', *args, **options): 93 | """ Handle command-line options. 
""" 94 | echo_banner() 95 | 96 | if args: 97 | raise CommandError('Usage: %s %s' % (__file__, self.args)) 98 | 99 | self.exit_when_halting = options.get('exit', True) 100 | 101 | if not addrport: 102 | addr = '' 103 | port = str(settings.SQ_WORKER_PORT) or '8088' 104 | else: 105 | try: 106 | addr, port = addrport.split(':') 107 | except ValueError: 108 | addr, port = '', addrport 109 | 110 | if not addr: 111 | addr = '127.0.0.1' 112 | 113 | if not port.isdigit(): 114 | raise CommandError("%r is not a valid port number." % port) 115 | 116 | self.quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C' 117 | options.update({ 118 | 'addr': addr, 119 | 'port': port, 120 | }) 121 | 122 | self.echo("Validating models...") 123 | self.validate(display_num_errors=True) 124 | 125 | self.echo(("\nDjango version %(version)s, using settings %(settings)r\n" 126 | "Tornado worker for queue \"%(queue_name)s\" binding to http://%(addr)s:%(port)s/\n" 127 | "Quit the server with %(quit_command)s.\n" ) % { 128 | "version": self.get_version(), 129 | "settings": settings.SETTINGS_MODULE, 130 | "queue_name": options.get('queue_name'), 131 | "addr": addr, 132 | "port": port, 133 | "quit_command": self.quit_command, 134 | }) 135 | 136 | try: 137 | self.run_worker(args, options) 138 | 139 | except ImproperlyConfigured, err: 140 | self.echo("*** ERROR in configuration: %s" % err, color=31) 141 | self.echo("*** Check the signalqueue options in your settings.py.", color=31) 142 | 143 | finally: 144 | self.exit(0) 145 | -------------------------------------------------------------------------------- /signalqueue/mappings.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import print_function 3 | from collections import defaultdict 4 | 5 | def who_calls(): 6 | try: 7 | import sys 8 | return sys._getframe(1).f_code.co_name 9 | except (ValueError, AttributeError): 10 | return "I was never given a name." 
11 | 12 | by_priority = defaultdict(lambda: set()) 13 | 14 | class Mappers(type): 15 | def __new__(cls, name, bases, attrs): 16 | global by_priority 17 | outcls = super(Mappers, cls).__new__(cls, name, bases, attrs) 18 | if name is not 'Mapper': 19 | by_priority[attrs.get('PRIORITY', "normal")].add(outcls) 20 | return outcls 21 | 22 | class Mapper(object): 23 | 24 | __metaclass__ = Mappers 25 | 26 | PRIORITY = "normal" 27 | 28 | """ Maybe I will make these singletons. 29 | Then, when they're all singly alone, 30 | I can get dressed up like global state 31 | and jump out in front of them like, 32 | 33 | !*** BOO ***! 34 | 35 | which they never expect that shit, haha, 36 | nerd alert. """ 37 | 38 | @classmethod 39 | def demap(cls, signal_arg): 40 | ''' serialize an argument. ''' 41 | 42 | who = who_calls() 43 | raise NotImplementedError( 44 | '%s subclasses need to define %s()' % ( 45 | cls.__name__, who)) 46 | 47 | @classmethod 48 | def remap(cls, intermediate): # unserialize 49 | ''' un-serialize an argument from a provided 50 | intermediate representation. ''' 51 | 52 | who = who_calls() 53 | raise NotImplementedError( 54 | '%s subclasses need to define %s()' % ( 55 | cls.__name__, who)) 56 | 57 | @classmethod 58 | def can_demap(cls, test_value): 59 | try: 60 | cls.demap(test_value) 61 | except NotImplementedError, exc: 62 | import sys 63 | raise NotImplementedError, exc, sys.exc_info()[2] 64 | except Exception: 65 | return False 66 | return True 67 | 68 | @classmethod 69 | def can_remap(cls, test_value): 70 | try: 71 | cls.remap(test_value) 72 | except NotImplementedError, exc: 73 | import sys 74 | raise NotImplementedError, exc, sys.exc_info()[2] 75 | except Exception: 76 | return False 77 | return True 78 | 79 | 80 | class LiteralValueMapper(Mapper): 81 | """ Python primitive types e.g. bool, int, str; 82 | also list, dict & friends -- once it exists, 83 | this mapper class will be using Base64-encoded, 84 | compressed JSON as its intermediate form. 
""" 85 | 86 | DEMAP_TYPES = ( 87 | bool, int, long, float, 88 | str, unicode, 89 | list, dict) 90 | PRIORITY = "penultimate" 91 | 92 | @classmethod 93 | def json(cls): 94 | if not hasattr(cls, '_json'): 95 | from signalqueue.utils import json 96 | cls._json = json 97 | return cls._json 98 | 99 | @classmethod 100 | def base64(cls): 101 | if not hasattr(cls, '_base64'): 102 | import base64 103 | cls._base64 = base64 104 | return cls._base64 105 | 106 | @classmethod 107 | def demap(cls, signal_arg): 108 | return cls.base64().encodestring( 109 | cls.json().dumps( 110 | signal_arg)) 111 | 112 | @classmethod 113 | def remap(cls, intermediate): 114 | return cls.json().loads( 115 | cls.base64().decodestring( 116 | intermediate)) 117 | 118 | @classmethod 119 | def can_demap(cls, test_value): 120 | if type(test_value) not in cls.DEMAP_TYPES: 121 | return False 122 | try: 123 | ir = cls.demap(test_value) 124 | rt = cls.remap(ir) 125 | except Exception: 126 | return False 127 | return (repr(test_value) == repr(rt)) 128 | 129 | @classmethod 130 | def can_remap(cls, test_value): 131 | try: 132 | rt = cls.remap(test_value) 133 | except Exception: 134 | return False 135 | return (type(rt) in cls.DEMAP_TYPES) 136 | 137 | class PickleMapper(Mapper): 138 | """ Miscellaneous other objects -- see the `pickle` 139 | module documentation for details about what can 140 | be pickled. 
""" 141 | 142 | PICKLE_PROTOCOL = 1 143 | PRIORITY = "ultimate" 144 | 145 | @classmethod 146 | def brine(cls): 147 | if not hasattr(cls, '_brine'): 148 | try: 149 | import cPickle 150 | except ImportError: 151 | import pickle 152 | cls._brine = pickle 153 | else: 154 | cls._brine = cPickle 155 | return cls._brine 156 | 157 | @classmethod 158 | def demap(cls, signal_arg): 159 | return cls.brine().dumps(signal_arg, 160 | cls.PICKLE_PROTOCOL) 161 | 162 | @classmethod 163 | def remap(cls, intermediate): 164 | return cls.brine().loads(str(intermediate)) 165 | 166 | @classmethod 167 | def can_demap(cls, test_value): 168 | try: 169 | cls.demap(test_value) 170 | except Exception: 171 | return False 172 | return True 173 | 174 | @classmethod 175 | def can_remap(cls, test_value): 176 | try: 177 | cls.remap(str(test_value)) 178 | except Exception: 179 | return False 180 | return True 181 | 182 | class ModelIDMapper(Mapper): 183 | 184 | """ Django model instances, as in properly-saved 185 | instances of non-abstract django.db.models.Model 186 | subclasses -- they have valid `pk` properties 187 | and suchlike. 188 | 189 | This mapper 'passes by reference', using an intermediate 190 | serial form consisting of a JSONified dict,* containing 191 | three values: the instance object's `pk` and its parent 192 | classes' `app_label` and `model_name` property. These 193 | are the data with which the object can be reconstituted 194 | with `django.db.models.loading.cache.get_model()`. 
""" 195 | 196 | @classmethod 197 | def demap(cls, signal_arg): 198 | return { 199 | 'app_label': signal_arg._meta.app_label, 200 | 'modl_name': signal_arg.__class__.__name__.lower(), 201 | 'obj_id': signal_arg.pk } 202 | 203 | @classmethod 204 | def remap(cls, intermediate): 205 | from django.db.models.loading import cache 206 | pk = intermediate.get('obj_id') 207 | ModCls = cache.get_model( 208 | intermediate.get('app_label'), 209 | intermediate.get('modl_name')) 210 | if ModCls: 211 | if pk is not -1: 212 | try: 213 | return ModCls.objects.get(pk=pk) 214 | except ModCls.DoesNotExist: 215 | return None 216 | return None 217 | 218 | @classmethod 219 | def can_demap(cls, test_value): 220 | return hasattr(test_value, '_meta') and \ 221 | hasattr(test_value, '__class__') and \ 222 | hasattr(test_value, 'pk') 223 | 224 | @classmethod 225 | def can_remap(cls, test_value): 226 | return ('obj_id' in test_value) and \ 227 | ('app_label' in test_value) and \ 228 | ('modl_name' in test_value) 229 | 230 | ModelInstanceMapper = ModelIDMapper # 'legacy support' 231 | 232 | class ModelValueMapper(Mapper): 233 | """ Django model instances, as in properly-saved 234 | instances of non-abstract django.db.models.Model 235 | subclasses -- they have valid `pk` properties 236 | and suchlike. 237 | 238 | This mapper uses the analagous corrolary to its 239 | sibling `ModelIDMapper` in that it 'passes by value'. 240 | The model instances' ID is actually ignored, and the 241 | object __dict__ is filtered and then JSONated, using 242 | whatever `django.core.serializers.serialize` employs. 
class ModelValueMapper(Mapper):
    """ Pass-by-value mapper for saved Django model instances.

        The counterpart of ModelIDMapper: here the instance's primary
        key is ignored and the object's field data itself is flattened
        with Django's 'python' serializer, then reinflated with the
        matching deserializer. """

    @classmethod
    def flattener(cls):
        """ Lazily build and cache a 'python'-format serializer. """
        if not hasattr(cls, '_flattener'):
            from django.core import serializers
            cls._flattener = serializers.get_serializer('python')()
        return cls._flattener

    @classmethod
    def expander(cls, expandees):
        """ Lazily cache the 'python'-format deserializer, then apply
            it to `expandees`. """
        if not hasattr(cls, '_expander'):
            from django.core import serializers
            cls._expander = staticmethod(
                serializers.get_deserializer('python'))
        return cls._expander(expandees)

    @classmethod
    def model_from_identifier(cls, model_identifier):
        """ Resolve a dotted 'app_label.modelname' string to a model
            class, or None when it names nothing. """
        from django.db.models import get_model
        try:
            return get_model(*model_identifier.split('.'))
        except (TypeError, AttributeError, ValueError):
            return None

    @classmethod
    def demap(cls, signal_arg):
        """ One instance -> its serialized-dict form. """
        return cls.flattener().serialize([signal_arg])[0]

    @classmethod
    def remap(cls, intermediate):
        """ One serialized dict -> a reinflated model instance. """
        return list(cls.expander([intermediate]))[0].object

    @classmethod
    def can_demap(cls, test_value):
        """ Duck-type check for anything model-instance-shaped. """
        return hasattr(test_value, '_meta') and \
            hasattr(test_value, '__class__')

    @classmethod
    def can_remap(cls, test_value):
        """ Serial form must carry 'model' and 'fields' keys, and the
            'model' identifier must resolve to a real model class. """
        if 'model' not in test_value or 'fields' not in test_value:
            return False
        return cls.model_from_identifier(test_value['model']) is not None


def signature(thing):
    """ Return the qualified module path of thing's class, e.g.
        'django.core.files.storage.FileSystemStorage'. Instances of
        builtin types yield '__builtin__.<typename>'.

        examples:

        >>> signature(lambda: None)
        '__main__.function'
        >>> signature(dict)
        '__builtin__.type'
    """
    if type(thing) in __import__('__builtin__').__dict__.values():
        module = '__builtin__'
    else:
        module = thing.__module__
    return "%s.%s" % (module, thing.__class__.__name__)
309 | >>> 310 | >>> signature(yodogg) 311 | '__main__.function' 312 | 313 | >>> sig(models.Model) 314 | 'django.db.models.base.ModelBase' 315 | >>> sig(models) 316 | Traceback (most recent call last): 317 | File "", line 1, in 318 | File "", line 1, in 319 | AttributeError: 'module' object has no attribute '__module__' 320 | 321 | >>> sig(fish) 322 | 'django.contrib.auth.models.User' 323 | 324 | >>> sig(dict) 325 | '__builtin__.type' 326 | 327 | >>> sig(defaultdict) 328 | '__builtin__.type' 329 | >>> from django.core.files.storage import FileSystemStorage 330 | >>> fs = FileSystemStorage() 331 | >>> fs 332 | 333 | >>> sig(fs) 334 | 'django.core.files.storage.FileSystemStorage' 335 | >>> sig(FileSystemStorage) 336 | '__builtin__.type' 337 | 338 | """ 339 | 340 | class MapperToPedigreeIndex(defaultdict): 341 | 342 | pedigrees = { 343 | 344 | # here's why I might do singletons (despite my 345 | # idiotic joke I was serious): 346 | 347 | 'django.db.models.Model': ModelIDMapper, 348 | 'django.db.models.ModelBase': ModelValueMapper, 349 | 350 | # this dict won't necessarily have this type 351 | # of thing in here literally, btdubs. 352 | # etc, etc... it's a heiarchy. 353 | } 354 | pedigrees.update(dict( 355 | [(signature(T()), LiteralValueMapper) \ 356 | for T in LiteralValueMapper.DEMAP_TYPES])) 357 | 358 | def _demap_tests(self): 359 | global by_priority 360 | order = () 361 | for priority in ('normal', 'penultimate', 'ultimate'): 362 | order += tuple(sorted(tuple(by_priority[priority]))) 363 | return order 364 | 365 | demap_tests = property(_demap_tests) 366 | remap_tests = property(_demap_tests) 367 | 368 | # the above sequence dictates the order in which 369 | # the mapping classes will be applied to an argument 370 | # when checking it. 371 | 372 | def demapper_for_value(self, value): 373 | ''' Mapper.can_demap() implementations should NOT 374 | fuck with, in-place or otherwise, the values 375 | they are passed to examine. 
''' 376 | for TestCls in self.demap_tests: 377 | try: 378 | if TestCls.can_demap(value): 379 | return (TestCls, value) 380 | except Exception: 381 | continue 382 | return (self[None], value) 383 | 384 | def remapper_for_serial(self, serial): 385 | ''' Generally the sequential order is less important 386 | on this end -- a proper value serial is valid for 387 | exactly 1 deserializer, like by definition. 388 | long as one doesn't list mappers whose inter- 389 | mediate structures have much formal overlap... 390 | As a valid Base64-ed bzipped minified JSON blob 391 | is highly unlikely to also be (say) a reasonable 392 | pickle value, the order won't matter, as long 393 | as the can_demap()/can_remap() functions in play 394 | are responsible w/r/t the data they are passed. ''' 395 | for TestCls in self.remap_tests: 396 | try: 397 | if TestCls.can_remap(serial): 398 | return (TestCls, serial) 399 | except Exception: 400 | continue 401 | return (self[None], serial) 402 | 403 | def demap(self, value): 404 | MapCls, val = self.demapper_for_value(value) 405 | return MapCls.demap(val) 406 | 407 | def remap(self, value): 408 | MapCls, val = self.remapper_for_serial(value) 409 | return MapCls.remap(val) 410 | 411 | # The way to do this is: 412 | # MOST SPECIFIC -> LEAST SPECIFIC. 413 | # ... The pickle mapper [2] takes most anything 414 | # in Python i.e. generator sequences and other 415 | # things that don't have a one-to-one JSONish 416 | # lexical analogue. Before pickling everything, 417 | # the LiteralValueMapper will make exceptions 418 | # for JSONerizable values [1]; before that, any 419 | # Django model objects, who are disproportionately 420 | # frequent commuters in the signal traffic of 421 | # most apps, have already been sieved out 422 | # by the ModelIDMapper [0]. 423 | # 424 | # N.B. ModelValueMapper isn't used by default -- 425 | # it's a nuanced, subtle, upscale sort of mapper 426 | # and it's not applied willy-nilly to objects. 
427 | # 428 | # Also the map_test_order tuple might benefit from 429 | # being built on-the-fly (allowing 3rd parties 430 | # to do their own mapping, either by subclassing 431 | # or delegation or someshit, I don't know.) 432 | 433 | def __init__(self, *args, **kwargs): 434 | self_update = kwargs.pop('self_update', True) 435 | super(MapperToPedigreeIndex, self).__init__(*args, **kwargs) 436 | if self_update: 437 | self.update(self.pedigrees) 438 | 439 | def __missing__(self, key): 440 | return self.demap_tests[-1] 441 | 442 | def for_object(self, obj): 443 | return self[signature(obj)] 444 | 445 | def update_for_type(self, betyped, fortype): 446 | try: 447 | handcock = signature(betyped) 448 | except AttributeError: 449 | print('*** signatures of object instances are currently supported --') 450 | print('*** but not class types or other higher-order structures.') 451 | return 452 | 453 | if len(handcock) < 3: 454 | print('*** instance signature "%s" is too short.' % handcock) 455 | return 456 | 457 | self.update({ handcock: fortype, }) 458 | return handcock 459 | 460 | def update_for(self, betyped): 461 | """ use this on objects that are as type-ishly consistent 462 | with those you'll be flinging down the signal's chute 463 | as you can find. """ 464 | 465 | mapper, _ = self.demapper_for_value(betyped) 466 | if mapper is not None: 467 | return self.update_for_type(betyped, mapper) 468 | return 469 | 470 | -------------------------------------------------------------------------------- /signalqueue/models.py: -------------------------------------------------------------------------------- 1 | 2 | from django.db import models 3 | from datetime import datetime 4 | from delegate import DelegateManager, delegate 5 | from signalqueue.worker.base import QueueBase 6 | #from signalqueue.utils import logg 7 | 8 | 9 | class SignalQuerySet(models.query.QuerySet): 10 | """ 11 | SignalQuerySet is a QuerySet that works as a signalqueue backend. 
class SignalQuerySet(models.query.QuerySet):
    """
    SignalQuerySet is a QuerySet that works as a signalqueue backend.

    The actual QueueBase override methods are implemented here and delegated to
    SignalManager, which is a DelegateManager subclass with the QueueBase
    implementation "mixed in".

    Since you can't nakedly instantiate managers outside of a model
    class, we use a proxy class to hand off SignalQuerySet's delegated
    manager to the queue config stuff. See the working implementation in
    signalqueue.worker.backends.DatabaseQueueProxy for details.

    """
    @delegate
    def queued(self, enqueued=True):
        """ Signals for this queue in the given state, oldest first. """
        return self.filter(queue_name=self.queue_name, enqueued=enqueued).order_by("createdate")

    @delegate
    def ping(self):
        """ The database backend is reachable whenever the ORM is. """
        return True

    @delegate
    def push(self, value):
        """ Enqueue a serialized signal value (get_or_create keeps it idempotent). """
        self.get_or_create(queue_name=self.queue_name, value=value, enqueued=True)

    @delegate
    def pop(self):
        """ Dequeued signals are marked as such (but not deleted) by default. """
        out = self.queued()[0]
        out.enqueued = False
        out.save()
        return str(out.value)

    def count(self, enqueued=True):
        """ This override can't be delegated as the super() call isn't portable.
        """
        # FIX: was `super(self.__class__, ...)` -- that idiom recurses
        # infinitely the moment this class is subclassed, because
        # self.__class__ is then the subclass. Name the class explicitly.
        return super(SignalQuerySet, self.all().queued(enqueued=enqueued)).count()

    @delegate
    def clear(self):
        """ Mark every currently-enqueued signal as dequeued. """
        self.queued().update(enqueued=False)

    @delegate
    def values(self, floor=0, ceil=-1):
        """ Serialized signal values for a slice of the queue;
            negative/zero bounds normalize to the full range. """
        if floor < 1:
            floor = 0
        if ceil < 1:
            ceil = self.count()

        out = self.queued()[floor:ceil]
        return [str(value[0]) for value in out.values_list('value')]

    @delegate
    def __repr__(self):
        return "[%s]" % ",".join([str(value[0]) for value in self.values_list('value')])

    @delegate
    def __str__(self):
        return repr(self)

    def __unicode__(self):
        import json as library_json
        return u"%s" % library_json.dumps(library_json.loads(repr(self)), indent=4)

class SignalManager(DelegateManager, QueueBase):
    """ Manager mixing the QueueBase API into a DelegateManager, so the
        SignalQuerySet methods above double as queue operations. """
    __queryset__ = SignalQuerySet

    def __init__(self, *args, **kwargs):
        # runmode 4 -- presumably the management/sync default; confirm
        # against signalqueue.SQ_RUNMODES before relying on it.
        self.runmode = kwargs.get('runmode', 4)
        QueueBase.__init__(self, *args, **kwargs)
        DelegateManager.__init__(self, *args, **kwargs)

    def count(self, enqueued=True):
        """ Number of signals in the given state for this queue. """
        return self.queued(enqueued=enqueued).count()

    def _get_queue_name(self):
        if self._queue_name:
            return self._queue_name
        return None

    def _set_queue_name(self, queue_name):
        # The queryset class is stamped with the queue name so delegated
        # methods (queued, push, ...) can filter on it.
        self._queue_name = queue_name
        self.__queryset__.queue_name = queue_name

    queue_name = property(_get_queue_name, _set_queue_name)
class EnqueuedSignal(models.Model):
    # One serialized signal sitting in (or already popped from) a named
    # database-backed queue. Rows are flagged dequeued, never deleted,
    # by SignalQuerySet.pop()/clear().
    class Meta:
        abstract = False
        verbose_name = "Enqueued Signal"
        verbose_name_plural = "Enqueued Signals"

    objects = SignalManager()
    # The only top-level keys a serialized signal dict may contain --
    # _set_struct() silently drops payloads with any other keys.
    keys = set(
        ('signal', 'sender', 'enqueue_runmode'))

    # Enqueue timestamp; drives FIFO ordering in SignalQuerySet.queued().
    createdate = models.DateTimeField("Created on",
        default=datetime.now,
        blank=True,
        null=True,
        editable=False)

    # False once the signal has been popped from the queue.
    enqueued = models.BooleanField("Enqueued",
        default=True,
        editable=True)

    # Which named queue (from settings.SQ_QUEUES) this row belongs to.
    queue_name = models.CharField(verbose_name="Queue Name",
        max_length=255, db_index=True,
        default="default",
        unique=False,
        blank=True,
        null=True)

    # JSON blob of the signal payload; unique, so identical payloads
    # are collapsed by SignalQuerySet.push()'s get_or_create().
    value = models.TextField(verbose_name="Serialized Signal Value",
        editable=False,
        unique=True, db_index=True,
        blank=True,
        null=True)

    def _get_struct(self):
        # Deserialize `value` into an attribute-accessible dict
        # (empty ADict when there is no payload).
        if self.value:
            from signalqueue.utils import json, ADict
            return ADict(
                json.loads(self.value))
        return ADict()

    def _set_struct(self, newstruct):
        # Serialize and store only when the payload's keys are a subset
        # of the allowed `keys`; anything else is ignored without error.
        if self.keys.issuperset(newstruct.keys()):
            from signalqueue.utils import json
            self.value = json.dumps(newstruct)

    struct = property(_get_struct, _set_struct)

    def __repr__(self):
        if self.value:
            return str(self.value)
        return "{'instance':null}"

    def __str__(self):
        return repr(self)

    def __unicode__(self):
        # Pretty-printed (indented) JSON rendering of the payload.
        if self.value:
            import json as library_json
            return u"%s" % library_json.dumps(
                library_json.loads(repr(self)),
                indent=4)
        return u"{'instance':null}"
'America/New_York' 27 | LANGUAGE_CODE = 'en-us' 28 | SITE_ID = 1 29 | USE_I18N = False 30 | MEDIA_ROOT = os.path.join(approot, 'static') 31 | MEDIA_URL = '/face/' 32 | STATIC_ROOT = os.path.join(adminroot, 'static', 'admin')[0] 33 | STATIC_URL = '/staticfiles/' 34 | ADMIN_MEDIA_PREFIX = '/admin-media/' 35 | ROOT_URLCONF = 'signalqueue.settings.urlconf' 36 | 37 | TEMPLATE_DIRS = ( 38 | os.path.join(approot, 'templates'), 39 | os.path.join(adminroot, 'templates'), 40 | os.path.join(adminroot, 'templates', 'admin'), 41 | ) 42 | 43 | STATICFILES_FINDERS = ( 44 | 'django.contrib.staticfiles.finders.FileSystemFinder', 45 | 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 46 | 'django.contrib.staticfiles.finders.DefaultStorageFinder', 47 | ) 48 | 49 | TEMPLATE_LOADERS = ( 50 | 'django.template.loaders.filesystem.Loader', 51 | 'django.template.loaders.app_directories.Loader', 52 | 'django.template.loaders.eggs.Loader', 53 | ) 54 | 55 | MIDDLEWARE_CLASSES = ( 56 | 'django.middleware.gzip.GZipMiddleware', 57 | 'django.middleware.common.CommonMiddleware', 58 | 'django.middleware.csrf.CsrfViewMiddleware', 59 | 'django.contrib.sessions.middleware.SessionMiddleware', 60 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 61 | ) 62 | 63 | TEMPLATE_CONTEXT_PROCESSORS = ( 64 | "django.contrib.auth.context_processors.auth", 65 | "django.core.context_processors.request", 66 | "django.core.context_processors.debug", 67 | #"django.core.context_processors.i18n", this is AMERICA 68 | "django.core.context_processors.media", 69 | ) 70 | 71 | INSTALLED_APPS = ( 72 | 'django.contrib.auth', 73 | 'django.contrib.contenttypes', 74 | 'django.contrib.staticfiles', 75 | 'django.contrib.sessions', 76 | 'django.contrib.sites', 77 | 'django.contrib.admin', 78 | 'django_nose', 79 | 'djcelery', 80 | 'delegate', 81 | 'signalqueue', 82 | ) 83 | 84 | LOGGING = dict( 85 | version=1, 86 | disable_existing_loggers=False, 87 | formatters={ 'standard': { 'format': '%(asctime)s 
[%(levelname)s] %(name)s: %(message)s' }, }, 88 | handlers={ 89 | 'default': { 'level':'DEBUG', 'class':'logging.StreamHandler', 'formatter':'standard', }, 90 | 'nil': { 'level':'DEBUG', 'class':'django.utils.log.NullHandler', }, 91 | }, 92 | loggers={ 93 | 'signalqueue': { 'handlers': ['default'], 'level': 'INFO', 'propagate': False }, 94 | }, 95 | root={ 'handlers': ['default'], 'level': 'INFO', 'propagate': False }, 96 | ) 97 | 98 | SQ_QUEUES = { 99 | 'default': { # you need at least one dict named 'default' in SQ_QUEUES 100 | 'ENGINE': 'signalqueue.worker.backends.RedisSetQueue', # required - full path to a QueueBase subclass 101 | 'INTERVAL': 30, # 1/3 sec 102 | 'OPTIONS': dict(port=8356), 103 | }, 104 | 'listqueue': { 105 | 'ENGINE': 'signalqueue.worker.backends.RedisQueue', 106 | 'INTERVAL': 30, # 1/3 sec 107 | 'OPTIONS': dict(port=8356), 108 | }, 109 | 'db': { 110 | 'ENGINE': 'signalqueue.worker.backends.DatabaseQueueProxy', 111 | 'INTERVAL': 30, # 1/3 sec 112 | 'OPTIONS': dict(app_label='signalqueue', 113 | modl_name='EnqueuedSignal'), 114 | }, 115 | 'celery': { 116 | 'ENGINE': 'signalqueue.worker.celeryqueue.CeleryQueue', 117 | 'INTERVAL': 30, # 1/3 sec 118 | 'OPTIONS': dict(celery_queue_name='inactive', 119 | transport='redis', port=8356), 120 | }, 121 | } 122 | 123 | 124 | SQ_ADDITIONAL_SIGNALS=['signalqueue.tests'] 125 | SQ_WORKER_PORT = 11201 126 | 127 | TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' 128 | 129 | try: 130 | from kombu import Queue 131 | except ImportError: 132 | pass 133 | else: 134 | CELERY_DEFAULT_QUEUE = 'default' 135 | CELERY_DEFAULT_ROUTING_KEY = 'default' 136 | CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' 137 | 138 | CELERY_QUEUES = ( 139 | Queue('default', routing_key='default.#'), 140 | Queue('yodogg', routing_key='yodogg.#'), 141 | ) 142 | 143 | CELERY_ALWAYS_EAGER = True 144 | BROKER_URL = 'redis://localhost:8356/0' 145 | 146 | BROKER_HOST = "localhost" 147 | BROKER_BACKEND = "redis" 148 | REDIS_PORT = 8356 149 | REDIS_HOST = 
"localhost" 150 | BROKER_USER = "" 151 | BROKER_PASSWORD = "" 152 | BROKER_VHOST = "0" 153 | REDIS_DB = 0 154 | REDIS_CONNECT_RETRY = True 155 | CELERY_SEND_EVENTS = True 156 | CELERY_RESULT_BACKEND = "redis://localhost:8356/0" 157 | CELERY_TASK_RESULT_EXPIRES = 10 158 | CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler" 159 | 160 | try: 161 | import djcelery 162 | except ImportError: 163 | pass 164 | else: 165 | djcelery.setup_loader() 166 | 167 | # package path-extension snippet. 168 | from pkgutil import extend_path 169 | __path__ = extend_path(__path__, __name__) 170 | -------------------------------------------------------------------------------- /signalqueue/settings/redis-compatible.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example 2 | 3 | # Note on units: when memory size is needed, it is possible to specifiy 4 | # it in the usual form of 1k 5GB 4M and so forth: 5 | # 6 | # 1k => 1000 bytes 7 | # 1kb => 1024 bytes 8 | # 1m => 1000000 bytes 9 | # 1mb => 1024*1024 bytes 10 | # 1g => 1000000000 bytes 11 | # 1gb => 1024*1024*1024 bytes 12 | # 13 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 14 | 15 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 16 | # Note that Redis will write a pid file in /usr/local/var/run/redis.pid when daemonized. 17 | daemonize no 18 | 19 | # When running daemonized, Redis writes a pid file in /usr/local/var/run/redis.pid by 20 | # default. You can specify a custom pid file location here. 21 | pidfile /tmp/redis.pid 22 | 23 | # Accept connections on the specified port, default is 6379. 24 | # If port 0 is specified Redis will not listen on a TCP socket. 25 | port 8356 26 | 27 | # If you want you can bind a single interface, if the bind option is not 28 | # specified all the interfaces will listen for incoming connections. 
29 | # 30 | # bind 127.0.0.1 31 | 32 | # Specify the path for the unix socket that will be used to listen for 33 | # incoming connections. There is no default, so Redis will not listen 34 | # on a unix socket when not specified. 35 | # 36 | # unixsocket /tmp/redis.sock 37 | 38 | # Close the connection after a client is idle for N seconds (0 to disable) 39 | timeout 300 40 | 41 | # Set server verbosity to 'debug' 42 | # it can be one of: 43 | # debug (a lot of information, useful for development/testing) 44 | # verbose (many rarely useful info, but not a mess like the debug level) 45 | # notice (moderately verbose, what you want in production probably) 46 | # warning (only very important / critical messages are logged) 47 | loglevel notice 48 | 49 | # Specify the log file name. Also 'stdout' can be used to force 50 | # Redis to log on the standard output. Note that if you use standard 51 | # output for logging but daemonize, logs will be sent to /dev/null 52 | logfile stdout 53 | 54 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 55 | # and optionally update the other syslog parameters to suit your needs. 56 | # syslog-enabled no 57 | 58 | # Specify the syslog identity. 59 | # syslog-ident redis 60 | 61 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 62 | # syslog-facility local0 63 | 64 | # Set the number of databases. The default database is DB 0, you can select 65 | # a different one on a per-connection basis using SELECT where 66 | # dbid is a number between 0 and 'databases'-1 67 | databases 16 68 | 69 | ################################ SNAPSHOTTING ################################# 70 | # 71 | # Save the DB on disk: 72 | # 73 | # save 74 | # 75 | # Will save the DB if both the given number of seconds and the given 76 | # number of write operations against the DB occurred. 
77 | # 78 | # In the example below the behaviour will be to save: 79 | # after 900 sec (15 min) if at least 1 key changed 80 | # after 300 sec (5 min) if at least 10 keys changed 81 | # after 60 sec if at least 10000 keys changed 82 | # 83 | # Note: you can disable saving at all commenting all the "save" lines. 84 | 85 | save 900 1 86 | save 300 10 87 | save 60 10000 88 | 89 | # Compress string objects using LZF when dump .rdb databases? 90 | # For default that's set to 'yes' as it's almost always a win. 91 | # If you want to save some CPU in the saving child set it to 'no' but 92 | # the dataset will likely be bigger if you have compressible values or keys. 93 | rdbcompression yes 94 | 95 | 96 | # The filename where to dump the DB 97 | dbfilename dump.rdb 98 | 99 | # The working directory. 100 | # 101 | # The DB will be written inside this directory, with the filename specified 102 | # above using the 'dbfilename' configuration directive. 103 | # 104 | # Also the Append Only File will be created inside this directory. 105 | # 106 | # Note that you must specify a directory here, not a file name. 
107 | dir /tmp/redis/ 108 | -------------------------------------------------------------------------------- /signalqueue/settings/test_async.py: -------------------------------------------------------------------------------- 1 | from ..settings import * 2 | 3 | SQ_RUNMODE = 'SQ_ASYNC_REQUEST' 4 | -------------------------------------------------------------------------------- /signalqueue/settings/test_sync.py: -------------------------------------------------------------------------------- 1 | from ..settings import * 2 | 3 | SQ_RUNMODE = 'SQ_SYNC' 4 | -------------------------------------------------------------------------------- /signalqueue/settings/urlconf.py: -------------------------------------------------------------------------------- 1 | from django.conf.urls import patterns, include, url 2 | 3 | from django.contrib import admin 4 | admin.autodiscover() 5 | 6 | import signalqueue 7 | signalqueue.autodiscover() 8 | 9 | urlpatterns = patterns('', 10 | url(r'^admin/', include(admin.site.urls)), 11 | ) 12 | -------------------------------------------------------------------------------- /signalqueue/signals.py: -------------------------------------------------------------------------------- 1 | 2 | from signalqueue import mappings 3 | from signalqueue.dispatcher import AsyncSignal 4 | 5 | test_signal = AsyncSignal(providing_args={ 6 | 7 | 'instance': mappings.ModelInstanceMapper, 8 | 'signal_label': mappings.LiteralValueMapper, 9 | 10 | }) -------------------------------------------------------------------------------- /signalqueue/static/signalqueue/coffee/jquery.signalqueue.coffee: -------------------------------------------------------------------------------- 1 | 2 | $ = jQuery 3 | ß = io.connect('http://queueserver.asio-otus.local/') 4 | 5 | class @SQStatus 6 | 7 | defaults = 8 | interval: 500 9 | queuename: 'default' 10 | 11 | constructor: (element, options) -> 12 | @elem = $(element) 13 | @options = $.extend {}, defaults, options 14 | @recently = 
[0,0,0,0,0,0,0,0,0] 15 | @interval_id = null 16 | 17 | start: () -> 18 | if not @interval_id 19 | @interval_id = window.setInterval => 20 | ß.emit 'status', @options['queuename'], (data) => 21 | qlen = data.queue_length 22 | lastvalues = @recently[..] 23 | lastvalues.shift() 24 | lastvalues.push qlen 25 | if (lastvalues.every (itm) -> (itm == 0)) 26 | @elem.html("Currently Idle") 27 | else 28 | @elem.html("#{ qlen } Queued Signals") 29 | , @options.interval 30 | 31 | stop: () -> 32 | if @interval_id 33 | window.clearInterval @interval_id 34 | 35 | sqstatus: (cmd, args...) -> 36 | command = "#{ cmd }".toLowerCase() 37 | return @each () -> 38 | instance = $.data this, 'sqstatus' 39 | if not instance 40 | $.data this, 'sqstatus', new SQStatus this, args 41 | else if typeof options is "string" 42 | instance[command] args... 43 | -------------------------------------------------------------------------------- /signalqueue/static/signalqueue/js/jquery.queuestatus.js: -------------------------------------------------------------------------------- 1 | 2 | (function ($) { 3 | 4 | /* 5 | 6 | JQuery plugin encapsulating display of a signal queue's status. 
7 | 8 | You can bind an element or group of elements to a queue by name, like so: 9 | 10 | $('#elem').queuestatus({ queuename: 'somequeue' }); 11 | 12 | If you only want to monitor the default queue, the name can be omitted: 13 | 14 | $('#elem').queuestatus(); 15 | 16 | Start monitoring with the 'start' call, and stop (predictibly) with 'stop': 17 | 18 | $('#elem').queuestatus('start'); 19 | $('#elem').queuestatus('stop'); 20 | 21 | */ 22 | 23 | var JSON = JSON || {}; 24 | 25 | /// Stringifier from http://www.sitepoint.com/javascript-json-serialization/ 26 | JSON.stringify = JSON.stringify || function (obj) { 27 | 28 | var t = typeof(obj); 29 | 30 | if (t != "object" || obj === null) { 31 | 32 | // simple data type 33 | if (t == "string") obj = '"'+obj+'"'; 34 | return String(obj); 35 | 36 | } else { 37 | 38 | // recurse array or object 39 | var n, v, json = [], arr = (obj && obj.constructor == Array); 40 | 41 | for (n in obj) { 42 | v = obj[n]; t = typeof(v); 43 | 44 | if (t == "string") v = '"'+v+'"'; 45 | else if (t == "object" && v !== null) v = JSON.stringify(v); 46 | 47 | json.push((arr ? "" : '"' + n + '":') + String(v)); 48 | } 49 | 50 | return (arr ? "[" : "{") + String(json) + (arr ? 
"]" : "}"); 51 | } 52 | }; 53 | 54 | $.fn.queuestatus = function () { 55 | var _defaults = { 56 | interval: 500, 57 | queuename: 'default' 58 | }; 59 | 60 | var options = this.data('options') || _defaults; 61 | 62 | var methods = { 63 | init: function (_options) { 64 | options = $.extend(options, _options); 65 | var self = this; 66 | 67 | self.data('recently', [0,0,0,0,0,0,0,0,0]); 68 | self.data('options', options); 69 | self.data('sock', null); 70 | 71 | var sock = null; 72 | if ('endpoint' in options && options['endpoint']) { 73 | sock = new WebSocket(options['endpoint']); 74 | sock.onopen = function () {}; 75 | sock.onclose = function () {}; 76 | sock.onmessage = function (e) { 77 | var d = $.parseJSON(e.data); 78 | if (options.queuename in d) { 79 | var qlen = d[options.queuename] 80 | lastvalues = self.data('recently'); 81 | lastvalues.shift(); 82 | lastvalues.push(qlen); 83 | if (lastvalues.every(function (itm) { return itm == 0; })) { 84 | self.each(function () { 85 | var elem = $(this); 86 | elem.html("Currently Idle"); 87 | }); 88 | } else { 89 | self.each(function () { 90 | var elem = $(this); 91 | elem.html("" + qlen + " Queued Signals"); 92 | }); 93 | } 94 | self.data('recently', lastvalues); 95 | } 96 | } 97 | } 98 | self.data('sock', sock); 99 | return self.each(function () { 100 | var elem = $(this); 101 | elem.data('sock', sock); 102 | }); 103 | }, 104 | start: function () { 105 | return this.each(function () { 106 | var elem = $(this); 107 | var interval_id = elem.data('interval_id'); 108 | 109 | /* 110 | if (typeof(sock.send) === "undefined") { 111 | console.log("Undefined socket: "+sock); 112 | sock = null 113 | elem.data('sock', null); 114 | return; 115 | } 116 | 117 | try { 118 | sock.send(JSON.stringify({ status: options['queuename'] })); 119 | } catch (e) { 120 | console.log("Can't send: "+e); 121 | sock = null 122 | elem.data('sock', null); 123 | return; 124 | } 125 | */ 126 | 127 | if (!interval_id) { 128 | interval_id = 
window.setInterval(function () { 129 | var ssck = elem.data('sock'); 130 | var opts = elem.data('options'); 131 | if (typeof(ssck.send) !== "undefined") { 132 | var out = { status: opts['queuename'] }; 133 | try { 134 | ssck.send(JSON.stringify(out)); 135 | } catch (e) { 136 | console.log("Can't send: "+e); 137 | ssck = null; 138 | 139 | ssck = new WebSocket(opts['endpoint']); 140 | elem.data('sock', ssck); 141 | } 142 | } 143 | }, 500); 144 | } 145 | elem.data('interval_id', interval_id); 146 | }); 147 | }, 148 | stop: function () { 149 | return this.each(function () { 150 | var elem = $(this); 151 | var interval_id = elem.data('interval_id'); 152 | if (interval_id) { 153 | window.clearInterval(interval_id); 154 | } 155 | elem.data('interval_id', null); 156 | 157 | var sock = elem.data('sock'); 158 | sock.disconnect() 159 | sock = null; 160 | 161 | elem.data('sock', sock); 162 | }); 163 | } 164 | }; 165 | 166 | var method = arguments[0]; 167 | 168 | if (typeof(methods[method]) !== 'undefined') { 169 | return methods[method].apply(this, Array.prototype.slice.call(arguments, 1)); 170 | } else { 171 | return methods.init.apply(this, arguments); 172 | } 173 | }; 174 | })(jQuery); -------------------------------------------------------------------------------- /signalqueue/static/signalqueue/js/jquery.signalqueue.js: -------------------------------------------------------------------------------- 1 | // Generated by CoffeeScript 1.3.3 2 | (function() { 3 | var $, ß, 4 | __slice = [].slice; 5 | 6 | $ = jQuery; 7 | 8 | ß = io.connect('http://queueserver.asio-otus.local/'); 9 | 10 | this.SQStatus = (function() { 11 | var defaults; 12 | 13 | defaults = { 14 | interval: 500, 15 | queuename: 'default' 16 | }; 17 | 18 | function SQStatus(element, options) { 19 | this.elem = $(element); 20 | this.options = $.extend({}, defaults, options); 21 | this.recently = [0, 0, 0, 0, 0, 0, 0, 0, 0]; 22 | this.interval_id = null; 23 | } 24 | 25 | SQStatus.prototype.start = function() { 26 
| var _this = this; 27 | if (!this.interval_id) { 28 | return this.interval_id = window.setInterval(function() { 29 | return ß.emit('status', _this.options['queuename'], function(data) { 30 | var lastvalues, qlen; 31 | qlen = data.queue_length; 32 | lastvalues = _this.recently.slice(0); 33 | lastvalues.shift(); 34 | lastvalues.push(qlen); 35 | if (lastvalues.every(function(itm) { 36 | return itm === 0; 37 | })) { 38 | return _this.elem.html("Currently Idle"); 39 | } else { 40 | return _this.elem.html("" + qlen + " Queued Signals"); 41 | } 42 | }); 43 | }, this.options.interval); 44 | } 45 | }; 46 | 47 | SQStatus.prototype.stop = function() { 48 | if (this.interval_id) { 49 | return window.clearInterval(this.interval_id); 50 | } 51 | }; 52 | 53 | return SQStatus; 54 | 55 | })(); 56 | 57 | $.fn.extend({ 58 | sqstatus: function() { 59 | var args, cmd, command; 60 | cmd = arguments[0], args = 2 <= arguments.length ? __slice.call(arguments, 1) : []; 61 | command = ("" + cmd).toLowerCase(); 62 | return this.each(function() { 63 | var instance; 64 | instance = $.data(this, 'sqstatus'); 65 | if (!instance) { 66 | return $.data(this, 'sqstatus', new SQStatus(this, args)); 67 | } else if (typeof options === "string") { 68 | return instance[command].apply(instance, args); 69 | } 70 | }); 71 | } 72 | }); 73 | 74 | }).call(this); 75 | -------------------------------------------------------------------------------- /signalqueue/static/socket.io-client/WebSocketMain.swf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fish2000/django-signalqueue/2d98b8e6b7bf2bd131c4d7c18d54d7fb008ed989/signalqueue/static/socket.io-client/WebSocketMain.swf -------------------------------------------------------------------------------- /signalqueue/static/socket.io-client/WebSocketMainInsecure.swf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fish2000/django-signalqueue/2d98b8e6b7bf2bd131c4d7c18d54d7fb008ed989/signalqueue/static/socket.io-client/WebSocketMainInsecure.swf -------------------------------------------------------------------------------- /signalqueue/static/socket.io-client/flashpolicy.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /signalqueue/templates/admin/app_index.html: -------------------------------------------------------------------------------- 1 | {% extends "admin/index.html" %} 2 | {% load i18n signalqueue_status %} 3 | 4 | {% if not is_popup %} 5 | 6 | {% block breadcrumbs %} 7 | {% endblock %} 12 | 13 | {% endif %} 14 | 15 | {% block sidebar %} 16 | 17 | {% sidebar_queue_module %} 18 | 19 |
20 | 21 | {% endblock %} 22 | -------------------------------------------------------------------------------- /signalqueue/templates/admin/index_with_queues.html: -------------------------------------------------------------------------------- 1 | {% extends "admin/index.html" %} 2 | {% load i18n adminmedia signalqueue_status %} 3 | 4 | {% block sidebar %} 5 | 6 | {% sidebar_queue_module %} 7 | 8 |
9 | 10 | {{ block.super }} 11 | 12 | {% endblock %} 13 | -------------------------------------------------------------------------------- /signalqueue/templates/admin/sidebar_queue_module.html: -------------------------------------------------------------------------------- 1 | {% load i18n signalqueue_status %} 2 | 3 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /signalqueue/templates/status.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | yo dogg 8 | 9 | 10 | 11 | 65 | 66 | 67 | 68 | 69 |
70 | 71 | 72 |

Queue “{{ queue_name }}” contains {{ count }} enqueued signals

73 | 74 | 75 | {% for item in items %} 76 | 77 | {% for key, thingy in item.items %} 78 | {% if key == 'instance' %} 79 | 82 | {% else %} 83 | 86 | {% endif %} 87 | {% endfor %} 88 | 89 | {% endfor %} 90 |
80 | {{ key }}: {{ thingy }} 81 | 84 | {{ key }}: {{ thingy }} 85 |
91 | 92 |

import os
from django.conf import settings
from django import template
from signalqueue.worker import queues

register = template.Library()

# get the URL for a static asset (css, js, et cetera.)
static = lambda pth: os.path.join(settings.STATIC_URL, 'signalqueue', pth)
sockio = lambda pth: os.path.join(settings.STATIC_URL, 'socket.io-client', pth)

@register.simple_tag
def queue_length(queue_name):
    """ Template tag: the current length of the named queue,
        or -1 when the queue is unknown or unreachable. """
    if queue_name in queues:
        try:
            return queues[queue_name].count()
        except Exception:
            # A queue whose backend is down shouldn't take the admin page
            # down with it -- signal the problem with -1 instead. (This was
            # a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            return -1
    return -1

@register.simple_tag
def queue_classname(queue_name):
    """ Template tag: the class name of the named queue's backend. """
    return str(queues[queue_name].__class__.__name__)

@register.simple_tag
def sock_status_url():
    """ Template tag: the websocket URL for the status endpoint
        on the local queue-worker server. """
    import socket
    return "ws://%s:%s/sock/status" % (socket.gethostname().lower(), settings.SQ_WORKER_PORT)

@register.inclusion_tag('admin/sidebar_queue_module.html', takes_context=True)
def sidebar_queue_module(context):
    """ Render the admin-sidebar queue module: the default queue, every
        other configured queue, and the static assets the module needs. """
    qs = dict(queues.items())
    default = qs.pop('default')
    return dict(
        default=default, queues=qs,
        queue_javascript=static('js/jquery.signalqueue.js'),
        socketio_javascript=sockio('socket.io.js'),
        socketio_swf=sockio('WebSocketMain.swf'))
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | Run this file to test `signalqueue` -- 5 | You'll want to have `nose` and `django-nose` installed. 6 | 7 | """ 8 | def main(): 9 | rp = None 10 | from signalqueue import settings as signalqueue_settings 11 | 12 | logging_format = '--logging-format="%(asctime)s %(levelname)-8s %(name)s:%(lineno)03d:%(funcName)s %(message)s"' 13 | signalqueue_settings.__dict__.update({ 14 | "NOSE_ARGS": [ 15 | '--rednose', '--nocapture', '--nologcapture', '-v', 16 | logging_format] }) 17 | 18 | from django.conf import settings 19 | settings.configure(**signalqueue_settings.__dict__) 20 | import logging.config 21 | logging.config.dictConfig(settings.LOGGING) 22 | 23 | import subprocess, os 24 | redis_dir = '/tmp/redis/' 25 | if not os.path.isdir(redis_dir): 26 | try: 27 | os.makedirs(redis_dir) # make redis as happy as possible 28 | except OSError: 29 | print "- Can't create Redis data dir %s" % redis_dir 30 | 31 | rp = subprocess.Popen([ 32 | 'redis-server', 33 | "%s" % os.path.join( 34 | signalqueue_settings.approot, 35 | 'settings', 'redis-compatible.conf'), 36 | ]) 37 | 38 | from django.core.management import call_command 39 | call_command('test', 'signalqueue.tests', 40 | interactive=False, traceback=True, verbosity=2) 41 | 42 | if rp is not None: 43 | print "Shutting down Redis test process (pid = %s)" % rp.pid 44 | rp.kill() 45 | 46 | tempdata = settings.tempdata 47 | print "Deleting test data (%s)" % tempdata 48 | os.rmdir(tempdata) 49 | 50 | import sys 51 | sys.exit(0) 52 | 53 | if __name__ == '__main__': 54 | main() -------------------------------------------------------------------------------- /signalqueue/utils.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import print_function 3 | 4 | #import os, sys, traceback 5 | import setproctitle 6 | import sys 7 | import os 8 | 9 | 
class FakeLogger(object):
    """ Stand-in logger of last resort: quacks like the stdlib `logging`
        API but just prints every message to stdout. """

    def __init__(self, *args, **kwargs):
        super(FakeLogger, self).__init__(*args, **kwargs)
        # Alias the standard logging-level methods to the catch-all
        # printer, so this object can be dropped in wherever a real
        # logger instance is expected.
        level_methods = ('critical', 'debug', 'error', 'exception', 'info', 'warning')
        for level_method in level_methods:
            setattr(self, level_method, self.logg)

    def logg(self, msg):
        # Catch-all: delegate to log() with a dummy level of zero.
        self.log(0, msg)

    def log(self, level, msg):
        # The level argument is accepted for API compatibility but ignored.
        print("signalqueue.utils.FakeLogger: %s" % msg)
class ADict(dict):
    """
    Attribute-access dictionary: `adict.key` behaves like `adict[key]`.

    The 'A' is for 'Access'. Missing keys surface as AttributeError
    (not KeyError) when reached through the attribute protocol, which
    keeps hasattr()/getattr() semantics intact.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)

    def __getattr__(self, name):
        # EAFP: one lookup, with the failure remapped to AttributeError.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
# To consistently use the fastest serializer possible, use:
#       from signalqueue.utils import json
# ... so if you need to swap a library, do it here.
# (Typo fix: the fallback notices previously read "in leu of".)
try:
    import ujson as json
except ImportError:
    logg.info("--- Loading czjson in lieu of ujson")
    try:
        import czjson as json
    except ImportError:
        logg.info("--- Loading yajl in lieu of czjson")
        try:
            import yajl as json
            # yajl's module surface varies between releases -- make sure
            # the two calls we actually use exist before committing to it.
            assert hasattr(json, 'loads')
            assert hasattr(json, 'dumps')
        except (ImportError, AssertionError):
            logg.info("--- Loading simplejson in lieu of yajl")
            try:
                import simplejson as json
            except ImportError:
                logg.info("--- Loading stdlib json module in lieu of simplejson")
                import json
% runmode_setting) 33 | else: 34 | runmode = runmodes.get(runmode_setting) 35 | else: 36 | if hasattr(settings, 'SQ_ASYNC'): 37 | if not bool(settings.SQ_ASYNC): 38 | runmode = runmodes['SQ_SYNC'] 39 | else: 40 | runmode = runmodes['SQ_ASYNC_REQUEST'] # the default if neither settings.SQ_ASYNC or settings.SQ_RUNMODE are set 41 | else: 42 | runmode = runmodes['SQ_ASYNC_REQUEST'] # the default if neither settings.SQ_ASYNC or settings.SQ_RUNMODE are set 43 | 44 | 45 | queues = backends.ConnectionHandler(settings.SQ_QUEUES, runmode) 46 | queue = SimpleLazyObject(lambda: queues.get('default')) 47 | 48 | -------------------------------------------------------------------------------- /signalqueue/worker/backends.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | backends.py 5 | 6 | Driver classes -- each allows the queue workers to talk to a different backend server. 7 | 8 | Created by FI$H 2000 on 2011-06-29. 9 | Copyright (c) 2011 OST, LLC. All rights reserved. 10 | 11 | """ 12 | 13 | from django.core.exceptions import ImproperlyConfigured 14 | from signalqueue.utils import import_module, logg 15 | from signalqueue.worker.base import QueueBase 16 | 17 | class RedisQueue(QueueBase): 18 | 19 | def __init__(self, *args, **kwargs): 20 | """ 21 | The RedisQueue is the default queue backend. The QueueBase methods are mapped to 22 | a Redis list; the redis-py module is required: 23 | 24 | https://github.com/andymccurdy/redis-py 25 | 26 | Redis is simple and fast, out of the box. The hiredis C library and python wrappers 27 | can be dropped into your install, to make it faster: 28 | 29 | https://github.com/pietern/hiredis-py 30 | 31 | To configure Redis, we pass the queue OPTIONS dict off wholesale to the python 32 | redis constructor -- Simply stick any Redis kwarg options you need into the OPTIONS 33 | setting. 
All the redis options can be furthermore specified in the RedisQueue constructor 34 | as a queue_options dict to override settings.py. 35 | 36 | """ 37 | super(RedisQueue, self).__init__(*args, **kwargs) 38 | 39 | try: 40 | import redis 41 | except ImportError: 42 | raise IOError("WTF: Can't import redis python module.") 43 | else: 44 | try: 45 | import hiredis 46 | except ImportError: 47 | logg.warn("can't import the `hiredis` package") 48 | logg.warn("consider installing `hiredis` for native-speed access to a Redis queue") 49 | logg.warn("you can see the difference even when running the tests --") 50 | logg.warn("-- it's a good move, trust me on that") 51 | 52 | self.r = redis.Redis(**self.queue_options) 53 | self.exceptions = ( 54 | redis.exceptions.ConnectionError, 55 | redis.exceptions.DataError, 56 | redis.exceptions.InvalidResponse) 57 | 58 | try: 59 | self.r.ping() 60 | except self.exceptions, err: 61 | logg.error("connection to Redis server failed: %s" % err) 62 | self.r = None 63 | 64 | def ping(self): 65 | if self.r is not None: 66 | try: 67 | return self.r.ping() 68 | except (self.ConnectionError, AttributeError), err: 69 | logg.error("no Redis connection available: %s" % err) 70 | return False 71 | return False 72 | 73 | def push(self, value): 74 | self.r.lpush(self.queue_name, value) 75 | 76 | def pop(self): 77 | return self.r.lpop(self.queue_name) 78 | 79 | def count(self): 80 | return self.r.llen(self.queue_name) 81 | 82 | def clear(self): 83 | self.r.delete(self.queue_name) 84 | 85 | def values(self, floor=0, ceil=-1): 86 | return list(self.r.lrange(self.queue_name, floor, ceil)) 87 | 88 | class RedisSetQueue(RedisQueue): 89 | """ 90 | RedisSetQueue uses a Redis set. Use this queue backend if you want to ensure signals aren't 91 | dequeued and sent more than once. 92 | 93 | I'll be honest here -- I did not originally intend to write any of this configgy stuff or 94 | provide multiple backend implementations or any of that. 
class RedisSetQueue(RedisQueue):
    """
    Variant of RedisQueue backed by a Redis set rather than a list.

    Use this backend when you want signal values de-duplicated at the
    datastore level: a set holds each serialized signal at most once, so
    a signal can't be dequeued and re-sent multiple times the way it can
    with a plain list. Connection handling is inherited wholesale from
    RedisQueue; only the five datastore primitives change, swapping the
    list commands (LPUSH/LPOP/LLEN/LRANGE) for their set counterparts
    (SADD/SPOP/SCARD/SMEMBERS).
    """
    def __init__(self, *args, **kwargs):
        super(RedisSetQueue, self).__init__(*args, **kwargs)

    def push(self, value):
        # SADD: adding an already-present value is a no-op -- that is
        # precisely the de-duplication this backend exists to provide.
        self.r.sadd(self.queue_name, value)

    def pop(self):
        # SPOP removes and returns an arbitrary member; sets are unordered,
        # so no FIFO guarantee is made here.
        return self.r.spop(self.queue_name)

    def count(self):
        return self.r.scard(self.queue_name)

    def clear(self):
        # Pop until exhausted -- SPOP yields a falsy None once empty.
        while self.r.spop(self.queue_name):
            pass

    def values(self, **kwargs):
        return list(self.r.smembers(self.queue_name))
164 | * Less moving parts -- useful if you don't want to set up another service 165 | (e.g. Redis) to start working with queued signals. 166 | * Fallback functionality -- you can add logic to set up a database queue 167 | if the queue backend you want to use is somehow unavailable, to keep from 168 | losing signals e.g. while scaling Amazon AMIs or transitioning your 169 | servers to new hosts. 170 | 171 | """ 172 | def __new__(cls, *args, **kwargs): 173 | 174 | if 'app_label' in kwargs['queue_options']: 175 | if 'modl_name' in kwargs['queue_options']: 176 | 177 | from django.db.models.loading import cache 178 | mgr_attr = kwargs['queue_options'].get('manager', "objects") 179 | 180 | ModlCls = cache.get_model( 181 | app_label=kwargs['queue_options'].get('app_label'), 182 | model_name=kwargs['queue_options'].get('modl_name')) 183 | 184 | if ModlCls is not None: 185 | mgr_instance = getattr(ModlCls, mgr_attr) 186 | mgr_instance.runmode = kwargs.pop('runmode', None) 187 | mgr_instance.queue_name = kwargs.pop('queue_name') 188 | mgr_instance.queue_options = {} 189 | mgr_instance.queue_options.update(kwargs.pop('queue_options', {})) 190 | return mgr_instance 191 | 192 | else: 193 | return QueueBase() 194 | 195 | else: 196 | raise ImproperlyConfigured( 197 | "DatabaseQueueProxy's queue configuration requires the name of a model class to be specified in in 'modl_name'.") 198 | 199 | else: 200 | raise ImproperlyConfigured( 201 | "DatabaseQueueProxy's queue configuration requires an app specified in 'app_label', in which the definition for a model named 'modl_name' can be found.") 202 | 203 | """ 204 | Class-loading functions. 205 | 206 | ConnectionHandler, import_class() and load_backend() are based on original implementations 207 | from the django-haystack app: 208 | 209 | https://github.com/toastdriven/django-haystack/blob/master/haystack/utils/loading.py 210 | https://github.com/toastdriven/django-haystack/ 211 | 212 | See the Haystack source for more on these. 
# Class-loading functions.
#
# ConnectionHandler, import_class() and load_backend() are based on original
# implementations from the django-haystack app:
#
#   https://github.com/toastdriven/django-haystack/blob/master/haystack/utils/loading.py
#   https://github.com/toastdriven/django-haystack/
#
# See the Haystack source for more on these.

def import_class(path):
    """ Import and return the class named by a dotted Python path. """
    path_bits = path.split('.') # Cut off the class name at the end.
    class_name = path_bits.pop()
    module_path = '.'.join(path_bits)
    module_itself = import_module(module_path)
    if not hasattr(module_itself, class_name):
        raise ImportError(
            "The Python module '%s' has no '%s' class." % (module_path, class_name))
    return getattr(module_itself, class_name)

def load_backend(full_backend_path):
    """ Load a QueueBase subclass from a full dotted path, checking first
        that the path has at least a module and a class component. """
    path_bits = full_backend_path.split('.')
    if len(path_bits) < 2:
        raise ImproperlyConfigured(
            "The provided backend '%s' is not a complete Python path to a QueueBase subclass." % full_backend_path)
    return import_class(full_backend_path)

class ConnectionHandler(object):
    """ Lazy registry of configured queue backends, keyed by alias.

        Backends are instantiated on first access (__getitem__) from the
        settings-style dict handed to the constructor; setting `runmode`
        propagates the new mode to every backend built so far. """

    def __init__(self, connections_info, runmode):
        self.connections_info = connections_info
        self._connections = {}      # alias -> instantiated backend (lazy)
        self._runmode = runmode
        self._index = None

    def _get_runmode(self):
        return self._runmode
    def _set_runmode(self, mde):
        # Keep already-instantiated backends in sync with the handler.
        for key in self._connections.keys():
            if hasattr(self._connections[key], 'runmode'):
                self._connections[key].runmode = mde
        self._runmode = mde

    runmode = property(_get_runmode, _set_runmode)

    def ensure_defaults(self, alias):
        """ Validate that `alias` is configured, filling in the default
            ENGINE when the configuration omits one. """
        try:
            conn = self.connections_info[alias]
        except KeyError:
            raise ImproperlyConfigured(
                "The key '%s' isn't an available connection in (%s)." % (alias, ','.join(self.connections_info.keys())))

        default_engine = 'signalqueue.worker.backends.RedisSetQueue'

        if not conn.get('ENGINE'):
            # BUGFIX: the format string has two placeholders but the original
            # passed only `default_engine` (and not as a tuple), so this
            # warning raised a TypeError instead of logging.
            logg.warn(
                "connection '%s' doesn't specify an ENGINE, using the default engine: '%s'" % (
                    alias, default_engine))
            # default to using the Redis set backend
            conn['ENGINE'] = default_engine

    def __getitem__(self, key):
        if key in self._connections:
            return self._connections[key]

        self.ensure_defaults(key)

        ConnectionClass = load_backend(self.connections_info[key]['ENGINE'])
        self._connections[key] = ConnectionClass(
            runmode=self._runmode,
            queue_name=str(key),
            queue_interval=self.connections_info[key].get('INTERVAL', None),
            queue_options=self.connections_info[key].get('OPTIONS', {}))

        self._connections[key].runmode = self._runmode

        return self._connections[key]

    def __setitem__(self, key, val):
        if not isinstance(val, QueueBase):
            raise ValueError(
                "Can't add instance of non-QueueBase descent '%s' to the ConnectionHandler." % val)
        if not val.runmode == self._runmode:
            # BUGFIX: three placeholders but only two arguments in the
            # original -- the formatting itself raised a TypeError,
            # masking this (otherwise informative) error.
            raise AttributeError(
                "Queue backend '%s' was instantiated with runmode %s but the ConnectionHandler is in runmode %s" % (
                    val, val.runmode, self._runmode))
        self._connections[key] = val

    def get(self, key, default=None):
        """ Dict-style get(): the backend for `key`, or `default` when
            lookup fails and a non-None default was supplied. """
        try:
            return self[key]
        except Exception:
            # Was a bare `except:` -- narrowed so SystemExit and
            # KeyboardInterrupt still propagate.
            if default is None:
                raise
            return default

    def all(self):
        return [self[alias] for alias in self.connections_info]

    def keys(self):
        return self.connections_info.keys()

    def items(self):
        return [(qn, self[qn]) for qn in self.keys()]

    def values(self):
        return self.all()

    def __iter__(self):
        return (self[alias] for alias in self.connections_info)

    def __len__(self):
        return len(self.keys())

    def __contains__(self, item):
        return item in dict(self.items())
24 | 25 | The JSON structure of a serialized signal value looks like this: 26 | 27 | { 28 | "instance": { 29 | "modl_name": "testmodel", 30 | "obj_id": 1, 31 | "app_label": "signalqueue" 32 | }, 33 | "signal": { 34 | "signalqueue.tests": "test_sync_function_signal" 35 | }, 36 | "sender": { 37 | "modl_name": "testmodel", 38 | "app_label": "signalqueue" 39 | }, 40 | "enqueue_runmode": 4 41 | } 42 | 43 | """ 44 | runmode = None 45 | queue_name = None 46 | queue_interval = None 47 | queue_options = {} 48 | 49 | def __init__(self, *args, **kwargs): 50 | """ 51 | It's a good idea to call super() first in your overrides, 52 | to take care of params and whatnot like these. 53 | 54 | """ 55 | self.runmode = kwargs.pop('runmode', None) 56 | self.queue_name = kwargs.pop('queue_name', "default") 57 | self.queue_interval = kwargs.pop('queue_interval', None) 58 | self.queue_options = {} 59 | self.queue_options.update(kwargs.pop('queue_options', {})) 60 | super(QueueBase, self).__init__() 61 | 62 | def ping(self): 63 | raise NotImplementedError( 64 | "WTF: %s needs a Queue.ping() implementaton" % 65 | self.__class__.__name__) 66 | 67 | def push(self, value): 68 | raise NotImplementedError( 69 | "WTF: %s backend needs a Queue.push() implementaton" % 70 | self.__class__.__name__) 71 | 72 | def pop(self): 73 | raise NotImplementedError( 74 | "WTF: %s backend needs a Queue.pop() implementaton" % 75 | self.__class__.__name__) 76 | 77 | def count(self): 78 | return NotImplementedError( 79 | "WTF: %s backend needs a Queue.count() implementaton" % 80 | self.__class__.__name__) 81 | 82 | def clear(self): 83 | raise NotImplementedError( 84 | "WTF: %s backend needs a Queue.flush() implementaton" % 85 | self.__class__.__name__) 86 | 87 | def values(self, **kwargs): 88 | raise NotImplementedError( 89 | "WTF: %s backend needs a Queue.values() implementaton" % 90 | self.__class__.__name__) 91 | 92 | def enqueue(self, signal, sender=None, **kwargs): 93 | """ Serialize the parameters of a signal 
call, encode 94 | the serialized structure, and push the encoded 95 | string onto the queue. """ 96 | 97 | if signal.regkey is not None: 98 | if self.ping(): 99 | queue_json = { 100 | 'signal': { signal.regkey: signal.name }, 101 | #'sender': None, 102 | 'enqueue_runmode': self.runmode } 103 | 104 | if sender is not None: 105 | queue_json.update({ 106 | 'sender': dict( 107 | app_label=sender._meta.app_label, 108 | modl_name=sender._meta.object_name.lower()) }) 109 | 110 | for k, v in kwargs.items(): 111 | queue_json.update({ k: signal.mapping.demap(v), }) 112 | 113 | #print queue_json 114 | self.push(json.dumps(queue_json)) 115 | return queue_json 116 | else: 117 | raise signalqueue.SignalRegistryError("Signal has no regkey value.") 118 | 119 | def retrieve(self): 120 | """ Pop the queue, decode the popped signal without deserializing, 121 | returning the serialized data. """ 122 | 123 | if self.count() > 0: 124 | out = self.pop() 125 | if out is not None: 126 | return json.loads(out) 127 | return None 128 | 129 | def dequeue(self, queued_signal=None): 130 | """ Deserialize and execute a signal, either from the queue or as per the contents 131 | of the queued_signal kwarg. 132 | 133 | If queued_signal contains a serialized signal call datastructure,* dequeue() 134 | will deserialize and execute that serialized signal without popping the queue. 135 | If queued_signal is None, it will call retrieve() to pop the queue for the next 136 | signal, which it will execute if one is returned successfully. 137 | 138 | * See the QueueBase docstring for an example. 
""" 139 | 140 | from django.db.models.loading import cache 141 | 142 | if queued_signal is None: 143 | queued_signal = self.retrieve() 144 | 145 | if queued_signal is not None: 146 | #logg.debug("Dequeueing signal: %s" % queued_signal) 147 | pass 148 | else: 149 | return (None, None) 150 | 151 | signal_dict = queued_signal.get('signal') 152 | sender_dict = queued_signal.get('sender') 153 | regkey, name = signal_dict.items()[0] 154 | sender = None 155 | 156 | # specifying a sender is optional. 157 | if sender_dict is not None: 158 | try: 159 | sender = cache.get_model( 160 | str(sender_dict['app_label']), 161 | str(sender_dict['modl_name'])) 162 | except (KeyError, AttributeError), err: 163 | logg.info("*** Error deserializing sender_dict: %s" % err) 164 | sender = None 165 | 166 | enqueue_runmode = queued_signal.get('enqueue_runmode', runmodes['SQ_ASYNC_REQUEST']) 167 | 168 | kwargs = { 169 | 'dequeue_runmode': self.runmode, 170 | 'enqueue_runmode': enqueue_runmode, 171 | } 172 | 173 | thesignal = None 174 | if regkey in signalqueue.SQ_DMV: 175 | for signal in signalqueue.SQ_DMV[regkey]: 176 | if signal.name == name: 177 | thesignal = signal 178 | break 179 | else: 180 | raise signalqueue.SignalRegistryError( 181 | "Signal '%s' not amongst the registered: %s)." % ( 182 | regkey, ', '.join(signalqueue.SQ_DMV.keys()))) 183 | 184 | if thesignal is not None: 185 | for k, v in queued_signal.items(): 186 | if k not in ('signal', 'sender', 'enqueue_runmode', 'dequeue_runmode'): 187 | kwargs.update({ k: thesignal.mapping.remap(v), }) 188 | 189 | # result_list is a list of tuples, each containing a reference 190 | # to a callback function at [0] and that callback's return at [1] 191 | # ... this is per what the Django signal send() implementation returns; 192 | # AsyncSignal.send_now() returns whatever it gets from Signal.send(). 
193 | result_list = self.dispatch(thesignal, sender=sender, **kwargs) 194 | return (queued_signal, result_list) 195 | 196 | else: 197 | raise signalqueue.SignalRegistryError( 198 | "No registered signals named '%s'." % name) 199 | 200 | def dispatch(self, signal, sender, **kwargs): 201 | return signal.send_now(sender=sender, **kwargs) 202 | 203 | def next(self): 204 | """ 205 | Retrieve and return a signal from the queue without executing it. 206 | 207 | This allows one to iterate through a queue with access to the signal data, 208 | and control over the dequeue execution -- exceptions can be caught, signals 209 | can be conditionally dealt with, and so on, as per your needs. 210 | 211 | This example script dequeues and executes all of the signals in one queue. 212 | If a signal's execution raises a specific type of error, its call data is requeued 213 | into a secondary backup queue (which the backup queue's contents can be used however 214 | it may most please you -- e.g. dequeued into an amenable execution environment; 215 | inspected as a blacklist by the signal-sending code to prevent known-bad calls; 216 | analytically aggregated into pie charts in real-time and displayed distractingly 217 | across the phalanx of giant flatscreens festooning the walls of the conference room 218 | you stand in when subjecting yourself and your big pitch to both the harsh whim 219 | of the venture capitalists whom you manage to coax into your office for meetings 220 | and the simultaneously indolent and obsequious Skype interview questions from 221 | B-list TechCrunch blog writers in search of blurbs they can grind into filler 222 | for their daily link-baiting top-ten-reasons-why contribution to the ceaseless 223 | maelstrom that is the zeitgeist of online technology news; et cetera ad nauseum): 224 | 225 | 226 | from myapp.logs import logging 227 | from myapp.exceptions import MyDequeueError 228 | from signalqueue import SignalRegistryError 229 | import math, 
signalqueue.worker 230 | myqueue = signalqueue.worker.queues['myqueue'] 231 | backupqueue = signalqueue.worker.queues['backup'] 232 | 233 | tries = 0 234 | wins = 0 235 | do_overs = 0 236 | perc = lambda num, tot: int(math.floor((float(num)/float(tot))*100)) 237 | logging.info("Dequeueing %s signals from queue '%s'..." % (tries, myqueue.queue_name)) 238 | 239 | for next_signal in myqueue: 240 | tries += 1 241 | try: 242 | result, spent_signal = myqueue.dequeue(queued_signal=next_signal) 243 | except MyDequeueError, err: 244 | # execution went awry but not catastrophically so -- reassign it to the backup queue 245 | logging.warn("Error %s dequeueing signal: %s" % (repr(err), str(next_signal))) 246 | logging.warn("Requeueing to backup queue: %s" % str(backupqueue.queue_name)) 247 | backupqueue.push(next_signal) 248 | do_overs += 1 249 | except (SignalRegistryError, AttributeError), err: 250 | # either this signal isn't registered or is somehow otherwise wack -- don't requeue it 251 | logging.error("Fatal error %s dequeueing signal: %s" % (repr(err), str(next_signal))) 252 | else: 253 | logging.info("Successful result %s from dequeued signal: %s " % (result, repr(spent_signal))) 254 | wins += 1 255 | 256 | logging.info("Successfully dequeued %s signals (%s%% of %s total) from queue '%s'" % 257 | wins, perc(wins, tries), tries, myqueue.queue_name) 258 | logging.info("Requeued %s signals (%s%% of %s total) into queue '%s'" % 259 | do_overs, perc(do_overs, tries), tries, backupqueue.queue_name) 260 | 261 | """ 262 | if not self.count() > 0: 263 | raise StopIteration 264 | return self.retrieve() 265 | 266 | def __iter__(self): 267 | return self 268 | 269 | def __getitem__(self, idx): 270 | """ Syntax sugar: myqueue[i] gives you the same value as myqueue.values()[i] """ 271 | return self.values().__getitem__(idx) 272 | 273 | def __setitem__(self, idx, val): 274 | raise NotImplementedError( 275 | "OMG: Queue backend doesn't define __setitem__() -- items at specific indexes 
cannot be explicitly set.") 276 | 277 | def __delitem__(self, idx, val): 278 | raise NotImplementedError( 279 | "OMG: Queue backend doesn't define __delitem__() -- items at specific indexes cannot be explicitly removed.") 280 | 281 | def __repr__(self): 282 | """ Returns a JSON-stringified array, containing all enqueued signals. """ 283 | return "[%s]" % ",".join([str(value) for value in self.values()]) 284 | 285 | def __str__(self): 286 | """ Returns a JSON-stringified array, containing all enqueued signals. """ 287 | return repr(self) 288 | 289 | def __unicode__(self): 290 | """ Returns a JSON-stringified array, containing all enqueued signals, 291 | properly pretty-printed. """ 292 | import json as library_json 293 | return u"%s" % library_json.dumps(library_json.loads(repr(self)), indent=4) 294 | -------------------------------------------------------------------------------- /signalqueue/worker/celeryqueue.py: -------------------------------------------------------------------------------- 1 | 2 | from celery import Task 3 | from celery.registry import tasks 4 | from kombu import Connection 5 | import kombu.exceptions 6 | 7 | import signalqueue 8 | from signalqueue.worker.base import QueueBase 9 | #from signalqueue.utils import logg 10 | 11 | def taskmaster(sig): 12 | class CelerySignalTask(Task): 13 | name = "%s:%s" % (sig.regkey, sig.name) 14 | store_errors_even_if_ignored = True 15 | ignore_result = False 16 | track_started = True 17 | acks_late = True 18 | 19 | def __init__(self): 20 | self.signal_regkey = sig.regkey 21 | self.signal_name = sig.name 22 | 23 | @property 24 | def signal(self): 25 | for registered_signal in signalqueue.SQ_DMV[self.signal_regkey]: 26 | if registered_signal.name == self.signal_name: 27 | return registered_signal 28 | return None 29 | 30 | def run(self, sender=None, **kwargs): 31 | self.signal.send_now(sender=sender, **kwargs) 32 | 33 | return CelerySignalTask 34 | 35 | class CeleryQueue(QueueBase): 36 | """ At some point this 
will adapt `django-signalqueue` for use 37 | with popular `(dj)celery` platform (but not today). 38 | 39 | When this class is done, I will discuss it here. """ 40 | 41 | def __init__(self, *args, **kwargs): 42 | super(CeleryQueue, self).__init__(*args, **kwargs) 43 | 44 | self.celery_queue_name = self.queue_options.pop('celery_queue_name', 'inactive') 45 | self.serializer = self.queue_options.pop('serializer', 'json') 46 | self.compression = self.queue_options.pop('compression', None) 47 | self.kc = Connection(**self.queue_options) 48 | self.kc.connect() 49 | 50 | self.qc = self.kc.SimpleQueue(name=self.celery_queue_name) 51 | 52 | def ping(self): 53 | return self.kc.connected and not self.qc.channel.closed 54 | 55 | def push(self, value): 56 | self.qc.put(value, 57 | compression=self.compression, serializer=None) 58 | 59 | def pop(self): 60 | virtual_message = self.qc.get(block=False, timeout=1) 61 | return virtual_message.payload 62 | 63 | def count(self): 64 | try: 65 | return self.qc.qsize() 66 | except kombu.exceptions.StdChannelError: 67 | self.qc.queue.declare() 68 | return 0 69 | 70 | def clear(self): 71 | self.qc.clear() 72 | 73 | def values(self, **kwargs): 74 | return [] 75 | 76 | def __getitem__(self, idx): 77 | #return self.values().__getitem__(idx) 78 | return '' 79 | 80 | def dispatch(self, signal, sender=None, **kwargs): 81 | task_name = "%s:%s" % (signal.regkey, signal.name) 82 | try: 83 | result = tasks[task_name].delay(sender=sender, **kwargs) 84 | except tasks.NotRegistered: 85 | pass 86 | else: 87 | return result 88 | 89 | -------------------------------------------------------------------------------- /signalqueue/worker/poolqueue.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | poolqueue.py 5 | 6 | Internal 'pooling' of signal-dispatcher instances 7 | that the tornado worker can safely deal with. 8 | 9 | Created by FI$H 2000 on 2011-07-05. 
10 | Copyright (c) 2011 OST, LLC. All rights reserved. 11 | 12 | """ 13 | from tornado.ioloop import PeriodicCallback 14 | 15 | class PoolQueue(object): 16 | 17 | def __init__(self, *args, **kwargs): 18 | super(PoolQueue, self).__init__() 19 | 20 | import signalqueue 21 | signalqueue.autodiscover() 22 | 23 | from django.conf import settings as django_settings 24 | from signalqueue.utils import logg 25 | from signalqueue.worker import backends 26 | from signalqueue import SQ_RUNMODES as runmodes 27 | 28 | self.active = kwargs.get('active', True) 29 | self.halt = kwargs.get('halt', False) 30 | self.logx = kwargs.get('log_exceptions', True) 31 | 32 | self.interval = 1 33 | self.queue_name = kwargs.get('queue_name', "default") 34 | 35 | self.runmode = runmodes['SQ_ASYNC_MGMT'] 36 | self.queues = backends.ConnectionHandler(django_settings.SQ_QUEUES, self.runmode) 37 | self.signalqueue = self.queues[self.queue_name] 38 | self.signalqueue.runmode = self.runmode 39 | self.logg = logg 40 | 41 | # use interval from the config if it exists 42 | interval = kwargs.get('interval', self.signalqueue.queue_interval) 43 | if interval is not None: 44 | self.interval = interval 45 | 46 | if self.interval > 0: 47 | 48 | if self.logx: 49 | if self.halt: 50 | self.shark = PeriodicCallback(self.joe_flacco, self.interval*10) 51 | else: 52 | self.shark = PeriodicCallback(self.ray_rice, self.interval*10) 53 | 54 | else: 55 | if self.halt: 56 | self.shark = PeriodicCallback(self.cueball_scratch, self.interval*10) 57 | else: 58 | self.shark = PeriodicCallback(self.cueball, self.interval*10) 59 | 60 | if self.active: 61 | self.shark.start() 62 | 63 | def stop(self): 64 | self.active = False 65 | self.shark.stop() 66 | 67 | def rerack(self): 68 | self.active = True 69 | self.shark.start() 70 | 71 | """ Non-logging cues """ 72 | 73 | def cueball(self): 74 | try: 75 | self.signalqueue.dequeue() 76 | except Exception, err: 77 | self.logg.info("--- Exception during dequeue: %s" % err) 78 | 79 | def 
cueball_scratch(self): 80 | try: 81 | self.signalqueue.dequeue() 82 | except Exception, err: 83 | self.logg.info("--- Exception during dequeue: %s" % err) 84 | if self.signalqueue.count() < 1: 85 | self.logg.info("Queue exhausted, exiting...") 86 | raise KeyboardInterrupt 87 | 88 | """ Logging cues (using the Raven client for Sentry) """ 89 | 90 | def ray_rice(self): 91 | from signalqueue.utils import log_exceptions 92 | with log_exceptions(): 93 | self.signalqueue.dequeue() 94 | 95 | def joe_flacco(self): 96 | from signalqueue.utils import log_exceptions 97 | with log_exceptions(): 98 | self.signalqueue.dequeue() 99 | if self.signalqueue.count() < 1: 100 | self.logg.info("Queue exhausted, exiting...") 101 | raise KeyboardInterrupt 102 | -------------------------------------------------------------------------------- /signalqueue/worker/supercell.py: -------------------------------------------------------------------------------- 1 | from os import path as op 2 | 3 | import tornado 4 | import tornado.web 5 | import tornado.httpserver 6 | import tornadio2 7 | import tornadio2.router 8 | import tornadio2.server 9 | import tornadio2.conn 10 | 11 | #ROOT = op.normpath(op.dirname(__file__)) 12 | from signalqueue.templatetags.signalqueue_status import sockio 13 | 14 | class IndexHandler(tornado.web.RequestHandler): 15 | "" "Regular HTTP handler to serve the chatroom page """ 16 | def get(self): 17 | self.render('index.html') 18 | 19 | 20 | class SocketIOHandler(tornado.web.RequestHandler): 21 | def get(self): 22 | self.render(sockio('socket.io.js')) 23 | 24 | 25 | class ChatConnection(tornadio2.conn.SocketConnection): 26 | clients = set() 27 | 28 | def on_open(self, info): 29 | #self.send("Welcome from the server.") 30 | self.clients.add(self) 31 | 32 | def on_message(self, message): 33 | # Pong message back 34 | for p in self.clients: 35 | p.send(message) 36 | 37 | def on_close(self): 38 | if self in self.clients: 39 | self.clients.remove(self) 40 | 41 | 42 | # Create 
tornadio server 43 | ChatRouter = tornadio2.router.TornadioRouter(ChatConnection) 44 | 45 | 46 | # Create socket application 47 | sock_app = tornado.web.Application( 48 | ChatRouter.urls, 49 | flash_policy_port=843, 50 | flash_policy_file=sockio('flashpolicy.xml'), 51 | socket_io_port=8002, 52 | ) 53 | 54 | # Create HTTP application 55 | http_app = tornado.web.Application([ 56 | (r"/", IndexHandler), 57 | (r"/socket.io.js", SocketIOHandler) 58 | ]) 59 | 60 | if __name__ == "__main__": 61 | import logging 62 | logging.getLogger().setLevel(logging.DEBUG) 63 | 64 | # Create http server on port 8001 65 | http_server = tornado.httpserver.HTTPServer(http_app) 66 | http_server.listen(8001) 67 | 68 | # Create tornadio server on port 8002, but don't start it yet 69 | tornadio2.server.SocketServer(sock_app, auto_start=False) 70 | 71 | # Start both servers 72 | tornado.ioloop.IOLoop.instance().start() -------------------------------------------------------------------------------- /signalqueue/worker/vortex.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | vortex.py 5 | 6 | The name 'tornado' was taken, you see. 7 | 8 | Created by FI$H 2000 on 2011-07-05. 9 | Copyright (c) 2011 OST, LLC. All rights reserved. 
10 | """ 11 | 12 | import sys, hashlib, curses, logging 13 | 14 | from django.conf import settings 15 | from django.template import Context, loader 16 | import tornado.options 17 | import tornado.web 18 | import tornado.websocket 19 | from tornado.httpserver import HTTPServer 20 | from tornado.ioloop import IOLoop 21 | from tornado.options import define 22 | from tornado.log import LogFormatter 23 | from signalqueue.utils import json 24 | from signalqueue.worker import queues 25 | from signalqueue.worker.poolqueue import PoolQueue 26 | 27 | define('port', default=settings.SQ_WORKER_PORT, help='Queue server HTTP port', type=int) 28 | 29 | class Application(tornado.web.Application): 30 | def __init__(self, **kwargs): 31 | from django.conf import settings as django_settings 32 | 33 | nm = kwargs.get('queue_name', "default") 34 | self.queue_name = nm 35 | 36 | handlers = [ 37 | (r'/socket.io/1/', MainHandler), 38 | (r'/', MainHandler), 39 | (r'/status', QueueServerStatusHandler), 40 | (r'/sock/status', QueueStatusSock), 41 | ] 42 | 43 | settings = dict( 44 | static_path=django_settings.MEDIA_ROOT, 45 | xsrf_cookies=True, 46 | cookie_secret=hashlib.sha1(django_settings.SECRET_KEY).hexdigest(), 47 | logging='info', 48 | queue_name=nm, 49 | ) 50 | 51 | tornado.web.Application.__init__(self, handlers, **settings) 52 | self.queues = {} 53 | if nm is not None: 54 | self.queues.update({ 55 | nm: PoolQueue(queue_name=nm, active=True, 56 | halt=kwargs.get('halt_when_exhausted', False), 57 | log_exceptions=kwargs.get('log_exceptions', True), 58 | ), 59 | }) 60 | 61 | 62 | class BaseQueueConnector(object): 63 | 64 | def queue(self, queue_name=None): 65 | if queue_name is None: 66 | queue_name = self.application.queue_name 67 | if queue_name not in queues.keys(): 68 | raise IndexError("No queue named %s is defined" % queue_name) 69 | 70 | if not queue_name in self.application.queues: 71 | self.application.queues[queue_name] = PoolQueue(queue_name=queue_name, active=False) 72 | 73 
| return self.application.queues[queue_name] 74 | 75 | @property 76 | def defaultqueue(self): 77 | return self.queue('default') 78 | 79 | def clientlist_get(self): 80 | if not hasattr(self.application, 'clientlist'): 81 | self.application.clientlist = [] 82 | return self.application.clientlist 83 | def clientlist_set(self, val): 84 | self.application.clientlist = val 85 | def clientlist_del(self): 86 | del self.application.clientlist 87 | 88 | clientlist = property(clientlist_get, clientlist_set, clientlist_del) 89 | 90 | 91 | class QueueStatusSock(tornado.websocket.WebSocketHandler, BaseQueueConnector): 92 | def open(self): 93 | self.clientlist.append(self) 94 | 95 | def on_message(self, inmess): 96 | mess = json.loads(str(inmess)) 97 | nm = mess.get('status', "default") 98 | self.write_message({ 99 | nm: self.queue(nm).signalqueue.count(), 100 | }) 101 | 102 | def on_close(self): 103 | if self in self.clientlist: 104 | self.clientlist.remove(self) 105 | 106 | class BaseHandler(tornado.web.RequestHandler, BaseQueueConnector): 107 | pass 108 | 109 | class MainHandler(BaseHandler): 110 | def get(self): 111 | self.write("YO DOGG!") 112 | 113 | class QueueServerStatusHandler(BaseHandler): 114 | def __init__(self, *args, **kwargs): 115 | super(QueueServerStatusHandler, self).__init__(*args, **kwargs) 116 | self.template = loader.get_template('status.html') 117 | 118 | def get(self): 119 | nm = self.get_argument('queue', self.application.queue_name) 120 | queue = self.queue(nm).signalqueue 121 | self.write( 122 | self.template.render(Context({ 123 | 'queue_name': nm, 124 | 'items': [json.loads(morsel) for morsel in queue.values()], 125 | 'count': queue.count(), 126 | })) 127 | ) 128 | 129 | 130 | 131 | def main(): 132 | logg = logging.getLogger("signalqueue") 133 | # Set up color if we are in a tty and curses is installed 134 | 135 | color = False 136 | if curses and sys.stderr.isatty(): 137 | try: 138 | curses.setupterm() 139 | if curses.tigetnum("colors") > 0: 140 | 
color = True 141 | except: 142 | pass 143 | channel = logging.StreamHandler() 144 | channel.setFormatter(LogFormatter(color=color)) 145 | logg.addHandler(channel) 146 | 147 | logg.info("YO DOGG.") 148 | from django.conf import settings 149 | 150 | try: 151 | tornado.options.parse_command_line() 152 | http_server = HTTPServer(Application()) 153 | http_server.listen(settings.SQ_WORKER_PORT) 154 | IOLoop.instance().start() 155 | 156 | except KeyboardInterrupt: 157 | print 'NOOOOOOOOOOOO DOGGGGG!!!' 158 | 159 | 160 | if __name__ == '__main__': 161 | main() 162 | 163 | 164 | 165 | --------------------------------------------------------------------------------