├── .gitignore ├── .travis.yml ├── AUTHORS.md ├── CHANGES.txt ├── HISTORY.md ├── LICENSE ├── MANIFEST.in ├── README.markdown ├── docs ├── Makefile ├── make.bat └── source │ ├── _theme │ ├── ADCTheme │ │ ├── README.rst │ │ ├── layout.html │ │ ├── static │ │ │ ├── adctheme.css │ │ │ ├── breadcrumb_background.png │ │ │ ├── documentation.png │ │ │ ├── header_sm_mid.png │ │ │ ├── scrn1.png │ │ │ ├── scrn2.png │ │ │ ├── searchfield_leftcap.png │ │ │ ├── searchfield_repeat.png │ │ │ ├── searchfield_rightcap.png │ │ │ ├── title_background.png │ │ │ ├── triangle_closed.png │ │ │ ├── triangle_left.png │ │ │ └── triangle_open.png │ │ └── theme.conf │ ├── flask │ │ ├── layout.html │ │ ├── relations.html │ │ ├── static │ │ │ ├── flasky.css_t │ │ │ └── small_flask.css │ │ └── theme.conf │ └── nature │ │ ├── layout.html │ │ ├── static │ │ ├── nature.css_t │ │ └── pygments.css │ │ └── theme.conf │ ├── class.rst │ ├── conf.py │ ├── example.rst │ ├── failures.rst │ ├── horde.rst │ ├── index.rst │ ├── install.rst │ ├── intentions.rst │ ├── intro.rst │ └── tests.rst ├── pyres ├── __init__.py ├── compat.py ├── exceptions.py ├── extensions.py ├── failure │ ├── __init__.py │ ├── base.py │ ├── mail.py │ ├── multiple.py │ └── redis.py ├── horde.py ├── job.py ├── json_parser.py ├── scheduler.py ├── scripts.py └── worker.py ├── requirements-test.txt ├── requirements.txt ├── roadmap.md ├── setup.py ├── tests ├── __init__.py ├── test_failure.py ├── test_failure_multi.py ├── test_horde.py ├── test_jobs.py ├── test_json.py ├── test_resq.py ├── test_schedule.py ├── test_stats.py └── test_worker.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | build/ 3 | .coverage 4 | *.report 5 | *.egg-info 6 | logs/ 7 | dist/ 8 | *.swp 9 | *.swo 10 | .tox/ 11 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | 
language: python 2 | python: 3 | - "2.6" 4 | - "2.7" 5 | - "3.3" 6 | - "3.4" 7 | # - "pypy" 8 | # command to install dependencies 9 | install: 10 | - python setup.py install 11 | # command to run tests 12 | script: python setup.py test 13 | services: 14 | - redis-server 15 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | ## Authors 2 | * Matt George 3 | * Craig Hawco 4 | * Michael Russo 5 | * Chris Song 6 | * Whit Morriss 7 | * Joe Shaw 8 | * Yashwanth Nelapati 9 | * Cezar Sa Espinola 10 | * Alex Ezell 11 | * Christy O'Reilly 12 | * Kevin McConnell 13 | * Bernardo Heynemann 14 | * David Schoonover 15 | * Rob Hudson 16 | * Salimane Adjao Moustapha 17 | * John Hobbs 18 | * James M. Henderson 19 | * Iraê Carvalho 20 | * Fabien Reboia 21 | * Peter Teichman 22 | 23 | 24 | Inspired by Resque, by Chris Wanstrath 25 | -------------------------------------------------------------------------------- /CHANGES.txt: -------------------------------------------------------------------------------- 1 | 2011-03-01 whit 2 | 3 | * Added hooks for the worker to allow worker subclasses to insert 4 | code before and after forking 5 | 6 | -------------------------------------------------------------------------------- /HISTORY.md: -------------------------------------------------------------------------------- 1 | ##1.4.2 (2013-06-21) 2 | * __str__ returns correctly with dsn 3 | * worker_pids returns correct set of workers 4 | * workers are re-registered on every job 5 | * add exception metadata for after_perform method 6 | * logger no longer overrides root logger 7 | * support for redis db in dsn 8 | 9 | ##1.4.1 (2012-07-30) 10 | * fix for non existent system signal for linux 11 | * cleanup of setup.py and requirements 12 | 13 | ##1.4 (2012-06-?) 
14 | * added hooks for before and after perform methods 15 | * fixed logging 16 | * fixed problems with password authentication 17 | 18 | ##1.3 (2012-06-01) 19 | * remove resweb from pyres 20 | * resweb is now available at http://github.com/Pyres/resweb or on pypi 21 | 22 | ##1.2 23 | * release with changes from pull requests 24 | 25 | ##1.1 (2011-06-16) 26 | * api change based on redis-py 27 | * setproctitle requirements fix 28 | * change exception logging in worker 29 | 30 | ##1.0.1 (2011-04-12) 31 | * fixed bug with templates and media in resweb 32 | * call to redis-py disconnect was failing, switched to connection.disconnect 33 | * interval cast to int for pyres_worker script command 34 | 35 | ## 0.9.1 (2010-10-15) 36 | * fixing issues #45, #46. 37 | * #45 - resweb not working in chrome 38 | * #46 - delayed_queue_schedule_size() returns incorrect value 39 | * updated version requirement for redis-py 40 | * added Failure docs from Alex._ 41 | 42 | ## 0.9 (2010-08-05) 43 | * added better logging to the project 44 | 45 | ## 0.8 (2010-04-24) 46 | * added the pyres_manager and the horde module. This allows a more prefork like model for processing jobs. 47 | * setproctitle usage. Allows better process titles when viewing via ps 48 | * ability to delete and requeue failed items 49 | 50 | ## 0.7.5.1 (2010-03-18) 51 | * fixed the pyres_scheduler script 52 | * changed download link to remove v from version number 53 | 54 | ## 0.7.5 (2010-03-18) 55 | * added feature to retry jobs based on a class attribute 56 | 57 | ## 0.7.1 (2010-03-16) 58 | * bug fix for pruning workers. 
59 | 60 | ## 0.7.0 (2010-03-05) 61 | * delayed tasks 62 | * resweb pagination 63 | * switch stored timestamps to a unix timestamp 64 | * updated documentation 65 | * upgraded to redis-py 1.34.1 66 | * switched from print statements to the logging module 67 | * import errors on jobs are now reported in the failed queue 68 | * prune dead workers 69 | * small bugfixes in the resweb package 70 | * improved failure formatting 71 | * datetime json parser 72 | 73 | ## 0.5.0 (2010-01-14) 74 | 75 | * added new documentation to the project 76 | * update setup.py 77 | * preparing for semantic versioning 78 | 79 | ## 0.4.1 (2010-01-06) 80 | 81 | * fixed issue with new failure package in distutils sdist 82 | * changed setup.py to remove camel case, because it's ugly 83 | 84 | ## 0.4.0 (2010-01-06) 85 | 86 | * added the basics of failure backend support 87 | 88 | ## 0.3.1 (2009-12-16) 89 | 90 | * minor bug fix in worker.py 91 | * merged in some setup.py niceties from dsc fork 92 | * merged in better README info from dsc fork 93 | 94 | ## 0.3.0 (2009-12-10) 95 | 96 | * updated setup.py 97 | * refactored package for better testing 98 | * resque namespacing by fakechris 99 | * smarter import from string by fakechris 100 | 101 | ## 0.2.0 (2009-12-09) 102 | 103 | * Better web interface via resweb 104 | * Updated the api to be more inline with resque 105 | * More tests. 106 | 107 | ## 0.1.0 (2009-12-01) 108 | 109 | * First release. 
110 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2009-2013 Matt George 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt 2 | -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | Pyres - a Resque clone 2 | ====================== 3 | 4 | [Resque](http://github.com/defunkt/resque) is a great implementation of a job queue by the people at github. It's written in ruby, which is great, but I primarily work in python. 
So I took on the task of porting over the code to python and PyRes was the result 5 | 6 | 7 | ## Project Goals 8 | 9 | Because of some differences between ruby and python, there are a couple of places where I chose speed over correctness. The goal will be to eventually take the application and make it more pythonic without sacrificing the awesome functionality found in resque. At the same time, I hope to stay within the bounds of the original api and web interface. 10 | 11 | ## Travis CI 12 | 13 | Currently, pyres is being tested via travis ci for python version 2.6, 2.7, and 3.3: 14 | [![Build Status](https://secure.travis-ci.org/binarydud/pyres.png)](http://travis-ci.org/binarydud/pyres) 15 | 16 | ## Running Tests 17 | 18 | 1. Install nose: `$ easy_install nose` 19 | 2. Start redis: `$ redis-server [PATH_TO_YOUR_REDIS_CONFIG]` 20 | 3. Run nose: `$ nosetests` Or more verbosely: `$ nosetests -v` 21 | 22 | 23 | ##Mailing List 24 | 25 | To join the list simply send an email to . This 26 | will subscribe you and send you information about your subscription, 27 | include unsubscribe information. 28 | 29 | The archive can be found at . 30 | 31 | 32 | ## Information 33 | 34 | * Code: `git clone git://github.com/binarydud/pyres.git` 35 | * Home: 36 | * Docs: 37 | * Bugs: 38 | * List: 39 | 40 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | 9 | # Internal variables. 
10 | PAPEROPT_a4 = -D latex_paper_size=a4 11 | PAPEROPT_letter = -D latex_paper_size=letter 12 | ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 13 | 14 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 15 | 16 | help: 17 | @echo "Please use \`make ' where is one of" 18 | @echo " html to make standalone HTML files" 19 | @echo " dirhtml to make HTML files named index.html in directories" 20 | @echo " pickle to make pickle files" 21 | @echo " json to make JSON files" 22 | @echo " htmlhelp to make HTML files and a HTML help project" 23 | @echo " qthelp to make HTML files and a qthelp project" 24 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 25 | @echo " changes to make an overview of all changed/added/deprecated items" 26 | @echo " linkcheck to check all external links for integrity" 27 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 28 | 29 | clean: 30 | -rm -rf build/* 31 | 32 | html: 33 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html 34 | @echo 35 | @echo "Build finished. The HTML pages are in build/html." 36 | 37 | dirhtml: 38 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) build/dirhtml 39 | @echo 40 | @echo "Build finished. The HTML pages are in build/dirhtml." 41 | 42 | pickle: 43 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle 44 | @echo 45 | @echo "Build finished; now you can process the pickle files." 46 | 47 | json: 48 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json 49 | @echo 50 | @echo "Build finished; now you can process the JSON files." 51 | 52 | htmlhelp: 53 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp 54 | @echo 55 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 56 | ".hhp project file in build/htmlhelp." 
57 | 58 | qthelp: 59 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp 60 | @echo 61 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 62 | ".qhcp project file in build/qthelp, like this:" 63 | @echo "# qcollectiongenerator build/qthelp/PyRes.qhcp" 64 | @echo "To view the help file:" 65 | @echo "# assistant -collectionFile build/qthelp/PyRes.qhc" 66 | 67 | latex: 68 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex 69 | @echo 70 | @echo "Build finished; the LaTeX files are in build/latex." 71 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 72 | "run these through (pdf)latex." 73 | 74 | changes: 75 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes 76 | @echo 77 | @echo "The overview file is in build/changes." 78 | 79 | linkcheck: 80 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck 81 | @echo 82 | @echo "Link check complete; look for any errors in the above output " \ 83 | "or in build/linkcheck/output.txt." 84 | 85 | doctest: 86 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest 87 | @echo "Testing of doctests in the sources finished, look at the " \ 88 | "results in build/doctest/output.txt." 89 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | set SPHINXBUILD=sphinx-build 6 | set ALLSPHINXOPTS=-d build/doctrees %SPHINXOPTS% source 7 | if NOT "%PAPER%" == "" ( 8 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 9 | ) 10 | 11 | if "%1" == "" goto help 12 | 13 | if "%1" == "help" ( 14 | :help 15 | echo.Please use `make ^` where ^ is one of 16 | echo. html to make standalone HTML files 17 | echo. dirhtml to make HTML files named index.html in directories 18 | echo. pickle to make pickle files 19 | echo. json to make JSON files 20 | echo. 
htmlhelp to make HTML files and a HTML help project 21 | echo. qthelp to make HTML files and a qthelp project 22 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 23 | echo. changes to make an overview over all changed/added/deprecated items 24 | echo. linkcheck to check all external links for integrity 25 | echo. doctest to run all doctests embedded in the documentation if enabled 26 | goto end 27 | ) 28 | 29 | if "%1" == "clean" ( 30 | for /d %%i in (build\*) do rmdir /q /s %%i 31 | del /q /s build\* 32 | goto end 33 | ) 34 | 35 | if "%1" == "html" ( 36 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% build/html 37 | echo. 38 | echo.Build finished. The HTML pages are in build/html. 39 | goto end 40 | ) 41 | 42 | if "%1" == "dirhtml" ( 43 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% build/dirhtml 44 | echo. 45 | echo.Build finished. The HTML pages are in build/dirhtml. 46 | goto end 47 | ) 48 | 49 | if "%1" == "pickle" ( 50 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% build/pickle 51 | echo. 52 | echo.Build finished; now you can process the pickle files. 53 | goto end 54 | ) 55 | 56 | if "%1" == "json" ( 57 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% build/json 58 | echo. 59 | echo.Build finished; now you can process the JSON files. 60 | goto end 61 | ) 62 | 63 | if "%1" == "htmlhelp" ( 64 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% build/htmlhelp 65 | echo. 66 | echo.Build finished; now you can run HTML Help Workshop with the ^ 67 | .hhp project file in build/htmlhelp. 68 | goto end 69 | ) 70 | 71 | if "%1" == "qthelp" ( 72 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% build/qthelp 73 | echo. 
74 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 75 | .qhcp project file in build/qthelp, like this: 76 | echo.^> qcollectiongenerator build\qthelp\PyRes.qhcp 77 | echo.To view the help file: 78 | echo.^> assistant -collectionFile build\qthelp\PyRes.ghc 79 | goto end 80 | ) 81 | 82 | if "%1" == "latex" ( 83 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% build/latex 84 | echo. 85 | echo.Build finished; the LaTeX files are in build/latex. 86 | goto end 87 | ) 88 | 89 | if "%1" == "changes" ( 90 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% build/changes 91 | echo. 92 | echo.The overview file is in build/changes. 93 | goto end 94 | ) 95 | 96 | if "%1" == "linkcheck" ( 97 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% build/linkcheck 98 | echo. 99 | echo.Link check complete; look for any errors in the above output ^ 100 | or in build/linkcheck/output.txt. 101 | goto end 102 | ) 103 | 104 | if "%1" == "doctest" ( 105 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% build/doctest 106 | echo. 107 | echo.Testing of doctests in the sources finished, look at the ^ 108 | results in build/doctest/output.txt. 109 | goto end 110 | ) 111 | 112 | :end 113 | -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/README.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | How To Install 3 | ============== 4 | 5 | Install in Sphinx 6 | ----------------- 7 | 8 | Copy this directory into the ``sphinx/templates`` directory where Shpinx is installed. For example, a standard install of sphinx on Mac OS X is at ``/Library/Python/2.6/site-packages/Sphinx-0.6.3-py2.6.egg/`` 9 | 10 | Install Somewhere Else 11 | ---------------------- 12 | 13 | If you want to install this theme somewhere else, you will have to modify the ``conf.py`` file. 
:: 14 | 15 | templates_path = ['/absolute/path/to/dir/','relative/path/'] 16 | 17 | Making Sphinx Use the Theme 18 | --------------------------- 19 | 20 | Edit the ``conf.py`` file and make the following setting: :: 21 | 22 | html_theme = 'ADCtheme' 23 | 24 | Screen Shots 25 | ------------ 26 | 27 | .. image:: http://github.com/coordt/ADCtheme/raw/master/static/scrn1.png 28 | 29 | .. image:: http://github.com/coordt/ADCtheme/raw/master/static/scrn2.png 30 | 31 | To Do 32 | ----- 33 | 34 | * Gotta get the javascript working so the Table of Contents is hide-able. 35 | * Probably lots of css cleanup. -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "basic/layout.html" %} 2 | {%- block doctype -%} 3 | 5 | {%- endblock %} 6 | {%- set reldelim1 = reldelim1 is not defined and ' »' or reldelim1 %} 7 | {%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %} 8 | {%- block linktags %} 9 | {%- if hasdoc('about') %} 10 | 11 | {%- endif %} 12 | {%- if hasdoc('genindex') %} 13 | 14 | {%- endif %} 15 | {%- if hasdoc('search') %} 16 | 17 | {%- endif %} 18 | {%- if hasdoc('copyright') %} 19 | 20 | {%- endif %} 21 | 22 | {%- if parents %} 23 | 24 | {%- endif %} 25 | {%- if next %} 26 | 27 | {%- endif %} 28 | {%- if prev %} 29 | 30 | {%- endif %} 31 | {%- endblock %} 32 | {%- block extrahead %} {% endblock %} 33 | {%- block header %}{% endblock %} 34 | {%- block relbar1 %} 35 |
36 |

{{docstitle}}

37 |
38 | 49 | {% endblock %} 50 | 51 | {%- block sidebar1 %} 52 | {%- if not embedded %}{% if not theme_nosidebar|tobool %} 53 |
54 |
55 | {%- block sidebarlogo %} 56 | {%- if logo %} 57 | 60 | {%- endif %} 61 | {%- endblock %} 62 | {%- block sidebartoc %} 63 | 64 | {{ toctree() }} 65 | {%- endblock %} 66 | {%- block sidebarrel %} 67 | {%- endblock %} 68 | {%- block sidebarsourcelink %} 69 | {%- if show_source and has_source and sourcename %} 70 |

{{ _('This Page') }}

71 | 75 | {%- endif %} 76 | {%- endblock %} 77 | {%- if customsidebar %} 78 | {% include customsidebar %} 79 | {%- endif %} 80 | {%- block sidebarsearch %} 81 | {%- if pagename != "search" %} 82 | 98 | 99 | {%- endif %} 100 | {%- endblock %} 101 |
102 |
103 | {%- endif %}{% endif %} 104 | 105 | {% endblock %} 106 | {%- block document %} 107 |
108 | {%- if not embedded %}{% if not theme_nosidebar|tobool %} 109 |
110 | {%- endif %}{% endif %} 111 |
112 | {% block body %} {% endblock %} 113 |
114 | {%- if not embedded %}{% if not theme_nosidebar|tobool %} 115 |
116 | {%- endif %}{% endif %} 117 |
118 | 132 | {%- endblock %} 133 | {%- block sidebar2 %}{% endblock %} 134 | {%- block relbar2 %}{% endblock %} 135 | {%- block footer %} 136 | 143 | {%- endblock %} 144 | -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/adctheme.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Sphinx stylesheet -- basic theme 3 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 4 | */ 5 | 6 | /* -- main layout ----------------------------------------------------------- */ 7 | 8 | div.clearer { 9 | clear: both; 10 | } 11 | 12 | /* -- header ---------------------------------------------------------------- */ 13 | 14 | #header #title { 15 | background:#29334F url(title_background.png) repeat-x scroll 0 0; 16 | border-bottom:1px solid #B6B6B6; 17 | height:25px; 18 | overflow:hidden; 19 | } 20 | #headerButtons { 21 | position: absolute; 22 | list-style: none outside; 23 | top: 26px; 24 | left: 0px; 25 | right: 0px; 26 | margin: 0px; 27 | padding: 0px; 28 | border-top: 1px solid #2B334F; 29 | border-bottom: 1px solid #EDEDED; 30 | height: 20px; 31 | font-size: 8pt; 32 | overflow: hidden; 33 | background-color: #D8D8D8; 34 | } 35 | 36 | #headerButtons li { 37 | background-repeat:no-repeat; 38 | display:inline; 39 | margin-top:0; 40 | padding:0; 41 | } 42 | 43 | .headerButton { 44 | display: inline; 45 | height:20px; 46 | } 47 | 48 | .headerButton a { 49 | text-decoration: none; 50 | float: right; 51 | height: 20px; 52 | padding: 4px 15px; 53 | border-left: 1px solid #ACACAC; 54 | font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; 55 | color: black; 56 | } 57 | .headerButton a:hover { 58 | color: white; 59 | background-color: #787878; 60 | 61 | } 62 | 63 | li#toc_button { 64 | text-align:left; 65 | } 66 | 67 | li#toc_button .headerButton a { 68 | width:198px; 69 | padding-top: 4px; 70 | font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; 71 | color: black; 72 | 
float: left; 73 | padding-left:15px; 74 | border-right:1px solid #ACACAC; 75 | background:transparent url(triangle_closed.png) no-repeat scroll 4px 6px; 76 | } 77 | 78 | 79 | 80 | li#page_buttons { 81 | position:absolute; 82 | right:0; 83 | } 84 | 85 | #breadcrumbs { 86 | color: black; 87 | background-image:url(breadcrumb_background.png); 88 | border-top:1px solid #2B334F; 89 | bottom:0; 90 | font-size:10px; 91 | height:15px; 92 | left:0; 93 | overflow:hidden; 94 | padding:3px 10px 0; 95 | position:absolute; 96 | right:0; 97 | white-space:nowrap; 98 | z-index:901; 99 | } 100 | #breadcrumbs a { 101 | color: black; 102 | text-decoration: none; 103 | } 104 | #breadcrumbs a:hover { 105 | text-decoration: underline; 106 | } 107 | 108 | /* -- sidebar --------------------------------------------------------------- */ 109 | #sphinxsidebar { 110 | position: absolute; 111 | top: 84px; 112 | bottom: 19px; 113 | left: 0px; 114 | width: 229px; 115 | background-color: #E4EBF7; 116 | border-right: 1px solid #ACACAC; 117 | border-top: 1px solid #2B334F; 118 | overflow-x: hidden; 119 | overflow-y: auto; 120 | padding: 0px 0px 0px 0px; 121 | font-size:11px; 122 | } 123 | 124 | div.sphinxsidebarwrapper { 125 | padding: 10px 5px 0 10px; 126 | } 127 | 128 | #sphinxsidebar li { 129 | margin: 0px; 130 | padding: 0px; 131 | font-weight: normal; 132 | margin: 0px 0px 7px 0px; 133 | overflow: hidden; 134 | text-overflow: ellipsis; 135 | font-size: 11px; 136 | } 137 | 138 | #sphinxsidebar ul { 139 | list-style: none; 140 | margin: 0px 0px 0px 0px; 141 | padding: 0px 5px 0px 5px; 142 | } 143 | 144 | #sphinxsidebar ul ul, 145 | #sphinxsidebar ul.want-points { 146 | list-style: square; 147 | } 148 | 149 | #sphinxsidebar ul ul { 150 | margin-top: 0; 151 | margin-bottom: 0; 152 | } 153 | 154 | #sphinxsidebar form { 155 | margin-top: 10px; 156 | } 157 | 158 | #sphinxsidebar input { 159 | border: 1px solid #787878; 160 | font-family: sans-serif; 161 | font-size: 1em; 162 | } 163 | 164 | img { 165 | 
border: 0; 166 | } 167 | 168 | #sphinxsidebar li.toctree-l1 a { 169 | font-weight: bold; 170 | color: #000; 171 | text-decoration: none; 172 | } 173 | 174 | #sphinxsidebar li.toctree-l2 a { 175 | font-weight: bold; 176 | color: #4f4f4f; 177 | text-decoration: none; 178 | } 179 | 180 | /* -- search page ----------------------------------------------------------- */ 181 | 182 | ul.search { 183 | margin: 10px 0 0 20px; 184 | padding: 0; 185 | } 186 | 187 | ul.search li { 188 | padding: 5px 0 5px 20px; 189 | background-image: url(file.png); 190 | background-repeat: no-repeat; 191 | background-position: 0 7px; 192 | } 193 | 194 | ul.search li a { 195 | font-weight: bold; 196 | } 197 | 198 | ul.search li div.context { 199 | color: #888; 200 | margin: 2px 0 0 30px; 201 | text-align: left; 202 | } 203 | 204 | ul.keywordmatches li.goodmatch a { 205 | font-weight: bold; 206 | } 207 | #sphinxsidebar input.prettysearch {border:none;} 208 | input.searchbutton { 209 | float: right; 210 | } 211 | .search-wrapper {width: 100%; height: 25px;} 212 | .search-wrapper input.prettysearch { border: none; width:200px; height: 16px; background: url(searchfield_repeat.png) center top repeat-x; border: 0px; margin: 0; padding: 3px 0 0 0; font: 11px "Lucida Grande", "Lucida Sans Unicode", Arial, sans-serif; } 213 | .search-wrapper input.prettysearch { width: 184px; margin-left: 20px; *margin-top:-1px; *margin-right:-2px; *margin-left:10px; } 214 | .search-wrapper .search-left { display: block; position: absolute; width: 20px; height: 19px; background: url(searchfield_leftcap.png) left top no-repeat; } 215 | .search-wrapper .search-right { display: block; position: relative; left: 204px; top: -19px; width: 10px; height: 19px; background: url(searchfield_rightcap.png) right top no-repeat; } 216 | 217 | /* -- index page ------------------------------------------------------------ */ 218 | 219 | table.contentstable { 220 | width: 90%; 221 | } 222 | 223 | table.contentstable p.biglink { 224 | 
line-height: 150%; 225 | } 226 | 227 | a.biglink { 228 | font-size: 1.3em; 229 | } 230 | 231 | span.linkdescr { 232 | font-style: italic; 233 | padding-top: 5px; 234 | font-size: 90%; 235 | } 236 | 237 | /* -- general index --------------------------------------------------------- */ 238 | 239 | table.indextable td { 240 | text-align: left; 241 | vertical-align: top; 242 | } 243 | 244 | table.indextable dl, table.indextable dd { 245 | margin-top: 0; 246 | margin-bottom: 0; 247 | } 248 | 249 | table.indextable tr.pcap { 250 | height: 10px; 251 | } 252 | 253 | table.indextable tr.cap { 254 | margin-top: 10px; 255 | background-color: #f2f2f2; 256 | } 257 | 258 | img.toggler { 259 | margin-right: 3px; 260 | margin-top: 3px; 261 | cursor: pointer; 262 | } 263 | 264 | /* -- general body styles --------------------------------------------------- */ 265 | .document { 266 | border-top:1px solid #2B334F; 267 | overflow:auto; 268 | padding-left:2em; 269 | padding-right:2em; 270 | position:absolute; 271 | z-index:1; 272 | top:84px; 273 | bottom:19px; 274 | right:0; 275 | left:230px; 276 | } 277 | 278 | a.headerlink { 279 | visibility: hidden; 280 | } 281 | 282 | h1:hover > a.headerlink, 283 | h2:hover > a.headerlink, 284 | h3:hover > a.headerlink, 285 | h4:hover > a.headerlink, 286 | h5:hover > a.headerlink, 287 | h6:hover > a.headerlink, 288 | dt:hover > a.headerlink { 289 | visibility: visible; 290 | } 291 | 292 | div.body p.caption { 293 | text-align: inherit; 294 | } 295 | 296 | div.body td { 297 | text-align: left; 298 | } 299 | 300 | .field-list ul { 301 | padding-left: 1em; 302 | } 303 | 304 | .first { 305 | margin-top: 0 !important; 306 | } 307 | 308 | p.rubric { 309 | margin-top: 30px; 310 | font-weight: bold; 311 | } 312 | 313 | /* -- sidebars -------------------------------------------------------------- */ 314 | 315 | /*div.sidebar { 316 | margin: 0 0 0.5em 1em; 317 | border: 1px solid #ddb; 318 | padding: 7px 7px 0 7px; 319 | background-color: #ffe; 320 | width: 
40%; 321 | float: right; 322 | } 323 | 324 | p.sidebar-title { 325 | font-weight: bold; 326 | } 327 | */ 328 | /* -- topics ---------------------------------------------------------------- */ 329 | 330 | div.topic { 331 | border: 1px solid #ccc; 332 | padding: 7px 7px 0 7px; 333 | margin: 10px 0 10px 0; 334 | } 335 | 336 | p.topic-title { 337 | font-size: 1.1em; 338 | font-weight: bold; 339 | margin-top: 10px; 340 | } 341 | 342 | /* -- admonitions ----------------------------------------------------------- */ 343 | .admonition { 344 | border: 1px solid #a1a5a9; 345 | background-color: #f7f7f7; 346 | margin: 20px; 347 | padding: 0px 8px 7px 9px; 348 | text-align: left; 349 | } 350 | .warning { 351 | background-color:#E8E8E8; 352 | border:1px solid #111111; 353 | margin:30px; 354 | } 355 | .admonition p { 356 | font: 12px 'Lucida Grande', Geneva, Helvetica, Arial, sans-serif; 357 | margin-top: 7px; 358 | margin-bottom: 0px; 359 | } 360 | 361 | div.admonition dt { 362 | font-weight: bold; 363 | } 364 | 365 | div.admonition dl { 366 | margin-bottom: 0; 367 | } 368 | 369 | p.admonition-title { 370 | margin: 0px 10px 5px 0px; 371 | font-weight: bold; 372 | padding-top: 3px; 373 | } 374 | 375 | div.body p.centered { 376 | text-align: center; 377 | margin-top: 25px; 378 | } 379 | 380 | /* -- tables ---------------------------------------------------------------- */ 381 | 382 | table.docutils { 383 | border-collapse: collapse; 384 | border-top: 1px solid #919699; 385 | border-left: 1px solid #919699; 386 | border-right: 1px solid #919699; 387 | font-size:12px; 388 | padding:8px; 389 | text-align:left; 390 | vertical-align:top; 391 | } 392 | 393 | table.docutils td, table.docutils th { 394 | padding: 8px; 395 | font-size: 12px; 396 | text-align: left; 397 | vertical-align: top; 398 | border-bottom: 1px solid #919699; 399 | } 400 | 401 | table.docutils th { 402 | font-weight: bold; 403 | } 404 | /* This alternates colors in up to six table rows (light blue for odd, white for 
even)*/ 405 | .docutils tr { 406 | background: #F0F5F9; 407 | } 408 | 409 | .docutils tr + tr { 410 | background: #FFFFFF; 411 | } 412 | 413 | .docutils tr + tr + tr { 414 | background: #F0F5F9; 415 | } 416 | 417 | .docutils tr + tr + tr + tr { 418 | background: #FFFFFF; 419 | } 420 | 421 | .docutils tr + tr + tr +tr + tr { 422 | background: #F0F5F9; 423 | } 424 | 425 | .docutils tr + tr + tr + tr + tr + tr { 426 | background: #FFFFFF; 427 | } 428 | 429 | .docutils tr + tr + tr + tr + tr + tr + tr { 430 | background: #F0F5F9; 431 | } 432 | 433 | table.footnote td, table.footnote th { 434 | border: 0 !important; 435 | } 436 | 437 | th { 438 | text-align: left; 439 | padding-right: 5px; 440 | } 441 | 442 | /* -- other body styles ----------------------------------------------------- */ 443 | 444 | dl { 445 | margin-bottom: 15px; 446 | } 447 | 448 | dd p { 449 | margin-top: 0px; 450 | } 451 | 452 | dd ul, dd table { 453 | margin-bottom: 10px; 454 | } 455 | 456 | dd { 457 | margin-top: 3px; 458 | margin-bottom: 10px; 459 | margin-left: 30px; 460 | } 461 | 462 | dt:target, .highlight { 463 | background-color: #fbe54e; 464 | } 465 | 466 | dl.glossary dt { 467 | font-weight: bold; 468 | font-size: 1.1em; 469 | } 470 | 471 | .field-list ul { 472 | vertical-align: top; 473 | margin: 0; 474 | padding-bottom: 0; 475 | list-style: none inside; 476 | } 477 | 478 | .field-list ul li { 479 | margin-top: 0; 480 | } 481 | 482 | .field-list p { 483 | margin: 0; 484 | } 485 | 486 | .refcount { 487 | color: #060; 488 | } 489 | 490 | .optional { 491 | font-size: 1.3em; 492 | } 493 | 494 | .versionmodified { 495 | font-style: italic; 496 | } 497 | 498 | .system-message { 499 | background-color: #fda; 500 | padding: 5px; 501 | border: 3px solid red; 502 | } 503 | 504 | .footnote:target { 505 | background-color: #ffa 506 | } 507 | 508 | /* -- code displays --------------------------------------------------------- */ 509 | 510 | pre { 511 | overflow: auto; 512 | background-color:#F1F5F9; 
513 | border:1px solid #C9D1D7; 514 | border-spacing:0; 515 | font-family:"Bitstream Vera Sans Mono",Monaco,"Lucida Console",Courier,Consolas,monospace; 516 | font-size:11px; 517 | padding: 10px; 518 | } 519 | 520 | td.linenos pre { 521 | padding: 5px 0px; 522 | border: 0; 523 | background-color: transparent; 524 | color: #aaa; 525 | } 526 | 527 | table.highlighttable { 528 | margin-left: 0.5em; 529 | } 530 | 531 | table.highlighttable td { 532 | padding: 0 0.5em 0 0.5em; 533 | } 534 | 535 | tt.descname { 536 | background-color: transparent; 537 | font-weight: bold; 538 | font-size: 1.2em; 539 | } 540 | 541 | tt.descclassname { 542 | background-color: transparent; 543 | } 544 | 545 | tt.xref, a tt { 546 | background-color: transparent; 547 | font-weight: bold; 548 | } 549 | 550 | h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { 551 | background-color: transparent; 552 | } 553 | 554 | /* -- math display ---------------------------------------------------------- */ 555 | 556 | img.math { 557 | vertical-align: middle; 558 | } 559 | 560 | div.body div.math p { 561 | text-align: center; 562 | } 563 | 564 | span.eqno { 565 | float: right; 566 | } 567 | 568 | /* -- printout stylesheet --------------------------------------------------- */ 569 | 570 | @media print { 571 | div.document, 572 | div.documentwrapper, 573 | div.bodywrapper { 574 | margin: 0; 575 | width: 100%; 576 | } 577 | 578 | div.sphinxsidebar, 579 | div.related, 580 | div.footer, 581 | #top-link { 582 | display: none; 583 | } 584 | } 585 | 586 | body { 587 | font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; 588 | } 589 | 590 | dl.class dt { 591 | padding: 3px; 592 | border-top: 2px solid #999; 593 | } 594 | 595 | tt.descname { 596 | font-size: 1em; 597 | } 598 | 599 | em.property { 600 | font-style: normal; 601 | } 602 | 603 | dl.class dd p { 604 | } 605 | 606 | dl.class dd dl.exception dt { 607 | padding: 3px; 608 | background-color: #FFD6D6; 609 | border-top: none; 610 | } 611 | 612 | dl.class dd 
dl.method dt { 613 | padding: 3px; 614 | background-color: #e9e9e9; 615 | border-top: none; 616 | 617 | } 618 | 619 | dl.function dt { 620 | padding: 3px; 621 | border-top: 2px solid #999; 622 | } 623 | 624 | ul { 625 | list-style-image:none; 626 | list-style-position:outside; 627 | list-style-type:square; 628 | margin:0 0 0 30px; 629 | padding:0 0 12px 6px; 630 | } 631 | #docstitle { 632 | height: 36px; 633 | background-image: url(header_sm_mid.png); 634 | left: 0; 635 | top: 0; 636 | position: absolute; 637 | width: 100%; 638 | } 639 | #docstitle p { 640 | padding:7px 0 0 45px; 641 | margin: 0; 642 | color: white; 643 | text-shadow:0 1px 0 #787878; 644 | background: transparent url(documentation.png) no-repeat scroll 10px 3px; 645 | height: 36px; 646 | font-size: 15px; 647 | } 648 | #header { 649 | height:45px; 650 | left:0; 651 | position:absolute; 652 | right:0; 653 | top:36px; 654 | z-index:900; 655 | } 656 | 657 | #header h1 { 658 | font-size:10pt; 659 | margin:0; 660 | padding:5px 0 0 10px; 661 | text-shadow:0 1px 0 #D5D5D5; 662 | white-space:nowrap; 663 | } 664 | 665 | h1 { 666 | -x-system-font:none; 667 | color:#000000; 668 | font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; 669 | font-size:30px; 670 | font-size-adjust:none; 671 | font-stretch:normal; 672 | font-style:normal; 673 | font-variant:normal; 674 | font-weight:bold; 675 | line-height:normal; 676 | margin-bottom:25px; 677 | margin-top:1em; 678 | } 679 | 680 | .footer { 681 | border-top:1px solid #DDDDDD; 682 | clear:both; 683 | padding-top:9px; 684 | width:100%; 685 | font-size:10px; 686 | } 687 | 688 | p { 689 | -x-system-font:none; 690 | font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; 691 | font-size:12px; 692 | font-size-adjust:none; 693 | font-stretch:normal; 694 | font-style:normal; 695 | font-variant:normal; 696 | font-weight:normal; 697 | line-height:normal; 698 | margin-bottom:10px; 699 | margin-top:0; 700 | } 701 | 702 | h2 { 703 | border-bottom:1px solid 
#919699; 704 | color:#000000; 705 | font-size:24px; 706 | margin-top:2.5em; 707 | padding-bottom:2px; 708 | } 709 | 710 | a:link:hover { 711 | color:#093D92; 712 | text-decoration:underline; 713 | } 714 | 715 | a:link { 716 | color:#093D92; 717 | text-decoration:none; 718 | } 719 | 720 | 721 | ol { 722 | list-style-position:outside; 723 | list-style-type:decimal; 724 | margin:0 0 0 30px; 725 | padding:0 0 12px 6px; 726 | } 727 | li { 728 | margin-top:7px; 729 | font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; 730 | font-size:12px; 731 | font-size-adjust:none; 732 | font-stretch:normal; 733 | font-style:normal; 734 | font-variant:normal; 735 | font-weight:normal; 736 | line-height:normal; 737 | } 738 | li > p { 739 | display:inline; 740 | } 741 | li p { 742 | margin-top:8px; 743 | } -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/breadcrumb_background.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/breadcrumb_background.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/documentation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/documentation.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/header_sm_mid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/header_sm_mid.png -------------------------------------------------------------------------------- 
/docs/source/_theme/ADCTheme/static/scrn1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/scrn1.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/scrn2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/scrn2.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/searchfield_leftcap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/searchfield_leftcap.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/searchfield_repeat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/searchfield_repeat.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/searchfield_rightcap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/searchfield_rightcap.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/title_background.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/title_background.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/triangle_closed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/triangle_closed.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/triangle_left.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/triangle_left.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/static/triangle_open.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/binarymatt/pyres/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/docs/source/_theme/ADCTheme/static/triangle_open.png -------------------------------------------------------------------------------- /docs/source/_theme/ADCTheme/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = adctheme.css 4 | pygments_style = friendly 5 | 6 | -------------------------------------------------------------------------------- /docs/source/_theme/flask/layout.html: -------------------------------------------------------------------------------- 1 | {%- extends "basic/layout.html" %} 2 | {%- block extrahead %} 3 | {{ super() }} 4 | {% if theme_touch_icon %} 5 | 6 | {% endif %} 7 | 9 | {% endblock %} 10 | {%- block relbar2 %}{% endblock %} 11 | {% block header %} 12 | {{ super() }} 13 | {% if 
pagename == 'index' %} 14 |
15 | {% endif %} 16 | {% endblock %} 17 | {%- block footer %} 18 | 22 | {% if pagename == 'index' %} 23 |
24 | {% endif %} 25 | {%- endblock %} 26 | -------------------------------------------------------------------------------- /docs/source/_theme/flask/relations.html: -------------------------------------------------------------------------------- 1 |

Related Topics

2 | 20 | -------------------------------------------------------------------------------- /docs/source/_theme/flask/static/flasky.css_t: -------------------------------------------------------------------------------- 1 | /* 2 | * flasky.css_t 3 | * ~~~~~~~~~~~~ 4 | * 5 | * :copyright: Copyright 2010 by Armin Ronacher. 6 | * :license: Flask Design License, see LICENSE for details. 7 | */ 8 | 9 | {% set page_width = '940px' %} 10 | {% set sidebar_width = '220px' %} 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: 'Georgia', serif; 18 | font-size: 17px; 19 | background-color: white; 20 | color: #000; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.document { 26 | width: {{ page_width }}; 27 | margin: 30px auto 0 auto; 28 | } 29 | 30 | div.documentwrapper { 31 | float: left; 32 | width: 100%; 33 | } 34 | 35 | div.bodywrapper { 36 | margin: 0 0 0 {{ sidebar_width }}; 37 | } 38 | 39 | div.sphinxsidebar { 40 | width: {{ sidebar_width }}; 41 | } 42 | 43 | hr { 44 | border: 1px solid #B1B4B6; 45 | } 46 | 47 | div.body { 48 | background-color: #ffffff; 49 | color: #3E4349; 50 | padding: 0 30px 0 30px; 51 | } 52 | 53 | img.floatingflask { 54 | padding: 0 0 10px 10px; 55 | float: right; 56 | } 57 | 58 | div.footer { 59 | width: {{ page_width }}; 60 | margin: 20px auto 30px auto; 61 | font-size: 14px; 62 | color: #888; 63 | text-align: right; 64 | } 65 | 66 | div.footer a { 67 | color: #888; 68 | } 69 | 70 | div.related { 71 | display: none; 72 | } 73 | 74 | div.sphinxsidebar a { 75 | color: #444; 76 | text-decoration: none; 77 | border-bottom: 1px dotted #999; 78 | } 79 | 80 | div.sphinxsidebar a:hover { 81 | border-bottom: 1px solid #999; 82 | } 83 | 84 | div.sphinxsidebar { 85 | font-size: 14px; 86 | line-height: 1.5; 87 | } 88 | 89 | div.sphinxsidebarwrapper { 90 | padding: 18px 10px; 91 | } 92 | 93 | div.sphinxsidebarwrapper p.logo { 94 | padding: 0 0 
20px 0; 95 | margin: 0; 96 | text-align: center; 97 | } 98 | 99 | div.sphinxsidebar h3, 100 | div.sphinxsidebar h4 { 101 | font-family: 'Garamond', 'Georgia', serif; 102 | color: #444; 103 | font-size: 24px; 104 | font-weight: normal; 105 | margin: 0 0 5px 0; 106 | padding: 0; 107 | } 108 | 109 | div.sphinxsidebar h4 { 110 | font-size: 20px; 111 | } 112 | 113 | div.sphinxsidebar h3 a { 114 | color: #444; 115 | } 116 | 117 | div.sphinxsidebar p.logo a, 118 | div.sphinxsidebar h3 a, 119 | div.sphinxsidebar p.logo a:hover, 120 | div.sphinxsidebar h3 a:hover { 121 | border: none; 122 | } 123 | 124 | div.sphinxsidebar p { 125 | color: #555; 126 | margin: 10px 0; 127 | } 128 | 129 | div.sphinxsidebar ul { 130 | margin: 10px 0; 131 | padding: 0; 132 | color: #000; 133 | } 134 | 135 | div.sphinxsidebar input { 136 | border: 1px solid #ccc; 137 | font-family: 'Georgia', serif; 138 | font-size: 1em; 139 | } 140 | 141 | /* -- body styles ----------------------------------------------------------- */ 142 | 143 | a { 144 | color: #004B6B; 145 | text-decoration: underline; 146 | } 147 | 148 | a:hover { 149 | color: #6D4100; 150 | text-decoration: underline; 151 | } 152 | 153 | div.body h1, 154 | div.body h2, 155 | div.body h3, 156 | div.body h4, 157 | div.body h5, 158 | div.body h6 { 159 | font-family: 'Garamond', 'Georgia', serif; 160 | font-weight: normal; 161 | margin: 30px 0px 10px 0px; 162 | padding: 0; 163 | } 164 | 165 | {% if theme_index_logo %} 166 | div.indexwrapper h1 { 167 | text-indent: -999999px; 168 | background: url({{ theme_index_logo }}) no-repeat center center; 169 | height: {{ theme_index_logo_height }}; 170 | } 171 | {% endif %} 172 | 173 | div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } 174 | div.body h2 { font-size: 180%; } 175 | div.body h3 { font-size: 150%; } 176 | div.body h4 { font-size: 130%; } 177 | div.body h5 { font-size: 100%; } 178 | div.body h6 { font-size: 100%; } 179 | 180 | a.headerlink { 181 | color: #ddd; 182 | padding: 0 
4px; 183 | text-decoration: none; 184 | } 185 | 186 | a.headerlink:hover { 187 | color: #444; 188 | background: #eaeaea; 189 | } 190 | 191 | div.body p, div.body dd, div.body li { 192 | line-height: 1.4em; 193 | } 194 | 195 | div.admonition { 196 | background: #fafafa; 197 | margin: 20px -30px; 198 | padding: 10px 30px; 199 | border-top: 1px solid #ccc; 200 | border-bottom: 1px solid #ccc; 201 | } 202 | 203 | div.admonition tt.xref, div.admonition a tt { 204 | border-bottom: 1px solid #fafafa; 205 | } 206 | 207 | dd div.admonition { 208 | margin-left: -60px; 209 | padding-left: 60px; 210 | } 211 | 212 | div.admonition p.admonition-title { 213 | font-family: 'Garamond', 'Georgia', serif; 214 | font-weight: normal; 215 | font-size: 24px; 216 | margin: 0 0 10px 0; 217 | padding: 0; 218 | line-height: 1; 219 | } 220 | 221 | div.admonition p.last { 222 | margin-bottom: 0; 223 | } 224 | 225 | div.highlight { 226 | background-color: white; 227 | } 228 | 229 | dt:target, .highlight { 230 | background: #FAF3E8; 231 | } 232 | 233 | div.note { 234 | background-color: #eee; 235 | border: 1px solid #ccc; 236 | } 237 | 238 | div.seealso { 239 | background-color: #ffc; 240 | border: 1px solid #ff6; 241 | } 242 | 243 | div.topic { 244 | background-color: #eee; 245 | } 246 | 247 | p.admonition-title { 248 | display: inline; 249 | } 250 | 251 | p.admonition-title:after { 252 | content: ":"; 253 | } 254 | 255 | pre, tt { 256 | font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; 257 | font-size: 0.9em; 258 | } 259 | 260 | img.screenshot { 261 | } 262 | 263 | tt.descname, tt.descclassname { 264 | font-size: 0.95em; 265 | } 266 | 267 | tt.descname { 268 | padding-right: 0.08em; 269 | } 270 | 271 | img.screenshot { 272 | -moz-box-shadow: 2px 2px 4px #eee; 273 | -webkit-box-shadow: 2px 2px 4px #eee; 274 | box-shadow: 2px 2px 4px #eee; 275 | } 276 | 277 | table.docutils { 278 | border: 1px solid #888; 279 | -moz-box-shadow: 2px 2px 4px #eee; 280 | 
-webkit-box-shadow: 2px 2px 4px #eee; 281 | box-shadow: 2px 2px 4px #eee; 282 | } 283 | 284 | table.docutils td, table.docutils th { 285 | border: 1px solid #888; 286 | padding: 0.25em 0.7em; 287 | } 288 | 289 | table.field-list, table.footnote { 290 | border: none; 291 | -moz-box-shadow: none; 292 | -webkit-box-shadow: none; 293 | box-shadow: none; 294 | } 295 | 296 | table.footnote { 297 | margin: 15px 0; 298 | width: 100%; 299 | border: 1px solid #eee; 300 | background: #fdfdfd; 301 | font-size: 0.9em; 302 | } 303 | 304 | table.footnote + table.footnote { 305 | margin-top: -15px; 306 | border-top: none; 307 | } 308 | 309 | table.field-list th { 310 | padding: 0 0.8em 0 0; 311 | } 312 | 313 | table.field-list td { 314 | padding: 0; 315 | } 316 | 317 | table.footnote td.label { 318 | width: 0px; 319 | padding: 0.3em 0 0.3em 0.5em; 320 | } 321 | 322 | table.footnote td { 323 | padding: 0.3em 0.5em; 324 | } 325 | 326 | dl { 327 | margin: 0; 328 | padding: 0; 329 | } 330 | 331 | dl dd { 332 | margin-left: 30px; 333 | } 334 | 335 | blockquote { 336 | margin: 0 0 0 30px; 337 | padding: 0; 338 | } 339 | 340 | ul, ol { 341 | margin: 10px 0 10px 30px; 342 | padding: 0; 343 | } 344 | 345 | pre { 346 | background: #eee; 347 | padding: 7px 30px; 348 | margin: 15px -30px; 349 | line-height: 1.3em; 350 | } 351 | 352 | dl pre, blockquote pre, li pre { 353 | margin-left: -60px; 354 | padding-left: 60px; 355 | } 356 | 357 | dl dl pre { 358 | margin-left: -90px; 359 | padding-left: 90px; 360 | } 361 | 362 | tt { 363 | background-color: #ecf0f3; 364 | color: #222; 365 | /* padding: 1px 2px; */ 366 | } 367 | 368 | tt.xref, a tt { 369 | background-color: #FBFBFB; 370 | border-bottom: 1px solid white; 371 | } 372 | 373 | a.reference { 374 | text-decoration: none; 375 | border-bottom: 1px dotted #004B6B; 376 | } 377 | 378 | a.reference:hover { 379 | border-bottom: 1px solid #6D4100; 380 | } 381 | 382 | a.footnote-reference { 383 | text-decoration: none; 384 | font-size: 0.7em; 385 | 
vertical-align: top; 386 | border-bottom: 1px dotted #004B6B; 387 | } 388 | 389 | a.footnote-reference:hover { 390 | border-bottom: 1px solid #6D4100; 391 | } 392 | 393 | a:hover tt { 394 | background: #EEE; 395 | } 396 | -------------------------------------------------------------------------------- /docs/source/_theme/flask/static/small_flask.css: -------------------------------------------------------------------------------- 1 | /* 2 | * small_flask.css_t 3 | * ~~~~~~~~~~~~~~~~~ 4 | * 5 | * :copyright: Copyright 2010 by Armin Ronacher. 6 | * :license: Flask Design License, see LICENSE for details. 7 | */ 8 | 9 | body { 10 | margin: 0; 11 | padding: 20px 30px; 12 | } 13 | 14 | div.documentwrapper { 15 | float: none; 16 | background: white; 17 | } 18 | 19 | div.sphinxsidebar { 20 | display: block; 21 | float: none; 22 | width: 102.5%; 23 | margin: 50px -30px -20px -30px; 24 | padding: 10px 20px; 25 | background: #333; 26 | color: white; 27 | } 28 | 29 | div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, 30 | div.sphinxsidebar h3 a { 31 | color: white; 32 | } 33 | 34 | div.sphinxsidebar a { 35 | color: #aaa; 36 | } 37 | 38 | div.sphinxsidebar p.logo { 39 | display: none; 40 | } 41 | 42 | div.document { 43 | width: 100%; 44 | margin: 0; 45 | } 46 | 47 | div.related { 48 | display: block; 49 | margin: 0; 50 | padding: 10px 0 20px 0; 51 | } 52 | 53 | div.related ul, 54 | div.related ul li { 55 | margin: 0; 56 | padding: 0; 57 | } 58 | 59 | div.footer { 60 | display: none; 61 | } 62 | 63 | div.bodywrapper { 64 | margin: 0; 65 | } 66 | 67 | div.body { 68 | min-height: 0; 69 | padding: 0; 70 | } 71 | -------------------------------------------------------------------------------- /docs/source/_theme/flask/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = flasky.css 4 | pygments_style = flask_theme_support.FlaskyStyle 5 | 6 | [options] 7 | index_logo = '' 8 | 
index_logo_height = 120px 9 | touch_icon = 10 | -------------------------------------------------------------------------------- /docs/source/_theme/nature/layout.html: -------------------------------------------------------------------------------- 1 | {%- block doctype -%} 2 | 4 | {%- endblock %} 5 | {%- set reldelim1 = reldelim1 is not defined and ' »' or reldelim1 %} 6 | {%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %} 7 | 8 | {%- macro relbar() %} 9 | 27 | {%- endmacro %} 28 | 29 | {%- macro sidebar() %} 30 | {%- if not embedded %}{% if not theme_nosidebar|tobool %} 31 |
32 |
33 | {%- block sidebarlogo %} 34 | {%- if logo %} 35 | 38 | {%- endif %} 39 | {%- endblock %} 40 | {%- block sidebartoc %} 41 | {%- if display_toc %} 42 |

{{ _('Table Of Contents') }}

43 | {{ toc }} 44 | {%- endif %} 45 | {%- endblock %} 46 | {%- block sidebarrel %} 47 | {%- if prev %} 48 |

{{ _('Previous topic') }}

49 |

{{ prev.title }}

51 | {%- endif %} 52 | {%- if next %} 53 |

{{ _('Next topic') }}

54 |

{{ next.title }}

56 | {%- endif %} 57 | {%- endblock %} 58 | {%- block sidebarsourcelink %} 59 | {%- if show_source and has_source and sourcename %} 60 |

{{ _('This Page') }}

61 | 65 | {%- endif %} 66 | {%- endblock %} 67 | {%- if customsidebar %} 68 | {% include customsidebar %} 69 | {%- endif %} 70 | {%- block sidebarsearch %} 71 | {%- if pagename != "search" %} 72 | 84 | 85 | {%- endif %} 86 | {%- endblock %} 87 |
88 |
89 | {%- endif %}{% endif %} 90 | {%- endmacro %} 91 | 92 | 93 | 94 | 95 | {{ metatags }} 96 | {%- if not embedded %} 97 | {%- set titlesuffix = " — "|safe + docstitle|e %} 98 | {%- else %} 99 | {%- set titlesuffix = "" %} 100 | {%- endif %} 101 | {{ title|striptags }}{{ titlesuffix }} 102 | 103 | 104 | {%- if not embedded %} 105 | 114 | {%- for scriptfile in script_files %} 115 | 116 | {%- endfor %} 117 | {%- if use_opensearch %} 118 | 121 | {%- endif %} 122 | {%- if favicon %} 123 | 124 | {%- endif %} 125 | {%- endif %} 126 | {%- block linktags %} 127 | {%- if hasdoc('about') %} 128 | 129 | {%- endif %} 130 | {%- if hasdoc('genindex') %} 131 | 132 | {%- endif %} 133 | {%- if hasdoc('search') %} 134 | 135 | {%- endif %} 136 | {%- if hasdoc('copyright') %} 137 | 138 | {%- endif %} 139 | 140 | {%- if parents %} 141 | 142 | {%- endif %} 143 | {%- if next %} 144 | 145 | {%- endif %} 146 | {%- if prev %} 147 | 148 | {%- endif %} 149 | {%- endblock %} 150 | {%- block extrahead %} {% endblock %} 151 | 152 | 153 | {%- block header %}{% endblock %} 154 | 155 | {%- block relbar1 %}{{ relbar() }}{% endblock %} 156 | 157 | {%- block sidebar1 %} {# possible location for sidebar #} {% endblock %} 158 | 159 |
160 | {%- block document %} 161 |
162 | {%- if not embedded %}{% if not theme_nosidebar|tobool %} 163 |
164 | {%- endif %}{% endif %} 165 |
166 | {% block body %} {% endblock %} 167 |
168 | {%- if not embedded %}{% if not theme_nosidebar|tobool %} 169 |
170 | {%- endif %}{% endif %} 171 |
172 | {%- endblock %} 173 | 174 | {%- block sidebar2 %}{{ sidebar() }}{% endblock %} 175 |
176 |
177 | 178 | {%- block relbar2 %}{{ relbar() }}{% endblock %} 179 | 180 | {%- block footer %} 181 | 194 | {%- endblock %} 195 | 196 | 197 | -------------------------------------------------------------------------------- /docs/source/_theme/nature/static/nature.css_t: -------------------------------------------------------------------------------- 1 | /** 2 | * Sphinx stylesheet -- default theme 3 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 4 | */ 5 | 6 | @import url("basic.css"); 7 | 8 | /* -- page layout ----------------------------------------------------------- */ 9 | 10 | body { 11 | font-family: Arial, sans-serif; 12 | font-size: 100%; 13 | background-color: #111; 14 | color: #555; 15 | margin: 0; 16 | padding: 0; 17 | } 18 | 19 | hr{ 20 | border: 1px solid #B1B4B6; 21 | } 22 | 23 | div.document { 24 | background-color: #eee; 25 | } 26 | 27 | div.body { 28 | background-color: #ffffff; 29 | color: #3E4349; 30 | padding: 0 30px 30px 30px; 31 | font-size: 0.8em; 32 | } 33 | 34 | div.footer { 35 | color: #555; 36 | width: 100%; 37 | padding: 13px 0; 38 | text-align: center; 39 | font-size: 75%; 40 | } 41 | 42 | div.footer a { 43 | color: #444; 44 | text-decoration: underline; 45 | } 46 | 47 | div.related { 48 | background-color: #6BA81E; 49 | line-height: 32px; 50 | color: #fff; 51 | text-shadow: 0px 1px 0 #444; 52 | font-size: 0.80em; 53 | } 54 | 55 | div.related a { 56 | color: #E2F3CC; 57 | } 58 | 59 | div.sphinxsidebar { 60 | font-size: 0.75em; 61 | line-height: 1.5em; 62 | } 63 | 64 | div.sphinxsidebarwrapper{ 65 | padding: 20px 0; 66 | } 67 | 68 | div.sphinxsidebar h3, 69 | div.sphinxsidebar h4 { 70 | font-family: Arial, sans-serif; 71 | color: #222; 72 | font-size: 1.2em; 73 | font-weight: normal; 74 | margin: 0; 75 | padding: 5px 10px; 76 | background-color: #ddd; 77 | text-shadow: 1px 1px 0 white 78 | } 79 | 80 | div.sphinxsidebar h4{ 81 | font-size: 1.1em; 82 | } 83 | 84 | div.sphinxsidebar h3 a { 85 | color: #444; 86 | } 87 | 88 | 89 | div.sphinxsidebar p { 
90 | color: #888; 91 | padding: 5px 20px; 92 | } 93 | 94 | div.sphinxsidebar p.topless { 95 | } 96 | 97 | div.sphinxsidebar ul { 98 | margin: 10px 20px; 99 | padding: 0; 100 | color: #000; 101 | } 102 | 103 | div.sphinxsidebar a { 104 | color: #444; 105 | } 106 | 107 | div.sphinxsidebar input { 108 | border: 1px solid #ccc; 109 | font-family: sans-serif; 110 | font-size: 1em; 111 | } 112 | 113 | div.sphinxsidebar input[type=text]{ 114 | margin-left: 20px; 115 | } 116 | 117 | /* -- body styles ----------------------------------------------------------- */ 118 | 119 | a { 120 | color: #005B81; 121 | text-decoration: none; 122 | } 123 | 124 | a:hover { 125 | color: #E32E00; 126 | text-decoration: underline; 127 | } 128 | 129 | div.body h1, 130 | div.body h2, 131 | div.body h3, 132 | div.body h4, 133 | div.body h5, 134 | div.body h6 { 135 | font-family: Arial, sans-serif; 136 | background-color: #BED4EB; 137 | font-weight: normal; 138 | color: #212224; 139 | margin: 30px 0px 10px 0px; 140 | padding: 5px 0 5px 10px; 141 | text-shadow: 0px 1px 0 white 142 | } 143 | 144 | div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; } 145 | div.body h2 { font-size: 150%; background-color: #C8D5E3; } 146 | div.body h3 { font-size: 120%; background-color: #D8DEE3; } 147 | div.body h4 { font-size: 110%; background-color: #D8DEE3; } 148 | div.body h5 { font-size: 100%; background-color: #D8DEE3; } 149 | div.body h6 { font-size: 100%; background-color: #D8DEE3; } 150 | 151 | a.headerlink { 152 | color: #c60f0f; 153 | font-size: 0.8em; 154 | padding: 0 4px 0 4px; 155 | text-decoration: none; 156 | } 157 | 158 | a.headerlink:hover { 159 | background-color: #c60f0f; 160 | color: white; 161 | } 162 | 163 | div.body p, div.body dd, div.body li { 164 | text-align: justify; 165 | line-height: 1.5em; 166 | } 167 | 168 | div.admonition p.admonition-title + p { 169 | display: inline; 170 | } 171 | 172 | div.highlight{ 173 | background-color: white; 174 | } 175 | 176 | 
div.note { 177 | background-color: #eee; 178 | border: 1px solid #ccc; 179 | } 180 | 181 | div.seealso { 182 | background-color: #ffc; 183 | border: 1px solid #ff6; 184 | } 185 | 186 | div.topic { 187 | background-color: #eee; 188 | } 189 | 190 | div.warning { 191 | background-color: #ffe4e4; 192 | border: 1px solid #f66; 193 | } 194 | 195 | p.admonition-title { 196 | display: inline; 197 | } 198 | 199 | p.admonition-title:after { 200 | content: ":"; 201 | } 202 | 203 | pre { 204 | padding: 10px; 205 | background-color: White; 206 | color: #222; 207 | line-height: 1.2em; 208 | border: 1px solid #C6C9CB; 209 | font-size: 1.2em; 210 | margin: 1.5em 0 1.5em 0; 211 | -webkit-box-shadow: 1px 1px 1px #d8d8d8; 212 | -moz-box-shadow: 1px 1px 1px #d8d8d8; 213 | } 214 | 215 | tt { 216 | background-color: #ecf0f3; 217 | color: #222; 218 | padding: 1px 2px; 219 | font-size: 1.2em; 220 | font-family: monospace; 221 | } -------------------------------------------------------------------------------- /docs/source/_theme/nature/static/pygments.css: -------------------------------------------------------------------------------- 1 | .c { color: #999988; font-style: italic } /* Comment */ 2 | .k { font-weight: bold } /* Keyword */ 3 | .o { font-weight: bold } /* Operator */ 4 | .cm { color: #999988; font-style: italic } /* Comment.Multiline */ 5 | .cp { color: #999999; font-weight: bold } /* Comment.preproc */ 6 | .c1 { color: #999988; font-style: italic } /* Comment.Single */ 7 | .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */ 8 | .ge { font-style: italic } /* Generic.Emph */ 9 | .gr { color: #aa0000 } /* Generic.Error */ 10 | .gh { color: #999999 } /* Generic.Heading */ 11 | .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */ 12 | .go { color: #111 } /* Generic.Output */ 13 | .gp { color: #555555 } /* Generic.Prompt */ 14 | .gs { font-weight: bold } /* Generic.Strong */ 15 | .gu { color: #aaaaaa } /* Generic.Subheading */ 16 | .gt { 
color: #aa0000 } /* Generic.Traceback */ 17 | .kc { font-weight: bold } /* Keyword.Constant */ 18 | .kd { font-weight: bold } /* Keyword.Declaration */ 19 | .kp { font-weight: bold } /* Keyword.Pseudo */ 20 | .kr { font-weight: bold } /* Keyword.Reserved */ 21 | .kt { color: #445588; font-weight: bold } /* Keyword.Type */ 22 | .m { color: #009999 } /* Literal.Number */ 23 | .s { color: #bb8844 } /* Literal.String */ 24 | .na { color: #008080 } /* Name.Attribute */ 25 | .nb { color: #999999 } /* Name.Builtin */ 26 | .nc { color: #445588; font-weight: bold } /* Name.Class */ 27 | .no { color: #ff99ff } /* Name.Constant */ 28 | .ni { color: #800080 } /* Name.Entity */ 29 | .ne { color: #990000; font-weight: bold } /* Name.Exception */ 30 | .nf { color: #990000; font-weight: bold } /* Name.Function */ 31 | .nn { color: #555555 } /* Name.Namespace */ 32 | .nt { color: #000080 } /* Name.Tag */ 33 | .nv { color: purple } /* Name.Variable */ 34 | .ow { font-weight: bold } /* Operator.Word */ 35 | .mf { color: #009999 } /* Literal.Number.Float */ 36 | .mh { color: #009999 } /* Literal.Number.Hex */ 37 | .mi { color: #009999 } /* Literal.Number.Integer */ 38 | .mo { color: #009999 } /* Literal.Number.Oct */ 39 | .sb { color: #bb8844 } /* Literal.String.Backtick */ 40 | .sc { color: #bb8844 } /* Literal.String.Char */ 41 | .sd { color: #bb8844 } /* Literal.String.Doc */ 42 | .s2 { color: #bb8844 } /* Literal.String.Double */ 43 | .se { color: #bb8844 } /* Literal.String.Escape */ 44 | .sh { color: #bb8844 } /* Literal.String.Heredoc */ 45 | .si { color: #bb8844 } /* Literal.String.Interpol */ 46 | .sx { color: #bb8844 } /* Literal.String.Other */ 47 | .sr { color: #808000 } /* Literal.String.Regex */ 48 | .s1 { color: #bb8844 } /* Literal.String.Single */ 49 | .ss { color: #bb8844 } /* Literal.String.Symbol */ 50 | .bp { color: #999999 } /* Name.Builtin.Pseudo */ 51 | .vc { color: #ff99ff } /* Name.Variable.Class */ 52 | .vg { color: #ff99ff } /* Name.Variable.Global */ 53 | 
.vi { color: #ff99ff } /* Name.Variable.Instance */ 54 | .il { color: #009999 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /docs/source/_theme/nature/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = nature.css 4 | pygments_style = tango 5 | 6 | [options] 7 | -------------------------------------------------------------------------------- /docs/source/class.rst: -------------------------------------------------------------------------------- 1 | .. module:: pyres 2 | 3 | ResQ Classes 4 | ========================================== 5 | 6 | .. autoclass:: pyres.ResQ 7 | :members: 8 | 9 | Job Classes 10 | ================= 11 | 12 | .. autoclass:: pyres.job.Job 13 | :members: 14 | 15 | Worker Classes 16 | ================= 17 | 18 | .. autoclass:: pyres.worker.Worker 19 | :members: 20 | 21 | Failure Classes 22 | ================= 23 | 24 | .. autoclass:: pyres.failure.base.BaseBackend 25 | :members: 26 | 27 | .. autoclass:: pyres.failure.RedisBackend 28 | :members: 29 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # PyRes documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Jan 6 16:25:18 2010. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. 
If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | sys.path.append(os.path.abspath(os.path.dirname(__file__+'/../../../'))) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # Add any Sphinx extension module names here, as strings. They can be extensions 24 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 25 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage'] 26 | 27 | # Add any paths that contain templates here, relative to this directory. 28 | templates_path = ['_templates'] 29 | 30 | # The suffix of source filenames. 31 | source_suffix = '.rst' 32 | 33 | # The encoding of source files. 34 | #source_encoding = 'utf-8' 35 | 36 | # The master toctree document. 37 | master_doc = 'index' 38 | 39 | # General information about the project. 40 | project = u'pyres' 41 | copyright = u'2012, Matt George' 42 | 43 | # The version info for the project you're documenting, acts as replacement for 44 | # |version| and |release|, also used in various other places throughout the 45 | # built documents. 46 | # 47 | # The short X.Y version. 48 | version = '1.3' 49 | # The full version, including alpha/beta/rc tags. 50 | release = '1.3' 51 | 52 | # The language for content autogenerated by Sphinx. Refer to documentation 53 | # for a list of supported languages. 54 | #language = None 55 | 56 | # There are two options for replacing |today|: either, you set today to some 57 | # non-false value, then it is used: 58 | #today = '' 59 | # Else, today_fmt is used as the format for a strftime call. 60 | #today_fmt = '%B %d, %Y' 61 | 62 | # List of documents that shouldn't be included in the build. 63 | #unused_docs = [] 64 | 65 | # List of directories, relative to source directory, that shouldn't be searched 66 | # for source files. 
67 | exclude_trees = [] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. Major themes that come with 93 | # Sphinx are currently 'default' and 'sphinxdoc'. 94 | sys.path.append(os.path.abspath('_theme')) 95 | html_theme_path = ['_theme'] 96 | html_theme = 'flask' 97 | 98 | # Theme options are theme-specific and customize the look and feel of a theme 99 | # further. For a list of options available for each theme, see the 100 | # documentation. 101 | #html_theme_options = {} 102 | 103 | # Add any paths that contain custom themes here, relative to this directory. 104 | #html_theme_path = [] 105 | 106 | # The name for this set of Sphinx documents. If None, it defaults to 107 | # " v documentation". 108 | #html_title = None 109 | 110 | # A shorter title for the navigation bar. Default is the same as html_title. 111 | #html_short_title = None 112 | 113 | # The name of an image file (relative to this directory) to place at the top 114 | # of the sidebar. 115 | #html_logo = None 116 | 117 | # The name of an image file (within the static path) to use as favicon of the 118 | # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 119 | # pixels large. 120 | #html_favicon = None 121 | 122 | # Add any paths that contain custom static files (such as style sheets) here, 123 | # relative to this directory. They are copied after the builtin static files, 124 | # so a file named "default.css" will overwrite the builtin "default.css". 125 | html_static_path = ['_static'] 126 | 127 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 128 | # using the given strftime format. 129 | #html_last_updated_fmt = '%b %d, %Y' 130 | 131 | # If true, SmartyPants will be used to convert quotes and dashes to 132 | # typographically correct entities. 133 | #html_use_smartypants = True 134 | 135 | # Custom sidebar templates, maps document names to template names. 136 | #html_sidebars = {} 137 | 138 | # Additional templates that should be rendered to pages, maps page names to 139 | # template names. 140 | #html_additional_pages = {} 141 | 142 | # If false, no module index is generated. 143 | #html_use_modindex = True 144 | 145 | # If false, no index is generated. 146 | #html_use_index = True 147 | 148 | # If true, the index is split into individual pages for each letter. 149 | #html_split_index = False 150 | 151 | # If true, links to the reST sources are added to the pages. 152 | #html_show_sourcelink = True 153 | 154 | # If true, an OpenSearch description file will be output, and all pages will 155 | # contain a tag referring to it. The value of this option must be the 156 | # base URL from which the finished HTML is served. 157 | #html_use_opensearch = '' 158 | 159 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 160 | #html_file_suffix = '' 161 | 162 | # Output file base name for HTML help builder. 163 | htmlhelp_basename = 'pyresdoc' 164 | 165 | 166 | # -- Options for LaTeX output -------------------------------------------------- 167 | 168 | # The paper size ('letter' or 'a4'). 
169 | #latex_paper_size = 'letter' 170 | 171 | # The font size ('10pt', '11pt' or '12pt'). 172 | #latex_font_size = '10pt' 173 | 174 | # Grouping the document tree into LaTeX files. List of tuples 175 | # (source start file, target name, title, author, documentclass [howto/manual]). 176 | latex_documents = [ 177 | ('index', 'pyres.tex', u'pyres Documentation', 178 | u'Matt George', 'manual'), 179 | ] 180 | 181 | # The name of an image file (relative to this directory) to place at the top of 182 | # the title page. 183 | #latex_logo = None 184 | 185 | # For "manual" documents, if this is true, then toplevel headings are parts, 186 | # not chapters. 187 | #latex_use_parts = False 188 | 189 | # Additional stuff for the LaTeX preamble. 190 | #latex_preamble = '' 191 | 192 | # Documents to append as an appendix to all manuals. 193 | #latex_appendices = [] 194 | 195 | # If false, no module index is generated. 196 | #latex_use_modindex = True 197 | 198 | -------------------------------------------------------------------------------- /docs/source/example.rst: -------------------------------------------------------------------------------- 1 | Example 2 | ========= 3 | 4 | Let's take a real world example of a blog where comments need to be checked for 5 | spam. When the comment is saved in the database, we create a job in the 6 | queue with that comment data. Let's take a django model in this case. 7 | 8 | .. 
code-block:: python 9 | :linenos: 10 | 11 | class Comment(models.Model): 12 | name = Model.CharField() 13 | email = Model.EmailField() 14 | body = Model.TextField() 15 | spam = Model.BooleanField() 16 | queue = "Spam" 17 | 18 | @staticmethod 19 | def perform(comment_id): 20 | comment = Comment.objects.get(pk=comment_id) 21 | params = {"comment_author_email": comment.user.email, 22 | "comment_content": comment.body, 23 | "comment_author_name": comment.user.name, 24 | "request_ip": comment.author_ip} 25 | x = urllib.urlopen("http://apikey.rest.akismet.com/1.1/comment-check", params) 26 | if x == "true": 27 | comment.spam = True 28 | else: 29 | comment.spam = False 30 | comment.save() 31 | 32 | You can convert your existing class to be compatible with pyres. All you need 33 | to do is add a :attr:`queue` attribute and define a :meth:`perform` method 34 | on the class. 35 | 36 | To insert a job into the queue you need to do something like this: 37 | 38 | .. code-block:: python 39 | 40 | >>> from pyres import ResQ 41 | >>> r = ResQ() 42 | >>> r.enqueue(Comment, 23) # Passing the comment id 23 43 | 44 | This puts a job into the queue **Spam**. Now we need to fire off our workers. 45 | In the **scripts** folder there is an executable:: 46 | 47 | $ ./pyres_worker Spam 48 | 49 | 50 | Just pass a comma separated list of queues the worker should poll. 51 | 52 | -------------------------------------------------------------------------------- /docs/source/failures.rst: -------------------------------------------------------------------------------- 1 | Failures 2 | =============== 3 | 4 | Pyres provides a ``BaseBackend`` for handling failed jobs. You can subclass 5 | this backend to store failed jobs in any system you like. 6 | 7 | Currently, the only provided backend is a ``RedisBackend`` which will store 8 | your failed jobs into a special *failed* queue for later processing or 9 | reenqueueing. 
It is the manager's job to manage the pool via signals or a command queue on the redis
server.
toctree:: 12 | :maxdepth: 2 13 | 14 | intro 15 | install 16 | example 17 | class 18 | tests 19 | failures 20 | horde 21 | 22 | 23 | Indices and tables 24 | ================== 25 | 26 | * :ref:`genindex` 27 | * :ref:`modindex` 28 | * :ref:`search` 29 | 30 | -------------------------------------------------------------------------------- /docs/source/install.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | =============== 3 | 4 | Pyres is most easily installed using pip and can be found on PyPI as pyres_. 5 | 6 | Using ``pip install pyres`` will install the required versions of the above packages/modules. 7 | Those requirements are currently: 8 | 9 | :: 10 | 11 | simplejson==2.0.9 12 | itty==0.6.4 13 | redis==1.34.1 14 | pystache==0.2.0 15 | 16 | If you'd rather install from the git repository, that's easy too:: 17 | 18 | $ git clone git://github.com/binarydud/pyres.git 19 | $ cd pyres 20 | $ python setup.py build 21 | $ python setup.py install 22 | 23 | Of course, you'll need to install the Redis server as well. Below is a simple example, but 24 | please read `Redis's own documentation`_ for more details. 25 | 26 | :: 27 | 28 | $ wget http://redis.googlecode.com/files/redis-1.2.2.tar.gz 29 | $ tar -xvf redis-1.2.2.tar.gz 30 | $ cd redis-1.2.2 31 | $ make 32 | $ ./redis-server 33 | 34 | This will install and start a Redis server with the default config running on port 6379. 35 | This default config is good enough for you to run the pyres tests. 36 | 37 | .. _pyres: http://pypi.python.org/pypi/pyres/ 38 | .. 
Coming soon
def special_log_file(filename):
    """Return True when *filename* names a stream or syslog target
    ("stderr", "stdout", or "syslog[:facility]") rather than a file path."""
    if filename in ("stderr", "stdout"):
        return True
    if filename.startswith("syslog"):
        return True
    return False

def get_logging_handler(filename, procname, namespace=None):
    """Build a logging handler for *filename*.

    ``filename`` may be ``"stderr"``, ``"stdout"``, ``"syslog[:facility]"``
    or a real file path; falsy values default to stderr.  *procname* is
    embedded in syslog messages; *namespace*, when given, prefixes every
    message.  Returns a configured ``logging.Handler``.
    """
    if namespace:
        message_format = namespace + ': %(message)s'
    else:
        message_format = '%(message)s'
    format = '%(asctime)s %(process)5d %(levelname)-8s ' + message_format

    if not filename:
        filename = "stderr"
    if filename == "stderr":
        handler = logging.StreamHandler(sys.stderr)
    elif filename == "stdout":
        handler = logging.StreamHandler(sys.stdout)
    elif filename.startswith("syslog"):  # "syslog:local0"
        from logging.handlers import SysLogHandler
        facility_name = filename[7:] or 'user'
        facility = SysLogHandler.facility_names[facility_name]

        # Linux exposes the syslog socket at /dev/log, OS X at /var/run/syslog.
        if os.path.exists("/dev/log"):
            syslog_path = "/dev/log"
        elif os.path.exists("/var/run/syslog"):
            syslog_path = "/var/run/syslog"
        else:
            raise Exception("Unable to figure out the syslog socket path")

        handler = SysLogHandler(syslog_path, facility)
        format = procname + "[%(process)d]: " + message_format
    else:
        try:
            # WatchedFileHandler cooperates with external log rotation but is
            # not available everywhere; fall back to size-based rotation.
            from logging.handlers import WatchedFileHandler
            handler = WatchedFileHandler(filename)
        except ImportError:  # was a bare except; only import failure is expected here
            from logging.handlers import RotatingFileHandler
            handler = RotatingFileHandler(filename, maxBytes=52428800,
                                          backupCount=7)
    handler.setFormatter(logging.Formatter(format, '%Y-%m-%d %H:%M:%S'))
    return handler

def setup_logging(procname, log_level=logging.INFO, filename=None):
    """Attach a handler for this package's logger at *log_level*.

    ``logging.NOTSET`` disables setup entirely.  Only the package logger is
    configured, so the root logger is never overridden.
    """
    if log_level == logging.NOTSET:
        return
    main_package = __name__.split('.', 1)[0] if '.' in __name__ else __name__
    logger = logging.getLogger(main_package)
    logger.setLevel(log_level)
    handler = get_logging_handler(filename, procname)
    logger.addHandler(handler)

def setup_pidfile(path):
    """Write the current PID to *path*, creating parent directories as
    needed.  A falsy *path* is a no-op."""
    if not path:
        return
    dirname = os.path.dirname(path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    with open(path, 'w') as f:
        f.write(str(os.getpid()))

def my_import(name):
    """Helper function for walking import calls when searching for classes by
    string names.

    ``__import__('a.b')`` returns package ``a``; walk the remaining dotted
    components to reach the actual submodule.
    """
    mod = __import__(name)
    components = name.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod
def str_to_class(s):
    """Alternate helper function to map string class names to module classes.

    Resolves the dotted path *s* (e.g. ``"mymodule.sub.MyClass"``) and
    returns the named attribute, or ``None`` when the module cannot be
    imported or the attribute is missing (unlike :func:`safe_str_to_class`,
    which raises).
    """
    lst = s.split(".")
    klass = lst[-1]
    mod_list = lst[:-1]
    module = ".".join(mod_list)
    try:
        mod = __import__(module)
        # __import__('a.b') returns the top-level package 'a'; walk down to
        # the submodule the caller actually named.  The original looked up
        # the class on the top-level package, so dotted submodule paths such
        # as 'a.b.C' always came back None.
        for comp in module.split(".")[1:]:
            mod = getattr(mod, comp)
        if hasattr(mod, klass):
            return getattr(mod, klass)
        else:
            return None
    except (ImportError, ValueError, AttributeError):
        # ValueError: empty module name ("Foo" with no package);
        # AttributeError: a dotted component does not exist.
        return None
    def pop(self, queues, timeout=10):
        """Blocking-pop the next job payload from *queues*.

        ``queues`` may be a single queue name or a list of names; *timeout*
        is the BLPOP timeout in seconds.  Returns ``(queue_name, payload)``
        on success, or ``(None, None)`` when the wait times out.
        """
        if isinstance(queues, string_types):
            queues = [queues]
        # BLPOP services the listed keys in order, so earlier queues win.
        ret = self.redis.blpop(["resque:queue:%s" % q for q in queues],
                               timeout=timeout)
        if ret:
            key, ret = ret
            # key is bytes; 13 == len("resque:queue:")
            return key[13:].decode(), ResQ.decode(ret)  # trim "resque:queue:"
        else:
            return None, None
(self.host, self.port) 207 | self._redis = server 208 | else: 209 | raise Exception("I don't know what to do with %s" % str(server)) 210 | redis = property(_get_redis, _set_redis) 211 | 212 | def enqueue(self, klass, *args): 213 | """Enqueue a job into a specific queue. Make sure the class you are 214 | passing has **queue** attribute and a **perform** method on it. 215 | 216 | """ 217 | queue = getattr(klass,'queue', None) 218 | if queue: 219 | class_name = '%s.%s' % (klass.__module__, klass.__name__) 220 | self.enqueue_from_string(class_name, queue, *args) 221 | else: 222 | logger.warning("unable to enqueue job with class %s" % str(klass)) 223 | 224 | def enqueue_from_string(self, klass_as_string, queue, *args, **kwargs): 225 | payload = {'class':klass_as_string, 'args':args, 'enqueue_timestamp': time.time()} 226 | if 'first_attempt' in kwargs: 227 | payload['first_attempt'] = kwargs['first_attempt'] 228 | self.push(queue, payload) 229 | logger.info("enqueued '%s' job on queue %s" % (klass_as_string, queue)) 230 | if args: 231 | logger.debug("job arguments: %s" % str(args)) 232 | else: 233 | logger.debug("no arguments passed in.") 234 | 235 | def queues(self): 236 | return [sm.decode() for sm in self.redis.smembers("resque:queues")] or [] 237 | 238 | def workers(self): 239 | return [w.decode() for w in self.redis.smembers("resque:workers")] or [] 240 | 241 | def info(self): 242 | """Returns a dictionary of the current status of the pending jobs, 243 | processed, no. of queues, no. of workers, no. of failed jobs. 
244 | 245 | """ 246 | pending = 0 247 | for q in self.queues(): 248 | pending += self.size(q) 249 | return { 250 | 'pending' : pending, 251 | 'processed' : Stat('processed',self).get(), 252 | 'queues' : len(self.queues()), 253 | 'workers' : len(self.workers()), 254 | #'working' : len(self.working()), 255 | 'failed' : Stat('failed',self).get(), 256 | 'servers' : ['%s:%s' % (self.host, self.port)] 257 | } 258 | 259 | def keys(self): 260 | return [key.decode().replace('resque:','') 261 | for key in self.redis.keys('resque:*')] 262 | 263 | def reserve(self, queues): 264 | from pyres.job import Job 265 | return Job.reserve(queues, self) 266 | 267 | def __str__(self): 268 | return "PyRes Client connected to %s" % self.dsn 269 | 270 | def working(self): 271 | from pyres.worker import Worker 272 | return Worker.working(self) 273 | 274 | def remove_queue(self, queue): 275 | if queue in self._watched_queues: 276 | self._watched_queues.remove(queue) 277 | self.redis.srem('resque:queues',queue) 278 | del self.redis['resque:queue:%s' % queue] 279 | 280 | def close(self): 281 | """Close the underlying redis connection. 
282 | 283 | """ 284 | self.redis.connection_pool.get_connection('_').disconnect() 285 | 286 | def enqueue_at(self, datetime, klass, *args, **kwargs): 287 | class_name = '%s.%s' % (klass.__module__, klass.__name__) 288 | self.enqueue_at_from_string(datetime, class_name, klass.queue, *args, **kwargs) 289 | 290 | def enqueue_at_from_string(self, datetime, klass_as_string, queue, *args, **kwargs): 291 | logger.info("scheduled '%s' job on queue %s for execution at %s" % 292 | (klass_as_string, queue, datetime)) 293 | if args: 294 | logger.debug("job arguments are: %s" % str(args)) 295 | payload = {'class': klass_as_string, 'queue': queue, 'args': args} 296 | if 'first_attempt' in kwargs: 297 | payload['first_attempt'] = kwargs['first_attempt'] 298 | self.delayed_push(datetime, payload) 299 | 300 | def delayed_push(self, datetime, item): 301 | key = int(time.mktime(datetime.timetuple())) 302 | self.redis.rpush('resque:delayed:%s' % key, ResQ.encode(item)) 303 | self.redis.zadd('resque:delayed_queue_schedule', key, key) 304 | 305 | def delayed_queue_peek(self, start, count): 306 | return [int(item) for item in self.redis.zrange( 307 | 'resque:delayed_queue_schedule', start, start+count) or []] 308 | 309 | def delayed_timestamp_peek(self, timestamp, start, count): 310 | return self.list_range('resque:delayed:%s' % timestamp, start, count) 311 | 312 | def delayed_queue_schedule_size(self): 313 | size = 0 314 | length = self.redis.zcard('resque:delayed_queue_schedule') 315 | for i in self.redis.zrange('resque:delayed_queue_schedule',0,length): 316 | size += self.delayed_timestamp_size(i.decode()) 317 | return size 318 | 319 | def delayed_timestamp_size(self, timestamp): 320 | #key = int(time.mktime(timestamp.timetuple())) 321 | return self.redis.llen("resque:delayed:%s" % timestamp) 322 | 323 | def next_delayed_timestamp(self): 324 | key = int(time.mktime(ResQ._current_time().timetuple())) 325 | array = self.redis.zrangebyscore('resque:delayed_queue_schedule', 326 | '-inf', 
key, start=0, num=1) 327 | timestamp = None 328 | if array: 329 | timestamp = array[0] 330 | 331 | if timestamp: 332 | return timestamp.decode() 333 | 334 | def next_item_for_timestamp(self, timestamp): 335 | #key = int(time.mktime(timestamp.timetuple())) 336 | key = "resque:delayed:%s" % timestamp 337 | ret = self.redis.lpop(key) 338 | item = None 339 | if ret: 340 | item = ResQ.decode(ret) 341 | if self.redis.llen(key) == 0: 342 | self.redis.delete(key) 343 | self.redis.zrem('resque:delayed_queue_schedule', timestamp) 344 | return item 345 | 346 | @classmethod 347 | def encode(cls, item): 348 | return json.dumps(item) 349 | 350 | @classmethod 351 | def decode(cls, item): 352 | if not isinstance(item, string_types): 353 | item = item.decode() 354 | ret = json.loads(item) 355 | return ret 356 | 357 | @classmethod 358 | def _enqueue(cls, klass, *args): 359 | queue = getattr(klass,'queue', None) 360 | _self = cls() 361 | if queue: 362 | class_name = '%s.%s' % (klass.__module__, klass.__name__) 363 | _self.push(queue, {'class':class_name,'args':args, 364 | 'enqueue_timestamp': time.time()}) 365 | 366 | @staticmethod 367 | def _current_time(): 368 | return datetime.datetime.now() 369 | 370 | 371 | class Stat(object): 372 | """A Stat class which shows the current status of the queue. 
class NoQueueError(Exception):
    """Raised when a worker/manager is started without any queues to watch."""
    pass

class JobError(RuntimeError):
    """Base class for errors raised while performing a job."""
    pass

class TimeoutError(JobError):
    """Error type for jobs that exceed their allowed time.

    NOTE(review): shadows the builtin ``TimeoutError`` on Python 3 --
    import it explicitly from ``pyres.exceptions`` to avoid ambiguity.
    """
    pass

class CrashError(JobError):
    """Error type for jobs whose processing died abnormally.

    NOTE(review): exact raising sites are outside this chunk -- confirm
    usage in the worker/horde modules.
    """
    pass
class JuniorWorker(Worker):
    """Worker variant used by :class:`Manager`: it loops forever and forks
    one child process per reserved job (instead of being forked per job)."""

    def work(self, interval=5):
        """Main loop: reserve jobs and process each in a forked child.

        Exits (and unregisters the worker) when shutdown has been scheduled
        or when no job can be reserved.
        """
        self.startup()
        while True:
            if self._shutdown:
                break
            job = self.reserve()
            if job:
                # print() as a function: the original used Python 2 print
                # statements, which are SyntaxErrors on the Python 3
                # versions this project's CI targets.
                print("got: %s" % job)

                self.child = os.fork()

                if self.child:
                    # Parent: wait for the child to finish the job.
                    print('Forked %s at %s' % (self.child,
                                               datetime.datetime.now()))
                    os.waitpid(self.child, 0)
                else:
                    # Child: perform the job, then exit without cleanup
                    # handlers (os._exit skips atexit/stdio flushing).
                    print('Processing %s since %s' % (job._queue,
                                                      datetime.datetime.now()))
                    self.process(job)
                    os._exit(0)
                self.child = None
            else:
                break

        self.unregister_worker()
size): 86 | if len(self.children) < self.max_children: 87 | self.start_child(queue) 88 | 89 | def startup(self): 90 | self.register_manager() 91 | self.register_signals() 92 | 93 | def register_manager(self): 94 | self.resq.redis.sadd('managers', str(self)) 95 | 96 | def unregister_manager(self): 97 | self.resq.redis.srem('managers', str(self)) 98 | 99 | def register_signals(self): 100 | signal.signal(signal.SIGTERM, self.shutdown_all) 101 | signal.signal(signal.SIGINT, self.shutdown_all) 102 | signal.signal(signal.SIGQUIT, self.schedule_shutdown) 103 | signal.signal(signal.SIGUSR1, self.kill_children) 104 | 105 | def shutdown_all(self, signum, frame): 106 | self.schedule_shutdown(signum, frame) 107 | self.kill_children(signum, frame) 108 | 109 | def schedule_shutdown(self, signum, frame): 110 | self._shutdown = True 111 | 112 | def kill_children(self): 113 | for child in self.children: 114 | child.terminate() 115 | 116 | def start_child(self, queue): 117 | p = multiprocessing.Process(target=JuniorWorker.run, args=([queue], 118 | self._host)) 119 | self.children.append(p) 120 | p.start() 121 | return True 122 | 123 | @classmethod 124 | def run(cls, queues=(), host="localhost:6379"): 125 | manager = cls(queues, host) 126 | manager.work() 127 | -------------------------------------------------------------------------------- /pyres/failure/__init__.py: -------------------------------------------------------------------------------- 1 | from pyres.failure.redis import RedisBackend 2 | 3 | backend = RedisBackend 4 | 5 | def create(*args, **kwargs): 6 | return backend(*args, **kwargs) 7 | 8 | def count(resq): 9 | return backend.count(resq) 10 | 11 | def all(resq, start, count): 12 | return backend.all(resq, start, count) 13 | 14 | def clear(resq): 15 | return backend.clear(resq) 16 | 17 | def requeue(resq, failure_object): 18 | queue = failure_object._queue 19 | payload = failure_object._payload 20 | return resq.push(queue, payload) 21 | 22 | def retry(resq, queue, 
class BaseBackend(object):
    """Base class that custom failure backends can subclass.

    Captures the active exception class, its formatted traceback, and the
    job context at construction time.

    The ``__init__`` takes these keyword arguments:

    ``exp`` -- The exception generated by your failure.

    ``queue`` -- The queue in which the ``Job`` was enqueued when it failed.

    ``payload`` -- The payload that was passed to the ``Job``.

    ``worker`` -- The worker that was processing the ``Job`` when it failed.

    """

    def __init__(self, exp, queue, payload, worker=None):
        # NOTE(review): the class of the *currently handled* exception is
        # recorded via sys.exc_info(), not ``exp`` itself — preserved
        # as-is because downstream backends format self._exception.
        self._exception = sys.exc_info()[0]
        try:
            self._traceback = traceback.format_exc()
        except AttributeError:
            self._traceback = None

        self._worker = worker
        self._queue = queue
        self._payload = payload

    def _parse_traceback(self, trace):
        """Return the given traceback string formatted for a notification."""
        return trace.split('\n') if trace else []

    def _parse_message(self, exc):
        """Return a message for a notification from the given exception."""
        return '%s: %s' % (type(exc).__name__, exc)
MailBackend(BaseBackend): 9 | """Extends ``BaseBackend`` to provide support for emailing failures. 10 | Intended to be used with the MultipleBackend: 11 | 12 | from pyres import failure 13 | 14 | from pyres.failure.mail import MailBackend 15 | from pyres.failure.multiple import MultipleBackend 16 | from pyres.failure.redis import RedisBackend 17 | 18 | class EmailFailure(MailBackend): 19 | subject = 'Pyres Failure on {queue}' 20 | from_user = 'My Email User ' 21 | recipients = ['Me '] 22 | 23 | smtp_host = 'mail.mydomain.tld' 24 | smtp_port = 25 25 | smtp_tls = True 26 | 27 | smtp_user = 'mailuser' 28 | smtp_password = 'm41lp455w0rd' 29 | 30 | failure.backend = MultipleBackend 31 | failure.backend.classes = [RedisBackend, EmailFailure] 32 | 33 | 34 | Additional notes: 35 | - The following tokens are available in subject: queue, worker, exception 36 | 37 | - Override the create_message method to provide an alternate body. It 38 | should return one of the message types from email.mime.* 39 | """ 40 | subject = 'Pyres Failure on {queue}' 41 | 42 | recipients = [] 43 | from_user = None 44 | smtp_host = None 45 | smtp_port = 25 46 | 47 | smtp_tls = False 48 | 49 | smtp_user = None 50 | smtp_password = None 51 | 52 | def save(self, resq=None): 53 | if not self.recipients or not self.smtp_host or not self.from_user: 54 | return 55 | 56 | message = self.create_message() 57 | subject = self.format_subject() 58 | 59 | message['Subject'] = subject 60 | message['From'] = self.from_user 61 | message['To'] = ", ".join(self.recipients) 62 | 63 | self.send_message(message) 64 | 65 | def format_subject(self): 66 | return self.subject.format(queue=self._queue, 67 | worker=self._worker, 68 | exception=self._exception) 69 | 70 | def create_message(self): 71 | """Returns a message body to send in this email. 
class MultipleBackend(BaseBackend):
    """Extends ``BaseBackend`` to provide support for delegating calls to multiple
    backends. Queries are delegated to the first backend in the list. Defaults to
    only the RedisBackend.

    To use:

        from pyres import failure

        from pyres.failure.base import BaseBackend
        from pyres.failure.multiple import MultipleBackend
        from pyres.failure.redis import RedisBackend

        class CustomBackend(BaseBackend):
            def save(self, resq):
                print('Custom backend')

        failure.backend = MultipleBackend
        failure.backend.classes = [RedisBackend, CustomBackend]
    """
    # Backend classes to instantiate; falls back to [RedisBackend] lazily.
    classes = []

    def __init__(self, *args):
        if not self.classes:
            self.classes = [RedisBackend]

        self.backends = [klass(*args) for klass in self.classes]
        BaseBackend.__init__(self, *args)

    @classmethod
    def count(cls, resq):
        """Delegate the failed-job count to the first backend."""
        first = MultipleBackend.classes[0]
        return first.count(resq)

    @classmethod
    def all(cls, resq, start=0, count=1):
        """Delegate listing of failures to the first backend."""
        first = MultipleBackend.classes[0]
        return first.all(resq, start, count)

    @classmethod
    def clear(cls, resq):
        """Delegate clearing of failures to the first backend."""
        first = MultipleBackend.classes[0]
        return first.clear(resq)

    def save(self, resq=None):
        """Save the failure through every configured backend.

        BUG FIX: the original used ``map(lambda x: x.save(resq), ...)``.
        On Python 3 ``map`` returns a lazy iterator that was never
        consumed, so no backend's ``save`` ever ran and failures were
        silently dropped. An explicit loop runs them eagerly.
        """
        for backend in self.backends:
            backend.save(resq)
self._parse_traceback(self._traceback), 20 | 'queue' : self._queue 21 | } 22 | if self._worker: 23 | data['worker'] = self._worker 24 | data = ResQ.encode(data) 25 | resq.redis.rpush('resque:failed', data) 26 | 27 | @classmethod 28 | def count(cls, resq): 29 | return int(resq.redis.llen('resque:failed')) 30 | 31 | @classmethod 32 | def all(cls, resq, start=0, count=1): 33 | items = resq.redis.lrange('resque:failed', start, count) or [] 34 | 35 | ret_list = [] 36 | for i in items: 37 | failure = ResQ.decode(i) 38 | failure['redis_value'] = b64encode(i) 39 | ret_list.append(failure) 40 | return ret_list 41 | 42 | @classmethod 43 | def clear(cls, resq): 44 | return resq.redis.delete('resque:failed') 45 | 46 | -------------------------------------------------------------------------------- /pyres/horde.py: -------------------------------------------------------------------------------- 1 | import sys 2 | try: 3 | import multiprocessing 4 | except: 5 | sys.exit("multiprocessing was not available") 6 | 7 | import time, os, signal 8 | import datetime 9 | import logging 10 | import logging.handlers 11 | from pyres import ResQ, Stat, get_logging_handler, special_log_file 12 | from pyres.exceptions import NoQueueError 13 | try: 14 | from collections import OrderedDict 15 | except ImportError: 16 | from ordereddict import OrderedDict 17 | from pyres.job import Job 18 | from pyres.compat import string_types 19 | import pyres.json_parser as json 20 | try: 21 | from setproctitle import setproctitle 22 | except: 23 | def setproctitle(name): 24 | pass 25 | 26 | def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None): 27 | 28 | logger = multiprocessing.get_logger() 29 | #logger = multiprocessing.log_to_stderr() 30 | logger.setLevel(log_level) 31 | handler = get_logging_handler(log_file, procname, namespace) 32 | logger.addHandler(handler) 33 | return logger 34 | 35 | class Minion(multiprocessing.Process): 36 | def __init__(self, queues, server, password, 
log_level=logging.INFO, log_path=None, interval=5, concat_logs=False, 37 | max_jobs=0): 38 | multiprocessing.Process.__init__(self, name='Minion') 39 | 40 | #format = '%(asctime)s %(levelname)s %(filename)s-%(lineno)d: %(message)s' 41 | #logHandler = logging.StreamHandler() 42 | #logHandler.setFormatter(logging.Formatter(format)) 43 | #self.logger = multiprocessing.get_logger() 44 | #self.logger.addHandler(logHandler) 45 | #self.logger.setLevel(logging.DEBUG) 46 | 47 | self.queues = queues 48 | self._shutdown = False 49 | self.hostname = os.uname()[1] 50 | self.server = server 51 | self.password = password 52 | self.interval = interval 53 | 54 | self.log_level = log_level 55 | self.log_path = log_path 56 | self.log_file = None 57 | self.concat_logs = concat_logs 58 | self.max_jobs = max_jobs 59 | 60 | def prune_dead_workers(self): 61 | pass 62 | 63 | def schedule_shutdown(self, signum, frame): 64 | self._shutdown = True 65 | 66 | def register_signal_handlers(self): 67 | signal.signal(signal.SIGTERM, self.schedule_shutdown) 68 | signal.signal(signal.SIGINT, self.schedule_shutdown) 69 | signal.signal(signal.SIGQUIT, self.schedule_shutdown) 70 | 71 | def register_minion(self): 72 | self.resq.redis.sadd('resque:minions',str(self)) 73 | self.started = datetime.datetime.now() 74 | 75 | def startup(self): 76 | self.register_signal_handlers() 77 | self.prune_dead_workers() 78 | self.register_minion() 79 | 80 | def __str__(self): 81 | return '%s:%s:%s' % (self.hostname, self.pid, ','.join(self.queues)) 82 | 83 | def reserve(self): 84 | self.logger.debug('checking queues: %s' % self.queues) 85 | job = Job.reserve(self.queues, self.resq, self.__str__()) 86 | if job: 87 | self.logger.info('Found job on %s' % job._queue) 88 | return job 89 | 90 | def process(self, job): 91 | if not job: 92 | return 93 | try: 94 | self.working_on(job) 95 | job.perform() 96 | except Exception as e: 97 | exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() 98 | self.logger.error("%s 
failed: %s" % (job, e)) 99 | job.fail(exceptionTraceback) 100 | self.failed() 101 | else: 102 | self.logger.debug("Hells yeah") 103 | self.logger.info('completed job: %s' % job) 104 | finally: 105 | self.done_working() 106 | 107 | def working_on(self, job): 108 | setproctitle('pyres_minion:%s: working on job: %s' % (os.getppid(), job._payload)) 109 | self.logger.debug('marking as working on') 110 | data = { 111 | 'queue': job._queue, 112 | 'run_at': int(time.mktime(datetime.datetime.now().timetuple())), 113 | 'payload': job._payload 114 | } 115 | data = json.dumps(data) 116 | self.resq.redis["resque:minion:%s" % str(self)] = data 117 | self.logger.debug("minion:%s" % str(self)) 118 | #self.logger.debug(self.resq.redis["resque:minion:%s" % str(self)]) 119 | 120 | def failed(self): 121 | Stat("failed", self.resq).incr() 122 | 123 | def processed(self): 124 | total_processed = Stat("processed", self.resq) 125 | total_processed.incr() 126 | 127 | def done_working(self): 128 | self.logger.debug('done working') 129 | self.processed() 130 | self.resq.redis.delete("resque:minion:%s" % str(self)) 131 | 132 | def unregister_minion(self): 133 | self.resq.redis.srem('resque:minions',str(self)) 134 | self.started = None 135 | 136 | def work(self, interval=5): 137 | 138 | self.startup() 139 | cur_job = 0 140 | while True: 141 | setproctitle('pyres_minion:%s: waiting for job on: %s' % (os.getppid(),self.queues)) 142 | self.logger.info('waiting on job') 143 | if self._shutdown: 144 | self.logger.info('shutdown scheduled') 145 | break 146 | self.logger.debug('max_jobs: %d cur_jobs: %d' % (self.max_jobs, cur_job)) 147 | if (self.max_jobs > 0 and self.max_jobs < cur_job): 148 | self.logger.debug('max_jobs reached on %s: %d' % (self.pid, cur_job)) 149 | self.logger.debug('minion sleeping for: %d secs' % interval) 150 | time.sleep(interval) 151 | cur_job = 0 152 | job = self.reserve() 153 | if job: 154 | self.process(job) 155 | cur_job = cur_job + 1 156 | else: 157 | cur_job = 0 158 | 
self.logger.debug('minion sleeping for: %d secs' % interval) 159 | time.sleep(interval) 160 | self.unregister_minion() 161 | 162 | def clear_logger(self): 163 | for handler in self.logger.handlers: 164 | self.logger.removeHandler(handler) 165 | 166 | def run(self): 167 | setproctitle('pyres_minion:%s: Starting' % (os.getppid(),)) 168 | if self.log_path: 169 | if special_log_file(self.log_path): 170 | self.log_file = self.log_path 171 | elif self.concat_logs: 172 | self.log_file = os.path.join(self.log_path, 'minion.log') 173 | else: 174 | self.log_file = os.path.join(self.log_path, 'minion-%s.log' % self.pid) 175 | namespace = 'minion:%s' % self.pid 176 | self.logger = setup_logging('minion', namespace, self.log_level, self.log_file) 177 | #self.clear_logger() 178 | if isinstance(self.server,string_types): 179 | self.resq = ResQ(server=self.server, password=self.password) 180 | elif isinstance(self.server, ResQ): 181 | self.resq = self.server 182 | else: 183 | raise Exception("Bad server argument") 184 | 185 | 186 | self.work(self.interval) 187 | #while True: 188 | # job = self.q.get() 189 | # print 'pid: %s is running %s ' % (self.pid,job) 190 | 191 | 192 | class Khan(object): 193 | _command_map = { 194 | 'ADD': 'add_minion', 195 | 'REMOVE': '_remove_minion', 196 | 'SHUTDOWN': '_schedule_shutdown' 197 | } 198 | def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, 199 | log_file=None, minions_interval=5, concat_minions_logs=False, max_jobs=0): 200 | #super(Khan,self).__init__(queues=queues,server=server,password=password) 201 | self._shutdown = False 202 | self.pool_size = int(pool_size) 203 | self.queues = queues 204 | self.server = server 205 | self.password = password 206 | self.pid = os.getpid() 207 | self.validate_queues() 208 | self._workers = OrderedDict() 209 | self.server = server 210 | self.password = password 211 | self.logging_level = logging_level 212 | self.log_file = log_file 213 | 
self.minions_interval = minions_interval 214 | self.concat_minions_logs = concat_minions_logs 215 | self.max_jobs = max_jobs 216 | 217 | #self._workers = list() 218 | 219 | def setup_resq(self): 220 | if hasattr(self,'logger'): 221 | self.logger.info('Connecting to redis server - %s' % self.server) 222 | if isinstance(self.server,string_types): 223 | self.resq = ResQ(server=self.server, password=self.password) 224 | elif isinstance(self.server, ResQ): 225 | self.resq = self.server 226 | else: 227 | raise Exception("Bad server argument") 228 | 229 | def validate_queues(self): 230 | "Checks if a worker is given atleast one queue to work on." 231 | if not self.queues: 232 | raise NoQueueError("Please give each worker at least one queue.") 233 | 234 | def startup(self): 235 | self.register_signal_handlers() 236 | 237 | 238 | def register_signal_handlers(self): 239 | signal.signal(signal.SIGTERM, self.schedule_shutdown) 240 | signal.signal(signal.SIGINT, self.schedule_shutdown) 241 | signal.signal(signal.SIGQUIT, self.schedule_shutdown) 242 | signal.signal(signal.SIGUSR1, self.kill_child) 243 | signal.signal(signal.SIGUSR2, self.add_child) 244 | if hasattr(signal, 'SIGINFO'): 245 | signal.signal(signal.SIGINFO, self.current_state) 246 | 247 | def current_state(self): 248 | tmap = {} 249 | main_thread = None 250 | import traceback 251 | from cStringIO import StringIO 252 | # get a map of threads by their ID so we can print their names 253 | # during the traceback dump 254 | for t in threading.enumerate(): 255 | if getattr(t, "ident", None): 256 | tmap[t.ident] = t 257 | else: 258 | main_thread = t 259 | 260 | out = StringIO() 261 | sep = "=" * 49 + "\n" 262 | for tid, frame in sys._current_frames().iteritems(): 263 | thread = tmap.get(tid, main_thread) 264 | if not thread: 265 | # skip old junk (left-overs from a fork) 266 | continue 267 | out.write("%s\n" % (thread.getName(), )) 268 | out.write(sep) 269 | traceback.print_stack(frame, file=out) 270 | out.write(sep) 271 | 
out.write("LOCAL VARIABLES\n") 272 | out.write(sep) 273 | pprint(frame.f_locals, stream=out) 274 | out.write("\n\n") 275 | self.logger.info(out.getvalue()) 276 | 277 | def _schedule_shutdown(self): 278 | self.schedule_shutdown(None, None) 279 | 280 | def schedule_shutdown(self, signum, frame): 281 | self.logger.info('Khan Shutdown scheduled') 282 | self._shutdown = True 283 | 284 | def kill_child(self, signum, frame): 285 | self._remove_minion() 286 | 287 | def add_child(self, signum, frame): 288 | self.add_minion() 289 | 290 | def register_khan(self): 291 | if not hasattr(self, 'resq'): 292 | self.setup_resq() 293 | self.resq.redis.sadd('resque:khans',str(self)) 294 | self.started = datetime.datetime.now() 295 | 296 | def _check_commands(self): 297 | if not self._shutdown: 298 | self.logger.debug('Checking commands') 299 | command = self.resq.redis.lpop('resque:khan:%s' % str(self)) 300 | self.logger.debug('COMMAND FOUND: %s ' % command) 301 | if command: 302 | self.process_command(command) 303 | self._check_commands() 304 | 305 | def process_command(self, command): 306 | self.logger.info('Processing Command') 307 | #available commands, shutdown, add 1, remove 1 308 | command_item = self._command_map.get(command, None) 309 | if command_item: 310 | fn = getattr(self, command_item) 311 | if fn: 312 | fn() 313 | 314 | def add_minion(self): 315 | self._add_minion() 316 | self.resq.redis.srem('resque:khans',str(self)) 317 | self.pool_size += 1 318 | self.resq.redis.sadd('resque:khans',str(self)) 319 | 320 | def _add_minion(self): 321 | if hasattr(self,'logger'): 322 | self.logger.info('Adding minion') 323 | if self.log_file: 324 | if special_log_file(self.log_file): 325 | log_path = self.log_file 326 | else: 327 | log_path = os.path.dirname(self.log_file) 328 | else: 329 | log_path = None 330 | m = Minion(self.queues, self.server, self.password, interval=self.minions_interval, 331 | log_level=self.logging_level, log_path=log_path, concat_logs=self.concat_minions_logs, 
332 | max_jobs=self.max_jobs) 333 | m.start() 334 | self._workers[m.pid] = m 335 | if hasattr(self,'logger'): 336 | self.logger.info('minion added at: %s' % m.pid) 337 | return m 338 | 339 | def _shutdown_minions(self): 340 | """ 341 | send the SIGNINT signal to each worker in the pool. 342 | """ 343 | setproctitle('pyres_manager: Waiting on children to shutdown.') 344 | for minion in self._workers.values(): 345 | minion.terminate() 346 | minion.join() 347 | 348 | def _remove_minion(self, pid=None): 349 | #if pid: 350 | # m = self._workers.pop(pid) 351 | pid, m = self._workers.popitem(False) 352 | m.terminate() 353 | self.resq.redis.srem('resque:khans',str(self)) 354 | self.pool_size -= 1 355 | self.resq.redis.sadd('resque:khans',str(self)) 356 | return m 357 | 358 | def unregister_khan(self): 359 | if hasattr(self,'logger'): 360 | self.logger.debug('unregistering khan') 361 | self.resq.redis.srem('resque:khans',str(self)) 362 | self.started = None 363 | 364 | def setup_minions(self): 365 | for i in range(self.pool_size): 366 | self._add_minion() 367 | 368 | def _setup_logging(self): 369 | self.logger = setup_logging('khan', 'khan', self.logging_level, self.log_file) 370 | 371 | def work(self, interval=2): 372 | setproctitle('pyres_manager: Starting') 373 | self.startup() 374 | self.setup_minions() 375 | self._setup_logging() 376 | self.logger.info('Running as pid: %s' % self.pid) 377 | self.logger.info('Added %s child processes' % self.pool_size) 378 | self.logger.info('Setting up pyres connection') 379 | self.setup_resq() 380 | self.register_khan() 381 | setproctitle('pyres_manager: running %s' % self.queues) 382 | while True: 383 | self._check_commands() 384 | if self._shutdown: 385 | #send signals to each child 386 | self._shutdown_minions() 387 | break 388 | #get job 389 | else: 390 | self.logger.debug('manager sleeping for: %d secs' % interval) 391 | time.sleep(interval) 392 | self.unregister_khan() 393 | 394 | def __str__(self): 395 | hostname = 
os.uname()[1] 396 | return '%s:%s:%s' % (hostname, self.pid, self.pool_size) 397 | 398 | @classmethod 399 | def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, interval=2, 400 | logging_level=logging.INFO, log_file=None, minions_interval=5, concat_minions_logs=False, max_jobs=0): 401 | worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, 402 | log_file=log_file, minions_interval=minions_interval, concat_minions_logs=concat_minions_logs, 403 | max_jobs=max_jobs) 404 | worker.work(interval=interval) 405 | 406 | #if __name__ == "__main__": 407 | # k = Khan() 408 | # k.run() 409 | 410 | if __name__ == "__main__": 411 | from optparse import OptionParser 412 | parser = OptionParser(usage="%prog [options] queue list") 413 | parser.add_option("-s", dest="server", default="localhost:6379") 414 | (options,args) = parser.parse_args() 415 | if len(args) < 1: 416 | parser.print_help() 417 | parser.error("Please give the horde at least one queue.") 418 | Khan.run(pool_size=2, queues=args, server=options.server) 419 | #khan.run() 420 | #Worker.run(queues, options.server) 421 | -------------------------------------------------------------------------------- /pyres/job.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from datetime import timedelta 4 | from pyres import ResQ, safe_str_to_class 5 | from pyres import failure 6 | from pyres.failure.redis import RedisBackend 7 | from pyres.compat import string_types 8 | 9 | class Job(object): 10 | """Every job on the ResQ is an instance of the *Job* class. 11 | 12 | The ``__init__`` takes these keyword arguments: 13 | 14 | ``queue`` -- A string defining the queue to which this Job will be 15 | added. 16 | 17 | ``payload`` -- A dictionary which contains the string name of a class 18 | which extends this Job and a list of args which will be 19 | passed to that class. 
20 | 21 | ``resq`` -- An instance of the ResQ class. 22 | 23 | ``worker`` -- The name of a specific worker if you'd like this Job to be 24 | done by that worker. Default is "None". 25 | 26 | """ 27 | 28 | safe_str_to_class = staticmethod(safe_str_to_class) 29 | 30 | def __init__(self, queue, payload, resq, worker=None): 31 | self._queue = queue 32 | self._payload = payload 33 | self.resq = resq 34 | self._worker = worker 35 | 36 | self.enqueue_timestamp = self._payload.get("enqueue_timestamp") 37 | 38 | # Set the default back end, jobs can override when we import them 39 | # inside perform(). 40 | failure.backend = RedisBackend 41 | 42 | def __str__(self): 43 | return "(Job{%s} | %s | %s)" % ( 44 | self._queue, self._payload['class'], repr(self._payload['args'])) 45 | 46 | def perform(self): 47 | """This method converts payload into args and calls the ``perform`` 48 | method on the payload class. 49 | 50 | Before calling ``perform``, a ``before_perform`` class method 51 | is called, if it exists. It takes a dictionary as an argument; 52 | currently the only things stored on the dictionary are the 53 | args passed into ``perform`` and a timestamp of when the job 54 | was enqueued. 55 | 56 | Similarly, an ``after_perform`` class method is called after 57 | ``perform`` is finished. The metadata dictionary contains the 58 | same data, plus a timestamp of when the job was performed, a 59 | ``failed`` boolean value, and if it did fail, a ``retried`` 60 | boolean value. This method is called after retry, and is 61 | called regardless of whether an exception is ultimately thrown 62 | by the perform method. 
63 | 64 | 65 | """ 66 | payload_class_str = self._payload["class"] 67 | payload_class = self.safe_str_to_class(payload_class_str) 68 | payload_class.resq = self.resq 69 | args = self._payload.get("args") 70 | 71 | metadata = dict(args=args) 72 | if self.enqueue_timestamp: 73 | metadata["enqueue_timestamp"] = self.enqueue_timestamp 74 | 75 | before_perform = getattr(payload_class, "before_perform", None) 76 | 77 | metadata["failed"] = False 78 | metadata["perform_timestamp"] = time.time() 79 | check_after = True 80 | try: 81 | if before_perform: 82 | payload_class.before_perform(metadata) 83 | return payload_class.perform(*args) 84 | except Exception as e: 85 | metadata["failed"] = True 86 | metadata["exception"] = e 87 | if not self.retry(payload_class, args): 88 | metadata["retried"] = False 89 | raise 90 | else: 91 | metadata["retried"] = True 92 | logging.exception("Retry scheduled after error in %s", self._payload) 93 | finally: 94 | after_perform = getattr(payload_class, "after_perform", None) 95 | 96 | if after_perform: 97 | payload_class.after_perform(metadata) 98 | 99 | delattr(payload_class,'resq') 100 | 101 | def fail(self, exception): 102 | """This method provides a way to fail a job and will use whatever 103 | failure backend you've provided. The default is the ``RedisBackend``. 104 | 105 | """ 106 | fail = failure.create(exception, self._queue, self._payload, 107 | self._worker) 108 | fail.save(self.resq) 109 | return fail 110 | 111 | def retry(self, payload_class, args): 112 | """This method provides a way to retry a job after a failure. 113 | If the jobclass defined by the payload containes a ``retry_every`` attribute then pyres 114 | will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class. 
115 | 116 | """ 117 | retry_every = getattr(payload_class, 'retry_every', None) 118 | retry_timeout = getattr(payload_class, 'retry_timeout', 0) 119 | 120 | if retry_every: 121 | now = ResQ._current_time() 122 | first_attempt = self._payload.get("first_attempt", now) 123 | retry_until = first_attempt + timedelta(seconds=retry_timeout) 124 | retry_at = now + timedelta(seconds=retry_every) 125 | if retry_at < retry_until: 126 | self.resq.enqueue_at(retry_at, payload_class, *args, 127 | **{'first_attempt':first_attempt}) 128 | return True 129 | return False 130 | 131 | @classmethod 132 | def reserve(cls, queues, res, worker=None, timeout=10): 133 | """Reserve a job on one of the queues. This marks this job so 134 | that other workers will not pick it up. 135 | 136 | """ 137 | if isinstance(queues, string_types): 138 | queues = [queues] 139 | queue, payload = res.pop(queues, timeout=timeout) 140 | if payload: 141 | return cls(queue, payload, res, worker) 142 | -------------------------------------------------------------------------------- /pyres/json_parser.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from pyres.compat import string_types 3 | 4 | try: 5 | #import simplejson as json 6 | import json 7 | except ImportError: 8 | import simplejson as json 9 | 10 | DATE_FORMAT = '%Y-%m-%dT%H:%M:%S' 11 | DATE_PREFIX = '@D:' 12 | 13 | class CustomJSONEncoder(json.JSONEncoder): 14 | 15 | def default(self, o): 16 | if isinstance(o, datetime): 17 | return o.strftime(DATE_PREFIX + DATE_FORMAT) 18 | return json.JSONEncoder.default(self, o) 19 | 20 | 21 | class CustomJSONDecoder(json.JSONDecoder): 22 | 23 | def decode(self, json_string): 24 | decoded = json.loads(json_string) 25 | return self.convert(decoded) 26 | 27 | def convert(self, value): 28 | if isinstance(value, string_types) and value.startswith(DATE_PREFIX): 29 | try: 30 | return datetime.strptime(value[len(DATE_PREFIX):], DATE_FORMAT) 31 | 
except ValueError: 32 | return value 33 | elif isinstance(value, dict): 34 | for k, v in value.items(): 35 | new = self.convert(v) 36 | if new != v: 37 | value[k] = new 38 | elif isinstance(value, list): 39 | for k, v in enumerate(value): 40 | new = self.convert(v) 41 | if new != v: 42 | value[k] = new 43 | return value 44 | 45 | 46 | def dumps(values): 47 | return json.dumps(values, cls=CustomJSONEncoder) 48 | 49 | 50 | def loads(string): 51 | return json.loads(string, cls=CustomJSONDecoder) 52 | -------------------------------------------------------------------------------- /pyres/scheduler.py: -------------------------------------------------------------------------------- 1 | import signal 2 | import time 3 | import logging 4 | 5 | from pyres import ResQ, __version__ 6 | from pyres.compat import string_types 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | class Scheduler(object): 11 | 12 | def __init__(self, server="localhost:6379", password=None): 13 | """ 14 | >>> from pyres.scheduler import Scheduler 15 | >>> scheduler = Scheduler('localhost:6379') 16 | """ 17 | self._shutdown = False 18 | if isinstance(server, string_types): 19 | self.resq = ResQ(server=server, password=password) 20 | elif isinstance(server, ResQ): 21 | self.resq = server 22 | else: 23 | raise Exception("Bad server argument") 24 | 25 | def register_signal_handlers(self): 26 | logger.info('registering signals') 27 | signal.signal(signal.SIGTERM, self.schedule_shutdown) 28 | signal.signal(signal.SIGINT, self.schedule_shutdown) 29 | signal.signal(signal.SIGQUIT, self.schedule_shutdown) 30 | 31 | def schedule_shutdown(self, signal, frame): 32 | logger.info('shutting down started') 33 | self._shutdown = True 34 | 35 | def __call__(self): 36 | _setproctitle("Starting") 37 | logger.info('starting up') 38 | self.register_signal_handlers() 39 | #self.load_schedule() 40 | logger.info('looking for delayed items') 41 | while True: 42 | if self._shutdown: 43 | break 44 | 
self.handle_delayed_items() 45 | _setproctitle("Waiting") 46 | logger.debug('sleeping') 47 | time.sleep(5) 48 | logger.info('shutting down complete') 49 | 50 | def next_timestamp(self): 51 | while True: 52 | timestamp = self.resq.next_delayed_timestamp() 53 | if timestamp: 54 | yield timestamp 55 | else: 56 | break 57 | 58 | 59 | def next_item(self, timestamp): 60 | while True: 61 | item = self.resq.next_item_for_timestamp(timestamp) 62 | if item: 63 | yield item 64 | else: 65 | break 66 | 67 | def handle_delayed_items(self): 68 | for timestamp in self.next_timestamp(): 69 | _setproctitle('Handling timestamp %s' % timestamp) 70 | logger.debug('handling timestamp: %s' % timestamp) 71 | for item in self.next_item(timestamp): 72 | logger.debug('queueing item %s' % item) 73 | klass = item['class'] 74 | queue = item['queue'] 75 | args = item['args'] 76 | kwargs = {} 77 | if 'first_attempt' in item: 78 | kwargs['first_attempt'] = item['first_attempt'] 79 | self.resq.enqueue_from_string(klass, queue, *args, **kwargs) 80 | 81 | 82 | @classmethod 83 | def run(cls, server, password=None): 84 | sched = cls(server=server, password=password) 85 | sched() 86 | 87 | 88 | try: 89 | from setproctitle import setproctitle 90 | except ImportError: 91 | def setproctitle(name): 92 | pass 93 | 94 | def _setproctitle(msg): 95 | setproctitle("pyres_scheduler-%s: %s" % (__version__, msg)) 96 | -------------------------------------------------------------------------------- /pyres/scripts.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from optparse import OptionParser 4 | 5 | from pyres.horde import Khan 6 | from pyres import setup_logging, setup_pidfile 7 | from pyres.scheduler import Scheduler 8 | from pyres.worker import Worker 9 | 10 | 11 | def pyres_manager(): 12 | usage = "usage: %prog [options] arg1" 13 | parser = OptionParser(usage=usage) 14 | #parser.add_option("-q", dest="queue_list") 15 | parser.add_option("--host", 
dest="host", default="localhost") 16 | parser.add_option("--port", dest="port",type="int", default=6379) 17 | parser.add_option("--password", dest="password", default=None) 18 | parser.add_option("-i", '--interval', dest='manager_interval', default=None, help='the default time interval to sleep between runs - manager') 19 | parser.add_option("--minions_interval", dest='minions_interval', default=None, help='the default time interval to sleep between runs - minions') 20 | parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') 21 | parser.add_option("--pool", type="int", dest="pool_size", default=1, help="Number of minions to spawn under the manager.") 22 | parser.add_option("-j", "--process_max_jobs", dest="max_jobs", type=int, default=0, help='how many jobs should be processed on worker run.') 23 | parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. 
"stderr", "stdout", and "syslog" are all special values.') 24 | parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.') 25 | parser.add_option("--concat_minions_logs", action="store_true", dest="concat_minions_logs", help='Concat all minions logs on same file.') 26 | (options,args) = parser.parse_args() 27 | 28 | if len(args) != 1: 29 | parser.print_help() 30 | parser.error("Argument must be a comma seperated list of queues") 31 | 32 | log_level = getattr(logging, options.log_level.upper(), 'INFO') 33 | #logging.basicConfig(level=log_level, format="%(asctime)s: %(levelname)s: %(message)s") 34 | concat_minions_logs = options.concat_minions_logs 35 | setup_pidfile(options.pidfile) 36 | 37 | manager_interval = options.manager_interval 38 | if manager_interval is not None: 39 | manager_interval = float(manager_interval) 40 | 41 | minions_interval = options.minions_interval 42 | if minions_interval is not None: 43 | minions_interval = float(minions_interval) 44 | 45 | queues = args[0].split(',') 46 | server = '%s:%s' % (options.host,options.port) 47 | password = options.password 48 | Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=manager_interval, 49 | logging_level=log_level, log_file=options.logfile, minions_interval=minions_interval, 50 | concat_minions_logs=concat_minions_logs, max_jobs=options.max_jobs) 51 | 52 | 53 | def pyres_scheduler(): 54 | usage = "usage: %prog [options] arg1" 55 | parser = OptionParser(usage=usage) 56 | #parser.add_option("-q", dest="queue_list") 57 | parser.add_option("--host", dest="host", default="localhost") 58 | parser.add_option("--port", dest="port",type="int", default=6379) 59 | parser.add_option("--password", dest="password", default=None) 60 | parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. 
Defaults to "info" if parameter not specified.') 61 | parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. "stderr", "stdout", and "syslog" are all special values.') 62 | parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.') 63 | (options,args) = parser.parse_args() 64 | log_level = getattr(logging, options.log_level.upper(),'INFO') 65 | #logging.basicConfig(level=log_level, format="%(module)s: %(asctime)s: %(levelname)s: %(message)s") 66 | setup_logging(procname="pyres_scheduler", log_level=log_level, filename=options.logfile) 67 | setup_pidfile(options.pidfile) 68 | server = '%s:%s' % (options.host, options.port) 69 | password = options.password 70 | Scheduler.run(server, password) 71 | 72 | 73 | def pyres_worker(): 74 | usage = "usage: %prog [options] arg1" 75 | parser = OptionParser(usage=usage) 76 | 77 | parser.add_option("--host", dest="host", default="localhost") 78 | parser.add_option("--port", dest="port",type="int", default=6379) 79 | parser.add_option("--password", dest="password", default=None) 80 | parser.add_option("-i", '--interval', dest='interval', default=None, help='the default time interval to sleep between runs') 81 | parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') 82 | parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. 
"stderr", "stdout", and "syslog" are all special values.') 83 | parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.') 84 | parser.add_option("-t", '--timeout', dest='timeout', default=None, help='the timeout in seconds for this worker') 85 | (options,args) = parser.parse_args() 86 | 87 | if len(args) != 1: 88 | parser.print_help() 89 | parser.error("Argument must be a comma seperated list of queues") 90 | 91 | log_level = getattr(logging, options.log_level.upper(), 'INFO') 92 | setup_logging(procname="pyres_worker", log_level=log_level, filename=options.logfile) 93 | setup_pidfile(options.pidfile) 94 | 95 | interval = options.interval 96 | if interval is not None: 97 | interval = int(interval) 98 | 99 | timeout = options.timeout and int(options.timeout) 100 | 101 | queues = args[0].split(',') 102 | server = '%s:%s' % (options.host,options.port) 103 | password = options.password 104 | Worker.run(queues, server, password, interval, timeout=timeout) 105 | -------------------------------------------------------------------------------- /pyres/worker.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import signal 3 | import datetime, time 4 | import os, sys 5 | from pyres import json_parser as json 6 | from pyres.compat import commands 7 | import random 8 | 9 | from pyres.exceptions import NoQueueError, JobError, TimeoutError, CrashError 10 | from pyres.job import Job 11 | from pyres import ResQ, Stat, __version__ 12 | from pyres.compat import string_types 13 | 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | class Worker(object): 18 | """Defines a worker. 
The ``pyres_worker`` script instantiates this Worker 19 | class and passes a comma-separated list of queues to listen on.:: 20 | 21 | >>> from pyres.worker import Worker 22 | >>> Worker.run([queue1, queue2], server="localhost:6379/0") 23 | 24 | """ 25 | 26 | job_class = Job 27 | 28 | def __init__(self, queues=(), server="localhost:6379", password=None, timeout=None): 29 | self.queues = queues 30 | self.validate_queues() 31 | self._shutdown = False 32 | self.child = None 33 | self.pid = os.getpid() 34 | self.hostname = os.uname()[1] 35 | self.timeout = timeout 36 | 37 | if isinstance(server, string_types): 38 | self.resq = ResQ(server=server, password=password) 39 | elif isinstance(server, ResQ): 40 | self.resq = server 41 | else: 42 | raise Exception("Bad server argument") 43 | 44 | def validate_queues(self): 45 | """Checks if a worker is given at least one queue to work on.""" 46 | if not self.queues: 47 | raise NoQueueError("Please give each worker at least one queue.") 48 | 49 | def register_worker(self): 50 | self.resq.redis.sadd('resque:workers', str(self)) 51 | #self.resq._redis.add("worker:#{self}:started", Time.now.to_s) 52 | self.started = datetime.datetime.now() 53 | 54 | def _set_started(self, dt): 55 | if dt: 56 | key = int(time.mktime(dt.timetuple())) 57 | self.resq.redis.set("resque:worker:%s:started" % self, key) 58 | else: 59 | self.resq.redis.delete("resque:worker:%s:started" % self) 60 | 61 | def _get_started(self): 62 | datestring = self.resq.redis.get("resque:worker:%s:started" % self) 63 | #ds = None 64 | #if datestring: 65 | # ds = datetime.datetime.strptime(datestring, '%Y-%m-%d %H:%M:%S') 66 | return datestring 67 | 68 | started = property(_get_started, _set_started) 69 | 70 | def unregister_worker(self): 71 | self.resq.redis.srem('resque:workers', str(self)) 72 | self.started = None 73 | Stat("processed:%s" % self, self.resq).clear() 74 | Stat("failed:%s" % self, self.resq).clear() 75 | 76 | def prune_dead_workers(self): 77 | all_workers = 
Worker.all(self.resq) 78 | known_workers = Worker.worker_pids() 79 | for worker in all_workers: 80 | host, pid, queues = worker.id.split(':') 81 | if host != self.hostname: 82 | continue 83 | if pid in known_workers: 84 | continue 85 | logger.warning("pruning dead worker: %s" % worker) 86 | worker.unregister_worker() 87 | 88 | def startup(self): 89 | self.register_signal_handlers() 90 | self.prune_dead_workers() 91 | self.register_worker() 92 | 93 | def register_signal_handlers(self): 94 | signal.signal(signal.SIGTERM, self.shutdown_all) 95 | signal.signal(signal.SIGINT, self.shutdown_all) 96 | signal.signal(signal.SIGQUIT, self.schedule_shutdown) 97 | signal.signal(signal.SIGUSR1, self.kill_child) 98 | 99 | def shutdown_all(self, signum, frame): 100 | self.schedule_shutdown(signum, frame) 101 | self.kill_child(signum, frame) 102 | 103 | def schedule_shutdown(self, signum, frame): 104 | self._shutdown = True 105 | 106 | def kill_child(self, signum, frame): 107 | if self.child: 108 | logger.info("Killing child at %s" % self.child) 109 | os.kill(self.child, signal.SIGKILL) 110 | 111 | def __str__(self): 112 | if getattr(self,'id', None): 113 | return self.id 114 | return '%s:%s:%s' % (self.hostname, self.pid, ','.join(self.queues)) 115 | 116 | def _setproctitle(self, msg): 117 | setproctitle("pyres_worker-%s [%s]: %s" % (__version__, 118 | ','.join(self.queues), 119 | msg)) 120 | 121 | def work(self, interval=5): 122 | """Invoked by ``run`` method. ``work`` listens on a list of queues and sleeps 123 | for ``interval`` time. 124 | 125 | ``interval`` -- Number of seconds the worker will wait until processing the next job. Default is "5". 126 | 127 | Whenever a worker finds a job on the queue it first calls ``reserve`` on 128 | that job to make sure another worker won't run it, then *forks* itself to 129 | work on that job. 
130 | 131 | """ 132 | self._setproctitle("Starting") 133 | logger.info("starting") 134 | self.startup() 135 | 136 | while True: 137 | if self._shutdown: 138 | logger.info('shutdown scheduled') 139 | break 140 | 141 | self.register_worker() 142 | 143 | job = self.reserve(interval) 144 | 145 | if job: 146 | self.fork_worker(job) 147 | else: 148 | if interval == 0: 149 | break 150 | #procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}" 151 | self._setproctitle("Waiting") 152 | #time.sleep(interval) 153 | self.unregister_worker() 154 | 155 | def fork_worker(self, job): 156 | """Invoked by ``work`` method. ``fork_worker`` does the actual forking to create the child 157 | process that will process the job. It's also responsible for monitoring the child process 158 | and handling hangs and crashes. 159 | 160 | Finally, the ``process`` method actually processes the job by eventually calling the Job 161 | instance's ``perform`` method. 162 | 163 | """ 164 | logger.debug('picked up job') 165 | logger.debug('job details: %s' % job) 166 | self.before_fork(job) 167 | self.child = os.fork() 168 | if self.child: 169 | self._setproctitle("Forked %s at %s" % 170 | (self.child, 171 | datetime.datetime.now())) 172 | logger.info('Forked %s at %s' % (self.child, 173 | datetime.datetime.now())) 174 | 175 | try: 176 | start = datetime.datetime.now() 177 | 178 | # waits for the result or times out 179 | while True: 180 | pid, status = os.waitpid(self.child, os.WNOHANG) 181 | if pid != 0: 182 | if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0: 183 | break 184 | if os.WIFSTOPPED(status): 185 | logger.warning("Process stopped by signal %d" % os.WSTOPSIG(status)) 186 | else: 187 | if os.WIFSIGNALED(status): 188 | raise CrashError("Unexpected exit by signal %d" % os.WTERMSIG(status)) 189 | raise CrashError("Unexpected exit status %d" % os.WEXITSTATUS(status)) 190 | 191 | time.sleep(0.5) 192 | 193 | now = datetime.datetime.now() 194 | if self.timeout and ((now - 
start).seconds > self.timeout): 195 | os.kill(self.child, signal.SIGKILL) 196 | os.waitpid(-1, os.WNOHANG) 197 | raise TimeoutError("Timed out after %d seconds" % self.timeout) 198 | 199 | except OSError as ose: 200 | import errno 201 | 202 | if ose.errno != errno.EINTR: 203 | raise ose 204 | except JobError: 205 | self._handle_job_exception(job) 206 | finally: 207 | # If the child process' job called os._exit manually we need to 208 | # finish the clean up here. 209 | if self.job(): 210 | self.done_working(job) 211 | 212 | logger.debug('done waiting') 213 | else: 214 | self._setproctitle("Processing %s since %s" % 215 | (job, 216 | datetime.datetime.now())) 217 | logger.info('Processing %s since %s' % 218 | (job, datetime.datetime.now())) 219 | self.after_fork(job) 220 | 221 | # re-seed the Python PRNG after forking, otherwise 222 | # all job process will share the same sequence of 223 | # random numbers 224 | random.seed() 225 | 226 | self.process(job) 227 | os._exit(0) 228 | self.child = None 229 | 230 | def before_fork(self, job): 231 | """ 232 | hook for making changes immediately before forking to process 233 | a job 234 | """ 235 | pass 236 | 237 | def after_fork(self, job): 238 | """ 239 | hook for making changes immediately after forking to process a 240 | job 241 | """ 242 | pass 243 | 244 | def before_process(self, job): 245 | return job 246 | 247 | def process(self, job=None): 248 | if not job: 249 | job = self.reserve() 250 | 251 | job_failed = False 252 | try: 253 | try: 254 | self.working_on(job) 255 | job = self.before_process(job) 256 | return job.perform() 257 | except Exception: 258 | job_failed = True 259 | self._handle_job_exception(job) 260 | except SystemExit as e: 261 | if e.code != 0: 262 | job_failed = True 263 | self._handle_job_exception(job) 264 | 265 | if not job_failed: 266 | logger.debug('completed job') 267 | logger.debug('job details: %s' % job) 268 | finally: 269 | self.done_working(job) 270 | 271 | def _handle_job_exception(self, 
job): 272 | exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() 273 | logger.exception("%s failed: %s" % (job, exceptionValue)) 274 | job.fail(exceptionTraceback) 275 | self.failed() 276 | 277 | def reserve(self, timeout=10): 278 | logger.debug('checking queues %s' % self.queues) 279 | job = self.job_class.reserve(self.queues, self.resq, self.__str__(), timeout=timeout) 280 | if job: 281 | logger.info('Found job on %s: %s' % (job._queue, job)) 282 | return job 283 | 284 | def working_on(self, job): 285 | logger.debug('marking as working on') 286 | data = { 287 | 'queue': job._queue, 288 | 'run_at': str(int(time.mktime(datetime.datetime.now().timetuple()))), 289 | 'payload': job._payload 290 | } 291 | data = json.dumps(data) 292 | self.resq.redis["resque:worker:%s" % str(self)] = data 293 | logger.debug("worker:%s" % str(self)) 294 | logger.debug(self.resq.redis["resque:worker:%s" % str(self)]) 295 | 296 | def done_working(self, job): 297 | logger.debug('done working on %s', job) 298 | self.processed() 299 | self.resq.redis.delete("resque:worker:%s" % str(self)) 300 | 301 | def processed(self): 302 | total_processed = Stat("processed", self.resq) 303 | worker_processed = Stat("processed:%s" % str(self), self.resq) 304 | total_processed.incr() 305 | worker_processed.incr() 306 | 307 | def get_processed(self): 308 | return Stat("processed:%s" % str(self), self.resq).get() 309 | 310 | def failed(self): 311 | Stat("failed", self.resq).incr() 312 | Stat("failed:%s" % self, self.resq).incr() 313 | 314 | def get_failed(self): 315 | return Stat("failed:%s" % self, self.resq).get() 316 | 317 | def job(self): 318 | data = self.resq.redis.get("resque:worker:%s" % self) 319 | if data: 320 | return ResQ.decode(data) 321 | return {} 322 | 323 | 324 | def processing(self): 325 | return self.job() 326 | 327 | def state(self): 328 | if self.resq.redis.exists('resque:worker:%s' % self): 329 | return 'working' 330 | return 'idle' 331 | 332 | @classmethod 333 | def 
worker_pids(cls): 334 | """Returns an array of all pids (as strings) of the workers on 335 | this machine. Used when pruning dead workers.""" 336 | cmd = "ps -A -o pid,command | grep pyres_worker | grep -v grep" 337 | output = commands.getoutput(cmd) 338 | if output: 339 | return map(lambda l: l.strip().split(' ')[0], output.split("\n")) 340 | else: 341 | return [] 342 | 343 | @classmethod 344 | def run(cls, queues, server="localhost:6379", password=None, interval=None, timeout=None): 345 | worker = cls(queues=queues, server=server, password=password, timeout=timeout) 346 | if interval is not None: 347 | worker.work(interval) 348 | else: 349 | worker.work() 350 | 351 | @classmethod 352 | def all(cls, host="localhost:6379"): 353 | if isinstance(host,string_types): 354 | resq = ResQ(host) 355 | elif isinstance(host, ResQ): 356 | resq = host 357 | 358 | return [Worker.find(w,resq) for w in resq.workers() or []] 359 | 360 | @classmethod 361 | def working(cls, host): 362 | if isinstance(host, string_types): 363 | resq = ResQ(host) 364 | elif isinstance(host, ResQ): 365 | resq = host 366 | total = [] 367 | for key in Worker.all(host): 368 | total.append('resque:worker:%s' % key) 369 | names = [] 370 | for key in total: 371 | value = resq.redis.get(key) 372 | if value: 373 | w = Worker.find(key[14:], resq) #resque:worker: 374 | names.append(w) 375 | return names 376 | 377 | @classmethod 378 | def find(cls, worker_id, resq): 379 | if Worker.exists(worker_id, resq): 380 | queues = worker_id.split(':')[-1].split(',') 381 | worker = cls(queues,resq) 382 | worker.id = worker_id 383 | return worker 384 | else: 385 | return None 386 | 387 | @classmethod 388 | def exists(cls, worker_id, resq): 389 | return resq.redis.sismember('resque:workers', worker_id) 390 | 391 | 392 | try: 393 | from setproctitle import setproctitle 394 | except ImportError: 395 | def setproctitle(name): 396 | pass 397 | 398 | 399 | if __name__ == "__main__": 400 | from optparse import OptionParser 401 | 
parser = OptionParser() 402 | parser.add_option("-q", dest="queue_list") 403 | parser.add_option("-s", dest="server", default="localhost:6379") 404 | (options,args) = parser.parse_args() 405 | if not options.queue_list: 406 | parser.print_help() 407 | parser.error("Please give each worker at least one queue.") 408 | queues = options.queue_list.split(',') 409 | Worker.run(queues, options.server) 410 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | nose==1.1.2 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | simplejson>3.0 2 | redis>2.4.12 3 | setproctitle>1.0 4 | -------------------------------------------------------------------------------- /roadmap.md: -------------------------------------------------------------------------------- 1 | pyres todo and roadmap 2 | 3 | 1.3 4 | === 5 | * resweb moved into own package 6 | 7 | 2.0 8 | === 9 | * move from duck typed class to a decorated function for jobs 10 | * add better hooks, like retools 11 | 12 | 2.1 13 | === 14 | * add namespace support 15 | * cleanup workers/extensions 16 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from setuptools import setup, find_packages 3 | from setuptools.command.test import test as TestCommand 4 | 5 | requires=[ 6 | item for item in 7 | open("requirements.txt").read().split("\n") 8 | if item] 9 | 10 | if sys.version_info[0:2] == (2,6): 11 | requires.append('ordereddict') 12 | 13 | class PyTest(TestCommand): 14 | def finalize_options(self): 15 | TestCommand.finalize_options(self) 16 | self.test_args = [] 17 | self.test_suite = True 18 | 19 | def run_tests(self): 20 
| #import here, cause outside the eggs aren't loaded 21 | import pytest 22 | result = pytest.main(self.test_args) 23 | sys.exit(result) 24 | 25 | version='1.5' 26 | setup( 27 | name='pyres', 28 | version=version, 29 | description='Python resque clone', 30 | author='Matt George', 31 | author_email='mgeorge@gmail.com', 32 | maintainer='Matt George', 33 | license='MIT', 34 | url='http://github.com/binarydud/pyres', 35 | packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), 36 | download_url='http://pypi.python.org/packages/source/p/pyres/pyres-%s.tar.gz' % version, 37 | include_package_data=True, 38 | package_data={'': ['requirements.txt']}, 39 | entry_points = """\ 40 | [console_scripts] 41 | pyres_manager=pyres.scripts:pyres_manager 42 | pyres_scheduler=pyres.scripts:pyres_scheduler 43 | pyres_worker=pyres.scripts:pyres_worker 44 | """, 45 | tests_require=requires + ['pytest',], 46 | cmdclass={'test': PyTest}, 47 | install_requires=requires, 48 | classifiers = [ 49 | 'Development Status :: 4 - Beta', 50 | 'Environment :: Console', 51 | 'Intended Audience :: Developers', 52 | 'License :: OSI Approved :: MIT License', 53 | 'Operating System :: OS Independent', 54 | 'Programming Language :: Python :: 2.6', 55 | 'Programming Language :: Python :: 2.7', 56 | 'Programming Language :: Python :: 3.3', 57 | 'Programming Language :: Python'], 58 | ) 59 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from pyres import ResQ, str_to_class 4 | 5 | class tests(object): 6 | queue = 'basic' 7 | 8 | @staticmethod 9 | def perform(name): 10 | s = "name:%s" % name 11 | return s 12 | 13 | class Basic(object): 14 | queue = 'basic' 15 | 16 | @staticmethod 17 | def perform(name): 18 | s = "name:%s" % name 19 | return s 20 | 21 | class BasicMulti(object): 22 | queue = 'basic' 23 | @staticmethod 24 | def 
perform(name, age): 25 | print('name: %s, age: %s' % (name, age)) 26 | 27 | 28 | class ReturnAllArgsJob(object): 29 | queue = 'basic' 30 | 31 | @staticmethod 32 | def perform(*args): 33 | return args 34 | 35 | 36 | class RetryOnExceptionJob(object): 37 | queue = 'basic' 38 | retry_every = 5 39 | retry_timeout = 15 40 | 41 | @staticmethod 42 | def perform(fail_until): 43 | if ResQ._current_time() < fail_until: 44 | raise Exception("Don't blame me! I'm supposed to fail!") 45 | else: 46 | return True 47 | 48 | class TimeoutJob(object): 49 | queue = 'basic' 50 | 51 | @staticmethod 52 | def perform(wait_for): 53 | import time 54 | time.sleep(wait_for) 55 | return "Done Sleeping" 56 | 57 | class CrashJob(object): 58 | queue = 'basic' 59 | 60 | @staticmethod 61 | def perform(): 62 | # Dangerous, this will cause a hard crash of the python process 63 | import ctypes 64 | ctypes.string_at(1) 65 | return "Never got here" 66 | 67 | class PrematureExitJob(object): 68 | queue = 'basic' 69 | 70 | @staticmethod 71 | def perform(exit_code): 72 | import sys 73 | sys.exit(exit_code) 74 | return "Never got here" 75 | 76 | class PrematureHardExitJob(object): 77 | queue = 'basic' 78 | 79 | @staticmethod 80 | def perform(exit_code): 81 | os._exit(exit_code) 82 | return "Never got here" 83 | 84 | class TestProcess(object): 85 | queue = 'high' 86 | 87 | @staticmethod 88 | def perform(): 89 | import time 90 | time.sleep(.5) 91 | return 'Done Sleeping' 92 | 93 | 94 | class ErrorObject(object): 95 | queue = 'basic' 96 | 97 | @staticmethod 98 | def perform(): 99 | raise Exception("Could not finish job") 100 | 101 | class LongObject(object): 102 | queue = 'long_runnning' 103 | 104 | @staticmethod 105 | def perform(sleep_time): 106 | import time 107 | time.sleep(sleep_time) 108 | print('Done Sleeping') 109 | 110 | def test_str_to_class(): 111 | ret = str_to_class('tests.Basic') 112 | assert ret 113 | assert ret == Basic 114 | assert str_to_class('hello.World') == None 115 | 116 | class 
ImportTest(unittest.TestCase): 117 | def test_safe_str_to_class(self): 118 | from pyres import safe_str_to_class 119 | assert safe_str_to_class('tests.Basic') == Basic 120 | self.assertRaises(ImportError, safe_str_to_class, 'test.Mine') 121 | self.assertRaises(ImportError, safe_str_to_class, 'tests.World') 122 | # test that we'll use the class name as a module name if no 123 | # module name is provided (for Ruby compatibility) 124 | assert safe_str_to_class('tests') == tests 125 | 126 | class PyResTests(unittest.TestCase): 127 | def setUp(self): 128 | self.resq = ResQ() 129 | self.redis = self.resq.redis 130 | self.redis.flushall() 131 | 132 | def tearDown(self): 133 | self.redis.flushall() 134 | del self.redis 135 | del self.resq 136 | -------------------------------------------------------------------------------- /tests/test_failure.py: -------------------------------------------------------------------------------- 1 | from tests import PyResTests, Basic 2 | from pyres import failure 3 | from pyres.job import Job 4 | 5 | class FailureTests(PyResTests): 6 | def setUp(self): 7 | PyResTests.setUp(self) 8 | self.queue_name = 'basic' 9 | self.job_class = Basic 10 | 11 | def test_count(self): 12 | self.resq.enqueue(self.job_class,"test1") 13 | job = Job.reserve(self.queue_name,self.resq) 14 | job.fail("problem") 15 | assert failure.count(self.resq) == 1 16 | assert self.redis.llen('resque:failed') == 1 17 | 18 | def test_create(self): 19 | self.resq.enqueue(self.job_class,"test1") 20 | job = Job.reserve(self.queue_name,self.resq) 21 | e = Exception('test') 22 | fail = failure.create(e, self.queue_name, job._payload) 23 | assert isinstance(fail._payload, dict) 24 | fail.save(self.resq) 25 | assert failure.count(self.resq) == 1 26 | assert self.redis.llen('resque:failed') == 1 27 | 28 | def test_all(self): 29 | self.resq.enqueue(self.job_class,"test1") 30 | job = Job.reserve(self.queue_name,self.resq) 31 | e = Exception('problem') 32 | job.fail(e) 33 | assert 
len(failure.all(self.resq, 0, 20)) == 1 34 | 35 | def test_clear(self): 36 | self.resq.enqueue(self.job_class,"test1") 37 | job = Job.reserve(self.queue_name,self.resq) 38 | e = Exception('problem') 39 | job.fail(e) 40 | assert self.redis.llen('resque:failed') == 1 41 | failure.clear(self.resq) 42 | assert self.redis.llen('resque:failed') == 0 43 | 44 | def test_requeue(self): 45 | self.resq.enqueue(self.job_class,"test1") 46 | job = Job.reserve(self.queue_name,self.resq) 47 | e = Exception('problem') 48 | fail_object = job.fail(e) 49 | assert self.resq.size(self.queue_name) == 0 50 | failure.requeue(self.resq, fail_object) 51 | assert self.resq.size(self.queue_name) == 1 52 | job = Job.reserve(self.queue_name,self.resq) 53 | assert job._queue == self.queue_name 54 | mod_with_class = '{module}.{klass}'.format( 55 | module=self.job_class.__module__, 56 | klass=self.job_class.__name__) 57 | self.assertEqual(job._payload, {'class':mod_with_class,'args':['test1'],'enqueue_timestamp': job.enqueue_timestamp}) 58 | -------------------------------------------------------------------------------- /tests/test_failure_multi.py: -------------------------------------------------------------------------------- 1 | from tests import Basic 2 | from tests.test_failure import FailureTests 3 | 4 | from pyres import failure 5 | from pyres.failure.base import BaseBackend 6 | from pyres.failure.multiple import MultipleBackend 7 | from pyres.failure.redis import RedisBackend 8 | 9 | # Inner class for the failure backend 10 | class TestBackend(BaseBackend): 11 | def save(self, resq): 12 | resq.redis.set('testbackend:called', 1) 13 | 14 | failure.backend = MultipleBackend 15 | failure.backend.classes = [RedisBackend, TestBackend] 16 | 17 | class BasicMultiBackend(Basic): 18 | queue = 'basicmultibackend' 19 | 20 | class MultipleFailureTests(FailureTests): 21 | def setUp(self): 22 | FailureTests.setUp(self) 23 | self.job_class = BasicMultiBackend 24 | self.queue_name = 'basicmultibackend' 25 
| -------------------------------------------------------------------------------- /tests/test_horde.py: -------------------------------------------------------------------------------- 1 | from tests import PyResTests, Basic, TestProcess 2 | from pyres import horde 3 | import os 4 | 5 | class KhanTests(PyResTests): 6 | def test_khan_init(self): 7 | from pyres.exceptions import NoQueueError 8 | self.assertRaises(NoQueueError, horde.Khan, 2, []) 9 | self.assertRaises(ValueError, horde.Khan, 'test', ['test']) 10 | 11 | def test_register_khan(self): 12 | khan = horde.Khan(pool_size=1, queues=['basic']) 13 | khan.register_khan() 14 | name = "%s:%s:1" % (os.uname()[1],os.getpid()) 15 | assert self.redis.sismember('resque:khans',name) 16 | 17 | def test_unregister_khan(self): 18 | khan = horde.Khan(pool_size=1, queues=['basic']) 19 | khan.register_khan() 20 | name = "%s:%s:1" % (os.uname()[1],os.getpid()) 21 | assert self.redis.sismember('resque:khans',name) 22 | assert self.redis.scard('resque:khans') == 1 23 | khan.unregister_khan() 24 | assert not self.redis.sismember('resque:khans', name) 25 | assert self.redis.scard('resque:khans') == 0 26 | 27 | def test_setup_minions(self): 28 | khan = horde.Khan(pool_size=1, queues=['basic']) 29 | khan.setup_minions() 30 | assert len(khan._workers) == 1 31 | khan._shutdown_minions() 32 | 33 | def test_setup_resq(self): 34 | khan = horde.Khan(pool_size=1, queues=['basic']) 35 | assert not hasattr(khan, 'resq') 36 | khan.setup_resq() 37 | assert hasattr(khan, 'resq') 38 | 39 | def test_add_minion(self): 40 | khan = horde.Khan(pool_size=1, queues=['basic']) 41 | khan.setup_minions() 42 | khan.register_khan() 43 | name = "%s:%s:1" % (os.uname()[1],os.getpid()) 44 | assert self.redis.sismember('resque:khans',name) 45 | khan.add_minion() 46 | assert len(khan._workers) == 2 47 | assert not self.redis.sismember('resque:khans',name) 48 | name = '%s:%s:2' % (os.uname()[1], os.getpid()) 49 | assert khan.pool_size == 2 50 | assert 
self.redis.sismember('resque:khans',name) 51 | khan._shutdown_minions() 52 | 53 | def test_remove_minion(self): 54 | khan = horde.Khan(pool_size=1, queues=['basic']) 55 | khan.setup_minions() 56 | khan.register_khan() 57 | assert khan.pool_size == 1 58 | khan._remove_minion() 59 | assert khan.pool_size == 0 60 | -------------------------------------------------------------------------------- /tests/test_jobs.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from tests import PyResTests, Basic, TestProcess, ReturnAllArgsJob 3 | from pyres.job import Job 4 | class JobTests(PyResTests): 5 | def test_reserve(self): 6 | self.resq.enqueue(Basic,"test1") 7 | job = Job.reserve('basic', self.resq) 8 | assert job._queue == 'basic' 9 | assert job._payload 10 | self.assertEqual(job._payload, {'class':'tests.Basic','args':['test1'],'enqueue_timestamp':job.enqueue_timestamp}) 11 | 12 | def test_perform(self): 13 | self.resq.enqueue(Basic,"test1") 14 | job = Job.reserve('basic',self.resq) 15 | self.resq.enqueue(TestProcess) 16 | job2 = Job.reserve('high', self.resq) 17 | assert job.perform() == "name:test1" 18 | assert job2.perform() 19 | 20 | def test_fail(self): 21 | self.resq.enqueue(Basic,"test1") 22 | job = Job.reserve('basic',self.resq) 23 | assert self.redis.llen('resque:failed') == 0 24 | job.fail("problem") 25 | assert self.redis.llen('resque:failed') == 1 26 | 27 | def test_date_arg_type(self): 28 | dt = datetime.now().replace(microsecond=0) 29 | self.resq.enqueue(ReturnAllArgsJob, dt) 30 | job = Job.reserve('basic',self.resq) 31 | result = job.perform() 32 | assert result[0] == dt 33 | -------------------------------------------------------------------------------- /tests/test_json.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from tests import PyResTests 3 | import pyres.json_parser as json 4 | 5 | class 
JSONTests(PyResTests): 6 | def test_encode_decode_date(self): 7 | dt = datetime(1972, 1, 22); 8 | encoded = json.dumps({'dt': dt}) 9 | decoded = json.loads(encoded) 10 | assert decoded['dt'] == dt 11 | 12 | def test_dates_in_lists(self): 13 | dates = [datetime.now() for i in range(50)] 14 | decoded = json.loads(json.dumps(dates)) 15 | for value in dates: 16 | assert isinstance(value, datetime) 17 | 18 | def test_dates_in_dict(self): 19 | dates = dict((i, datetime.now()) for i in range(50)) 20 | decoded = json.loads(json.dumps(dates)) 21 | for i, value in dates.items(): 22 | assert isinstance(i, int) 23 | assert isinstance(value, datetime) 24 | 25 | -------------------------------------------------------------------------------- /tests/test_resq.py: -------------------------------------------------------------------------------- 1 | from tests import PyResTests, Basic, TestProcess 2 | from pyres import ResQ 3 | from pyres.worker import Worker 4 | from pyres.job import Job 5 | import os 6 | class ResQTests(PyResTests): 7 | def test_enqueue(self): 8 | self.resq.enqueue(Basic,"test1") 9 | self.resq.enqueue(Basic,"test2", "moretest2args") 10 | ResQ._enqueue(Basic, "test3") 11 | assert self.redis.llen("resque:queue:basic") == 3 12 | assert self.redis.sismember('resque:queues','basic') 13 | 14 | def test_push(self): 15 | self.resq.push('pushq','content-newqueue') 16 | self.resq.push('pushq','content2-newqueue') 17 | assert self.redis.llen('resque:queue:pushq') == 2 18 | assert self.redis.lindex('resque:queue:pushq', 0).decode() == ResQ.encode('content-newqueue') 19 | assert self.redis.lindex('resque:queue:pushq', 1).decode() == ResQ.encode('content2-newqueue') 20 | 21 | def test_pop(self): 22 | self.resq.push('pushq','content-newqueue') 23 | self.resq.push('pushq','content2-newqueue') 24 | assert self.redis.llen('resque:queue:pushq') == 2 25 | assert self.resq.pop('pushq') == ('pushq', 'content-newqueue') 26 | assert self.redis.llen('resque:queue:pushq') == 1 27 | assert 
self.resq.pop(['pushq']) == ('pushq', 'content2-newqueue') 28 | assert self.redis.llen('resque:queue:pushq') == 0 29 | 30 | def test_pop_two_queues(self): 31 | self.resq.push('pushq1', 'content-q1-1') 32 | self.resq.push('pushq1', 'content-q1-2') 33 | self.resq.push('pushq2', 'content-q2-1') 34 | assert self.redis.llen('resque:queue:pushq1') == 2 35 | assert self.redis.llen('resque:queue:pushq2') == 1 36 | assert self.resq.pop(['pushq1', 'pushq2']) == ('pushq1', 'content-q1-1') 37 | assert self.redis.llen('resque:queue:pushq1') == 1 38 | assert self.redis.llen('resque:queue:pushq2') == 1 39 | assert self.resq.pop(['pushq2', 'pushq1']) == ('pushq2', 'content-q2-1') 40 | assert self.redis.llen('resque:queue:pushq1') == 1 41 | assert self.redis.llen('resque:queue:pushq2') == 0 42 | assert self.resq.pop(['pushq2', 'pushq1']) == ('pushq1', 'content-q1-2') 43 | assert self.redis.llen('resque:queue:pushq1') == 0 44 | assert self.redis.llen('resque:queue:pushq2') == 0 45 | assert self.resq.pop(['pushq1', 'pushq2'], timeout=1) == (None, None) 46 | 47 | def test_peek(self): 48 | self.resq.enqueue(Basic,"test1") 49 | self.resq.enqueue(Basic,"test2") 50 | assert len(self.resq.peek('basic',0,20)) == 2 51 | 52 | def test_size(self): 53 | self.resq.enqueue(Basic,"test1") 54 | self.resq.enqueue(Basic,"test2") 55 | assert self.resq.size('basic') == 2 56 | assert self.resq.size('noq') == 0 57 | 58 | def test_redis_property(self): 59 | from redis import Redis 60 | rq = ResQ(server="localhost:6379") 61 | red = Redis() 62 | #rq2 = ResQ(server=red) 63 | self.assertRaises(Exception, rq.redis,[Basic]) 64 | 65 | def test_info(self): 66 | self.resq.enqueue(Basic,"test1") 67 | self.resq.enqueue(TestProcess) 68 | info = self.resq.info() 69 | assert info['queues'] == 2 70 | assert info['servers'] == ['localhost:6379'] 71 | assert info['workers'] == 0 72 | worker = Worker(['basic']) 73 | worker.register_worker() 74 | info = self.resq.info() 75 | assert info['workers'] == 1 76 | 77 | def 
test_workers(self): 78 | worker = Worker(['basic']) 79 | worker.register_worker() 80 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 81 | assert len(self.resq.workers()) == 1 82 | #assert Worker.find(name, self.resq) in self.resq.workers() 83 | 84 | def test_enqueue_from_string(self): 85 | self.resq.enqueue_from_string('tests.Basic','basic','test1') 86 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 87 | assert self.redis.llen("resque:queue:basic") == 1 88 | job = Job.reserve('basic', self.resq) 89 | worker = Worker(['basic']) 90 | worker.process(job) 91 | assert not self.redis.get('resque:worker:%s' % worker) 92 | assert not self.redis.get("resque:stat:failed") 93 | assert not self.redis.get("resque:stat:failed:%s" % name) 94 | 95 | def test_remove_queue(self): 96 | self.resq.enqueue_from_string('tests.Basic','basic','test1') 97 | assert 'basic' in self.resq._watched_queues 98 | assert self.redis.sismember('resque:queues','basic') 99 | assert self.redis.llen('resque:queue:basic') == 1 100 | self.resq.remove_queue('basic') 101 | assert 'basic' not in self.resq._watched_queues 102 | assert not self.redis.sismember('resque:queues','basic') 103 | assert not self.redis.exists('resque:queue:basic') 104 | 105 | def test_keys(self): 106 | self.resq.enqueue_from_string('tests.Basic','basic','test1') 107 | assert 'queue:basic' in self.resq.keys() 108 | assert 'queues' in self.resq.keys() 109 | 110 | def test_queues(self): 111 | assert self.resq.queues() == [] 112 | self.resq.enqueue_from_string('tests.Basic','basic','test1') 113 | assert len(self.resq.queues()) == 1 114 | self.resq.enqueue_from_string('tests.Basic','basic','test1') 115 | assert len(self.resq.queues()) == 1 116 | self.resq.enqueue_from_string('tests.Basic','basic2','test1') 117 | assert len(self.resq.queues()) == 2 118 | assert 'test' not in self.resq.queues() 119 | assert 'basic' in self.resq.queues() 120 | 121 | def test_close(self): 122 | self.resq.close() 123 | 
-------------------------------------------------------------------------------- /tests/test_schedule.py: -------------------------------------------------------------------------------- 1 | from tests import PyResTests, Basic, TestProcess, ErrorObject 2 | from pyres import ResQ 3 | from pyres.job import Job 4 | from pyres.scheduler import Scheduler 5 | import os 6 | import datetime 7 | import time 8 | class ScheduleTests(PyResTests): 9 | def test_enqueue_at(self): 10 | d = datetime.datetime.now() + datetime.timedelta(days=1) 11 | d2 = d + datetime.timedelta(days=1) 12 | key = int(time.mktime(d.timetuple())) 13 | key2 = int(time.mktime(d2.timetuple())) 14 | self.resq.enqueue_at(d, Basic,"test1") 15 | self.resq.enqueue_at(d, Basic,"test2") 16 | assert self.redis.llen("resque:delayed:%s" % key) == 2 17 | assert len(self.redis.zrange('resque:delayed_queue_schedule',0,20)) == 1 18 | self.resq.enqueue_at(d2, Basic,"test1") 19 | assert self.redis.llen("resque:delayed:%s" % key2) == 1 20 | assert len(self.redis.zrange('resque:delayed_queue_schedule',0,20)) == 2 21 | 22 | def test_delayed_queue_schedule_size(self): 23 | d = datetime.datetime.now() + datetime.timedelta(days=1) 24 | d2 = d + datetime.timedelta(days=1) 25 | d3 = d 26 | key = int(time.mktime(d.timetuple())) 27 | key2 = int(time.mktime(d2.timetuple())) 28 | self.resq.enqueue_at(d, Basic,"test1") 29 | self.resq.enqueue_at(d2, Basic,"test1") 30 | self.resq.enqueue_at(d3, Basic,"test1") 31 | assert self.resq.delayed_queue_schedule_size() == 3 32 | 33 | def test_delayed_timestamp_size(self): 34 | d = datetime.datetime.now() + datetime.timedelta(days=1) 35 | d2 = d + datetime.timedelta(days=1) 36 | key = int(time.mktime(d.timetuple())) 37 | key2 = int(time.mktime(d2.timetuple())) 38 | self.resq.enqueue_at(d, Basic,"test1") 39 | assert self.resq.delayed_timestamp_size(key) == 1 40 | self.resq.enqueue_at(d, Basic,"test1") 41 | assert self.resq.delayed_timestamp_size(key) == 2 42 | 43 | def 
test_next_delayed_timestamp(self): 44 | d = datetime.datetime.now() + datetime.timedelta(days=-1) 45 | d2 = d + datetime.timedelta(days=-2) 46 | key = int(time.mktime(d.timetuple())) 47 | key2 = int(time.mktime(d2.timetuple())) 48 | self.resq.enqueue_at(d, Basic,"test1") 49 | self.resq.enqueue_at(d2, Basic,"test1") 50 | item = self.resq.next_delayed_timestamp() 51 | assert int(item) == key2 52 | 53 | def test_next_item_for_timestamp(self): 54 | d = datetime.datetime.now() + datetime.timedelta(days=-1) 55 | d2 = d + datetime.timedelta(days=-2) 56 | #key = int(time.mktime(d.timetuple())) 57 | #key2 = int(time.mktime(d2.timetuple())) 58 | self.resq.enqueue_at(d, Basic,"test1") 59 | self.resq.enqueue_at(d2, Basic,"test1") 60 | timestamp = self.resq.next_delayed_timestamp() 61 | item = self.resq.next_item_for_timestamp(timestamp) 62 | assert isinstance(item, dict) 63 | assert self.redis.zcard('resque:delayed_queue_schedule') == 1 64 | 65 | def test_scheduler_init(self): 66 | scheduler = Scheduler(self.resq) 67 | assert not scheduler._shutdown 68 | scheduler = Scheduler('localhost:6379') 69 | assert not scheduler._shutdown 70 | self.assertRaises(Exception, Scheduler, Basic) 71 | 72 | def test_schedule_shutdown(self): 73 | scheduler = Scheduler(self.resq) 74 | scheduler.schedule_shutdown(19,'') 75 | assert scheduler._shutdown 76 | 77 | -------------------------------------------------------------------------------- /tests/test_stats.py: -------------------------------------------------------------------------------- 1 | from tests import PyResTests 2 | from pyres import Stat 3 | class StatTests(PyResTests): 4 | def test_incr(self): 5 | stat_obj = Stat('test_stat', self.resq) 6 | stat_obj.incr() 7 | assert self.redis.get('resque:stat:test_stat') == b'1' 8 | stat_obj.incr() 9 | assert self.redis.get('resque:stat:test_stat') == b'2' 10 | stat_obj.incr(2) 11 | assert self.redis.get('resque:stat:test_stat') == b'4' 12 | 13 | def test_decr(self): 14 | stat_obj = 
Stat('test_stat', self.resq) 15 | stat_obj.incr() 16 | stat_obj.incr() 17 | assert self.redis.get('resque:stat:test_stat') == b'2' 18 | stat_obj.decr() 19 | assert self.redis.get('resque:stat:test_stat') == b'1' 20 | stat_obj.incr() 21 | stat_obj.decr(2) 22 | assert self.redis.get('resque:stat:test_stat') == b'0' 23 | 24 | def test_get(self): 25 | stat_obj = Stat('test_stat', self.resq) 26 | stat_obj.incr() 27 | stat_obj.incr() 28 | assert stat_obj.get() == 2 29 | 30 | def test_clear(self): 31 | stat_obj = Stat('test_stat', self.resq) 32 | stat_obj.incr() 33 | stat_obj.incr() 34 | assert self.redis.exists('resque:stat:test_stat') 35 | stat_obj.clear() 36 | assert not self.redis.exists('resque:stat:test_stat') 37 | -------------------------------------------------------------------------------- /tests/test_worker.py: -------------------------------------------------------------------------------- 1 | from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob, CrashJob, PrematureExitJob, PrematureHardExitJob 2 | from pyres import ResQ 3 | from pyres.job import Job 4 | from pyres.scheduler import Scheduler 5 | from pyres.worker import Worker 6 | import os 7 | import time 8 | import datetime 9 | 10 | 11 | class WorkerTests(PyResTests): 12 | def test_worker_init(self): 13 | from pyres.exceptions import NoQueueError 14 | self.assertRaises(NoQueueError, Worker,[]) 15 | self.assertRaises(Exception, Worker,['test'],TestProcess()) 16 | 17 | def test_startup(self): 18 | worker = Worker(['basic']) 19 | worker.startup() 20 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 21 | assert self.redis.sismember('resque:workers',name) 22 | import signal 23 | assert signal.getsignal(signal.SIGTERM) == worker.shutdown_all 24 | assert signal.getsignal(signal.SIGINT) == worker.shutdown_all 25 | assert signal.getsignal(signal.SIGQUIT) == worker.schedule_shutdown 26 | assert signal.getsignal(signal.SIGUSR1) == worker.kill_child 27 | 28 | def 
test_register(self): 29 | worker = Worker(['basic']) 30 | worker.register_worker() 31 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 32 | assert self.redis.sismember('resque:workers',name) 33 | 34 | def test_unregister(self): 35 | worker = Worker(['basic']) 36 | worker.register_worker() 37 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 38 | assert self.redis.sismember('resque:workers',name) 39 | worker.unregister_worker() 40 | assert name not in self.redis.smembers('resque:workers') 41 | 42 | def test_working_on(self): 43 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 44 | self.resq.enqueue(Basic,"test1") 45 | job = Job.reserve('basic', self.resq) 46 | worker = Worker(['basic']) 47 | worker.working_on(job) 48 | assert self.redis.exists("resque:worker:%s" % name) 49 | 50 | def test_processed(self): 51 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 52 | worker = Worker(['basic']) 53 | worker.processed() 54 | assert self.redis.exists("resque:stat:processed") 55 | assert self.redis.exists("resque:stat:processed:%s" % name) 56 | assert self.redis.get("resque:stat:processed").decode() == str(1) 57 | assert self.redis.get("resque:stat:processed:%s" % name).decode() == str(1) 58 | assert worker.get_processed() == 1 59 | worker.processed() 60 | assert self.redis.get("resque:stat:processed").decode() == str(2) 61 | assert self.redis.get("resque:stat:processed:%s" % name).decode() == str(2) 62 | assert worker.get_processed() == 2 63 | 64 | def test_failed(self): 65 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 66 | worker = Worker(['basic']) 67 | worker.failed() 68 | assert self.redis.exists("resque:stat:failed") 69 | assert self.redis.exists("resque:stat:failed:%s" % name) 70 | assert self.redis.get("resque:stat:failed").decode() == str(1) 71 | assert self.redis.get("resque:stat:failed:%s" % name).decode() == str(1) 72 | assert worker.get_failed() == 1 73 | worker.failed() 74 | assert 
self.redis.get("resque:stat:failed").decode() == str(2) 75 | assert self.redis.get("resque:stat:failed:%s" % name).decode() == str(2) 76 | assert worker.get_failed() == 2 77 | 78 | def test_process(self): 79 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 80 | self.resq.enqueue(Basic,"test1") 81 | job = Job.reserve('basic', self.resq) 82 | worker = Worker(['basic']) 83 | worker.process(job) 84 | assert not self.redis.get('resque:worker:%s' % worker) 85 | assert not self.redis.get("resque:stat:failed") 86 | assert not self.redis.get("resque:stat:failed:%s" % name) 87 | self.resq.enqueue(Basic,"test1") 88 | worker.process() 89 | assert not self.redis.get('resque:worker:%s' % worker) 90 | assert not self.redis.get("resque:stat:failed") 91 | assert not self.redis.get("resque:stat:failed:%s" % name) 92 | 93 | 94 | def test_signals(self): 95 | worker = Worker(['basic']) 96 | worker.startup() 97 | import inspect, signal 98 | frame = inspect.currentframe() 99 | worker.schedule_shutdown(frame, signal.SIGQUIT) 100 | assert worker._shutdown 101 | del worker 102 | worker = Worker(['high']) 103 | #self.resq.enqueue(TestSleep) 104 | #worker.work() 105 | #assert worker.child 106 | assert not worker.kill_child(frame, signal.SIGUSR1) 107 | 108 | def test_job_failure(self): 109 | self.resq.enqueue(ErrorObject) 110 | worker = Worker(['basic']) 111 | worker.process() 112 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 113 | assert not self.redis.get('resque:worker:%s' % worker) 114 | assert self.redis.get("resque:stat:failed").decode() == str(1) 115 | assert self.redis.get("resque:stat:failed:%s" % name).decode() == str(1) 116 | 117 | def test_get_job(self): 118 | worker = Worker(['basic']) 119 | self.resq.enqueue(Basic,"test1") 120 | job = Job.reserve('basic', self.resq) 121 | worker.working_on(job) 122 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 123 | assert worker.job() == ResQ.decode(self.redis.get('resque:worker:%s' % name)) 124 | assert 
worker.processing() == ResQ.decode(self.redis.get('resque:worker:%s' % name)) 125 | worker.done_working(job) 126 | w2 = Worker(['basic']) 127 | print(w2.job()) 128 | assert w2.job() == {} 129 | 130 | def test_working(self): 131 | worker = Worker(['basic']) 132 | self.resq.enqueue_from_string('tests.Basic','basic','test1') 133 | worker.register_worker() 134 | job = Job.reserve('basic', self.resq) 135 | worker.working_on(job) 136 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 137 | workers = Worker.working(self.resq) 138 | assert len(workers) == 1 139 | assert str(worker) == str(workers[0]) 140 | assert worker != workers[0] 141 | 142 | def test_started(self): 143 | import datetime 144 | worker = Worker(['basic']) 145 | dt = datetime.datetime.now() 146 | worker.started = dt 147 | name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') 148 | assert self.redis.get('resque:worker:%s:started' % name).decode() == str(int(time.mktime(dt.timetuple()))) 149 | assert worker.started.decode() == str(int(time.mktime(dt.timetuple()))) 150 | worker.started = None 151 | assert not self.redis.exists('resque:worker:%s:started' % name) 152 | 153 | def test_state(self): 154 | worker = Worker(['basic']) 155 | assert worker.state() == 'idle' 156 | self.resq.enqueue_from_string('tests.Basic','basic','test1') 157 | worker.register_worker() 158 | job = Job.reserve('basic', self.resq) 159 | worker.working_on(job) 160 | assert worker.state() == 'working' 161 | worker.done_working(job) 162 | assert worker.state() == 'idle' 163 | 164 | def test_prune_dead_workers(self): 165 | worker = Worker(['basic']) # we haven't registered this worker, so the assertion below holds 166 | assert self.redis.scard('resque:workers') == 0 167 | self.redis.sadd('resque:workers',"%s:%s:%s" % (os.uname()[1],'1','basic')) 168 | self.redis.sadd('resque:workers',"%s:%s:%s" % (os.uname()[1],'2','basic')) 169 | self.redis.sadd('resque:workers',"%s:%s:%s" % (os.uname()[1],'3','basic')) 170 | assert 
self.redis.scard('resque:workers') == 3 171 | worker.prune_dead_workers() 172 | assert self.redis.scard('resque:workers') == 0 173 | self.redis.sadd('resque:workers',"%s:%s:%s" % ('host-that-does-not-exist','1','basic')) 174 | self.redis.sadd('resque:workers',"%s:%s:%s" % ('host-that-does-not-exist','2','basic')) 175 | self.redis.sadd('resque:workers',"%s:%s:%s" % ('host-that-does-not-exist','3','basic')) 176 | worker.prune_dead_workers() 177 | # the assertion below should hold, because the workers we registered above are on a 178 | # different host, and thus should not be pruned by this process 179 | assert self.redis.scard('resque:workers') == 3 180 | 181 | def test_retry_on_exception(self): 182 | now = datetime.datetime.now() 183 | self.set_current_time(now) 184 | worker = Worker(['basic']) 185 | scheduler = Scheduler() 186 | 187 | # queue up a job that will fail for 30 seconds 188 | self.resq.enqueue(RetryOnExceptionJob, 189 | now + datetime.timedelta(seconds=30)) 190 | worker.process() 191 | assert worker.get_failed() == 0 192 | 193 | # check it retries the first time 194 | self.set_current_time(now + datetime.timedelta(seconds=5)) 195 | scheduler.handle_delayed_items() 196 | assert None == worker.process() 197 | assert worker.get_failed() == 0 198 | 199 | # check it runs fine when it's stopped crashing 200 | self.set_current_time(now + datetime.timedelta(seconds=60)) 201 | scheduler.handle_delayed_items() 202 | assert True == worker.process() 203 | assert worker.get_failed() == 0 204 | 205 | def test_kills_stale_workers_after_timeout(self): 206 | timeout = 1 207 | 208 | worker = Worker(['basic'], timeout=timeout) 209 | self.resq.enqueue(TimeoutJob, timeout + 1) 210 | 211 | assert worker.get_failed() == 0 212 | worker.fork_worker(worker.reserve()) 213 | assert worker.get_failed() == 1 214 | 215 | def test_detect_crashed_workers_as_failures(self): 216 | worker = Worker(['basic']) 217 | self.resq.enqueue(CrashJob) 218 | 219 | assert worker.job() == {} 220 | 
assert worker.get_failed() == 0 221 | 222 | worker.fork_worker(worker.reserve()) 223 | 224 | assert worker.job() == {} 225 | assert worker.get_failed() == 1 226 | 227 | def test_detect_non_0_sys_exit_as_failure(self): 228 | worker = Worker(['basic']) 229 | self.resq.enqueue(PrematureExitJob, 9) 230 | 231 | assert worker.job() == {} 232 | assert worker.get_failed() == 0 233 | 234 | worker.fork_worker(worker.reserve()) 235 | 236 | assert worker.job() == {} 237 | assert worker.get_failed() == 1 238 | 239 | def test_detect_code_0_sys_exit_as_success(self): 240 | worker = Worker(['basic']) 241 | self.resq.enqueue(PrematureExitJob, 0) 242 | 243 | assert worker.job() == {} 244 | assert worker.get_failed() == 0 245 | 246 | worker.fork_worker(worker.reserve()) 247 | 248 | assert worker.job() == {} 249 | assert worker.get_failed() == 0 250 | 251 | def test_detect_non_0_os_exit_as_failure(self): 252 | worker = Worker(['basic']) 253 | self.resq.enqueue(PrematureHardExitJob, 9) 254 | 255 | assert worker.job() == {} 256 | assert worker.get_failed() == 0 257 | 258 | worker.fork_worker(worker.reserve()) 259 | 260 | assert worker.job() == {} 261 | assert worker.get_failed() == 1 262 | 263 | def test_detect_code_0_os_exit_as_success(self): 264 | worker = Worker(['basic']) 265 | self.resq.enqueue(PrematureHardExitJob, 0) 266 | 267 | assert worker.job() == {} 268 | assert worker.get_failed() == 0 269 | 270 | worker.fork_worker(worker.reserve()) 271 | 272 | assert worker.job() == {} 273 | assert worker.get_failed() == 0 274 | 275 | def test_retries_give_up_eventually(self): 276 | now = datetime.datetime.now() 277 | self.set_current_time(now) 278 | worker = Worker(['basic']) 279 | scheduler = Scheduler() 280 | 281 | # queue up a job that will fail for 60 seconds 282 | self.resq.enqueue(RetryOnExceptionJob, 283 | now + datetime.timedelta(seconds=60)) 284 | worker.process() 285 | assert worker.get_failed() == 0 286 | 287 | # check it retries the first time 288 | self.set_current_time(now 
+ datetime.timedelta(seconds=5)) 289 | scheduler.handle_delayed_items() 290 | assert None == worker.process() 291 | assert worker.get_failed() == 0 292 | 293 | # check it fails when we've been trying too long 294 | self.set_current_time(now + datetime.timedelta(seconds=20)) 295 | scheduler.handle_delayed_items() 296 | assert None == worker.process() 297 | assert worker.get_failed() == 1 298 | 299 | def test_worker_pids(self): 300 | # spawn worker processes and get pids 301 | pids = [] 302 | pids.append(self.spawn_worker(['basic'])) 303 | pids.append(self.spawn_worker(['basic'])) 304 | time.sleep(1) 305 | worker_pids = Worker.worker_pids() 306 | 307 | # send kill signal to workers and wait for them to exit 308 | import signal 309 | for pid in pids: 310 | os.kill(pid, signal.SIGQUIT) 311 | os.waitpid(pid, 0) 312 | 313 | # ensure worker_pids() returned the correct pids 314 | for pid in pids: 315 | assert str(pid) in worker_pids 316 | 317 | # ensure the workers are no longer returned by worker_pids() 318 | worker_pids = Worker.worker_pids() 319 | for pid in pids: 320 | assert str(pid) not in worker_pids 321 | 322 | def spawn_worker(self, queues): 323 | pid = os.fork() 324 | if not pid: 325 | Worker.run(queues, interval=1) 326 | os._exit(0) 327 | else: 328 | return pid 329 | 330 | def set_current_time(self, time): 331 | ResQ._current_time = staticmethod(lambda: time) 332 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py27, py33 3 | 4 | [testenv] 5 | commands = py.test 6 | deps = 7 | pytest 8 | nose 9 | nosexcover 10 | --------------------------------------------------------------------------------