├── .gitignore ├── AUTHORS.txt ├── CHANGES.txt ├── LICENSE.txt ├── MANIFEST.in ├── README.txt ├── TODO.txt ├── docs ├── 1to2.rst ├── Makefile ├── admin_client.rst ├── architecture.rst ├── client.rst ├── conf.py ├── index.rst ├── job.rst ├── library.rst ├── types_of_jobs.rst └── worker.rst ├── gearman ├── __init__.py ├── admin_client.py ├── admin_client_handler.py ├── client.py ├── client_handler.py ├── command_handler.py ├── compat.py ├── connection.py ├── connection_manager.py ├── constants.py ├── errors.py ├── io.py ├── job.py ├── protocol.py ├── util.py ├── worker.py └── worker_handler.py ├── setup.cfg ├── setup.py └── tests ├── __init__.py ├── _core_testing.py ├── admin_client_tests.py ├── client_tests.py ├── protocol_tests.py └── worker_tests.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore temporary files left behind by TextMate 2 | ._* 3 | docs/._* 4 | docs/_build 5 | gearman/._* 6 | 7 | # Ignore compiled python files 8 | *.py[co] 9 | 10 | # Misc ignores 11 | .DS_Store 12 | MANIFEST 13 | dist 14 | build 15 | *.swo 16 | -------------------------------------------------------------------------------- /AUTHORS.txt: -------------------------------------------------------------------------------- 1 | python-gearman 2.x: 2 | ======================================== 3 | Primary authors: 4 | 5 | * Matthew Tai 6 | 7 | Contributors (in order of contribution, latest first): 8 | 9 | * Khaled alHabache :: python 2.4 backward compatibility 10 | * Eskil Olsen :: Connection fixes 11 | * Julian Krause :: Architectural design 12 | 13 | python-gearman 1.x 14 | ======================================== 15 | Primary authors: 16 | 17 | * Samuel Stauffer 18 | 19 | Contributors: 20 | 21 | * Justin Azoff 22 | * Kristopher 23 | * Eric Sumner 24 | 25 | -------------------------------------------------------------------------------- /CHANGES.txt: -------------------------------------------------------------------------------- 1 | v2.0.X, 2012-XX-XX -- Major bug fix release 2 | * GearmanWorker - Dispatch to the right function instead of looping 3 | * GearmanClient - Let the server handle the special '-' unique [GH-18] 4 | * GearmanClient - fix memory leak on brackground jobs [GH-10] 5 | * GearmanWorker - add a after_job hook [GH-22] 6 | * ConnectionManager - fix case where closed connections would still be polled [GHPR-25] 7 | * 1to2 - fix wording mistakce [GHPR-23] 8 | * Protocol - enforce no-nulls in non-last arguments [GHPR-26] 9 | 10 | v2.0.2, 2011-01-11 -- Major bug fix release 11 | * GearmanClient - Fixed a memory leak in the handler where we never de-allocated completed jobs [GH-6] 12 | * GearmanClient - Updated GET_STATUS to ask about a job that wasn't previously submitted [GH-1] 13 | * Gearman library - Fixed logging errors when NullHandler wasn't provdied [GH-3] 14 | 15 | v2.0.1, 2010-10-12 -- Minor bug fix release 16 | * GearmanJobRequest - Combined `server_status` and `status_updates` into a shared `status` field 17 | * GearmanJobRequest.status - `numerator` and `denominator` are now cast to integers 18 | * GearmanWorker.send_* - Updated to immediately send commands instead of waiting for the work select loop 19 | 20 | v2.0.0, 2010-09-28 -- Initial release 21 | v2.0.0.beta, 2010-06-15 -- Beta release 22 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2010 Yelp 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | include docs/Makefile 3 | recursive-include docs *.rst conf.py 4 | prune docs/_build 5 | -------------------------------------------------------------------------------- /README.txt: -------------------------------------------------------------------------------- 1 | ============== 2 | python-gearman 3 | ============== 4 | 5 | Description 6 | =========== 7 | Python Gearman API - Client, worker, and admin client interfaces 8 | 9 | For information on Gearman and a C-based Gearman server, see http://www.gearman.org/ 10 | 11 | Installation 12 | ============ 13 | * easy_install gearman 14 | * pip install gearman 15 | 16 | Links 17 | ===== 18 | * 2.x source 19 | * 2.x documentation 20 | 21 | * 1.x source 22 | * 1.x documentation 23 | -------------------------------------------------------------------------------- /TODO.txt: -------------------------------------------------------------------------------- 1 | Requested features (contributions welcome) 2 | ========================================== 3 | * Update ConnectionManager code to play well with Twisted 4 | * Update Worker to handle multiple jobs at once instead of processing one at a time 5 | -------------------------------------------------------------------------------- /docs/1to2.rst: -------------------------------------------------------------------------------- 1 | ============================================== 2 | Transitioning from python-gearman 1.x to 2.0.0 3 | ============================================== 4 | 5 | Client (single task) 6 | ==================== 7 | :: 8 | 9 | # python-gearman 1.x 10 | old_client = gearman.GearmanClient(['localhost:4730']) 11 | old_result = old_client.do_task(Task("echo", "foo")) 12 | 13 | 14 | # python-gearman 2.x 15 | new_client = gearman.GearmanClient(['localhost:4730']) 16 | current_request = new_client.submit_job('echo', 'foo') 17 | new_result = current_request.result 18 | 19 | Client (multiple tasks) 20 | ======================= 21 | :: 22 | 23 | # python-gearman 1.x 24 | old_client = gearman.GearmanClient(['localhost:4730']) 25 | ts = Taskset([ 26 | Task(func="echo", arg="foo"), 27 | Task(func="echo", arg="bar"), 28 | ]) 29 | old_client.do_taskset(ts) 30 | for task in ts.values(): 31 | assert task.result == task.arg 32 | 33 | 34 | # python-gearman 2.x 35 | new_client = gearman.GearmanClient(['localhost:4730']) 36 | new_jobs = [ 37 | dict(task='echo', data='foo'), 38 | dict(task='echo', data='bar'), 39 | ] 40 | 41 | completed_requests = new_client.submit_multiple_jobs(new_jobs) 42 | for current_request in completed_requests: 43 | assert current_request.result == current_request.job.data 44 | 45 | Worker 46 | ====== 47 | :: 48 | 49 | # python-gearman 1.x 50 | class WorkerHook(object): 51 | def start(self, current_job): 52 | print "Job started" 53 | 54 | def fail(self, current_job, exc_info): 55 | print "Job failed, can't stop last gasp GEARMAN_COMMAND_WORK_FAIL" 56 | 57 | def complete(self, current_job, result): 58 | print "Job complete, can't stop last gasp GEARMAN_COMMAND_WORK_COMPLETE" 59 | 60 | def callback_fxn(idle, last_job_time): 61 | return False 62 | 63 | old_worker = gearman.GearmanWorker(['localhost:4730']) 64 | old_worker.register_function("echo", lambda job:job.arg) 65 | old_worker.work(stop_if=callback_fxn, hooks=WorkerHook()) 66 | 67 | 68 | # python-gearman 2.x 69 | class CustomGearmanWorker(gearman.GearmanWorker): 70 | def on_job_execute(self, current_job): 71 | print "Job started" 72 | return super(CustomGearmanWorker, self).on_job_execute(current_job) 73 | 74 | def on_job_exception(self, current_job, exc_info): 75 | print "Job failed, CAN stop last gasp GEARMAN_COMMAND_WORK_FAIL" 76 | return super(CustomGearmanWorker, self).on_job_exception(current_job, exc_info) 77 | 78 | def on_job_complete(self, current_job, job_result): 79 | print "Job complete, CAN stop last gasp GEARMAN_COMMAND_WORK_COMPLETE" 80 | return super(CustomGearmanWorker, self).send_job_complete(current_job, job_result) 81 | 82 | def after_poll(self, any_activity): 83 | # Return True if you want to continue polling, replaces callback_fxn 84 | return True 85 | 86 | def task_callback(gearman_worker, job): 87 | return job.data 88 | 89 | new_worker = CustomGearmanWorker(['localhost:4730']) 90 | new_worker.register_task("echo", task_callback) 91 | new_worker.work() 92 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " singlehtml to make a single large HTML file" 22 | @echo " pickle to make pickle files" 23 | @echo " json to make JSON files" 24 | @echo " htmlhelp to make HTML files and a HTML help project" 25 | @echo " qthelp to make HTML files and a qthelp project" 26 | @echo " devhelp to make HTML files and a Devhelp project" 27 | @echo " epub to make an epub" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 30 | @echo " text to make text files" 31 | @echo " man to make manual pages" 32 | @echo " changes to make an overview of all changed/added/deprecated items" 33 | @echo " linkcheck to check all external links for integrity" 34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 35 | 36 | clean: 37 | -rm -rf $(BUILDDIR)/* 38 | 39 | html: 40 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 41 | @echo 42 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 43 | 44 | dirhtml: 45 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 48 | 49 | singlehtml: 50 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 51 | @echo 52 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 53 | 54 | pickle: 55 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 56 | @echo 57 | @echo "Build finished; now you can process the pickle files." 58 | 59 | json: 60 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 61 | @echo 62 | @echo "Build finished; now you can process the JSON files." 63 | 64 | htmlhelp: 65 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 66 | @echo 67 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 68 | ".hhp project file in $(BUILDDIR)/htmlhelp." 69 | 70 | qthelp: 71 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 72 | @echo 73 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 74 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 75 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/python-gearman.qhcp" 76 | @echo "To view the help file:" 77 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/python-gearman.qhc" 78 | 79 | devhelp: 80 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 81 | @echo 82 | @echo "Build finished." 83 | @echo "To view the help file:" 84 | @echo "# mkdir -p $$HOME/.local/share/devhelp/python-gearman" 85 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/python-gearman" 86 | @echo "# devhelp" 87 | 88 | epub: 89 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 90 | @echo 91 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 92 | 93 | latex: 94 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 95 | @echo 96 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 97 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 98 | "(use \`make latexpdf' here to do that automatically)." 99 | 100 | latexpdf: 101 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 102 | @echo "Running LaTeX files through pdflatex..." 103 | make -C $(BUILDDIR)/latex all-pdf 104 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 105 | 106 | text: 107 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 108 | @echo 109 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 110 | 111 | man: 112 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 113 | @echo 114 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 115 | 116 | changes: 117 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 118 | @echo 119 | @echo "The overview file is in $(BUILDDIR)/changes." 120 | 121 | linkcheck: 122 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 123 | @echo 124 | @echo "Link check complete; look for any errors in the above output " \ 125 | "or in $(BUILDDIR)/linkcheck/output.txt." 126 | 127 | doctest: 128 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 129 | @echo "Testing of doctests in the sources finished, look at the " \ 130 | "results in $(BUILDDIR)/doctest/output.txt." 131 | -------------------------------------------------------------------------------- /docs/admin_client.rst: -------------------------------------------------------------------------------- 1 | :mod:`gearman.admin_client` --- Gearman Admin client 2 | ==================================================== 3 | .. module:: gearman.admin_client 4 | :synopsis: Gearman admin client - public interface for querying about server status 5 | 6 | .. autoclass:: GearmanAdminClient 7 | 8 | Interacting with a server 9 | ------------------------- 10 | .. automethod:: GearmanAdminClient.send_maxqueue 11 | 12 | .. automethod:: GearmanAdminClient.send_shutdown 13 | 14 | .. automethod:: GearmanAdminClient.get_status 15 | 16 | .. automethod:: GearmanAdminClient.get_version 17 | 18 | .. automethod:: GearmanAdminClient.get_workers 19 | 20 | Checking server state:: 21 | 22 | gm_admin_client = gearman.GearmanAdminClient(['localhost:4730']) 23 | 24 | # Inspect server state 25 | status_response = gm_admin_client.get_status() 26 | version_response = gm_admin_client.get_version() 27 | workers_response = gm_admin_client.get_workers() 28 | 29 | Testing server response times 30 | ----------------------------- 31 | 32 | .. automethod:: GearmanAdminClient.ping_server 33 | 34 | Checking server response time:: 35 | 36 | gm_admin_client = gearman.GearmanAdminClient(['localhost:4730']) 37 | response_time = gm_admin_client.ping_server() 38 | -------------------------------------------------------------------------------- /docs/architecture.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Design document 3 | =============== 4 | 5 | Architectural design document for developers 6 | 7 | GearmanConnectionManager - Bridges low-level I/O <-> command handlers 8 | ===================================================================== 9 | * Only class that an API user should directly interact with 10 | * Manages all I/O: polls connections, reconnects failed connections, etc... 11 | * Forwards commands between Connections <-> CommandHandlers 12 | * Manages multiple Connections and multple CommandHandlers 13 | * Manages global state of an interaction with Gearman (global job lock) 14 | 15 | GearmanConnection - Manages low-level I/O 16 | ========================================= 17 | * A single connection between a client/worker and a server 18 | * Thinly wrapped socket that can reconnect 19 | * Converts binary strings <-> Gearman commands 20 | * Manages in/out data buffers for socket-level operations 21 | * Manages in/out command buffers for gearman-level operations 22 | 23 | GearmanCommandHandler - Manages commands 24 | ======================================== 25 | * Represents the state machine of a single GearmanConnection 26 | * 1-1 mapping to a GearmanConnection (via GearmanConnectionManager) 27 | * Sends/receives commands ONLY - does no buffering 28 | * Handles all command generation / interpretation 29 | 30 | -------------------------------------------------------------------------------- /docs/client.rst: -------------------------------------------------------------------------------- 1 | :mod:`gearman.client` --- Gearman client 2 | ========================================== 3 | .. module:: gearman.client 4 | :synopsis: Gearman client - public interface for requesting jobs 5 | 6 | Function available to all examples:: 7 | 8 | def check_request_status(job_request): 9 | if job_request.complete: 10 | print "Job %s finished! Result: %s - %s" % (job_request.job.unique, job_request.state, job_request.result) 11 | elif job_request.timed_out: 12 | print "Job %s timed out!" % job_request.unique 13 | elif job_request.state == JOB_UNKNOWN: 14 | print "Job %s connection failed!" % job_request.unique 15 | 16 | .. autoclass:: GearmanClient 17 | 18 | Submitting jobs 19 | --------------- 20 | .. automethod:: GearmanClient.submit_job 21 | 22 | Sending a simple job as a blocking call:: 23 | 24 | gm_client = gearman.GearmanClient(['localhost:4730', 'otherhost:4730']) 25 | 26 | # See gearman/job.py to see attributes on the GearmanJobRequest 27 | # Defaults to PRIORITY_NONE, background=False (synchronous task), wait_until_complete=True 28 | completed_job_request = gm_client.submit_job("task_name", "arbitrary binary data") 29 | check_request_status(completed_job_request) 30 | 31 | Sending a high priority, background, blocking call:: 32 | 33 | gm_client = gearman.GearmanClient(['localhost:4730', 'otherhost:4730']) 34 | 35 | # See gearman/job.py to see attributes on the GearmanJobRequest 36 | submitted_job_request = gm_client.submit_job("task_name", "arbitrary binary data", priority=gearman.PRIORITY_HIGH, background=True) 37 | 38 | check_request_status(submitted_job_request) 39 | 40 | 41 | .. automethod:: GearmanClient.submit_multiple_jobs 42 | 43 | Sending multiple jobs all at once and behave like a non-blocking call (wait_until_complete=False):: 44 | 45 | import time 46 | gm_client = gearman.GearmanClient(['localhost:4730']) 47 | 48 | list_of_jobs = [dict(task="task_name", data="binary data"), dict(task="other_task", data="other binary data")] 49 | submitted_requests = gm_client.submit_multiple_jobs(list_of_jobs, background=False, wait_until_complete=False) 50 | 51 | # Once we know our jobs are accepted, we can do other stuff and wait for results later in the function 52 | # Similar to multithreading and doing a join except this is all done in a single process 53 | time.sleep(1.0) 54 | 55 | # Wait at most 5 seconds before timing out incomplete requests 56 | completed_requests = gm_client.wait_until_jobs_completed(submitted_requests, poll_timeout=5.0) 57 | for completed_job_request in completed_requests: 58 | check_request_status(completed_job_request) 59 | 60 | 61 | .. automethod:: GearmanClient.submit_multiple_requests 62 | 63 | Recovering from failed connections:: 64 | 65 | import time 66 | gm_client = gearman.GearmanClient(['localhost:4730']) 67 | 68 | list_of_jobs = [dict(task="task_name", data="task binary string"), dict(task="other_task", data="other binary string")] 69 | failed_requests = gm_client.submit_multiple_jobs(list_of_jobs, background=False) 70 | 71 | # Let's pretend our assigned requests' Gearman servers all failed 72 | assert all(request.state == JOB_UNKNOWN for request in failed_requests), "All connections didn't fail!" 73 | 74 | # Let's pretend our assigned requests' don't fail but some simply timeout 75 | retried_connection_failed_requests = gm_client.submit_multiple_requests(failed_requests, wait_until_complete=True, poll_timeout=1.0) 76 | 77 | timed_out_requests = [job_request for job_request in retried_requests if job_request.timed_out] 78 | 79 | # For our timed out requests, lets wait a little longer until they're complete 80 | retried_timed_out_requests = gm_client.submit_multiple_requests(timed_out_requests, wait_until_complete=True, poll_timeout=4.0) 81 | 82 | .. automethod:: GearmanClient.wait_until_jobs_accepted 83 | 84 | .. automethod:: GearmanClient.wait_until_jobs_completed 85 | 86 | Retrieving job status 87 | --------------------- 88 | .. automethod:: GearmanClient.get_job_status 89 | 90 | .. automethod:: GearmanClient.get_job_statuses 91 | 92 | Extending the client 93 | -------------------- 94 | .. autoattribute:: GearmanClient.data_encoder 95 | 96 | Send/receive Python objects (not just byte strings):: 97 | 98 | # By default, GearmanClient's can only send off byte-strings 99 | # If we want to be able to send out Python objects, we can specify a data encoder 100 | # This will automatically convert byte strings <-> Python objects for ALL commands that have the 'data' field 101 | # 102 | # See http://gearman.org/index.php?id=protocol for client commands that send/receive 'opaque data' 103 | import pickle 104 | 105 | class PickleDataEncoder(gearman.DataEncoder): 106 | @classmethod 107 | def encode(cls, encodable_object): 108 | return pickle.dumps(encodable_object) 109 | 110 | @classmethod 111 | def decode(cls, decodable_string): 112 | return pickle.loads(decodable_string) 113 | 114 | class PickleExampleClient(gearman.GearmanClient): 115 | data_encoder = PickleDataEncoder 116 | 117 | my_python_object = {'hello': 'there'} 118 | 119 | gm_client = PickleExampleClient(['localhost:4730']) 120 | gm_client.submit_job("task_name", my_python_object) 121 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # python-gearman documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Aug 25 14:44:14 2010. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | sys.path.insert(0, os.path.abspath('..')) 20 | import gearman 21 | 22 | # -- General configuration ----------------------------------------------------- 23 | 24 | # If your documentation needs a minimal Sphinx version, state it here. 25 | #needs_sphinx = '1.0' 26 | 27 | # Add any Sphinx extension module names here, as strings. They can be extensions 28 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 29 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] 30 | 31 | # Add any paths that contain templates here, relative to this directory. 32 | templates_path = ['_templates'] 33 | 34 | # The suffix of source filenames. 35 | source_suffix = '.rst' 36 | 37 | # The encoding of source files. 38 | #source_encoding = 'utf-8-sig' 39 | 40 | # The master toctree document. 41 | master_doc = 'index' 42 | 43 | # General information about the project. 44 | project = u'python-gearman' 45 | copyright = u'2010, Matthew Tai' 46 | 47 | # The version info for the project you're documenting, acts as replacement for 48 | # |version| and |release|, also used in various other places throughout the 49 | # built documents. 50 | # 51 | # The short X.Y version. 52 | version = gearman.__version__ 53 | # The full version, including alpha/beta/rc tags. 54 | release = gearman.__version__ 55 | 56 | # The language for content autogenerated by Sphinx. Refer to documentation 57 | # for a list of supported languages. 58 | #language = None 59 | 60 | # There are two options for replacing |today|: either, you set today to some 61 | # non-false value, then it is used: 62 | #today = '' 63 | # Else, today_fmt is used as the format for a strftime call. 64 | #today_fmt = '%B %d, %Y' 65 | 66 | # List of patterns, relative to source directory, that match files and 67 | # directories to ignore when looking for source files. 68 | exclude_patterns = ['_build'] 69 | 70 | # The reST default role (used for this markup: `text`) to use for all documents. 71 | #default_role = None 72 | 73 | # If true, '()' will be appended to :func: etc. cross-reference text. 74 | #add_function_parentheses = True 75 | 76 | # If true, the current module name will be prepended to all description 77 | # unit titles (such as .. function::). 78 | add_module_names = True 79 | 80 | # If true, sectionauthor and moduleauthor directives will be shown in the 81 | # output. They are ignored by default. 82 | #show_authors = False 83 | 84 | # The name of the Pygments (syntax highlighting) style to use. 85 | pygments_style = 'sphinx' 86 | 87 | # A list of ignored prefixes for module index sorting. 88 | #modindex_common_prefix = [] 89 | 90 | 91 | # -- Options for HTML output --------------------------------------------------- 92 | 93 | # The theme to use for HTML and HTML Help pages. See the documentation for 94 | # a list of builtin themes. 95 | html_theme = 'default' 96 | 97 | # Theme options are theme-specific and customize the look and feel of a theme 98 | # further. For a list of options available for each theme, see the 99 | # documentation. 100 | #html_theme_options = {} 101 | 102 | # Add any paths that contain custom themes here, relative to this directory. 103 | #html_theme_path = [] 104 | 105 | # The name for this set of Sphinx documents. If None, it defaults to 106 | # " v documentation". 107 | #html_title = None 108 | 109 | # A shorter title for the navigation bar. Default is the same as html_title. 110 | #html_short_title = None 111 | 112 | # The name of an image file (relative to this directory) to place at the top 113 | # of the sidebar. 114 | #html_logo = None 115 | 116 | # The name of an image file (within the static path) to use as favicon of the 117 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 118 | # pixels large. 119 | #html_favicon = None 120 | 121 | # Add any paths that contain custom static files (such as style sheets) here, 122 | # relative to this directory. They are copied after the builtin static files, 123 | # so a file named "default.css" will overwrite the builtin "default.css". 124 | html_static_path = ['_static'] 125 | 126 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 127 | # using the given strftime format. 128 | #html_last_updated_fmt = '%b %d, %Y' 129 | 130 | # If true, SmartyPants will be used to convert quotes and dashes to 131 | # typographically correct entities. 132 | #html_use_smartypants = True 133 | 134 | # Custom sidebar templates, maps document names to template names. 135 | #html_sidebars = {} 136 | 137 | # Additional templates that should be rendered to pages, maps page names to 138 | # template names. 139 | #html_additional_pages = {} 140 | 141 | # If false, no module index is generated. 142 | #html_domain_indices = True 143 | 144 | # If false, no index is generated. 145 | #html_use_index = True 146 | 147 | # If true, the index is split into individual pages for each letter. 148 | #html_split_index = False 149 | 150 | # If true, links to the reST sources are added to the pages. 151 | #html_show_sourcelink = True 152 | 153 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 154 | #html_show_sphinx = True 155 | 156 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 157 | #html_show_copyright = True 158 | 159 | # If true, an OpenSearch description file will be output, and all pages will 160 | # contain a tag referring to it. The value of this option must be the 161 | # base URL from which the finished HTML is served. 162 | #html_use_opensearch = '' 163 | 164 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 165 | #html_file_suffix = None 166 | 167 | # Output file base name for HTML help builder. 168 | htmlhelp_basename = 'python-gearmandoc' 169 | 170 | 171 | # -- Options for LaTeX output -------------------------------------------------- 172 | 173 | # The paper size ('letter' or 'a4'). 174 | #latex_paper_size = 'letter' 175 | 176 | # The font size ('10pt', '11pt' or '12pt'). 177 | #latex_font_size = '10pt' 178 | 179 | # Grouping the document tree into LaTeX files. List of tuples 180 | # (source start file, target name, title, author, documentclass [howto/manual]). 181 | latex_documents = [ 182 | ('index', 'python-gearman.tex', u'python-gearman Documentation', 183 | u'Matthew Tai, Eskil Olsen', 'manual'), 184 | ] 185 | 186 | # The name of an image file (relative to this directory) to place at the top of 187 | # the title page. 188 | #latex_logo = None 189 | 190 | # For "manual" documents, if this is true, then toplevel headings are parts, 191 | # not chapters. 192 | #latex_use_parts = False 193 | 194 | # If true, show page references after internal links. 195 | #latex_show_pagerefs = False 196 | 197 | # If true, show URL addresses after external links. 198 | #latex_show_urls = False 199 | 200 | # Additional stuff for the LaTeX preamble. 201 | #latex_preamble = '' 202 | 203 | # Documents to append as an appendix to all manuals. 204 | #latex_appendices = [] 205 | 206 | # If false, no module index is generated. 207 | #latex_domain_indices = True 208 | 209 | 210 | # -- Options for manual page output -------------------------------------------- 211 | 212 | # One entry per manual page. List of tuples 213 | # (source start file, name, description, authors, manual section). 214 | man_pages = [ 215 | ('index', 'python-gearman', u'python-gearman Documentation', 216 | [u'Matthew Tai, Eskil Olsen'], 1) 217 | ] 218 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. python-gearman documentation master file, created by 2 | sphinx-quickstart on Wed Aug 25 14:44:14 2010. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | python-gearman 2.x 7 | ================== 8 | 9 | Python Gearman API - Client, worker, and admin client interfaces 10 | 11 | For information on the Gearman protocol and a Gearman server, see http://www.gearman.org/ 12 | 13 | .. toctree:: 14 | :maxdepth: 3 15 | :numbered: 16 | 17 | library.rst 18 | 1to2.rst 19 | architecture.rst 20 | types_of_jobs.rst 21 | 22 | * :ref:`search` 23 | 24 | -------------------------------------------------------------------------------- /docs/job.rst: -------------------------------------------------------------------------------- 1 | :mod:`gearman.job` --- Gearman job definitions 2 | ============================================== 3 | .. module:: gearman.job 4 | :synopsis: Gearman jobs - Common job classes used within each interface 5 | 6 | GearmanJob - Basic information about a requested job 7 | ---------------------------------------------------- 8 | .. autoclass:: GearmanJob 9 | 10 | Server identifers 11 | ^^^^^^^^^^^^^^^^^ 12 | .. attribute:: GearmanJob.connection 13 | 14 | :const:`GearmanConnection` - Server assignment. Could be :const:`None` prior to client job submission 15 | 16 | .. attribute:: GearmanJob.handle 17 | 18 | :const:`string` - Job's server handle. Handles are NOT interchangeable across different gearman servers 19 | 20 | Job parameters 21 | ^^^^^^^^^^^^^^ 22 | .. attribute:: GearmanJob.task 23 | 24 | :const:`string` - Job's task 25 | 26 | .. attribute:: GearmanJob.unique 27 | 28 | :const:`string` - Job's unique identifier (client assigned) 29 | 30 | .. attribute:: GearmanJob.data 31 | 32 | :const:`binary` - Job's binary payload 33 | 34 | GearmanJobRequest - State tracker for requested jobs 35 | ---------------------------------------------------- 36 | .. autoclass:: GearmanJobRequest 37 | 38 | Tracking job submission 39 | ^^^^^^^^^^^^^^^^^^^^^^^ 40 | .. attribute:: GearmanJobRequest.gearman_job 41 | 42 | :const:`GearmanJob` - Job that is being tracked by this :const:`GearmanJobRequest` object 43 | 44 | .. attribute:: GearmanJobRequest.priority 45 | 46 | * :const:`PRIORITY_NONE` [default] 47 | * :const:`PRIORITY_LOW` 48 | * :const:`PRIORITY_HIGH` 49 | 50 | .. attribute:: GearmanJobRequest.background 51 | 52 | :const:`boolean` - Is this job backgrounded? 53 | 54 | .. attribute:: GearmanJobRequest.connection_attempts 55 | 56 | :const:`integer` - Number of attempted connection attempts 57 | 58 | .. attribute:: GearmanJobRequest.max_connection_attempts 59 | 60 | :const:`integer` - Maximum number of attempted connection attempts before raising an exception 61 | 62 | Tracking job progress 63 | ^^^^^^^^^^^^^^^^^^^^^ 64 | .. attribute:: GearmanJobRequest.result 65 | 66 | :const:`binary` - Job's returned binary payload - Populated if and only if JOB_COMPLETE 67 | 68 | .. attribute:: GearmanJobRequest.exception 69 | 70 | :const:`binary` - Job's exception binary payload 71 | 72 | .. attribute:: GearmanJobRequest.state 73 | 74 | * :const:`JOB_UNKNOWN` - Request state is currently unknown, either unsubmitted or connection failed 75 | * :const:`JOB_PENDING` - Request has been submitted, pending handle 76 | * :const:`JOB_CREATED` - Request has been accepted 77 | * :const:`JOB_FAILED` - Request received an explicit job failure (job done but errored out) 78 | * :const:`JOB_COMPLETE` - Request received an explicit job completion (job done with results) 79 | 80 | .. attribute:: GearmanJobRequest.timed_out 81 | 82 | :const:`boolean` - Did the client hit its polling_timeout prior to a job finishing? 83 | 84 | .. attribute:: GearmanJobRequest.complete 85 | 86 | :const:`boolean` - Does the client need to continue to poll for more updates from this job? 87 | 88 | Tracking in-flight job updates 89 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 90 | Certain GearmanJob's may send back data prior to actually completing. :const:`GearmanClient` uses these queues to keep track of what/when we received certain updates. 91 | 92 | .. attribute:: GearmanJobRequest.warning_updates 93 | 94 | :const:`collections.deque` - Job's warning binary payloads 95 | 96 | .. attribute:: GearmanJobRequest.data_updates 97 | 98 | :const:`collections.deque` - Job's data binary payloads 99 | 100 | .. attribute:: GearmanJobRequest.status 101 | 102 | :const:`dictionary` - Job's status 103 | 104 | * `handle` - :const:`string` - Job handle 105 | * `known` - :const:`boolean` - Is the server aware of this request? 106 | * `running` - :const:`boolean` - Is the request currently being processed by a worker? 107 | * `numerator` - :const:`integer` 108 | * `denominator` - :const:`integer` 109 | * `time_received` - :const:`integer` - Time last updated 110 | 111 | .. versionadded:: 2.0.1 112 | Replaces GearmanJobRequest.status_updates and GearmanJobRquest.server_status 113 | 114 | .. attribute:: GearmanJobRequest.status_updates 115 | 116 | .. deprecated:: 2.0.1 117 | Replaced by GearmanJobRequest.status 118 | 119 | .. attribute:: GearmanJobRequest.server_status 120 | 121 | .. deprecated:: 2.0.1 122 | Replaced by GearmanJobRequest.status 123 | -------------------------------------------------------------------------------- /docs/library.rst: -------------------------------------------------------------------------------- 1 | Gearman Library documentation 2 | ============================= 3 | .. toctree:: 4 | :maxdepth: 3 5 | 6 | client.rst 7 | worker.rst 8 | admin_client.rst 9 | job.rst 10 | -------------------------------------------------------------------------------- /docs/types_of_jobs.rst: -------------------------------------------------------------------------------- 1 | ===================== 2 | Types of Gearman Jobs 3 | ===================== 4 | 5 | Gearman has two types of jobs, "online" jobs and "offline" jobs. Online jobs 6 | keep a socket open to the gearman server while they run, while offline jobs 7 | disconnect from the gearman server after submitting. As such, offline jobs 8 | place less of a burden on the network and are less vulnerable to flakey network 9 | connections. 10 | 11 | =============================== ============ ============= 12 | Feature Online Rules Offline Rules 13 | =============================== ============ ============= 14 | Supports a return value yes no 15 | Supports mid-job data updates yes no 16 | Supports mid-job status updates yes yes 17 | submit_job(...) blocks optional no 18 | =============================== ============ ============= 19 | 20 | Sample Code 21 | =========== 22 | 23 | Client:: 24 | 25 | import gearman 26 | import time 27 | 28 | 29 | gm_client = gearman.GearmanClient(['localhost:4730']) 30 | 31 | initial_request = gm_client.submit_job( 32 | "delayed_reverse", # queue name 33 | "Hello World!", # data 34 | background=False, # False --> online job, True --> offline job 35 | wait_until_complete=False # False --> don't block, True --> block 36 | ) 37 | 38 | 39 | start_time = time.time() 40 | 41 | 42 | def check_job_status(initial_request): 43 | # Create a request for the gearman server to update the original job's status 44 | update_job = gearman.job.GearmanJob(gm_client.connection_list[0], 45 | initial_request.job.handle, 46 | None, 47 | None, 48 | None 49 | ) 50 | update_request = gearman.job.GearmanJobRequest(update_job) 51 | update_request.state = 'CREATED' 52 | 53 | result = gm_client.get_job_status(update_request) 54 | 55 | print ('Time elpased = %.2f, Known = %s, Running = %s, ' + 56 | 'Status = %d/%d, data = %r' 57 | ) % ( 58 | time.time() - start_time, 59 | result.status['known'], 60 | result.status['running'], 61 | result.status['numerator'], 62 | result.status['denominator'], 63 | result.data_updates 64 | ) 65 | 66 | for i in xrange(8): 67 | check_job_status(initial_request) 68 | time.sleep(1) 69 | 70 | Worker:: 71 | 72 | import gearman 73 | import time 74 | 75 | 76 | def delayed_reverse(worker, job): 77 | start_time = time.time() 78 | et = lambda: time.time() - start_time 79 | 80 | print 'Received job `%s`, will run for 6 seconds' % job.data 81 | 82 | time.sleep(2) 83 | print 'Time elapsed = %.2f, updating status to 1/3' % et() 84 | worker.send_job_status(job, 1, 3) 85 | 86 | time.sleep(1) 87 | print 'Time elapsed = %.2f, sent half-way data' % et() 88 | worker.send_job_data(job, 'half way data') 89 | 90 | time.sleep(1) 91 | print 'Time elapsed = %.2f, updating status to 2/3' % et() 92 | worker.send_job_status(job, 2, 3) 93 | 94 | time.sleep(2) 95 | print 'Time elapsed = %.2f, updating status to 3/3 and returning' % et() 96 | 97 | return job.data[::-1] 98 | 99 | 100 | gearman_worker = gearman.GearmanWorker(['localhost:4730']) 101 | gearman_worker.register_task('delayed_reverse', delayed_reverse) 102 | gearman_worker.work() 103 | 104 | Sample Output 105 | ============= 106 | 107 | Client:: 108 | 109 | Time elpased = 0.00, Known = True, Running = False, Status = 0/0, data = deque([]) 110 | Time elpased = 1.00, Known = True, Running = True, Status = 0/0, data = deque([]) 111 | Time elpased = 2.00, Known = True, Running = True, Status = 1/3, data = deque([]) 112 | Time elpased = 3.00, Known = True, Running = True, Status = 1/3, data = deque(['half way data']) 113 | Time elpased = 4.01, Known = True, Running = True, Status = 2/3, data = deque([]) 114 | Time elpased = 5.01, Known = True, Running = True, Status = 2/3, data = deque([]) 115 | Time elpased = 6.01, Known = True, Running = True, Status = 2/3, data = deque([]) 116 | Time elpased = 7.01, Known = False, Running = False, Status = 0/0, data = deque([]) 117 | 118 | Worker:: 119 | 120 | Received job `Hello World!`, will run for 6 seconds 121 | Time elapsed = 2.00, updating status to 1/3 122 | Time elapsed = 3.00, sent half-way data 123 | Time elapsed = 4.00, updating status to 2/3 124 | Time elapsed = 6.01, updating status to 3/3 and returning 125 | 126 | Note that the client never receives the "3/3" status update -- status and 127 | data updates only work if the client queries for the update while the job 128 | is running. The status/data update features are designed as a mechanism for 129 | checking up on a running job, not a means of returning or persistently storing 130 | job output. 131 | -------------------------------------------------------------------------------- /docs/worker.rst: -------------------------------------------------------------------------------- 1 | :mod:`gearman.worker` --- Gearman worker 2 | ======================================== 3 | .. module:: gearman.worker 4 | :synopsis: Gearman worker - public interface for accepting/executing jobs 5 | 6 | .. autoclass:: GearmanWorker 7 | 8 | Job processing 9 | -------------- 10 | .. automethod:: GearmanWorker.set_client_id 11 | 12 | .. automethod:: GearmanWorker.register_task 13 | 14 | .. automethod:: GearmanWorker.unregister_task 15 | 16 | .. automethod:: GearmanWorker.work 17 | 18 | Setting up a basic worker that reverses a given byte-string:: 19 | 20 | gm_worker = gearman.GearmanWorker(['localhost:4730']) 21 | 22 | # See gearman/job.py to see attributes on the GearmanJob 23 | # Send back a reversed version of the 'data' string 24 | def task_listener_reverse(gearman_worker, gearman_job): 25 | return reversed(gearman_job.data) 26 | 27 | # gm_worker.set_client_id is optional 28 | gm_worker.set_client_id('your_worker_client_id_name') 29 | gm_worker.register_task('reverse', task_listener_reverse) 30 | 31 | # Enter our work loop and call gm_worker.after_poll() after each time we timeout/see socket activity 32 | gm_worker.work() 33 | 34 | Sending in-flight job updates 35 | ----------------------------- 36 | .. automethod:: GearmanWorker.send_job_data 37 | 38 | .. automethod:: GearmanWorker.send_job_status 39 | 40 | .. automethod:: GearmanWorker.send_job_warning 41 | 42 | Callback function sending back inflight job updates:: 43 | 44 | gm_worker = gearman.GearmanWorker(['localhost:4730']) 45 | 46 | # See gearman/job.py to see attributes on the GearmanJob 47 | # Send back a reversed version of the 'data' string through WORK_DATA instead of WORK_COMPLETE 48 | def task_listener_reverse_inflight(gearman_worker, gearman_job): 49 | reversed_data = reversed(gearman_job.data) 50 | total_chars = len(reversed_data) 51 | 52 | for idx, character in enumerate(reversed_data): 53 | gearman_worker.send_job_data(gearman_job, str(character)) 54 | gearman_worker.send_job_status(gearman_job, idx + 1, total_chars) 55 | 56 | return None 57 | 58 | # gm_worker.set_client_id is optional 59 | gm_worker.register_task('reverse', task_listener_reverse_inflight) 60 | 61 | # Enter our work loop and call gm_worker.after_poll() after each time we timeout/see socket activity 62 | gm_worker.work() 63 | 64 | Extending the worker 65 | -------------------- 66 | .. autoattribute:: GearmanWorker.data_encoder 67 | 68 | .. automethod:: GearmanWorker.after_poll 69 | 70 | Send/receive Python objects and do work between polls:: 71 | 72 | # By default, GearmanWorker's can only send off byte-strings 73 | # If we want to be able to send out Python objects, we can specify a data encoder 74 | # This will automatically convert byte strings <-> Python objects for ALL commands that have the 'data' field 75 | # 76 | # See http://gearman.org/index.php?id=protocol for Worker commands that send/receive 'opaque data' 77 | # 78 | import json # Or similarly styled library 79 | class JSONDataEncoder(gearman.DataEncoder): 80 | @classmethod 81 | def encode(cls, encodable_object): 82 | return json.dumps(encodable_object) 83 | 84 | @classmethod 85 | def decode(cls, decodable_string): 86 | return json.loads(decodable_string) 87 | 88 | class DBRollbackJSONWorker(gearman.GearmanWorker): 89 | data_encoder = JSONDataEncoder 90 | 91 | def after_poll(self, any_activity): 92 | # After every select loop, let's rollback our DB connections just to be safe 93 | continue_working = True 94 | self.db_connections.rollback() 95 | return continue_working 96 | -------------------------------------------------------------------------------- /gearman/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Gearman API - Client, worker, and admin client interfaces 3 | """ 4 | 5 | __version__ = '2.0.2' 6 | 7 | from gearman.admin_client import GearmanAdminClient 8 | from gearman.client import GearmanClient 9 | from gearman.worker import GearmanWorker 10 | 11 | from gearman.connection_manager import DataEncoder 12 | from gearman.constants import PRIORITY_NONE, PRIORITY_LOW, PRIORITY_HIGH, JOB_PENDING, JOB_CREATED, JOB_FAILED, JOB_COMPLETE, JOB_UNKNOWN 13 | 14 | import logging 15 | 16 | class NullHandler(logging.Handler): 17 | def emit(self, record): 18 | pass 19 | 20 | gearman_root_logger = logging.getLogger('gearman') 21 | gearman_root_logger.addHandler(NullHandler()) 22 | -------------------------------------------------------------------------------- /gearman/admin_client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | 4 | from gearman import util 5 | 6 | from gearman.connection_manager import GearmanConnectionManager 7 | from gearman.admin_client_handler import GearmanAdminClientCommandHandler 8 | from gearman.errors import ConnectionError, InvalidAdminClientState, ServerUnavailable 9 | from gearman.protocol import GEARMAN_COMMAND_ECHO_RES, GEARMAN_COMMAND_ECHO_REQ, \ 10 | GEARMAN_SERVER_COMMAND_STATUS, GEARMAN_SERVER_COMMAND_VERSION, GEARMAN_SERVER_COMMAND_WORKERS, \ 11 | GEARMAN_SERVER_COMMAND_MAXQUEUE, GEARMAN_SERVER_COMMAND_SHUTDOWN, GEARMAN_SERVER_COMMAND_GETPID, \ 12 | GEARMAN_SERVER_COMMAND_CANCEL_JOB, GEARMAN_SERVER_COMMAND_SHOW_JOBS, GEARMAN_SERVER_COMMAND_SHOW_UNIQUE_JOBS 13 | 14 | gearman_logger = logging.getLogger(__name__) 15 | 16 | ECHO_STRING = "ping? pong!" 17 | DEFAULT_ADMIN_CLIENT_TIMEOUT = 10.0 18 | 19 | class GearmanAdminClient(GearmanConnectionManager): 20 | """GearmanAdminClient :: Interface to send/receive administrative commands to a Gearman server 21 | 22 | This client acts as a BLOCKING client and each call will poll until it receives a satisfactory server response 23 | 24 | http://gearman.org/index.php?id=protocol 25 | See section 'Administrative Protocol' 26 | """ 27 | command_handler_class = GearmanAdminClientCommandHandler 28 | 29 | def __init__(self, host_list=None, poll_timeout=DEFAULT_ADMIN_CLIENT_TIMEOUT): 30 | super(GearmanAdminClient, self).__init__(host_list=host_list) 31 | self.poll_timeout = poll_timeout 32 | 33 | self.current_connection = util.unlist(self.connection_list) 34 | self.current_handler = None 35 | 36 | def establish_admin_connection(self): 37 | try: 38 | self.establish_connection(self.current_connection) 39 | except ConnectionError: 40 | raise ServerUnavailable('Found no valid connections in list: %r' % self.connection_list) 41 | 42 | self.current_handler = self.connection_to_handler_map[self.current_connection] 43 | 44 | def ping_server(self): 45 | """Sends off a debugging string to execute an application ping on the Gearman server""" 46 | start_time = time.time() 47 | 48 | self.establish_admin_connection() 49 | self.current_handler.send_echo_request(ECHO_STRING) 50 | server_response = self.wait_until_server_responds(GEARMAN_COMMAND_ECHO_REQ) 51 | if server_response != ECHO_STRING: 52 | raise InvalidAdminClientState("Echo string mismatch: got %s, expected %s" % (server_response, ECHO_STRING)) 53 | 54 | elapsed_time = time.time() - start_time 55 | return elapsed_time 56 | 57 | def send_maxqueue(self, task, max_size): 58 | """Sends a request to change the maximum queue size for a given task""" 59 | 60 | self.establish_admin_connection() 61 | self.current_handler.send_text_command('%s %s %s' % (GEARMAN_SERVER_COMMAND_MAXQUEUE, task, max_size)) 62 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_MAXQUEUE) 63 | 64 | def send_shutdown(self, graceful=True): 65 | """Sends a request to shutdown the connected gearman server""" 66 | actual_command = GEARMAN_SERVER_COMMAND_SHUTDOWN 67 | if graceful: 68 | actual_command += ' graceful' 69 | 70 | self.establish_admin_connection() 71 | self.current_handler.send_text_command(actual_command) 72 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_SHUTDOWN) 73 | 74 | def get_status(self): 75 | """Retrieves a list of all registered tasks and reports how many items/workers are in the queue""" 76 | self.establish_admin_connection() 77 | self.current_handler.send_text_command(GEARMAN_SERVER_COMMAND_STATUS) 78 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_STATUS) 79 | 80 | def get_version(self): 81 | """Retrieves the version number of the Gearman server""" 82 | self.establish_admin_connection() 83 | self.current_handler.send_text_command(GEARMAN_SERVER_COMMAND_VERSION) 84 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_VERSION) 85 | 86 | def get_workers(self): 87 | """Retrieves a list of workers and reports what tasks they're operating on""" 88 | self.establish_admin_connection() 89 | self.current_handler.send_text_command(GEARMAN_SERVER_COMMAND_WORKERS) 90 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_WORKERS) 91 | 92 | def wait_until_server_responds(self, expected_type): 93 | current_handler = self.current_handler 94 | def continue_while_no_response(any_activity): 95 | return (not current_handler.response_ready) 96 | 97 | self.poll_connections_until_stopped([self.current_connection], continue_while_no_response, timeout=self.poll_timeout) 98 | if not self.current_handler.response_ready: 99 | raise InvalidAdminClientState('Admin client timed out after %f second(s)' % self.poll_timeout) 100 | 101 | cmd_type, cmd_resp = self.current_handler.pop_response() 102 | if cmd_type != expected_type: 103 | raise InvalidAdminClientState('Received an unexpected response... got command %r, expecting command %r' % (cmd_type, expected_type)) 104 | 105 | return cmd_resp 106 | 107 | def get_pid(self): 108 | """Retrieves the process ID""" 109 | self.establish_admin_connection() 110 | self.current_handler.send_text_command(GEARMAN_SERVER_COMMAND_GETPID) 111 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_GETPID) 112 | 113 | def cancel_job(self, handle): 114 | """Cancels a job""" 115 | self.establish_admin_connection() 116 | self.current_handler.send_text_command(GEARMAN_SERVER_COMMAND_CANCEL_JOB+" "+handle) 117 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_CANCEL_JOB) 118 | 119 | def get_jobs(self): 120 | """Retrieves a list of jobs""" 121 | self.establish_admin_connection() 122 | self.current_handler.send_text_command(GEARMAN_SERVER_COMMAND_SHOW_JOBS) 123 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_SHOW_JOBS) 124 | 125 | def get_unique_jobs(self): 126 | """Retrieves a list of unique jobs""" 127 | self.establish_admin_connection() 128 | self.current_handler.send_text_command(GEARMAN_SERVER_COMMAND_SHOW_UNIQUE_JOBS) 129 | return self.wait_until_server_responds(GEARMAN_SERVER_COMMAND_SHOW_UNIQUE_JOBS) 130 | -------------------------------------------------------------------------------- /gearman/admin_client_handler.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import logging 3 | 4 | from gearman.command_handler import GearmanCommandHandler 5 | from gearman.errors import ProtocolError, InvalidAdminClientState 6 | from gearman.protocol import GEARMAN_COMMAND_ECHO_REQ, GEARMAN_COMMAND_TEXT_COMMAND, \ 7 | GEARMAN_SERVER_COMMAND_STATUS, GEARMAN_SERVER_COMMAND_VERSION, \ 8 | GEARMAN_SERVER_COMMAND_WORKERS, GEARMAN_SERVER_COMMAND_MAXQUEUE, GEARMAN_SERVER_COMMAND_SHUTDOWN, \ 9 | GEARMAN_SERVER_COMMAND_GETPID, GEARMAN_SERVER_COMMAND_SHOW_JOBS, GEARMAN_SERVER_COMMAND_CANCEL_JOB, \ 10 | GEARMAN_SERVER_COMMAND_SHOW_UNIQUE_JOBS 11 | 12 | gearman_logger = logging.getLogger(__name__) 13 | 14 | EXPECTED_GEARMAN_SERVER_COMMANDS = set([GEARMAN_SERVER_COMMAND_STATUS, GEARMAN_SERVER_COMMAND_VERSION, \ 15 | GEARMAN_SERVER_COMMAND_WORKERS, GEARMAN_SERVER_COMMAND_MAXQUEUE, GEARMAN_SERVER_COMMAND_SHUTDOWN, \ 16 | GEARMAN_SERVER_COMMAND_GETPID, GEARMAN_SERVER_COMMAND_SHOW_JOBS, GEARMAN_SERVER_COMMAND_CANCEL_JOB, \ 17 | GEARMAN_SERVER_COMMAND_SHOW_UNIQUE_JOBS]) 18 | 19 | class GearmanAdminClientCommandHandler(GearmanCommandHandler): 20 | """Special GEARMAN_COMMAND_TEXT_COMMAND command handler that'll parse text responses from the server""" 21 | STATUS_FIELDS = 4 22 | WORKERS_FIELDS = 4 23 | JOB_FIELDS = 4 24 | UNIQUE_JOB_FIELDS = 1 25 | 26 | def __init__(self, connection_manager=None): 27 | super(GearmanAdminClientCommandHandler, self).__init__(connection_manager=connection_manager) 28 | self._sent_commands = collections.deque() 29 | self._recv_responses = collections.deque() 30 | 31 | self._status_response = [] 32 | self._workers_response = [] 33 | 34 | ####################################################################### 35 | ##### Public interface methods to be called by GearmanAdminClient ##### 36 | ####################################################################### 37 | 38 | @property 39 | def response_ready(self): 40 | return bool(self._recv_responses) 41 | 42 | def pop_response(self): 43 | if not self._sent_commands or not self._recv_responses: 44 | raise InvalidAdminClientState('Attempted to pop a response for a command that is not ready') 45 | 46 | sent_command = self._sent_commands.popleft() 47 | recv_response = self._recv_responses.popleft() 48 | return sent_command, recv_response 49 | 50 | def send_text_command(self, command_line): 51 | """Send our administrative text command""" 52 | expected_server_command = None 53 | for server_command in EXPECTED_GEARMAN_SERVER_COMMANDS: 54 | if command_line.startswith(server_command): 55 | expected_server_command = server_command 56 | break 57 | 58 | if not expected_server_command: 59 | raise ProtocolError('Attempted to send an unknown server command: %r' % command_line) 60 | 61 | self._sent_commands.append(expected_server_command) 62 | 63 | output_text = '%s\n' % command_line 64 | self.send_command(GEARMAN_COMMAND_TEXT_COMMAND, raw_text=output_text) 65 | 66 | def send_echo_request(self, echo_string): 67 | """Send our administrative text command""" 68 | self._sent_commands.append(GEARMAN_COMMAND_ECHO_REQ) 69 | 70 | self.send_command(GEARMAN_COMMAND_ECHO_REQ, data=echo_string) 71 | 72 | ########################################################### 73 | ### Callbacks when we receive a command from the server ### 74 | ########################################################### 75 | 76 | def recv_echo_res(self, data): 77 | self._recv_responses.append(data) 78 | return False 79 | 80 | def recv_text_command(self, raw_text): 81 | """Catch GEARMAN_COMMAND_TEXT_COMMAND's and forward them onto their respective recv_server_* callbacks""" 82 | if not self._sent_commands: 83 | raise InvalidAdminClientState('Received an unexpected server response') 84 | 85 | # Peek at the first command 86 | cmd = self._sent_commands[0] 87 | cmd_type = cmd.replace(" ", "_") 88 | recv_server_command_function_name = 'recv_server_%s' % cmd_type 89 | 90 | cmd_callback = getattr(self, recv_server_command_function_name, None) 91 | if not cmd_callback: 92 | gearman_logger.error('Could not handle command: %r - %r' % (cmd_type, raw_text)) 93 | raise ValueError('Could not handle command: %r - %r' % (cmd_type, raw_text)) 94 | 95 | # This must match the parameter names as defined in the command handler 96 | completed_work = cmd_callback(raw_text) 97 | return completed_work 98 | 99 | def recv_server_status(self, raw_text): 100 | """Slowly assemble a server status message line by line""" 101 | # If we received a '.', we've finished parsing this status message 102 | # Pack up our output and reset our response queue 103 | if raw_text == '.': 104 | output_response = tuple(self._status_response) 105 | self._recv_responses.append(output_response) 106 | self._status_response = [] 107 | return False 108 | 109 | # If we didn't get a final response, split our line and interpret all the data 110 | split_tokens = raw_text.split('\t') 111 | if len(split_tokens) != self.STATUS_FIELDS: 112 | raise ProtocolError('Received %d tokens, expected %d tokens: %r' % (len(split_tokens), self.STATUS_FIELDS, split_tokens)) 113 | 114 | # Label our fields and make the results Python friendly 115 | task, queued_count, running_count, worker_count = split_tokens 116 | 117 | status_dict = {} 118 | status_dict['task'] = task 119 | status_dict['queued'] = int(queued_count) 120 | status_dict['running'] = int(running_count) 121 | status_dict['workers'] = int(worker_count) 122 | self._status_response.append(status_dict) 123 | return True 124 | 125 | def recv_server_version(self, raw_text): 126 | """Version response is a simple passthrough""" 127 | self._recv_responses.append(raw_text) 128 | return False 129 | 130 | def recv_server_workers(self, raw_text): 131 | """Slowly assemble a server workers message line by line""" 132 | # If we received a '.', we've finished parsing this workers message 133 | # Pack up our output and reset our response queue 134 | if raw_text == '.': 135 | output_response = tuple(self._workers_response) 136 | self._recv_responses.append(output_response) 137 | self._workers_response = [] 138 | return False 139 | 140 | split_tokens = raw_text.split(' ') 141 | if len(split_tokens) < self.WORKERS_FIELDS: 142 | raise ProtocolError('Received %d tokens, expected >= 4 tokens: %r' % (len(split_tokens), split_tokens)) 143 | 144 | if split_tokens[3] != ':': 145 | raise ProtocolError('Malformed worker response: %r' % (split_tokens, )) 146 | 147 | # Label our fields and make the results Python friendly 148 | worker_dict = {} 149 | worker_dict['file_descriptor'] = split_tokens[0] 150 | worker_dict['ip'] = split_tokens[1] 151 | worker_dict['client_id'] = split_tokens[2] 152 | worker_dict['tasks'] = tuple(split_tokens[4:]) 153 | self._workers_response.append(worker_dict) 154 | return True 155 | 156 | def recv_server_maxqueue(self, raw_text): 157 | """Maxqueue response is a simple passthrough""" 158 | if raw_text != 'OK': 159 | raise ProtocolError("Expected 'OK', received: %s" % raw_text) 160 | 161 | self._recv_responses.append(raw_text) 162 | return False 163 | 164 | def recv_server_shutdown(self, raw_text): 165 | """Shutdown response is a simple passthrough""" 166 | self._recv_responses.append(None) 167 | return False 168 | 169 | def recv_server_getpid(self, raw_text): 170 | """PID response is a simple passthrough""" 171 | self._recv_responses.append(raw_text) 172 | return False 173 | 174 | def recv_server_show_jobs(self, raw_text): 175 | """Slowly assemble a show jobs message line by line""" 176 | # If we received a '.', we've finished parsing this status message 177 | # Pack up our output and reset our response queue 178 | if raw_text == '.': 179 | output_response = tuple(self._status_response) 180 | self._recv_responses.append(output_response) 181 | self._status_response = [] 182 | return False 183 | 184 | # If we didn't get a final response, split our line and interpret all the data 185 | split_tokens = raw_text.split('\t') 186 | if len(split_tokens) != self.JOB_FIELDS: 187 | raise ProtocolError('Received %d tokens, expected %d tokens: %r' % (len(split_tokens), self.JOB_FIELDS, split_tokens)) 188 | 189 | # Label our fields and make the results Python friendly 190 | handle, queued_count, canceled_count, enabled_count = split_tokens 191 | 192 | job_dict = {} 193 | job_dict['handle'] = handle 194 | job_dict['queued'] = int(queued_count) 195 | job_dict['canceled'] = int(canceled_count) 196 | job_dict['enabled'] = int(enabled_count) 197 | self._status_response.append(job_dict) 198 | return True 199 | 200 | def recv_server_cancel_job(self, raw_text): 201 | """Cancel job response is a simple passthrough""" 202 | self._recv_responses.append(raw_text) 203 | return False 204 | 205 | def recv_server_show_unique_jobs(self, raw_text): 206 | """Slowly assemble a server show unique jobs message line by line""" 207 | # If we received a '.', we've finished parsing this status message 208 | # Pack up our output and reset our response queue 209 | if raw_text == '.': 210 | output_response = tuple(self._status_response) 211 | self._recv_responses.append(output_response) 212 | self._status_response = [] 213 | return False 214 | 215 | # If we didn't get a final response, split our line and interpret all the data 216 | split_tokens = raw_text.split('\t') 217 | if len(split_tokens) != self.UNIQUE_JOB_FIELDS: 218 | raise ProtocolError('Received %d tokens, expected %d tokens: %r' % (len(split_tokens), self.UNIQUE_JOB_FIELDS, split_tokens)) 219 | 220 | # Label our fields and make the results Python friendly 221 | unique = split_tokens 222 | 223 | job_dict = {} 224 | job_dict['unique'] = unique 225 | self._status_response.append(job_dict) 226 | return True 227 | -------------------------------------------------------------------------------- /gearman/client.py: -------------------------------------------------------------------------------- 1 | import collections 2 | from gearman import compat 3 | import logging 4 | import os 5 | import random 6 | import weakref 7 | 8 | import gearman.util 9 | 10 | from gearman.connection_manager import GearmanConnectionManager 11 | from gearman.client_handler import GearmanClientCommandHandler 12 | from gearman.constants import PRIORITY_NONE, PRIORITY_LOW, PRIORITY_HIGH, JOB_UNKNOWN, JOB_PENDING 13 | from gearman.errors import ConnectionError, ExceededConnectionAttempts, ServerUnavailable 14 | 15 | gearman_logger = logging.getLogger(__name__) 16 | 17 | # This number must be <= GEARMAN_UNIQUE_SIZE in gearman/libgearman/constants.h 18 | RANDOM_UNIQUE_BYTES = 16 19 | 20 | class GearmanClient(GearmanConnectionManager): 21 | """ 22 | GearmanClient :: Interface to submit jobs to a Gearman server 23 | """ 24 | command_handler_class = GearmanClientCommandHandler 25 | 26 | def __init__(self, host_list=None, random_unique_bytes=RANDOM_UNIQUE_BYTES): 27 | super(GearmanClient, self).__init__(host_list=host_list) 28 | 29 | self.random_unique_bytes = random_unique_bytes 30 | 31 | # The authoritative copy of all requests that this client knows about 32 | # Ignores the fact if a request has been bound to a connection or not 33 | self.request_to_rotating_connection_queue = weakref.WeakKeyDictionary(compat.defaultdict(collections.deque)) 34 | 35 | def submit_job(self, task, data, unique=None, priority=PRIORITY_NONE, background=False, wait_until_complete=True, max_retries=0, poll_timeout=None): 36 | """Submit a single job to any gearman server""" 37 | job_info = dict(task=task, data=data, unique=unique, priority=priority) 38 | completed_job_list = self.submit_multiple_jobs([job_info], background=background, wait_until_complete=wait_until_complete, max_retries=max_retries, poll_timeout=poll_timeout) 39 | return gearman.util.unlist(completed_job_list) 40 | 41 | def submit_multiple_jobs(self, jobs_to_submit, background=False, wait_until_complete=True, max_retries=0, poll_timeout=None): 42 | """Takes a list of jobs_to_submit with dicts of 43 | 44 | {'task': task, 'data': data, 'unique': unique, 'priority': priority} 45 | """ 46 | assert type(jobs_to_submit) in (list, tuple, set), "Expected multiple jobs, received 1?" 47 | 48 | # Convert all job dicts to job request objects 49 | requests_to_submit = [self._create_request_from_dictionary(job_info, background=background, max_retries=max_retries) for job_info in jobs_to_submit] 50 | 51 | return self.submit_multiple_requests(requests_to_submit, wait_until_complete=wait_until_complete, poll_timeout=poll_timeout) 52 | 53 | def submit_multiple_requests(self, job_requests, wait_until_complete=True, poll_timeout=None): 54 | """Take GearmanJobRequests, assign them connections, and request that they be done. 55 | 56 | * Blocks until our jobs are accepted (should be fast) OR times out 57 | * Optionally blocks until jobs are all complete 58 | 59 | You MUST check the status of your requests after calling this function as "timed_out" or "state == JOB_UNKNOWN" maybe True 60 | """ 61 | assert type(job_requests) in (list, tuple, set), "Expected multiple job requests, received 1?" 62 | stopwatch = gearman.util.Stopwatch(poll_timeout) 63 | 64 | # We should always wait until our job is accepted, this should be fast 65 | time_remaining = stopwatch.get_time_remaining() 66 | processed_requests = self.wait_until_jobs_accepted(job_requests, poll_timeout=time_remaining) 67 | 68 | # Optionally, we'll allow a user to wait until all jobs are complete with the same poll_timeout 69 | time_remaining = stopwatch.get_time_remaining() 70 | if wait_until_complete and bool(time_remaining != 0.0): 71 | processed_requests = self.wait_until_jobs_completed(processed_requests, poll_timeout=time_remaining) 72 | 73 | return processed_requests 74 | 75 | def wait_until_jobs_accepted(self, job_requests, poll_timeout=None): 76 | """Go into a select loop until all our jobs have moved to STATE_PENDING""" 77 | assert type(job_requests) in (list, tuple, set), "Expected multiple job requests, received 1?" 78 | 79 | def is_request_pending(current_request): 80 | return bool(current_request.state == JOB_PENDING) 81 | 82 | # Poll until we know we've gotten acknowledgement that our job's been accepted 83 | # If our connection fails while we're waiting for it to be accepted, automatically retry right here 84 | def continue_while_jobs_pending(any_activity): 85 | for current_request in job_requests: 86 | if current_request.state == JOB_UNKNOWN: 87 | self.send_job_request(current_request) 88 | 89 | return compat.any(is_request_pending(current_request) for current_request in job_requests) 90 | 91 | self.poll_connections_until_stopped(self.connection_list, continue_while_jobs_pending, timeout=poll_timeout) 92 | 93 | # Mark any job still in the queued state to poll_timeout 94 | for current_request in job_requests: 95 | current_request.timed_out = is_request_pending(current_request) 96 | 97 | return job_requests 98 | 99 | def wait_until_jobs_completed(self, job_requests, poll_timeout=None): 100 | """Go into a select loop until all our jobs have completed or failed""" 101 | assert type(job_requests) in (list, tuple, set), "Expected multiple job requests, received 1?" 102 | 103 | def is_request_incomplete(current_request): 104 | return not current_request.complete 105 | 106 | # Poll until we get responses for all our functions 107 | # Do NOT attempt to auto-retry connection failures as we have no idea how for a worker got 108 | def continue_while_jobs_incomplete(any_activity): 109 | for current_request in job_requests: 110 | if is_request_incomplete(current_request) and current_request.state != JOB_UNKNOWN: 111 | return True 112 | 113 | return False 114 | 115 | self.poll_connections_until_stopped(self.connection_list, continue_while_jobs_incomplete, timeout=poll_timeout) 116 | 117 | # Mark any job still in the queued state to poll_timeout 118 | for current_request in job_requests: 119 | current_request.timed_out = is_request_incomplete(current_request) 120 | 121 | if not current_request.timed_out: 122 | self.request_to_rotating_connection_queue.pop(current_request, None) 123 | 124 | return job_requests 125 | 126 | def get_job_status(self, current_request, poll_timeout=None): 127 | """Fetch the job status of a single request""" 128 | request_list = self.get_job_statuses([current_request], poll_timeout=poll_timeout) 129 | return gearman.util.unlist(request_list) 130 | 131 | def get_job_statuses(self, job_requests, poll_timeout=None): 132 | """Fetch the job status of a multiple requests""" 133 | assert type(job_requests) in (list, tuple, set), "Expected multiple job requests, received 1?" 134 | for current_request in job_requests: 135 | current_request.status['last_time_received'] = current_request.status.get('time_received') 136 | 137 | current_connection = current_request.job.connection 138 | current_command_handler = self.connection_to_handler_map[current_connection] 139 | 140 | current_command_handler.send_get_status_of_job(current_request) 141 | 142 | return self.wait_until_job_statuses_received(job_requests, poll_timeout=poll_timeout) 143 | 144 | def wait_until_job_statuses_received(self, job_requests, poll_timeout=None): 145 | """Go into a select loop until we received statuses on all our requests""" 146 | assert type(job_requests) in (list, tuple, set), "Expected multiple job requests, received 1?" 147 | def is_status_not_updated(current_request): 148 | current_status = current_request.status 149 | return bool(current_status.get('time_received') == current_status.get('last_time_received')) 150 | 151 | # Poll to make sure we send out our request for a status update 152 | def continue_while_status_not_updated(any_activity): 153 | for current_request in job_requests: 154 | if is_status_not_updated(current_request) and current_request.state != JOB_UNKNOWN: 155 | return True 156 | 157 | return False 158 | 159 | self.poll_connections_until_stopped(self.connection_list, continue_while_status_not_updated, timeout=poll_timeout) 160 | 161 | for current_request in job_requests: 162 | current_request.status = current_request.status or {} 163 | current_request.timed_out = is_status_not_updated(current_request) 164 | 165 | return job_requests 166 | 167 | def _create_request_from_dictionary(self, job_info, background=False, max_retries=0): 168 | """Takes a dictionary with fields {'task': task, 'unique': unique, 'data': data, 'priority': priority, 'background': background}""" 169 | # Make sure we have a unique identifier for ALL our tasks 170 | job_unique = job_info.get('unique') 171 | if not job_unique: 172 | job_unique = os.urandom(self.random_unique_bytes).encode('hex') 173 | 174 | current_job = self.job_class(connection=None, handle=None, task=job_info['task'], unique=job_unique, data=job_info['data']) 175 | 176 | initial_priority = job_info.get('priority', PRIORITY_NONE) 177 | 178 | max_attempts = max_retries + 1 179 | current_request = self.job_request_class(current_job, initial_priority=initial_priority, background=background, max_attempts=max_attempts) 180 | return current_request 181 | 182 | def establish_request_connection(self, current_request): 183 | """Return a live connection for the given hash""" 184 | # We'll keep track of the connections we're attempting to use so if we ever have to retry, we can use this history 185 | rotating_connections = self.request_to_rotating_connection_queue.get(current_request, None) 186 | if not rotating_connections: 187 | shuffled_connection_list = list(self.connection_list) 188 | random.shuffle(shuffled_connection_list) 189 | 190 | rotating_connections = collections.deque(shuffled_connection_list) 191 | self.request_to_rotating_connection_queue[current_request] = rotating_connections 192 | 193 | failed_connections = 0 194 | chosen_connection = None 195 | for possible_connection in rotating_connections: 196 | try: 197 | chosen_connection = self.establish_connection(possible_connection) 198 | break 199 | except ConnectionError: 200 | # Rotate our server list so we'll skip all our broken servers 201 | failed_connections += 1 202 | 203 | if not chosen_connection: 204 | raise ServerUnavailable('Found no valid connections: %r' % self.connection_list) 205 | 206 | # Rotate our server list so we'll skip all our broken servers 207 | rotating_connections.rotate(-failed_connections) 208 | return chosen_connection 209 | 210 | def send_job_request(self, current_request): 211 | """Attempt to send out a job request""" 212 | if current_request.connection_attempts >= current_request.max_connection_attempts: 213 | raise ExceededConnectionAttempts('Exceeded %d connection attempt(s) :: %r' % (current_request.max_connection_attempts, current_request)) 214 | 215 | chosen_connection = self.establish_request_connection(current_request) 216 | 217 | current_request.job.connection = chosen_connection 218 | current_request.connection_attempts += 1 219 | current_request.timed_out = False 220 | 221 | current_command_handler = self.connection_to_handler_map[chosen_connection] 222 | current_command_handler.send_job_request(current_request) 223 | return current_request 224 | -------------------------------------------------------------------------------- /gearman/client_handler.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import time 3 | import logging 4 | import weakref 5 | 6 | from gearman.command_handler import GearmanCommandHandler 7 | from gearman.constants import JOB_UNKNOWN, JOB_PENDING, JOB_CREATED, JOB_FAILED, JOB_COMPLETE 8 | from gearman.errors import InvalidClientState 9 | from gearman.protocol import GEARMAN_COMMAND_GET_STATUS, submit_cmd_for_background_priority 10 | 11 | gearman_logger = logging.getLogger(__name__) 12 | 13 | class GearmanClientCommandHandler(GearmanCommandHandler): 14 | """Maintains the state of this connection on behalf of a GearmanClient""" 15 | def __init__(self, connection_manager=None): 16 | super(GearmanClientCommandHandler, self).__init__(connection_manager=connection_manager) 17 | 18 | # When we first submit jobs, we don't have a handle assigned yet... these handles will be returned in the order of submission 19 | self.requests_awaiting_handles = collections.deque() 20 | self.handle_to_request_map = weakref.WeakValueDictionary() 21 | 22 | ################################################################## 23 | ##### Public interface methods to be called by GearmanClient ##### 24 | ################################################################## 25 | def send_job_request(self, current_request): 26 | """Register a newly created job request""" 27 | self._assert_request_state(current_request, JOB_UNKNOWN) 28 | 29 | gearman_job = current_request.job 30 | 31 | # Handle the I/O for requesting a job - determine which COMMAND we need to send 32 | cmd_type = submit_cmd_for_background_priority(current_request.background, current_request.priority) 33 | 34 | outbound_data = self.encode_data(gearman_job.data) 35 | self.send_command(cmd_type, task=gearman_job.task, unique=gearman_job.unique, data=outbound_data) 36 | 37 | # Once this command is sent, our request needs to wait for a handle 38 | current_request.state = JOB_PENDING 39 | 40 | self.requests_awaiting_handles.append(current_request) 41 | 42 | def send_get_status_of_job(self, current_request): 43 | """Forward the status of a job""" 44 | self._register_request(current_request) 45 | self.send_command(GEARMAN_COMMAND_GET_STATUS, job_handle=current_request.job.handle) 46 | 47 | def on_io_error(self): 48 | for pending_request in self.requests_awaiting_handles: 49 | pending_request.state = JOB_UNKNOWN 50 | 51 | for inflight_request in self.handle_to_request_map.itervalues(): 52 | inflight_request.state = JOB_UNKNOWN 53 | 54 | def _register_request(self, current_request): 55 | self.handle_to_request_map[current_request.job.handle] = current_request 56 | 57 | ################################################################## 58 | ## Gearman command callbacks with kwargs defined by protocol.py ## 59 | ################################################################## 60 | def _assert_request_state(self, current_request, expected_state): 61 | if current_request.state != expected_state: 62 | raise InvalidClientState('Expected handle (%s) to be in state %r, got %r' % (current_request.job.handle, expected_state, current_request.state)) 63 | 64 | def recv_job_created(self, job_handle): 65 | if not self.requests_awaiting_handles: 66 | raise InvalidClientState('Received a job_handle with no pending requests') 67 | 68 | # If our client got a JOB_CREATED, our request now has a server handle 69 | current_request = self.requests_awaiting_handles.popleft() 70 | self._assert_request_state(current_request, JOB_PENDING) 71 | 72 | # Update the state of this request 73 | current_request.job.handle = job_handle 74 | current_request.state = JOB_CREATED 75 | self._register_request(current_request) 76 | 77 | return True 78 | 79 | def recv_work_data(self, job_handle, data): 80 | # Queue a WORK_DATA update 81 | current_request = self.handle_to_request_map[job_handle] 82 | self._assert_request_state(current_request, JOB_CREATED) 83 | 84 | current_request.data_updates.append(self.decode_data(data)) 85 | 86 | return True 87 | 88 | def recv_work_warning(self, job_handle, data): 89 | # Queue a WORK_WARNING update 90 | current_request = self.handle_to_request_map[job_handle] 91 | self._assert_request_state(current_request, JOB_CREATED) 92 | 93 | current_request.warning_updates.append(self.decode_data(data)) 94 | 95 | return True 96 | 97 | def recv_work_status(self, job_handle, numerator, denominator): 98 | # Queue a WORK_STATUS update 99 | current_request = self.handle_to_request_map[job_handle] 100 | self._assert_request_state(current_request, JOB_CREATED) 101 | 102 | # The protocol spec is ambiguous as to what type the numerator and denominator is... 103 | # But according to Eskil, gearmand interprets these as integers 104 | current_request.status = { 105 | 'handle': job_handle, 106 | 'known': True, 107 | 'running': True, 108 | 'numerator': int(numerator), 109 | 'denominator': int(denominator), 110 | 'time_received': time.time() 111 | } 112 | return True 113 | 114 | def recv_work_complete(self, job_handle, data): 115 | # Update the state of our request and store our returned result 116 | current_request = self.handle_to_request_map[job_handle] 117 | self._assert_request_state(current_request, JOB_CREATED) 118 | 119 | current_request.result = self.decode_data(data) 120 | current_request.state = JOB_COMPLETE 121 | 122 | return True 123 | 124 | def recv_work_fail(self, job_handle): 125 | # Update the state of our request and mark this job as failed 126 | current_request = self.handle_to_request_map[job_handle] 127 | self._assert_request_state(current_request, JOB_CREATED) 128 | 129 | current_request.state = JOB_FAILED 130 | 131 | return True 132 | 133 | def recv_work_exception(self, job_handle, data): 134 | # Using GEARMAND_COMMAND_WORK_EXCEPTION is not recommended at time of this writing [2010-02-24] 135 | # http://groups.google.com/group/gearman/browse_thread/thread/5c91acc31bd10688/529e586405ed37fe 136 | # 137 | current_request = self.handle_to_request_map[job_handle] 138 | self._assert_request_state(current_request, JOB_CREATED) 139 | 140 | current_request.exception = self.decode_data(data) 141 | 142 | return True 143 | 144 | def recv_status_res(self, job_handle, known, running, numerator, denominator): 145 | # If we received a STATUS_RES update about this request, update our known status 146 | current_request = self.handle_to_request_map[job_handle] 147 | 148 | job_known = bool(known == '1') 149 | # Make our status response Python friendly 150 | current_request.status = { 151 | 'handle': job_handle, 152 | 'known': job_known, 153 | 'running': bool(running == '1'), 154 | 'numerator': int(numerator), 155 | 'denominator': int(denominator), 156 | 'time_received': time.time() 157 | } 158 | 159 | return True 160 | -------------------------------------------------------------------------------- /gearman/command_handler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from gearman.errors import UnknownCommandError 3 | from gearman.protocol import get_command_name 4 | 5 | gearman_logger = logging.getLogger(__name__) 6 | 7 | class GearmanCommandHandler(object): 8 | """A command handler manages the state which we should be in given a certain stream of commands 9 | 10 | GearmanCommandHandler does no I/O and only understands sending/receiving commands 11 | """ 12 | def __init__(self, connection_manager=None): 13 | self.connection_manager = connection_manager 14 | 15 | def initial_state(self, *largs, **kwargs): 16 | """Called by a Connection Manager after we've been instantiated and we're ready to send off commands""" 17 | pass 18 | 19 | def on_io_error(self): 20 | pass 21 | 22 | def decode_data(self, data): 23 | """Convenience function :: handle binary string -> object unpacking""" 24 | return self.connection_manager.data_encoder.decode(data) 25 | 26 | def encode_data(self, data): 27 | """Convenience function :: handle object -> binary string packing""" 28 | return self.connection_manager.data_encoder.encode(data) 29 | 30 | def fetch_commands(self): 31 | """Called by a Connection Manager to notify us that we have pending commands""" 32 | continue_working = True 33 | while continue_working: 34 | cmd_tuple = self.connection_manager.read_command(self) 35 | if cmd_tuple is None: 36 | break 37 | 38 | cmd_type, cmd_args = cmd_tuple 39 | continue_working = self.recv_command(cmd_type, **cmd_args) 40 | 41 | def send_command(self, cmd_type, **cmd_args): 42 | """Hand off I/O to the connection mananger""" 43 | self.connection_manager.send_command(self, cmd_type, cmd_args) 44 | 45 | def recv_command(self, cmd_type, **cmd_args): 46 | """Maps any command to a recv_* callback function""" 47 | completed_work = None 48 | 49 | gearman_command_name = get_command_name(cmd_type) 50 | if bool(gearman_command_name == cmd_type) or not gearman_command_name.startswith('GEARMAN_COMMAND_'): 51 | unknown_command_msg = 'Could not handle command: %r - %r' % (gearman_command_name, cmd_args) 52 | gearman_logger.error(unknown_command_msg) 53 | raise ValueError(unknown_command_msg) 54 | 55 | recv_command_function_name = gearman_command_name.lower().replace('gearman_command_', 'recv_') 56 | 57 | cmd_callback = getattr(self, recv_command_function_name, None) 58 | if not cmd_callback: 59 | missing_callback_msg = 'Could not handle command: %r - %r' % (get_command_name(cmd_type), cmd_args) 60 | gearman_logger.error(missing_callback_msg) 61 | raise UnknownCommandError(missing_callback_msg) 62 | 63 | # Expand the arguments as passed by the protocol 64 | # This must match the parameter names as defined in the command handler 65 | completed_work = cmd_callback(**cmd_args) 66 | return completed_work 67 | 68 | def recv_error(self, error_code, error_text): 69 | """When we receive an error from the server, notify the connection manager that we have a gearman error""" 70 | return self.connection_manager.on_gearman_error(error_code, error_text) 71 | -------------------------------------------------------------------------------- /gearman/compat.py: -------------------------------------------------------------------------------- 1 | """ 2 | Gearman compatibility module 3 | """ 4 | 5 | # Required for python2.4 backward compatibilty 6 | # Add a module attribute called "any" which is equivalent to "any" 7 | try: 8 | any = any 9 | except NameError: 10 | def any(iterable): 11 | """Return True if any element of the iterable is true. If the iterable is empty, return False""" 12 | for element in iterable: 13 | if element: 14 | return True 15 | return False 16 | 17 | # Required for python2.4 backward compatibilty 18 | # Add a module attribute called "all" which is equivalent to "all" 19 | try: 20 | all = all 21 | except NameError: 22 | def all(iterable): 23 | """Return True if all elements of the iterable are true (or if the iterable is empty)""" 24 | for element in iterable: 25 | if not element: 26 | return False 27 | return True 28 | 29 | # Required for python2.4 backward compatibilty 30 | # Add a class called "defaultdict" which is equivalent to "collections.defaultdict" 31 | try: 32 | from collections import defaultdict 33 | except ImportError: 34 | class defaultdict(dict): 35 | """A pure-Python version of Python 2.5's defaultdict 36 | taken from http://code.activestate.com/recipes/523034-emulate-collectionsdefaultdict/""" 37 | def __init__(self, default_factory=None, * a, ** kw): 38 | if (default_factory is not None and 39 | not hasattr(default_factory, '__call__')): 40 | raise TypeError('first argument must be callable') 41 | dict.__init__(self, * a, ** kw) 42 | self.default_factory = default_factory 43 | def __getitem__(self, key): 44 | try: 45 | return dict.__getitem__(self, key) 46 | except KeyError: 47 | return self.__missing__(key) 48 | def __missing__(self, key): 49 | if self.default_factory is None: 50 | raise KeyError(key) 51 | self[key] = value = self.default_factory() 52 | return value 53 | def __reduce__(self): 54 | if self.default_factory is None: 55 | args = tuple() 56 | else: 57 | args = self.default_factory, 58 | return type(self), args, None, None, self.items() 59 | def copy(self): 60 | return self.__copy__() 61 | def __copy__(self): 62 | return type(self)(self.default_factory, self) 63 | def __deepcopy__(self, memo): 64 | import copy 65 | return type(self)(self.default_factory, 66 | copy.deepcopy(self.items())) 67 | def __repr__(self): 68 | return 'defaultdict(%s, %s)' % (self.default_factory, 69 | dict.__repr__(self)) 70 | -------------------------------------------------------------------------------- /gearman/connection.py: -------------------------------------------------------------------------------- 1 | import array 2 | import collections 3 | import logging 4 | import socket 5 | import ssl 6 | import struct 7 | import time 8 | 9 | from gearman.errors import ConnectionError, ProtocolError, ServerUnavailable 10 | from gearman.constants import DEFAULT_GEARMAN_PORT, _DEBUG_MODE_ 11 | from gearman.protocol import GEARMAN_PARAMS_FOR_COMMAND, GEARMAN_COMMAND_TEXT_COMMAND, NULL_CHAR, \ 12 | get_command_name, pack_binary_command, parse_binary_command, parse_text_command, pack_text_command 13 | 14 | gearman_logger = logging.getLogger(__name__) 15 | 16 | class GearmanConnection(object): 17 | """A connection between a client/worker and a server. Can be used to reconnect (unlike a socket) 18 | 19 | Wraps a socket and provides the following functionality: 20 | Full read/write methods for Gearman BINARY commands and responses 21 | Full read/write methods for Gearman SERVER commands and responses (using GEARMAN_COMMAND_TEXT_COMMAND) 22 | 23 | Manages raw data buffers for socket-level operations 24 | Manages command buffers for gearman-level operations 25 | 26 | All I/O and buffering should be done in this class 27 | """ 28 | connect_cooldown_seconds = 1.0 29 | 30 | def __init__(self, host=None, port=DEFAULT_GEARMAN_PORT, keyfile=None, certfile=None, ca_certs=None): 31 | port = port or DEFAULT_GEARMAN_PORT 32 | self.gearman_host = host 33 | self.gearman_port = port 34 | self.keyfile = keyfile 35 | self.certfile = certfile 36 | self.ca_certs = ca_certs 37 | 38 | if host is None: 39 | raise ServerUnavailable("No host specified") 40 | 41 | # All 3 files must be given before SSL can be used 42 | self.use_ssl = False 43 | if all([self.keyfile, self.certfile, self.ca_certs]): 44 | self.use_ssl = True 45 | 46 | self._reset_connection() 47 | 48 | def _reset_connection(self): 49 | """Reset the state of this connection""" 50 | self.connected = False 51 | self.gearman_socket = None 52 | 53 | self.allowed_connect_time = 0.0 54 | 55 | self._is_client_side = None 56 | self._is_server_side = None 57 | 58 | # Reset all our raw data buffers 59 | self._incoming_buffer = array.array('c') 60 | self._outgoing_buffer = '' 61 | 62 | # Toss all commands we may have sent or received 63 | self._incoming_commands = collections.deque() 64 | self._outgoing_commands = collections.deque() 65 | 66 | def fileno(self): 67 | """Implements fileno() for use with select.select()""" 68 | if not self.gearman_socket: 69 | self.throw_exception(message='no socket set') 70 | 71 | return self.gearman_socket.fileno() 72 | 73 | def get_address(self): 74 | """Returns the host and port""" 75 | return (self.gearman_host, self.gearman_port) 76 | 77 | def writable(self): 78 | """Returns True if we have data to write""" 79 | return self.connected and bool(self._outgoing_commands or self._outgoing_buffer) 80 | 81 | def readable(self): 82 | """Returns True if we might have data to read""" 83 | return self.connected 84 | 85 | def connect(self): 86 | """Connect to the server. Raise ConnectionError if connection fails.""" 87 | if self.connected: 88 | self.throw_exception(message='connection already established') 89 | 90 | current_time = time.time() 91 | if current_time < self.allowed_connect_time: 92 | self.throw_exception(message='attempted to connect before required cooldown') 93 | 94 | self.allowed_connect_time = current_time + self.connect_cooldown_seconds 95 | 96 | self._reset_connection() 97 | 98 | self._create_client_socket() 99 | 100 | self.connected = True 101 | self._is_client_side = True 102 | self._is_server_side = False 103 | 104 | def _create_client_socket(self): 105 | """Creates a client side socket and subsequently binds/configures our socket options""" 106 | try: 107 | client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 108 | 109 | if self.use_ssl: 110 | client_socket = ssl.wrap_socket(client_socket, 111 | keyfile=self.keyfile, 112 | certfile=self.certfile, 113 | ca_certs=self.ca_certs, 114 | cert_reqs=ssl.CERT_REQUIRED, 115 | ssl_version=ssl.PROTOCOL_TLSv1) 116 | 117 | client_socket.connect((self.gearman_host, self.gearman_port)) 118 | except socket.error, socket_exception: 119 | self.throw_exception(exception=socket_exception) 120 | 121 | self.set_socket(client_socket) 122 | 123 | def set_socket(self, current_socket): 124 | """Setup common options for all Gearman-related sockets""" 125 | if self.gearman_socket: 126 | self.throw_exception(message='socket already bound') 127 | 128 | current_socket.setblocking(0) 129 | current_socket.settimeout(0.0) 130 | current_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, struct.pack('L', 1)) 131 | self.gearman_socket = current_socket 132 | 133 | def read_command(self): 134 | """Reads a single command from the command queue""" 135 | if not self._incoming_commands: 136 | return None 137 | 138 | return self._incoming_commands.popleft() 139 | 140 | def read_commands_from_buffer(self): 141 | """Reads data from buffer --> command_queue""" 142 | received_commands = 0 143 | while True: 144 | cmd_type, cmd_args, cmd_len = self._unpack_command(self._incoming_buffer) 145 | if not cmd_len: 146 | break 147 | 148 | received_commands += 1 149 | 150 | # Store our command on the command queue 151 | # Move the self._incoming_buffer forward by the number of bytes we just read 152 | self._incoming_commands.append((cmd_type, cmd_args)) 153 | self._incoming_buffer = self._incoming_buffer[cmd_len:] 154 | 155 | return received_commands 156 | 157 | def read_data_from_socket(self, bytes_to_read=4096): 158 | """Reads data from socket --> buffer""" 159 | if not self.connected: 160 | self.throw_exception(message='disconnected') 161 | 162 | recv_buffer = '' 163 | 164 | while True: 165 | try: 166 | recv_buffer = self.gearman_socket.recv(bytes_to_read) 167 | except ssl.SSLError as e: 168 | # if we would block, ignore the error 169 | if e.errno == ssl.SSL_ERROR_WANT_READ: 170 | continue 171 | elif e.errno == ssl.SSL_ERROR_WANT_WRITE: 172 | continue 173 | else: 174 | self.throw_exception(exception=e) 175 | except socket.error, socket_exception: 176 | self.throw_exception(exception=socket_exception) 177 | 178 | if len(recv_buffer) == 0: 179 | self.throw_exception(message='remote disconnected') 180 | break 181 | 182 | # SSL has an internal buffer we need to empty out 183 | if self.use_ssl: 184 | remaining = self.gearman_socket.pending() 185 | while remaining: 186 | recv_buffer += self.gearman_socket.recv(remaining) 187 | remaining = self.gearman_socket.pending() 188 | 189 | self._incoming_buffer.fromstring(recv_buffer) 190 | return len(self._incoming_buffer) 191 | 192 | def _unpack_command(self, given_buffer): 193 | """Conditionally unpack a binary command or a text based server command""" 194 | assert self._is_client_side is not None, "Ambiguous connection state" 195 | 196 | if not given_buffer: 197 | cmd_type = None 198 | cmd_args = None 199 | cmd_len = 0 200 | elif given_buffer[0] == NULL_CHAR: 201 | # We'll be expecting a response if we know we're a client side command 202 | is_response = bool(self._is_client_side) 203 | cmd_type, cmd_args, cmd_len = parse_binary_command(given_buffer, is_response=is_response) 204 | else: 205 | cmd_type, cmd_args, cmd_len = parse_text_command(given_buffer) 206 | 207 | if _DEBUG_MODE_ and cmd_type is not None: 208 | gearman_logger.debug('%s - Recv - %s - %r', hex(id(self)), get_command_name(cmd_type), cmd_args) 209 | 210 | return cmd_type, cmd_args, cmd_len 211 | 212 | def send_command(self, cmd_type, cmd_args): 213 | """Adds a single gearman command to the outgoing command queue""" 214 | self._outgoing_commands.append((cmd_type, cmd_args)) 215 | 216 | def send_commands_to_buffer(self): 217 | """Sends and packs commands -> buffer""" 218 | if not self._outgoing_commands: 219 | return 220 | 221 | packed_data = [self._outgoing_buffer] 222 | while self._outgoing_commands: 223 | cmd_type, cmd_args = self._outgoing_commands.popleft() 224 | packed_command = self._pack_command(cmd_type, cmd_args) 225 | packed_data.append(packed_command) 226 | 227 | self._outgoing_buffer = ''.join(packed_data) 228 | 229 | def send_data_to_socket(self): 230 | """Send data from buffer -> socket 231 | 232 | Returns remaining size of the output buffer 233 | """ 234 | if not self.connected: 235 | self.throw_exception(message='disconnected') 236 | 237 | if not self._outgoing_buffer: 238 | return 0 239 | 240 | while True: 241 | try: 242 | bytes_sent = self.gearman_socket.send(self._outgoing_buffer) 243 | except ssl.SSLError as e: 244 | if e.errno == ssl.SSL_ERROR_WANT_READ: 245 | continue 246 | elif e.errno == ssl.SSL_ERROR_WANT_WRITE: 247 | continue 248 | else: 249 | self.throw_exception(exception=e) 250 | except socket.error, socket_exception: 251 | self.throw_exception(exception=socket_exception) 252 | 253 | if bytes_sent == 0: 254 | self.throw_exception(message='remote disconnected') 255 | break 256 | 257 | self._outgoing_buffer = self._outgoing_buffer[bytes_sent:] 258 | return len(self._outgoing_buffer) 259 | 260 | def _pack_command(self, cmd_type, cmd_args): 261 | """Converts a command to its raw binary format""" 262 | if cmd_type not in GEARMAN_PARAMS_FOR_COMMAND: 263 | raise ProtocolError('Unknown command: %r' % get_command_name(cmd_type)) 264 | 265 | if _DEBUG_MODE_: 266 | gearman_logger.debug('%s - Send - %s - %r', hex(id(self)), get_command_name(cmd_type), cmd_args) 267 | 268 | if cmd_type == GEARMAN_COMMAND_TEXT_COMMAND: 269 | return pack_text_command(cmd_type, cmd_args) 270 | else: 271 | # We'll be sending a response if we know we're a server side command 272 | is_response = bool(self._is_server_side) 273 | return pack_binary_command(cmd_type, cmd_args, is_response) 274 | 275 | def close(self): 276 | """Shutdown our existing socket and reset all of our connection data""" 277 | try: 278 | if self.gearman_socket: 279 | self.gearman_socket.close() 280 | except socket.error: 281 | pass 282 | 283 | self._reset_connection() 284 | 285 | def throw_exception(self, message=None, exception=None): 286 | # Mark us as disconnected but do NOT call self._reset_connection() 287 | # Allows catchers of ConnectionError a chance to inspect the last state of this connection 288 | self.connected = False 289 | 290 | if exception: 291 | message = repr(exception) 292 | 293 | rewritten_message = "<%s:%d> %s" % (self.gearman_host, self.gearman_port, message) 294 | raise ConnectionError(rewritten_message) 295 | 296 | def __repr__(self): 297 | return ('' % 298 | (self.gearman_host, self.gearman_port, self.connected)) 299 | -------------------------------------------------------------------------------- /gearman/connection_manager.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import gearman.io 4 | import gearman.util 5 | from gearman.connection import GearmanConnection 6 | from gearman.constants import _DEBUG_MODE_ 7 | from gearman.errors import ConnectionError, GearmanError, ServerUnavailable 8 | from gearman.job import GearmanJob, GearmanJobRequest 9 | from gearman import compat 10 | 11 | gearman_logger = logging.getLogger(__name__) 12 | 13 | class DataEncoder(object): 14 | @classmethod 15 | def encode(cls, encodable_object): 16 | raise NotImplementedError 17 | 18 | @classmethod 19 | def decode(cls, decodable_string): 20 | raise NotImplementedError 21 | 22 | class NoopEncoder(DataEncoder): 23 | """Provide common object dumps for all communications over gearman""" 24 | @classmethod 25 | def _enforce_byte_string(cls, given_object): 26 | if type(given_object) != str: 27 | raise TypeError("Expecting byte string, got %r" % type(given_object)) 28 | 29 | @classmethod 30 | def encode(cls, encodable_object): 31 | cls._enforce_byte_string(encodable_object) 32 | return encodable_object 33 | 34 | @classmethod 35 | def decode(cls, decodable_string): 36 | cls._enforce_byte_string(decodable_string) 37 | return decodable_string 38 | 39 | class GearmanConnectionManager(object): 40 | """Abstract base class for any Gearman-type client that needs to connect/listen to multiple connections 41 | 42 | Mananges and polls a group of gearman connections 43 | Forwards all communication between a connection and a command handler 44 | The state of a connection is represented within the command handler 45 | 46 | Automatically encodes all 'data' fields as specified in protocol.py 47 | """ 48 | command_handler_class = None 49 | connection_class = GearmanConnection 50 | 51 | job_class = GearmanJob 52 | job_request_class = GearmanJobRequest 53 | 54 | data_encoder = NoopEncoder 55 | 56 | def __init__(self, host_list=None): 57 | assert self.command_handler_class is not None, 'GearmanClientBase did not receive a command handler class' 58 | 59 | self.connection_list = [] 60 | 61 | host_list = host_list or [] 62 | for element in host_list: 63 | # old style host:port pair 64 | if isinstance(element, str): 65 | self.add_connection(element) 66 | elif isinstance(element, dict): 67 | if not all (k in element for k in ('host', 'port', 'keyfile', 'certfile', 'ca_certs')): 68 | raise GearmanError("Incomplete SSL connection definition") 69 | self.add_ssl_connection(element['host'], element['port'], 70 | element['keyfile'], element['certfile'], 71 | element['ca_certs']) 72 | 73 | self.handler_to_connection_map = {} 74 | self.connection_to_handler_map = {} 75 | 76 | self.handler_initial_state = {} 77 | 78 | def shutdown(self): 79 | # Shutdown all our connections one by one 80 | for gearman_connection in self.connection_list: 81 | gearman_connection.close() 82 | 83 | ################################### 84 | # Connection management functions # 85 | ################################### 86 | 87 | def add_ssl_connection(self, host, port, keyfile, certfile, ca_certs): 88 | """Add a new SSL connection to this connection manager""" 89 | client_connection = self.connection_class(host=host, 90 | port=port, 91 | keyfile=keyfile, 92 | certfile=certfile, 93 | ca_certs=ca_certs) 94 | self.connection_list.append(client_connection) 95 | return client_connection 96 | 97 | def add_connection(self, hostport_tuple): 98 | """Add a new connection to this connection manager""" 99 | gearman_host, gearman_port = gearman.util.disambiguate_server_parameter(hostport_tuple) 100 | 101 | client_connection = self.connection_class(host=gearman_host, port=gearman_port) 102 | self.connection_list.append(client_connection) 103 | 104 | return client_connection 105 | 106 | def establish_connection(self, current_connection): 107 | """Attempt to connect... if not previously connected, create a new CommandHandler to manage this connection's state 108 | !NOTE! This function can throw a ConnectionError which deriving ConnectionManagers should catch 109 | """ 110 | assert current_connection in self.connection_list, "Unknown connection - %r" % current_connection 111 | if current_connection.connected: 112 | return current_connection 113 | 114 | # !NOTE! May throw a ConnectionError 115 | current_connection.connect() 116 | 117 | # Initiate a new command handler every time we start a new connection 118 | current_handler = self.command_handler_class(connection_manager=self) 119 | 120 | # Handler to connection map for CommandHandler -> Connection interactions 121 | # Connection to handler map for Connection -> CommandHandler interactions 122 | self.handler_to_connection_map[current_handler] = current_connection 123 | self.connection_to_handler_map[current_connection] = current_handler 124 | 125 | current_handler.initial_state(**self.handler_initial_state) 126 | return current_connection 127 | 128 | def poll_connections_once(self, poller, connection_map, timeout=None): 129 | # a timeout of -1 when used with epoll will block until there 130 | # is activity. Select does not support negative timeouts, so this 131 | # is translated to a timeout=None when falling back to select 132 | timeout = timeout or -1 133 | 134 | readable = set() 135 | writable = set() 136 | errors = set() 137 | for fileno, events in poller.poll(timeout=timeout): 138 | conn = connection_map.get(fileno) 139 | if not conn: 140 | continue 141 | if events & gearman.io.READ: 142 | readable.add(conn) 143 | if events & gearman.io.WRITE: 144 | writable.add(conn) 145 | if events & gearman.io.ERROR: 146 | errors.add(conn) 147 | 148 | return readable, writable, errors 149 | 150 | def handle_connection_activity(self, rd_connections, wr_connections, ex_connections): 151 | """Process all connection activity... executes all handle_* callbacks""" 152 | dead_connections = set() 153 | for current_connection in rd_connections: 154 | try: 155 | self.handle_read(current_connection) 156 | except ConnectionError: 157 | dead_connections.add(current_connection) 158 | 159 | for current_connection in wr_connections: 160 | try: 161 | self.handle_write(current_connection) 162 | except ConnectionError: 163 | dead_connections.add(current_connection) 164 | 165 | for current_connection in ex_connections: 166 | self.handle_error(current_connection) 167 | 168 | for current_connection in dead_connections: 169 | self.handle_error(current_connection) 170 | 171 | failed_connections = ex_connections | dead_connections 172 | return rd_connections, wr_connections, failed_connections 173 | 174 | def _register_connections_with_poller(self, connections, poller): 175 | for conn in connections: 176 | # possible that not all connections have been established yet 177 | if not conn.gearman_socket: 178 | continue 179 | events = 0 180 | if conn.readable(): 181 | events |= gearman.io.READ 182 | if conn.writable(): 183 | events |= gearman.io.WRITE 184 | poller.register(conn, events) 185 | 186 | def poll_connections_until_stopped(self, submitted_connections, callback_fxn, timeout=None): 187 | """Continue to poll our connections until we receive a stopping condition""" 188 | stopwatch = gearman.util.Stopwatch(timeout) 189 | submitted_connections = set(submitted_connections) 190 | connection_map = {} 191 | 192 | any_activity = False 193 | callback_ok = callback_fxn(any_activity) 194 | connection_ok = compat.any(current_connection.connected for current_connection in submitted_connections) 195 | poller = gearman.io.get_connection_poller() 196 | if connection_ok: 197 | self._register_connections_with_poller(submitted_connections, 198 | poller) 199 | connection_map = dict([(c.fileno(), c) for c in 200 | submitted_connections if c.connected]) 201 | 202 | while connection_ok and callback_ok: 203 | time_remaining = stopwatch.get_time_remaining() 204 | if time_remaining == 0.0: 205 | break 206 | 207 | # Do a single robust select and handle all connection activity 208 | read_connections, write_connections, dead_connections = self.poll_connections_once(poller, connection_map, timeout=time_remaining) 209 | 210 | # Handle reads and writes and close all of the dead connections 211 | read_connections, write_connections, dead_connections = self.handle_connection_activity(read_connections, write_connections, dead_connections) 212 | 213 | any_activity = compat.any([read_connections, write_connections, dead_connections]) 214 | 215 | # Do not retry dead connections on the next iteration of the loop, as we closed them in handle_error 216 | submitted_connections -= dead_connections 217 | 218 | callback_ok = callback_fxn(any_activity) 219 | connection_ok = compat.any(current_connection.connected for current_connection in submitted_connections) 220 | 221 | poller.close() 222 | 223 | # We should raise here if we have no alive connections (don't go into a select polling loop with no connections) 224 | if not connection_ok: 225 | raise ServerUnavailable('Found no valid connections in list: %r' % self.connection_list) 226 | 227 | return bool(connection_ok and callback_ok) 228 | 229 | def handle_read(self, current_connection): 230 | """Handle all our pending socket data""" 231 | current_handler = self.connection_to_handler_map[current_connection] 232 | 233 | # Transfer data from socket -> buffer 234 | current_connection.read_data_from_socket() 235 | 236 | # Transfer command from buffer -> command queue 237 | current_connection.read_commands_from_buffer() 238 | 239 | # Notify the handler that we have commands to fetch 240 | current_handler.fetch_commands() 241 | 242 | def handle_write(self, current_connection): 243 | # Transfer command from command queue -> buffer 244 | current_connection.send_commands_to_buffer() 245 | 246 | # Transfer data from buffer -> socket 247 | current_connection.send_data_to_socket() 248 | 249 | def handle_error(self, current_connection): 250 | dead_handler = self.connection_to_handler_map.pop(current_connection, None) 251 | if dead_handler: 252 | dead_handler.on_io_error() 253 | 254 | self.handler_to_connection_map.pop(dead_handler, None) 255 | current_connection.close() 256 | 257 | ################################## 258 | # Callbacks for Command Handlers # 259 | ################################## 260 | 261 | def read_command(self, command_handler): 262 | """CommandHandlers call this function to fetch pending commands 263 | 264 | NOTE: CommandHandlers have NO knowledge as to which connection they're representing 265 | ConnectionManagers must forward inbound commands to CommandHandlers 266 | """ 267 | gearman_connection = self.handler_to_connection_map[command_handler] 268 | cmd_tuple = gearman_connection.read_command() 269 | if cmd_tuple is None: 270 | return cmd_tuple 271 | 272 | cmd_type, cmd_args = cmd_tuple 273 | return cmd_type, cmd_args 274 | 275 | def send_command(self, command_handler, cmd_type, cmd_args): 276 | """CommandHandlers call this function to send pending commands 277 | 278 | NOTE: CommandHandlers have NO knowledge as to which connection they're representing 279 | ConnectionManagers must forward outbound commands to Connections 280 | """ 281 | gearman_connection = self.handler_to_connection_map[command_handler] 282 | gearman_connection.send_command(cmd_type, cmd_args) 283 | 284 | def on_gearman_error(self, error_code, error_text): 285 | gearman_logger.error('Received error from server: %s: %s' % (error_code, error_text)) 286 | return False 287 | -------------------------------------------------------------------------------- /gearman/constants.py: -------------------------------------------------------------------------------- 1 | _DEBUG_MODE_ = False 2 | DEFAULT_GEARMAN_PORT = 4730 3 | 4 | PRIORITY_NONE = None 5 | PRIORITY_LOW = 'LOW' 6 | PRIORITY_HIGH = 'HIGH' 7 | 8 | JOB_UNKNOWN = 'UNKNOWN' # Request state is currently unknown, either unsubmitted or connection failed 9 | JOB_PENDING = 'PENDING' # Request has been submitted, pending handle 10 | JOB_CREATED = 'CREATED' # Request has been accepted 11 | JOB_FAILED = 'FAILED' # Request received an explicit fail 12 | JOB_COMPLETE = 'COMPLETE' # Request received an explicit complete 13 | -------------------------------------------------------------------------------- /gearman/errors.py: -------------------------------------------------------------------------------- 1 | class GearmanError(Exception): 2 | pass 3 | 4 | class ConnectionError(GearmanError): 5 | pass 6 | 7 | class ServerUnavailable(GearmanError): 8 | pass 9 | 10 | class ProtocolError(GearmanError): 11 | pass 12 | 13 | class UnknownCommandError(GearmanError): 14 | pass 15 | 16 | class ExceededConnectionAttempts(GearmanError): 17 | pass 18 | 19 | class InvalidClientState(GearmanError): 20 | pass 21 | 22 | class InvalidWorkerState(GearmanError): 23 | pass 24 | 25 | class InvalidAdminClientState(GearmanError): 26 | pass 27 | -------------------------------------------------------------------------------- /gearman/io.py: -------------------------------------------------------------------------------- 1 | import select 2 | 3 | import gearman.errors 4 | import gearman.util 5 | 6 | # epoll event types 7 | _EPOLLIN = 0x01 8 | _EPOLLOUT = 0x04 9 | _EPOLLERR = 0x08 10 | _EPOLLHUP = 0x10 11 | 12 | READ = _EPOLLIN 13 | WRITE = _EPOLLOUT 14 | ERROR = _EPOLLERR | _EPOLLHUP 15 | 16 | def get_connection_poller(): 17 | """ 18 | Returns a select.epoll-like object. Depending on the platform, this will 19 | either be: 20 | - On modern Linux system, with python >= 2.6: select.epoll 21 | - On all other systems: gearman.io._Select: an object that mimics 22 | select.epoll, but uses select.select 23 | """ 24 | if hasattr(select, "epoll"): 25 | return select.epoll() 26 | else: 27 | return _Select() 28 | 29 | def _find_bad_connections(connections): 30 | """ 31 | Find any bad connections in a list of connections. 32 | 33 | For use with select.select. 34 | 35 | When select throws an exception, it's likely that one of the sockets 36 | passed in has died. In order to find the bad connections, they must be 37 | checked individually. This will do so and return a list of any bad 38 | connections found. 39 | """ 40 | bad = [] 41 | for conn in connections: 42 | try: 43 | _, _, _ = gearman.util.select([conn], [], [], timeout=0) 44 | except (select.error, gearman.errors.ConnectionError): 45 | bad.append(conn) 46 | return bad 47 | 48 | class _Select(object): 49 | """ 50 | A `select.epoll`-like object that uses select.select. 51 | 52 | Used as a fallback when epoll is not available. Inspired by tornado's 53 | fallback mechanism 54 | """ 55 | 56 | def __init__(self): 57 | self.read = set() 58 | self.write = set() 59 | self.error = set() 60 | 61 | def close(self): 62 | """ 63 | Close the _Select object. For parity with select.epoll. Does nothing 64 | here. 65 | """ 66 | pass 67 | 68 | def register(self, fd, evmask): 69 | """ 70 | Register a file descriptor for polling. 71 | 72 | fd: a file descriptor (socket) to be registers 73 | evmask: a bit set describing the desired events to report 74 | 75 | Events are similar to those accepted by select.epoll: 76 | - gearman.io.READ: report when fd is readable (i.e.: a socket.recv 77 | operation likely won't block, and will yield some data) 78 | - gearman.io.WRITE: report when fd is writable (i.e.: a socket.send 79 | operation likely won't block, and will be able to write some 80 | data) 81 | - gearman.io.ERROR: report when fd is in an error state 82 | """ 83 | if fd in self.read or fd in self.write or fd in self.error: 84 | raise ValueError("Connection already registered: %d" % fd.fileno()) 85 | if evmask & READ: 86 | self.read.add(fd) 87 | if evmask & WRITE: 88 | self.write.add(fd) 89 | if evmask & ERROR: 90 | self.error.add(fd) 91 | 92 | def modify(self, fd, evmask): 93 | """ 94 | Update the IO events that should be reported for a given file 95 | descriptor. See _Select.register for details on these events 96 | """ 97 | self.unregister(fd) 98 | self.register(fd, evmask) 99 | 100 | def unregister(self, fd): 101 | """ 102 | Stop tracking events for a given file descriptor 103 | """ 104 | self.read.discard(fd) 105 | self.write.discard(fd) 106 | self.error.discard(fd) 107 | 108 | def poll(self, timeout): 109 | """ 110 | Wait for events for any of the of register file descriptors. The 111 | maximum time to wait is specified by the timeout value. 112 | 113 | A timeout < 0 will block indefinitely. A timeout of 0 will not block at 114 | all. And, a timeout > 0 will block for at most that many seconds. The 115 | timeout parameter may be a floating point number. 116 | """ 117 | readable = set() 118 | writable = set() 119 | errors = set() 120 | 121 | if timeout is not None and timeout < 0.0: 122 | # for parity with epoll, negative timeout = block until there 123 | # is activity 124 | timeout = None 125 | 126 | connections = (self.read|self.write|self.error) 127 | 128 | success = False 129 | while not success and connections: 130 | connections -= errors 131 | try: 132 | r, w, e = gearman.util.select(self.read, 133 | self.write, self.error, timeout) 134 | readable = set(r) 135 | writable = set(w) 136 | errors |= set(e) #this set could already be populated 137 | success = True 138 | except (select.error, gearman.errors.ConnectionError): 139 | bad_conns = _find_bad_connections(connections) 140 | map(self.read.discard, bad_conns) 141 | map(self.write.discard, bad_conns) 142 | map(self.error.discard, bad_conns) 143 | errors |= set(bad_conns) 144 | 145 | 146 | events = {} 147 | for conn in readable: 148 | events[conn.fileno()] = events.get(conn.fileno(), 0) | READ 149 | for conn in writable: 150 | events[conn.fileno()] = events.get(conn.fileno(), 0) | WRITE 151 | for conn in errors: 152 | events[conn.fileno()] = events.get(conn.fileno(), 0) | ERROR 153 | 154 | return events.items() 155 | 156 | -------------------------------------------------------------------------------- /gearman/job.py: -------------------------------------------------------------------------------- 1 | import collections 2 | from gearman.constants import PRIORITY_NONE, JOB_UNKNOWN, JOB_PENDING, JOB_CREATED, JOB_FAILED, JOB_COMPLETE 3 | 4 | class GearmanJob(object): 5 | """Represents the basics of a job... used in GearmanClient / GearmanWorker to represent job states""" 6 | def __init__(self, connection, handle, task, unique, data): 7 | self.connection = connection 8 | self.handle = handle 9 | 10 | self.task = task 11 | self.unique = unique 12 | self.data = data 13 | 14 | def to_dict(self): 15 | return dict(task=self.task, job_handle=self.handle, unique=self.unique, data=self.data) 16 | 17 | def __repr__(self): 18 | return '' % (self.connection, self.handle, self.task, self.unique, self.data) 19 | 20 | class GearmanJobRequest(object): 21 | """Represents a job request... used in GearmanClient to represent job states""" 22 | def __init__(self, gearman_job, initial_priority=PRIORITY_NONE, background=False, max_attempts=1): 23 | self.gearman_job = gearman_job 24 | 25 | self.priority = initial_priority 26 | self.background = background 27 | 28 | self.connection_attempts = 0 29 | self.max_connection_attempts = max_attempts 30 | 31 | self.initialize_request() 32 | 33 | def initialize_request(self): 34 | # Holds WORK_COMPLETE responses 35 | self.result = None 36 | 37 | # Holds WORK_EXCEPTION responses 38 | self.exception = None 39 | 40 | # Queues to hold WORK_WARNING, WORK_DATA responses 41 | self.warning_updates = collections.deque() 42 | self.data_updates = collections.deque() 43 | 44 | # Holds WORK_STATUS / STATUS_REQ responses 45 | self.status = {} 46 | 47 | self.state = JOB_UNKNOWN 48 | self.timed_out = False 49 | 50 | def reset(self): 51 | self.initialize_request() 52 | self.connection = None 53 | self.handle = None 54 | 55 | @property 56 | def status_updates(self): 57 | """Deprecated since 2.0.1, removing in next major release""" 58 | output_queue = collections.deque() 59 | if self.status: 60 | output_queue.append((self.status.get('numerator', 0), self.status.get('denominator', 0))) 61 | 62 | return output_queue 63 | 64 | @property 65 | def server_status(self): 66 | """Deprecated since 2.0.1, removing in next major release""" 67 | return self.status 68 | 69 | @property 70 | def job(self): 71 | return self.gearman_job 72 | 73 | @property 74 | def complete(self): 75 | background_complete = bool(self.background and self.state in (JOB_CREATED)) 76 | foreground_complete = bool(not self.background and self.state in (JOB_FAILED, JOB_COMPLETE)) 77 | 78 | actually_complete = background_complete or foreground_complete 79 | return actually_complete 80 | 81 | def __repr__(self): 82 | formatted_representation = '' 83 | return formatted_representation % (self.job.task, self.job.unique, self.priority, self.background, self.state, self.timed_out) 84 | -------------------------------------------------------------------------------- /gearman/protocol.py: -------------------------------------------------------------------------------- 1 | import struct 2 | from gearman.constants import PRIORITY_NONE, PRIORITY_LOW, PRIORITY_HIGH 3 | from gearman.errors import ProtocolError 4 | from gearman import compat 5 | # Protocol specific constants 6 | NULL_CHAR = '\x00' 7 | MAGIC_RES_STRING = '%sRES' % NULL_CHAR 8 | MAGIC_REQ_STRING = '%sREQ' % NULL_CHAR 9 | 10 | COMMAND_HEADER_SIZE = 12 11 | 12 | # Gearman commands 1-9 13 | GEARMAN_COMMAND_CAN_DO = 1 14 | GEARMAN_COMMAND_CANT_DO = 2 15 | GEARMAN_COMMAND_RESET_ABILITIES = 3 16 | GEARMAN_COMMAND_PRE_SLEEP = 4 17 | GEARMAN_COMMAND_NOOP = 6 18 | GEARMAN_COMMAND_SUBMIT_JOB = 7 19 | GEARMAN_COMMAND_JOB_CREATED = 8 20 | GEARMAN_COMMAND_GRAB_JOB = 9 21 | 22 | # Gearman commands 10-19 23 | GEARMAN_COMMAND_NO_JOB = 10 24 | GEARMAN_COMMAND_JOB_ASSIGN = 11 25 | GEARMAN_COMMAND_WORK_STATUS = 12 26 | GEARMAN_COMMAND_WORK_COMPLETE = 13 27 | GEARMAN_COMMAND_WORK_FAIL = 14 28 | GEARMAN_COMMAND_GET_STATUS = 15 29 | GEARMAN_COMMAND_ECHO_REQ = 16 30 | GEARMAN_COMMAND_ECHO_RES = 17 31 | GEARMAN_COMMAND_SUBMIT_JOB_BG = 18 32 | GEARMAN_COMMAND_ERROR = 19 33 | 34 | # Gearman commands 20-29 35 | GEARMAN_COMMAND_STATUS_RES = 20 36 | GEARMAN_COMMAND_SUBMIT_JOB_HIGH = 21 37 | GEARMAN_COMMAND_SET_CLIENT_ID = 22 38 | GEARMAN_COMMAND_CAN_DO_TIMEOUT = 23 39 | GEARMAN_COMMAND_ALL_YOURS = 24 40 | GEARMAN_COMMAND_WORK_EXCEPTION = 25 41 | GEARMAN_COMMAND_OPTION_REQ = 26 42 | GEARMAN_COMMAND_OPTION_RES = 27 43 | GEARMAN_COMMAND_WORK_DATA = 28 44 | GEARMAN_COMMAND_WORK_WARNING = 29 45 | 46 | # Gearman commands 30-39 47 | GEARMAN_COMMAND_GRAB_JOB_UNIQ = 30 48 | GEARMAN_COMMAND_JOB_ASSIGN_UNIQ = 31 49 | GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG = 32 50 | GEARMAN_COMMAND_SUBMIT_JOB_LOW = 33 51 | GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG = 34 52 | 53 | # Fake command code 54 | GEARMAN_COMMAND_TEXT_COMMAND = 9999 55 | 56 | GEARMAN_PARAMS_FOR_COMMAND = { 57 | # Gearman commands 1-9 58 | GEARMAN_COMMAND_CAN_DO: ['task'], 59 | GEARMAN_COMMAND_CANT_DO: ['task'], 60 | GEARMAN_COMMAND_RESET_ABILITIES: [], 61 | GEARMAN_COMMAND_PRE_SLEEP: [], 62 | GEARMAN_COMMAND_NOOP: [], 63 | GEARMAN_COMMAND_SUBMIT_JOB: ['task', 'unique', 'data'], 64 | GEARMAN_COMMAND_JOB_CREATED: ['job_handle'], 65 | GEARMAN_COMMAND_GRAB_JOB: [], 66 | 67 | # Gearman commands 10-19 68 | GEARMAN_COMMAND_NO_JOB: [], 69 | GEARMAN_COMMAND_JOB_ASSIGN: ['job_handle', 'task', 'data'], 70 | GEARMAN_COMMAND_WORK_STATUS: ['job_handle', 'numerator', 'denominator'], 71 | GEARMAN_COMMAND_WORK_COMPLETE: ['job_handle', 'data'], 72 | GEARMAN_COMMAND_WORK_FAIL: ['job_handle'], 73 | GEARMAN_COMMAND_GET_STATUS: ['job_handle'], 74 | GEARMAN_COMMAND_ECHO_REQ: ['data'], 75 | GEARMAN_COMMAND_ECHO_RES: ['data'], 76 | GEARMAN_COMMAND_SUBMIT_JOB_BG: ['task', 'unique', 'data'], 77 | GEARMAN_COMMAND_ERROR: ['error_code', 'error_text'], 78 | 79 | # Gearman commands 20-29 80 | GEARMAN_COMMAND_STATUS_RES: ['job_handle', 'known', 'running', 'numerator', 'denominator'], 81 | GEARMAN_COMMAND_SUBMIT_JOB_HIGH: ['task', 'unique', 'data'], 82 | GEARMAN_COMMAND_SET_CLIENT_ID: ['client_id'], 83 | GEARMAN_COMMAND_CAN_DO_TIMEOUT: ['task', 'timeout'], 84 | GEARMAN_COMMAND_ALL_YOURS: [], 85 | GEARMAN_COMMAND_WORK_EXCEPTION: ['job_handle', 'data'], 86 | GEARMAN_COMMAND_OPTION_REQ: ['option_name'], 87 | GEARMAN_COMMAND_OPTION_RES: ['option_name'], 88 | GEARMAN_COMMAND_WORK_DATA: ['job_handle', 'data'], 89 | GEARMAN_COMMAND_WORK_WARNING: ['job_handle', 'data'], 90 | 91 | # Gearman commands 30-39 92 | GEARMAN_COMMAND_GRAB_JOB_UNIQ: [], 93 | GEARMAN_COMMAND_JOB_ASSIGN_UNIQ: ['job_handle', 'task', 'unique', 'data'], 94 | GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG: ['task', 'unique', 'data'], 95 | GEARMAN_COMMAND_SUBMIT_JOB_LOW: ['task', 'unique', 'data'], 96 | GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG: ['task', 'unique', 'data'], 97 | 98 | # Fake gearman command 99 | GEARMAN_COMMAND_TEXT_COMMAND: ['raw_text'] 100 | } 101 | 102 | GEARMAN_COMMAND_TO_NAME = { 103 | GEARMAN_COMMAND_CAN_DO: 'GEARMAN_COMMAND_CAN_DO', 104 | GEARMAN_COMMAND_CANT_DO: 'GEARMAN_COMMAND_CANT_DO', 105 | GEARMAN_COMMAND_RESET_ABILITIES: 'GEARMAN_COMMAND_RESET_ABILITIES', 106 | GEARMAN_COMMAND_PRE_SLEEP: 'GEARMAN_COMMAND_PRE_SLEEP', 107 | GEARMAN_COMMAND_NOOP: 'GEARMAN_COMMAND_NOOP', 108 | GEARMAN_COMMAND_SUBMIT_JOB: 'GEARMAN_COMMAND_SUBMIT_JOB', 109 | GEARMAN_COMMAND_JOB_CREATED: 'GEARMAN_COMMAND_JOB_CREATED', 110 | GEARMAN_COMMAND_GRAB_JOB: 'GEARMAN_COMMAND_GRAB_JOB', 111 | 112 | # Gearman commands 10-19 113 | GEARMAN_COMMAND_NO_JOB: 'GEARMAN_COMMAND_NO_JOB', 114 | GEARMAN_COMMAND_JOB_ASSIGN: 'GEARMAN_COMMAND_JOB_ASSIGN', 115 | GEARMAN_COMMAND_WORK_STATUS: 'GEARMAN_COMMAND_WORK_STATUS', 116 | GEARMAN_COMMAND_WORK_COMPLETE: 'GEARMAN_COMMAND_WORK_COMPLETE', 117 | GEARMAN_COMMAND_WORK_FAIL: 'GEARMAN_COMMAND_WORK_FAIL', 118 | GEARMAN_COMMAND_GET_STATUS: 'GEARMAN_COMMAND_GET_STATUS', 119 | GEARMAN_COMMAND_ECHO_REQ: 'GEARMAN_COMMAND_ECHO_REQ', 120 | GEARMAN_COMMAND_ECHO_RES: 'GEARMAN_COMMAND_ECHO_RES', 121 | GEARMAN_COMMAND_SUBMIT_JOB_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_BG', 122 | GEARMAN_COMMAND_ERROR: 'GEARMAN_COMMAND_ERROR', 123 | 124 | # Gearman commands 20-29 125 | GEARMAN_COMMAND_STATUS_RES: 'GEARMAN_COMMAND_STATUS_RES', 126 | GEARMAN_COMMAND_SUBMIT_JOB_HIGH: 'GEARMAN_COMMAND_SUBMIT_JOB_HIGH', 127 | GEARMAN_COMMAND_SET_CLIENT_ID: 'GEARMAN_COMMAND_SET_CLIENT_ID', 128 | GEARMAN_COMMAND_CAN_DO_TIMEOUT: 'GEARMAN_COMMAND_CAN_DO_TIMEOUT', 129 | GEARMAN_COMMAND_ALL_YOURS: 'GEARMAN_COMMAND_ALL_YOURS', 130 | GEARMAN_COMMAND_WORK_EXCEPTION: 'GEARMAN_COMMAND_WORK_EXCEPTION', 131 | GEARMAN_COMMAND_OPTION_REQ: 'GEARMAN_COMMAND_OPTION_REQ', 132 | GEARMAN_COMMAND_OPTION_RES: 'GEARMAN_COMMAND_OPTION_RES', 133 | GEARMAN_COMMAND_WORK_DATA: 'GEARMAN_COMMAND_WORK_DATA', 134 | GEARMAN_COMMAND_WORK_WARNING: 'GEARMAN_COMMAND_WORK_WARNING', 135 | 136 | # Gearman commands 30-39 137 | GEARMAN_COMMAND_GRAB_JOB_UNIQ: 'GEARMAN_COMMAND_GRAB_JOB_UNIQ', 138 | GEARMAN_COMMAND_JOB_ASSIGN_UNIQ: 'GEARMAN_COMMAND_JOB_ASSIGN_UNIQ', 139 | GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG', 140 | GEARMAN_COMMAND_SUBMIT_JOB_LOW: 'GEARMAN_COMMAND_SUBMIT_JOB_LOW', 141 | GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG', 142 | 143 | GEARMAN_COMMAND_TEXT_COMMAND: 'GEARMAN_COMMAND_TEXT_COMMAND' 144 | } 145 | 146 | GEARMAN_SERVER_COMMAND_STATUS = 'status' 147 | GEARMAN_SERVER_COMMAND_VERSION = 'version' 148 | GEARMAN_SERVER_COMMAND_WORKERS = 'workers' 149 | GEARMAN_SERVER_COMMAND_MAXQUEUE = 'maxqueue' 150 | GEARMAN_SERVER_COMMAND_SHUTDOWN = 'shutdown' 151 | GEARMAN_SERVER_COMMAND_GETPID = 'getpid' 152 | GEARMAN_SERVER_COMMAND_SHOW_JOBS = 'show jobs' 153 | GEARMAN_SERVER_COMMAND_SHOW_UNIQUE_JOBS = 'show unique jobs' 154 | GEARMAN_SERVER_COMMAND_CANCEL_JOB = 'cancel job' 155 | 156 | def get_command_name(cmd_type): 157 | return GEARMAN_COMMAND_TO_NAME.get(cmd_type, cmd_type) 158 | 159 | def submit_cmd_for_background_priority(background, priority): 160 | cmd_type_lookup = { 161 | (True, PRIORITY_NONE): GEARMAN_COMMAND_SUBMIT_JOB_BG, 162 | (True, PRIORITY_LOW): GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG, 163 | (True, PRIORITY_HIGH): GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG, 164 | (False, PRIORITY_NONE): GEARMAN_COMMAND_SUBMIT_JOB, 165 | (False, PRIORITY_LOW): GEARMAN_COMMAND_SUBMIT_JOB_LOW, 166 | (False, PRIORITY_HIGH): GEARMAN_COMMAND_SUBMIT_JOB_HIGH 167 | } 168 | lookup_tuple = (background, priority) 169 | cmd_type = cmd_type_lookup[lookup_tuple] 170 | return cmd_type 171 | 172 | def parse_binary_command(in_buffer, is_response=True): 173 | """Parse data and return (command type, command arguments dict, command size) 174 | or (None, None, data) if there's not enough data for a complete command. 175 | """ 176 | in_buffer_size = len(in_buffer) 177 | magic = None 178 | cmd_type = None 179 | cmd_args = None 180 | cmd_len = 0 181 | expected_packet_size = None 182 | 183 | # If we don't have enough data to parse, error early 184 | if in_buffer_size < COMMAND_HEADER_SIZE: 185 | return cmd_type, cmd_args, cmd_len 186 | 187 | # By default, we'll assume we're dealing with a gearman command 188 | magic, cmd_type, cmd_len = struct.unpack('!4sII', in_buffer[:COMMAND_HEADER_SIZE]) 189 | 190 | received_bad_response = is_response and bool(magic != MAGIC_RES_STRING) 191 | received_bad_request = not is_response and bool(magic != MAGIC_REQ_STRING) 192 | if received_bad_response or received_bad_request: 193 | raise ProtocolError('Malformed Magic') 194 | 195 | expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None) 196 | 197 | # GEARMAN_COMMAND_TEXT_COMMAND is a faked command that we use to support server text-based commands 198 | if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND: 199 | raise ProtocolError('Received unknown binary command: %s' % cmd_type) 200 | 201 | # If everything indicates this is a valid command, we should check to see if we have enough stuff to read in our buffer 202 | expected_packet_size = COMMAND_HEADER_SIZE + cmd_len 203 | if in_buffer_size < expected_packet_size: 204 | return None, None, 0 205 | 206 | binary_payload = in_buffer[COMMAND_HEADER_SIZE:expected_packet_size] 207 | split_arguments = [] 208 | 209 | if len(expected_cmd_params) > 0: 210 | binary_payload = binary_payload.tostring() 211 | split_arguments = binary_payload.split(NULL_CHAR, len(expected_cmd_params) - 1) 212 | elif binary_payload: 213 | raise ProtocolError('Expected no binary payload: %s' % get_command_name(cmd_type)) 214 | 215 | # This is a sanity check on the binary_payload.split() phase 216 | # We should never be able to get here with any VALID gearman data 217 | if len(split_arguments) != len(expected_cmd_params): 218 | raise ProtocolError('Received %d argument(s), expecting %d argument(s): %s' % (len(split_arguments), len(expected_cmd_params), get_command_name(cmd_type))) 219 | 220 | # Iterate through the split arguments and assign them labels based on their order 221 | cmd_args = dict((param_label, param_value) for param_label, param_value in zip(expected_cmd_params, split_arguments)) 222 | return cmd_type, cmd_args, expected_packet_size 223 | 224 | 225 | def pack_binary_command(cmd_type, cmd_args, is_response=False): 226 | """Packs the given command using the parameter ordering specified in GEARMAN_PARAMS_FOR_COMMAND. 227 | *NOTE* Expects that all arguments in cmd_args are already str's. 228 | """ 229 | expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None) 230 | if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND: 231 | raise ProtocolError('Received unknown binary command: %s' % get_command_name(cmd_type)) 232 | 233 | expected_parameter_set = set(expected_cmd_params) 234 | received_parameter_set = set(cmd_args.keys()) 235 | if expected_parameter_set != received_parameter_set: 236 | raise ProtocolError('Received arguments did not match expected arguments: %r != %r' % (expected_parameter_set, received_parameter_set)) 237 | 238 | # Select the right expected magic 239 | if is_response: 240 | magic = MAGIC_RES_STRING 241 | else: 242 | magic = MAGIC_REQ_STRING 243 | 244 | # !NOTE! str should be replaced with bytes in Python 3.x 245 | # We will iterate in ORDER and str all our command arguments 246 | if compat.any(type(param_value) != str for param_value in cmd_args.itervalues()): 247 | raise ProtocolError('Received non-binary arguments: %r' % cmd_args) 248 | 249 | data_items = [cmd_args[param] for param in expected_cmd_params] 250 | 251 | # Now check that all but the last argument are free of \0 as per the protocol spec. 252 | if compat.any('\0' in argument for argument in data_items[:-1]): 253 | raise ProtocolError('Received arguments with NULL byte in non-final argument') 254 | 255 | binary_payload = NULL_CHAR.join(data_items) 256 | 257 | # Pack the header in the !4sII format then append the binary payload 258 | payload_size = len(binary_payload) 259 | packing_format = '!4sII%ds' % payload_size 260 | return struct.pack(packing_format, magic, cmd_type, payload_size, binary_payload) 261 | 262 | def parse_text_command(in_buffer): 263 | """Parse a text command and return a single line at a time""" 264 | cmd_type = None 265 | cmd_args = None 266 | cmd_len = 0 267 | if '\n' not in in_buffer: 268 | return cmd_type, cmd_args, cmd_len 269 | 270 | text_command, in_buffer = in_buffer.tostring().split('\n', 1) 271 | if NULL_CHAR in text_command: 272 | raise ProtocolError('Received unexpected character: %s' % text_command) 273 | 274 | # Fake gearman command "TEXT_COMMAND" used to process server admin client responses 275 | cmd_type = GEARMAN_COMMAND_TEXT_COMMAND 276 | cmd_args = dict(raw_text=text_command) 277 | cmd_len = len(text_command) + 1 278 | 279 | return cmd_type, cmd_args, cmd_len 280 | 281 | def pack_text_command(cmd_type, cmd_args): 282 | """Parse a text command and return a single line at a time""" 283 | if cmd_type != GEARMAN_COMMAND_TEXT_COMMAND: 284 | raise ProtocolError('Unknown cmd_type: Received %s, expecting %s' % (get_command_name(cmd_type), get_command_name(GEARMAN_COMMAND_TEXT_COMMAND))) 285 | 286 | cmd_line = cmd_args.get('raw_text') 287 | if cmd_line is None: 288 | raise ProtocolError('Did not receive arguments any valid arguments: %s' % cmd_args) 289 | 290 | return str(cmd_line) 291 | -------------------------------------------------------------------------------- /gearman/util.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Gearman Client Utils 4 | """ 5 | import errno 6 | import select as select_lib 7 | import time 8 | 9 | from gearman.constants import DEFAULT_GEARMAN_PORT 10 | 11 | class Stopwatch(object): 12 | """Timer class that keeps track of time remaining""" 13 | def __init__(self, time_remaining): 14 | if time_remaining is not None: 15 | self.stop_time = time.time() + time_remaining 16 | else: 17 | self.stop_time = None 18 | 19 | def get_time_remaining(self): 20 | if self.stop_time is None: 21 | return None 22 | 23 | current_time = time.time() 24 | if not self.has_time_remaining(current_time): 25 | return 0.0 26 | 27 | time_remaining = self.stop_time - current_time 28 | return time_remaining 29 | 30 | def has_time_remaining(self, time_comparison=None): 31 | time_comparison = time_comparison or self.get_time_remaining() 32 | if self.stop_time is None: 33 | return True 34 | 35 | return bool(time_comparison < self.stop_time) 36 | 37 | def disambiguate_server_parameter(hostport_tuple): 38 | """Takes either a tuple of (address, port) or a string of 'address:port' and disambiguates them for us""" 39 | if type(hostport_tuple) is tuple: 40 | gearman_host, gearman_port = hostport_tuple 41 | elif ':' in hostport_tuple: 42 | gearman_host, gearman_possible_port = hostport_tuple.split(':') 43 | gearman_port = int(gearman_possible_port) 44 | else: 45 | gearman_host = hostport_tuple 46 | gearman_port = DEFAULT_GEARMAN_PORT 47 | 48 | return gearman_host, gearman_port 49 | 50 | def select(rlist, wlist, xlist, timeout=None): 51 | """Behave similar to select.select, except ignoring certain types of exceptions""" 52 | rd_list = [] 53 | wr_list = [] 54 | ex_list = [] 55 | 56 | select_args = [rlist, wlist, xlist] 57 | if timeout is not None: 58 | select_args.append(timeout) 59 | 60 | try: 61 | rd_list, wr_list, ex_list = select_lib.select(*select_args) 62 | except select_lib.error, exc: 63 | # Ignore interrupted system call, reraise anything else 64 | if exc[0] != errno.EINTR: 65 | raise 66 | 67 | return rd_list, wr_list, ex_list 68 | 69 | def unlist(given_list): 70 | """Convert the (possibly) single item list into a single item""" 71 | list_size = len(given_list) 72 | if list_size == 0: 73 | return None 74 | elif list_size == 1: 75 | return given_list[0] 76 | else: 77 | raise ValueError(list_size) 78 | -------------------------------------------------------------------------------- /gearman/worker.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import sys 4 | 5 | from gearman import compat 6 | from gearman.connection_manager import GearmanConnectionManager 7 | from gearman.worker_handler import GearmanWorkerCommandHandler 8 | from gearman.errors import ConnectionError 9 | 10 | gearman_logger = logging.getLogger(__name__) 11 | 12 | POLL_TIMEOUT_IN_SECONDS = 60.0 13 | 14 | class GearmanWorker(GearmanConnectionManager): 15 | """ 16 | GearmanWorker :: Interface to accept jobs from a Gearman server 17 | """ 18 | command_handler_class = GearmanWorkerCommandHandler 19 | 20 | def __init__(self, host_list=None): 21 | super(GearmanWorker, self).__init__(host_list=host_list) 22 | 23 | self.randomized_connections = None 24 | 25 | self.worker_abilities = {} 26 | self.worker_client_id = None 27 | self.command_handler_holding_job_lock = None 28 | 29 | self._update_initial_state() 30 | 31 | def _update_initial_state(self): 32 | self.handler_initial_state['abilities'] = self.worker_abilities.keys() 33 | self.handler_initial_state['client_id'] = self.worker_client_id 34 | 35 | ######################################################## 36 | ##### Public methods for general GearmanWorker use ##### 37 | ######################################################## 38 | def register_task(self, task, callback_function): 39 | """Register a function with this worker 40 | 41 | def function_callback(calling_gearman_worker, current_job): 42 | return current_job.data 43 | """ 44 | self.worker_abilities[task] = callback_function 45 | self._update_initial_state() 46 | 47 | for command_handler in self.handler_to_connection_map.iterkeys(): 48 | command_handler.set_abilities(self.handler_initial_state['abilities']) 49 | 50 | return task 51 | 52 | def unregister_task(self, task): 53 | """Unregister a function with worker""" 54 | self.worker_abilities.pop(task, None) 55 | self._update_initial_state() 56 | 57 | for command_handler in self.handler_to_connection_map.iterkeys(): 58 | command_handler.set_abilities(self.handler_initial_state['abilities']) 59 | 60 | return task 61 | 62 | def set_client_id(self, client_id): 63 | """Notify the server that we should be identified as this client ID""" 64 | self.worker_client_id = client_id 65 | self._update_initial_state() 66 | 67 | for command_handler in self.handler_to_connection_map.iterkeys(): 68 | command_handler.set_client_id(self.handler_initial_state['client_id']) 69 | 70 | return client_id 71 | 72 | def work(self, poll_timeout=POLL_TIMEOUT_IN_SECONDS): 73 | """Loop indefinitely, complete tasks from all connections.""" 74 | continue_working = True 75 | worker_connections = [] 76 | 77 | # We're going to track whether a previous call to our closure indicated 78 | # we were processing a job. This is just a list of possibly a single 79 | # element indicating we had a job. It's a list so that through the 80 | # magic of closures we can reference and write to it each call. 81 | # This is all so that we can determine when we've finished processing a job 82 | # correctly. 83 | had_job = [] 84 | 85 | def continue_while_connections_alive(any_activity): 86 | if had_job and not self.has_job_lock(): 87 | return self.after_poll(any_activity) and self.after_job() 88 | 89 | del had_job[:] 90 | if self.has_job_lock(): 91 | had_job.append(True) 92 | 93 | return self.after_poll(any_activity) 94 | 95 | # Shuffle our connections after the poll timeout 96 | while continue_working: 97 | worker_connections = self.establish_worker_connections() 98 | continue_working = self.poll_connections_until_stopped(worker_connections, continue_while_connections_alive, timeout=poll_timeout) 99 | 100 | # If we were kicked out of the worker loop, we should shutdown all our connections 101 | for current_connection in worker_connections: 102 | current_connection.close() 103 | 104 | def shutdown(self): 105 | self.command_handler_holding_job_lock = None 106 | super(GearmanWorker, self).shutdown() 107 | 108 | ############################################################### 109 | ## Methods to override when dealing with connection polling ## 110 | ############################################################## 111 | def establish_worker_connections(self): 112 | """Return a shuffled list of connections that are alive, and try to reconnect to dead connections if necessary.""" 113 | self.randomized_connections = list(self.connection_list) 114 | random.shuffle(self.randomized_connections) 115 | 116 | output_connections = [] 117 | for current_connection in self.randomized_connections: 118 | try: 119 | valid_connection = self.establish_connection(current_connection) 120 | output_connections.append(valid_connection) 121 | except ConnectionError: 122 | pass 123 | 124 | return output_connections 125 | 126 | def after_poll(self, any_activity): 127 | """Polling callback to notify any outside listeners whats going on with the GearmanWorker. 128 | 129 | Return True to continue polling, False to exit the work loop""" 130 | return True 131 | 132 | def after_job(self): 133 | """Callback to notify any outside listeners that a GearmanWorker has completed the current job. 134 | 135 | This is useful for accomplishing work or stopping the GearmanWorker in between jobs. 136 | 137 | Return True to continue polling, False to exit the work loop 138 | """ 139 | return True 140 | 141 | def handle_error(self, current_connection): 142 | """If we discover that a connection has a problem, we better release the job lock""" 143 | current_handler = self.connection_to_handler_map.get(current_connection) 144 | if current_handler: 145 | self.set_job_lock(current_handler, lock=False) 146 | 147 | super(GearmanWorker, self).handle_error(current_connection) 148 | 149 | ############################################################# 150 | ## Public methods so Gearman jobs can send Gearman updates ## 151 | ############################################################# 152 | def _get_handler_for_job(self, current_job): 153 | return self.connection_to_handler_map[current_job.connection] 154 | 155 | def wait_until_updates_sent(self, multiple_gearman_jobs, poll_timeout=None): 156 | connection_set = set([current_job.connection for current_job in multiple_gearman_jobs]) 157 | def continue_while_updates_pending(any_activity): 158 | return compat.any(current_connection.writable() for current_connection in connection_set) 159 | 160 | self.poll_connections_until_stopped(connection_set, continue_while_updates_pending, timeout=poll_timeout) 161 | 162 | def send_job_status(self, current_job, numerator, denominator, poll_timeout=None): 163 | """Send a Gearman JOB_STATUS update for an inflight job""" 164 | current_handler = self._get_handler_for_job(current_job) 165 | current_handler.send_job_status(current_job, numerator=numerator, denominator=denominator) 166 | 167 | self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) 168 | 169 | def send_job_complete(self, current_job, data, poll_timeout=None): 170 | current_handler = self._get_handler_for_job(current_job) 171 | current_handler.send_job_complete(current_job, data=data) 172 | 173 | self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) 174 | 175 | def send_job_failure(self, current_job, poll_timeout=None): 176 | """Removes a job from the queue if its backgrounded""" 177 | current_handler = self._get_handler_for_job(current_job) 178 | current_handler.send_job_failure(current_job) 179 | 180 | self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) 181 | 182 | def send_job_exception(self, current_job, data, poll_timeout=None): 183 | """Removes a job from the queue if its backgrounded""" 184 | # Using GEARMAND_COMMAND_WORK_EXCEPTION is not recommended at time of this writing [2010-02-24] 185 | # http://groups.google.com/group/gearman/browse_thread/thread/5c91acc31bd10688/529e586405ed37fe 186 | # 187 | current_handler = self._get_handler_for_job(current_job) 188 | current_handler.send_job_exception(current_job, data=data) 189 | current_handler.send_job_failure(current_job) 190 | 191 | self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) 192 | 193 | def send_job_data(self, current_job, data, poll_timeout=None): 194 | """Send a Gearman JOB_DATA update for an inflight job""" 195 | current_handler = self._get_handler_for_job(current_job) 196 | current_handler.send_job_data(current_job, data=data) 197 | 198 | self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) 199 | 200 | def send_job_warning(self, current_job, data, poll_timeout=None): 201 | """Send a Gearman JOB_WARNING update for an inflight job""" 202 | current_handler = self._get_handler_for_job(current_job) 203 | current_handler.send_job_warning(current_job, data=data) 204 | 205 | self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) 206 | 207 | ##################################################### 208 | ##### Callback methods for GearmanWorkerHandler ##### 209 | ##################################################### 210 | def create_job(self, command_handler, job_handle, task, unique, data): 211 | """Create a new job using our self.job_class""" 212 | current_connection = self.handler_to_connection_map[command_handler] 213 | return self.job_class(current_connection, job_handle, task, unique, data) 214 | 215 | def on_job_execute(self, current_job): 216 | try: 217 | function_callback = self.worker_abilities[current_job.task] 218 | job_result = function_callback(self, current_job) 219 | except Exception: 220 | return self.on_job_exception(current_job, sys.exc_info()) 221 | 222 | return self.on_job_complete(current_job, job_result) 223 | 224 | def on_job_exception(self, current_job, exc_info): 225 | self.send_job_failure(current_job) 226 | return False 227 | 228 | def on_job_complete(self, current_job, job_result): 229 | self.send_job_complete(current_job, job_result) 230 | return True 231 | 232 | def set_job_lock(self, command_handler, lock): 233 | """Set a worker level job lock so we don't try to hold onto 2 jobs at anytime""" 234 | if command_handler not in self.handler_to_connection_map: 235 | return False 236 | 237 | failed_lock = bool(lock and self.command_handler_holding_job_lock is not None) 238 | failed_unlock = bool(not lock and self.command_handler_holding_job_lock != command_handler) 239 | 240 | # If we've already been locked, we should say the lock failed 241 | # If we're attempting to unlock something when we don't have a lock, we're in a bad state 242 | if failed_lock or failed_unlock: 243 | return False 244 | 245 | if lock: 246 | self.command_handler_holding_job_lock = command_handler 247 | else: 248 | self.command_handler_holding_job_lock = None 249 | 250 | return True 251 | 252 | def has_job_lock(self): 253 | return bool(self.command_handler_holding_job_lock is not None) 254 | 255 | def check_job_lock(self, command_handler): 256 | """Check to see if we hold the job lock""" 257 | return bool(self.command_handler_holding_job_lock == command_handler) 258 | -------------------------------------------------------------------------------- /gearman/worker_handler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from gearman.command_handler import GearmanCommandHandler 4 | from gearman.errors import InvalidWorkerState 5 | from gearman.protocol import GEARMAN_COMMAND_PRE_SLEEP, GEARMAN_COMMAND_RESET_ABILITIES, GEARMAN_COMMAND_CAN_DO, GEARMAN_COMMAND_SET_CLIENT_ID, GEARMAN_COMMAND_GRAB_JOB_UNIQ, \ 6 | GEARMAN_COMMAND_WORK_STATUS, GEARMAN_COMMAND_WORK_COMPLETE, GEARMAN_COMMAND_WORK_FAIL, GEARMAN_COMMAND_WORK_EXCEPTION, GEARMAN_COMMAND_WORK_WARNING, GEARMAN_COMMAND_WORK_DATA 7 | 8 | gearman_logger = logging.getLogger(__name__) 9 | 10 | class GearmanWorkerCommandHandler(GearmanCommandHandler): 11 | """GearmanWorker state machine on a per connection basis 12 | 13 | A worker can be in the following distinct states: 14 | SLEEP -> Doing nothing, can be awoken 15 | AWAKE -> Transitional state (for NOOP) 16 | AWAITING_JOB -> Holding worker level job lock and awaiting a server response 17 | EXECUTING_JOB -> Transitional state (for ASSIGN_JOB) 18 | """ 19 | def __init__(self, connection_manager=None): 20 | super(GearmanWorkerCommandHandler, self).__init__(connection_manager=connection_manager) 21 | 22 | self._handler_abilities = [] 23 | self._client_id = None 24 | 25 | def initial_state(self, abilities=None, client_id=None): 26 | self.set_client_id(client_id) 27 | self.set_abilities(abilities) 28 | 29 | self._sleep() 30 | 31 | ################################################################## 32 | ##### Public interface methods to be called by GearmanWorker ##### 33 | ################################################################## 34 | def set_abilities(self, connection_abilities_list): 35 | assert type(connection_abilities_list) in (list, tuple) 36 | self._handler_abilities = connection_abilities_list 37 | 38 | self.send_command(GEARMAN_COMMAND_RESET_ABILITIES) 39 | for task in self._handler_abilities: 40 | self.send_command(GEARMAN_COMMAND_CAN_DO, task=task) 41 | 42 | def set_client_id(self, client_id): 43 | self._client_id = client_id 44 | 45 | if self._client_id is not None: 46 | self.send_command(GEARMAN_COMMAND_SET_CLIENT_ID, client_id=self._client_id) 47 | 48 | ############################################################### 49 | #### Convenience methods for typical gearman jobs to call ##### 50 | ############################################################### 51 | def send_job_status(self, current_job, numerator, denominator): 52 | assert type(numerator) in (int, float), 'Numerator must be a numeric value' 53 | assert type(denominator) in (int, float), 'Denominator must be a numeric value' 54 | self.send_command(GEARMAN_COMMAND_WORK_STATUS, job_handle=current_job.handle, numerator=str(numerator), denominator=str(denominator)) 55 | 56 | def send_job_complete(self, current_job, data): 57 | """Removes a job from the queue if its backgrounded""" 58 | self.send_command(GEARMAN_COMMAND_WORK_COMPLETE, job_handle=current_job.handle, data=self.encode_data(data)) 59 | 60 | def send_job_failure(self, current_job): 61 | """Removes a job from the queue if its backgrounded""" 62 | self.send_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=current_job.handle) 63 | 64 | def send_job_exception(self, current_job, data): 65 | # Using GEARMAND_COMMAND_WORK_EXCEPTION is not recommended at time of this writing [2010-02-24] 66 | # http://groups.google.com/group/gearman/browse_thread/thread/5c91acc31bd10688/529e586405ed37fe 67 | # 68 | self.send_command(GEARMAN_COMMAND_WORK_EXCEPTION, job_handle=current_job.handle, data=self.encode_data(data)) 69 | 70 | def send_job_data(self, current_job, data): 71 | self.send_command(GEARMAN_COMMAND_WORK_DATA, job_handle=current_job.handle, data=self.encode_data(data)) 72 | 73 | def send_job_warning(self, current_job, data): 74 | self.send_command(GEARMAN_COMMAND_WORK_WARNING, job_handle=current_job.handle, data=self.encode_data(data)) 75 | 76 | ########################################################### 77 | ### Callbacks when we receive a command from the server ### 78 | ########################################################### 79 | def _grab_job(self): 80 | self.send_command(GEARMAN_COMMAND_GRAB_JOB_UNIQ) 81 | 82 | def _sleep(self): 83 | self.send_command(GEARMAN_COMMAND_PRE_SLEEP) 84 | 85 | def _check_job_lock(self): 86 | return self.connection_manager.check_job_lock(self) 87 | 88 | def _acquire_job_lock(self): 89 | return self.connection_manager.set_job_lock(self, lock=True) 90 | 91 | def _release_job_lock(self): 92 | if not self.connection_manager.set_job_lock(self, lock=False): 93 | raise InvalidWorkerState("Unable to release job lock for %r" % self) 94 | 95 | return True 96 | 97 | def recv_noop(self): 98 | """Transition from being SLEEP --> AWAITING_JOB / SLEEP 99 | 100 | AWAITING_JOB -> AWAITING_JOB :: Noop transition, we're already awaiting a job 101 | SLEEP -> AWAKE -> AWAITING_JOB :: Transition if we can acquire the worker job lock 102 | SLEEP -> AWAKE -> SLEEP :: Transition if we can NOT acquire a worker job lock 103 | """ 104 | if self._check_job_lock(): 105 | pass 106 | elif self._acquire_job_lock(): 107 | self._grab_job() 108 | else: 109 | self._sleep() 110 | 111 | return True 112 | 113 | def recv_no_job(self): 114 | """Transition from being AWAITING_JOB --> SLEEP 115 | 116 | AWAITING_JOB -> SLEEP :: Always transition to sleep if we have nothing to do 117 | """ 118 | self._release_job_lock() 119 | self._sleep() 120 | 121 | return True 122 | 123 | def recv_job_assign_uniq(self, job_handle, task, unique, data): 124 | """Transition from being AWAITING_JOB --> EXECUTE_JOB --> SLEEP 125 | 126 | AWAITING_JOB -> EXECUTE_JOB -> SLEEP :: Always transition once we're given a job 127 | """ 128 | assert task in self._handler_abilities, '%s not found in %r' % (task, self._handler_abilities) 129 | 130 | # After this point, we know this connection handler is holding onto the job lock so we don't need to acquire it again 131 | if not self.connection_manager.check_job_lock(self): 132 | raise InvalidWorkerState("Received a job when we weren't expecting one") 133 | 134 | gearman_job = self.connection_manager.create_job(self, job_handle, task, unique, self.decode_data(data)) 135 | 136 | # Create a new job 137 | self.connection_manager.on_job_execute(gearman_job) 138 | 139 | # Release the job lock once we're doing and go back to sleep 140 | self._release_job_lock() 141 | self._sleep() 142 | 143 | return True 144 | 145 | def recv_job_assign(self, job_handle, task, data): 146 | """JOB_ASSIGN and JOB_ASSIGN_UNIQ are essentially the same""" 147 | return self.recv_job_assign_uniq(job_handle=job_handle, task=task, unique=None, data=data) 148 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_sphinx] 2 | source-dir = docs/ 3 | build-dir = docs/_build 4 | all_files = 1 5 | 6 | [upload_sphinx] 7 | upload-dir = docs/_build/html 8 | 9 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | 5 | from gearman import __version__ as version 6 | 7 | setup( 8 | name = 'gearman', 9 | version = version, 10 | author = 'Matthew Tai', 11 | author_email = 'mtai@yelp.com', 12 | description = 'Gearman API - Client, worker, and admin client interfaces', 13 | long_description=open('README.txt').read(), 14 | url = 'http://github.com/Yelp/python-gearman/', 15 | packages = ['gearman'], 16 | license='Apache', 17 | classifiers = [ 18 | 'Development Status :: 5 - Production/Stable', 19 | 'Intended Audience :: Developers', 20 | 'License :: OSI Approved :: Apache Software License', 21 | 'Natural Language :: English', 22 | 'Operating System :: OS Independent', 23 | 'Programming Language :: Python', 24 | 'Programming Language :: Python :: 2.4', 25 | 'Programming Language :: Python :: 2.5', 26 | 'Programming Language :: Python :: 2.6', 27 | 'Programming Language :: Python :: 2.7', 28 | 'Topic :: Software Development :: Libraries :: Python Modules', 29 | ], 30 | ) 31 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yelp/python-gearman/5351771e51f689bd1998c29d5caa55e777a0b40f/tests/__init__.py -------------------------------------------------------------------------------- /tests/_core_testing.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import random 3 | import unittest 4 | 5 | import gearman.util 6 | from gearman.command_handler import GearmanCommandHandler 7 | from gearman.connection import GearmanConnection 8 | from gearman.connection_manager import GearmanConnectionManager, NoopEncoder 9 | 10 | from gearman.constants import PRIORITY_NONE, PRIORITY_HIGH, PRIORITY_LOW, DEFAULT_GEARMAN_PORT, JOB_UNKNOWN, JOB_CREATED 11 | from gearman.errors import ConnectionError 12 | from gearman.job import GearmanJob, GearmanJobRequest 13 | from gearman.protocol import get_command_name 14 | 15 | class MockGearmanConnection(GearmanConnection): 16 | def __init__(self, host=None, port=DEFAULT_GEARMAN_PORT): 17 | host = host or '__testing_host__' 18 | super(MockGearmanConnection, self).__init__(host=host, port=port) 19 | 20 | self._fail_on_bind = False 21 | self._fail_on_read = False 22 | self._fail_on_write = False 23 | 24 | def _create_client_socket(self): 25 | if self._fail_on_bind: 26 | self.throw_exception(message='mock bind failure') 27 | 28 | def read_data_from_socket(self): 29 | if self._fail_on_read: 30 | self.throw_exception(message='mock read failure') 31 | 32 | def send_data_to_socket(self): 33 | if self._fail_on_write: 34 | self.throw_exception(message='mock write failure') 35 | 36 | def fileno(self): 37 | # 73 is the best number, so why not? 38 | return 73 39 | 40 | def __repr__(self): 41 | return (' (%s)' % 42 | (self.gearman_host, self.gearman_port, self.connected, id(self))) 43 | 44 | class MockGearmanConnectionManager(GearmanConnectionManager): 45 | """Handy mock client base to test Worker/Client/Abstract ClientBases""" 46 | def poll_connections_once(self, poller, connection_map, timeout=None): 47 | return set(), set(), set() 48 | 49 | def _register_connections_with_poller(self, connections, poller): 50 | pass 51 | 52 | class _GearmanAbstractTest(unittest.TestCase): 53 | connection_class = MockGearmanConnection 54 | connection_manager_class = MockGearmanConnectionManager 55 | command_handler_class = None 56 | 57 | job_class = GearmanJob 58 | job_request_class = GearmanJobRequest 59 | 60 | def setUp(self): 61 | # Create a new MockGearmanTestClient on the fly 62 | self.setup_connection_manager() 63 | self.setup_connection() 64 | self.setup_command_handler() 65 | 66 | def setup_connection_manager(self): 67 | testing_attributes = {'command_handler_class': self.command_handler_class, 'connection_class': self.connection_class} 68 | testing_client_class = type('MockGearmanTestingClient', (self.connection_manager_class, ), testing_attributes) 69 | 70 | self.connection_manager = testing_client_class() 71 | 72 | def setup_connection(self): 73 | self.connection = self.connection_class() 74 | self.connection_manager.connection_list = [self.connection] 75 | 76 | def setup_command_handler(self): 77 | self.connection_manager.establish_connection(self.connection) 78 | self.command_handler = self.connection_manager.connection_to_handler_map[self.connection] 79 | 80 | def generate_job(self): 81 | return self.job_class(self.connection, handle=str(random.random()), task='__test_ability__', unique=str(random.random()), data=str(random.random())) 82 | 83 | def generate_job_dict(self): 84 | current_job = self.generate_job() 85 | return current_job.to_dict() 86 | 87 | def generate_job_request(self, priority=PRIORITY_NONE, background=False): 88 | job_handle = str(random.random()) 89 | current_job = self.job_class(connection=self.connection, handle=job_handle, task='__test_ability__', unique=str(random.random()), data=str(random.random())) 90 | current_request = self.job_request_class(current_job, initial_priority=priority, background=background) 91 | 92 | self.assertEqual(current_request.state, JOB_UNKNOWN) 93 | 94 | return current_request 95 | 96 | def assert_jobs_equal(self, job_actual, job_expected): 97 | # Validates that GearmanJobs are essentially equal 98 | self.assertEqual(job_actual.handle, job_expected.handle) 99 | self.assertEqual(job_actual.task, job_expected.task) 100 | self.assertEqual(job_actual.unique, job_expected.unique) 101 | self.assertEqual(job_actual.data, job_expected.data) 102 | 103 | def assert_sent_command(self, expected_cmd_type, **expected_cmd_args): 104 | # Make sure any commands we're passing through the CommandHandler gets properly passed through to the client base 105 | client_cmd_type, client_cmd_args = self.connection._outgoing_commands.popleft() 106 | self.assert_commands_equal(client_cmd_type, expected_cmd_type) 107 | self.assertEqual(client_cmd_args, expected_cmd_args) 108 | 109 | def assert_no_pending_commands(self): 110 | self.assertEqual(self.connection._outgoing_commands, collections.deque()) 111 | 112 | def assert_commands_equal(self, cmd_type_actual, cmd_type_expected): 113 | self.assertEqual(get_command_name(cmd_type_actual), get_command_name(cmd_type_expected)) 114 | -------------------------------------------------------------------------------- /tests/admin_client_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from gearman.admin_client import GearmanAdminClient, ECHO_STRING 4 | from gearman.admin_client_handler import GearmanAdminClientCommandHandler 5 | 6 | from gearman.errors import InvalidAdminClientState, ProtocolError 7 | from gearman.protocol import GEARMAN_COMMAND_ECHO_RES, GEARMAN_COMMAND_ECHO_REQ, GEARMAN_COMMAND_TEXT_COMMAND, \ 8 | GEARMAN_SERVER_COMMAND_STATUS, GEARMAN_SERVER_COMMAND_VERSION, GEARMAN_SERVER_COMMAND_WORKERS, GEARMAN_SERVER_COMMAND_MAXQUEUE, GEARMAN_SERVER_COMMAND_SHUTDOWN 9 | 10 | from tests._core_testing import _GearmanAbstractTest, MockGearmanConnectionManager, MockGearmanConnection 11 | 12 | class MockGearmanAdminClient(GearmanAdminClient, MockGearmanConnectionManager): 13 | pass 14 | 15 | class CommandHandlerStateMachineTest(_GearmanAbstractTest): 16 | """Test the public interface a GearmanWorker may need to call in order to update state on a GearmanWorkerCommandHandler""" 17 | connection_manager_class = MockGearmanAdminClient 18 | command_handler_class = GearmanAdminClientCommandHandler 19 | 20 | def setUp(self): 21 | super(CommandHandlerStateMachineTest, self).setUp() 22 | self.connection_manager.current_connection = self.connection 23 | self.connection_manager.current_handler = self.command_handler 24 | 25 | def test_send_illegal_server_commands(self): 26 | self.assertRaises(ProtocolError, self.send_server_command, "This is not a server command") 27 | 28 | def test_ping_server(self): 29 | self.command_handler.send_echo_request(ECHO_STRING) 30 | self.assert_sent_command(GEARMAN_COMMAND_ECHO_REQ, data=ECHO_STRING) 31 | self.assertEqual(self.command_handler._sent_commands[0], GEARMAN_COMMAND_ECHO_REQ) 32 | 33 | self.command_handler.recv_command(GEARMAN_COMMAND_ECHO_RES, data=ECHO_STRING) 34 | server_response = self.pop_response(GEARMAN_COMMAND_ECHO_REQ) 35 | self.assertEquals(server_response, ECHO_STRING) 36 | 37 | def test_state_and_protocol_errors_for_status(self): 38 | self.send_server_command(GEARMAN_SERVER_COMMAND_STATUS) 39 | 40 | # Test premature popping as this we aren't until ready we see the '.' 41 | self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_STATUS) 42 | 43 | # Test malformed server status 44 | self.assertRaises(ProtocolError, self.recv_server_response, '\t'.join(['12', 'IP-A', 'CLIENT-A'])) 45 | 46 | self.recv_server_response('.') 47 | 48 | server_response = self.pop_response(GEARMAN_SERVER_COMMAND_STATUS) 49 | self.assertEquals(server_response, tuple()) 50 | 51 | def test_multiple_status(self): 52 | self.send_server_command(GEARMAN_SERVER_COMMAND_STATUS) 53 | self.recv_server_response('\t'.join(['test_function', '1', '5', '17'])) 54 | self.recv_server_response('\t'.join(['another_function', '2', '4', '23'])) 55 | self.recv_server_response('.') 56 | 57 | server_response = self.pop_response(GEARMAN_SERVER_COMMAND_STATUS) 58 | self.assertEquals(len(server_response), 2) 59 | 60 | test_response, another_response = server_response 61 | self.assertEquals(test_response['task'], 'test_function') 62 | self.assertEquals(test_response['queued'], 1) 63 | self.assertEquals(test_response['running'], 5) 64 | self.assertEquals(test_response['workers'], 17) 65 | 66 | self.assertEquals(another_response['task'], 'another_function') 67 | self.assertEquals(another_response['queued'], 2) 68 | self.assertEquals(another_response['running'], 4) 69 | self.assertEquals(another_response['workers'], 23) 70 | 71 | def test_version(self): 72 | expected_version = '0.12345' 73 | 74 | self.send_server_command(GEARMAN_SERVER_COMMAND_VERSION) 75 | self.recv_server_response(expected_version) 76 | 77 | server_response = self.pop_response(GEARMAN_SERVER_COMMAND_VERSION) 78 | self.assertEquals(expected_version, server_response) 79 | 80 | def test_state_and_protocol_errors_for_workers(self): 81 | self.send_server_command(GEARMAN_SERVER_COMMAND_WORKERS) 82 | 83 | # Test premature popping as this we aren't until ready we see the '.' 84 | self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_WORKERS) 85 | 86 | # Test malformed responses 87 | self.assertRaises(ProtocolError, self.recv_server_response, ' '.join(['12', 'IP-A', 'CLIENT-A'])) 88 | self.assertRaises(ProtocolError, self.recv_server_response, ' '.join(['12', 'IP-A', 'CLIENT-A', 'NOT:'])) 89 | 90 | self.recv_server_response('.') 91 | 92 | server_response = self.pop_response(GEARMAN_SERVER_COMMAND_WORKERS) 93 | self.assertEquals(server_response, tuple()) 94 | 95 | def test_multiple_workers(self): 96 | self.send_server_command(GEARMAN_SERVER_COMMAND_WORKERS) 97 | self.recv_server_response(' '.join(['12', 'IP-A', 'CLIENT-A', ':', 'function-A', 'function-B'])) 98 | self.recv_server_response(' '.join(['13', 'IP-B', 'CLIENT-B', ':', 'function-C'])) 99 | self.recv_server_response('.') 100 | 101 | server_response = self.pop_response(GEARMAN_SERVER_COMMAND_WORKERS) 102 | self.assertEquals(len(server_response), 2) 103 | 104 | test_response, another_response = server_response 105 | self.assertEquals(test_response['file_descriptor'], '12') 106 | self.assertEquals(test_response['ip'], 'IP-A') 107 | self.assertEquals(test_response['client_id'], 'CLIENT-A') 108 | self.assertEquals(test_response['tasks'], ('function-A', 'function-B')) 109 | 110 | self.assertEquals(another_response['file_descriptor'], '13') 111 | self.assertEquals(another_response['ip'], 'IP-B') 112 | self.assertEquals(another_response['client_id'], 'CLIENT-B') 113 | self.assertEquals(another_response['tasks'], ('function-C', )) 114 | 115 | def test_maxqueue(self): 116 | self.send_server_command(GEARMAN_SERVER_COMMAND_MAXQUEUE) 117 | self.assertRaises(ProtocolError, self.recv_server_response, 'NOT OK') 118 | 119 | # Pop prematurely 120 | self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_MAXQUEUE) 121 | 122 | self.recv_server_response('OK') 123 | server_response = self.pop_response(GEARMAN_SERVER_COMMAND_MAXQUEUE) 124 | self.assertEquals(server_response, 'OK') 125 | 126 | def test_shutdown(self): 127 | self.send_server_command(GEARMAN_SERVER_COMMAND_SHUTDOWN) 128 | 129 | # Pop prematurely 130 | self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_SHUTDOWN) 131 | 132 | self.recv_server_response(None) 133 | server_response = self.pop_response(GEARMAN_SERVER_COMMAND_SHUTDOWN) 134 | self.assertEquals(server_response, None) 135 | 136 | def send_server_command(self, expected_command): 137 | self.command_handler.send_text_command(expected_command) 138 | expected_line = "%s\n" % expected_command 139 | self.assert_sent_command(GEARMAN_COMMAND_TEXT_COMMAND, raw_text=expected_line) 140 | 141 | self.assertEqual(self.command_handler._sent_commands[0], expected_command) 142 | 143 | def recv_server_response(self, response_line): 144 | self.command_handler.recv_command(GEARMAN_COMMAND_TEXT_COMMAND, raw_text=response_line) 145 | 146 | def pop_response(self, expected_command): 147 | server_cmd, server_response = self.command_handler.pop_response() 148 | self.assertEquals(expected_command, server_cmd) 149 | 150 | return server_response 151 | 152 | if __name__ == '__main__': 153 | unittest.main() 154 | -------------------------------------------------------------------------------- /tests/client_tests.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import random 3 | import unittest 4 | 5 | from gearman.client import GearmanClient 6 | from gearman.client_handler import GearmanClientCommandHandler 7 | 8 | from gearman.constants import PRIORITY_NONE, PRIORITY_HIGH, PRIORITY_LOW, JOB_UNKNOWN, JOB_PENDING, JOB_CREATED, JOB_FAILED, JOB_COMPLETE 9 | from gearman.errors import ExceededConnectionAttempts, ServerUnavailable, InvalidClientState 10 | from gearman.protocol import submit_cmd_for_background_priority, GEARMAN_COMMAND_STATUS_RES, GEARMAN_COMMAND_GET_STATUS, GEARMAN_COMMAND_JOB_CREATED, \ 11 | GEARMAN_COMMAND_WORK_STATUS, GEARMAN_COMMAND_WORK_FAIL, GEARMAN_COMMAND_WORK_COMPLETE, GEARMAN_COMMAND_WORK_DATA, GEARMAN_COMMAND_WORK_WARNING 12 | 13 | from tests._core_testing import _GearmanAbstractTest, MockGearmanConnectionManager, MockGearmanConnection 14 | 15 | class MockGearmanClient(GearmanClient, MockGearmanConnectionManager): 16 | pass 17 | 18 | class ClientTest(_GearmanAbstractTest): 19 | """Test the public client interface""" 20 | connection_manager_class = MockGearmanClient 21 | command_handler_class = GearmanClientCommandHandler 22 | 23 | def setUp(self): 24 | super(ClientTest, self).setUp() 25 | self.original_handle_connection_activity = self.connection_manager.handle_connection_activity 26 | 27 | def tearDown(self): 28 | super(ClientTest, self).tearDown() 29 | self.connection_manager.handle_connection_activity = self.original_handle_connection_activity 30 | 31 | def generate_job_request(self, submitted=True, accepted=True): 32 | current_request = super(ClientTest, self).generate_job_request() 33 | if submitted or accepted: 34 | self.connection_manager.establish_request_connection(current_request) 35 | self.command_handler.send_job_request(current_request) 36 | 37 | if submitted and accepted: 38 | self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_request.job.handle) 39 | self.assert_(current_request.job.handle in self.command_handler.handle_to_request_map) 40 | 41 | return current_request 42 | 43 | def test_establish_request_connection_complex(self): 44 | # Spin up a bunch of imaginary gearman connections 45 | failed_connection = MockGearmanConnection() 46 | failed_connection._fail_on_bind = True 47 | 48 | failed_then_retried_connection = MockGearmanConnection() 49 | failed_then_retried_connection._fail_on_bind = True 50 | 51 | good_connection = MockGearmanConnection() 52 | good_connection.connect() 53 | 54 | # Register all our connections 55 | self.connection_manager.connection_list = [failed_connection, failed_then_retried_connection, good_connection] 56 | 57 | # When we first create our request, our client shouldn't know anything about it 58 | current_request = self.generate_job_request(submitted=False, accepted=False) 59 | self.failIf(current_request in self.connection_manager.request_to_rotating_connection_queue) 60 | 61 | # Make sure that when we start up, we get our good connection 62 | chosen_connection = self.connection_manager.establish_request_connection(current_request) 63 | self.assertEqual(chosen_connection, good_connection) 64 | 65 | self.assertFalse(failed_connection.connected) 66 | self.assertFalse(failed_then_retried_connection.connected) 67 | self.assertTrue(good_connection.connected) 68 | 69 | # No state changed so we should still go to the correct connection 70 | chosen_connection = self.connection_manager.establish_request_connection(current_request) 71 | self.assertEqual(chosen_connection, good_connection) 72 | 73 | # Pretend like our good connection died so we'll need to choose somethign else 74 | good_connection._reset_connection() 75 | good_connection._fail_on_bind = True 76 | 77 | failed_then_retried_connection._fail_on_bind = False 78 | failed_then_retried_connection.connect() 79 | 80 | # Make sure we rotate good_connection and failed_connection out 81 | chosen_connection = self.connection_manager.establish_request_connection(current_request) 82 | self.assertEqual(chosen_connection, failed_then_retried_connection) 83 | self.assertFalse(failed_connection.connected) 84 | self.assertTrue(failed_then_retried_connection.connected) 85 | self.assertFalse(good_connection.connected) 86 | 87 | def test_establish_request_connection_dead(self): 88 | self.connection_manager.connection_list = [] 89 | self.connection_manager.command_handlers = {} 90 | 91 | current_request = self.generate_job_request(submitted=False, accepted=False) 92 | 93 | # No connections == death 94 | self.assertRaises(ServerUnavailable, self.connection_manager.establish_request_connection, current_request) 95 | 96 | # Spin up a bunch of imaginary gearman connections 97 | failed_connection = MockGearmanConnection() 98 | failed_connection._fail_on_bind = True 99 | self.connection_manager.connection_list.append(failed_connection) 100 | 101 | # All failed connections == death 102 | self.assertRaises(ServerUnavailable, self.connection_manager.establish_request_connection, current_request) 103 | 104 | def test_auto_retry_behavior(self): 105 | current_request = self.generate_job_request(submitted=False, accepted=False) 106 | 107 | def fail_then_create_jobs(rx_conns, wr_conns, ex_conns): 108 | if self.connection_manager.current_failures < self.connection_manager.expected_failures: 109 | self.connection_manager.current_failures += 1 110 | 111 | # We're going to down this connection and reset state 112 | self.assertTrue(self.connection.connected) 113 | self.connection_manager.handle_error(self.connection) 114 | self.assertFalse(self.connection.connected) 115 | 116 | # We're then going to IMMEDIATELY pull this connection back up 117 | # So we don't bail out of the "self.connection_manager.poll_connections_until_stopped" loop 118 | self.connection_manager.establish_connection(self.connection) 119 | else: 120 | self.assertEquals(current_request.state, JOB_PENDING) 121 | self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_request.job.handle) 122 | 123 | return rx_conns, wr_conns, ex_conns 124 | 125 | self.connection_manager.handle_connection_activity = fail_then_create_jobs 126 | self.connection_manager.expected_failures = 5 127 | 128 | # Now that we've setup our rety behavior, we need to reset the entire state of our experiment 129 | # First pass should succeed as we JUST touch our max attempts 130 | self.connection_manager.current_failures = current_request.connection_attempts = 0 131 | current_request.max_connection_attempts = self.connection_manager.expected_failures + 1 132 | current_request.state = JOB_UNKNOWN 133 | 134 | accepted_jobs = self.connection_manager.wait_until_jobs_accepted([current_request]) 135 | self.assertEquals(current_request.state, JOB_CREATED) 136 | self.assertEquals(current_request.connection_attempts, current_request.max_connection_attempts) 137 | 138 | # Second pass should fail as we JUST exceed our max attempts 139 | self.connection_manager.current_failures = current_request.connection_attempts = 0 140 | current_request.max_connection_attempts = self.connection_manager.expected_failures 141 | current_request.state = JOB_UNKNOWN 142 | 143 | self.assertRaises(ExceededConnectionAttempts, self.connection_manager.wait_until_jobs_accepted, [current_request]) 144 | self.assertEquals(current_request.state, JOB_UNKNOWN) 145 | self.assertEquals(current_request.connection_attempts, current_request.max_connection_attempts) 146 | 147 | def test_multiple_fg_job_submission(self): 148 | submitted_job_count = 5 149 | expected_job_list = [self.generate_job() for _ in xrange(submitted_job_count)] 150 | def mark_jobs_created(rx_conns, wr_conns, ex_conns): 151 | for current_job in expected_job_list: 152 | self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_job.handle) 153 | 154 | return rx_conns, wr_conns, ex_conns 155 | 156 | self.connection_manager.handle_connection_activity = mark_jobs_created 157 | 158 | job_dictionaries = [current_job.to_dict() for current_job in expected_job_list] 159 | 160 | # Test multiple job submission 161 | job_requests = self.connection_manager.submit_multiple_jobs(job_dictionaries, wait_until_complete=False) 162 | for current_request, expected_job in zip(job_requests, expected_job_list): 163 | current_job = current_request.job 164 | self.assert_jobs_equal(current_job, expected_job) 165 | 166 | self.assertEqual(current_request.priority, PRIORITY_NONE) 167 | self.assertEqual(current_request.background, False) 168 | self.assertEqual(current_request.state, JOB_CREATED) 169 | 170 | self.assertFalse(current_request.complete) 171 | 172 | def test_single_bg_job_submission(self): 173 | expected_job = self.generate_job() 174 | def mark_job_created(rx_conns, wr_conns, ex_conns): 175 | self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=expected_job.handle) 176 | return rx_conns, wr_conns, ex_conns 177 | 178 | self.connection_manager.handle_connection_activity = mark_job_created 179 | job_request = self.connection_manager.submit_job(expected_job.task, expected_job.data, unique=expected_job.unique, background=True, priority=PRIORITY_LOW, wait_until_complete=False) 180 | 181 | current_job = job_request.job 182 | self.assert_jobs_equal(current_job, expected_job) 183 | 184 | self.assertEqual(job_request.priority, PRIORITY_LOW) 185 | self.assertEqual(job_request.background, True) 186 | self.assertEqual(job_request.state, JOB_CREATED) 187 | 188 | self.assertTrue(job_request.complete) 189 | 190 | def test_single_fg_job_submission_timeout(self): 191 | expected_job = self.generate_job() 192 | def job_failed_submission(rx_conns, wr_conns, ex_conns): 193 | return rx_conns, wr_conns, ex_conns 194 | 195 | self.connection_manager.handle_connection_activity = job_failed_submission 196 | job_request = self.connection_manager.submit_job(expected_job.task, expected_job.data, unique=expected_job.unique, priority=PRIORITY_HIGH, poll_timeout=0.01) 197 | 198 | self.assertEqual(job_request.priority, PRIORITY_HIGH) 199 | self.assertEqual(job_request.background, False) 200 | self.assertEqual(job_request.state, JOB_PENDING) 201 | 202 | self.assertFalse(job_request.complete) 203 | self.assertTrue(job_request.timed_out) 204 | 205 | def test_wait_for_multiple_jobs_to_complete_or_timeout(self): 206 | completed_request = self.generate_job_request() 207 | failed_request = self.generate_job_request() 208 | timeout_request = self.generate_job_request() 209 | 210 | self.update_requests = True 211 | def multiple_job_updates(rx_conns, wr_conns, ex_conns): 212 | # Only give a single status update and have the 3rd job handle timeout 213 | if self.update_requests: 214 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_COMPLETE, job_handle=completed_request.job.handle, data='12345') 215 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=failed_request.job.handle) 216 | self.update_requests = False 217 | 218 | return rx_conns, wr_conns, ex_conns 219 | 220 | self.connection_manager.handle_connection_activity = multiple_job_updates 221 | 222 | finished_requests = self.connection_manager.wait_until_jobs_completed([completed_request, failed_request, timeout_request], poll_timeout=0.01) 223 | del self.update_requests 224 | 225 | finished_completed_request, finished_failed_request, finished_timeout_request = finished_requests 226 | 227 | self.assert_jobs_equal(finished_completed_request.job, completed_request.job) 228 | self.assertEqual(finished_completed_request.state, JOB_COMPLETE) 229 | self.assertEqual(finished_completed_request.result, '12345') 230 | self.assertFalse(finished_completed_request.timed_out) 231 | #self.assert_(finished_completed_request.job.handle not in self.command_handler.handle_to_request_map) 232 | 233 | self.assert_jobs_equal(finished_failed_request.job, failed_request.job) 234 | self.assertEqual(finished_failed_request.state, JOB_FAILED) 235 | self.assertEqual(finished_failed_request.result, None) 236 | self.assertFalse(finished_failed_request.timed_out) 237 | #self.assert_(finished_failed_request.job.handle not in self.command_handler.handle_to_request_map) 238 | 239 | self.assertEqual(finished_timeout_request.state, JOB_CREATED) 240 | self.assertEqual(finished_timeout_request.result, None) 241 | self.assertTrue(finished_timeout_request.timed_out) 242 | self.assert_(finished_timeout_request.job.handle in self.command_handler.handle_to_request_map) 243 | 244 | def test_get_job_status(self): 245 | single_request = self.generate_job_request() 246 | 247 | def retrieve_status(rx_conns, wr_conns, ex_conns): 248 | self.command_handler.recv_command(GEARMAN_COMMAND_STATUS_RES, job_handle=single_request.job.handle, known='1', running='0', numerator='0', denominator='1') 249 | return rx_conns, wr_conns, ex_conns 250 | 251 | self.connection_manager.handle_connection_activity = retrieve_status 252 | 253 | job_request = self.connection_manager.get_job_status(single_request) 254 | request_status = job_request.status 255 | self.failUnless(request_status) 256 | self.assertTrue(request_status['known']) 257 | self.assertFalse(request_status['running']) 258 | self.assertEqual(request_status['numerator'], 0) 259 | self.assertEqual(request_status['denominator'], 1) 260 | self.assertFalse(job_request.timed_out) 261 | 262 | def test_get_job_status_unknown(self): 263 | single_request = self.generate_job_request() 264 | current_handle = single_request.job.handle 265 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=current_handle) 266 | 267 | def retrieve_status(rx_conns, wr_conns, ex_conns): 268 | self.command_handler.recv_command(GEARMAN_COMMAND_STATUS_RES, job_handle=current_handle, known='0', running='0', numerator='0', denominator='1') 269 | return rx_conns, wr_conns, ex_conns 270 | 271 | self.connection_manager.handle_connection_activity = retrieve_status 272 | 273 | job_request = self.connection_manager.get_job_status(single_request) 274 | request_status = job_request.status 275 | self.failUnless(request_status) 276 | self.assertFalse(request_status['known']) 277 | self.assertFalse(request_status['running']) 278 | self.assertEqual(request_status['numerator'], 0) 279 | self.assertEqual(request_status['denominator'], 1) 280 | self.assertFalse(job_request.timed_out) 281 | #self.assert_(current_handle not in self.command_handler.handle_to_request_map) 282 | 283 | def test_get_job_status_timeout(self): 284 | single_request = self.generate_job_request() 285 | 286 | def retrieve_status_timeout(rx_conns, wr_conns, ex_conns): 287 | return rx_conns, wr_conns, ex_conns 288 | 289 | self.connection_manager.handle_connection_activity = retrieve_status_timeout 290 | 291 | job_request = self.connection_manager.get_job_status(single_request, poll_timeout=0.01) 292 | self.assertTrue(job_request.timed_out) 293 | 294 | 295 | class ClientCommandHandlerInterfaceTest(_GearmanAbstractTest): 296 | """Test the public interface a GearmanClient may need to call in order to update state on a GearmanClientCommandHandler""" 297 | connection_manager_class = MockGearmanClient 298 | command_handler_class = GearmanClientCommandHandler 299 | 300 | def test_send_job_request(self): 301 | current_request = self.generate_job_request() 302 | gearman_job = current_request.job 303 | 304 | for priority in (PRIORITY_NONE, PRIORITY_HIGH, PRIORITY_LOW): 305 | for background in (False, True): 306 | current_request.reset() 307 | current_request.priority = priority 308 | current_request.background = background 309 | 310 | self.command_handler.send_job_request(current_request) 311 | 312 | queued_request = self.command_handler.requests_awaiting_handles.popleft() 313 | self.assertEqual(queued_request, current_request) 314 | 315 | expected_cmd_type = submit_cmd_for_background_priority(background, priority) 316 | self.assert_sent_command(expected_cmd_type, task=gearman_job.task, data=gearman_job.data, unique=gearman_job.unique) 317 | 318 | def test_get_status_of_job(self): 319 | current_request = self.generate_job_request() 320 | 321 | self.command_handler.send_get_status_of_job(current_request) 322 | 323 | self.assert_sent_command(GEARMAN_COMMAND_GET_STATUS, job_handle=current_request.job.handle) 324 | 325 | 326 | class ClientCommandHandlerStateMachineTest(_GearmanAbstractTest): 327 | """Test single state transitions within a GearmanWorkerCommandHandler""" 328 | connection_manager_class = MockGearmanClient 329 | command_handler_class = GearmanClientCommandHandler 330 | 331 | def generate_job_request(self, submitted=True, accepted=True): 332 | current_request = super(ClientCommandHandlerStateMachineTest, self).generate_job_request() 333 | if submitted or accepted: 334 | self.command_handler.requests_awaiting_handles.append(current_request) 335 | current_request.state = JOB_PENDING 336 | 337 | if submitted and accepted: 338 | self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_request.job.handle) 339 | 340 | return current_request 341 | 342 | def test_received_job_created(self): 343 | current_request = self.generate_job_request(accepted=False) 344 | 345 | new_handle = str(random.random()) 346 | self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=new_handle) 347 | 348 | self.assertEqual(current_request.job.handle, new_handle) 349 | self.assertEqual(current_request.state, JOB_CREATED) 350 | self.assertEqual(self.command_handler.handle_to_request_map[new_handle], current_request) 351 | 352 | def test_received_job_created_out_of_order(self): 353 | self.assertEqual(self.command_handler.requests_awaiting_handles, collections.deque()) 354 | 355 | # Make sure we bail cuz we have an empty queue 356 | self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_JOB_CREATED, job_handle=None) 357 | 358 | def test_required_state_pending(self): 359 | current_request = self.generate_job_request(submitted=False, accepted=False) 360 | 361 | new_handle = str(random.random()) 362 | 363 | invalid_states = [JOB_UNKNOWN, JOB_CREATED, JOB_COMPLETE, JOB_FAILED] 364 | for bad_state in invalid_states: 365 | current_request.state = bad_state 366 | 367 | # We only want to check the state of request... not die if we don't have any pending requests 368 | self.command_handler.requests_awaiting_handles.append(current_request) 369 | 370 | self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_JOB_CREATED, job_handle=new_handle) 371 | 372 | def test_required_state_queued(self): 373 | current_request = self.generate_job_request() 374 | 375 | job_handle = current_request.job.handle 376 | new_data = str(random.random()) 377 | 378 | invalid_states = [JOB_UNKNOWN, JOB_PENDING, JOB_COMPLETE, JOB_FAILED] 379 | for bad_state in invalid_states: 380 | current_request.state = bad_state 381 | 382 | # All these commands expect to be in JOB_CREATED 383 | self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_DATA, job_handle=job_handle, data=new_data) 384 | 385 | self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_WARNING, job_handle=job_handle, data=new_data) 386 | 387 | self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_STATUS, job_handle=job_handle, numerator=0, denominator=1) 388 | 389 | self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_COMPLETE, job_handle=job_handle, data=new_data) 390 | 391 | self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_FAIL, job_handle=job_handle) 392 | 393 | def test_in_flight_work_updates(self): 394 | current_request = self.generate_job_request() 395 | 396 | job_handle = current_request.job.handle 397 | new_data = str(random.random()) 398 | 399 | # Test WORK_DATA 400 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_DATA, job_handle=job_handle, data=new_data) 401 | self.assertEqual(current_request.data_updates.popleft(), new_data) 402 | self.assertEqual(current_request.state, JOB_CREATED) 403 | 404 | # Test WORK_WARNING 405 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_WARNING, job_handle=job_handle, data=new_data) 406 | self.assertEqual(current_request.warning_updates.popleft(), new_data) 407 | self.assertEqual(current_request.state, JOB_CREATED) 408 | 409 | # Test WORK_STATUS 410 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_STATUS, job_handle=job_handle, numerator=0, denominator=1) 411 | 412 | self.assertEqual(current_request.status_updates.popleft(), (0, 1)) 413 | self.assertEqual(current_request.state, JOB_CREATED) 414 | 415 | def test_work_complete(self): 416 | current_request = self.generate_job_request() 417 | 418 | job_handle = current_request.job.handle 419 | new_data = str(random.random()) 420 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_COMPLETE, job_handle=job_handle, data=new_data) 421 | 422 | self.assertEqual(current_request.result, new_data) 423 | self.assertEqual(current_request.state, JOB_COMPLETE) 424 | 425 | def test_work_fail(self): 426 | current_request = self.generate_job_request() 427 | 428 | job_handle = current_request.job.handle 429 | new_data = str(random.random()) 430 | self.command_handler.recv_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=job_handle) 431 | 432 | self.assertEqual(current_request.state, JOB_FAILED) 433 | 434 | def test_status_request(self): 435 | current_request = self.generate_job_request() 436 | 437 | job_handle = current_request.job.handle 438 | 439 | self.assertEqual(current_request.status, {}) 440 | 441 | self.command_handler.recv_command(GEARMAN_COMMAND_STATUS_RES, job_handle=job_handle, known='1', running='1', numerator='0', denominator='1') 442 | 443 | self.assertEqual(current_request.status['handle'], job_handle) 444 | self.assertTrue(current_request.status['known']) 445 | self.assertTrue(current_request.status['running']) 446 | self.assertEqual(current_request.status['numerator'], 0) 447 | self.assertEqual(current_request.status['denominator'], 1) 448 | 449 | if __name__ == '__main__': 450 | unittest.main() 451 | -------------------------------------------------------------------------------- /tests/protocol_tests.py: -------------------------------------------------------------------------------- 1 | import array 2 | import struct 3 | import unittest 4 | 5 | from gearman import protocol 6 | 7 | from gearman.connection import GearmanConnection 8 | from gearman.constants import JOB_PENDING, JOB_CREATED, JOB_FAILED, JOB_COMPLETE 9 | from gearman.errors import ConnectionError, ServerUnavailable, ProtocolError 10 | 11 | from tests._core_testing import _GearmanAbstractTest 12 | 13 | class ProtocolBinaryCommandsTest(unittest.TestCase): 14 | ####################### 15 | # Begin parsing tests # 16 | ####################### 17 | def test_parsing_errors(self): 18 | malformed_command_buffer = "%sAAAABBBBCCCC" 19 | 20 | # Raise malformed magic exceptions 21 | self.assertRaises( 22 | ProtocolError, 23 | protocol.parse_binary_command, 24 | array.array("c", malformed_command_buffer % "DDDD") 25 | ) 26 | self.assertRaises( 27 | ProtocolError, 28 | protocol.parse_binary_command, 29 | array.array("c", malformed_command_buffer % protocol.MAGIC_RES_STRING), 30 | is_response=False 31 | ) 32 | self.assertRaises( 33 | ProtocolError, 34 | protocol.parse_binary_command, 35 | array.array("c", malformed_command_buffer % protocol.MAGIC_REQ_STRING), 36 | is_response=True 37 | ) 38 | 39 | # Raise unknown command errors 40 | unassigned_gearman_command = 1234 41 | unknown_command_buffer = struct.pack('!4sII', protocol.MAGIC_RES_STRING, unassigned_gearman_command, 0) 42 | unknown_command_buffer = array.array("c", unknown_command_buffer) 43 | self.assertRaises(ProtocolError, protocol.parse_binary_command, unknown_command_buffer) 44 | 45 | # Raise an error on our imaginary GEARMAN_COMMAND_TEXT_COMMAND 46 | imaginary_command_buffer = struct.pack('!4sII4s', protocol.MAGIC_RES_STRING, protocol.GEARMAN_COMMAND_TEXT_COMMAND, 4, 'ABCD') 47 | imaginary_command_buffer = array.array("c", imaginary_command_buffer) 48 | self.assertRaises(ProtocolError, protocol.parse_binary_command, imaginary_command_buffer) 49 | 50 | # Raise an error on receiving an unexpected payload 51 | unexpected_payload_command_buffer = struct.pack('!4sII4s', protocol.MAGIC_RES_STRING, protocol.GEARMAN_COMMAND_NOOP, 4, 'ABCD') 52 | unexpected_payload_command_buffer = array.array("c", unexpected_payload_command_buffer) 53 | self.assertRaises(ProtocolError, protocol.parse_binary_command, unexpected_payload_command_buffer) 54 | 55 | def test_parsing_request(self): 56 | # Test parsing a request for a job (server side parsing) 57 | grab_job_command_buffer = struct.pack('!4sII', protocol.MAGIC_REQ_STRING, protocol.GEARMAN_COMMAND_GRAB_JOB_UNIQ, 0) 58 | grab_job_command_buffer = array.array("c", grab_job_command_buffer) 59 | cmd_type, cmd_args, cmd_len = protocol.parse_binary_command(grab_job_command_buffer, is_response=False) 60 | self.assertEquals(cmd_type, protocol.GEARMAN_COMMAND_GRAB_JOB_UNIQ) 61 | self.assertEquals(cmd_args, dict()) 62 | self.assertEquals(cmd_len, len(grab_job_command_buffer)) 63 | 64 | def test_parsing_without_enough_data(self): 65 | # Test that we return with nothing to do... received a partial packet 66 | not_enough_data_command_buffer = struct.pack('!4s', protocol.MAGIC_RES_STRING) 67 | not_enough_data_command_buffer = array.array("c", not_enough_data_command_buffer) 68 | cmd_type, cmd_args, cmd_len = protocol.parse_binary_command(not_enough_data_command_buffer) 69 | self.assertEquals(cmd_type, None) 70 | self.assertEquals(cmd_args, None) 71 | self.assertEquals(cmd_len, 0) 72 | 73 | # Test that we return with nothing to do... received a partial packet (expected binary payload of size 4, got 0) 74 | not_enough_data_command_buffer = struct.pack('!4sII', protocol.MAGIC_RES_STRING, protocol.GEARMAN_COMMAND_ECHO_RES, 4) 75 | not_enough_data_command_buffer = array.array("c", not_enough_data_command_buffer) 76 | cmd_type, cmd_args, cmd_len = protocol.parse_binary_command(not_enough_data_command_buffer) 77 | self.assertEquals(cmd_type, None) 78 | self.assertEquals(cmd_args, None) 79 | self.assertEquals(cmd_len, 0) 80 | 81 | def test_parsing_no_args(self): 82 | noop_command_buffer = struct.pack('!4sII', protocol.MAGIC_RES_STRING, protocol.GEARMAN_COMMAND_NOOP, 0) 83 | noop_command_buffer = array.array("c", noop_command_buffer) 84 | cmd_type, cmd_args, cmd_len = protocol.parse_binary_command(noop_command_buffer) 85 | self.assertEquals(cmd_type, protocol.GEARMAN_COMMAND_NOOP) 86 | self.assertEquals(cmd_args, dict()) 87 | self.assertEquals(cmd_len, len(noop_command_buffer)) 88 | 89 | def test_parsing_single_arg(self): 90 | echoed_string = 'abcd' 91 | echo_command_buffer = struct.pack('!4sII4s', protocol.MAGIC_RES_STRING, protocol.GEARMAN_COMMAND_ECHO_RES, 4, echoed_string) 92 | echo_command_buffer = array.array("c", echo_command_buffer) 93 | cmd_type, cmd_args, cmd_len = protocol.parse_binary_command(echo_command_buffer) 94 | self.assertEquals(cmd_type, protocol.GEARMAN_COMMAND_ECHO_RES) 95 | self.assertEquals(cmd_args, dict(data=echoed_string)) 96 | self.assertEquals(cmd_len, len(echo_command_buffer)) 97 | 98 | def test_parsing_single_arg_with_extra_data(self): 99 | echoed_string = 'abcd' 100 | excess_bytes = 5 101 | excess_data = echoed_string + (protocol.NULL_CHAR * excess_bytes) 102 | excess_echo_command_buffer = struct.pack('!4sII9s', protocol.MAGIC_RES_STRING, protocol.GEARMAN_COMMAND_ECHO_RES, 4, excess_data) 103 | excess_echo_command_buffer = array.array("c", excess_echo_command_buffer) 104 | 105 | cmd_type, cmd_args, cmd_len = protocol.parse_binary_command(excess_echo_command_buffer) 106 | self.assertEquals(cmd_type, protocol.GEARMAN_COMMAND_ECHO_RES) 107 | self.assertEquals(cmd_args, dict(data=echoed_string)) 108 | self.assertEquals(cmd_len, len(excess_echo_command_buffer) - excess_bytes) 109 | 110 | def test_parsing_multiple_args(self): 111 | # Tests ordered argument processing and proper NULL_CHAR splitting 112 | expected_data = protocol.NULL_CHAR * 4 113 | binary_payload = protocol.NULL_CHAR.join(['test', 'function', 'identifier', expected_data]) 114 | payload_size = len(binary_payload) 115 | 116 | uniq_command_buffer = struct.pack('!4sII%ds' % payload_size, protocol.MAGIC_RES_STRING, protocol.GEARMAN_COMMAND_JOB_ASSIGN_UNIQ, payload_size, binary_payload) 117 | uniq_command_buffer = array.array("c", uniq_command_buffer) 118 | cmd_type, cmd_args, cmd_len = protocol.parse_binary_command(uniq_command_buffer) 119 | self.assertEquals(cmd_type, protocol.GEARMAN_COMMAND_JOB_ASSIGN_UNIQ) 120 | self.assertEquals(cmd_args, dict(job_handle='test', task='function', unique='identifier', data=expected_data)) 121 | self.assertEquals(cmd_len, len(uniq_command_buffer)) 122 | 123 | ####################### 124 | # Begin packing tests # 125 | ####################### 126 | def test_packing_errors(self): 127 | # Assert we get an unknown command 128 | cmd_type = 1234 129 | cmd_args = dict() 130 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 131 | 132 | # Assert we get a fake command 133 | cmd_type = protocol.GEARMAN_COMMAND_TEXT_COMMAND 134 | cmd_args = dict() 135 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 136 | 137 | # Assert we get arg mismatch, got 1, expecting 0 138 | cmd_type = protocol.GEARMAN_COMMAND_GRAB_JOB 139 | cmd_args = dict(extra='arguments') 140 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 141 | 142 | # Assert we get arg mismatch, got 0, expecting 1 143 | cmd_type = protocol.GEARMAN_COMMAND_JOB_CREATED 144 | cmd_args = dict() 145 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 146 | 147 | # Assert we get arg mismatch (name), got 1, expecting 1 148 | cmd_type = protocol.GEARMAN_COMMAND_JOB_CREATED 149 | cmd_args = dict(extra='arguments') 150 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 151 | 152 | # Assert we get a non-string argument 153 | cmd_type = protocol.GEARMAN_COMMAND_JOB_CREATED 154 | cmd_args = dict(job_handle=12345) 155 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 156 | 157 | # Assert we get a non-string argument (expecting BYTES) 158 | cmd_type = protocol.GEARMAN_COMMAND_JOB_CREATED 159 | cmd_args = dict(job_handle=unicode(12345)) 160 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 161 | 162 | # Assert we check for NULLs in all but the "last" argument, where last depends on the cmd_type. 163 | cmd_type = protocol.GEARMAN_COMMAND_SUBMIT_JOB 164 | cmd_args = dict(task='funct\x00ion', data='abcd', unique='12345') 165 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 166 | 167 | # Assert we check for NULLs in all but the "last" argument, where last depends on the cmd_type. 168 | cmd_type = protocol.GEARMAN_COMMAND_SUBMIT_JOB 169 | cmd_args = dict(task='function', data='ab\x00cd', unique='12345') 170 | protocol.pack_binary_command(cmd_type, cmd_args) # Should not raise, 'data' is last. 171 | 172 | # Assert we check for NULLs in all but the "last" argument, where last depends on the cmd_type. 173 | cmd_type = protocol.GEARMAN_COMMAND_SUBMIT_JOB 174 | cmd_args = dict(task='function', data='abcd', unique='123\x0045') 175 | self.assertRaises(ProtocolError, protocol.pack_binary_command, cmd_type, cmd_args) 176 | 177 | def test_packing_response(self): 178 | # Test packing a response for a job (server side packing) 179 | cmd_type = protocol.GEARMAN_COMMAND_NO_JOB 180 | cmd_args = dict() 181 | 182 | expected_command_buffer = struct.pack('!4sII', protocol.MAGIC_RES_STRING, cmd_type, 0) 183 | packed_command_buffer = protocol.pack_binary_command(cmd_type, cmd_args, is_response=True) 184 | self.assertEquals(packed_command_buffer, expected_command_buffer) 185 | 186 | def test_packing_no_arg(self): 187 | cmd_type = protocol.GEARMAN_COMMAND_NOOP 188 | cmd_args = dict() 189 | 190 | expected_command_buffer = struct.pack('!4sII', protocol.MAGIC_REQ_STRING, cmd_type, 0) 191 | packed_command_buffer = protocol.pack_binary_command(cmd_type, cmd_args) 192 | self.assertEquals(packed_command_buffer, expected_command_buffer) 193 | 194 | def test_packing_single_arg(self): 195 | cmd_type = protocol.GEARMAN_COMMAND_ECHO_REQ 196 | cmd_args = dict(data='abcde') 197 | 198 | expected_payload_size = len(cmd_args['data']) 199 | expected_format = '!4sII%ds' % expected_payload_size 200 | 201 | expected_command_buffer = struct.pack(expected_format, protocol.MAGIC_REQ_STRING, cmd_type, expected_payload_size, cmd_args['data']) 202 | packed_command_buffer = protocol.pack_binary_command(cmd_type, cmd_args) 203 | self.assertEquals(packed_command_buffer, expected_command_buffer) 204 | 205 | def test_packing_multiple_args(self): 206 | cmd_type = protocol.GEARMAN_COMMAND_SUBMIT_JOB 207 | cmd_args = dict(task='function', unique='12345', data='abcd') 208 | 209 | ordered_parameters = [cmd_args['task'], cmd_args['unique'], cmd_args['data']] 210 | 211 | expected_payload = protocol.NULL_CHAR.join(ordered_parameters) 212 | expected_payload_size = len(expected_payload) 213 | expected_format = '!4sII%ds' % expected_payload_size 214 | expected_command_buffer = struct.pack(expected_format, protocol.MAGIC_REQ_STRING, cmd_type, expected_payload_size, expected_payload) 215 | 216 | packed_command_buffer = protocol.pack_binary_command(cmd_type, cmd_args) 217 | self.assertEquals(packed_command_buffer, expected_command_buffer) 218 | 219 | class ProtocolTextCommandsTest(unittest.TestCase): 220 | ####################### 221 | # Begin parsing tests # 222 | ####################### 223 | def test_parsing_errors(self): 224 | received_data = array.array("c", "Hello\x00there\n") 225 | self.assertRaises(ProtocolError, protocol.parse_text_command, received_data) 226 | 227 | def test_parsing_without_enough_data(self): 228 | received_data = array.array("c", "Hello there") 229 | cmd_type, cmd_response, cmd_len = protocol.parse_text_command(received_data) 230 | self.assertEquals(cmd_type, None) 231 | self.assertEquals(cmd_response, None) 232 | self.assertEquals(cmd_len, 0) 233 | 234 | def test_parsing_single_line(self): 235 | received_data = array.array("c", "Hello there\n") 236 | cmd_type, cmd_response, cmd_len = protocol.parse_text_command(received_data) 237 | self.assertEquals(cmd_type, protocol.GEARMAN_COMMAND_TEXT_COMMAND) 238 | self.assertEquals(cmd_response, dict(raw_text=received_data.tostring().strip())) 239 | self.assertEquals(cmd_len, len(received_data)) 240 | 241 | def test_parsing_multi_line(self): 242 | sentence_one = array.array("c", "Hello there\n") 243 | sentence_two = array.array("c", "My name is bob\n") 244 | received_data = sentence_one + sentence_two 245 | 246 | cmd_type, cmd_response, cmd_len = protocol.parse_text_command(received_data) 247 | self.assertEquals(cmd_type, protocol.GEARMAN_COMMAND_TEXT_COMMAND) 248 | self.assertEquals(cmd_response, dict(raw_text=sentence_one.tostring().strip())) 249 | self.assertEquals(cmd_len, len(sentence_one)) 250 | 251 | def test_packing_errors(self): 252 | # Test bad command type 253 | cmd_type = protocol.GEARMAN_COMMAND_NOOP 254 | cmd_args = dict() 255 | self.assertRaises(ProtocolError, protocol.pack_text_command, cmd_type, cmd_args) 256 | 257 | # Test missing args 258 | cmd_type = protocol.GEARMAN_COMMAND_TEXT_COMMAND 259 | cmd_args = dict() 260 | self.assertRaises(ProtocolError, protocol.pack_text_command, cmd_type, cmd_args) 261 | 262 | # Test misnamed parameter dict 263 | cmd_type = protocol.GEARMAN_COMMAND_TEXT_COMMAND 264 | cmd_args = dict(bad_text='abcdefghij') 265 | self.assertRaises(ProtocolError, protocol.pack_text_command, cmd_type, cmd_args) 266 | 267 | ####################### 268 | # Begin packing tests # 269 | ####################### 270 | def test_packing_single_line(self): 271 | expected_string = 'Hello world' 272 | cmd_type = protocol.GEARMAN_COMMAND_TEXT_COMMAND 273 | cmd_args = dict(raw_text=expected_string) 274 | 275 | packed_command = protocol.pack_text_command(cmd_type, cmd_args) 276 | self.assertEquals(packed_command, expected_string) 277 | 278 | class GearmanConnectionTest(unittest.TestCase): 279 | """Tests the base CommandHandler class that underpins all other CommandHandlerTests""" 280 | def test_recv_command(self): 281 | pass 282 | 283 | class GearmanCommandHandlerTest(_GearmanAbstractTest): 284 | """Tests the base CommandHandler class that underpins all other CommandHandlerTests""" 285 | def _test_recv_command(self): 286 | # recv_echo_res and recv_error are predefined on the CommandHandler 287 | self.command_handler.recv_command(protocol.GEARMAN_COMMAND_NOOP) 288 | self.assert_recv_command(protocol.GEARMAN_COMMAND_NOOP) 289 | 290 | # The mock handler never implemented 'recv_all_yours' so we should get an attribute error here 291 | self.assertRaises(ValueError, self.command_handler.recv_command, protocol.GEARMAN_COMMAND_ALL_YOURS) 292 | 293 | def _test_send_command(self): 294 | self.command_handler.send_command(protocol.GEARMAN_COMMAND_NOOP) 295 | self.assert_sent_command(protocol.GEARMAN_COMMAND_NOOP) 296 | 297 | # The mock handler never implemented 'recv_all_yours' so we should get an attribute error here 298 | self.command_handler.send_command(protocol.GEARMAN_COMMAND_ECHO_REQ, text='hello world') 299 | self.assert_sent_command(protocol.GEARMAN_COMMAND_ECHO_REQ, text='hello world') 300 | 301 | def assert_recv_command(self, expected_cmd_type, **expected_cmd_args): 302 | cmd_type, cmd_args = self.command_handler.recv_command_queue.popleft() 303 | self.assert_commands_equal(cmd_type, expected_cmd_type) 304 | self.assertEqual(cmd_args, expected_cmd_args) 305 | 306 | def assert_sent_command(self, expected_cmd_type, **expected_cmd_args): 307 | # All commands should be sent via the CommandHandler 308 | handler_cmd_type, handler_cmd_args = self.command_handler.sent_command_queue.popleft() 309 | self.assert_commands_equal(handler_cmd_type, expected_cmd_type) 310 | self.assertEqual(handler_cmd_args, expected_cmd_args) 311 | 312 | super(GearmanCommandHandlerTest, self).assert_sent_command(expected_cmd_type, **expected_cmd_args) 313 | 314 | 315 | if __name__ == '__main__': 316 | unittest.main() 317 | -------------------------------------------------------------------------------- /tests/worker_tests.py: -------------------------------------------------------------------------------- 1 | import collections 2 | from gearman import compat 3 | import unittest 4 | 5 | from gearman.worker import GearmanWorker 6 | from gearman.worker_handler import GearmanWorkerCommandHandler 7 | 8 | from gearman.errors import ServerUnavailable, InvalidWorkerState 9 | from gearman.protocol import get_command_name, GEARMAN_COMMAND_RESET_ABILITIES, GEARMAN_COMMAND_CAN_DO, GEARMAN_COMMAND_SET_CLIENT_ID, \ 10 | GEARMAN_COMMAND_NOOP, GEARMAN_COMMAND_PRE_SLEEP, GEARMAN_COMMAND_NO_JOB, GEARMAN_COMMAND_GRAB_JOB_UNIQ, GEARMAN_COMMAND_JOB_ASSIGN_UNIQ, \ 11 | GEARMAN_COMMAND_WORK_STATUS, GEARMAN_COMMAND_WORK_FAIL, GEARMAN_COMMAND_WORK_COMPLETE, GEARMAN_COMMAND_WORK_DATA, GEARMAN_COMMAND_WORK_EXCEPTION, GEARMAN_COMMAND_WORK_WARNING 12 | 13 | from tests._core_testing import _GearmanAbstractTest, MockGearmanConnectionManager, MockGearmanConnection 14 | 15 | class MockGearmanWorker(MockGearmanConnectionManager, GearmanWorker): 16 | def __init__(self, *largs, **kwargs): 17 | super(MockGearmanWorker, self).__init__(*largs, **kwargs) 18 | self.worker_job_queues = compat.defaultdict(collections.deque) 19 | 20 | def on_job_execute(self, current_job): 21 | current_handler = self.connection_to_handler_map[current_job.connection] 22 | self.worker_job_queues[current_handler].append(current_job) 23 | 24 | class _GearmanAbstractWorkerTest(_GearmanAbstractTest): 25 | connection_manager_class = MockGearmanWorker 26 | command_handler_class = GearmanWorkerCommandHandler 27 | 28 | def setup_command_handler(self): 29 | super(_GearmanAbstractWorkerTest, self).setup_command_handler() 30 | self.assert_sent_abilities([]) 31 | self.assert_sent_command(GEARMAN_COMMAND_PRE_SLEEP) 32 | 33 | def assert_sent_abilities(self, expected_abilities): 34 | observed_abilities = set() 35 | 36 | self.assert_sent_command(GEARMAN_COMMAND_RESET_ABILITIES) 37 | for ability in expected_abilities: 38 | cmd_type, cmd_args = self.connection._outgoing_commands.popleft() 39 | 40 | self.assertEqual(get_command_name(cmd_type), get_command_name(GEARMAN_COMMAND_CAN_DO)) 41 | observed_abilities.add(cmd_args['task']) 42 | 43 | self.assertEqual(observed_abilities, set(expected_abilities)) 44 | 45 | def assert_sent_client_id(self, expected_client_id): 46 | self.assert_sent_command(GEARMAN_COMMAND_SET_CLIENT_ID, client_id=expected_client_id) 47 | 48 | class WorkerTest(_GearmanAbstractWorkerTest): 49 | """Test the public worker interface""" 50 | def test_registering_functions(self): 51 | # Tests that the abilities were set on the GearmanWorker AND the GearmanWorkerCommandHandler 52 | # Does NOT test that commands were actually sent out as that is tested in GearmanWorkerCommandHandlerInterfaceTest.test_set_abilities 53 | def fake_callback_one(worker_command_handler, current_job): 54 | pass 55 | 56 | def fake_callback_two(worker_command_handler, current_job): 57 | pass 58 | 59 | # Register a single callback 60 | self.connection_manager.register_task('fake_callback_one', fake_callback_one) 61 | self.failUnless('fake_callback_one' in self.connection_manager.worker_abilities) 62 | self.failIf('fake_callback_two' in self.connection_manager.worker_abilities) 63 | self.assertEqual(self.connection_manager.worker_abilities['fake_callback_one'], fake_callback_one) 64 | self.assertEqual(self.command_handler._handler_abilities, ['fake_callback_one']) 65 | 66 | # Register another callback and make sure the command_handler sees the same functions 67 | self.connection_manager.register_task('fake_callback_two', fake_callback_two) 68 | self.failUnless('fake_callback_one' in self.connection_manager.worker_abilities) 69 | self.failUnless('fake_callback_two' in self.connection_manager.worker_abilities) 70 | self.assertEqual(self.connection_manager.worker_abilities['fake_callback_one'], fake_callback_one) 71 | self.assertEqual(self.connection_manager.worker_abilities['fake_callback_two'], fake_callback_two) 72 | self.assertEqual(self.command_handler._handler_abilities, ['fake_callback_one', 'fake_callback_two']) 73 | 74 | # Unregister a callback and make sure the command_handler sees the same functions 75 | self.connection_manager.unregister_task('fake_callback_one') 76 | self.failIf('fake_callback_one' in self.connection_manager.worker_abilities) 77 | self.failUnless('fake_callback_two' in self.connection_manager.worker_abilities) 78 | self.assertEqual(self.connection_manager.worker_abilities['fake_callback_two'], fake_callback_two) 79 | self.assertEqual(self.command_handler._handler_abilities, ['fake_callback_two']) 80 | 81 | def test_setting_client_id(self): 82 | new_client_id = 'HELLO' 83 | 84 | # Make sure nothing is set 85 | self.assertEqual(self.connection_manager.worker_client_id, None) 86 | self.assertEqual(self.command_handler._client_id, None) 87 | 88 | self.connection_manager.set_client_id(new_client_id) 89 | 90 | # Make sure both the client and the connection handler reflect the new state 91 | self.assertEqual(self.connection_manager.worker_client_id, new_client_id) 92 | self.assertEqual(self.command_handler._client_id, new_client_id) 93 | 94 | def test_establish_worker_connections(self): 95 | self.connection_manager.connection_list = [] 96 | self.connection_manager.command_handlers = {} 97 | 98 | # Spin up a bunch of imaginary gearman connections 99 | good_connection = MockGearmanConnection() 100 | good_connection.connect() 101 | good_connection._fail_on_bind = False 102 | 103 | failed_then_retried_connection = MockGearmanConnection() 104 | failed_then_retried_connection._fail_on_bind = False 105 | 106 | failed_connection = MockGearmanConnection() 107 | failed_connection._fail_on_bind = True 108 | 109 | # Register all our connections 110 | self.connection_manager.connection_list = [good_connection, failed_then_retried_connection, failed_connection] 111 | 112 | # The only alive connections should be the ones that ultimately be connection.connected 113 | alive_connections = self.connection_manager.establish_worker_connections() 114 | self.assertTrue(good_connection in alive_connections) 115 | self.assertTrue(failed_then_retried_connection in alive_connections) 116 | self.assertFalse(failed_connection in alive_connections) 117 | 118 | def test_establish_worker_connections_dead(self): 119 | self.connection_manager.connection_list = [] 120 | self.connection_manager.command_handlers = {} 121 | 122 | # We have no connections so there will never be any work to do 123 | self.assertRaises(ServerUnavailable, self.connection_manager.work) 124 | 125 | # We were started with a dead connection, make sure we bail again 126 | dead_connection = MockGearmanConnection() 127 | dead_connection._fail_on_bind = True 128 | dead_connection.connected = False 129 | self.connection_manager.connection_list = [dead_connection] 130 | 131 | self.assertRaises(ServerUnavailable, self.connection_manager.work) 132 | 133 | 134 | class WorkerCommandHandlerInterfaceTest(_GearmanAbstractWorkerTest): 135 | """Test the public interface a GearmanWorker may need to call in order to update state on a GearmanWorkerCommandHandler""" 136 | 137 | def test_on_connect(self): 138 | expected_abilities = ['function_one', 'function_two', 'function_three'] 139 | expected_client_id = 'my_client_id' 140 | 141 | self.connection.connected = False 142 | 143 | self.connection_manager.set_client_id(expected_client_id) 144 | self.connection_manager.unregister_task('__test_ability__') 145 | for task in expected_abilities: 146 | self.connection_manager.register_task(task, None) 147 | 148 | # We were disconnected, connect and wipe pending commands 149 | self.connection_manager.establish_connection(self.connection) 150 | 151 | # When we attempt a new connection, make sure we get a new command handler 152 | self.assertNotEquals(self.command_handler, self.connection_manager.connection_to_handler_map[self.connection]) 153 | 154 | self.assert_sent_client_id(expected_client_id) 155 | self.assert_sent_abilities(expected_abilities) 156 | self.assert_sent_command(GEARMAN_COMMAND_PRE_SLEEP) 157 | self.assert_no_pending_commands() 158 | 159 | def test_set_abilities(self): 160 | expected_abilities = ['function_one', 'function_two', 'function_three'] 161 | 162 | # We were disconnected, connect and wipe pending commands 163 | self.command_handler.set_abilities(expected_abilities) 164 | self.assert_sent_abilities(expected_abilities) 165 | self.assert_no_pending_commands() 166 | 167 | def test_set_client_id(self): 168 | expected_client_id = 'my_client_id' 169 | 170 | handler_initial_state = {} 171 | handler_initial_state['abilities'] = [] 172 | handler_initial_state['client_id'] = None 173 | 174 | # We were disconnected, connect and wipe pending commands 175 | self.command_handler.set_client_id(expected_client_id) 176 | self.assert_sent_client_id(expected_client_id) 177 | self.assert_no_pending_commands() 178 | 179 | def test_send_functions(self): 180 | current_job = self.generate_job() 181 | 182 | # Test GEARMAN_COMMAND_WORK_STATUS 183 | self.command_handler.send_job_status(current_job, 0, 1) 184 | self.assert_sent_command(GEARMAN_COMMAND_WORK_STATUS, job_handle=current_job.handle, numerator='0', denominator='1') 185 | 186 | # Test GEARMAN_COMMAND_WORK_COMPLETE 187 | self.command_handler.send_job_complete(current_job, 'completion data') 188 | self.assert_sent_command(GEARMAN_COMMAND_WORK_COMPLETE, job_handle=current_job.handle, data='completion data') 189 | 190 | # Test GEARMAN_COMMAND_WORK_FAIL 191 | self.command_handler.send_job_failure(current_job) 192 | self.assert_sent_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=current_job.handle) 193 | 194 | # Test GEARMAN_COMMAND_WORK_EXCEPTION 195 | self.command_handler.send_job_exception(current_job, 'exception data') 196 | self.assert_sent_command(GEARMAN_COMMAND_WORK_EXCEPTION, job_handle=current_job.handle, data='exception data') 197 | 198 | # Test GEARMAN_COMMAND_WORK_DATA 199 | self.command_handler.send_job_data(current_job, 'job data') 200 | self.assert_sent_command(GEARMAN_COMMAND_WORK_DATA, job_handle=current_job.handle, data='job data') 201 | 202 | # Test GEARMAN_COMMAND_WORK_WARNING 203 | self.command_handler.send_job_warning(current_job, 'job warning') 204 | self.assert_sent_command(GEARMAN_COMMAND_WORK_WARNING, job_handle=current_job.handle, data='job warning') 205 | 206 | class WorkerCommandHandlerStateMachineTest(_GearmanAbstractWorkerTest): 207 | """Test multiple state transitions within a GearmanWorkerCommandHandler 208 | 209 | End to end tests without a server 210 | """ 211 | connection_manager_class = MockGearmanWorker 212 | command_handler_class = GearmanWorkerCommandHandler 213 | 214 | def setup_connection_manager(self): 215 | super(WorkerCommandHandlerStateMachineTest, self).setup_connection_manager() 216 | self.connection_manager.register_task('__test_ability__', None) 217 | 218 | def setup_command_handler(self): 219 | super(_GearmanAbstractWorkerTest, self).setup_command_handler() 220 | self.assert_sent_abilities(['__test_ability__']) 221 | self.assert_sent_command(GEARMAN_COMMAND_PRE_SLEEP) 222 | 223 | def test_wakeup_work(self): 224 | self.move_to_state_wakeup() 225 | 226 | self.move_to_state_job_assign_uniq(self.generate_job_dict()) 227 | 228 | self.move_to_state_wakeup() 229 | 230 | self.move_to_state_no_job() 231 | 232 | def test_wakeup_sleep_wakup_work(self): 233 | self.move_to_state_wakeup() 234 | 235 | self.move_to_state_no_job() 236 | 237 | self.move_to_state_wakeup() 238 | 239 | self.move_to_state_job_assign_uniq(self.generate_job_dict()) 240 | 241 | self.move_to_state_wakeup() 242 | 243 | self.move_to_state_no_job() 244 | 245 | def test_multiple_wakeup_then_no_work(self): 246 | # Awaken the state machine... then give it no work 247 | self.move_to_state_wakeup() 248 | 249 | for _ in range(5): 250 | self.command_handler.recv_command(GEARMAN_COMMAND_NOOP) 251 | 252 | self.assert_job_lock(is_locked=True) 253 | 254 | # Pretend like the server has no work... do nothing 255 | # Moving to state NO_JOB will make sure there's only 1 item on the queue 256 | self.move_to_state_no_job() 257 | 258 | def test_multiple_work(self): 259 | self.move_to_state_wakeup() 260 | 261 | self.move_to_state_job_assign_uniq(self.generate_job_dict()) 262 | 263 | self.move_to_state_wakeup() 264 | 265 | self.move_to_state_job_assign_uniq(self.generate_job_dict()) 266 | 267 | self.move_to_state_wakeup() 268 | 269 | self.move_to_state_job_assign_uniq(self.generate_job_dict()) 270 | 271 | self.move_to_state_wakeup() 272 | 273 | # After this job completes, we're going to greedily ask for more jobs 274 | self.move_to_state_no_job() 275 | 276 | def test_worker_already_locked(self): 277 | other_connection = MockGearmanConnection() 278 | self.connection_manager.connection_list.append(other_connection) 279 | self.connection_manager.establish_connection(other_connection) 280 | 281 | other_handler = self.connection_manager.connection_to_handler_map[other_connection] 282 | other_handler.recv_command(GEARMAN_COMMAND_NOOP) 283 | 284 | # Make sure other handler has a lock 285 | self.assertEqual(self.connection_manager.command_handler_holding_job_lock, other_handler) 286 | 287 | # Make sure OUR handler has nothing incoming 288 | self.assert_no_pending_commands() 289 | 290 | # Make sure we try to grab a job but fail...so go back to sleep 291 | self.command_handler.recv_command(GEARMAN_COMMAND_NOOP) 292 | self.assert_sent_command(GEARMAN_COMMAND_PRE_SLEEP) 293 | 294 | # Make sure other handler still has lock 295 | self.assertEqual(self.connection_manager.command_handler_holding_job_lock, other_handler) 296 | 297 | # Make the other handler release its lock 298 | other_handler.recv_command(GEARMAN_COMMAND_NO_JOB) 299 | 300 | # Ensure that the lock has been freed 301 | self.assert_job_lock(is_locked=False) 302 | 303 | # Try to do work after we have our lock released 304 | self.move_to_state_wakeup() 305 | 306 | self.move_to_state_job_assign_uniq(self.generate_job_dict()) 307 | 308 | self.move_to_state_wakeup() 309 | 310 | self.move_to_state_no_job() 311 | 312 | def move_to_state_wakeup(self): 313 | self.assert_no_pending_commands() 314 | self.assert_job_lock(is_locked=False) 315 | 316 | self.command_handler.recv_command(GEARMAN_COMMAND_NOOP) 317 | 318 | def move_to_state_no_job(self): 319 | """Move us to the NO_JOB state... 320 | 321 | 1) We should've most recently sent only a single GEARMAN_COMMAND_GRAB_JOB_UNIQ 322 | 2) We should be awaiting job assignment 323 | 3) Once we receive a NO_JOB, we should say we're going back to sleep""" 324 | self.assert_awaiting_job() 325 | 326 | self.command_handler.recv_command(GEARMAN_COMMAND_NO_JOB) 327 | 328 | # We should be asleep... which means no pending jobs and we're not awaiting job assignment 329 | self.assert_sent_command(GEARMAN_COMMAND_PRE_SLEEP) 330 | self.assert_no_pending_commands() 331 | self.assert_job_lock(is_locked=False) 332 | 333 | def move_to_state_job_assign_uniq(self, fake_job): 334 | """Move us to the JOB_ASSIGN_UNIQ state... 335 | 336 | 1) We should've most recently sent only a single GEARMAN_COMMAND_GRAB_JOB_UNIQ 337 | 2) We should be awaiting job assignment 338 | 3) The job we receive should be the one we expected""" 339 | self.assert_awaiting_job() 340 | 341 | ### NOTE: This recv_command does NOT send out a GEARMAN_COMMAND_JOB_COMPLETE or GEARMAN_COMMAND_JOB_FAIL 342 | ### as we're using a MockGearmanConnectionManager with a method that only queues the job 343 | self.command_handler.recv_command(GEARMAN_COMMAND_JOB_ASSIGN_UNIQ, **fake_job) 344 | 345 | current_job = self.connection_manager.worker_job_queues[self.command_handler].popleft() 346 | self.assertEqual(current_job.handle, fake_job['job_handle']) 347 | self.assertEqual(current_job.task, fake_job['task']) 348 | self.assertEqual(current_job.unique, fake_job['unique']) 349 | self.assertEqual(current_job.data, fake_job['data']) 350 | 351 | # At the end of recv_command(GEARMAN_COMMAND_JOB_ASSIGN_UNIQ) 352 | self.assert_job_lock(is_locked=False) 353 | self.assert_sent_command(GEARMAN_COMMAND_PRE_SLEEP) 354 | 355 | def assert_awaiting_job(self): 356 | self.assert_sent_command(GEARMAN_COMMAND_GRAB_JOB_UNIQ) 357 | self.assert_no_pending_commands() 358 | 359 | def assert_job_lock(self, is_locked): 360 | expected_value = (is_locked and self.command_handler) or None 361 | self.assertEqual(self.connection_manager.command_handler_holding_job_lock, expected_value) 362 | 363 | if __name__ == '__main__': 364 | unittest.main() 365 | 366 | --------------------------------------------------------------------------------