├── test ├── disable_result1.gold ├── disable_result2.gold ├── disable_result3.gold ├── helloworld.gold ├── filter1.gold ├── backlog.gold ├── filter11.gold ├── filter2.gold ├── filter21.gold ├── filter3.gold ├── chain.gold ├── tiny.gold ├── bottleneck3.gold ├── link_ou.goldr ├── link_uo.goldr ├── pipeout.gold ├── unordered.goldr ├── workparam.gold ├── bottleneck1.gold ├── bottleneck2.gold ├── do_stop_task1.gold ├── do_stop_task2.gold ├── link1.gold ├── fork.goldr ├── helloworld.py ├── multiwork.py ├── bottleneck1.py ├── disable_result0.py ├── bottleneck2.py ├── do_stop_task1.py ├── clog.py ├── disable_result1.py ├── bottleneck3.py ├── disable_result2.py ├── do_stop_task2.py ├── disable_result3.py ├── stackoverflow1.py ├── tiny.py ├── pipeout.py ├── unordered.py ├── link_ou.py ├── link_uo.py ├── workparam.py ├── link1.py ├── drano.py ├── chain.py ├── count_nullops.py ├── tiny2.py ├── packetdrop.py ├── tiny3.py ├── tiny4.py ├── filter3.py ├── filter1.py ├── filter11.py ├── filter2.py ├── filter21.py ├── fork.py ├── buylow.py ├── backlog.py └── test.py ├── doc ├── requirements.txt ├── source │ ├── chain.dia │ ├── fork.dia │ ├── mpipe.xcf │ ├── tiny.dia │ ├── filter.dia │ ├── pipeout.dia │ ├── stage1.dia │ ├── worker1.dia │ ├── worker2.dia │ ├── helloworld.dia │ ├── multiwork.dia │ ├── pipeline1.dia │ ├── _static │ │ ├── logo.png │ │ ├── bodybg.png │ │ ├── forkme.png │ │ ├── mpipe.png │ │ ├── relbg.png │ │ └── headerbg.png │ ├── taskresult1.dia │ ├── _themes │ │ └── mpipe │ │ │ ├── theme.conf │ │ │ ├── static │ │ │ ├── navigation.png │ │ │ └── style.css │ │ │ ├── searchbox.html │ │ │ └── layout.html │ ├── docs.rst │ ├── api.rst │ ├── download.rst │ ├── about.rst │ ├── fordevelopers.rst │ ├── index.rst │ ├── examples.rst │ ├── _templates │ │ └── search.html │ ├── examples3.rst │ ├── examples1.rst │ ├── examples2.rst │ ├── concepts.rst │ ├── cookbook.rst │ └── conf.py ├── README ├── create-gh-pages.sh ├── create.py └── Makefile ├── .gitignore ├── MANIFEST.in ├── 
.travis.yml ├── src ├── __init__.py ├── OrderedStage.py ├── FilterStage.py ├── UnorderedStage.py ├── TubeQ.py ├── Pipeline.py ├── TubeP.py ├── FilterWorker.py ├── Stage.py ├── UnorderedWorker.py └── OrderedWorker.py ├── README.rst ├── LICENSE └── setup.py /test/disable_result1.gold: -------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /test/disable_result2.gold: -------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /test/disable_result3.gold: -------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | Sphinx==1.2.3 2 | -------------------------------------------------------------------------------- /test/helloworld.gold: -------------------------------------------------------------------------------- 1 | 0 2 | 1 3 | 2 4 | 3 5 | -------------------------------------------------------------------------------- /test/filter1.gold: -------------------------------------------------------------------------------- 1 | 2 0 2 | 4 2 3 | 6 4 4 | 8 6 5 | -------------------------------------------------------------------------------- /test/backlog.gold: -------------------------------------------------------------------------------- 1 | iiiiii+i+i+++-0i+-1i+-2+-3+-4+-5-6-7-8-9 2 | -------------------------------------------------------------------------------- /test/filter11.gold: -------------------------------------------------------------------------------- 1 | 2 0 2 | 3 1 3 | 4 2 4 | 6 3 5 | 7 4 6 | 8 6 7 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | *.pyc 2 | *.out 3 | build/ 4 | dist/ 5 | venv/ 6 | MANIFEST 7 | -------------------------------------------------------------------------------- /test/filter2.gold: -------------------------------------------------------------------------------- 1 | 2 0 2 | 3 0 3 | 4 2 4 | 5 2 5 | 6 4 6 | 7 4 7 | 8 6 8 | 9 6 9 | -------------------------------------------------------------------------------- /test/filter21.gold: -------------------------------------------------------------------------------- 1 | 2 0 2 | 3 1 3 | 4 2 4 | 5 2 5 | 6 3 6 | 7 4 7 | 8 6 8 | 9 6 9 | -------------------------------------------------------------------------------- /test/filter3.gold: -------------------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 4 | 4 5 | 5 6 | 6 7 | 7 8 | 8 9 | 9 10 | -------------------------------------------------------------------------------- /doc/source/chain.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/chain.dia -------------------------------------------------------------------------------- /doc/source/fork.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/fork.dia -------------------------------------------------------------------------------- /doc/source/mpipe.xcf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/mpipe.xcf -------------------------------------------------------------------------------- /doc/source/tiny.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/tiny.dia -------------------------------------------------------------------------------- 
/doc/source/filter.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/filter.dia -------------------------------------------------------------------------------- /doc/source/pipeout.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/pipeout.dia -------------------------------------------------------------------------------- /doc/source/stage1.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/stage1.dia -------------------------------------------------------------------------------- /doc/source/worker1.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/worker1.dia -------------------------------------------------------------------------------- /doc/source/worker2.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/worker2.dia -------------------------------------------------------------------------------- /test/chain.gold: -------------------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 4 | 8 5 | 10 6 | 12 7 | 14 8 | 16 9 | 18 10 | 20 11 | -------------------------------------------------------------------------------- /test/tiny.gold: -------------------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 4 | 8 5 | 10 6 | 12 7 | 14 8 | 16 9 | 18 10 | 20 11 | -------------------------------------------------------------------------------- /doc/source/helloworld.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/helloworld.dia 
-------------------------------------------------------------------------------- /doc/source/multiwork.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/multiwork.dia -------------------------------------------------------------------------------- /doc/source/pipeline1.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/pipeline1.dia -------------------------------------------------------------------------------- /test/bottleneck3.gold: -------------------------------------------------------------------------------- 1 | 0 2 | 1 3 | 2 4 | 3 5 | 4 6 | 6 7 | 7 8 | 8 9 | 10 10 | 11 11 | -------------------------------------------------------------------------------- /test/link_ou.goldr: -------------------------------------------------------------------------------- 1 | 2 2 | 4 3 | 12 4 | 6 5 | 18 6 | 8 7 | 14 8 | 20 9 | 10 10 | 16 11 | -------------------------------------------------------------------------------- /test/link_uo.goldr: -------------------------------------------------------------------------------- 1 | 2 2 | 6 3 | 4 4 | 8 5 | 12 6 | 10 7 | 14 8 | 16 9 | 18 10 | 20 11 | -------------------------------------------------------------------------------- /test/pipeout.gold: -------------------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 4 | 8 5 | 10 6 | 12 7 | 14 8 | 16 9 | 18 10 | 20 11 | -------------------------------------------------------------------------------- /test/unordered.goldr: -------------------------------------------------------------------------------- 1 | 4 2 | 2 3 | 12 4 | 8 5 | 18 6 | 6 7 | 10 8 | 14 9 | 16 10 | 20 11 | -------------------------------------------------------------------------------- /test/workparam.gold: -------------------------------------------------------------------------------- 1 | 5 2 
| 6 3 | 7 4 | 8 5 | 9 6 | 10 7 | 11 8 | 12 9 | 13 10 | 14 11 | -------------------------------------------------------------------------------- /doc/source/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/_static/logo.png -------------------------------------------------------------------------------- /doc/source/taskresult1.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/taskresult1.dia -------------------------------------------------------------------------------- /doc/source/_static/bodybg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/_static/bodybg.png -------------------------------------------------------------------------------- /doc/source/_static/forkme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/_static/forkme.png -------------------------------------------------------------------------------- /doc/source/_static/mpipe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/_static/mpipe.png -------------------------------------------------------------------------------- /doc/source/_static/relbg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/_static/relbg.png -------------------------------------------------------------------------------- /test/bottleneck1.gold: -------------------------------------------------------------------------------- 1 | 0 2 | 1 3 | 2 4 | 3 5 | 4 6 | 5 7 | 6 8 | 7 9 | 8 10 | 9 11 | 10 12 | 11 13 | 
-------------------------------------------------------------------------------- /test/bottleneck2.gold: -------------------------------------------------------------------------------- 1 | 0 2 | 2 3 | 4 4 | 6 5 | 8 6 | 10 7 | 1 8 | 3 9 | 5 10 | 7 11 | 9 12 | 11 13 | -------------------------------------------------------------------------------- /test/do_stop_task1.gold: -------------------------------------------------------------------------------- 1 | 0 2 | 1 3 | 2 4 | 3 5 | 4 6 | 5 7 | 6 8 | 7 9 | 8 10 | 9 11 | None 12 | -------------------------------------------------------------------------------- /test/do_stop_task2.gold: -------------------------------------------------------------------------------- 1 | 0 2 | 1 3 | 2 4 | 3 5 | 4 6 | 5 7 | 6 8 | 7 9 | 8 10 | 9 11 | None 12 | -------------------------------------------------------------------------------- /doc/source/_static/headerbg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/_static/headerbg.png -------------------------------------------------------------------------------- /test/link1.gold: -------------------------------------------------------------------------------- 1 | 0 2 | 10000 3 | 20000 4 | 30000 5 | 40000 6 | 50000 7 | 60000 8 | 70000 9 | 80000 10 | 90000 11 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | recursive-include src __init__.py mpipe.py 3 | recursive-include test *.py *.gold *.goldr 4 | -------------------------------------------------------------------------------- /doc/source/_themes/mpipe/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = style.css 4 | pygments_style = friendly 5 | 
-------------------------------------------------------------------------------- /doc/source/_themes/mpipe/static/navigation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/mpipe/master/doc/source/_themes/mpipe/static/navigation.png -------------------------------------------------------------------------------- /test/fork.goldr: -------------------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 4 | 4 5 | 5 6 | 6 7 | 7 8 | 8 9 | 9 10 | 10 11 | 2 12 | 4 13 | 6 14 | 8 15 | 10 16 | 12 17 | 14 18 | 16 19 | 18 20 | 20 21 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.6" 4 | - "2.7" 5 | - "3.2" 6 | - "3.3" 7 | # command to run tests 8 | script: 9 | - python setup.py install 10 | - python setup.py test 11 | -------------------------------------------------------------------------------- /test/helloworld.py: -------------------------------------------------------------------------------- 1 | import mpipe 2 | 3 | def echo(value): 4 | print(value) 5 | 6 | stage = mpipe.OrderedStage(echo) 7 | pipe = mpipe.Pipeline(stage) 8 | 9 | for val in (0, 1, 2, 3): 10 | pipe.put(val) 11 | 12 | pipe.put(None) # Stop the pipeline. 
13 | -------------------------------------------------------------------------------- /test/multiwork.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import mpipe 3 | 4 | def forloop(amount): 5 | for ii in xrange(amount): pass 6 | 7 | stage = mpipe.UnorderedStage(forloop, 2) 8 | pipe = mpipe.Pipeline(stage) 9 | 10 | for foobar in range(5): 11 | pipe.put(int(sys.argv[1])) 12 | 13 | pipe.put(None) 14 | -------------------------------------------------------------------------------- /doc/README: -------------------------------------------------------------------------------- 1 | Contents of this directory: 2 | --------------------------- 3 | create.py - build the documentation 4 | create-gh-pages.sh - build the docs and updates gh-pages 5 | Makefile - Sphinx makefile 6 | README - this README file 7 | source/ - Sphinx documentation source code 8 | -------------------------------------------------------------------------------- /test/bottleneck1.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, Pipeline 3 | 4 | def echo(value): 5 | print(value) 6 | time.sleep(0.013) 7 | return value 8 | 9 | pipe = Pipeline(OrderedStage(echo)) 10 | for number in range(12): 11 | pipe.put(number) 12 | time.sleep(0.010) 13 | 14 | pipe.put(None) 15 | -------------------------------------------------------------------------------- /test/disable_result0.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, Pipeline 2 | 3 | def yes(value): 4 | return value 5 | 6 | pipe = Pipeline(OrderedStage(yes, disable_result=True)) 7 | 8 | for number in range(10): 9 | pipe.put(number) 10 | 11 | pipe.put(None) 12 | 13 | for result in pipe.results(): 14 | print(result) 15 | -------------------------------------------------------------------------------- /test/bottleneck2.py: 
-------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, Pipeline 3 | 4 | def echo(value): 5 | print(value) 6 | time.sleep(0.013) 7 | return value 8 | 9 | pipe = Pipeline(OrderedStage(echo, 2)) 10 | for number in range(12): 11 | pipe.put(number) 12 | time.sleep(0.010) 13 | 14 | pipe.put(None) 15 | -------------------------------------------------------------------------------- /test/do_stop_task1.py: -------------------------------------------------------------------------------- 1 | from mpipe import Stage, OrderedWorker, Pipeline 2 | 3 | class Echo(OrderedWorker): 4 | def doTask(self, value): 5 | print(value) 6 | 7 | stage = Stage(Echo, do_stop_task=True) 8 | pipe = Pipeline(stage) 9 | 10 | for number in range(10): 11 | pipe.put(number) 12 | 13 | pipe.put(None) 14 | -------------------------------------------------------------------------------- /test/clog.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from mpipe import UnorderedStage, Pipeline 3 | 4 | def increment(value): 5 | return value + 1 6 | 7 | stage = UnorderedStage(increment) 8 | pipe = Pipeline(stage) 9 | 10 | for task in xrange(sys.maxint): 11 | pipe.put(task) 12 | 13 | pipe.put(None) 14 | 15 | for result in pipe.results(): 16 | print(result) 17 | -------------------------------------------------------------------------------- /test/disable_result1.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, Pipeline 2 | 3 | def yes(value): 4 | return value 5 | 6 | stage = OrderedStage(yes, 4, disable_result=True) 7 | pipe = Pipeline(stage) 8 | 9 | for number in range(10): 10 | pipe.put(number) 11 | 12 | pipe.put(None) 13 | 14 | count = 0 15 | for result in pipe.results(): 16 | count += 1 17 | 18 | print(count) 19 | -------------------------------------------------------------------------------- 
/test/bottleneck3.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, FilterStage, Pipeline 3 | 4 | def echo(value): 5 | print(value) 6 | time.sleep(0.013) 7 | return value 8 | 9 | stage = FilterStage((OrderedStage(echo),), max_tasks=2) 10 | pipe = Pipeline(stage) 11 | for number in range(12): 12 | pipe.put(number) 13 | time.sleep(0.010) 14 | 15 | pipe.put(None) 16 | -------------------------------------------------------------------------------- /test/disable_result2.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedWorker, Stage, Pipeline 2 | 3 | class Yes(OrderedWorker): 4 | def doTask(self, value): 5 | return value 6 | 7 | stage = Stage(Yes, 4, disable_result=True) 8 | pipe = Pipeline(stage) 9 | 10 | for number in range(10): 11 | pipe.put(number) 12 | 13 | pipe.put(None) 14 | 15 | count = 0 16 | for result in pipe.results(): 17 | count += 1 18 | 19 | print(count) 20 | -------------------------------------------------------------------------------- /test/do_stop_task2.py: -------------------------------------------------------------------------------- 1 | from mpipe import Stage, OrderedWorker, FilterStage, Pipeline 2 | 3 | class Echo(OrderedWorker): 4 | def doTask(self, value): 5 | print(value) 6 | 7 | s1 = Stage(Echo, do_stop_task=True) 8 | s2 = FilterStage( 9 | (s1,), 10 | max_tasks=999, 11 | do_stop_task=True, 12 | ) 13 | pipe = Pipeline(s2) 14 | for number in range(10): 15 | pipe.put(number) 16 | 17 | pipe.put(None) 18 | -------------------------------------------------------------------------------- /test/disable_result3.py: -------------------------------------------------------------------------------- 1 | from mpipe import UnorderedWorker, Stage, Pipeline 2 | 3 | class Yes(UnorderedWorker): 4 | def doTask(self, value): 5 | return value 6 | 7 | stage = Stage(Yes, 4, disable_result=True) 8 | pipe = 
Pipeline(stage) 9 | 10 | for number in range(10): 11 | pipe.put(number) 12 | 13 | pipe.put(None) 14 | 15 | count = 0 16 | for result in pipe.results(): 17 | count += 1 18 | 19 | print(count) 20 | -------------------------------------------------------------------------------- /test/stackoverflow1.py: -------------------------------------------------------------------------------- 1 | """Solution for http://stackoverflow.com/questions/8277715""" 2 | 3 | from mpipe import OrderedStage, Pipeline 4 | 5 | def f2(value): 6 | return value * 2 7 | 8 | def f3(value): 9 | print(value) 10 | 11 | s1 = OrderedStage(f2, size=2) 12 | s2 = OrderedStage(f3) 13 | s1.link(s2) 14 | p = Pipeline(s1) 15 | 16 | def f1(): 17 | for task in [1,2,3,4,5,None]: 18 | p.put(task) 19 | 20 | f1() 21 | -------------------------------------------------------------------------------- /test/tiny.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, Pipeline 2 | 3 | def increment(value): 4 | return value + 1 5 | 6 | def double(value): 7 | return value * 2 8 | 9 | stage1 = OrderedStage(increment, 3) 10 | stage2 = OrderedStage(double, 3) 11 | pipe = Pipeline(stage1.link(stage2)) 12 | 13 | for number in range(10): 14 | pipe.put(number) 15 | 16 | pipe.put(None) 17 | 18 | for result in pipe.results(): 19 | print(result) 20 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | """MPipe is a multiprocessing pipeline toolkit for Python.""" 2 | 3 | __version__ = '1.0.8' 4 | 5 | from .OrderedWorker import OrderedWorker 6 | from .UnorderedWorker import UnorderedWorker 7 | from .Stage import Stage 8 | from .OrderedStage import OrderedStage 9 | from .UnorderedStage import UnorderedStage 10 | from .Pipeline import Pipeline 11 | from .FilterWorker import FilterWorker 12 | from .FilterStage import FilterStage 13 | 
-------------------------------------------------------------------------------- /test/pipeout.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, Pipeline 2 | 3 | def increment(value): 4 | return value + 1 5 | 6 | def double(value): 7 | return value * 2 8 | 9 | stage1 = OrderedStage(increment) 10 | stage2 = OrderedStage(double) 11 | stage1.link(stage2) 12 | pipe = Pipeline(stage1) 13 | 14 | for number in range(10): 15 | pipe.put(number) 16 | 17 | pipe.put(None) 18 | 19 | for result in pipe.results(): 20 | print(result) 21 | -------------------------------------------------------------------------------- /test/unordered.py: -------------------------------------------------------------------------------- 1 | from mpipe import UnorderedStage, Pipeline 2 | 3 | def increment(value): 4 | return value + 1 5 | 6 | def double(value): 7 | return value * 2 8 | 9 | stage1 = UnorderedStage(increment, 2) 10 | stage2 = UnorderedStage(double, 2) 11 | stage1.link(stage2) 12 | pipe = Pipeline(stage1) 13 | 14 | for number in range(10): 15 | pipe.put(number) 16 | 17 | pipe.put(None) 18 | 19 | for result in pipe.results(): 20 | print(result) 21 | -------------------------------------------------------------------------------- /test/link_ou.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, UnorderedStage, Pipeline 2 | 3 | def increment(value): 4 | return value + 1 5 | 6 | def double(value): 7 | return value * 2 8 | 9 | stage1 = OrderedStage(increment, 3) 10 | stage2 = UnorderedStage(double, 3) 11 | stage1.link(stage2) 12 | pipe = Pipeline(stage1) 13 | 14 | for number in range(10): 15 | pipe.put(number) 16 | pipe.put(None) 17 | 18 | for result in pipe.results(): 19 | print(result) 20 | -------------------------------------------------------------------------------- /test/link_uo.py: 
-------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, UnorderedStage, Pipeline 2 | 3 | def increment(value): 4 | return value + 1 5 | 6 | def double(value): 7 | return value * 2 8 | 9 | stage1 = UnorderedStage(increment, 3) 10 | stage2 = OrderedStage(double, 3) 11 | stage1.link(stage2) 12 | pipe = Pipeline(stage1) 13 | 14 | for number in range(10): 15 | pipe.put(number) 16 | pipe.put(None) 17 | 18 | for result in pipe.results(): 19 | print(result) 20 | -------------------------------------------------------------------------------- /test/workparam.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedWorker, Stage, Pipeline 2 | 3 | class Adder(OrderedWorker): 4 | def __init__(self, number): 5 | self.number = number 6 | def doTask(self, value): 7 | return value + self.number 8 | 9 | stage1 = Stage(Adder, 1, number=5) 10 | pipe = Pipeline(stage1) 11 | 12 | for number in range(10): 13 | pipe.put(number) 14 | 15 | pipe.put(None) 16 | 17 | for result in pipe.results(): 18 | print(result) 19 | -------------------------------------------------------------------------------- /test/link1.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage as OStage, Pipeline 2 | 3 | def magnify(value): 4 | return value*10 5 | 6 | p1 = Pipeline( 7 | OStage(magnify).link( 8 | OStage(magnify).link( 9 | OStage(magnify).link( 10 | OStage(magnify) 11 | ) 12 | ) 13 | ) 14 | ) 15 | for val in list(range(10)) + [None]: 16 | p1.put(val) 17 | 18 | for result in p1.results(): 19 | print(result) 20 | -------------------------------------------------------------------------------- /test/drano.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from mpipe import UnorderedStage, Pipeline 3 | 4 | def increment(value): 5 | return value + 1 6 | 7 | stage = 
UnorderedStage(increment) 8 | pipe = Pipeline(stage) 9 | 10 | def pull(value): 11 | for result in pipe.results(): 12 | print(result) 13 | 14 | pipe2 = Pipeline(UnorderedStage(pull)) 15 | pipe2.put(True) 16 | 17 | for task in xrange(sys.maxint): 18 | pipe.put(task) 19 | 20 | pipe.put(None) 21 | pipe2.put(None) 22 | -------------------------------------------------------------------------------- /test/chain.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, Pipeline 2 | 3 | def increment(value): 4 | return value + 1 5 | 6 | def double(value): 7 | return value * 2 8 | 9 | def echo(value): 10 | print(value) 11 | 12 | stage1 = OrderedStage(increment) 13 | stage2 = OrderedStage(double) 14 | stage3 = OrderedStage(echo) 15 | stage1.link(stage2) 16 | stage2.link(stage3) 17 | pipe = Pipeline(stage1) 18 | 19 | for number in range(10): 20 | pipe.put(number) 21 | 22 | pipe.put(None) 23 | -------------------------------------------------------------------------------- /test/count_nullops.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | def getNumNullops(duration, max_sample=1.0): 4 | """Return number of do-nothing loop iterations.""" 5 | for amount in [2**x for x in range(100)]: # 1,2,4,8,... 
6 | begin = datetime.now() 7 | for ii in xrange(amount): pass 8 | elapsed = (datetime.now() - begin).total_seconds() 9 | if elapsed > max_sample: 10 | break 11 | return int(amount/elapsed*duration) 12 | 13 | if __name__ == '__main__': 14 | print(getNumNullops(1.0)) 15 | -------------------------------------------------------------------------------- /test/tiny2.py: -------------------------------------------------------------------------------- 1 | import mpipe 2 | 3 | class Incrementor(mpipe.UnorderedWorker): 4 | def doTask(self, value): 5 | return value + 1 6 | 7 | class Doubler(mpipe.UnorderedWorker): 8 | def doTask(self, value): 9 | return value * 2 10 | 11 | stage1 = mpipe.Stage(Incrementor, 3) 12 | stage2 = mpipe.Stage(Doubler, 3) 13 | stage1.link(stage2) 14 | pipe = mpipe.Pipeline(stage1) 15 | 16 | for number in range(10): 17 | pipe.put(number) 18 | pipe.put(None) 19 | 20 | for result in pipe.results(): 21 | print(result) 22 | 23 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: http://vmlaker.github.io/mpipe/_static/logo.png 2 | :alt: MPipe Logo 3 | :align: right 4 | :target: http://vmlaker.github.io/mpipe 5 | 6 | .. image:: https://travis-ci.org/vmlaker/mpipe.png?branch=master 7 | :alt: Build Result Image 8 | :target: https://travis-ci.org/vmlaker/mpipe 9 | 10 | A tiny Python module that lets you 11 | easily write multi-stage, multiprocess pipeline algorithms. 12 | For the full description, including user guide and examples, 13 | visit `the documentation page `_. 14 | -------------------------------------------------------------------------------- /doc/source/docs.rst: -------------------------------------------------------------------------------- 1 | .. _docs: 2 | 3 | Documentation contents 4 | ********************** 5 | 6 | .. Getting the code and installing it: 7 | 8 | .. toctree:: 9 | download 10 | 11 | .. 
Some terminology and basic concepts about pipelines: 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | 16 | concepts 17 | 18 | .. A cookbook for building pipelines: 19 | 20 | .. toctree:: 21 | cookbook 22 | 23 | .. Tutorial with working examples: 24 | 25 | .. toctree:: 26 | examples 27 | 28 | .. For developers of MPipe: 29 | 30 | .. toctree:: 31 | fordevelopers 32 | -------------------------------------------------------------------------------- /test/packetdrop.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedStage, FilterStage, Pipeline 2 | import time 3 | 4 | def echo(value): 5 | print(value) 6 | time.sleep(0.0125) 7 | return value 8 | 9 | pipe1 = Pipeline( 10 | FilterStage( 11 | (OrderedStage(echo),), 12 | max_tasks=2 13 | ) 14 | ) 15 | 16 | def pull(task): 17 | for result in pipe1.results(): pass 18 | pipe2 = Pipeline(OrderedStage(pull)) 19 | pipe2.put(True) 20 | pipe2.put(None) 21 | 22 | for number in range(10): 23 | pipe1.put(number) 24 | time.sleep(0.0100) 25 | 26 | pipe1.put(None) 27 | -------------------------------------------------------------------------------- /test/tiny3.py: -------------------------------------------------------------------------------- 1 | import mpipe 2 | 3 | class Incrementor(mpipe.OrderedWorker): 4 | def doTask(self, value): 5 | result = value + 1 6 | self.putResult(result) 7 | 8 | class Doubler(mpipe.OrderedWorker): 9 | def doTask(self, value): 10 | result = value * 2 11 | self.putResult(result) 12 | 13 | stage1 = mpipe.Stage(Incrementor, 13) 14 | stage2 = mpipe.Stage(Doubler, 13) 15 | stage1.link(stage2) 16 | pipe = mpipe.Pipeline(stage1) 17 | 18 | for number in range(10): 19 | pipe.put(number) 20 | pipe.put(None) 21 | 22 | for result in pipe.results(): 23 | print(result) 24 | -------------------------------------------------------------------------------- /test/tiny4.py: -------------------------------------------------------------------------------- 1 | import 
mpipe 2 | 3 | class Incrementor(mpipe.UnorderedWorker): 4 | def doTask(self, value): 5 | result = value + 1 6 | self.putResult(result) 7 | 8 | class Doubler(mpipe.UnorderedWorker): 9 | def doTask(self, value): 10 | result = value * 2 11 | self.putResult(result) 12 | 13 | stage1 = mpipe.Stage(Incrementor, 3) 14 | stage2 = mpipe.Stage(Doubler, 3) 15 | stage1.link(stage2) 16 | pipe = mpipe.Pipeline(stage1) 17 | 18 | for number in range(10): 19 | pipe.put(number) 20 | pipe.put(None) 21 | 22 | for result in pipe.results(): 23 | print(result) 24 | -------------------------------------------------------------------------------- /test/filter3.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, FilterStage, Pipeline 3 | 4 | def passthru(value): 5 | time.sleep(0.013) 6 | return value 7 | 8 | s1 = FilterStage( 9 | (OrderedStage(passthru),), 10 | max_tasks=1, 11 | drop_results=True, 12 | ) 13 | p1 = Pipeline(s1) 14 | 15 | def pull(task): 16 | for result in p1.results(): 17 | if result: 18 | print(result) 19 | 20 | p2 = Pipeline(OrderedStage(pull)) 21 | p2.put(True) 22 | 23 | 24 | for number in range(10): 25 | p1.put(number) 26 | time.sleep(0.010) 27 | 28 | p1.put(None) 29 | p2.put(None) 30 | 31 | 32 | -------------------------------------------------------------------------------- /test/filter1.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, FilterStage, Pipeline 3 | 4 | def passthru(value): 5 | time.sleep(0.013) 6 | return value 7 | 8 | s1 = FilterStage( 9 | (OrderedStage(passthru),), 10 | max_tasks=1, 11 | ) 12 | p1 = Pipeline(s1) 13 | 14 | def pull(task): 15 | for task, result in p1.results(): 16 | if result: 17 | print('{0} {1}'.format(task, result[0])) 18 | 19 | p2 = Pipeline(OrderedStage(pull)) 20 | p2.put(True) 21 | 22 | 23 | for number in range(10): 24 | p1.put(number) 25 | time.sleep(0.010) 26 | 
27 | p1.put(None) 28 | p2.put(None) 29 | 30 | 31 | -------------------------------------------------------------------------------- /test/filter11.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, FilterStage, Pipeline 3 | 4 | def passthru(value): 5 | time.sleep(0.013) 6 | return value 7 | 8 | s1 = FilterStage( 9 | (OrderedStage(passthru),), 10 | max_tasks=2, 11 | ) 12 | p1 = Pipeline(s1) 13 | 14 | def pull(task): 15 | for task, result in p1.results(): 16 | if result: 17 | print('{0} {1}'.format(task, result[0])) 18 | 19 | p2 = Pipeline(OrderedStage(pull)) 20 | p2.put(True) 21 | 22 | 23 | for number in range(10): 24 | p1.put(number) 25 | time.sleep(0.010) 26 | 27 | p1.put(None) 28 | p2.put(None) 29 | 30 | 31 | -------------------------------------------------------------------------------- /src/OrderedStage.py: -------------------------------------------------------------------------------- 1 | """Implements OrderedStage class.""" 2 | 3 | from .Stage import Stage 4 | from .OrderedWorker import OrderedWorker 5 | 6 | class OrderedStage(Stage): 7 | """A specialized :class:`~mpipe.Stage`, 8 | internally creating :class:`~mpipe.OrderedWorker` objects.""" 9 | def __init__(self, target, size=1, disable_result=False): 10 | """Constructor takes a function implementing 11 | :meth:`OrderedWorker.doTask`.""" 12 | class wclass(OrderedWorker): 13 | def doTask(self, task): 14 | return target(task) 15 | super(OrderedStage, self).__init__(wclass, size, disable_result) 16 | -------------------------------------------------------------------------------- /test/filter2.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, FilterStage, Pipeline 3 | 4 | def passthru(value): 5 | time.sleep(0.013) 6 | return value 7 | 8 | s1 = FilterStage( 9 | (OrderedStage(passthru),), 10 | max_tasks=1, 11 | cache_results=True, 12 | ) 13 
| p1 = Pipeline(s1) 14 | 15 | def pull(task): 16 | for task, result in p1.results(): 17 | if result: 18 | print('{0} {1}'.format(task, result[0])) 19 | 20 | p2 = Pipeline(OrderedStage(pull)) 21 | p2.put(True) 22 | 23 | 24 | for number in range(10): 25 | p1.put(number) 26 | time.sleep(0.010) 27 | 28 | p1.put(None) 29 | p2.put(None) 30 | 31 | 32 | -------------------------------------------------------------------------------- /test/filter21.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mpipe import OrderedStage, FilterStage, Pipeline 3 | 4 | def passthru(value): 5 | time.sleep(0.013) 6 | return value 7 | 8 | s1 = FilterStage( 9 | (OrderedStage(passthru),), 10 | max_tasks=2, 11 | cache_results=True, 12 | ) 13 | p1 = Pipeline(s1) 14 | 15 | def pull(task): 16 | for task, result in p1.results(): 17 | if result: 18 | print('{0} {1}'.format(task, result[0])) 19 | 20 | p2 = Pipeline(OrderedStage(pull)) 21 | p2.put(True) 22 | 23 | 24 | for number in range(10): 25 | p1.put(number) 26 | time.sleep(0.010) 27 | 28 | p1.put(None) 29 | p2.put(None) 30 | 31 | 32 | -------------------------------------------------------------------------------- /test/fork.py: -------------------------------------------------------------------------------- 1 | from mpipe import OrderedWorker, Stage, Pipeline 2 | 3 | class Incrementor(OrderedWorker): 4 | def doTask(self, value): 5 | return value + 1 6 | 7 | class Doubler(OrderedWorker): 8 | def doTask(self, value): 9 | return value * 2 10 | 11 | class Printer(OrderedWorker): 12 | def doTask(self, value): 13 | print(value) 14 | 15 | stage1 = Stage(Incrementor) 16 | stage2 = Stage(Doubler) 17 | stage3 = Stage(Printer) 18 | stage4 = Stage(Printer) 19 | 20 | stage1.link(stage2) 21 | stage1.link(stage3) 22 | stage2.link(stage4) 23 | 24 | pipe = Pipeline(stage1) 25 | 26 | for number in range(10): 27 | pipe.put(number) 28 | 29 | pipe.put(None) 30 | 
-------------------------------------------------------------------------------- /doc/source/api.rst: -------------------------------------------------------------------------------- 1 | .. _api: 2 | 3 | ********** 4 | |NAME| API 5 | ********** 6 | 7 | .. module:: mpipe 8 | 9 | .. autoclass:: mpipe.OrderedWorker 10 | :members: doTask, doInit, putResult 11 | 12 | .. autoclass:: mpipe.UnorderedWorker 13 | :members: doTask, doInit, putResult 14 | 15 | ---- 16 | 17 | .. autoclass:: mpipe.Stage 18 | :members: link, put, get 19 | 20 | .. autoclass:: mpipe.OrderedStage 21 | :members: 22 | 23 | .. autoclass:: mpipe.UnorderedStage 24 | :members: 25 | 26 | ---- 27 | 28 | .. autoclass:: mpipe.Pipeline 29 | :members: put, get, results 30 | 31 | ---- 32 | 33 | .. autoclass:: mpipe.FilterWorker 34 | 35 | .. autoclass:: mpipe.FilterStage 36 | 37 | .. End of file. 38 | -------------------------------------------------------------------------------- /doc/source/download.rst: -------------------------------------------------------------------------------- 1 | .. _download: 2 | 3 | Installation instructions 4 | ------------------------- 5 | 6 | Easiest way to install |NAME| on your system is using *pip*: 7 | :: 8 | 9 | pip install mpipe 10 | 11 | Or you may provide ``--user`` flag if you don't have root privs: 12 | :: 13 | 14 | pip install --user mpipe 15 | 16 | Another way is using ``easy_install``: 17 | :: 18 | 19 | easy_install mpipe 20 | 21 | Or, you can clone the GitHub repo and manually run the *Distutils* installer: 22 | :: 23 | 24 | git clone http://github.com/vmlaker/mpipe 25 | cd mpipe 26 | python setup.py install --user 27 | 28 | Another way to get the source code is to download the tarball 29 | from the *Python Package Index* at http://pypi.python.org/pypi/mpipe. 30 | -------------------------------------------------------------------------------- /doc/source/about.rst: -------------------------------------------------------------------------------- 1 | .. 
_about: 2 | 3 | About |NAME| 4 | ============ 5 | 6 | |NAME| (pronounced "em pipe") is written by 7 | `Velimir Mlaker `_. 8 | It's a small, lightweight Python library you can use 9 | to create programs organized as multitasking pipelined workflows. 10 | |NAME| is written in 100% pure `Python `_. 11 | 12 | The web pages you are reading were created with the fabulous 13 | `Sphinx `_ documentation generator. 14 | Pictures in the documentation were drawn with 15 | `Dia `_, the powerful and easy-to-use 16 | diagram creation program. 17 | 18 | Credits 19 | ------- 20 | 21 | Frens Jan Rumph for adding ability to limit backlog in an unordered stage. 22 | -------------------------------------------------------------------------------- /src/FilterStage.py: -------------------------------------------------------------------------------- 1 | """Implements FilterStage class.""" 2 | 3 | from .Stage import Stage 4 | from .FilterWorker import FilterWorker 5 | 6 | class FilterStage(Stage): 7 | """Single worker stage running 8 | :class:`~mpipe.FilterWorker`.""" 9 | def __init__( 10 | self, 11 | stages, 12 | max_tasks=1, 13 | drop_results=False, 14 | cache_results=False, 15 | do_stop_task=True, 16 | ): 17 | super(FilterStage, self).__init__( 18 | FilterWorker, 19 | size=1, 20 | do_stop_task=do_stop_task, 21 | stages=stages, 22 | max_tasks=max_tasks, 23 | drop_results=drop_results, 24 | cache_results=cache_results, 25 | ) 26 | -------------------------------------------------------------------------------- /src/UnorderedStage.py: -------------------------------------------------------------------------------- 1 | """Implements UnorderedStage class.""" 2 | 3 | from .Stage import Stage 4 | from .UnorderedWorker import UnorderedWorker 5 | from .TubeQ import TubeQ 6 | 7 | class UnorderedStage(Stage): 8 | """A specialized :class:`~mpipe.Stage`, 9 | internally creating :class:`~mpipe.UnorderedWorker` objects.""" 10 | def __init__(self, target, size=1, disable_result=False, 
max_backlog=None): 11 | """Constructor takes a function implementing 12 | :meth:`UnorderedWorker.doTask`.""" 13 | class wclass(UnorderedWorker): 14 | def doTask(self, task): 15 | return target(task) 16 | super(UnorderedStage, self).__init__(wclass, size, disable_result, 17 | input_tube=TubeQ(maxsize=max_backlog) if max_backlog else None) 18 | -------------------------------------------------------------------------------- /src/TubeQ.py: -------------------------------------------------------------------------------- 1 | """Implements TubeQ class.""" 2 | 3 | import multiprocessing 4 | 5 | class TubeQ: 6 | """A unidirectional communication channel 7 | using :class:`multiprocessing.Queue` for underlying implementation.""" 8 | 9 | def __init__(self, maxsize=0): 10 | self._queue = multiprocessing.Queue(maxsize) 11 | 12 | def put(self, data): 13 | """Put an item on the tube.""" 14 | self._queue.put(data) 15 | 16 | def get(self, timeout=None): 17 | """Return the next available item from the tube. 
18 | 19 | Blocks if tube is empty, until a producer for the tube puts an item on it.""" 20 | if timeout: 21 | try: 22 | result = self._queue.get(True, timeout) 23 | except multiprocessing.Queue.Empty: 24 | return(False, None) 25 | return(True, result) 26 | return self._queue.get() 27 | -------------------------------------------------------------------------------- /src/Pipeline.py: -------------------------------------------------------------------------------- 1 | """Implements Pipeline class.""" 2 | 3 | class Pipeline(object): 4 | """A pipeline of stages.""" 5 | def __init__(self, input_stage): 6 | """Constructor takes the root upstream stage.""" 7 | self._input_stage = input_stage 8 | self._output_stages = input_stage.getLeaves() 9 | self._input_stage.build() 10 | 11 | def put(self, task): 12 | """Put *task* on the pipeline.""" 13 | self._input_stage.put(task) 14 | 15 | def get(self, timeout=None): 16 | """Return result from the pipeline.""" 17 | result = None 18 | for stage in self._output_stages: 19 | result = stage.get(timeout) 20 | return result 21 | 22 | def results(self): 23 | """Return a generator to iterate over results from the pipeline.""" 24 | while True: 25 | result = self.get() 26 | if result is None: break 27 | yield result 28 | -------------------------------------------------------------------------------- /test/buylow.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | import numpy as np 3 | from mpipe import OrderedWorker, Stage, OrderedStage, Pipeline 4 | 5 | last10 = deque() 6 | junk = 'http://ws.cdyne.com/delayedstockquote/delayedstockquote.asmx/GetQuote?StockSymbol=fac&LicenseKey=0' 7 | j = 'http://www.google.com/ig/api?stock=AAPL' 8 | 9 | class Accumulator(OrderedWorker): 10 | def doTask(self, price): 11 | if last10: 12 | if price < min(last10): 13 | self.putResult(price) 14 | last10.append(price) 15 | if len(last10) > 10: 16 | last10.popleft() 17 | 18 | def 
echo(value): 19 | print('value = {0}'.format(value)) 20 | 21 | stage1 = Stage(Accumulator) 22 | stage2 = OrderedStage(echo, 50) 23 | stage1.link(stage2) 24 | pipe = Pipeline(stage1) 25 | 26 | SIZE = 1000 27 | prices = np.linspace(0, np.pi*10, SIZE) 28 | prices = np.sin(prices) + 1 29 | for price in prices: 30 | pipe.put(price) 31 | 32 | pipe.put(None) 33 | -------------------------------------------------------------------------------- /test/backlog.py: -------------------------------------------------------------------------------- 1 | from sys import stdout 2 | from threading import Thread 3 | from time import sleep 4 | 5 | from mpipe.Pipeline import Pipeline 6 | from mpipe.UnorderedStage import UnorderedStage 7 | 8 | 9 | def write(value): 10 | stdout.write(str(value)) 11 | stdout.flush() 12 | 13 | def inc(x): 14 | sleep(0.1) 15 | write('+') 16 | return x+1 17 | 18 | def dec(x): 19 | sleep(0.2) 20 | write('-') 21 | return x-1 22 | 23 | 24 | stage1 = UnorderedStage(inc, 3, max_backlog=3) 25 | stage2 = UnorderedStage(dec, 1, max_backlog=1) 26 | stage1.link(stage2) 27 | pipeline = Pipeline(stage1) 28 | 29 | 30 | def print_results(): 31 | for result in pipeline.results(): 32 | write(result) 33 | 34 | print_thread = Thread(target=print_results) 35 | print_thread.start() 36 | 37 | 38 | for i in range(10): 39 | sleep(0.01) 40 | pipeline.put(i) 41 | write('i') 42 | 43 | pipeline.put(None) 44 | print_thread.join() 45 | write('\n') 46 | 47 | -------------------------------------------------------------------------------- /src/TubeP.py: -------------------------------------------------------------------------------- 1 | """Implements TubeP class.""" 2 | 3 | import multiprocessing 4 | 5 | class TubeP: 6 | """A unidirectional communication channel 7 | using :class:`multiprocessing.Connection` for underlying implementation.""" 8 | 9 | def __init__(self): 10 | (self._conn1, 11 | self._conn2) = multiprocessing.Pipe(duplex=False) 12 | 13 | def put(self, data): 14 | """Put an item 
on the tube.""" 15 | self._conn2.send(data) 16 | 17 | def get(self, timeout=None): 18 | """Return the next available item from the tube. 19 | 20 | Blocks if tube is empty, until a producer for the tube puts an item on it.""" 21 | if timeout: 22 | # Todo: Consider locking the poll/recv block. 23 | # Otherwise, this method is not thread safe. 24 | if self._conn1.poll(timeout): 25 | return (True, self._conn1.recv()) 26 | else: 27 | return (False, None) 28 | return self._conn1.recv() 29 | -------------------------------------------------------------------------------- /doc/create-gh-pages.sh: -------------------------------------------------------------------------------- 1 | # Shell script to build gh-pages. 2 | 3 | # Start at the root directory. 4 | cd .. 5 | 6 | # Switch to gh-pages branch and start afresh. 7 | git checkout gh-pages 8 | rm -rf * 9 | 10 | # Switch to master branch (Sphinx build needs the checked out dirs and files). 11 | git checkout master doc test src setup.py README.rst 12 | git reset HEAD 13 | 14 | # Create a Python Virtualenv. 15 | virtualenv doc/venv 16 | doc/venv/bin/python setup.py install 17 | doc/venv/bin/pip install -r doc/requirements.txt 18 | 19 | # Build the docs and move html/ files root directory. 20 | cd doc 21 | 22 | venv/bin/python ./create.py build 23 | mv -fv build/html/* .. 24 | cd .. 25 | 26 | # Remove the directories (from master branch) needed for building docs. 27 | rm -rf doc test src build README.rst setup.py 28 | 29 | # Add everything to gh-pages. 30 | git add --all . 31 | 32 | # Commit with comment referencing latest master branch commit. 33 | git commit -m "Updated gh-pages for `git log master -1 | head -1`" 34 | 35 | # Push. 
36 | #git push origin gh-pages 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Velimir Mlaker 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /doc/source/_themes/mpipe/searchbox.html: -------------------------------------------------------------------------------- 1 | {# 2 | basic/searchbox.html 3 | ~~~~~~~~~~~~~~~~~~~~ 4 | Sphinx sidebar template: quick search box. 5 | :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. 6 | :license: BSD, see LICENSE for details. 
7 | #} 8 | {%- if pagename != "search" %} 9 | 32 | 33 | {%- endif %} 34 | -------------------------------------------------------------------------------- /doc/source/fordevelopers.rst: -------------------------------------------------------------------------------- 1 | .. _fordevelopers: 2 | 3 | For developers 4 | ============== 5 | 6 | If you want to contribute to |NAME| development, 7 | start by forking the `repo `_. 8 | After making changes to the code, run the build/test cycle 9 | as shown in the developer guide below. 10 | 11 | Build/install/test 12 | ------------------ 13 | 14 | It's recommended you use Python Virtualenv for your build/test cycle. 15 | A good place to install Virtualenv is in the root project directory, 16 | simply by running: 17 | :: 18 | 19 | virtualenv venv 20 | 21 | Use *Distutils* to build the code: 22 | :: 23 | 24 | venv/bin/python setup.py build 25 | 26 | Install into your Virtualenv Python: 27 | :: 28 | 29 | venv/bin/python setup.py install 30 | 31 | Run all tests: 32 | :: 33 | 34 | venv/bin/python setup.py test 35 | 36 | Update Pypi 37 | ----------- 38 | 39 | To update the Python Package Index, run: 40 | :: 41 | 42 | venv/bin/python setup.py clean build sdist upload 43 | 44 | Build docs 45 | ---------- 46 | 47 | To build |NAME| documentation, 48 | go to ``doc`` directory and take a look at 49 | ``create-gh-pages.sh`` script. 50 | 51 | .. End of file. 52 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | .. toctree:: 4 | :hidden: 5 | 6 | docs 7 | download 8 | concepts 9 | cookbook 10 | examples 11 | fordevelopers 12 | api 13 | about 14 | 15 | |NAME| is a tiny Python module -- a thin layer above the standard :mod:`multiprocessing` package -- that lets you write parallel, multi-stage pipeline algorithms with remarkable ease. Consider the following workflow: 16 | 17 | .. 
image:: tiny.png 18 | :align: center 19 | 20 | It's a two-stage pipeline that increments and doubles numbers, each stage concurrently running three workers. 21 | Here's how you'd code it up using the :mod:`mpipe` module: 22 | 23 | .. literalinclude:: tiny.py 24 | 25 | The above snippet runs a total of seven processes: one for the main program and six for the two stages (three processes per stage). 26 | 27 | Installation 28 | ************ 29 | 30 | Get |NAME| now! Easiest way is using *pip*: 31 | :: 32 | 33 | pip install mpipe 34 | 35 | Check out :doc:`download` for other ways of getting |NAME| up and running on your system. 36 | 37 | Got it, now what? 38 | ***************** 39 | 40 | Start piping right away by running through the :doc:`examples`. 41 | If you want a step-by-step guide to creating pipelines, read the :doc:`cookbook`. 42 | For theory and design, take a look at :doc:`concepts`. 43 | -------------------------------------------------------------------------------- /doc/source/_themes/mpipe/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "basic/layout.html" %} 2 | 3 | {# put the sidebar before the body #} 4 | {% block sidebar1 %}{{ sidebar() }}{% endblock %} 5 | {% block sidebar2 %}{% endblock %} 6 | 7 | {% block extrahead %} 8 | {{ super() }} 9 | 10 | Fork me on GitHub 11 | 12 | {%- if not embedded %} 13 | 18 | {%- endif %} 19 | {% endblock %} 20 | 21 | {% block rootrellink %} 22 |
  • MPipe home |
  • 23 |
  • Documentation »
  • 24 | {% endblock %} 25 | 26 | {# Override footer if so desired. #} 27 | {% block footer %} 28 | {{ super() }} 29 | {% endblock %} 30 | 31 | {% block header %} 32 | 45 | {% endblock %} 46 | -------------------------------------------------------------------------------- /doc/source/examples.rst: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Examples 4 | ======== 5 | 6 | The sample codes that follow will show you the basics of building pipelines. The end functionality of these is trivial, and intended mainly to illustrate concepts of |NAME|. Feel free to run the examples, experiment by changing the code snippets, and use them as starting points for creating your own, more complex pipelines. 7 | 8 | Getting started 9 | --------------- 10 | 11 | .. toctree:: 12 | 13 | examples1 14 | 15 | Advanced topics 16 | --------------- 17 | 18 | .. toctree:: 19 | 20 | examples2 21 | 22 | Expert techniques 23 | ----------------- 24 | 25 | .. toctree:: 26 | 27 | examples3 28 | 29 | All examples have links to the source code on the right side above and below each code listing. In addition, all programs are located in the ``test/`` subdirectory of |NAME| distribution. 30 | 31 | When running the examples, keep in mind the multiprocessing nature of |NAME|, especially when running interactively from the Python interpreter (e.g. cut-and-pasting into the ``>>>`` prompt) -- some examples may display mangled text output, particularly in case of multiple processes simultaneously printing to stdout. For that reason, you may prefer to run the examples from your OS shell command prompt instead. If an example shows an actual command running a program, you can reproduce it by running the command from the root of the |NAME| distribution (directory that contains the ``test/`` subdirectory). 32 | 33 | .. End of file. 
34 | -------------------------------------------------------------------------------- /doc/source/_templates/search.html: -------------------------------------------------------------------------------- 1 | {# 2 | basic/search.html 3 | ~~~~~~~~~~~~~~~~~ 4 | 5 | Template for the search page. 6 | 7 | :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. 8 | :license: BSD, see LICENSE for details. 9 | #} 10 | {% extends "layout.html" %} 11 | {% set title = _('API Search') %} 12 | {% set script_files = script_files + ['_static/searchtools.js'] %} 13 | {% block extrahead %} 14 | 17 | {{ super() }} 18 | {% endblock %} 19 | {% block body %} 20 |

    {{ _('API Search') }}

    21 |
    22 | 23 |

    24 | {% trans %}Please activate JavaScript to enable the search 25 | functionality.{% endtrans %} 26 |

    27 |
    28 |

    29 | {% trans %}Search the MPipe module namespace. 30 | Enter your search word (method or class name.){% endtrans %} 31 |

    32 |
    33 | 34 | 35 | 36 |
    37 | {% if search_performed %} 38 |

    {{ _('Search Results') }}

    39 | {% if not search_results %} 40 |

    {{ _('Your search did not match any results.') }}

    41 | {% endif %} 42 | {% endif %} 43 |
    44 | {% if search_results %} 45 |
      46 | {% for href, caption, context in search_results %} 47 |
    • {{ caption }} 48 |
      {{ context|e }}
      49 |
    • 50 | {% endfor %} 51 |
    52 | {% endif %} 53 |
    54 | {% endblock %} 55 | -------------------------------------------------------------------------------- /test/test.py: -------------------------------------------------------------------------------- 1 | """Run all tests: look for gold files in given directory, 2 | run the corresponding test, then do a simple line-by-line 3 | comparison.""" 4 | 5 | import sys 6 | import os 7 | import inspect 8 | from subprocess import Popen, STDOUT, PIPE 9 | 10 | # Compute the absolute path to directory of this file. 11 | this_dir = os.path.dirname( 12 | os.path.abspath( 13 | inspect.getfile( 14 | inspect.currentframe()))) 15 | 16 | for entry in sorted(os.listdir(this_dir)): 17 | 18 | # Identify the file -- whether it is gold. 19 | # Gold files are *.gold for exact gold content, or 20 | # *.goldr if "random" (to be sorted before comparing. 21 | gold_fname = os.path.join(this_dir, entry) 22 | suffix = None 23 | for choice in ('.gold', '.goldr'): 24 | if gold_fname[-len(choice):] == choice: 25 | suffix = choice 26 | if not suffix: 27 | continue 28 | 29 | # Read the gold file. 30 | f = open(gold_fname) 31 | gold_lines = f.readlines() 32 | f.close() 33 | 34 | # Run the test. 35 | test_fname = gold_fname[:-len(suffix):] + '.py' 36 | command = '{0} {1}'.format(sys.executable, test_fname) 37 | print(command) 38 | p = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT) 39 | test_lines = p.stdout.readlines() 40 | 41 | # If test "randomizes" output, sort lines before comparing. 42 | if suffix == '.goldr': 43 | gold_lines = sorted(gold_lines) 44 | test_lines = sorted(test_lines) 45 | 46 | # Compare line by line. 47 | failed = False 48 | for gline, tline in zip(gold_lines, test_lines): 49 | gline = gline.strip() 50 | tline = tline.strip().decode() 51 | if gline != tline: 52 | print('Error running: {0}'.format(command)) 53 | failed = True 54 | break 55 | 56 | # On failure, save test output. 
57 | if failed: 58 | out_fname = gold_fname[:-len(suffix):] + '.out' 59 | f = open(out_fname, 'w') 60 | for line in test_lines: 61 | f.write(line.decode()) 62 | f.close 63 | -------------------------------------------------------------------------------- /doc/source/examples3.rst: -------------------------------------------------------------------------------- 1 | .. _examples3: 2 | 3 | .. _filtering: 4 | 5 | Filtering 6 | --------- 7 | 8 | If a stage can't process incoming tasks fast enough, we have a bottleneck situation at our hands. 9 | Imagine a stream of tasks feeding a pipeline at the rate of 100 tasks per second. 10 | A single-worker stage that takes 30% longer to process each task inevitably bottlenecks the workflow: 11 | 12 | .. container:: source-click-above 13 | 14 | [`source `_] 15 | 16 | .. literalinclude:: bottleneck1.py 17 | 18 | .. container:: source-click-below 19 | 20 | [`source `_] 21 | 22 | An easy way to fix this, of course, is to devote an additional worker to the stage: 23 | 24 | .. container:: source-click-above 25 | 26 | [`source `_] 27 | 28 | .. literalinclude:: bottleneck2.py 29 | 30 | .. container:: source-click-below 31 | 32 | [`source `_] 33 | 34 | But what if our design limits us to a single worker stage? 35 | If adding workers is not an option, we can instead choose to filter inputs before they reach the problematic stage, by dropping tasks that exceed capacity. 36 | For example, we can limit the carrying capacity of the pipeline to, say, a maximum load of 2 tasks. 37 | If a task arrives while the pipeline is "full" (i.e. is currently working on two tasks) then the new task is thrown away. 38 | This way we are able to keep up with the input flow, granted we lose any tasks that exceed the preset bandwidth. 39 | Running such a filter in our scenario, we lose the 6th and 10th task: 40 | 41 | .. image:: filter.png 42 | :align: center 43 | 44 | Implementing the solution is easy. 
Simply wrap the original stage into a :mod:`~mpipe.FilterStage`: 45 | 46 | .. container:: source-click-above 47 | 48 | [`source `_] 49 | 50 | .. literalinclude:: bottleneck3.py 51 | 52 | .. container:: source-click-below 53 | 54 | [`source `_] 55 | 56 | Running the above code shows the output below, the 6th and 10th task (index 5 and 9) conspicuously missing from the final result: 57 | :: 58 | 0 59 | 1 60 | 2 61 | 3 62 | 4 63 | 6 64 | 7 65 | 8 66 | 10 67 | 11 68 | 69 | .. the end 70 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """Distutils file for MPipe.""" 2 | 3 | from distutils.core import setup, Command 4 | import subprocess 5 | import inspect 6 | import shutil 7 | import os 8 | import sys 9 | 10 | class Clean2(Command): 11 | """A more thorough clean command.""" 12 | description = 'clean everything generated by the build command' 13 | user_options = [] 14 | def initialize_options(self): pass # Must override. 15 | def finalize_options(self): pass # Must override. 16 | def run(self): 17 | to_remove = ('build','dist','MANIFEST',) 18 | this_dir = os.path.dirname(inspect.getfile(inspect.currentframe())) 19 | this_dir = os.path.normpath(this_dir) 20 | for entry in os.listdir(this_dir): 21 | if entry not in to_remove: 22 | continue 23 | entry = os.path.join(this_dir, entry) 24 | print('erasing {0}'.format(entry)) 25 | if os.path.isfile(entry): 26 | os.remove(entry) 27 | elif os.path.isdir(entry): 28 | shutil.rmtree(entry) 29 | 30 | class Test(Command): 31 | """A custom test command.""" 32 | description = 'run custom test suite' 33 | user_options = [] 34 | def initialize_options(self): pass # Must override. 35 | def finalize_options(self): pass # Must override. 
36 | def run(self): 37 | this_dir = os.path.dirname(inspect.getfile(inspect.currentframe())) 38 | this_dir = os.path.normpath(this_dir) 39 | args = os.path.join(this_dir, 'test', 'test.py') 40 | command = '{0} {1}'.format(sys.executable, args) 41 | print(command) 42 | subprocess.call(command, shell=True) 43 | 44 | from src import __version__ 45 | 46 | setup( 47 | name = 'mpipe', 48 | version = __version__, 49 | description = 'Multiprocess pipeline toolkit', 50 | url = 'http://vmlaker.github.io/mpipe', 51 | author = 'Velimir Mlaker', 52 | author_email = 'velimir.mlaker@gmail.com', 53 | license = 'MIT', 54 | long_description = open('README.rst').read(), 55 | package_dir = {'mpipe' : 'src'}, 56 | packages = ['mpipe'], 57 | cmdclass = { 'clean2' : Clean2, 'test' : Test, }, 58 | classifiers = [ 59 | 'Development Status :: 5 - Production/Stable', 60 | 'Intended Audience :: Developers', 61 | 'License :: Freeware', 62 | 'Operating System :: OS Independent', 63 | 'Programming Language :: Python', 64 | 'Programming Language :: Python :: 3', 65 | 'Topic :: Software Development :: Libraries :: Application Frameworks', 66 | 'Topic :: Software Development :: Libraries :: Python Modules', 67 | ], 68 | ) 69 | -------------------------------------------------------------------------------- /doc/create.py: -------------------------------------------------------------------------------- 1 | """ 2 | Create the Sphinx documentation pages. 3 | Run from the current directory, e.g.: 4 | venv/bin/python ./create.py build/ 5 | """ 6 | 7 | import subprocess 8 | import mpipe 9 | import sys 10 | import os 11 | 12 | try: 13 | DEST = sys.argv[1] 14 | except: 15 | print('Usage: {} destination'.format(sys.argv[0])) 16 | sys.exit(1) 17 | 18 | # Diagram filename prefixes. 
19 | diagrams = ( 20 | 'tiny', 21 | 'helloworld', 22 | 'chain', 23 | 'pipeout', 24 | 'fork', 25 | 'taskresult1', 26 | 'worker1', 27 | 'worker2', 28 | 'stage1', 29 | 'pipeline1', 30 | 'multiwork', 31 | 'filter', 32 | ) 33 | 34 | # Export Dia diagrams. 35 | saved = os.getcwd() 36 | os.chdir('source') 37 | def runDia(diagram): 38 | """Generate the diagrams using Dia.""" 39 | ifname = '{}.dia'.format(diagram) 40 | ofname = '{}.png'.format(diagram) 41 | cmd = 'dia -t png-libart -e {} {}'.format(ofname, ifname) 42 | print(' {}'.format(cmd)) 43 | subprocess.call(cmd, shell=True) 44 | return True 45 | pipe = mpipe.Pipeline(mpipe.UnorderedStage(runDia, len(diagrams))) 46 | for diagram in diagrams: 47 | pipe.put(diagram) 48 | pipe.put(None) 49 | for result in pipe.results(): 50 | pass 51 | os.chdir(saved) 52 | 53 | # Copy the .py examples from test/ to source/ directory 54 | # so that they can be picked up by the Sphinx build. 55 | codes = ( 56 | 'tiny.py', 57 | 'helloworld.py', 58 | 'chain.py', 59 | 'pipeout.py', 60 | 'fork.py', 61 | 'unordered.py', 62 | 'count_nullops.py', 63 | 'multiwork.py', 64 | 'clog.py', 65 | 'drano.py', 66 | 'bottleneck1.py', 67 | 'bottleneck2.py', 68 | 'bottleneck3.py', 69 | 'disable_result0.py', 70 | ) 71 | def runCopy(fname): 72 | cmd = 'cp {} source/'.format(os.path.join('..', 'test', fname)) 73 | print(' {}'.format(cmd)) 74 | subprocess.call(cmd, shell=True) 75 | return True 76 | pipe = mpipe.Pipeline(mpipe.UnorderedStage(runCopy, len(codes))) 77 | for fname in codes: 78 | pipe.put(fname) 79 | pipe.put(None) 80 | for result in pipe.results(): 81 | pass 82 | 83 | # Build the Sphinx documentation pages. 84 | cmd = 'make BUILDDIR={} SPHINXBUILD={} clean html'.format(DEST, 'venv/bin/sphinx-build') 85 | print(' {}'.format(cmd)) 86 | subprocess.call(cmd, shell=True) 87 | 88 | # Move the .py examples to the build/ destination directory 89 | # so that documentation links to source code will work. 
90 | def runMove(fname): 91 | cmd = 'mv {} build/html/'.format(os.path.join('source', fname)) 92 | print(' {}'.format(cmd)) 93 | subprocess.call(cmd, shell=True) 94 | return True 95 | pipe = mpipe.Pipeline(mpipe.UnorderedStage(runMove, len(codes))) 96 | for fname in codes: 97 | pipe.put(fname) 98 | pipe.put(None) 99 | 100 | # Cleanup diagrams. 101 | saved = os.getcwd() 102 | os.chdir('source') 103 | def runDia(diagram): 104 | fname1 = '{}.dia~'.format(diagram) 105 | fname2 = '{}.png'.format(diagram) 106 | cmd = 'rm -f {} {}'.format(fname1, fname2) 107 | print(' {}'.format(cmd)) 108 | subprocess.call(cmd, shell=True) 109 | return True 110 | pipe = mpipe.Pipeline(mpipe.UnorderedStage(runDia, len(diagrams))) 111 | for diagram in diagrams: 112 | pipe.put(diagram) 113 | pipe.put(None) 114 | for result in pipe.results(): 115 | pass 116 | os.chdir(saved) 117 | -------------------------------------------------------------------------------- /doc/source/examples1.rst: -------------------------------------------------------------------------------- 1 | .. _examples1: 2 | 3 | .. _hello_world: 4 | 5 | Hello world 6 | ----------- 7 | 8 | Let's start with a really simple workflow, a "hello world" of pipelines if you will. 9 | 10 | .. image:: helloworld.png 11 | :align: center 12 | 13 | It just prints each input element as it comes in. Feeding it a stream of numbers 0, 1, 2, 3 simply echoes the numbers to your terminal's standard output. Here's the code: 14 | 15 | .. container:: source-click-above 16 | 17 | [`source `_] 18 | 19 | .. literalinclude:: helloworld.py 20 | 21 | .. container:: source-click-below 22 | 23 | [`source `_] 24 | 25 | The program output is: 26 | :: 27 | 28 | 0 29 | 1 30 | 2 31 | 3 32 | 33 | It's a silly pipeline that doesn't do much other than illustrate a few basic ideas. Note the last line that puts ``None`` on the pipeline -- this sends the "stop" task, effectively signaling all processes within the pipeline to terminate. 34 | 35 | .. 
_serializing_stages: 36 | 37 | Serializing stages 38 | ------------------ 39 | 40 | Multiple stages can be serially linked to create a sequential workflow: 41 | 42 | .. image:: chain.png 43 | :align: center 44 | 45 | .. container:: source-click-above 46 | 47 | [`source `_] 48 | 49 | .. literalinclude:: chain.py 50 | 51 | .. container:: source-click-below 52 | 53 | [`source `_] 54 | 55 | .. _pipeline_with_output: 56 | 57 | Pipeline with output 58 | -------------------- 59 | 60 | Have you noticed that the pipelines so far did not actually produce results at the output end? Here's a pipeline similar to the previous one, except that the final result is returned as *pipeline output* instead of being passed to a third stage: 61 | 62 | .. image:: pipeout.png 63 | :align: center 64 | 65 | Without a third stage doing the printing, the caller of the pipeline must print the final result: 66 | 67 | .. container:: source-click-above 68 | 69 | [`source `_] 70 | 71 | .. literalinclude:: pipeout.py 72 | 73 | .. container:: source-click-below 74 | 75 | [`source `_] 76 | 77 | Note that, before fetching results, we need to put the "stop" task on the pipeline. That's because :meth:`~mpipe.Pipeline.results()` returns a generator function that continues to fetch results so long as the pipeline remains alive. Without previously signaling "stop", the fetch loop would hang on the fifth iteration. 78 | 79 | Another way to fetch results is to call :meth:`~mpipe.Pipeline.get()` exactly four times. Using this method it doesn't matter whether you signal "stop" before or after the fetch loop: 80 | :: 81 | 82 | for foobar in range(4): 83 | print(pipe.get()) 84 | 85 | pipe.put(None) 86 | 87 | 88 | .. _forked_pipeline: 89 | 90 | Forked pipeline 91 | --------------- 92 | 93 | Imagine a pipeline that forks into two separate flows of execution: 94 | 95 | .. image:: fork.png 96 | :align: center 97 | 98 | We can fork into more than two paths, but let's keep it simple for now. 99 | 100 | ..
container:: source-click-above 101 | 102 | [`source `_] 103 | 104 | .. literalinclude:: fork.py 105 | 106 | .. container:: source-click-below 107 | 108 | [`source `_] 109 | 110 | This time instead of using standalone functions to implement the work, we used classes. It's really the same thing, but with classes you have greater potential for encapsulation and code organization when implementing complex stages. Note that this requires a slightly different way of creating stage objects, now using the :mod:`~mpipe.Stage` class. 111 | 112 | .. the end 113 | -------------------------------------------------------------------------------- /src/FilterWorker.py: -------------------------------------------------------------------------------- 1 | """Implements FilterWorker class.""" 2 | 3 | import sys 4 | from .OrderedWorker import OrderedWorker 5 | from .Pipeline import Pipeline 6 | 7 | class FilterWorker(OrderedWorker): 8 | """FilterWorker filters input to sub-pipelines.""" 9 | 10 | def __init__(self, stages, max_tasks=1, drop_results=False, cache_results=False): 11 | """Constructor takes an iterable of 12 | :class:`~mpipe.Stage` 13 | objects and creates one pipeline for each stage. 14 | The filter then propagates its input task as input into each pipeline, 15 | filtered by limiting the number of tasks allowed in the stream of a pipeline, 16 | given as *max_tasks* parameter. Any task in excess is not added to 17 | a topped-out pipeline. 18 | 19 | For every input task (even tasks not propagated to sub-pipelines) 20 | the filter stage produces a result. 21 | By default, as its result, the filter stage produces a tuple (task, results) 22 | where results is a list of results from all pipelines, 23 | unless *drop_results* is True, in which case it ignores any 24 | sub-pipeline result, and propagates only the input task. 25 | 26 | If *drop_results* is False, then *cache_results* flag may be used 27 | to save (i.e. cache) last results from pipelines. 
These are then 28 | used as repeated pipeline results when a pipeline does not produce 29 | a result upon the current input task. 30 | """ 31 | 32 | # Create a pipeline out of each stage. 33 | self._pipelines = list() 34 | self._task_counts = dict() # Maintain counts of tasks in pipes. 35 | for stage in stages: 36 | pipe = Pipeline(stage) 37 | self._pipelines.append(pipe) 38 | self._task_counts[pipe] = 0 # Initilize the task count. 39 | 40 | self._max_tasks = max_tasks 41 | self._drop_results = drop_results 42 | self._cache_results = cache_results 43 | 44 | # Maintain a table of last results from each pipeline. 45 | self._last_results = dict() 46 | 47 | def doTask(self, task): 48 | """Filter input *task* to pipelines -- make sure each one has no more 49 | than *max_tasks* tasks in it. Return a tuple 50 | (*task*, *results*) 51 | where *task* is the given task, and *results* is 52 | a list of latest retrieved results from pipelines.""" 53 | 54 | # If we're not caching, then clear the table of last results. 55 | if not self._cache_results: 56 | self._last_results = dict() 57 | 58 | # Iterate the list of pipelines, draining each one of any results. 59 | # For pipelines whose current stream has less than *max_tasks* tasks 60 | # remaining, feed them the current task. 61 | for pipe in self._pipelines: 62 | 63 | count = self._task_counts[pipe] 64 | 65 | # Let's attempt to drain all (if any) results from the pipeline. 66 | valid = True 67 | last_result = None 68 | while count and valid: 69 | valid, result = pipe.get(sys.float_info.min) 70 | if valid: 71 | last_result = result 72 | count -= 1 73 | 74 | # Unless we're dropping results, save the last result (if any). 75 | if not self._drop_results: 76 | if last_result is not None: 77 | self._last_results[pipe] = last_result 78 | 79 | # If there is room for the task, or if it is a "stop" request, 80 | # put it on the pipeline. 
81 | if count <= self._max_tasks-1 or task is None: 82 | pipe.put(task) 83 | count += 1 84 | 85 | # Update the task count for the pipeline. 86 | self._task_counts[pipe] = count 87 | 88 | # If we're only propagating the task, do so now. 89 | if self._drop_results: 90 | return task 91 | 92 | # Otherwise, also propagate the assembly of pipeline results. 93 | all_results = [res for res in self._last_results.values()] 94 | return task, all_results 95 | -------------------------------------------------------------------------------- /src/Stage.py: -------------------------------------------------------------------------------- 1 | """Implements Stage class.""" 2 | 3 | class Stage(object): 4 | """The Stage is an assembly of workers of identical functionality.""" 5 | 6 | def __init__( 7 | self, 8 | worker_class, 9 | size=1, 10 | disable_result=False, 11 | do_stop_task=False, 12 | input_tube=None, 13 | **worker_args 14 | ): 15 | """Create a stage of workers of given *worker_class* implementation, 16 | with *size* indicating the number of workers within the stage. 17 | *disable_result* overrides any result defined in worker implementation, 18 | and does not propagate it downstream (equivalent to the worker 19 | producing ``None`` result). 20 | 21 | *do_stop_task* indicates whether the incoming "stop" signal (``None`` value) 22 | will actually be passed to the worker as a task. When using this option, 23 | implement your worker so that, in addition to regular incoming tasks, 24 | it handles the ``None`` value as well. This will be 25 | the worker's final task before the process exits. 
26 | 27 | Any worker initialization arguments are given in *worker_args*.""" 28 | self._worker_class = worker_class 29 | self._worker_args = worker_args 30 | self._size = size 31 | self._disable_result = disable_result 32 | self._do_stop_task = do_stop_task 33 | self._input_tube = self._worker_class.getTubeClass()() \ 34 | if not input_tube else input_tube 35 | self._output_tubes = list() 36 | self._next_stages = list() 37 | 38 | def put(self, task): 39 | """Put *task* on the stage's input tube.""" 40 | self._input_tube.put((task,0)) 41 | 42 | def get(self, timeout=None): 43 | """Retrieve results from all the output tubes.""" 44 | valid = False 45 | result = None 46 | for tube in self._output_tubes: 47 | if timeout: 48 | valid, result = tube.get(timeout) 49 | if valid: 50 | result = result[0] 51 | else: 52 | result = tube.get()[0] 53 | if timeout: 54 | return valid, result 55 | return result 56 | 57 | def results(self): 58 | """Return a generator to iterate over results from the stage.""" 59 | while True: 60 | result = self.get() 61 | if result is None: break 62 | yield result 63 | 64 | def link(self, next_stage): 65 | """Link to the given downstream stage *next_stage* 66 | by adding its input tube to the list of this stage's output tubes. 67 | Return this stage.""" 68 | if next_stage is self: raise ValueError('cannot link stage to itself') 69 | self._output_tubes.append(next_stage._input_tube) 70 | self._next_stages.append(next_stage) 71 | return self 72 | 73 | def getLeaves(self): 74 | """Return the downstream leaf stages of this stage.""" 75 | result = list() 76 | if not self._next_stages: 77 | result.append(self) 78 | else: 79 | for stage in self._next_stages: 80 | leaves = stage.getLeaves() 81 | result += leaves 82 | return result 83 | 84 | def build(self): 85 | """Create and start up the internal workers.""" 86 | 87 | # If there's no output tube, it means that this stage 88 | # is at the end of a fork (hasn't been linked to any stage downstream). 
89 | # Therefore, create one output tube. 90 | if not self._output_tubes: 91 | self._output_tubes.append(self._worker_class.getTubeClass()()) 92 | 93 | self._worker_class.assemble( 94 | self._worker_args, 95 | self._input_tube, 96 | self._output_tubes, 97 | self._size, 98 | self._disable_result, 99 | self._do_stop_task, 100 | ) 101 | 102 | # Build all downstream stages. 103 | for stage in self._next_stages: 104 | stage.build() 105 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " singlehtml to make a single large HTML file" 22 | @echo " pickle to make pickle files" 23 | @echo " json to make JSON files" 24 | @echo " htmlhelp to make HTML files and a HTML help project" 25 | @echo " qthelp to make HTML files and a qthelp project" 26 | @echo " devhelp to make HTML files and a Devhelp project" 27 | @echo " epub to make an epub" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 30 | @echo " text to make text files" 31 | @echo " man to make manual pages" 32 | @echo " changes to make an overview of all 
changed/added/deprecated items" 33 | @echo " linkcheck to check all external links for integrity" 34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 35 | 36 | clean: 37 | -rm -rf $(BUILDDIR)/* 38 | 39 | html: 40 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 41 | @echo 42 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 43 | 44 | dirhtml: 45 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 48 | 49 | singlehtml: 50 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 51 | @echo 52 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 53 | 54 | pickle: 55 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 56 | @echo 57 | @echo "Build finished; now you can process the pickle files." 58 | 59 | json: 60 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 61 | @echo 62 | @echo "Build finished; now you can process the JSON files." 63 | 64 | htmlhelp: 65 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 66 | @echo 67 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 68 | ".hhp project file in $(BUILDDIR)/htmlhelp." 69 | 70 | qthelp: 71 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 72 | @echo 73 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 74 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 75 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/MPipe.qhcp" 76 | @echo "To view the help file:" 77 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/MPipe.qhc" 78 | 79 | devhelp: 80 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 81 | @echo 82 | @echo "Build finished." 
83 | @echo "To view the help file:" 84 | @echo "# mkdir -p $$HOME/.local/share/devhelp/MPipe" 85 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/MPipe" 86 | @echo "# devhelp" 87 | 88 | epub: 89 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 90 | @echo 91 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 92 | 93 | latex: 94 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 95 | @echo 96 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 97 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 98 | "(use \`make latexpdf' here to do that automatically)." 99 | 100 | latexpdf: 101 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 102 | @echo "Running LaTeX files through pdflatex..." 103 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 104 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 105 | 106 | text: 107 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 108 | @echo 109 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 110 | 111 | man: 112 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 113 | @echo 114 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 115 | 116 | changes: 117 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 118 | @echo 119 | @echo "The overview file is in $(BUILDDIR)/changes." 120 | 121 | linkcheck: 122 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 123 | @echo 124 | @echo "Link check complete; look for any errors in the above output " \ 125 | "or in $(BUILDDIR)/linkcheck/output.txt." 126 | 127 | doctest: 128 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 129 | @echo "Testing of doctests in the sources finished, look at the " \ 130 | "results in $(BUILDDIR)/doctest/output.txt." 
131 | -------------------------------------------------------------------------------- /src/UnorderedWorker.py: -------------------------------------------------------------------------------- 1 | """Implements UnorderedWorker class.""" 2 | 3 | import multiprocessing 4 | from .TubeQ import TubeQ 5 | 6 | class UnorderedWorker(multiprocessing.Process): 7 | """An UnorderedWorker object operates independently of other 8 | workers in the stage, fetching the first available task, and 9 | publishing its result whenever it is done 10 | (without coordinating with neighboring workers). 11 | Consequently, the order of output results may not match 12 | that of corresponding input tasks.""" 13 | 14 | def __init__(self): 15 | pass 16 | 17 | def init2( 18 | self, 19 | input_tube, # Read task from the input tube. 20 | output_tubes, # Send result on all the output tubes. 21 | num_workers, # Total number of workers in the stage. 22 | disable_result, # Whether to override any result with None. 23 | do_stop_task, # Whether to call doTask() on "stop" request. 24 | ): 25 | """Create *num_workers* worker objects with *input_tube* and 26 | an iterable of *output_tubes*. The worker reads a task from *input_tube* 27 | and writes the result to *output_tubes*.""" 28 | 29 | super(UnorderedWorker, self).__init__() 30 | self._tube_task_input = input_tube 31 | self._tubes_result_output = output_tubes 32 | self._num_workers = num_workers 33 | self._disable_result = disable_result 34 | self._do_stop_task = do_stop_task 35 | 36 | @staticmethod 37 | def getTubeClass(): 38 | """Return the tube class implementation.""" 39 | return TubeQ 40 | 41 | @classmethod 42 | def assemble( 43 | cls, 44 | args, 45 | input_tube, 46 | output_tubes, 47 | size, 48 | disable_result, 49 | do_stop_task, 50 | ): 51 | """Create, assemble and start workers. 52 | Workers are created of class *cls*, initialized with *args*, and given 53 | task/result communication channels *input_tube* and *output_tubes*. 
54 | The number of workers created is according to *size* parameter. 55 | *do_stop_task* indicates whether doTask() will be called for "stop" request. 56 | """ 57 | 58 | # Create the workers. 59 | workers = [] 60 | for ii in range(size): 61 | worker = cls(**args) 62 | worker.init2( 63 | input_tube, 64 | output_tubes, 65 | size, 66 | disable_result, 67 | do_stop_task, 68 | ) 69 | workers.append(worker) 70 | 71 | # Start the workers. 72 | for worker in workers: 73 | worker.start() 74 | 75 | def putResult(self, result): 76 | """Register the *result* by putting it on all the output tubes.""" 77 | for tube in self._tubes_result_output: 78 | tube.put((result, 0)) 79 | 80 | def run(self): 81 | 82 | # Run implementation's initialization. 83 | self.doInit() 84 | 85 | while True: 86 | try: 87 | (task, count) = self._tube_task_input.get() 88 | except: 89 | (task, count) = (None, 0) 90 | 91 | # In case the task is None, it represents the "stop" request, 92 | # the count being the number of workers in this stage that had 93 | # already stopped. 94 | if task is None: 95 | 96 | # If this worker is the last one (of its stage) to receive the 97 | # "stop" request, propagate "stop" to the next stage. Otherwise, 98 | # maintain the "stop" signal in this stage for another worker that 99 | # will pick it up. 100 | count += 1 101 | if count == self._num_workers: 102 | self.putResult(None) 103 | else: 104 | self._tube_task_input.put((None, count)) 105 | 106 | # In case we're calling doTask() on a "stop" request, do so now. 107 | if self._do_stop_task: 108 | self.doTask(None) 109 | 110 | # Honor the "stop" request by exiting the process. 111 | break 112 | 113 | # The task is not None, meaning that it is an actual task to 114 | # be processed. Therefore let's call doTask(). 
115 | result = self.doTask(task) 116 | 117 | # Unless result is disabled, 118 | # if doTask() actually returns a result (and the result is not None), 119 | # it indicates that it did not call putResult(), instead intending 120 | # it to be called now. 121 | if not self._disable_result and result is not None: 122 | self.putResult(result) 123 | 124 | def doTask(self, task): 125 | """Implement this method in the subclass with work 126 | to be executed on each *task* object. 127 | The implementation can publish the output result in one of two ways, 128 | either by 1) calling :meth:`putResult` and returning ``None``, or 129 | 2) returning the result (other than ``None``).""" 130 | return True 131 | 132 | def doInit(self): 133 | """Implement this method in the subclass in case there's need 134 | for additional initialization after process startup. 135 | Since this class inherits from :class:`multiprocessing.Process`, 136 | its constructor executes in the spawning process. 137 | This method allows additional code to be run in the forked process, 138 | before the worker begins processing input tasks. 139 | """ 140 | return None 141 | -------------------------------------------------------------------------------- /doc/source/examples2.rst: -------------------------------------------------------------------------------- 1 | .. _examples2: 2 | 3 | .. _unordered_worker_stage: 4 | 5 | Unordered worker/stage 6 | ---------------------- 7 | 8 | So far we've only used ordered workers/stages. If you don't care for matching sequences of tasks with results, use unordered stages instead. Here's an unordered version of the :ref:`pipeline with output example `: 9 | 10 | .. container:: source-click-above 11 | 12 | [`source `_] 13 | 14 | .. literalinclude:: unordered.py 15 | 16 | .. 
container:: source-click-below 17 | 18 | [`source `_] 19 | 20 | One thing to keep in mind: if your stage has only one worker, it makes no difference whether you use Ordered or Unordered objects -- in either case the stage will behave as if it's ordered. That's why here we add an extra worker to each stage. The results, although all accounted for, appear out-of-order as expected: 21 | :: 22 | 23 | 6 24 | 10 25 | 2 26 | 12 27 | 16 28 | 20 29 | 4 30 | 8 31 | 14 32 | 18 33 | 34 | Take a look at :ref:`ordered_vs_unordered` for further discussion about these two different approaches to stage design. 35 | 36 | .. _multiple_workers_per_stage: 37 | 38 | Multiple workers per stage 39 | -------------------------- 40 | 41 | The previous example used two workers per stage, if only to illustrate the concept of unordered stages. A more realistic reason for dedicating additional workers to a stage is to gain processing speed (on multi-core or multi-CPU computer). 42 | 43 | Consider a stream of five tasks feeding a single-stage pipeline running two workers: 44 | 45 | .. image:: multiwork.png 46 | :align: center 47 | 48 | Let's assume the tasks arrive together in a batch, in quick succession, one right after another. If each one takes a full second of dedicated CPU time to complete, then on an SMP computer (assuming full system utilization) this workflow will take three seconds wall-clock time to complete: one second for the first two tasks done in parallel, another for the next two, and finally one more second for the last task, run by just one worker. Total CPU load will be 1.67, computed as: 49 | 50 | 166.67% total = 100% cpu * 5 tasks / 3 seconds 51 | 52 | Since we don't have any useful functionality (er, keeping in tradition with previous examples) let's create a silly function that takes a measurable amount of time to complete. Here's a program that prints the number of null-ops (Python ``pass`` statement) needed to spin -- or busy wait -- the CPU for one second: 53 | 54 | .. 
container:: source-click-above 55 | 56 | [`source `_] 57 | 58 | .. literalinclude:: count_nullops.py 59 | 60 | .. container:: source-click-below 61 | 62 | [`source `_] 63 | 64 | And here is our pipeline implementation, a program that takes as command-line argument the number of null-ops: 65 | 66 | .. container:: source-click-above 67 | 68 | [`source `_] 69 | 70 | .. literalinclude:: multiwork.py 71 | 72 | .. container:: source-click-below 73 | 74 | [`source `_] 75 | 76 | We can now run our scenario by passing the output of the first program as command-line argument to the second. A simple resource usage profile of the pipeline can be captured with something like this: 77 | :: 78 | 79 | python test/count_nullops.py|xargs time python test/multiwork.py 80 | 81 | If interested, modify the pipeline by changing the number of workers and/or input tasks. Also, consider running it on different computers -- with different processor resources -- and compare results. 82 | 83 | Handling many tasks 84 | ------------------- 85 | 86 | A pipeline can easily clog with too many inputs. If you run the code below, it appears that the program freezes, while actually it is slowly draining your system memory: 87 | 88 | .. container:: source-click-above 89 | 90 | [`source `_] 91 | 92 | .. literalinclude:: clog.py 93 | 94 | .. container:: source-click-below 95 | 96 | [`source `_] 97 | 98 | What's going on here? Note that we first put all tasks on the pipeline, and then follow up by retrieving the results. Unfortunately, we never get to the retrieval part because we're too busy adding a whole bunch of tasks to the pipeline (``sys.maxint`` is a huge number). 99 | 100 | You probably Ctl-c'd out of the program by now. 101 | 102 | Here's what happened: feeding the pipeline just kept using up a bunch of system resources (socket buffers, to be precise) and slowing down the program. The program never got past the feed loop into the fetch-results loop. 
103 | 104 | One solution is to feed the pipeline with tasks while, concurrently in another process, retrieving the results. This way we balance consumption of system resources, allocating them on one end, while simultaneously releasing them at the other. In the version below, we create another pipeline to do just that: 105 | 106 | .. container:: source-click-above 107 | 108 | [`source `_] 109 | 110 | .. literalinclude:: drano.py 111 | 112 | .. container:: source-click-below 113 | 114 | [`source `_] 115 | 116 | Note that we start doing the retrieval in the second pipeline by adding a single arbitrary task -- in this case the value ``True`` -- before putting any tasks on the first pipeline. In this way we are ready to release resources as soon as the first pipeline starts producing results. 117 | 118 | Disabling results 119 | ----------------- 120 | 121 | Even if your worker implementation produces a result, you may choose to disable it: 122 | 123 | .. container:: source-click-above 124 | 125 | [`source `_] 126 | 127 | .. literalinclude:: disable_result0.py 128 | 129 | .. container:: source-click-below 130 | 131 | [`source `_] 132 | 133 | This pipeline does not produce output -- the final for-loop is useless. 134 | 135 | Results of a pipeline use system resources for storage until they're eventually retrieved using :meth:`~mpipe.Pipeline.get` or :meth:`~mpipe.Pipeline.results`. But sometimes you may not want this effect, especially if the workflow produces a lot of output, none of which you care about. By disabling results, you're able to reuse an existing worker/stage implementation, while forcing all output to be thrown away. 136 | 137 | .. End of file. 138 | -------------------------------------------------------------------------------- /doc/source/concepts.rst: -------------------------------------------------------------------------------- 1 | ..
_concepts: 2 | 3 | Pipeline concepts 4 | ================= 5 | 6 | Pipelines are dynamic multi-processing workflows exhibiting some consistent patterns in their design: 7 | 8 | #. a *worker* process operating on an input *task* and producing an output *result*, 9 | #. a *stage* composed of one or more identical workers concurrently processing a stream of tasks, 10 | #. a *pipeline* of stages linked together, output of the upstream stage fed as input into one or more downstream stages. 11 | 12 | A *pipeline* is then conceptually composed of *stages*, each stage being an assembly of a number of (identical) *worker* processes. Messages that flow down the pipeline, from stage to stage, are called *tasks* or *results*, depending on whether they're viewed as input or output to a stage. 13 | 14 | .. _task_result: 15 | 16 | Task, result 17 | ------------ 18 | 19 | The unit of data that passes through the workflow is called a *task* when it is the input to a pipeline/stage/worker, and a *result* when it's the output thereof. 20 | 21 | .. image:: taskresult1.png 22 | :align: center 23 | 24 | Most often a data object is both, depending on the context -- whether it's viewed as the output of an upstream producer, or input to the downstream consumer. It can be any pickleable Python object: a standard Python data type like string or list, or a user-defined object. The only requirement is that it is picklable. 25 | 26 | Worker 27 | ------ 28 | 29 | .. image:: worker1.png 30 | :align: center 31 | 32 | A worker is a basic unit of processing -- it operates some activity on a task, and (usually) produces a result. The worker is where you implement some elemental functionality of your overall program. 33 | 34 | .. image:: worker2.png 35 | :align: center 36 | 37 | A worker exists in a *stage*, alone or with other functionally identical workers, depending on how much system resources you choose to devote to that stage (each worker running in a separate process). 
38 | 39 | Stage 40 | ----- 41 | 42 | The stage is an assembly of workers. It accepts a stream of input tasks, delegates them to its workers, and usually produces a stream of output results (products of its internal workers). You can think of the stage as a corral of functionally identical workers, with added synchronization used to delegate tasks and organize results among the individual worker processes. The activity of the stage is therefore defined by the workers therein. 43 | 44 | A stage can be linked to another stage to form a chain: 45 | 46 | .. image:: stage1.png 47 | :align: center 48 | 49 | It can even be linked to multiple downstream stages, splitting the workflow into parallel execution paths. 50 | 51 | Pipeline 52 | -------- 53 | 54 | The pipeline is composed of linked stages forming a unidirectional workflow. 55 | 56 | .. image:: pipeline1.png 57 | :align: center 58 | 59 | Input tasks are fed into the most-upstream stage. Pipeline results, if any, are fetched from outputs of downstream stages. 60 | 61 | .. _ordered_vs_unordered: 62 | 63 | Ordered vs. unordered stages 64 | ---------------------------- 65 | 66 | A worker within a stage can operate in two different ways in relation to other workers: it can be arranged in a specific *order* with respect to its siblings, or unordered, functioning independently of other workers in the stage. 67 | 68 | An **ordered stage** preserves a sequence among its workers, each worker having a *previous* and *next* neighbor worker, all workers thusly arranged in a circular fashion. It guarantees that inputs will be processed in this sequence, the *previous* worker operating on the previous task, and likewise the *next* one working on the task immediately after. Just as stage inputs are processed in order, stage outputs become available in the same sequence: the *previous* worker's result showing up before the current worker's, and the *next* one's result after. 
In case a worker completes processing ahead of its previous neighbor, it waits for it, adding the result to stage output right after its predecessor. In other words, *order of stage output results matches that of its input tasks.* 69 | 70 | An **unordered stage** adheres to no such symmetry among its worker processes. Each worker starts processing the earliest available task, and as soon as it's done, makes the result available as stage output. 71 | 72 | Choosing between the two depends on the nature of problem. Many signal processing applications require preserved order of inputs and outputs. On the other hand, certain file system tasks -- like source code compiling -- may safely ignore input/output sequences. And, naturally, if your stage operates just a single worker, it doesn't matter which type you use: the output sequence will be ordered. 73 | 74 | .. Performance may vary between the two types. In an ordered stage, a worker may still be processing a task while its *next* neighbor has already completed its own (downstream, i.e. "future") task. The *next* worker will have to wait idly, unable to process any new incoming task. This effect -- due to the dependency relationship of neighboring workers in an *unordered* stage -- is not an issue when using with *unordered* stage. 75 | 76 | .. _multiprocessing: 77 | 78 | Multiprocessing 79 | --------------- 80 | 81 | The gist of structuring your program as a pipeline is to maximize algorithm speed by utilizing additional processing facilities of multi-core and multi-CPU computers. Parallel processing manifests itself in a number of ways in the pipeline workflow: 82 | 83 | 1. Pipeline can have multiple stages in series. 84 | 85 | The program is divided into a sequence of sub-algorithms. This benefits situations where the combined algorithm takes longer than the arrival interval between tasks. The pipeline can begin handling the next input task before the previous is completed. 86 | 87 | 2. 
Stage can contain multiple workers. 88 | 89 | If the input stream is even faster, such that computing time for a stage is longer that the interval between incoming tasks, additional workers at the stage can ameliorate a bottlenecked flow. Take a look at :ref:`multiple_workers_per_stage` for an illustration of potential speedup using this strategy. 90 | 91 | 3. Pipeline can fork into parallel stages. 92 | 93 | If the program workflow can be split into multiple independent execution paths, then parallel paths can be processed simultaneously. 94 | 95 | .. End of file. 96 | -------------------------------------------------------------------------------- /doc/source/cookbook.rst: -------------------------------------------------------------------------------- 1 | .. _operation: 2 | 3 | |NAME| cookbook 4 | =============== 5 | 6 | A pipeline algorithm is implemented using classes from the :mod:`mpipe` module. 7 | The building blocks of pipelines map to specific Python objects: 8 | 9 | +---------------------+-------------------------------------+ 10 | | Framework element | Python construct | 11 | +=====================+=====================================+ 12 | | *task*, *result* | any Python picklable object | 13 | +---------------------+-------------------------------------+ 14 | | *worker* | single-argument function, | 15 | | | :mod:`~mpipe.OrderedWorker` or | 16 | | | :mod:`~mpipe.UnorderedWorker` | 17 | +---------------------+-------------------------------------+ 18 | | *stage* | :mod:`~mpipe.Stage`, | 19 | | | :mod:`~mpipe.OrderedStage` or | 20 | | | :mod:`~mpipe.UnorderedStage` | 21 | +---------------------+-------------------------------------+ 22 | | *pipeline* | :mod:`~mpipe.Pipeline` | 23 | +---------------------+-------------------------------------+ 24 | 25 | It may be useful to keep in mind that |NAME| is built using classes from Python's standard :mod:`multiprocessing` module. 
It is a layer on top, encapsulating classes like :class:`~multiprocessing.Process`, :class:`~multiprocessing.Queue` and :class:`~multiprocessing.Connection` with behavior specific to the pipeline workflow. 26 | 27 | The procedure of building and running a pipeline is a sequence of five steps: 28 | 29 | #. :ref:`define workers ` 30 | #. :ref:`create stage objects ` 31 | #. :ref:`link the stages ` 32 | #. :ref:`create pipeline object ` 33 | #. :ref:`operate the pipeline ` 34 | 35 | .. _define_workers: 36 | 37 | 1. Define workers 38 | ----------------- 39 | 40 | Start by defining the work that will be performed by individual workers of your stages. The easiest way is to write a function that takes a single *task* parameter: 41 | :: 42 | 43 | def doSomething(task): 44 | result = f(task) 45 | return result 46 | 47 | The function's return value becomes result of the stage. If it doesn't return anything (or ``None``), then the stage is considered a dead-end stage, not producing any output. 48 | 49 | The other way is to subclass from :mod:`~mpipe.OrderedWorker` or :mod:`~mpipe.UnorderedWorker` and put the actual work inside the :meth:`doTask()` method: 50 | :: 51 | 52 | class MyWorker(mpipe.OrderedWorker): 53 | def doTask(task): 54 | result = f(task) 55 | return result 56 | 57 | Just like when using a standalone function, stage result is the return value of :meth:`doTask()`. Another option is to call :meth:`putResult()`. This can be useful if you want your worker to continue processing after registering the stage result: 58 | :: 59 | 60 | class MyWorker(mpipe.OrderedWorker): 61 | def doTask(task): 62 | result = f(task) 63 | self.putResult(result) 64 | # Do some more stuff. 65 | 66 | .. _create_stage_objects: 67 | 68 | 2. Create stage objects 69 | ----------------------- 70 | 71 | Having defined your workers, the next step is to instantiate stage objects. With standalone work functions, the stage is created with :mod:`~mpipe.OrderedStage` or :mod:`~mpipe.UnorderedStage`. 
72 | :: 73 | 74 | stage1 = mpipe.OrderedStage(doSomething, 3) 75 | 76 | When using worker classes, create a :mod:`~mpipe.Stage` object instead: 77 | :: 78 | 79 | stage2 = mpipe.Stage(MyWorker, 4) 80 | 81 | In both cases the second argument is the number of processes devoted to the particular stage. 82 | 83 | .. _link_the_stages: 84 | 85 | 3. Link the stages 86 | ------------------ 87 | 88 | If there are multiple stages in the workflow, they can be linked together in series: 89 | :: 90 | 91 | stage1.link(stage2) 92 | stage2.link(stage3) 93 | 94 | The :meth:`~mpipe.Stage.link` method returns the stage object it is called on, allowing you to serially link many stages in a single statement. Here's the equivalent of above: 95 | :: 96 | 97 | stage1.link(stage2.link(stage3)) 98 | 99 | Output of one stage may also be forked into multiple downstream stages, splitting the workflow into parallel streams of execution: 100 | :: 101 | 102 | stage1.link(stage2) 103 | stage1.link(stage3) 104 | stage1.link(stage4) 105 | 106 | 107 | .. _create_pipeline_object: 108 | 109 | 4. Create pipeline object 110 | ------------------------- 111 | 112 | A pipeline is created by passing the root upstream stage to the :mod:`~mpipe.Pipeline` constructor: 113 | :: 114 | 115 | pipe = mpipe.Pipeline(stage1) 116 | 117 | Once built, the pipeline has allocated and started all designated processes. At this point the pipeline is waiting for input, its worker processes idle and ready. 118 | 119 | .. _operate_the_pipeline: 120 | 121 | 5. Operate the pipeline 122 | ----------------------- 123 | 124 | From this point on, operating the pipeline is solely accomplished by manipulating the :mod:`~mpipe.Pipeline` object.
Input tasks are fed using :meth:`~mpipe.Pipeline.put()`: 125 | :: 126 | 127 | pipe.put(something) 128 | 129 | Output results, if any, are fetched using :meth:`~mpipe.Pipeline.get()`: 130 | :: 131 | 132 | result = pipe.get() 133 | 134 | Alternatively, one can iterate the output stream with :meth:`~mpipe.Pipeline.results()` method: 135 | :: 136 | 137 | for result in pipe.results(): 138 | print(result) 139 | 140 | At some point in manipulating the pipeline, the special task ``None`` should be put on it. 141 | :: 142 | 143 | pipe.put(None) 144 | 145 | This signals the end of input stream and eventually terminates all worker processes, effectively "closing" the pipeline to further input. 146 | 147 | The ``None`` task can be thought of as a "stop" request. It becomes part of the sequence of input tasks streaming into the pipeline and, like other tasks, it propagates through all stages. However, it is processed in a special way: when it arrives at a stage, it signals all worker processes within to complete any current task they may be running, and to terminate execution. Before the last worker terminates, it propagates the "stop" request to the next downstream stage (or stages, if forked). 148 | 149 | The ``None`` task should be the last input to the pipeline. After it is added to the stream of tasks, the pipeline continues to process any previous tasks still in the system. After worker processes terminate, results can still be accessed in the usual way (using :meth:`~mpipe.Pipeline.get()` or :meth:`~mpipe.Pipeline.results()`) until the pipeline is emptied. However, any "real" task (i.e. not ``None``) put on the pipeline following the "stop" request will not be processed.
"""Implements OrderedWorker class."""

import multiprocessing
from .TubeP import TubeP

class OrderedWorker(multiprocessing.Process):
    """An OrderedWorker object operates in a stage where the order
    of output results always matches that of corresponding input tasks.

    A worker is linked to its two nearest neighbors -- the previous
    worker and the next -- all workers in the stage thusly connected
    in circular fashion.
    Input tasks are fetched in this order. Before publishing its result,
    a worker first waits for its previous neighbor to do the same."""

    def __init__(self):
        # Deliberately empty: real initialization happens in init2(),
        # which assemble() calls after constructing the worker with the
        # subclass's own *args*.
        pass

    def init2(
        self,
        input_tube,      # Read task from the input tube.
        output_tubes,    # Send result on all the output tubes.
        num_workers,     # Total number of workers in the stage.
        disable_result,  # Whether to override any result with None.
        do_stop_task,    # Whether to call doTask() on "stop" request.
        ):
        """Create *num_workers* worker objects with *input_tube* and
        an iterable of *output_tubes*. The worker reads a task from *input_tube*
        and writes the result to *output_tubes*."""

        super(OrderedWorker, self).__init__()
        self._tube_task_input = input_tube
        self._tubes_result_output = output_tubes
        self._num_workers = num_workers

        # Serializes reading from input tube.
        # Both lock pairs are assigned later, in _link(), once this
        # worker's neighbors in the circular chain are known.
        self._lock_prev_input = None
        self._lock_next_input = None

        # Serializes writing to output tube.
        self._lock_prev_output = None
        self._lock_next_output = None

        self._disable_result = disable_result
        self._do_stop_task = do_stop_task

    @staticmethod
    def getTubeClass():
        """Return the tube class implementation."""
        return TubeP

    @classmethod
    def assemble(
        cls,
        args,
        input_tube,
        output_tubes,
        size,
        disable_result=False,
        do_stop_task=False,
        ):
        """Create, assemble and start workers.
        Workers are created of class *cls*, initialized with *args*, and given
        task/result communication channels *input_tube* and *output_tubes*.
        The number of workers created is according to *size* parameter.
        *do_stop_task* indicates whether doTask() will be called for "stop" request.
        """

        # Create the workers.
        workers = []
        for ii in range(size):
            worker = cls(**args)
            worker.init2(
                input_tube,
                output_tubes,
                size,
                disable_result,
                do_stop_task,
            )
            workers.append(worker)

        # Connect the workers.
        # For ii == 0, workers[ii-1] is workers[-1] (the last worker),
        # which closes the chain into a circle.
        for ii in range(size):
            worker_this = workers[ii]
            worker_prev = workers[ii-1]
            worker_prev._link(
                worker_this,
                next_is_first=(ii==0),  # Designate 0th worker as the first.
            )

        # Start the workers.
        for worker in workers:
            worker.start()

    def _link(self, next_worker, next_is_first=False):
        """Link the worker to the given next worker object,
        connecting the two workers with communication tubes."""

        # Each shared lock starts out acquired (meaning "not your turn");
        # a worker's turn arrives when its predecessor releases the lock.
        lock = multiprocessing.Lock()
        next_worker._lock_prev_input = lock
        self._lock_next_input = lock
        lock.acquire()

        lock = multiprocessing.Lock()
        next_worker._lock_prev_output = lock
        self._lock_next_output = lock
        lock.acquire()

        # If the next worker is the first one, trigger it now.
        # Releasing here hands the 0th worker the initial turn both to
        # read input and to publish output.
        if next_is_first:
            self._lock_next_input.release()
            self._lock_next_output.release()

    def putResult(self, result):
        """Register the *result* by putting it on all the output tubes."""
        # Wait for the previous worker to publish first, so the output
        # order matches the input order.
        self._lock_prev_output.acquire()
        for tube in self._tubes_result_output:
            # The second tuple element (0) is the stopped-worker count;
            # it is only meaningful for the "stop" (None) signal.
            tube.put((result, 0))
        self._lock_next_output.release()

    def run(self):

        # Run implementation's initialization.
        self.doInit()

        while True:
            try:
                # Wait on permission from the previous worker that
                # it is okay to retrieve the input task.
                self._lock_prev_input.acquire()

                # Retrieve the input task.
                (task, count) = self._tube_task_input.get()

                # Permit the next worker to retrieve the input task.
                self._lock_next_input.release()

            except:
                # NOTE(review): bare except -- ANY failure while waiting or
                # reading (closed tube, KeyboardInterrupt, ...) is treated
                # as a "stop" request. Presumably intentional for graceful
                # shutdown of worker processes; confirm before narrowing.
                (task, count) = (None, 0)

            # In case the task is None, it represents the "stop" request,
            # the count being the number of workers in this stage that had
            # already stopped.
            if task is None:

                # If this worker is the last one (of its stage) to receive the
                # "stop" request, propagate "stop" to the next stage. Otherwise,
                # maintain the "stop" signal in this stage for another worker that
                # will pick it up.
                count += 1
                if count == self._num_workers:

                    # Propagating the "stop" to the next stage does not require
                    # synchronization with previous and next worker because we're
                    # guaranteed (from the count value) that this is the last worker alive.
                    # Therefore, just put the "stop" signal on the result tube.
                    for tube in self._tubes_result_output:
                        tube.put((None, 0))

                else:
                    # Recycle the "stop" signal (with the updated count) back
                    # onto this stage's input tube for a sibling worker.
                    self._tube_task_input.put((None, count))

                # In case we're calling doTask() on a "stop" request, do so now.
                if self._do_stop_task:
                    self.doTask(None)

                # Honor the "stop" request by exiting the process.
                break

            # The task is not None, meaning that it is an actual task to
            # be processed. Therefore let's call doTask().
            result = self.doTask(task)

            # Unless result is disabled,
            # if doTask() actually returns a result (and the result is not None),
            # it indicates that it did not call putResult(), instead intending
            # it to be called now.
            if not self._disable_result and result is not None:
                self.putResult(result)

    def doTask(self, task):
        """Implement this method in the subclass with work functionality
        to be executed on each *task* object.
        The implementation can publish the output result in one of two ways,
        either by 1) calling :meth:`putResult` and returning ``None``, or
        2) returning the result (other than ``None``)."""
        return True

    def doInit(self):
        """Implement this method in the subclass in case there's need
        for additional initialization after process startup.
        Since this class inherits from :class:`multiprocessing.Process`,
        its constructor executes in the spawning process.
        This method allows additional code to be run in the forked process,
        before the worker begins processing input tasks.
        """
        return None
11 | #sys.path.insert(0, os.path.abspath('.')) 12 | sys.path.insert(0, os.path.abspath('../../tools')) 13 | 14 | # -- General configuration ----------------------------------------------------- 15 | 16 | # If your documentation needs a minimal Sphinx version, state it here. 17 | needs_sphinx = '1.0' 18 | 19 | # Add any Sphinx extension module names here, as strings. They can be extensions 20 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 21 | extensions = [ 22 | 'sphinx.ext.autodoc', 23 | 'sphinx.ext.pngmath', 24 | 'sphinx.ext.intersphinx', 25 | 26 | # Create links to Python source code for the module. 27 | # 'sphinx.ext.viewcode', 28 | 29 | 'sphinx.ext.autosummary', 30 | 'sphinx.ext.inheritance_diagram', 31 | ] 32 | 33 | # Add any locations and names of other projects that should be linked to in this documentation. 34 | intersphinx_mapping = { 35 | 'python': ('http://docs.python.org', None), 36 | } 37 | 38 | # Add any paths that contain templates here, relative to this directory. 39 | templates_path = ['_templates'] 40 | 41 | # The suffix of source filenames. 42 | source_suffix = '.rst' 43 | 44 | # The encoding of source files. 45 | #source_encoding = 'utf-8-sig' 46 | 47 | # The master toctree document. 48 | master_doc = 'index' 49 | 50 | # General information about the project. 51 | project = u'MPipe' 52 | copyright = u'2014, Velimir Mlaker' 53 | 54 | # The version info for the project you're documenting, acts as replacement for 55 | # |version| and |release|, also used in various other places throughout the 56 | # built documents. 57 | # 58 | # The short X.Y version. 59 | version = '0.1' 60 | # The full version, including alpha/beta/rc tags. 61 | release = '' 62 | 63 | # The language for content autogenerated by Sphinx. Refer to documentation 64 | # for a list of supported languages. 
65 | #language = None 66 | 67 | # There are two options for replacing |today|: either, you set today to some 68 | # non-false value, then it is used: 69 | #today = '' 70 | # Else, today_fmt is used as the format for a strftime call. 71 | #today_fmt = '%B %d, %Y' 72 | 73 | # List of patterns, relative to source directory, that match files and 74 | # directories to ignore when looking for source files. 75 | exclude_patterns = [] 76 | 77 | # The reST default role (used for this markup: `text`) to use for all documents. 78 | #default_role = None 79 | 80 | # If true, '()' will be appended to :func: etc. cross-reference text. 81 | #add_function_parentheses = True 82 | 83 | # If true, the current module name will be prepended to all description 84 | # unit titles (such as .. function::). 85 | add_module_names = True 86 | 87 | # If true, sectionauthor and moduleauthor directives will be shown in the 88 | # output. They are ignored by default. 89 | #show_authors = False 90 | 91 | # The name of the Pygments (syntax highlighting) style to use. 92 | pygments_style = 'sphinx' 93 | 94 | # A list of ignored prefixes for module index sorting. 95 | #modindex_common_prefix = ['wm5.', 'wm5', ] 96 | 97 | # Set this to 'both' to append the __init__(self) docstring to the class docstring. 98 | autoclass_content = 'both' 99 | 100 | # -- Options for HTML output --------------------------------------------------- 101 | 102 | # The theme to use for HTML and HTML Help pages. See the documentation for 103 | # a list of builtin themes. 104 | html_theme = 'mpipe' 105 | 106 | # Theme options are theme-specific and customize the look and feel of a theme 107 | # further. For a list of options available for each theme, see the 108 | # documentation. 109 | #html_theme_options = {} 110 | 111 | # Add any paths that contain custom themes here, relative to this directory. 112 | html_theme_path = ['_themes'] 113 | 114 | # The name for this set of Sphinx documents. 
If None, it defaults to 115 | # " v documentation". 116 | html_title = '{0} Documentation'.format(project) 117 | 118 | # A shorter title for the navigation bar. Default is the same as html_title. 119 | #html_short_title = None 120 | 121 | # The name of an image file (relative to this directory) to place at the top 122 | # of the sidebar. 123 | #html_logo = None 124 | 125 | # The name of an image file (within the static path) to use as favicon of the 126 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 127 | # pixels large. 128 | #html_favicon = None 129 | 130 | # Add any paths that contain custom static files (such as style sheets) here, 131 | # relative to this directory. They are copied after the builtin static files, 132 | # so a file named "default.css" will overwrite the builtin "default.css". 133 | html_static_path = ['_static'] 134 | 135 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 136 | # using the given strftime format. 137 | html_last_updated_fmt = '%b %d, %Y' 138 | 139 | # If true, SmartyPants will be used to convert quotes and dashes to 140 | # typographically correct entities. 141 | #html_use_smartypants = True 142 | 143 | # Custom sidebar templates, maps document names to template names. 144 | html_sidebars = { 145 | '**' : [], 146 | # '**' : ['localtoc.html'], 147 | # '**' : ['globaltoc.html'], 148 | # '**' : ['searchbox.html', 'search.html'], 149 | # '**' : ['searchbox.html'], 150 | } 151 | 152 | # Additional templates that should be rendered to pages, maps page names to 153 | # template names. 154 | html_additional_pages = { 'search' : 'search.html' } 155 | 156 | # If false, no module index is generated. 157 | html_domain_indices = False 158 | 159 | # If false, no index is generated. 160 | html_use_index = True 161 | 162 | # If true, the index is split into individual pages for each letter. 
163 | html_split_index = False 164 | 165 | # If true, links to the reST sources are added to the pages. 166 | html_show_sourcelink = False 167 | 168 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 169 | html_show_sphinx = True 170 | 171 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 172 | html_show_copyright = True 173 | 174 | # If true, an OpenSearch description file will be output, and all pages will 175 | # contain a tag referring to it. The value of this option must be the 176 | # base URL from which the finished HTML is served. 177 | #html_use_opensearch = '' 178 | 179 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 180 | #html_file_suffix = None 181 | 182 | # Output file base name for HTML help builder. 183 | htmlhelp_basename = 'MPipedoc' 184 | 185 | 186 | # -- Options for LaTeX output -------------------------------------------------- 187 | 188 | # The paper size ('letter' or 'a4'). 189 | #latex_paper_size = 'letter' 190 | 191 | # The font size ('10pt', '11pt' or '12pt'). 192 | #latex_font_size = '10pt' 193 | 194 | # Grouping the document tree into LaTeX files. List of tuples 195 | # (source start file, target name, title, author, documentclass [howto/manual]). 196 | latex_documents = [ 197 | ('index', 'MPipe.tex', u'MPipe Documentation', 198 | u'Velimir Mlaker', 'manual'), 199 | ] 200 | 201 | # The name of an image file (relative to this directory) to place at the top of 202 | # the title page. 203 | #latex_logo = None 204 | 205 | # For "manual" documents, if this is true, then toplevel headings are parts, 206 | # not chapters. 207 | #latex_use_parts = False 208 | 209 | # If true, show page references after internal links. 210 | #latex_show_pagerefs = False 211 | 212 | # If true, show URL addresses after external links. 213 | #latex_show_urls = False 214 | 215 | # Additional stuff for the LaTeX preamble. 
216 | #latex_preamble = '' 217 | 218 | # Documents to append as an appendix to all manuals. 219 | #latex_appendices = [] 220 | 221 | # If false, no module index is generated. 222 | latex_domain_indices = False 223 | 224 | 225 | # -- Options for manual page output -------------------------------------------- 226 | 227 | # One entry per manual page. List of tuples 228 | # (source start file, name, description, authors, manual section). 229 | man_pages = [ 230 | ('index', 'mpipe', u'MPipe Documentation', 231 | [u'Velimir Mlaker'], 1) 232 | ] 233 | 234 | rst_prolog = ''' 235 | .. |NAME| replace:: MPipe 236 | ''' 237 | 238 | # End of file. 239 | -------------------------------------------------------------------------------- /doc/source/_themes/mpipe/static/style.css: -------------------------------------------------------------------------------- 1 | /* 2 | * This file's is a custom stylesheet. 3 | * The original file's header is: 4 | * 5 | * sphinxdoc.css_t 6 | * ~~~~~~~~~~~~~~~ 7 | * 8 | * Sphinx stylesheet -- sphinxdoc theme. Originally created by 9 | * Armin Ronacher for Werkzeug. 10 | * 11 | * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. 12 | * :license: BSD, see LICENSE for details. 
13 | * 14 | */ 15 | 16 | @import url("basic.css"); 17 | 18 | /* -- page layout ----------------------------------------------------------- */ 19 | 20 | body { 21 | font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 22 | 'Verdana', sans-serif; 23 | font-size: 14px; 24 | 25 | line-height: 130%; 26 | 27 | color: black; 28 | border: 1px solid gray; 29 | margin: 0 auto; 30 | 31 | background-image: url(bodybg.png); 32 | min-width: 600px; 33 | max-width: 850px; 34 | } 35 | 36 | .pageheader { 37 | background-image: url(headerbg.png); 38 | text-align: left; 39 | padding: 0px 10px; 40 | } 41 | 42 | .pageheader ul { 43 | padding: 0; 44 | float: right; 45 | color: white; 46 | list-style-type: none; 47 | margin-top: 40px; 48 | margin-right: 10px; 49 | font-size: 15px; 50 | } 51 | 52 | .pageheader li { 53 | float: left; 54 | } 55 | 56 | .pageheader li a { 57 | border-radius: 1px; 58 | padding: 8px 12px; 59 | color: white; 60 | text-shadow: 0 0 5px #000; 61 | } 62 | 63 | .pageheader li a:hover { 64 | background-color: white; 65 | color: #0a507a; 66 | text-shadow: none; 67 | } 68 | 69 | div.document { 70 | background-color: white; 71 | text-align: left; 72 | } 73 | 74 | div.bodywrapper { 75 | margin: 0 190px 0 0; 76 | border-right: 1px solid #ccc; 77 | } 78 | 79 | div.body { 80 | margin: 0; 81 | padding: 0.5em 20px 20px 20px; 82 | } 83 | 84 | div.related { 85 | background-image: url(relbg.png); 86 | font-size: 1em; 87 | } 88 | 89 | div.related ul { 90 | /* 91 | background-image: url(navigation.png); 92 | */ 93 | color: white; 94 | height: 2em; 95 | } 96 | 97 | div.related ul li { 98 | margin: 0; 99 | padding: 0; 100 | height: 2em; 101 | float: left; 102 | } 103 | 104 | div.related ul li.right { 105 | float: right; 106 | margin-right: 5px; 107 | } 108 | 109 | div.related ul li a { 110 | margin: 0; 111 | padding: 0 5px 0 5px; 112 | line-height: 1.75em; 113 | color: silver; 114 | } 115 | 116 | div.related ul li a:hover { 117 | color: white; 118 | } 119 | 120 
/* -- sidebar styles -------------------------------------------------------- */

div.sphinxsidebarwrapper {
    padding: 0;
}

div.sphinxsidebar {
    margin: 0;
    padding: 0.5em 15px 15px 0;
    width: 160px;
    float: right;
    font-size: 1em;
    text-align: left;
    /*
    border: 1px solid #ddee00;
    */
}

div.sphinxsidebar h3, div.sphinxsidebar h4 {
    margin: 1em 0 0.5em 0;
    font-size: 1em;
    padding: 0.1em 0 0.1em 0.5em;
    color: white;
    border: 1px solid #86989B;
    /*
    border: 1px solid #000000;
    */
    background-color: #c0c0c0;
    max-width: 150px;
}

div.sphinxsidebar h3 a {
    color: white;
}

div.sphinxsidebar ul {
    /* NOTE(review): this padding-left is dead — the `padding: 0` shorthand
       below resets all four sides. Kept (not reordered) to preserve the
       current rendering; remove it or move it below `padding: 0` if the
       1.5em indent was actually intended. */
    padding-left: 1.5em;
    margin-top: 7px;
    padding: 0;
    line-height: 130%;
}

div.sphinxsidebar ul ul {
    margin-left: 20px;
}

div.sphinxsidebar input {
    /* FIX: these were written as `//` line comments, which are invalid CSS
       (parse errors). Converted to real CSS comments; rendering unchanged
       since browsers were already dropping the invalid declarations.
    border: 1px solid #000000;
    max-width: 50%;
    */
}

/* -- footer styles --------------------------------------------------------- */

div.footer {

    background-color: silver;
    padding: 3px 8px 3px 0;
    clear: both;
    text-align: right;
    font-size: 80%;
    line-height: 100%;
}

div.footer a {
    text-decoration: underline;
}

.source-click-above {
    /* Use for link to source code file above the "literalinclude" directive. */

    margin-top: 0.2em;
    margin-bottom: -0.4em;
    text-align: right;
    line-height: 0;
    font-size: 85%;
}

.source-click-below {
    /* Use for link to source code file below the "literalinclude" directive. */

    text-align: right;
    line-height: 0;
    margin-top: -0.55em;
    font-size: 85%;
}

/* -- body styles ----------------------------------------------------------- */

p {
    margin: 0.8em 0 0.5em 0;
}

a {
    color: black;
    text-decoration: none;
}

a:hover {
    color: gray;
}

div.body a {
    text-decoration: underline;
}

h1 {
    margin: 0;
    padding: 0.7em 0 0.3em 0;
    font-size: 1.5em;
}

h2 {
    margin: 1.3em 0 0.2em 0;
    font-size: 1.35em;
    padding: 0;
}

h3 {
    margin: 1em 0 -0.3em 0;
    font-size: 1.2em;
}

div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
    color: black!important;
}

/* Anchor links are hidden until the heading is hovered. */
h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
    display: none;
    margin: 0 0 0 0.3em;
    padding: 0 0.2em 0 0.2em;
    color: #aaa!important;
}

h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
h5:hover a.anchor, h6:hover a.anchor {
    display: inline;
}

h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
h5 a.anchor:hover, h6 a.anchor:hover {
    color: gray;
    background-color: #eee;
}

a.headerlink {
    color: #c60f0f!important;
    font-size: 1em;
    margin-left: 6px;
    padding: 0 4px 0 4px;
    text-decoration: none!important;
}

a.headerlink:hover {
    background-color: #ccc;
    color: white!important;
}

/* -- inline code and literal blocks ---------------------------------------- */

cite, code, tt {
    font-family: 'Consolas', 'Deja Vu Sans Mono',
                 'Bitstream Vera Sans Mono', monospace;
    font-size: 0.95em;
    letter-spacing: 0.01em;
    padding: 0 3px;
    border: solid 1px lightgray;
}

tt {
    background-color: #f2f2f2;
    border-bottom: 1px solid #ddd;
    color: #333;
}

tt.descname, tt.descclassname, tt.xref {
    border: 0;
}

hr {
    border: 1px solid #abc;
    margin: 2em;
}

a tt {
    color: black;
}

a tt:hover {
    color: gray;
}

pre {
    font-family: 'Consolas', 'Deja Vu Sans Mono',
                 'Bitstream Vera Sans Mono', monospace;
    font-size: 0.95em;
    letter-spacing: 0.015em;
    line-height: 120%;
    padding: 0.5em;
    border: 1px solid silver;
    background-color: whitesmoke;
}

pre a {
    color: inherit;
    text-decoration: underline;
}

td.linenos pre {
    padding: 0.5em 0;
}

/* -- boxes, tables, admonitions -------------------------------------------- */

div.quotebar {
    background-color: #f5f5f5;
    max-width: 250px;
    float: right;
    padding: 2px 7px;
    border: 1px solid #ccc;
}

div.topic {
    background-color: #f5f5f5;
}

table {
    border-collapse: collapse;
    margin: 0 -0.5em 0 -0.5em;
}

table td, table th {
    padding: 0.2em 0.5em 0.2em 0.5em;
}

div.admonition, div.warning {
    font-size: 0.9em;
    margin: 1em 0 1em 0;
    border: 1px solid #86989B;
    background-color: #f7f7f7;
    padding: 0;
}

div.admonition p, div.warning p {
    margin: 0.5em 1em 0.5em 1em;
    padding: 0;
}

div.admonition pre, div.warning pre {
    margin: 0.4em 1em 0.4em 1em;
}

div.admonition p.admonition-title,
div.warning p.admonition-title {
    margin: 0;
    padding: 0.1em 0 0.1em 0.5em;
    color: white;
    border-bottom: 1px solid #86989B;
    font-weight: bold;
    background-color: #AFC1C4;
}

/* Warnings get a red accent instead of the default blue-gray. */
div.warning {
    border: 1px solid #940000;
}

div.warning p.admonition-title {
    background-color: #CF0000;
    border-bottom-color: #940000;
}

div.admonition ul, div.admonition ol,
div.warning ul, div.warning ol {
    margin: 0.1em 0.5em 0.5em 3em;
    padding: 0;
}

div.versioninfo {
    margin: 1em 0 0 0;
    border: 1px solid #ccc;
    background-color: #DDEAF0;
    padding: 8px;
    line-height: 1.3em;
    font-size: 0.9em;
}

/* -- viewcode extension ---------------------------------------------------- */

.viewcode-back {
    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
                 'Verdana', sans-serif;
}

div.viewcode-block:target {
    background-color: #f4debf;
    border-top: 1px solid #ac9;
    border-bottom: 1px solid #ac9;
}

/* Styling for the search box. */
.searchtip {
    font-size: 90%;
    line-height: 100%;
}
.search-input {
    max-width: 70%;
}
.search-go {
    max-width: 20%;
}