Hi!
How are you?
Here is the link you wanted.

%s
" % getText(title.childNodes)

handleSlideshow(dom)
--------------------------------------------------------------------------------
/includes/mp_newtype.py:
--------------------------------------------------------------------------------
#
# This module shows how to use arbitrary callables with a subclass of
# `BaseManager`.
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#

from multiprocessing import freeze_support
from multiprocessing.managers import BaseManager, BaseProxy
import operator

##

class Foo(object):
    def f(self):
        print 'you called Foo.f()'
    def g(self):
        print 'you called Foo.g()'
    def _h(self):
        print 'you called Foo._h()'

# A simple generator function
def baz():
    for i in xrange(10):
        yield i*i

# Proxy type for generator objects
class GeneratorProxy(BaseProxy):
    _exposed_ = ('next', '__next__')
    def __iter__(self):
        return self
    def next(self):
        return self._callmethod('next')
    def __next__(self):
        return self._callmethod('__next__')

# Function to return the operator module
def get_operator_module():
    return operator

##

class MyManager(BaseManager):
    pass

# register the Foo class; make `f()` and `g()` accessible via proxy
MyManager.register('Foo1', Foo)

# register the Foo class; make `g()` and `_h()` accessible via proxy
MyManager.register('Foo2', Foo, exposed=('g', '_h'))

# register the generator function baz; use `GeneratorProxy` to make proxies
MyManager.register('baz', baz, proxytype=GeneratorProxy)

# register get_operator_module(); make public functions accessible via proxy
MyManager.register('operator', get_operator_module)

##

def test():
    manager = MyManager()
    manager.start()

    print '-' * 20

    f1 = manager.Foo1()
    f1.f()
    f1.g()
    assert not hasattr(f1, '_h')
    assert sorted(f1._exposed_) == sorted(['f', 'g'])

    print '-' * 20

    f2 = manager.Foo2()
    f2.g()
    f2._h()
    assert not hasattr(f2, 'f')
    assert sorted(f2._exposed_) == sorted(['g', '_h'])

    print '-' * 20

    it = manager.baz()
    for i in it:
        print '<%d>' % i,
    print

    print '-' * 20

    op = manager.operator()
    print 'op.add(23, 45) =', op.add(23, 45)
    print 'op.pow(2, 94) =', op.pow(2, 94)
    print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6)
    print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3)
    print 'op._exposed_ =', op._exposed_

##

if __name__ == '__main__':
    freeze_support()
    test()
--------------------------------------------------------------------------------
/includes/mp_webserver.py:
--------------------------------------------------------------------------------
#
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object.  (We import `multiprocessing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to the `socket.accept()` method by
# using a process-shared lock -- it does not seem to be necessary.
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#

import os
import sys

from multiprocessing import Process, current_process, freeze_support
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler

if sys.platform == 'win32':
    import multiprocessing.reduction    # make sockets picklable/inheritable


def note(format, *args):
    sys.stderr.write('[%s]\t%s\n' % (current_process().name, format % args))


class RequestHandler(SimpleHTTPRequestHandler):
    # we override log_message() to show which process is handling the request
    def log_message(self, format, *args):
        note(format, *args)


def serve_forever(server):
    note('starting server')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass


def runpool(address, number_of_processes):
    # create a single server object -- children will each inherit a copy
    server = HTTPServer(address, RequestHandler)

    # create child processes to act as workers
    for i in range(number_of_processes - 1):
        Process(target=serve_forever, args=(server,)).start()

    # main process also acts as a worker
    serve_forever(server)


def test():
    DIR = os.path.join(os.path.dirname(__file__), '..')
    ADDRESS = ('localhost', 8000)
    NUMBER_OF_PROCESSES = 4

    print 'Serving at http://%s:%d using %d worker processes' % \
          (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
    print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform == 'win32']

    os.chdir(DIR)
    runpool(ADDRESS, NUMBER_OF_PROCESSES)


if __name__ == '__main__':
    freeze_support()
    test()
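
Every worker in mp_webserver.py blocks in accept() on the same inherited listening socket, so successive requests may be answered by different processes. A small client sketch (assuming the default localhost:8000 address from test() above) makes that visible in the '[process-name]' prefixes that note() writes to the server's stderr:

import urllib2

for _ in range(5):
    # each request may be logged by a different worker process; watch the
    # server's stderr output from RequestHandler.log_message()
    urllib2.urlopen('http://localhost:8000/').read()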
--------------------------------------------------------------------------------
/includes/mp_workers.py:
--------------------------------------------------------------------------------
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were put on the
# input queue.  If it is important to get the results back in the
# original order then consider using `Pool.map()` or `Pool.imap()`
# (which will save on the amount of code needed anyway).
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#

import time
import random

from multiprocessing import Process, Queue, current_process, freeze_support

#
# Function run by worker processes
#

def worker(input, output):
    for func, args in iter(input.get, 'STOP'):
        result = calculate(func, args)
        output.put(result)

#
# Function used to calculate result
#

def calculate(func, args):
    result = func(*args)
    return '%s says that %s%s = %s' % \
        (current_process().name, func.__name__, args, result)

#
# Functions referenced by tasks
#

def mul(a, b):
    time.sleep(0.5*random.random())
    return a * b

def plus(a, b):
    time.sleep(0.5*random.random())
    return a + b

#
#
#

def test():
    NUMBER_OF_PROCESSES = 4
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    TASKS2 = [(plus, (i, 8)) for i in range(10)]

    # Create queues
    task_queue = Queue()
    done_queue = Queue()

    # Submit tasks
    for task in TASKS1:
        task_queue.put(task)

    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    # Get and print results
    print 'Unordered results:'
    for i in range(len(TASKS1)):
        print '\t', done_queue.get()

    # Add more tasks using `put()`
    for task in TASKS2:
        task_queue.put(task)

    # Get and print some more results
    for i in range(len(TASKS2)):
        print '\t', done_queue.get()

    # Tell child processes to stop
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')


if __name__ == '__main__':
    freeze_support()
    test()
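
The header comment above points to `Pool.map()`/`Pool.imap()` when result order matters. Below is a minimal sketch of that alternative; the `mul7` helper is hypothetical, standing in for the `mul`/`plus` tasks, and must live at module level so the pool workers can pickle it.

from multiprocessing import Pool

def mul7(i):
    return i * 7

if __name__ == '__main__':
    pool = Pool(processes=4)
    # unlike the explicit queues above, imap() yields results in task order
    for result in pool.imap(mul7, range(20)):
        print result
    pool.close()
    pool.join()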
--------------------------------------------------------------------------------
/includes/noddy.c:
--------------------------------------------------------------------------------
#include
--------------------------------------------------------------------------------
/tools/sphinxext/download.html:
--------------------------------------------------------------------------------
<p>Last updated on: {{ last_updated }}.</p>
{% endif %}

<p>To download an archive containing all the documents for this version of
Python in one of various formats, follow one of the links in this table.
The numbers in the table are the sizes of the download files in megabytes.</p>

<table class="docutils">
  <tr><th>Format</th><th>Packed as .zip</th><th>Packed as .tar.bz2</th></tr>
  <tr><td>PDF (US-Letter paper size)</td>
      <td>Download (ca. 8 MB)</td>
      <td>Download (ca. 8 MB)</td>
  </tr>
  <tr><td>PDF (A4 paper size)</td>
      <td>Download (ca. 8 MB)</td>
      <td>Download (ca. 8 MB)</td>
  </tr>
  <tr><td>HTML</td>
      <td>Download (ca. 6 MB)</td>
      <td>Download (ca. 4 MB)</td>
  </tr>
  <tr><td>Plain Text</td>
      <td>Download (ca. 2 MB)</td>
      <td>Download (ca. 1.5 MB)</td>
  </tr>
</table>

<p>These archives contain all the content in the documentation.</p>

<p>Unix users should download the .tar.bz2 archives; these are bzipped tar
archives and can be handled in the usual way using tar and the bzip2
program.  The InfoZIP unzip program can be used to handle the ZIP archives
if desired.  The .tar.bz2 archives provide the best compression and fastest
download times.</p>

<p>Windows users can use the ZIP archives, since those are customary on that
platform.  These are created on Unix using the InfoZIP zip program.</p>

<p>If you have comments or suggestions for the Python documentation, please
send email to docs@python.org.</p>

{% endblock %}
--------------------------------------------------------------------------------
/tools/sphinxext/indexsidebar.html:
--------------------------------------------------------------------------------