├── .gitignore ├── CHANGES ├── LICENSE ├── MANIFEST.in ├── concurrent ├── __init__.py └── futures │ ├── __init__.py │ ├── _base.py │ ├── process.py │ └── thread.py ├── crawl.py ├── docs ├── Makefile ├── conf.py ├── index.rst └── make.bat ├── primes.py ├── setup.cfg ├── setup.py ├── test_futures.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info 2 | *.pyc 3 | dist 4 | .project 5 | .pydevproject 6 | .idea 7 | .tox 8 | -------------------------------------------------------------------------------- /CHANGES: -------------------------------------------------------------------------------- 1 | 3.0.5 2 | ===== 3 | 4 | - Fixed OverflowError with ProcessPoolExecutor on Windows (regression introduced in 3.0.4) 5 | 6 | 7 | 3.0.4 8 | ===== 9 | 10 | - Fixed inability to forcibly terminate the process if there are pending workers 11 | 12 | 13 | 3.0.3 14 | ===== 15 | 16 | - Fixed AttributeErrors on exit on Python 2.x 17 | 18 | 19 | 3.0.2 20 | ===== 21 | 22 | - Made multiprocessing optional again on implementations other than just Jython 23 | 24 | 25 | 3.0.1 26 | ===== 27 | 28 | - Made Executor.map() non-greedy 29 | 30 | 31 | 3.0.0 32 | ===== 33 | 34 | - Dropped Python 2.5 and 3.1 support 35 | - Removed the deprecated "futures" top level package 36 | - Applied patch for issue 11777 (Executor.map does not submit futures until 37 | iter.next() is called) 38 | - Applied patch for issue 15015 (accessing an non-existing attribute) 39 | - Applied patch for issue 16284 (memory leak) 40 | - Applied patch for issue 20367 (behavior of concurrent.futures.as_completed() 41 | for duplicate arguments) 42 | 43 | 2.2.0 44 | ===== 45 | 46 | - Added the set_exception_info() and exception_info() methods to Future 47 | to enable extraction of tracebacks on Python 2.x 48 | - Added support for Future.set_exception_info() to ThreadPoolExecutor 49 | 50 | 51 | 2.1.6 52 | ===== 53 | 54 | - Fixed a problem with files missing from the source distribution 55 | 56 | 57 | 2.1.5 58 | ===== 59 | 60 | - Fixed Jython compatibility 61 | - Added metadata for wheel support 62 | 63 | 64 | 2.1.4 65 | ===== 66 | 67 | - Ported the library again from Python 3.2.5 to get the latest bug fixes 68 | 69 | 70 | 2.1.3 71 | ===== 72 | 73 | - Fixed race condition in wait(return_when=ALL_COMPLETED) 74 | (http://bugs.python.org/issue14406) -- thanks Ralf Schmitt 75 | - Added missing setUp() methods to several test classes 76 | 77 | 78 | 2.1.2 79 | ===== 80 | 81 | - Fixed installation problem on Python 3.1 82 | 83 | 84 | 2.1.1 85 | ===== 86 | 87 | - Fixed missing 'concurrent' package declaration in setup.py 88 | 89 | 90 | 2.1 91 | === 92 | 93 | - Moved the code from the 'futures' package to 'concurrent.futures' to provide 94 | a drop in backport that matches the code in Python 3.2 standard library 95 | - Deprecated the old 'futures' package 96 | 97 | 98 | 2.0 99 | === 100 | 101 | - Changed implementation to match PEP 3148 102 | 103 | 104 | 1.0 105 | === 106 | 107 | Initial release. 108 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2009 Brian Quinlan. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, 4 | are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, 7 | this list of conditions and the following disclaimer. 8 | 2. Redistributions in binary form must reproduce the above copyright notice, 9 | this list of conditions and the following disclaimer in the documentation 10 | and/or other materials provided with the distribution. 11 | 12 | THIS SOFTWARE IS PROVIDED BY BRIAN QUINLAN "AS IS" AND ANY EXPRESS OR IMPLIED 13 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 14 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 15 | HALL THE FREEBSD PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 16 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 17 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 18 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 19 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 20 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include docs * 2 | include *.py 3 | include tox.ini 4 | include CHANGES 5 | include LICENSE 6 | -------------------------------------------------------------------------------- /concurrent/__init__.py: -------------------------------------------------------------------------------- 1 | from pkgutil import extend_path 2 | 3 | __path__ = extend_path(__path__, __name__) 4 | -------------------------------------------------------------------------------- /concurrent/futures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2009 Brian Quinlan. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Execute computations asynchronously using threads or processes.""" 5 | 6 | __author__ = 'Brian Quinlan (brian@sweetapp.com)' 7 | 8 | from concurrent.futures._base import (FIRST_COMPLETED, 9 | FIRST_EXCEPTION, 10 | ALL_COMPLETED, 11 | CancelledError, 12 | TimeoutError, 13 | Future, 14 | Executor, 15 | wait, 16 | as_completed) 17 | from concurrent.futures.thread import ThreadPoolExecutor 18 | 19 | try: 20 | from concurrent.futures.process import ProcessPoolExecutor 21 | except ImportError: 22 | # some platforms don't have multiprocessing 23 | pass 24 | -------------------------------------------------------------------------------- /concurrent/futures/_base.py: -------------------------------------------------------------------------------- 1 | # Copyright 2009 Brian Quinlan. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | import collections 5 | import logging 6 | import threading 7 | import itertools 8 | import time 9 | 10 | __author__ = 'Brian Quinlan (brian@sweetapp.com)' 11 | 12 | FIRST_COMPLETED = 'FIRST_COMPLETED' 13 | FIRST_EXCEPTION = 'FIRST_EXCEPTION' 14 | ALL_COMPLETED = 'ALL_COMPLETED' 15 | _AS_COMPLETED = '_AS_COMPLETED' 16 | 17 | # Possible future states (for internal use by the futures package). 18 | PENDING = 'PENDING' 19 | RUNNING = 'RUNNING' 20 | # The future was cancelled by the user... 21 | CANCELLED = 'CANCELLED' 22 | # ...and _Waiter.add_cancelled() was called by a worker. 23 | CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' 24 | FINISHED = 'FINISHED' 25 | 26 | _FUTURE_STATES = [ 27 | PENDING, 28 | RUNNING, 29 | CANCELLED, 30 | CANCELLED_AND_NOTIFIED, 31 | FINISHED 32 | ] 33 | 34 | _STATE_TO_DESCRIPTION_MAP = { 35 | PENDING: "pending", 36 | RUNNING: "running", 37 | CANCELLED: "cancelled", 38 | CANCELLED_AND_NOTIFIED: "cancelled", 39 | FINISHED: "finished" 40 | } 41 | 42 | # Logger for internal use by the futures package. 43 | LOGGER = logging.getLogger("concurrent.futures") 44 | 45 | class Error(Exception): 46 | """Base class for all future-related exceptions.""" 47 | pass 48 | 49 | class CancelledError(Error): 50 | """The Future was cancelled.""" 51 | pass 52 | 53 | class TimeoutError(Error): 54 | """The operation exceeded the given deadline.""" 55 | pass 56 | 57 | class _Waiter(object): 58 | """Provides the event that wait() and as_completed() block on.""" 59 | def __init__(self): 60 | self.event = threading.Event() 61 | self.finished_futures = [] 62 | 63 | def add_result(self, future): 64 | self.finished_futures.append(future) 65 | 66 | def add_exception(self, future): 67 | self.finished_futures.append(future) 68 | 69 | def add_cancelled(self, future): 70 | self.finished_futures.append(future) 71 | 72 | class _AsCompletedWaiter(_Waiter): 73 | """Used by as_completed().""" 74 | 75 | def __init__(self): 76 | super(_AsCompletedWaiter, self).__init__() 77 | self.lock = threading.Lock() 78 | 79 | def add_result(self, future): 80 | with self.lock: 81 | super(_AsCompletedWaiter, self).add_result(future) 82 | self.event.set() 83 | 84 | def add_exception(self, future): 85 | with self.lock: 86 | super(_AsCompletedWaiter, self).add_exception(future) 87 | self.event.set() 88 | 89 | def add_cancelled(self, future): 90 | with self.lock: 91 | super(_AsCompletedWaiter, self).add_cancelled(future) 92 | self.event.set() 93 | 94 | class _FirstCompletedWaiter(_Waiter): 95 | """Used by wait(return_when=FIRST_COMPLETED).""" 96 | 97 | def add_result(self, future): 98 | super(_FirstCompletedWaiter, self).add_result(future) 99 | self.event.set() 100 | 101 | def add_exception(self, future): 102 | super(_FirstCompletedWaiter, self).add_exception(future) 103 | self.event.set() 104 | 105 | def add_cancelled(self, future): 106 | super(_FirstCompletedWaiter, self).add_cancelled(future) 107 | self.event.set() 108 | 109 | class _AllCompletedWaiter(_Waiter): 110 | """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" 111 | 112 | def __init__(self, num_pending_calls, stop_on_exception): 113 | self.num_pending_calls = num_pending_calls 114 | self.stop_on_exception = stop_on_exception 115 | self.lock = threading.Lock() 116 | super(_AllCompletedWaiter, self).__init__() 117 | 118 | def _decrement_pending_calls(self): 119 | with self.lock: 120 | self.num_pending_calls -= 1 121 | if not self.num_pending_calls: 122 | self.event.set() 123 | 124 | def add_result(self, future): 125 | super(_AllCompletedWaiter, self).add_result(future) 126 | self._decrement_pending_calls() 127 | 128 | def add_exception(self, future): 129 | super(_AllCompletedWaiter, self).add_exception(future) 130 | if self.stop_on_exception: 131 | self.event.set() 132 | else: 133 | self._decrement_pending_calls() 134 | 135 | def add_cancelled(self, future): 136 | super(_AllCompletedWaiter, self).add_cancelled(future) 137 | self._decrement_pending_calls() 138 | 139 | class _AcquireFutures(object): 140 | """A context manager that does an ordered acquire of Future conditions.""" 141 | 142 | def __init__(self, futures): 143 | self.futures = sorted(futures, key=id) 144 | 145 | def __enter__(self): 146 | for future in self.futures: 147 | future._condition.acquire() 148 | 149 | def __exit__(self, *args): 150 | for future in self.futures: 151 | future._condition.release() 152 | 153 | def _create_and_install_waiters(fs, return_when): 154 | if return_when == _AS_COMPLETED: 155 | waiter = _AsCompletedWaiter() 156 | elif return_when == FIRST_COMPLETED: 157 | waiter = _FirstCompletedWaiter() 158 | else: 159 | pending_count = sum( 160 | f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) 161 | 162 | if return_when == FIRST_EXCEPTION: 163 | waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) 164 | elif return_when == ALL_COMPLETED: 165 | waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) 166 | else: 167 | raise ValueError("Invalid return condition: %r" % return_when) 168 | 169 | for f in fs: 170 | f._waiters.append(waiter) 171 | 172 | return waiter 173 | 174 | def as_completed(fs, timeout=None): 175 | """An iterator over the given futures that yields each as it completes. 176 | 177 | Args: 178 | fs: The sequence of Futures (possibly created by different Executors) to 179 | iterate over. 180 | timeout: The maximum number of seconds to wait. If None, then there 181 | is no limit on the wait time. 182 | 183 | Returns: 184 | An iterator that yields the given Futures as they complete (finished or 185 | cancelled). If any given Futures are duplicated, they will be returned 186 | once. 187 | 188 | Raises: 189 | TimeoutError: If the entire result iterator could not be generated 190 | before the given timeout. 191 | """ 192 | if timeout is not None: 193 | end_time = timeout + time.time() 194 | 195 | fs = set(fs) 196 | with _AcquireFutures(fs): 197 | finished = set( 198 | f for f in fs 199 | if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) 200 | pending = fs - finished 201 | waiter = _create_and_install_waiters(fs, _AS_COMPLETED) 202 | 203 | try: 204 | for future in finished: 205 | yield future 206 | 207 | while pending: 208 | if timeout is None: 209 | wait_timeout = None 210 | else: 211 | wait_timeout = end_time - time.time() 212 | if wait_timeout < 0: 213 | raise TimeoutError( 214 | '%d (of %d) futures unfinished' % ( 215 | len(pending), len(fs))) 216 | 217 | waiter.event.wait(wait_timeout) 218 | 219 | with waiter.lock: 220 | finished = waiter.finished_futures 221 | waiter.finished_futures = [] 222 | waiter.event.clear() 223 | 224 | for future in finished: 225 | yield future 226 | pending.remove(future) 227 | 228 | finally: 229 | for f in fs: 230 | with f._condition: 231 | f._waiters.remove(waiter) 232 | 233 | DoneAndNotDoneFutures = collections.namedtuple( 234 | 'DoneAndNotDoneFutures', 'done not_done') 235 | def wait(fs, timeout=None, return_when=ALL_COMPLETED): 236 | """Wait for the futures in the given sequence to complete. 237 | 238 | Args: 239 | fs: The sequence of Futures (possibly created by different Executors) to 240 | wait upon. 241 | timeout: The maximum number of seconds to wait. If None, then there 242 | is no limit on the wait time. 243 | return_when: Indicates when this function should return. The options 244 | are: 245 | 246 | FIRST_COMPLETED - Return when any future finishes or is 247 | cancelled. 248 | FIRST_EXCEPTION - Return when any future finishes by raising an 249 | exception. If no future raises an exception 250 | then it is equivalent to ALL_COMPLETED. 251 | ALL_COMPLETED - Return when all futures finish or are cancelled. 252 | 253 | Returns: 254 | A named 2-tuple of sets. The first set, named 'done', contains the 255 | futures that completed (is finished or cancelled) before the wait 256 | completed. The second set, named 'not_done', contains uncompleted 257 | futures. 258 | """ 259 | with _AcquireFutures(fs): 260 | done = set(f for f in fs 261 | if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) 262 | not_done = set(fs) - done 263 | 264 | if (return_when == FIRST_COMPLETED) and done: 265 | return DoneAndNotDoneFutures(done, not_done) 266 | elif (return_when == FIRST_EXCEPTION) and done: 267 | if any(f for f in done 268 | if not f.cancelled() and f.exception() is not None): 269 | return DoneAndNotDoneFutures(done, not_done) 270 | 271 | if len(done) == len(fs): 272 | return DoneAndNotDoneFutures(done, not_done) 273 | 274 | waiter = _create_and_install_waiters(fs, return_when) 275 | 276 | waiter.event.wait(timeout) 277 | for f in fs: 278 | with f._condition: 279 | f._waiters.remove(waiter) 280 | 281 | done.update(waiter.finished_futures) 282 | return DoneAndNotDoneFutures(done, set(fs) - done) 283 | 284 | class Future(object): 285 | """Represents the result of an asynchronous computation.""" 286 | 287 | def __init__(self): 288 | """Initializes the future. Should not be called by clients.""" 289 | self._condition = threading.Condition() 290 | self._state = PENDING 291 | self._result = None 292 | self._exception = None 293 | self._traceback = None 294 | self._waiters = [] 295 | self._done_callbacks = [] 296 | 297 | def _invoke_callbacks(self): 298 | for callback in self._done_callbacks: 299 | try: 300 | callback(self) 301 | except Exception: 302 | LOGGER.exception('exception calling callback for %r', self) 303 | 304 | def __repr__(self): 305 | with self._condition: 306 | if self._state == FINISHED: 307 | if self._exception: 308 | return '' % ( 309 | hex(id(self)), 310 | _STATE_TO_DESCRIPTION_MAP[self._state], 311 | self._exception.__class__.__name__) 312 | else: 313 | return '' % ( 314 | hex(id(self)), 315 | _STATE_TO_DESCRIPTION_MAP[self._state], 316 | self._result.__class__.__name__) 317 | return '' % ( 318 | hex(id(self)), 319 | _STATE_TO_DESCRIPTION_MAP[self._state]) 320 | 321 | def cancel(self): 322 | """Cancel the future if possible. 323 | 324 | Returns True if the future was cancelled, False otherwise. A future 325 | cannot be cancelled if it is running or has already completed. 326 | """ 327 | with self._condition: 328 | if self._state in [RUNNING, FINISHED]: 329 | return False 330 | 331 | if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: 332 | return True 333 | 334 | self._state = CANCELLED 335 | self._condition.notify_all() 336 | 337 | self._invoke_callbacks() 338 | return True 339 | 340 | def cancelled(self): 341 | """Return True if the future has cancelled.""" 342 | with self._condition: 343 | return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] 344 | 345 | def running(self): 346 | """Return True if the future is currently executing.""" 347 | with self._condition: 348 | return self._state == RUNNING 349 | 350 | def done(self): 351 | """Return True of the future was cancelled or finished executing.""" 352 | with self._condition: 353 | return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] 354 | 355 | def __get_result(self): 356 | if self._exception: 357 | raise type(self._exception), self._exception, self._traceback 358 | else: 359 | return self._result 360 | 361 | def add_done_callback(self, fn): 362 | """Attaches a callable that will be called when the future finishes. 363 | 364 | Args: 365 | fn: A callable that will be called with this future as its only 366 | argument when the future completes or is cancelled. The callable 367 | will always be called by a thread in the same process in which 368 | it was added. If the future has already completed or been 369 | cancelled then the callable will be called immediately. These 370 | callables are called in the order that they were added. 371 | """ 372 | with self._condition: 373 | if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: 374 | self._done_callbacks.append(fn) 375 | return 376 | fn(self) 377 | 378 | def result(self, timeout=None): 379 | """Return the result of the call that the future represents. 380 | 381 | Args: 382 | timeout: The number of seconds to wait for the result if the future 383 | isn't done. If None, then there is no limit on the wait time. 384 | 385 | Returns: 386 | The result of the call that the future represents. 387 | 388 | Raises: 389 | CancelledError: If the future was cancelled. 390 | TimeoutError: If the future didn't finish executing before the given 391 | timeout. 392 | Exception: If the call raised then that exception will be raised. 393 | """ 394 | with self._condition: 395 | if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: 396 | raise CancelledError() 397 | elif self._state == FINISHED: 398 | return self.__get_result() 399 | 400 | self._condition.wait(timeout) 401 | 402 | if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: 403 | raise CancelledError() 404 | elif self._state == FINISHED: 405 | return self.__get_result() 406 | else: 407 | raise TimeoutError() 408 | 409 | def exception_info(self, timeout=None): 410 | """Return a tuple of (exception, traceback) raised by the call that the 411 | future represents. 412 | 413 | Args: 414 | timeout: The number of seconds to wait for the exception if the 415 | future isn't done. If None, then there is no limit on the wait 416 | time. 417 | 418 | Returns: 419 | The exception raised by the call that the future represents or None 420 | if the call completed without raising. 421 | 422 | Raises: 423 | CancelledError: If the future was cancelled. 424 | TimeoutError: If the future didn't finish executing before the given 425 | timeout. 426 | """ 427 | with self._condition: 428 | if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: 429 | raise CancelledError() 430 | elif self._state == FINISHED: 431 | return self._exception, self._traceback 432 | 433 | self._condition.wait(timeout) 434 | 435 | if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: 436 | raise CancelledError() 437 | elif self._state == FINISHED: 438 | return self._exception, self._traceback 439 | else: 440 | raise TimeoutError() 441 | 442 | def exception(self, timeout=None): 443 | """Return the exception raised by the call that the future represents. 444 | 445 | Args: 446 | timeout: The number of seconds to wait for the exception if the 447 | future isn't done. If None, then there is no limit on the wait 448 | time. 449 | 450 | Returns: 451 | The exception raised by the call that the future represents or None 452 | if the call completed without raising. 453 | 454 | Raises: 455 | CancelledError: If the future was cancelled. 456 | TimeoutError: If the future didn't finish executing before the given 457 | timeout. 458 | """ 459 | return self.exception_info(timeout)[0] 460 | 461 | # The following methods should only be used by Executors and in tests. 462 | def set_running_or_notify_cancel(self): 463 | """Mark the future as running or process any cancel notifications. 464 | 465 | Should only be used by Executor implementations and unit tests. 466 | 467 | If the future has been cancelled (cancel() was called and returned 468 | True) then any threads waiting on the future completing (though calls 469 | to as_completed() or wait()) are notified and False is returned. 470 | 471 | If the future was not cancelled then it is put in the running state 472 | (future calls to running() will return True) and True is returned. 473 | 474 | This method should be called by Executor implementations before 475 | executing the work associated with this future. If this method returns 476 | False then the work should not be executed. 477 | 478 | Returns: 479 | False if the Future was cancelled, True otherwise. 480 | 481 | Raises: 482 | RuntimeError: if this method was already called or if set_result() 483 | or set_exception() was called. 484 | """ 485 | with self._condition: 486 | if self._state == CANCELLED: 487 | self._state = CANCELLED_AND_NOTIFIED 488 | for waiter in self._waiters: 489 | waiter.add_cancelled(self) 490 | # self._condition.notify_all() is not necessary because 491 | # self.cancel() triggers a notification. 492 | return False 493 | elif self._state == PENDING: 494 | self._state = RUNNING 495 | return True 496 | else: 497 | LOGGER.critical('Future %s in unexpected state: %s', 498 | id(self), 499 | self._state) 500 | raise RuntimeError('Future in unexpected state') 501 | 502 | def set_result(self, result): 503 | """Sets the return value of work associated with the future. 504 | 505 | Should only be used by Executor implementations and unit tests. 506 | """ 507 | with self._condition: 508 | self._result = result 509 | self._state = FINISHED 510 | for waiter in self._waiters: 511 | waiter.add_result(self) 512 | self._condition.notify_all() 513 | self._invoke_callbacks() 514 | 515 | def set_exception_info(self, exception, traceback): 516 | """Sets the result of the future as being the given exception 517 | and traceback. 518 | 519 | Should only be used by Executor implementations and unit tests. 520 | """ 521 | with self._condition: 522 | self._exception = exception 523 | self._traceback = traceback 524 | self._state = FINISHED 525 | for waiter in self._waiters: 526 | waiter.add_exception(self) 527 | self._condition.notify_all() 528 | self._invoke_callbacks() 529 | 530 | def set_exception(self, exception): 531 | """Sets the result of the future as being the given exception. 532 | 533 | Should only be used by Executor implementations and unit tests. 534 | """ 535 | self.set_exception_info(exception, None) 536 | 537 | class Executor(object): 538 | """This is an abstract base class for concrete asynchronous executors.""" 539 | 540 | def submit(self, fn, *args, **kwargs): 541 | """Submits a callable to be executed with the given arguments. 542 | 543 | Schedules the callable to be executed as fn(*args, **kwargs) and returns 544 | a Future instance representing the execution of the callable. 545 | 546 | Returns: 547 | A Future representing the given call. 548 | """ 549 | raise NotImplementedError() 550 | 551 | def map(self, fn, *iterables, **kwargs): 552 | """Returns a iterator equivalent to map(fn, iter). 553 | 554 | Args: 555 | fn: A callable that will take as many arguments as there are 556 | passed iterables. 557 | timeout: The maximum number of seconds to wait. If None, then there 558 | is no limit on the wait time. 559 | 560 | Returns: 561 | An iterator equivalent to: map(func, *iterables) but the calls may 562 | be evaluated out-of-order. 563 | 564 | Raises: 565 | TimeoutError: If the entire result iterator could not be generated 566 | before the given timeout. 567 | Exception: If fn(*args) raises for any values. 568 | """ 569 | timeout = kwargs.get('timeout') 570 | if timeout is not None: 571 | end_time = timeout + time.time() 572 | 573 | fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)] 574 | 575 | # Yield must be hidden in closure so that the futures are submitted 576 | # before the first iterator value is required. 577 | def result_iterator(): 578 | try: 579 | for future in fs: 580 | if timeout is None: 581 | yield future.result() 582 | else: 583 | yield future.result(end_time - time.time()) 584 | finally: 585 | for future in fs: 586 | future.cancel() 587 | return result_iterator() 588 | 589 | def shutdown(self, wait=True): 590 | """Clean-up the resources associated with the Executor. 591 | 592 | It is safe to call this method several times. Otherwise, no other 593 | methods can be called after this one. 594 | 595 | Args: 596 | wait: If True then shutdown will not return until all running 597 | futures have finished executing and the resources used by the 598 | executor have been reclaimed. 599 | """ 600 | pass 601 | 602 | def __enter__(self): 603 | return self 604 | 605 | def __exit__(self, exc_type, exc_val, exc_tb): 606 | self.shutdown(wait=True) 607 | return False 608 | -------------------------------------------------------------------------------- /concurrent/futures/process.py: -------------------------------------------------------------------------------- 1 | # Copyright 2009 Brian Quinlan. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Implements ProcessPoolExecutor. 5 | 6 | The follow diagram and text describe the data-flow through the system: 7 | 8 | |======================= In-process =====================|== Out-of-process ==| 9 | 10 | +----------+ +----------+ +--------+ +-----------+ +---------+ 11 | | | => | Work Ids | => | | => | Call Q | => | | 12 | | | +----------+ | | +-----------+ | | 13 | | | | ... | | | | ... | | | 14 | | | | 6 | | | | 5, call() | | | 15 | | | | 7 | | | | ... | | | 16 | | Process | | ... | | Local | +-----------+ | Process | 17 | | Pool | +----------+ | Worker | | #1..n | 18 | | Executor | | Thread | | | 19 | | | +----------- + | | +-----------+ | | 20 | | | <=> | Work Items | <=> | | <= | Result Q | <= | | 21 | | | +------------+ | | +-----------+ | | 22 | | | | 6: call() | | | | ... | | | 23 | | | | future | | | | 4, result | | | 24 | | | | ... | | | | 3, except | | | 25 | +----------+ +------------+ +--------+ +-----------+ +---------+ 26 | 27 | Executor.submit() called: 28 | - creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict 29 | - adds the id of the _WorkItem to the "Work Ids" queue 30 | 31 | Local worker thread: 32 | - reads work ids from the "Work Ids" queue and looks up the corresponding 33 | WorkItem from the "Work Items" dict: if the work item has been cancelled then 34 | it is simply removed from the dict, otherwise it is repackaged as a 35 | _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" 36 | until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because 37 | calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). 38 | - reads _ResultItems from "Result Q", updates the future stored in the 39 | "Work Items" dict and deletes the dict entry 40 | 41 | Process #1..n: 42 | - reads _CallItems from "Call Q", executes the calls, and puts the resulting 43 | _ResultItems in "Request Q" 44 | """ 45 | 46 | import atexit 47 | from concurrent.futures import _base 48 | import Queue as queue 49 | import multiprocessing 50 | import threading 51 | import weakref 52 | import sys 53 | 54 | __author__ = 'Brian Quinlan (brian@sweetapp.com)' 55 | 56 | # Workers are created as daemon threads and processes. This is done to allow the 57 | # interpreter to exit when there are still idle processes in a 58 | # ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, 59 | # allowing workers to die with the interpreter has two undesirable properties: 60 | # - The workers would still be running during interpretor shutdown, 61 | # meaning that they would fail in unpredictable ways. 62 | # - The workers could be killed while evaluating a work item, which could 63 | # be bad if the callable being evaluated has external side-effects e.g. 64 | # writing to a file. 65 | # 66 | # To work around this problem, an exit handler is installed which tells the 67 | # workers to exit when their work queues are empty and then waits until the 68 | # threads/processes finish. 69 | 70 | _threads_queues = weakref.WeakKeyDictionary() 71 | _shutdown = False 72 | 73 | def _python_exit(): 74 | global _shutdown 75 | _shutdown = True 76 | items = list(_threads_queues.items()) if _threads_queues else () 77 | for t, q in items: 78 | q.put(None) 79 | for t, q in items: 80 | t.join(sys.maxint) 81 | 82 | # Controls how many more calls than processes will be queued in the call queue. 83 | # A smaller number will mean that processes spend more time idle waiting for 84 | # work while a larger number will make Future.cancel() succeed less frequently 85 | # (Futures in the call queue cannot be cancelled). 86 | EXTRA_QUEUED_CALLS = 1 87 | 88 | class _WorkItem(object): 89 | def __init__(self, future, fn, args, kwargs): 90 | self.future = future 91 | self.fn = fn 92 | self.args = args 93 | self.kwargs = kwargs 94 | 95 | class _ResultItem(object): 96 | def __init__(self, work_id, exception=None, result=None): 97 | self.work_id = work_id 98 | self.exception = exception 99 | self.result = result 100 | 101 | class _CallItem(object): 102 | def __init__(self, work_id, fn, args, kwargs): 103 | self.work_id = work_id 104 | self.fn = fn 105 | self.args = args 106 | self.kwargs = kwargs 107 | 108 | def _process_worker(call_queue, result_queue): 109 | """Evaluates calls from call_queue and places the results in result_queue. 110 | 111 | This worker is run in a separate process. 112 | 113 | Args: 114 | call_queue: A multiprocessing.Queue of _CallItems that will be read and 115 | evaluated by the worker. 116 | result_queue: A multiprocessing.Queue of _ResultItems that will written 117 | to by the worker. 118 | shutdown: A multiprocessing.Event that will be set as a signal to the 119 | worker that it should exit when call_queue is empty. 120 | """ 121 | while True: 122 | call_item = call_queue.get(block=True) 123 | if call_item is None: 124 | # Wake up queue management thread 125 | result_queue.put(None) 126 | return 127 | try: 128 | r = call_item.fn(*call_item.args, **call_item.kwargs) 129 | except BaseException: 130 | e = sys.exc_info()[1] 131 | result_queue.put(_ResultItem(call_item.work_id, 132 | exception=e)) 133 | else: 134 | result_queue.put(_ResultItem(call_item.work_id, 135 | result=r)) 136 | 137 | def _add_call_item_to_queue(pending_work_items, 138 | work_ids, 139 | call_queue): 140 | """Fills call_queue with _WorkItems from pending_work_items. 141 | 142 | This function never blocks. 143 | 144 | Args: 145 | pending_work_items: A dict mapping work ids to _WorkItems e.g. 146 | {5: <_WorkItem...>, 6: <_WorkItem...>, ...} 147 | work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids 148 | are consumed and the corresponding _WorkItems from 149 | pending_work_items are transformed into _CallItems and put in 150 | call_queue. 151 | call_queue: A multiprocessing.Queue that will be filled with _CallItems 152 | derived from _WorkItems. 153 | """ 154 | while True: 155 | if call_queue.full(): 156 | return 157 | try: 158 | work_id = work_ids.get(block=False) 159 | except queue.Empty: 160 | return 161 | else: 162 | work_item = pending_work_items[work_id] 163 | 164 | if work_item.future.set_running_or_notify_cancel(): 165 | call_queue.put(_CallItem(work_id, 166 | work_item.fn, 167 | work_item.args, 168 | work_item.kwargs), 169 | block=True) 170 | else: 171 | del pending_work_items[work_id] 172 | continue 173 | 174 | def _queue_management_worker(executor_reference, 175 | processes, 176 | pending_work_items, 177 | work_ids_queue, 178 | call_queue, 179 | result_queue): 180 | """Manages the communication between this process and the worker processes. 181 | 182 | This function is run in a local thread. 183 | 184 | Args: 185 | executor_reference: A weakref.ref to the ProcessPoolExecutor that owns 186 | this thread. Used to determine if the ProcessPoolExecutor has been 187 | garbage collected and that this function can exit. 188 | process: A list of the multiprocessing.Process instances used as 189 | workers. 190 | pending_work_items: A dict mapping work ids to _WorkItems e.g. 191 | {5: <_WorkItem...>, 6: <_WorkItem...>, ...} 192 | work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). 193 | call_queue: A multiprocessing.Queue that will be filled with _CallItems 194 | derived from _WorkItems for processing by the process workers. 195 | result_queue: A multiprocessing.Queue of _ResultItems generated by the 196 | process workers. 197 | """ 198 | nb_shutdown_processes = [0] 199 | def shutdown_one_process(): 200 | """Tell a worker to terminate, which will in turn wake us again""" 201 | call_queue.put(None) 202 | nb_shutdown_processes[0] += 1 203 | while True: 204 | _add_call_item_to_queue(pending_work_items, 205 | work_ids_queue, 206 | call_queue) 207 | 208 | result_item = result_queue.get(block=True) 209 | if result_item is not None: 210 | work_item = pending_work_items[result_item.work_id] 211 | del pending_work_items[result_item.work_id] 212 | 213 | if result_item.exception: 214 | work_item.future.set_exception(result_item.exception) 215 | else: 216 | work_item.future.set_result(result_item.result) 217 | # Delete references to object. See issue16284 218 | del work_item 219 | # Check whether we should start shutting down. 220 | executor = executor_reference() 221 | # No more work items can be added if: 222 | # - The interpreter is shutting down OR 223 | # - The executor that owns this worker has been collected OR 224 | # - The executor that owns this worker has been shutdown. 225 | if _shutdown or executor is None or executor._shutdown_thread: 226 | # Since no new work items can be added, it is safe to shutdown 227 | # this thread if there are no pending work items. 228 | if not pending_work_items: 229 | while nb_shutdown_processes[0] < len(processes): 230 | shutdown_one_process() 231 | # If .join() is not called on the created processes then 232 | # some multiprocessing.Queue methods may deadlock on Mac OS 233 | # X. 234 | for p in processes: 235 | p.join() 236 | call_queue.close() 237 | return 238 | del executor 239 | 240 | _system_limits_checked = False 241 | _system_limited = None 242 | def _check_system_limits(): 243 | global _system_limits_checked, _system_limited 244 | if _system_limits_checked: 245 | if _system_limited: 246 | raise NotImplementedError(_system_limited) 247 | _system_limits_checked = True 248 | try: 249 | import os 250 | nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") 251 | except (AttributeError, ValueError): 252 | # sysconf not available or setting not available 253 | return 254 | if nsems_max == -1: 255 | # indetermine limit, assume that limit is determined 256 | # by available memory only 257 | return 258 | if nsems_max >= 256: 259 | # minimum number of semaphores available 260 | # according to POSIX 261 | return 262 | _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max 263 | raise NotImplementedError(_system_limited) 264 | 265 | class ProcessPoolExecutor(_base.Executor): 266 | def __init__(self, max_workers=None): 267 | """Initializes a new ProcessPoolExecutor instance. 268 | 269 | Args: 270 | max_workers: The maximum number of processes that can be used to 271 | execute the given calls. If None or not given then as many 272 | worker processes will be created as the machine has processors. 273 | """ 274 | _check_system_limits() 275 | 276 | if max_workers is None: 277 | self._max_workers = multiprocessing.cpu_count() 278 | else: 279 | self._max_workers = max_workers 280 | 281 | # Make the call queue slightly larger than the number of processes to 282 | # prevent the worker processes from idling. But don't make it too big 283 | # because futures in the call queue cannot be cancelled. 284 | self._call_queue = multiprocessing.Queue(self._max_workers + 285 | EXTRA_QUEUED_CALLS) 286 | self._result_queue = multiprocessing.Queue() 287 | self._work_ids = queue.Queue() 288 | self._queue_management_thread = None 289 | self._processes = set() 290 | 291 | # Shutdown is a two-step process. 292 | self._shutdown_thread = False 293 | self._shutdown_lock = threading.Lock() 294 | self._queue_count = 0 295 | self._pending_work_items = {} 296 | 297 | def _start_queue_management_thread(self): 298 | # When the executor gets lost, the weakref callback will wake up 299 | # the queue management thread. 300 | def weakref_cb(_, q=self._result_queue): 301 | q.put(None) 302 | if self._queue_management_thread is None: 303 | self._queue_management_thread = threading.Thread( 304 | target=_queue_management_worker, 305 | args=(weakref.ref(self, weakref_cb), 306 | self._processes, 307 | self._pending_work_items, 308 | self._work_ids, 309 | self._call_queue, 310 | self._result_queue)) 311 | self._queue_management_thread.daemon = True 312 | self._queue_management_thread.start() 313 | _threads_queues[self._queue_management_thread] = self._result_queue 314 | 315 | def _adjust_process_count(self): 316 | for _ in range(len(self._processes), self._max_workers): 317 | p = multiprocessing.Process( 318 | target=_process_worker, 319 | args=(self._call_queue, 320 | self._result_queue)) 321 | p.start() 322 | self._processes.add(p) 323 | 324 | def submit(self, fn, *args, **kwargs): 325 | with self._shutdown_lock: 326 | if self._shutdown_thread: 327 | raise RuntimeError('cannot schedule new futures after shutdown') 328 | 329 | f = _base.Future() 330 | w = _WorkItem(f, fn, args, kwargs) 331 | 332 | self._pending_work_items[self._queue_count] = w 333 | self._work_ids.put(self._queue_count) 334 | self._queue_count += 1 335 | # Wake up queue management thread 336 | self._result_queue.put(None) 337 | 338 | self._start_queue_management_thread() 339 | self._adjust_process_count() 340 | return f 341 | submit.__doc__ = _base.Executor.submit.__doc__ 342 | 343 | def shutdown(self, wait=True): 344 | with self._shutdown_lock: 345 | self._shutdown_thread = True 346 | if self._queue_management_thread: 347 | # Wake up queue management thread 348 | self._result_queue.put(None) 349 | if wait: 350 | self._queue_management_thread.join(sys.maxint) 351 | # To reduce the risk of openning too many files, remove references to 352 | # objects that use file descriptors. 353 | self._queue_management_thread = None 354 | self._call_queue = None 355 | self._result_queue = None 356 | self._processes = None 357 | shutdown.__doc__ = _base.Executor.shutdown.__doc__ 358 | 359 | atexit.register(_python_exit) 360 | -------------------------------------------------------------------------------- /concurrent/futures/thread.py: -------------------------------------------------------------------------------- 1 | # Copyright 2009 Brian Quinlan. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Implements ThreadPoolExecutor.""" 5 | 6 | import atexit 7 | from concurrent.futures import _base 8 | import Queue as queue 9 | import threading 10 | import weakref 11 | import sys 12 | 13 | __author__ = 'Brian Quinlan (brian@sweetapp.com)' 14 | 15 | # Workers are created as daemon threads. This is done to allow the interpreter 16 | # to exit when there are still idle threads in a ThreadPoolExecutor's thread 17 | # pool (i.e. shutdown() was not called). However, allowing workers to die with 18 | # the interpreter has two undesirable properties: 19 | # - The workers would still be running during interpretor shutdown, 20 | # meaning that they would fail in unpredictable ways. 21 | # - The workers could be killed while evaluating a work item, which could 22 | # be bad if the callable being evaluated has external side-effects e.g. 23 | # writing to a file. 24 | # 25 | # To work around this problem, an exit handler is installed which tells the 26 | # workers to exit when their work queues are empty and then waits until the 27 | # threads finish. 28 | 29 | _threads_queues = weakref.WeakKeyDictionary() 30 | _shutdown = False 31 | 32 | def _python_exit(): 33 | global _shutdown 34 | _shutdown = True 35 | items = list(_threads_queues.items()) if _threads_queues else () 36 | for t, q in items: 37 | q.put(None) 38 | for t, q in items: 39 | t.join(sys.maxint) 40 | 41 | atexit.register(_python_exit) 42 | 43 | class _WorkItem(object): 44 | def __init__(self, future, fn, args, kwargs): 45 | self.future = future 46 | self.fn = fn 47 | self.args = args 48 | self.kwargs = kwargs 49 | 50 | def run(self): 51 | if not self.future.set_running_or_notify_cancel(): 52 | return 53 | 54 | try: 55 | result = self.fn(*self.args, **self.kwargs) 56 | except BaseException: 57 | e, tb = sys.exc_info()[1:] 58 | self.future.set_exception_info(e, tb) 59 | else: 60 | self.future.set_result(result) 61 | 62 | def _worker(executor_reference, work_queue): 63 | try: 64 | while True: 65 | work_item = work_queue.get(block=True) 66 | if work_item is not None: 67 | work_item.run() 68 | # Delete references to object. See issue16284 69 | del work_item 70 | continue 71 | executor = executor_reference() 72 | # Exit if: 73 | # - The interpreter is shutting down OR 74 | # - The executor that owns the worker has been collected OR 75 | # - The executor that owns the worker has been shutdown. 76 | if _shutdown or executor is None or executor._shutdown: 77 | # Notice other workers 78 | work_queue.put(None) 79 | return 80 | del executor 81 | except BaseException: 82 | _base.LOGGER.critical('Exception in worker', exc_info=True) 83 | 84 | class ThreadPoolExecutor(_base.Executor): 85 | def __init__(self, max_workers): 86 | """Initializes a new ThreadPoolExecutor instance. 87 | 88 | Args: 89 | max_workers: The maximum number of threads that can be used to 90 | execute the given calls. 91 | """ 92 | self._max_workers = max_workers 93 | self._work_queue = queue.Queue() 94 | self._threads = set() 95 | self._shutdown = False 96 | self._shutdown_lock = threading.Lock() 97 | 98 | def submit(self, fn, *args, **kwargs): 99 | with self._shutdown_lock: 100 | if self._shutdown: 101 | raise RuntimeError('cannot schedule new futures after shutdown') 102 | 103 | f = _base.Future() 104 | w = _WorkItem(f, fn, args, kwargs) 105 | 106 | self._work_queue.put(w) 107 | self._adjust_thread_count() 108 | return f 109 | submit.__doc__ = _base.Executor.submit.__doc__ 110 | 111 | def _adjust_thread_count(self): 112 | # When the executor gets lost, the weakref callback will wake up 113 | # the worker threads. 114 | def weakref_cb(_, q=self._work_queue): 115 | q.put(None) 116 | # TODO(bquinlan): Should avoid creating new threads if there are more 117 | # idle threads than items in the work queue. 118 | if len(self._threads) < self._max_workers: 119 | t = threading.Thread(target=_worker, 120 | args=(weakref.ref(self, weakref_cb), 121 | self._work_queue)) 122 | t.daemon = True 123 | t.start() 124 | self._threads.add(t) 125 | _threads_queues[t] = self._work_queue 126 | 127 | def shutdown(self, wait=True): 128 | with self._shutdown_lock: 129 | self._shutdown = True 130 | self._work_queue.put(None) 131 | if wait: 132 | for t in self._threads: 133 | t.join(sys.maxint) 134 | shutdown.__doc__ = _base.Executor.shutdown.__doc__ 135 | -------------------------------------------------------------------------------- /crawl.py: -------------------------------------------------------------------------------- 1 | """Compare the speed of downloading URLs sequentially vs. using futures.""" 2 | 3 | import functools 4 | import time 5 | import timeit 6 | import sys 7 | 8 | try: 9 | from urllib2 import urlopen 10 | except ImportError: 11 | from urllib.request import urlopen 12 | 13 | from concurrent.futures import (as_completed, ThreadPoolExecutor, 14 | ProcessPoolExecutor) 15 | 16 | URLS = ['http://www.google.com/', 17 | 'http://www.apple.com/', 18 | 'http://www.ibm.com', 19 | 'http://www.thisurlprobablydoesnotexist.com', 20 | 'http://www.slashdot.org/', 21 | 'http://www.python.org/', 22 | 'http://www.bing.com/', 23 | 'http://www.facebook.com/', 24 | 'http://www.yahoo.com/', 25 | 'http://www.youtube.com/', 26 | 'http://www.blogger.com/'] 27 | 28 | def load_url(url, timeout): 29 | kwargs = {'timeout': timeout} if sys.version_info >= (2, 6) else {} 30 | return urlopen(url, **kwargs).read() 31 | 32 | def download_urls_sequential(urls, timeout=60): 33 | url_to_content = {} 34 | for url in urls: 35 | try: 36 | url_to_content[url] = load_url(url, timeout=timeout) 37 | except: 38 | pass 39 | return url_to_content 40 | 41 | def download_urls_with_executor(urls, executor, timeout=60): 42 | try: 43 | url_to_content = {} 44 | future_to_url = dict((executor.submit(load_url, url, timeout), url) 45 | for url in urls) 46 | 47 | for future in as_completed(future_to_url): 48 | try: 49 | url_to_content[future_to_url[future]] = future.result() 50 | except: 51 | pass 52 | return url_to_content 53 | finally: 54 | executor.shutdown() 55 | 56 | def main(): 57 | for name, fn in [('sequential', 58 | functools.partial(download_urls_sequential, URLS)), 59 | ('processes', 60 | functools.partial(download_urls_with_executor, 61 | URLS, 62 | ProcessPoolExecutor(10))), 63 | ('threads', 64 | functools.partial(download_urls_with_executor, 65 | URLS, 66 | ThreadPoolExecutor(10)))]: 67 | sys.stdout.write('%s: ' % name.ljust(12)) 68 | start = time.time() 69 | url_map = fn() 70 | sys.stdout.write('%.2f seconds (%d of %d downloaded)\n' % 71 | (time.time() - start, len(url_map), len(URLS))) 72 | 73 | if __name__ == '__main__': 74 | main() 75 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | 9 | # Internal variables. 10 | PAPEROPT_a4 = -D latex_paper_size=a4 11 | PAPEROPT_letter = -D latex_paper_size=letter 12 | ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 13 | 14 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 15 | 16 | help: 17 | @echo "Please use \`make ' where is one of" 18 | @echo " html to make standalone HTML files" 19 | @echo " dirhtml to make HTML files named index.html in directories" 20 | @echo " pickle to make pickle files" 21 | @echo " json to make JSON files" 22 | @echo " htmlhelp to make HTML files and a HTML help project" 23 | @echo " qthelp to make HTML files and a qthelp project" 24 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 25 | @echo " changes to make an overview of all changed/added/deprecated items" 26 | @echo " linkcheck to check all external links for integrity" 27 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 28 | 29 | clean: 30 | -rm -rf _build/* 31 | 32 | html: 33 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html 34 | @echo 35 | @echo "Build finished. The HTML pages are in _build/html." 36 | 37 | dirhtml: 38 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml 39 | @echo 40 | @echo "Build finished. The HTML pages are in _build/dirhtml." 41 | 42 | pickle: 43 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle 44 | @echo 45 | @echo "Build finished; now you can process the pickle files." 46 | 47 | json: 48 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json 49 | @echo 50 | @echo "Build finished; now you can process the JSON files." 51 | 52 | htmlhelp: 53 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp 54 | @echo 55 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 56 | ".hhp project file in _build/htmlhelp." 57 | 58 | qthelp: 59 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp 60 | @echo 61 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 62 | ".qhcp project file in _build/qthelp, like this:" 63 | @echo "# qcollectiongenerator _build/qthelp/futures.qhcp" 64 | @echo "To view the help file:" 65 | @echo "# assistant -collectionFile _build/qthelp/futures.qhc" 66 | 67 | latex: 68 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex 69 | @echo 70 | @echo "Build finished; the LaTeX files are in _build/latex." 71 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 72 | "run these through (pdf)latex." 73 | 74 | changes: 75 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes 76 | @echo 77 | @echo "The overview file is in _build/changes." 78 | 79 | linkcheck: 80 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck 81 | @echo 82 | @echo "Link check complete; look for any errors in the above output " \ 83 | "or in _build/linkcheck/output.txt." 84 | 85 | doctest: 86 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest 87 | @echo "Testing of doctests in the sources finished, look at the " \ 88 | "results in _build/doctest/output.txt." 89 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # futures documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Jun 3 19:35:34 2009. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | #sys.path.append(os.path.abspath('.')) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # Add any Sphinx extension module names here, as strings. They can be extensions 24 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 25 | extensions = [] 26 | 27 | # Add any paths that contain templates here, relative to this directory. 28 | templates_path = ['_templates'] 29 | 30 | # The suffix of source filenames. 31 | source_suffix = '.rst' 32 | 33 | # The encoding of source files. 34 | #source_encoding = 'utf-8' 35 | 36 | # The master toctree document. 37 | master_doc = 'index' 38 | 39 | # General information about the project. 40 | project = u'futures' 41 | copyright = u'2009-2011, Brian Quinlan' 42 | 43 | # The version info for the project you're documenting, acts as replacement for 44 | # |version| and |release|, also used in various other places throughout the 45 | # built documents. 46 | # 47 | # The short X.Y version. 48 | version = '2.1.6' 49 | # The full version, including alpha/beta/rc tags. 50 | release = '2.1.6' 51 | 52 | # The language for content autogenerated by Sphinx. Refer to documentation 53 | # for a list of supported languages. 54 | #language = None 55 | 56 | # There are two options for replacing |today|: either, you set today to some 57 | # non-false value, then it is used: 58 | #today = '' 59 | # Else, today_fmt is used as the format for a strftime call. 60 | #today_fmt = '%B %d, %Y' 61 | 62 | # List of documents that shouldn't be included in the build. 63 | #unused_docs = [] 64 | 65 | # List of directories, relative to source directory, that shouldn't be searched 66 | # for source files. 67 | exclude_trees = ['_build'] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. Major themes that come with 93 | # Sphinx are currently 'default' and 'sphinxdoc'. 94 | html_theme = 'default' 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | #html_theme_options = {} 100 | 101 | # Add any paths that contain custom themes here, relative to this directory. 102 | #html_theme_path = [] 103 | 104 | # The name for this set of Sphinx documents. If None, it defaults to 105 | # " v documentation". 106 | #html_title = None 107 | 108 | # A shorter title for the navigation bar. Default is the same as html_title. 109 | #html_short_title = None 110 | 111 | # The name of an image file (relative to this directory) to place at the top 112 | # of the sidebar. 113 | #html_logo = None 114 | 115 | # The name of an image file (within the static path) to use as favicon of the 116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 117 | # pixels large. 118 | #html_favicon = None 119 | 120 | # Add any paths that contain custom static files (such as style sheets) here, 121 | # relative to this directory. They are copied after the builtin static files, 122 | # so a file named "default.css" will overwrite the builtin "default.css". 123 | html_static_path = ['_static'] 124 | 125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 126 | # using the given strftime format. 127 | #html_last_updated_fmt = '%b %d, %Y' 128 | 129 | # If true, SmartyPants will be used to convert quotes and dashes to 130 | # typographically correct entities. 131 | #html_use_smartypants = True 132 | 133 | # Custom sidebar templates, maps document names to template names. 134 | #html_sidebars = {} 135 | 136 | # Additional templates that should be rendered to pages, maps page names to 137 | # template names. 138 | #html_additional_pages = {} 139 | 140 | # If false, no module index is generated. 141 | #html_use_modindex = True 142 | 143 | # If false, no index is generated. 144 | #html_use_index = True 145 | 146 | # If true, the index is split into individual pages for each letter. 147 | #html_split_index = False 148 | 149 | # If true, links to the reST sources are added to the pages. 150 | #html_show_sourcelink = True 151 | 152 | # If true, an OpenSearch description file will be output, and all pages will 153 | # contain a tag referring to it. The value of this option must be the 154 | # base URL from which the finished HTML is served. 155 | #html_use_opensearch = '' 156 | 157 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 158 | #html_file_suffix = '' 159 | 160 | # Output file base name for HTML help builder. 161 | htmlhelp_basename = 'futuresdoc' 162 | 163 | 164 | # -- Options for LaTeX output -------------------------------------------------- 165 | 166 | # The paper size ('letter' or 'a4'). 167 | #latex_paper_size = 'letter' 168 | 169 | # The font size ('10pt', '11pt' or '12pt'). 170 | #latex_font_size = '10pt' 171 | 172 | # Grouping the document tree into LaTeX files. List of tuples 173 | # (source start file, target name, title, author, documentclass [howto/manual]). 174 | latex_documents = [ 175 | ('index', 'futures.tex', u'futures Documentation', 176 | u'Brian Quinlan', 'manual'), 177 | ] 178 | 179 | # The name of an image file (relative to this directory) to place at the top of 180 | # the title page. 181 | #latex_logo = None 182 | 183 | # For "manual" documents, if this is true, then toplevel headings are parts, 184 | # not chapters. 185 | #latex_use_parts = False 186 | 187 | # Additional stuff for the LaTeX preamble. 188 | #latex_preamble = '' 189 | 190 | # Documents to append as an appendix to all manuals. 191 | #latex_appendices = [] 192 | 193 | # If false, no module index is generated. 194 | #latex_use_modindex = True 195 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | :mod:`concurrent.futures` --- Asynchronous computation 2 | ====================================================== 3 | 4 | .. module:: concurrent.futures 5 | :synopsis: Execute computations asynchronously using threads or processes. 6 | 7 | The :mod:`concurrent.futures` module provides a high-level interface for 8 | asynchronously executing callables. 9 | 10 | The asynchronous execution can be be performed by threads using 11 | :class:`ThreadPoolExecutor` or seperate processes using 12 | :class:`ProcessPoolExecutor`. Both implement the same interface, which is 13 | defined by the abstract :class:`Executor` class. 14 | 15 | Executor Objects 16 | ---------------- 17 | 18 | :class:`Executor` is an abstract class that provides methods to execute calls 19 | asynchronously. It should not be used directly, but through its two 20 | subclasses: :class:`ThreadPoolExecutor` and :class:`ProcessPoolExecutor`. 21 | 22 | .. method:: Executor.submit(fn, *args, **kwargs) 23 | 24 | Schedules the callable to be executed as *fn*(*\*args*, *\*\*kwargs*) and 25 | returns a :class:`Future` representing the execution of the callable. 26 | 27 | :: 28 | 29 | with ThreadPoolExecutor(max_workers=1) as executor: 30 | future = executor.submit(pow, 323, 1235) 31 | print(future.result()) 32 | 33 | .. method:: Executor.map(func, *iterables, timeout=None) 34 | 35 | Equivalent to map(*func*, *\*iterables*) but func is executed asynchronously 36 | and several calls to *func* may be made concurrently. The returned iterator 37 | raises a :exc:`TimeoutError` if :meth:`__next__()` is called and the result 38 | isn't available after *timeout* seconds from the original call to 39 | :meth:`map()`. *timeout* can be an int or float. If *timeout* is not 40 | specified or ``None`` then there is no limit to the wait time. If a call 41 | raises an exception then that exception will be raised when its value is 42 | retrieved from the iterator. 43 | 44 | .. method:: Executor.shutdown(wait=True) 45 | 46 | Signal the executor that it should free any resources that it is using when 47 | the currently pending futures are done executing. Calls to 48 | :meth:`Executor.submit` and :meth:`Executor.map` made after shutdown will 49 | raise :exc:`RuntimeError`. 50 | 51 | If *wait* is `True` then this method will not return until all the pending 52 | futures are done executing and the resources associated with the executor 53 | have been freed. If *wait* is `False` then this method will return 54 | immediately and the resources associated with the executor will be freed 55 | when all pending futures are done executing. Regardless of the value of 56 | *wait*, the entire Python program will not exit until all pending futures 57 | are done executing. 58 | 59 | You can avoid having to call this method explicitly if you use the `with` 60 | statement, which will shutdown the `Executor` (waiting as if 61 | `Executor.shutdown` were called with *wait* set to `True`): 62 | 63 | :: 64 | 65 | import shutil 66 | with ThreadPoolExecutor(max_workers=4) as e: 67 | e.submit(shutil.copy, 'src1.txt', 'dest1.txt') 68 | e.submit(shutil.copy, 'src2.txt', 'dest2.txt') 69 | e.submit(shutil.copy, 'src3.txt', 'dest3.txt') 70 | e.submit(shutil.copy, 'src3.txt', 'dest4.txt') 71 | 72 | 73 | ThreadPoolExecutor Objects 74 | -------------------------- 75 | 76 | The :class:`ThreadPoolExecutor` class is an :class:`Executor` subclass that uses 77 | a pool of threads to execute calls asynchronously. 78 | 79 | Deadlock can occur when the callable associated with a :class:`Future` waits on 80 | the results of another :class:`Future`. For example: 81 | 82 | :: 83 | 84 | import time 85 | def wait_on_b(): 86 | time.sleep(5) 87 | print(b.result()) # b will never complete because it is waiting on a. 88 | return 5 89 | 90 | def wait_on_a(): 91 | time.sleep(5) 92 | print(a.result()) # a will never complete because it is waiting on b. 93 | return 6 94 | 95 | 96 | executor = ThreadPoolExecutor(max_workers=2) 97 | a = executor.submit(wait_on_b) 98 | b = executor.submit(wait_on_a) 99 | 100 | And: 101 | 102 | :: 103 | 104 | def wait_on_future(): 105 | f = executor.submit(pow, 5, 2) 106 | # This will never complete because there is only one worker thread and 107 | # it is executing this function. 108 | print(f.result()) 109 | 110 | executor = ThreadPoolExecutor(max_workers=1) 111 | executor.submit(wait_on_future) 112 | 113 | .. class:: ThreadPoolExecutor(max_workers) 114 | 115 | Executes calls asynchronously using a pool of at most *max_workers* threads. 116 | 117 | .. _threadpoolexecutor-example: 118 | 119 | ThreadPoolExecutor Example 120 | ^^^^^^^^^^^^^^^^^^^^^^^^^^ 121 | :: 122 | 123 | from concurrent import futures 124 | import urllib.request 125 | 126 | URLS = ['http://www.foxnews.com/', 127 | 'http://www.cnn.com/', 128 | 'http://europe.wsj.com/', 129 | 'http://www.bbc.co.uk/', 130 | 'http://some-made-up-domain.com/'] 131 | 132 | def load_url(url, timeout): 133 | return urllib.request.urlopen(url, timeout=timeout).read() 134 | 135 | with futures.ThreadPoolExecutor(max_workers=5) as executor: 136 | future_to_url = dict((executor.submit(load_url, url, 60), url) 137 | for url in URLS) 138 | 139 | for future in futures.as_completed(future_to_url): 140 | url = future_to_url[future] 141 | if future.exception() is not None: 142 | print('%r generated an exception: %s' % (url, 143 | future.exception())) 144 | else: 145 | print('%r page is %d bytes' % (url, len(future.result()))) 146 | 147 | ProcessPoolExecutor Objects 148 | --------------------------- 149 | 150 | The :class:`ProcessPoolExecutor` class is an :class:`Executor` subclass that 151 | uses a pool of processes to execute calls asynchronously. 152 | :class:`ProcessPoolExecutor` uses the :mod:`multiprocessing` module, which 153 | allows it to side-step the :term:`Global Interpreter Lock` but also means that 154 | only picklable objects can be executed and returned. 155 | 156 | Calling :class:`Executor` or :class:`Future` methods from a callable submitted 157 | to a :class:`ProcessPoolExecutor` will result in deadlock. 158 | 159 | .. class:: ProcessPoolExecutor(max_workers=None) 160 | 161 | Executes calls asynchronously using a pool of at most *max_workers* 162 | processes. If *max_workers* is ``None`` or not given then as many worker 163 | processes will be created as the machine has processors. 164 | 165 | .. _processpoolexecutor-example: 166 | 167 | ProcessPoolExecutor Example 168 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 169 | :: 170 | 171 | import math 172 | 173 | PRIMES = [ 174 | 112272535095293, 175 | 112582705942171, 176 | 112272535095293, 177 | 115280095190773, 178 | 115797848077099, 179 | 1099726899285419] 180 | 181 | def is_prime(n): 182 | if n % 2 == 0: 183 | return False 184 | 185 | sqrt_n = int(math.floor(math.sqrt(n))) 186 | for i in range(3, sqrt_n + 1, 2): 187 | if n % i == 0: 188 | return False 189 | return True 190 | 191 | def main(): 192 | with futures.ProcessPoolExecutor() as executor: 193 | for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)): 194 | print('%d is prime: %s' % (number, prime)) 195 | 196 | if __name__ == '__main__': 197 | main() 198 | 199 | Future Objects 200 | -------------- 201 | 202 | The :class:`Future` class encapulates the asynchronous execution of a callable. 203 | :class:`Future` instances are created by :meth:`Executor.submit`. 204 | 205 | .. method:: Future.cancel() 206 | 207 | Attempt to cancel the call. If the call is currently being executed then 208 | it cannot be cancelled and the method will return `False`, otherwise the call 209 | will be cancelled and the method will return `True`. 210 | 211 | .. method:: Future.cancelled() 212 | 213 | Return `True` if the call was successfully cancelled. 214 | 215 | .. method:: Future.running() 216 | 217 | Return `True` if the call is currently being executed and cannot be 218 | cancelled. 219 | 220 | .. method:: Future.done() 221 | 222 | Return `True` if the call was successfully cancelled or finished running. 223 | 224 | .. method:: Future.result(timeout=None) 225 | 226 | Return the value returned by the call. If the call hasn't yet completed then 227 | this method will wait up to *timeout* seconds. If the call hasn't completed 228 | in *timeout* seconds then a :exc:`TimeoutError` will be raised. *timeout* can 229 | be an int or float.If *timeout* is not specified or ``None`` then there is no 230 | limit to the wait time. 231 | 232 | If the future is cancelled before completing then :exc:`CancelledError` will 233 | be raised. 234 | 235 | If the call raised then this method will raise the same exception. 236 | 237 | .. method:: Future.exception(timeout=None) 238 | 239 | Return the exception raised by the call. If the call hasn't yet completed 240 | then this method will wait up to *timeout* seconds. If the call hasn't 241 | completed in *timeout* seconds then a :exc:`TimeoutError` will be raised. 242 | *timeout* can be an int or float. If *timeout* is not specified or ``None`` 243 | then there is no limit to the wait time. 244 | 245 | If the future is cancelled before completing then :exc:`CancelledError` will 246 | be raised. 247 | 248 | If the call completed without raising then ``None`` is returned. 249 | 250 | .. method:: Future.add_done_callback(fn) 251 | 252 | Attaches the callable *fn* to the future. *fn* will be called, with the 253 | future as its only argument, when the future is cancelled or finishes 254 | running. 255 | 256 | Added callables are called in the order that they were added and are always 257 | called in a thread belonging to the process that added them. If the callable 258 | raises an :exc:`Exception` then it will be logged and ignored. If the 259 | callable raises another :exc:`BaseException` then the behavior is not 260 | defined. 261 | 262 | If the future has already completed or been cancelled then *fn* will be 263 | called immediately. 264 | 265 | Internal Future Methods 266 | ^^^^^^^^^^^^^^^^^^^^^^^ 267 | 268 | The following :class:`Future` methods are meant for use in unit tests and 269 | :class:`Executor` implementations. 270 | 271 | .. method:: Future.set_running_or_notify_cancel() 272 | 273 | This method should only be called by :class:`Executor` implementations before 274 | executing the work associated with the :class:`Future` and by unit tests. 275 | 276 | If the method returns `False` then the :class:`Future` was cancelled i.e. 277 | :meth:`Future.cancel` was called and returned `True`. Any threads waiting 278 | on the :class:`Future` completing (i.e. through :func:`as_completed` or 279 | :func:`wait`) will be woken up. 280 | 281 | If the method returns `True` then the :class:`Future` was not cancelled 282 | and has been put in the running state i.e. calls to 283 | :meth:`Future.running` will return `True`. 284 | 285 | This method can only be called once and cannot be called after 286 | :meth:`Future.set_result` or :meth:`Future.set_exception` have been 287 | called. 288 | 289 | .. method:: Future.set_result(result) 290 | 291 | Sets the result of the work associated with the :class:`Future` to *result*. 292 | 293 | This method should only be used by Executor implementations and unit tests. 294 | 295 | .. method:: Future.set_exception(exception) 296 | 297 | Sets the result of the work associated with the :class:`Future` to the 298 | :class:`Exception` *exception*. 299 | 300 | This method should only be used by Executor implementations and unit tests. 301 | 302 | Module Functions 303 | ---------------- 304 | 305 | .. function:: wait(fs, timeout=None, return_when=ALL_COMPLETED) 306 | 307 | Wait for the :class:`Future` instances (possibly created by different 308 | :class:`Executor` instances) given by *fs* to complete. Returns a named 309 | 2-tuple of sets. The first set, named "done", contains the futures that 310 | completed (finished or were cancelled) before the wait completed. The second 311 | set, named "not_done", contains uncompleted futures. 312 | 313 | *timeout* can be used to control the maximum number of seconds to wait before 314 | returning. *timeout* can be an int or float. If *timeout* is not specified or 315 | ``None`` then there is no limit to the wait time. 316 | 317 | *return_when* indicates when this function should return. It must be one of 318 | the following constants: 319 | 320 | +-----------------------------+----------------------------------------+ 321 | | Constant | Description | 322 | +=============================+========================================+ 323 | | :const:`FIRST_COMPLETED` | The function will return when any | 324 | | | future finishes or is cancelled. | 325 | +-----------------------------+----------------------------------------+ 326 | | :const:`FIRST_EXCEPTION` | The function will return when any | 327 | | | future finishes by raising an | 328 | | | exception. If no future raises an | 329 | | | exception then it is equivalent to | 330 | | | `ALL_COMPLETED`. | 331 | +-----------------------------+----------------------------------------+ 332 | | :const:`ALL_COMPLETED` | The function will return when all | 333 | | | futures finish or are cancelled. | 334 | +-----------------------------+----------------------------------------+ 335 | 336 | .. function:: as_completed(fs, timeout=None) 337 | 338 | Returns an iterator over the :class:`Future` instances (possibly created by 339 | different :class:`Executor` instances) given by *fs* that yields futures as 340 | they complete (finished or were cancelled). Any futures given by *fs* that 341 | are duplicated will be returned once. Any futures that completed 342 | before :func:`as_completed` is called will be yielded first. The returned 343 | iterator raises a :exc:`TimeoutError` if :meth:`~iterator.__next__` is 344 | called and the result isn't available after *timeout* seconds from the 345 | original call to :func:`as_completed`. *timeout* can be an int or float. 346 | If *timeout* is not specified or ``None``, there is no limit to the wait 347 | time. 348 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | set SPHINXBUILD=sphinx-build 6 | set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% . 7 | if NOT "%PAPER%" == "" ( 8 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 9 | ) 10 | 11 | if "%1" == "" goto help 12 | 13 | if "%1" == "help" ( 14 | :help 15 | echo.Please use `make ^` where ^ is one of 16 | echo. html to make standalone HTML files 17 | echo. dirhtml to make HTML files named index.html in directories 18 | echo. pickle to make pickle files 19 | echo. json to make JSON files 20 | echo. htmlhelp to make HTML files and a HTML help project 21 | echo. qthelp to make HTML files and a qthelp project 22 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 23 | echo. changes to make an overview over all changed/added/deprecated items 24 | echo. linkcheck to check all external links for integrity 25 | echo. doctest to run all doctests embedded in the documentation if enabled 26 | goto end 27 | ) 28 | 29 | if "%1" == "clean" ( 30 | for /d %%i in (_build\*) do rmdir /q /s %%i 31 | del /q /s _build\* 32 | goto end 33 | ) 34 | 35 | if "%1" == "html" ( 36 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html 37 | echo. 38 | echo.Build finished. The HTML pages are in _build/html. 39 | goto end 40 | ) 41 | 42 | if "%1" == "dirhtml" ( 43 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml 44 | echo. 45 | echo.Build finished. The HTML pages are in _build/dirhtml. 46 | goto end 47 | ) 48 | 49 | if "%1" == "pickle" ( 50 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle 51 | echo. 52 | echo.Build finished; now you can process the pickle files. 53 | goto end 54 | ) 55 | 56 | if "%1" == "json" ( 57 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json 58 | echo. 59 | echo.Build finished; now you can process the JSON files. 60 | goto end 61 | ) 62 | 63 | if "%1" == "htmlhelp" ( 64 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp 65 | echo. 66 | echo.Build finished; now you can run HTML Help Workshop with the ^ 67 | .hhp project file in _build/htmlhelp. 68 | goto end 69 | ) 70 | 71 | if "%1" == "qthelp" ( 72 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% _build/qthelp 73 | echo. 74 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 75 | .qhcp project file in _build/qthelp, like this: 76 | echo.^> qcollectiongenerator _build\qthelp\futures.qhcp 77 | echo.To view the help file: 78 | echo.^> assistant -collectionFile _build\qthelp\futures.ghc 79 | goto end 80 | ) 81 | 82 | if "%1" == "latex" ( 83 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex 84 | echo. 85 | echo.Build finished; the LaTeX files are in _build/latex. 86 | goto end 87 | ) 88 | 89 | if "%1" == "changes" ( 90 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes 91 | echo. 92 | echo.The overview file is in _build/changes. 93 | goto end 94 | ) 95 | 96 | if "%1" == "linkcheck" ( 97 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck 98 | echo. 99 | echo.Link check complete; look for any errors in the above output ^ 100 | or in _build/linkcheck/output.txt. 101 | goto end 102 | ) 103 | 104 | if "%1" == "doctest" ( 105 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest 106 | echo. 107 | echo.Testing of doctests in the sources finished, look at the ^ 108 | results in _build/doctest/output.txt. 109 | goto end 110 | ) 111 | 112 | :end 113 | -------------------------------------------------------------------------------- /primes.py: -------------------------------------------------------------------------------- 1 | from __future__ import with_statement 2 | import math 3 | import time 4 | import sys 5 | 6 | from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor 7 | 8 | PRIMES = [ 9 | 112272535095293, 10 | 112582705942171, 11 | 112272535095293, 12 | 115280095190773, 13 | 115797848077099, 14 | 117450548693743, 15 | 993960000099397] 16 | 17 | def is_prime(n): 18 | if n % 2 == 0: 19 | return False 20 | 21 | sqrt_n = int(math.floor(math.sqrt(n))) 22 | for i in range(3, sqrt_n + 1, 2): 23 | if n % i == 0: 24 | return False 25 | return True 26 | 27 | def sequential(): 28 | return list(map(is_prime, PRIMES)) 29 | 30 | def with_process_pool_executor(): 31 | with ProcessPoolExecutor(10) as executor: 32 | return list(executor.map(is_prime, PRIMES)) 33 | 34 | def with_thread_pool_executor(): 35 | with ThreadPoolExecutor(10) as executor: 36 | return list(executor.map(is_prime, PRIMES)) 37 | 38 | def main(): 39 | for name, fn in [('sequential', sequential), 40 | ('processes', with_process_pool_executor), 41 | ('threads', with_thread_pool_executor)]: 42 | sys.stdout.write('%s: ' % name.ljust(12)) 43 | start = time.time() 44 | if fn() != [True] * len(PRIMES): 45 | sys.stdout.write('failed\n') 46 | else: 47 | sys.stdout.write('%.2f seconds\n' % (time.time() - start)) 48 | 49 | if __name__ == '__main__': 50 | main() 51 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_sphinx] 2 | source-dir = docs 3 | build-dir = build/sphinx 4 | 5 | [upload_docs] 6 | upload-dir = build/sphinx/html 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from warnings import warn 3 | import sys 4 | 5 | if sys.version_info[0] > 2: 6 | warn('This backport is meant only for Python 2.\n' 7 | 'Python 3 users do not need it, as the concurrent.futures ' 8 | 'package is available in the standard library.') 9 | 10 | extras = {} 11 | try: 12 | from setuptools import setup 13 | extras['zip_safe'] = False 14 | except ImportError: 15 | from distutils.core import setup 16 | 17 | setup(name='futures', 18 | version='3.0.5', 19 | description='Backport of the concurrent.futures package from Python 3.2', 20 | author='Brian Quinlan', 21 | author_email='brian@sweetapp.com', 22 | maintainer='Alex Gronholm', 23 | maintainer_email='alex.gronholm+pypi@nextday.fi', 24 | url='https://github.com/agronholm/pythonfutures', 25 | packages=['concurrent', 'concurrent.futures'], 26 | license='BSD', 27 | classifiers=['License :: OSI Approved :: BSD License', 28 | 'Development Status :: 5 - Production/Stable', 29 | 'Intended Audience :: Developers', 30 | 'Programming Language :: Python :: 2.6', 31 | 'Programming Language :: Python :: 2.7', 32 | 'Programming Language :: Python :: 2 :: Only'], 33 | **extras 34 | ) 35 | -------------------------------------------------------------------------------- /test_futures.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import sys 4 | import threading 5 | import functools 6 | import contextlib 7 | import logging 8 | import re 9 | import time 10 | import gc 11 | from StringIO import StringIO 12 | from test import test_support 13 | 14 | from concurrent import futures 15 | from concurrent.futures._base import ( 16 | PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future) 17 | 18 | try: 19 | import unittest2 as unittest 20 | except ImportError: 21 | import unittest 22 | 23 | 24 | def reap_threads(func): 25 | """Use this function when threads are being used. This will 26 | ensure that the threads are cleaned up even when the test fails. 27 | If threading is unavailable this function does nothing. 28 | """ 29 | @functools.wraps(func) 30 | def decorator(*args): 31 | key = test_support.threading_setup() 32 | try: 33 | return func(*args) 34 | finally: 35 | test_support.threading_cleanup(*key) 36 | return decorator 37 | 38 | 39 | # Executing the interpreter in a subprocess 40 | def _assert_python(expected_success, *args, **env_vars): 41 | cmd_line = [sys.executable] 42 | if not env_vars: 43 | cmd_line.append('-E') 44 | # Need to preserve the original environment, for in-place testing of 45 | # shared library builds. 46 | env = os.environ.copy() 47 | # But a special flag that can be set to override -- in this case, the 48 | # caller is responsible to pass the full environment. 49 | if env_vars.pop('__cleanenv', None): 50 | env = {} 51 | env.update(env_vars) 52 | cmd_line.extend(args) 53 | p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE, 54 | stdout=subprocess.PIPE, stderr=subprocess.PIPE, 55 | env=env) 56 | try: 57 | out, err = p.communicate() 58 | finally: 59 | subprocess._cleanup() 60 | p.stdout.close() 61 | p.stderr.close() 62 | rc = p.returncode 63 | err = strip_python_stderr(err) 64 | if (rc and expected_success) or (not rc and not expected_success): 65 | raise AssertionError( 66 | "Process return code is %d, " 67 | "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore'))) 68 | return rc, out, err 69 | 70 | 71 | def assert_python_ok(*args, **env_vars): 72 | """ 73 | Assert that running the interpreter with `args` and optional environment 74 | variables `env_vars` is ok and return a (return code, stdout, stderr) tuple. 75 | """ 76 | return _assert_python(True, *args, **env_vars) 77 | 78 | 79 | def strip_python_stderr(stderr): 80 | """Strip the stderr of a Python process from potential debug output 81 | emitted by the interpreter. 82 | 83 | This will typically be run on the result of the communicate() method 84 | of a subprocess.Popen object. 85 | """ 86 | stderr = re.sub(r"\[\d+ refs\]\r?\n?$".encode(), "".encode(), stderr).strip() 87 | return stderr 88 | 89 | 90 | @contextlib.contextmanager 91 | def captured_stderr(): 92 | """Return a context manager used by captured_stdout/stdin/stderr 93 | that temporarily replaces the sys stream *stream_name* with a StringIO.""" 94 | logging_stream = StringIO() 95 | handler = logging.StreamHandler(logging_stream) 96 | logging.root.addHandler(handler) 97 | 98 | try: 99 | yield logging_stream 100 | finally: 101 | logging.root.removeHandler(handler) 102 | 103 | 104 | def create_future(state=PENDING, exception=None, result=None): 105 | f = Future() 106 | f._state = state 107 | f._exception = exception 108 | f._result = result 109 | return f 110 | 111 | 112 | PENDING_FUTURE = create_future(state=PENDING) 113 | RUNNING_FUTURE = create_future(state=RUNNING) 114 | CANCELLED_FUTURE = create_future(state=CANCELLED) 115 | CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED) 116 | EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError()) 117 | SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42) 118 | 119 | 120 | def mul(x, y): 121 | return x * y 122 | 123 | 124 | def sleep_and_raise(t): 125 | time.sleep(t) 126 | raise Exception('this is an exception') 127 | 128 | def sleep_and_print(t, msg): 129 | time.sleep(t) 130 | print(msg) 131 | sys.stdout.flush() 132 | 133 | 134 | class ExecutorMixin: 135 | worker_count = 5 136 | 137 | def setUp(self): 138 | self.t1 = time.time() 139 | try: 140 | self.executor = self.executor_type(max_workers=self.worker_count) 141 | except NotImplementedError: 142 | e = sys.exc_info()[1] 143 | self.skipTest(str(e)) 144 | self._prime_executor() 145 | 146 | def tearDown(self): 147 | self.executor.shutdown(wait=True) 148 | dt = time.time() - self.t1 149 | if test_support.verbose: 150 | print("%.2fs" % dt) 151 | self.assertLess(dt, 60, "synchronization issue: test lasted too long") 152 | 153 | def _prime_executor(self): 154 | # Make sure that the executor is ready to do work before running the 155 | # tests. This should reduce the probability of timeouts in the tests. 156 | futures = [self.executor.submit(time.sleep, 0.1) 157 | for _ in range(self.worker_count)] 158 | 159 | for f in futures: 160 | f.result() 161 | 162 | 163 | class ThreadPoolMixin(ExecutorMixin): 164 | executor_type = futures.ThreadPoolExecutor 165 | 166 | 167 | class ProcessPoolMixin(ExecutorMixin): 168 | executor_type = futures.ProcessPoolExecutor 169 | 170 | 171 | class ExecutorShutdownTest(unittest.TestCase): 172 | def test_run_after_shutdown(self): 173 | self.executor.shutdown() 174 | self.assertRaises(RuntimeError, 175 | self.executor.submit, 176 | pow, 2, 5) 177 | 178 | def test_interpreter_shutdown(self): 179 | # Test the atexit hook for shutdown of worker threads and processes 180 | rc, out, err = assert_python_ok('-c', """if 1: 181 | from concurrent.futures import %s 182 | from time import sleep 183 | from test_futures import sleep_and_print 184 | t = %s(5) 185 | t.submit(sleep_and_print, 1.0, "apple") 186 | """ % (self.executor_type.__name__, self.executor_type.__name__)) 187 | # Errors in atexit hooks don't change the process exit code, check 188 | # stderr manually. 189 | self.assertFalse(err) 190 | self.assertEqual(out.strip(), "apple".encode()) 191 | 192 | def test_hang_issue12364(self): 193 | fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)] 194 | self.executor.shutdown() 195 | for f in fs: 196 | f.result() 197 | 198 | 199 | class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest): 200 | def _prime_executor(self): 201 | pass 202 | 203 | def test_threads_terminate(self): 204 | self.executor.submit(mul, 21, 2) 205 | self.executor.submit(mul, 6, 7) 206 | self.executor.submit(mul, 3, 14) 207 | self.assertEqual(len(self.executor._threads), 3) 208 | self.executor.shutdown() 209 | for t in self.executor._threads: 210 | t.join() 211 | 212 | def test_context_manager_shutdown(self): 213 | with futures.ThreadPoolExecutor(max_workers=5) as e: 214 | executor = e 215 | self.assertEqual(list(e.map(abs, range(-5, 5))), 216 | [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) 217 | 218 | for t in executor._threads: 219 | t.join() 220 | 221 | def test_del_shutdown(self): 222 | executor = futures.ThreadPoolExecutor(max_workers=5) 223 | executor.map(abs, range(-5, 5)) 224 | threads = executor._threads 225 | del executor 226 | gc.collect() 227 | 228 | for t in threads: 229 | t.join() 230 | 231 | 232 | class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest): 233 | def _prime_executor(self): 234 | pass 235 | 236 | def test_processes_terminate(self): 237 | self.executor.submit(mul, 21, 2) 238 | self.executor.submit(mul, 6, 7) 239 | self.executor.submit(mul, 3, 14) 240 | self.assertEqual(len(self.executor._processes), 5) 241 | processes = self.executor._processes 242 | self.executor.shutdown() 243 | 244 | for p in processes: 245 | p.join() 246 | 247 | def test_context_manager_shutdown(self): 248 | with futures.ProcessPoolExecutor(max_workers=5) as e: 249 | processes = e._processes 250 | self.assertEqual(list(e.map(abs, range(-5, 5))), 251 | [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) 252 | 253 | for p in processes: 254 | p.join() 255 | 256 | def test_del_shutdown(self): 257 | executor = futures.ProcessPoolExecutor(max_workers=5) 258 | list(executor.map(abs, range(-5, 5))) 259 | queue_management_thread = executor._queue_management_thread 260 | processes = executor._processes 261 | del executor 262 | gc.collect() 263 | 264 | queue_management_thread.join() 265 | for p in processes: 266 | p.join() 267 | 268 | 269 | class WaitTests(unittest.TestCase): 270 | 271 | def test_first_completed(self): 272 | future1 = self.executor.submit(mul, 21, 2) 273 | future2 = self.executor.submit(time.sleep, 1.5) 274 | 275 | done, not_done = futures.wait( 276 | [CANCELLED_FUTURE, future1, future2], 277 | return_when=futures.FIRST_COMPLETED) 278 | 279 | self.assertEqual(set([future1]), done) 280 | self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done) 281 | 282 | def test_first_completed_some_already_completed(self): 283 | future1 = self.executor.submit(time.sleep, 1.5) 284 | 285 | finished, pending = futures.wait( 286 | [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1], 287 | return_when=futures.FIRST_COMPLETED) 288 | 289 | self.assertEqual( 290 | set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]), 291 | finished) 292 | self.assertEqual(set([future1]), pending) 293 | 294 | def test_first_exception(self): 295 | future1 = self.executor.submit(mul, 2, 21) 296 | future2 = self.executor.submit(sleep_and_raise, 1.5) 297 | future3 = self.executor.submit(time.sleep, 3) 298 | 299 | finished, pending = futures.wait( 300 | [future1, future2, future3], 301 | return_when=futures.FIRST_EXCEPTION) 302 | 303 | self.assertEqual(set([future1, future2]), finished) 304 | self.assertEqual(set([future3]), pending) 305 | 306 | def test_first_exception_some_already_complete(self): 307 | future1 = self.executor.submit(divmod, 21, 0) 308 | future2 = self.executor.submit(time.sleep, 1.5) 309 | 310 | finished, pending = futures.wait( 311 | [SUCCESSFUL_FUTURE, 312 | CANCELLED_FUTURE, 313 | CANCELLED_AND_NOTIFIED_FUTURE, 314 | future1, future2], 315 | return_when=futures.FIRST_EXCEPTION) 316 | 317 | self.assertEqual(set([SUCCESSFUL_FUTURE, 318 | CANCELLED_AND_NOTIFIED_FUTURE, 319 | future1]), finished) 320 | self.assertEqual(set([CANCELLED_FUTURE, future2]), pending) 321 | 322 | def test_first_exception_one_already_failed(self): 323 | future1 = self.executor.submit(time.sleep, 2) 324 | 325 | finished, pending = futures.wait( 326 | [EXCEPTION_FUTURE, future1], 327 | return_when=futures.FIRST_EXCEPTION) 328 | 329 | self.assertEqual(set([EXCEPTION_FUTURE]), finished) 330 | self.assertEqual(set([future1]), pending) 331 | 332 | def test_all_completed(self): 333 | future1 = self.executor.submit(divmod, 2, 0) 334 | future2 = self.executor.submit(mul, 2, 21) 335 | 336 | finished, pending = futures.wait( 337 | [SUCCESSFUL_FUTURE, 338 | CANCELLED_AND_NOTIFIED_FUTURE, 339 | EXCEPTION_FUTURE, 340 | future1, 341 | future2], 342 | return_when=futures.ALL_COMPLETED) 343 | 344 | self.assertEqual(set([SUCCESSFUL_FUTURE, 345 | CANCELLED_AND_NOTIFIED_FUTURE, 346 | EXCEPTION_FUTURE, 347 | future1, 348 | future2]), finished) 349 | self.assertEqual(set(), pending) 350 | 351 | def test_timeout(self): 352 | future1 = self.executor.submit(mul, 6, 7) 353 | future2 = self.executor.submit(time.sleep, 3) 354 | 355 | finished, pending = futures.wait( 356 | [CANCELLED_AND_NOTIFIED_FUTURE, 357 | EXCEPTION_FUTURE, 358 | SUCCESSFUL_FUTURE, 359 | future1, future2], 360 | timeout=1.5, 361 | return_when=futures.ALL_COMPLETED) 362 | 363 | self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, 364 | EXCEPTION_FUTURE, 365 | SUCCESSFUL_FUTURE, 366 | future1]), finished) 367 | self.assertEqual(set([future2]), pending) 368 | 369 | 370 | class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests): 371 | 372 | def test_pending_calls_race(self): 373 | # Issue #14406: multi-threaded race condition when waiting on all 374 | # futures. 375 | event = threading.Event() 376 | def future_func(): 377 | event.wait() 378 | oldswitchinterval = sys.getcheckinterval() 379 | sys.setcheckinterval(1) 380 | try: 381 | fs = set(self.executor.submit(future_func) for i in range(100)) 382 | event.set() 383 | futures.wait(fs, return_when=futures.ALL_COMPLETED) 384 | finally: 385 | sys.setcheckinterval(oldswitchinterval) 386 | 387 | 388 | class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests): 389 | pass 390 | 391 | 392 | class AsCompletedTests(unittest.TestCase): 393 | # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout. 394 | def test_no_timeout(self): 395 | future1 = self.executor.submit(mul, 2, 21) 396 | future2 = self.executor.submit(mul, 7, 6) 397 | 398 | completed = set(futures.as_completed( 399 | [CANCELLED_AND_NOTIFIED_FUTURE, 400 | EXCEPTION_FUTURE, 401 | SUCCESSFUL_FUTURE, 402 | future1, future2])) 403 | self.assertEqual(set( 404 | [CANCELLED_AND_NOTIFIED_FUTURE, 405 | EXCEPTION_FUTURE, 406 | SUCCESSFUL_FUTURE, 407 | future1, future2]), 408 | completed) 409 | 410 | def test_zero_timeout(self): 411 | future1 = self.executor.submit(time.sleep, 2) 412 | completed_futures = set() 413 | try: 414 | for future in futures.as_completed( 415 | [CANCELLED_AND_NOTIFIED_FUTURE, 416 | EXCEPTION_FUTURE, 417 | SUCCESSFUL_FUTURE, 418 | future1], 419 | timeout=0): 420 | completed_futures.add(future) 421 | except futures.TimeoutError: 422 | pass 423 | 424 | self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, 425 | EXCEPTION_FUTURE, 426 | SUCCESSFUL_FUTURE]), 427 | completed_futures) 428 | 429 | def test_duplicate_futures(self): 430 | # Issue 20367. Duplicate futures should not raise exceptions or give 431 | # duplicate responses. 432 | future1 = self.executor.submit(time.sleep, 2) 433 | completed = [f for f in futures.as_completed([future1,future1])] 434 | self.assertEqual(len(completed), 1) 435 | 436 | 437 | class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests): 438 | pass 439 | 440 | 441 | class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests): 442 | pass 443 | 444 | 445 | class ExecutorTest(unittest.TestCase): 446 | # Executor.shutdown() and context manager usage is tested by 447 | # ExecutorShutdownTest. 448 | def test_submit(self): 449 | future = self.executor.submit(pow, 2, 8) 450 | self.assertEqual(256, future.result()) 451 | 452 | def test_submit_keyword(self): 453 | future = self.executor.submit(mul, 2, y=8) 454 | self.assertEqual(16, future.result()) 455 | 456 | def test_map(self): 457 | self.assertEqual( 458 | list(self.executor.map(pow, range(10), range(10))), 459 | list(map(pow, range(10), range(10)))) 460 | 461 | def test_map_exception(self): 462 | i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) 463 | self.assertEqual(next(i), (0, 1)) 464 | self.assertEqual(next(i), (0, 1)) 465 | self.assertRaises(ZeroDivisionError, next, i) 466 | 467 | def test_map_timeout(self): 468 | results = [] 469 | try: 470 | for i in self.executor.map(time.sleep, 471 | [0, 0, 3], 472 | timeout=1.5): 473 | results.append(i) 474 | except futures.TimeoutError: 475 | pass 476 | else: 477 | self.fail('expected TimeoutError') 478 | 479 | self.assertEqual([None, None], results) 480 | 481 | 482 | class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest): 483 | def test_map_submits_without_iteration(self): 484 | """Tests verifying issue 11777.""" 485 | finished = [] 486 | def record_finished(n): 487 | finished.append(n) 488 | 489 | self.executor.map(record_finished, range(10)) 490 | self.executor.shutdown(wait=True) 491 | self.assertEqual(len(finished), 10) 492 | 493 | 494 | class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest): 495 | pass 496 | 497 | 498 | class FutureTests(unittest.TestCase): 499 | def test_done_callback_with_result(self): 500 | callback_result = [None] 501 | def fn(callback_future): 502 | callback_result[0] = callback_future.result() 503 | 504 | f = Future() 505 | f.add_done_callback(fn) 506 | f.set_result(5) 507 | self.assertEqual(5, callback_result[0]) 508 | 509 | def test_done_callback_with_exception(self): 510 | callback_exception = [None] 511 | def fn(callback_future): 512 | callback_exception[0] = callback_future.exception() 513 | 514 | f = Future() 515 | f.add_done_callback(fn) 516 | f.set_exception(Exception('test')) 517 | self.assertEqual(('test',), callback_exception[0].args) 518 | 519 | def test_done_callback_with_cancel(self): 520 | was_cancelled = [None] 521 | def fn(callback_future): 522 | was_cancelled[0] = callback_future.cancelled() 523 | 524 | f = Future() 525 | f.add_done_callback(fn) 526 | self.assertTrue(f.cancel()) 527 | self.assertTrue(was_cancelled[0]) 528 | 529 | def test_done_callback_raises(self): 530 | with captured_stderr() as stderr: 531 | raising_was_called = [False] 532 | fn_was_called = [False] 533 | 534 | def raising_fn(callback_future): 535 | raising_was_called[0] = True 536 | raise Exception('doh!') 537 | 538 | def fn(callback_future): 539 | fn_was_called[0] = True 540 | 541 | f = Future() 542 | f.add_done_callback(raising_fn) 543 | f.add_done_callback(fn) 544 | f.set_result(5) 545 | self.assertTrue(raising_was_called) 546 | self.assertTrue(fn_was_called) 547 | self.assertIn('Exception: doh!', stderr.getvalue()) 548 | 549 | def test_done_callback_already_successful(self): 550 | callback_result = [None] 551 | def fn(callback_future): 552 | callback_result[0] = callback_future.result() 553 | 554 | f = Future() 555 | f.set_result(5) 556 | f.add_done_callback(fn) 557 | self.assertEqual(5, callback_result[0]) 558 | 559 | def test_done_callback_already_failed(self): 560 | callback_exception = [None] 561 | def fn(callback_future): 562 | callback_exception[0] = callback_future.exception() 563 | 564 | f = Future() 565 | f.set_exception(Exception('test')) 566 | f.add_done_callback(fn) 567 | self.assertEqual(('test',), callback_exception[0].args) 568 | 569 | def test_done_callback_already_cancelled(self): 570 | was_cancelled = [None] 571 | def fn(callback_future): 572 | was_cancelled[0] = callback_future.cancelled() 573 | 574 | f = Future() 575 | self.assertTrue(f.cancel()) 576 | f.add_done_callback(fn) 577 | self.assertTrue(was_cancelled[0]) 578 | 579 | def test_repr(self): 580 | self.assertRegexpMatches(repr(PENDING_FUTURE), 581 | '') 582 | self.assertRegexpMatches(repr(RUNNING_FUTURE), 583 | '') 584 | self.assertRegexpMatches(repr(CANCELLED_FUTURE), 585 | '') 586 | self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE), 587 | '') 588 | self.assertRegexpMatches( 589 | repr(EXCEPTION_FUTURE), 590 | '') 591 | self.assertRegexpMatches( 592 | repr(SUCCESSFUL_FUTURE), 593 | '') 594 | 595 | def test_cancel(self): 596 | f1 = create_future(state=PENDING) 597 | f2 = create_future(state=RUNNING) 598 | f3 = create_future(state=CANCELLED) 599 | f4 = create_future(state=CANCELLED_AND_NOTIFIED) 600 | f5 = create_future(state=FINISHED, exception=IOError()) 601 | f6 = create_future(state=FINISHED, result=5) 602 | 603 | self.assertTrue(f1.cancel()) 604 | self.assertEqual(f1._state, CANCELLED) 605 | 606 | self.assertFalse(f2.cancel()) 607 | self.assertEqual(f2._state, RUNNING) 608 | 609 | self.assertTrue(f3.cancel()) 610 | self.assertEqual(f3._state, CANCELLED) 611 | 612 | self.assertTrue(f4.cancel()) 613 | self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED) 614 | 615 | self.assertFalse(f5.cancel()) 616 | self.assertEqual(f5._state, FINISHED) 617 | 618 | self.assertFalse(f6.cancel()) 619 | self.assertEqual(f6._state, FINISHED) 620 | 621 | def test_cancelled(self): 622 | self.assertFalse(PENDING_FUTURE.cancelled()) 623 | self.assertFalse(RUNNING_FUTURE.cancelled()) 624 | self.assertTrue(CANCELLED_FUTURE.cancelled()) 625 | self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled()) 626 | self.assertFalse(EXCEPTION_FUTURE.cancelled()) 627 | self.assertFalse(SUCCESSFUL_FUTURE.cancelled()) 628 | 629 | def test_done(self): 630 | self.assertFalse(PENDING_FUTURE.done()) 631 | self.assertFalse(RUNNING_FUTURE.done()) 632 | self.assertTrue(CANCELLED_FUTURE.done()) 633 | self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done()) 634 | self.assertTrue(EXCEPTION_FUTURE.done()) 635 | self.assertTrue(SUCCESSFUL_FUTURE.done()) 636 | 637 | def test_running(self): 638 | self.assertFalse(PENDING_FUTURE.running()) 639 | self.assertTrue(RUNNING_FUTURE.running()) 640 | self.assertFalse(CANCELLED_FUTURE.running()) 641 | self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running()) 642 | self.assertFalse(EXCEPTION_FUTURE.running()) 643 | self.assertFalse(SUCCESSFUL_FUTURE.running()) 644 | 645 | def test_result_with_timeout(self): 646 | self.assertRaises(futures.TimeoutError, 647 | PENDING_FUTURE.result, timeout=0) 648 | self.assertRaises(futures.TimeoutError, 649 | RUNNING_FUTURE.result, timeout=0) 650 | self.assertRaises(futures.CancelledError, 651 | CANCELLED_FUTURE.result, timeout=0) 652 | self.assertRaises(futures.CancelledError, 653 | CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0) 654 | self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0) 655 | self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42) 656 | 657 | def test_result_with_success(self): 658 | # TODO(brian@sweetapp.com): This test is timing dependant. 659 | def notification(): 660 | # Wait until the main thread is waiting for the result. 661 | time.sleep(1) 662 | f1.set_result(42) 663 | 664 | f1 = create_future(state=PENDING) 665 | t = threading.Thread(target=notification) 666 | t.start() 667 | 668 | self.assertEqual(f1.result(timeout=5), 42) 669 | 670 | def test_result_with_cancel(self): 671 | # TODO(brian@sweetapp.com): This test is timing dependant. 672 | def notification(): 673 | # Wait until the main thread is waiting for the result. 674 | time.sleep(1) 675 | f1.cancel() 676 | 677 | f1 = create_future(state=PENDING) 678 | t = threading.Thread(target=notification) 679 | t.start() 680 | 681 | self.assertRaises(futures.CancelledError, f1.result, timeout=5) 682 | 683 | def test_exception_with_timeout(self): 684 | self.assertRaises(futures.TimeoutError, 685 | PENDING_FUTURE.exception, timeout=0) 686 | self.assertRaises(futures.TimeoutError, 687 | RUNNING_FUTURE.exception, timeout=0) 688 | self.assertRaises(futures.CancelledError, 689 | CANCELLED_FUTURE.exception, timeout=0) 690 | self.assertRaises(futures.CancelledError, 691 | CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0) 692 | self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0), 693 | IOError)) 694 | self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None) 695 | 696 | def test_exception_with_success(self): 697 | def notification(): 698 | # Wait until the main thread is waiting for the exception. 699 | time.sleep(1) 700 | with f1._condition: 701 | f1._state = FINISHED 702 | f1._exception = IOError() 703 | f1._condition.notify_all() 704 | 705 | f1 = create_future(state=PENDING) 706 | t = threading.Thread(target=notification) 707 | t.start() 708 | 709 | self.assertTrue(isinstance(f1.exception(timeout=5), IOError)) 710 | 711 | @reap_threads 712 | def test_main(): 713 | try: 714 | test_support.run_unittest(ProcessPoolExecutorTest, 715 | ThreadPoolExecutorTest, 716 | ProcessPoolWaitTests, 717 | ThreadPoolWaitTests, 718 | ProcessPoolAsCompletedTests, 719 | ThreadPoolAsCompletedTests, 720 | FutureTests, 721 | ProcessPoolShutdownTest, 722 | ThreadPoolShutdownTest) 723 | finally: 724 | test_support.reap_children() 725 | 726 | if __name__ == "__main__": 727 | test_main() 728 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py26,py27,pypy,jython 3 | 4 | [testenv] 5 | commands={envpython} test_futures.py [] 6 | 7 | [testenv:py26] 8 | deps=unittest2 9 | --------------------------------------------------------------------------------