├── cpu-bound ├── serial.py ├── multip.py ├── thread.py ├── coro_thread.py └── coro_multi.py ├── run.sh ├── io-bound ├── serial.py ├── multip.py ├── thread.py ├── coro_thread.py └── coro_multi.py ├── mixed ├── serial.py ├── multip.py ├── thread.py ├── coro_thread.py └── coro_multi.py └── README.md /cpu-bound/serial.py: -------------------------------------------------------------------------------- 1 | def do_something(n): 2 | for i in range(n): 3 | for x in range(i): 4 | continue 5 | 6 | do_something(2000) 7 | do_something(15000) 8 | do_something(10000) 9 | do_something(1000) 10 | do_something(12000) 11 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # Get the results from each test set. 3 | 4 | for dir in */; 5 | do 6 | echo "====> $dir <====" 7 | for py in `ls $dir*py`; 8 | do 9 | echo $py 10 | echo "--------" 11 | time python3.5 $py 12 | done 13 | done 14 | rm *.dat 15 | -------------------------------------------------------------------------------- /cpu-bound/multip.py: -------------------------------------------------------------------------------- 1 | def do_something(n): 2 | for i in range(n): 3 | for x in range(i): 4 | continue 5 | 6 | 7 | from multiprocessing import Pool 8 | 9 | with Pool(5) as p: 10 | p.map(do_something, [ 11 | 2000, 12 | 15000, 13 | 10000, 14 | 1000, 15 | 12000 16 | ]) 17 | -------------------------------------------------------------------------------- /cpu-bound/thread.py: -------------------------------------------------------------------------------- 1 | def do_something(n): 2 | for i in range(n): 3 | for x in range(i): 4 | continue 5 | 6 | from multiprocessing.pool import ThreadPool as Pool 7 | 8 | with Pool(5) as p: 9 | p.map(do_something, [ 10 | 2000, 11 | 15000, 12 | 10000, 13 | 1000, 14 | 12000 15 | ]) 16 | 
-------------------------------------------------------------------------------- /io-bound/serial.py: -------------------------------------------------------------------------------- 1 | def do_something(fname, data): 2 | with open(fname, 'w') as f: 3 | for line in data: 4 | f.write('%s\n' % line) 5 | 6 | do_something('test1.dat', ['asdf'] * 200000) 7 | do_something('test2.dat', ['asdf'] * 1500000) 8 | do_something('test3.dat', ['asdf'] * 1000000) 9 | do_something('test4.dat', ['asdf'] * 100000) 10 | do_something('test5.dat', ['asdf'] * 1200000) 11 | -------------------------------------------------------------------------------- /mixed/serial.py: -------------------------------------------------------------------------------- 1 | def do_something(fname, data): 2 | with open(fname, 'w') as f: 3 | for line in data: 4 | f.write('%s\n' % line) 5 | 6 | def do_something_else(n): 7 | for i in range(n): 8 | for x in range(i): 9 | continue 10 | 11 | do_something_else(2000) 12 | do_something_else(15000) 13 | do_something('test3.dat', ['asdf'] * 1000000) 14 | do_something('test4.dat', ['asdf'] * 100000) 15 | do_something('test5.dat', ['asdf'] * 1200000) 16 | -------------------------------------------------------------------------------- /io-bound/multip.py: -------------------------------------------------------------------------------- 1 | def do_something(d): 2 | fname, data = d 3 | with open(fname, 'w') as f: 4 | for line in data: 5 | f.write('%s\n' % line) 6 | 7 | from multiprocessing import Pool 8 | 9 | with Pool(5) as p: 10 | p.map(do_something, [ 11 | ('test1.dat', ['asdf'] * 200000), 12 | ('test2.dat', ['asdf'] * 1500000), 13 | ('test3.dat', ['asdf'] * 1000000), 14 | ('test4.dat', ['asdf'] * 100000), 15 | ('test5.dat', ['asdf'] * 1200000) 16 | ]) 17 | -------------------------------------------------------------------------------- /io-bound/thread.py: -------------------------------------------------------------------------------- 1 | def do_something(d): 2 | fname, 
data = d 3 | with open(fname, 'w') as f: 4 | for line in data: 5 | f.write('%s\n' % line) 6 | 7 | from multiprocessing.pool import ThreadPool as Pool 8 | 9 | with Pool(5) as p: 10 | p.map(do_something, [ 11 | ('test1.dat', ['asdf'] * 200000), 12 | ('test2.dat', ['asdf'] * 1500000), 13 | ('test3.dat', ['asdf'] * 1000000), 14 | ('test4.dat', ['asdf'] * 100000), 15 | ('test5.dat', ['asdf'] * 1200000) 16 | ]) 17 | -------------------------------------------------------------------------------- /cpu-bound/coro_thread.py: -------------------------------------------------------------------------------- 1 | import asyncio, types 2 | 3 | async def do_something(n, loop): 4 | await count(n, loop) 5 | 6 | @types.coroutine 7 | def count(n, loop): 8 | yield from loop.run_in_executor(None, do_count, n) 9 | 10 | def do_count(n): 11 | for i in range(n): 12 | for x in range(i): 13 | continue 14 | 15 | loop = asyncio.get_event_loop() 16 | tasks= [ 17 | do_something(2000, loop), 18 | do_something(15000, loop), 19 | do_something(10000, loop), 20 | do_something(1000, loop), 21 | do_something(12000, loop)] 22 | 23 | coro = asyncio.gather(*(task for task in tasks)) 24 | loop.run_until_complete(coro) 25 | loop.close() 26 | 27 | -------------------------------------------------------------------------------- /mixed/multip.py: -------------------------------------------------------------------------------- 1 | def do_something(d): 2 | fname, data = d 3 | with open(fname, 'w') as f: 4 | for line in data: 5 | f.write('%s\n' % line) 6 | 7 | def do_something_else(n): 8 | for i in range(n[0]): 9 | for x in range(i): 10 | continue 11 | 12 | def f(args): 13 | fn, args = args[0], args[1:] 14 | fn(args) 15 | 16 | from multiprocessing import Pool 17 | 18 | with Pool(5) as p: 19 | p.map(f, [ 20 | (do_something_else, 2000), 21 | (do_something_else, 15000), 22 | (do_something, 'test3.dat', ['asdf'] * 1000000), 23 | (do_something, 'test4.dat', ['asdf'] * 100000), 24 | (do_something, 'test5.dat', ['asdf'] 
* 1200000) 25 | ]) 26 | -------------------------------------------------------------------------------- /mixed/thread.py: -------------------------------------------------------------------------------- 1 | def do_something(d): 2 | fname, data = d 3 | with open(fname, 'w') as f: 4 | for line in data: 5 | f.write('%s\n' % line) 6 | 7 | def do_something_else(n): 8 | for i in range(n[0]): 9 | for x in range(i): 10 | continue 11 | 12 | def f(args): 13 | fn, args = args[0], args[1:] 14 | fn(args) 15 | 16 | from multiprocessing.pool import ThreadPool as Pool 17 | 18 | with Pool(5) as p: 19 | p.map(f, [ 20 | (do_something_else, 2000), 21 | (do_something_else, 15000), 22 | (do_something, 'test3.dat', ['asdf'] * 1000000), 23 | (do_something, 'test4.dat', ['asdf'] * 100000), 24 | (do_something, 'test5.dat', ['asdf'] * 1200000) 25 | ]) 26 | -------------------------------------------------------------------------------- /cpu-bound/coro_multi.py: -------------------------------------------------------------------------------- 1 | import asyncio, types 2 | from concurrent.futures import ProcessPoolExecutor as PPE 3 | 4 | ppe = PPE() 5 | 6 | async def do_something(n, loop): 7 | await count(n, loop) 8 | 9 | @types.coroutine 10 | def count(n, loop): 11 | yield from loop.run_in_executor(ppe, do_count, n) 12 | 13 | def do_count(n): 14 | for i in range(n): 15 | for x in range(i): 16 | continue 17 | 18 | loop = asyncio.get_event_loop() 19 | tasks= [ 20 | do_something(2000, loop), 21 | do_something(15000, loop), 22 | do_something(10000, loop), 23 | do_something(1000, loop), 24 | do_something(12000, loop)] 25 | 26 | coro = asyncio.gather(*(task for task in tasks)) 27 | loop.run_until_complete(coro) 28 | loop.close() 29 | 30 | -------------------------------------------------------------------------------- /io-bound/coro_thread.py: -------------------------------------------------------------------------------- 1 | import asyncio, types 2 | 3 | async def do_something(fname, data, loop): 
4 | await write(fname, data, loop) 5 | 6 | @types.coroutine 7 | def write(f, data, loop): 8 | yield from loop.run_in_executor(None, do_write, f, data) 9 | 10 | def do_write(f, data): 11 | with open(f, 'w') as f: 12 | for line in data: 13 | f.write(line) 14 | 15 | loop = asyncio.get_event_loop() 16 | tasks= [ 17 | do_something('test1.dat', ['asdf'] * 200000, loop), 18 | do_something('test2.dat', ['asdf'] * 1500000, loop), 19 | do_something('test3.dat', ['asdf'] * 1000000, loop), 20 | do_something('test4.dat', ['asdf'] * 100000, loop), 21 | do_something('test5.dat', ['asdf'] * 1200000, loop)] 22 | 23 | coro = asyncio.gather(*(task for task in tasks)) 24 | loop.run_until_complete(coro) 25 | loop.close() 26 | 27 | -------------------------------------------------------------------------------- /io-bound/coro_multi.py: -------------------------------------------------------------------------------- 1 | import asyncio, types 2 | from concurrent.futures import ProcessPoolExecutor as PPE 3 | 4 | ppe = PPE() 5 | 6 | async def do_something(fname, data, loop): 7 | await write(fname, data, loop) 8 | 9 | @types.coroutine 10 | def write(f, data, loop): 11 | yield from loop.run_in_executor(ppe, do_write, f, data) 12 | 13 | def do_write(f, data): 14 | with open(f, 'w') as f: 15 | for line in data: 16 | f.write(line) 17 | 18 | loop = asyncio.get_event_loop() 19 | tasks= [ 20 | do_something('test1.dat', ['asdf'] * 200000, loop), 21 | do_something('test2.dat', ['asdf'] * 1500000, loop), 22 | do_something('test3.dat', ['asdf'] * 1000000, loop), 23 | do_something('test4.dat', ['asdf'] * 100000, loop), 24 | do_something('test5.dat', ['asdf'] * 1200000, loop)] 25 | 26 | coro = asyncio.gather(*(task for task in tasks)) 27 | loop.run_until_complete(coro) 28 | loop.close() 29 | 30 | -------------------------------------------------------------------------------- /mixed/coro_thread.py: -------------------------------------------------------------------------------- 1 | import asyncio, types 
2 | from concurrent.futures import ProcessPoolExecutor as PPE 3 | 4 | # IO bound 5 | 6 | async def do_something(fname, data, loop): 7 | await write(fname, data, loop) 8 | 9 | @types.coroutine 10 | def write(f, data, loop): 11 | yield from loop.run_in_executor(None, do_write, f, data) 12 | 13 | def do_write(f, data): 14 | with open(f, 'w') as f: 15 | for line in data: 16 | f.write(line) 17 | 18 | # CPU bound 19 | 20 | async def do_something_else(n, loop): 21 | await count(n, loop) 22 | 23 | @types.coroutine 24 | def count(n, loop): 25 | for i in range(n): 26 | yield from loop.run_in_executor(None, do_count, i) 27 | 28 | def do_count(i): 29 | for x in range(i): 30 | pass 31 | 32 | 33 | loop = asyncio.get_event_loop() 34 | tasks= [ 35 | do_something_else(2000, loop), 36 | do_something_else(15000, loop), 37 | do_something('test3.dat', ['asdf'] * 1000000, loop), 38 | do_something('test4.dat', ['asdf'] * 100000, loop), 39 | do_something('test5.dat', ['asdf'] * 1200000, loop)] 40 | 41 | coro = asyncio.gather(*(task for task in tasks)) 42 | loop.run_until_complete(coro) 43 | loop.close() 44 | -------------------------------------------------------------------------------- /mixed/coro_multi.py: -------------------------------------------------------------------------------- 1 | import asyncio, types 2 | from concurrent.futures import ProcessPoolExecutor as PPE 3 | 4 | ppe = PPE() 5 | 6 | # IO bound 7 | 8 | async def do_something(fname, data, loop): 9 | await write(fname, data, loop) 10 | 11 | @types.coroutine 12 | def write(f, data, loop): 13 | yield from loop.run_in_executor(ppe, do_write, f, data) 14 | 15 | def do_write(f, data): 16 | with open(f, 'w') as f: 17 | for line in data: 18 | f.write(line) 19 | 20 | # CPU bound 21 | 22 | async def do_something_else(n, loop): 23 | await count(n, loop) 24 | 25 | @types.coroutine 26 | def count(n, loop): 27 | for i in range(n): 28 | yield from loop.run_in_executor(ppe, do_count, i) 29 | 30 | def do_count(i): 31 | for x in range(i): 
Each of the 3 test types should be taken separately as they contain different
23 | 24 | Test it yourself 25 | --------------- 26 | 27 | 28 | git clone https://github.com/Sonictherocketman/python3.5_coro_testing 29 | cd python3.5_coro_testing 30 | bash run.sh 31 | 32 | 33 | Sample Results 34 | -------------- 35 | 36 | *Retina Macbook Pro (2014, 13")* 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 |
TypeTestTime
CPU BoundSerial7.804s
Coro Multi4.739s
Coro Thread8.221s
Multiprocess4.488s
Threading8.251s
I/O BoundSerial1.924s
Coro Multi0.951s
Coro Thread1.97s
Multiprocess1.320s
Threading1.807s
Mixed I/O and CPUSerial4.789s
Coro Multi13.574s
Coro Thread7.724s
Multiprocess4.513s
Threading4.976s
119 | --------------------------------------------------------------------------------