├── threading2
│   ├── tests
│   │   ├── stdregr
│   │   │   ├── __init__.py
│   │   │   ├── lock_te_sts.py
│   │   │   └── test_threading.py
│   │   └── __init__.py
│   ├── t2_win32.py
│   ├── t2_posix.py
│   ├── __init__.py
│   └── t2_base.py
├── .gitignore
├── MANIFEST.in
├── .travis.yml
├── NOTES.txt
├── setup.py
├── ChangeLog.txt
├── LICENSE.txt
└── README.rst
/threading2/tests/stdregr/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.so
2 | *.o
3 | *.pyc
4 | *~
5 | MANIFEST
6 | build/
7 | dist/
8 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 |
2 | include README.txt
3 | include LICENSE.txt
4 | include ChangeLog.txt
5 |
6 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "2.7"
4 | install:
5 | - "pip install discover"
6 | script:
7 | - "python -m unittest discover -v "
8 |
--------------------------------------------------------------------------------
/NOTES.txt:
--------------------------------------------------------------------------------
1 |
2 | win32 threading primitives:
3 |
4 | Event -> Event
5 | RLock -> critical sections? mutex?
6 | BoundedSemaphore -> CreateSemaphore
7 | Condition -> condition variables (Vista+)
8 |
9 |
10 | pthreads primitives:
11 |
12 | Condition -> condition
13 | RLock -> mutex
14 |
15 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 |
2 | from distutils.core import setup
3 |
4 | import threading2
5 | VERSION = threading2.__version__
6 |
7 | NAME = "threading2"
8 | DESCRIPTION = "like the standard threading module, but awesomer"
9 | LONG_DESC = threading2.__doc__
10 | AUTHOR = "Ryan Kelly"
11 | AUTHOR_EMAIL = "ryan@rfk.id.au"
12 | URL = "http://github.com/rfk/threading2"
13 | LICENSE = "MIT"
14 | KEYWORDS = "thread threading"
15 |
16 | setup(name=NAME,
17 | version=VERSION,
18 | author=AUTHOR,
19 | author_email=AUTHOR_EMAIL,
20 | url=URL,
21 | description=DESCRIPTION,
22 | long_description=LONG_DESC,
23 | license=LICENSE,
24 | keywords=KEYWORDS,
25 | packages=["threading2","threading2.tests","threading2.tests.stdregr"],
26 | )
27 |
28 |
--------------------------------------------------------------------------------
/ChangeLog.txt:
--------------------------------------------------------------------------------
1 |
2 | v0.3.1:
3 |
4 | * posix: don't try to use sched_setaffinity for setting thread affinity.
5 | It requires the os-level thread id which we do not know without hackery.
6 |
7 | v0.3.0:
8 |
9 | * ThreadGroup: use weakrefs internally to avoid creating memory leaks.
10 | * SHLock: fix cleanup logic when acquire() times out
11 |
12 | v0.2.1:
13 |
14 | * rework SHLock class to avoid deadlocks, be fair and re-entrant.
15 | * ensure that the thread affinity is set properly on thread startup.
16 |
17 | v0.2.0:
18 |
19 | * add SHLock class for shared/exclusive (also known as read/write) locks.
20 |
21 | v0.1.4:
22 |
23 | * fix silly typo when loading libc (it's a miracle previous versions
24 | worked at all!)
25 |
26 | v0.1.3:
27 |
28 | * better CPUSet <-> bitmask conversion
29 | * posix: dynamically calculate size of _cpuset struct.
30 |
31 | v0.1.1:
32 |
33 | * support for setting process- and thread-level CPU affinity.
34 |
35 | v0.1.0:
36 |
37 | * initial release - you might say *everything* has changed...
38 |
39 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2009-2011 Ryan Kelly
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 |
2 | Status: Unmaintained
3 | ====================
4 |
5 | .. image:: http://unmaintained.tech/badge.svg
6 | :target: http://unmaintained.tech/
7 | :alt: No Maintenance Intended
8 |
9 | I am `no longer actively maintaining this project `_.
10 |
11 |
12 | threading2: like the standard threading module, but awesomer.
13 | ==============================================================
14 |
15 | This module is designed as a drop-in replacement and extension for the default
16 | "threading" module. It has two main objectives:
17 |
18 | * implement primitives using native platform functionality where possible
19 | * expose more sophisticated functionality where it can be done uniformly
20 |
21 | The following extensions are currently implemented:
22 |
23 | * ability to set (advisory) thread priority
24 | * ability to set (advisory) CPU affinity at thread and process level
25 | * thread groups for simultaneous management of multiple threads
26 | * SHLock class for shared/exclusive (also known as read/write) locks
27 |
28 | The following API niceties are also included:
29 |
30 | * all blocking methods take a "timeout" argument and return a success code
31 | * all exposed objects are actual classes and can be safely subclassed
32 |
33 | This has currently only been tested on WinXP and Ubuntu Karmic; similar
34 | platforms *should* work OK, and other platforms *should* fall back to using
35 | sensible default behaviour, but I'm not making any guarantees at this stage.
36 |
37 | Additional planned extensions include:
38 |
39 | * make stack_size a kwarg when creating a thread
40 | * native events, semaphores and timed waits on win32
41 | * native conditions and timed waits on pthreads platforms
42 | * native SHLock implementations (SRW on Win Vista+, pthread_rwlock)
43 |
44 |
--------------------------------------------------------------------------------
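
To make the feature list in the README concrete, here is a minimal usage sketch. It is illustrative only and not a file from the repository; it assumes a multi-CPU machine, that the package is importable, and that Thread.priority / Thread.affinity, ThreadGroup, SHLock and CPUSet behave as described in the README and in threading2/__init__.py below.

    # Illustrative sketch only -- not a file from this repository.
    import threading2 as threading

    def work():
        pass

    t = threading.Thread(target=work)
    t.priority = 0.8                        # advisory priority, a fraction in [0.0, 1.0]
    t.affinity = threading.CPUSet([0, 1])   # advisory CPU affinity (CPUs 0 and 1)
    t.start()
    t.join(timeout=5)                       # blocking calls accept a timeout and
                                            # return a success code

    group = threading.ThreadGroup(name="workers")
    group.priority = 0.5                    # applied to every thread in the group
    alive = group.is_alive()                # True if any member thread is alive
    ok = group.join(timeout=10)             # True if all members joined in time

    lock = threading.SHLock()
    lock.acquire(shared=True)               # shared ("read") access
    lock.release()
    lock.acquire(shared=False)              # exclusive ("write") access
    lock.release()

How individual threads are attached to a ThreadGroup is handled in t2_base.py (not included above), so the group operations here are shown on an empty group. Priorities are fractions in [0.0, 1.0]; the platform modules below (t2_win32.py, t2_posix.py) map them onto native priority levels.
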
/threading2/t2_win32.py:
--------------------------------------------------------------------------------
1 |
2 | import t2_base
3 | from t2_base import *
4 | from t2_base import __all__
5 |
6 | import errno
7 | from ctypes import *
8 |
9 | kernel32 = windll.kernel32
10 |
11 | THREAD_SET_INFORMATION = 0x20
12 | THREAD_PRIORITY_ABOVE_NORMAL = 1
13 |
14 |
15 | class Thread(Thread):
16 |
17 | def before_run(self):
18 | self.__w32id = kernel32.GetCurrentThreadId()
19 | super(Thread,self).before_run()
20 |
21 | if hasattr(kernel32,"SetThreadPriority"):
22 | def _set_priority(self,priority):
23 | priority = super(Thread,self)._set_priority(priority)
24 | if priority >= 0.95:
25 | # max == THREAD_PRIORITY_TIME_CRITICAL
26 | value = 15
27 | elif priority <= 0.05:
28 | # min == THREAD_PRIORITY_IDLE
29 | value = -15
30 | else:
31 | # Spread the rest evenly over the five levels from -2 to 2
32 | # (THREAD_PRIORITY_LOWEST through THREAD_PRIORITY_HIGHEST)
33 | value = int(round(4*priority) - 2)
34 | handle = kernel32.OpenThread(THREAD_SET_INFORMATION,False,self.__w32id)
35 | value = c_int(value)
36 | try:
37 | if not kernel32.SetThreadPriority(handle,value):
38 | raise WinError()
39 | finally:
40 | kernel32.CloseHandle(handle)
41 | return priority
42 |
43 |
44 | if hasattr(kernel32,"SetThreadAffinityMask"):
45 | def _set_affinity(self,affinity):
46 | affinity = super(Thread,self)._set_affinity(affinity)
47 | mask = affinity.to_bitmask()
48 | handle = kernel32.OpenThread(THREAD_SET_INFORMATION,False,self.__w32id)
49 | try:
50 | if not kernel32.SetThreadAffinityMask(handle,mask):
51 | raise WinError()
52 | finally:
53 | kernel32.CloseHandle(handle)
54 | return affinity
55 |
56 |
57 |
58 | if hasattr(kernel32,"GetProcessAffinityMask"):
59 |
60 | def _GetProcessAffinityMask():
61 | pmask = c_int()
62 | smask = c_int()
63 | p = kernel32.GetCurrentProcess()
64 | if not kernel32.GetProcessAffinityMask(p,byref(pmask),byref(smask)):
65 | raise WinError()
66 | return (pmask.value,smask.value)
67 |
68 | def system_affinity():
69 | return CPUSet(_GetProcessAffinityMask()[1])
70 | system_affinity.__doc__ = t2_base.system_affinity.__doc__
71 |
72 | def process_affinity(affinity=None):
73 | if affinity is not None:
74 | mask = CPUSet(affinity).to_bitmask()
75 | p = kernel32.GetCurrentProcess()
76 | if not kernel32.SetProcessAffinityMask(p,mask):
77 | raise WinError()
78 | return CPUSet(_GetProcessAffinityMask()[0])
79 | process_affinity.__doc__ = t2_base.process_affinity.__doc__
80 |
81 |
82 |
--------------------------------------------------------------------------------
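
The fraction-to-Win32 mapping used by Thread._set_priority above can be stated on its own. This is a restatement for illustration only; the helper name map_priority is hypothetical and not part of t2_win32.py.

    # Illustrative restatement of the mapping in Thread._set_priority above.
    def map_priority(priority):
        if priority >= 0.95:
            return 15                        # THREAD_PRIORITY_TIME_CRITICAL
        elif priority <= 0.05:
            return -15                       # THREAD_PRIORITY_IDLE
        # otherwise spread evenly over the five levels from -2 (LOWEST) to 2 (HIGHEST)
        return int(round(4 * priority) - 2)

    assert map_priority(0.0) == -15
    assert map_priority(0.5) == 0
    assert map_priority(1.0) == 15
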
/threading2/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from __future__ import with_statement
3 |
4 | import os
5 | import sys
6 | import unittest
7 | import doctest
8 | import random
9 | import time
10 |
11 | import threading2
12 | from threading2 import *
13 |
14 | # Grab everything needed to run standard threading test function
15 | from threading import _test as std_threading_test
16 | from threading import _Verbose, _sleep
17 | from collections import deque
18 |
19 |
20 | class TestStandard(unittest.TestCase):
21 | """Run standard threading testcases using our new classes."""
22 |
23 | def test_standard(self):
24 | exec std_threading_test.func_code in globals()
25 |
26 |
27 | class TestSHLock(unittest.TestCase):
28 | """Testcases for SHLock class."""
29 |
30 | def test_contention(self):
31 | lock = SHLock()
32 | done = []
33 | def lots_of_acquires():
34 | for _ in xrange(1000):
35 | shared = random.choice([True,False])
36 | lock.acquire(shared=shared)
37 | lock.acquire(shared=shared)
38 | time.sleep(random.random() * 0.0001)
39 | lock.release()
40 | time.sleep(random.random() * 0.0001)
41 | lock.acquire(shared=shared)
42 | time.sleep(random.random() * 0.0001)
43 | lock.release()
44 | lock.release()
45 | done.append(True)
46 | threads = [Thread(target=lots_of_acquires) for _ in xrange(3)]
47 | for t in threads:
48 | t.daemon = True
49 | t.start()
50 | for t in threads:
51 | if not t.join(timeout=10):
52 | raise RuntimeError("SHLock deadlock")
53 | if len(done) != len(threads):
54 | print done, threads
55 | raise RuntimeError("SHLock test error")
56 |
57 | class TestSHLockContext(unittest.TestCase):
58 | class TestPassed(Exception): pass
59 |
60 | @staticmethod
61 | def raise_test_passed(): raise TestSHLockContext.TestPassed
62 |
63 | @staticmethod
64 | def noop(*args, **kwargs): pass
65 |
66 | def check_args(self, passed, expected):
67 | def f(**f_kwargs):
68 | self.assertItemsEqual(expected.items(), f_kwargs.items(), 'Passed {} Got {} Expected {}'.format(passed, f_kwargs, expected))
69 | raise TestSHLockContext.TestPassed
70 | return f
71 |
72 | def test_context_without_args(self):
73 |
74 | lock_acquire = SHLock()
75 | lock_acquire.acquire = TestSHLockContext.raise_test_passed
76 | with self.assertRaises(TestSHLockContext.TestPassed):
77 | with lock_acquire:
78 | pass
79 |
80 | lock_release = SHLock()
81 | lock_release.release = TestSHLockContext.raise_test_passed
82 | with self.assertRaises(TestSHLockContext.TestPassed):
83 | with lock_release:
84 | pass
85 |
86 | def test_context_with_args(self):
87 | for passed,expected in (
88 | ({}, {'shared':False, 'blocking':True, 'timeout':None}),
89 | ({'shared':True}, {'shared':True, 'blocking':True, 'timeout':None}),
90 | ({'blocking':False}, {'shared':False, 'blocking':False, 'timeout':None}),
91 | ({'timeout':1}, {'shared':False, 'blocking':True, 'timeout':1}),
92 | ):
93 | lock_acquire_arg = SHLock()
94 | lock_acquire_arg.acquire = self.check_args(passed, expected)
95 | with self.assertRaises(TestSHLockContext.TestPassed):
96 | with lock_acquire_arg(**passed):
97 | pass
98 |
99 | lock_release_arg = SHLock()
100 | lock_release_arg.acquire = TestSHLockContext.noop
101 | lock_release_arg.release = TestSHLockContext.raise_test_passed
102 | with self.assertRaises(TestSHLockContext.TestPassed):
103 | with lock_release_arg(**passed):
104 | pass
105 |
106 | class TestCPUSet(unittest.TestCase):
107 | """Unittests for CPUSet class."""
108 |
109 | def test_initialisation(self):
110 | def assertSetEquals(set1,set2):
111 | self.assertEquals(sorted(list(set1)),sorted(list(set2)))
112 | # Initialisation from iterables
113 | assertSetEquals(CPUSet(),[])
114 | assertSetEquals(CPUSet([0,3,2]),[0,2,3])
115 | assertSetEquals(CPUSet(""),[])
116 | assertSetEquals(CPUSet("3158"),[1,3,5,8])
117 | assertSetEquals(CPUSet("3158"),[1,3,5,8])
118 | # Initialisation from bitmasks
119 | assertSetEquals(CPUSet(0),[])
120 | assertSetEquals(CPUSet(1),[0])
121 | assertSetEquals(CPUSet(2),[1])
122 | assertSetEquals(CPUSet(3),[0,1])
123 | assertSetEquals(CPUSet(4),[2])
124 | assertSetEquals(CPUSet(5),[0,2])
125 | assertSetEquals(CPUSet(6),[1,2])
126 | assertSetEquals(CPUSet(7),[0,1,2])
127 | assertSetEquals(CPUSet(1 << 7),[7])
128 | assertSetEquals(CPUSet(1 << 127),[127])
129 | assertSetEquals(CPUSet(1 << 128),[128])
130 |
131 | def test_to_bitmask(self):
132 | self.assertEquals(CPUSet().to_bitmask(),0)
133 | self.assertEquals(CPUSet("0").to_bitmask(),1)
134 | self.assertEquals(CPUSet("1").to_bitmask(),2)
135 | self.assertEquals(CPUSet("01").to_bitmask(),3)
136 | self.assertEquals(CPUSet("2").to_bitmask(),4)
137 | self.assertEquals(CPUSet("02").to_bitmask(),5)
138 | self.assertEquals(CPUSet("12").to_bitmask(),6)
139 | self.assertEquals(CPUSet("012").to_bitmask(),7)
140 | for i in xrange(100):
141 | self.assertEquals(CPUSet(i).to_bitmask(),i)
142 |
143 | class TestMisc(unittest.TestCase):
144 | """Miscellaneous test procedures."""
145 |
146 | def test_docstrings(self):
147 | """Test threading2 docstrings."""
148 | assert doctest.testmod(threading2)[0] == 0
149 |
150 | def test_README(self):
151 | """Ensure that the README is in sync with the docstring.
152 |
153 | This test should always pass; if the README is out of sync it just
154 | updates it with the contents of threading2.__doc__.
155 | """
156 | dirname = os.path.dirname
157 | readme = os.path.join(dirname(dirname(dirname(__file__))),"README.txt")
158 | if not os.path.isfile(readme):
159 | f = open(readme,"wb")
160 | f.write(threading2.__doc__)
161 | f.close()
162 | else:
163 | f = open(readme,"rb")
164 | if f.read() != threading2.__doc__:
165 | f.close()
166 | f = open(readme,"wb")
167 | f.write(threading2.__doc__)
168 | f.close()
169 |
170 |
--------------------------------------------------------------------------------
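
TestCPUSet above exercises the correspondence between a set of CPU indices and an integer bitmask (bit i set means CPU i is a member). A plain-Python restatement of that correspondence, with hypothetical helper names that are not part of threading2:

    # Hypothetical helpers mirroring the CPUSet <-> bitmask behaviour
    # exercised by TestCPUSet above.
    def cpus_to_bitmask(cpus):
        mask = 0
        for cpu in cpus:
            mask |= 1 << cpu                 # bit i represents CPU i
        return mask

    def bitmask_to_cpus(mask):
        cpus = []
        i = 0
        while mask:
            if mask & 1:
                cpus.append(i)
            mask >>= 1
            i += 1
        return cpus

    assert cpus_to_bitmask([0, 1, 2]) == 7   # matches CPUSet("012").to_bitmask()
    assert bitmask_to_cpus(5) == [0, 2]      # matches CPUSet(5)
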
/threading2/t2_posix.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import errno
4 | from ctypes import *
5 | from ctypes.util import find_library
6 |
7 | import t2_base
8 | from t2_base import *
9 | from t2_base import __all__
10 |
11 | libc = find_library("c")
12 | if libc is None:
13 | raise ImportError("libc not found")
14 | libc = CDLL(libc,use_errno=True)
15 |
16 | pthread = find_library("pthread")
17 | if pthread is None:
18 | raise ImportError("pthreads not found")
19 | pthread = CDLL(pthread,use_errno=True)
20 |
21 |
22 | SCHED_OTHER = 0
23 | SCHED_FIFO = 1
24 | SCHED_RR = 2
25 |
26 |
27 | class _sched_param(Structure):
28 | _fields_ = [("priority",c_int32)]
29 |
30 |
31 | # _cpuset is the structure representing a set of cpus. Ordinarily you'd
32 | # manipulate it using platform-specific macros, but we don't have that luxury.
33 | # Instead, we adjust the definition of the structure in response to EINVAL.
34 | # TODO: there's no guarantee the cpu_set_t structure is a long array bitmask.
35 | _CPUSET_SIZE = 1
36 | _MAX_CPUSET_SIZE = 8
37 | _HAVE_ADJUSTED_CPUSET_SIZE = False
38 | _cpuset_bits_t = c_int32
39 | class _cpuset(Structure):
40 | _fields_ = [("bits",_cpuset_bits_t*_CPUSET_SIZE)]
41 |
42 | def _incr_cpuset_size():
43 | global _cpuset
44 | global _CPUSET_SIZE
45 | _CPUSET_SIZE += 1
46 | class _cpuset(Structure):
47 | _fields_ = [("bits",_cpuset_bits_t*_CPUSET_SIZE)]
48 |
49 |
50 |
51 | def _priority_range(policy=None):
52 | """Determine the priority range (min,max) for the given scheduler policy.
53 |
54 | If no policy is specified, the current default policy is used.
55 | """
56 | if policy is None:
57 | policy = libc.sched_getscheduler(0)
58 | if policy < 0:
59 | raise OSError(get_errno(),"sched_getscheduler")
60 | max = libc.sched_get_priority_max(policy)
61 | if max < 0:
62 | raise OSError(get_errno(),"sched_get_priority_max")
63 | min = libc.sched_get_priority_min(policy)
64 | if min < 0:
65 | raise OSError(get_errno(),"sched_get_priority_min")
66 | return (min,max)
67 |
68 |
69 | # Try to define _do_get_affinity and _do_set_affinity based on availability
70 | # of the necessary functions in libpthread.
71 | if hasattr(pthread,"pthread_setaffinity_np"):
72 | def _do_set_affinity(tid,affinity):
73 | if not _HAVE_ADJUSTED_CPUSET_SIZE:
74 | _do_get_affinity(tid)
75 | affinity = CPUSet(affinity)
76 | mask = c_long()
77 | mask.value = affinity.to_bitmask()
78 | res = pthread.pthread_setaffinity_np(tid,sizeof(mask),byref(mask))
79 | if res:
80 | raise OSError(res,"pthread_setaffinity_np")
81 | def _do_get_affinity(tid):
82 | global _HAVE_ADJUSTED_CPUSET_SIZE
83 | _HAVE_ADJUSTED_CPUSET_SIZE = True
84 | mask = _cpuset()
85 | res = pthread.pthread_getaffinity_np(tid,sizeof(mask),byref(mask))
86 | if res:
87 | if res == errno.EINVAL and _CPUSET_SIZE < _MAX_CPUSET_SIZE:
88 | _incr_cpuset_size()
89 | return _do_get_affinity(tid)
90 | raise OSError(res,"pthread_getaffinity_np")
91 | intmask = 0
92 | shift = 8*sizeof(_cpuset_bits_t)
93 | for i in xrange(len(mask.bits)):
94 | intmask |= mask.bits[i] << (i*shift)
95 | return CPUSet(intmask)
96 | else:
97 | _do_set_affinity = None
98 | _do_get_affinity = None
99 |
100 |
101 | class Thread(Thread):
102 |
103 | if hasattr(pthread,"pthread_setpriority"):
104 | def _set_priority(self,priority):
105 | priority = super(Thread,self)._set_priority(priority)
106 | me = self.ident
107 | (min,max) = _priority_range()
108 | range = max - min
109 | if range <= 0:
110 | if hasattr(pthread,"pthread_setschedparam"):
111 | # We're in a priority-less scheduler, try to change.
112 | (min,max) = _priority_range(SCHED_RR)
113 | value = int((max - min) * priority + min)
114 | value = byref(_sched_param(value))
115 | res = pthread.pthread_setschedparam(me,SCHED_RR,value)
116 | if res == errno.EPERM:
117 | res = 0
118 | elif res:
119 | raise OSError(res,"pthread_setschedparam")
120 | else:
121 | value = int(range * priority + min)
122 | res = pthread.pthread_setpriority(me,value)
123 | if res: raise OSError(res,"pthread_setpriority")
124 | return priority
125 |
126 | if _do_set_affinity is not None:
127 | def _set_affinity(self,affinity):
128 | affinity = super(Thread,self)._set_affinity(affinity)
129 | me = self.ident
130 | _do_set_affinity(me,affinity)
131 | return affinity
132 |
133 |
134 | def system_affinity():
135 | # Try to read cpu info from /proc
136 | try:
137 | with open("/proc/cpuinfo","r") as cpuinfo:
138 | affinity = CPUSet()
139 | for ln in cpuinfo:
140 | info = ln.split()
141 | if len(info) == 3:
142 | if info[0] == "processor" and info[1] == ":":
143 | affinity.add(info[2])
144 | return affinity
145 | except EnvironmentError:
146 | pass
147 | # Fall back to the process affinity
148 | return process_affinity()
149 | system_affinity.__doc__ = t2_base.system_affinity.__doc__
150 |
151 |
152 | if hasattr(libc,"sched_setaffinity"):
153 |
154 | def _do_set_proc_affinity(pid,affinity):
155 | if not _HAVE_ADJUSTED_CPUSET_SIZE:
156 | _do_get_proc_affinity(pid)
157 | affinity = CPUSet(affinity)
158 |
159 | mask = _cpuset()
160 | bitmask = affinity.to_bitmask()
161 | chunkmask = 2**(8*sizeof(_cpuset_bits_t))-1
162 | for i in xrange(_CPUSET_SIZE):
163 | mask.bits[i] = bitmask & chunkmask
164 | bitmask = bitmask >> (8*sizeof(_cpuset_bits_t))
165 | if libc.sched_setaffinity(pid,sizeof(mask),byref(mask)) < 0:
166 | raise OSError(get_errno(),"sched_setaffinity")
167 |
168 | def _do_get_proc_affinity(pid):
169 | global _HAVE_ADJUSTED_CPUSET_SIZE
170 | _HAVE_ADJUSTED_CPUSET_SIZE = True
171 | mask = _cpuset()
172 | if libc.sched_getaffinity(pid,sizeof(mask),byref(mask)) < 0:
173 | eno = get_errno()
174 | if eno == errno.EINVAL and _CPUSET_SIZE < _MAX_CPUSET_SIZE:
175 | _incr_cpuset_size()
176 | return _do_get_proc_affinity(pid)
177 | raise OSError(eno,"sched_getaffinity")
178 | intmask = 0
179 | shift = 8*sizeof(_cpuset_bits_t)
180 | for i in xrange(len(mask.bits)):
181 | intmask |= mask.bits[i] << (i*shift)
182 | return CPUSet(intmask)
183 |
184 | def process_affinity(affinity=None):
185 | pid = os.getpid()
186 | if affinity is not None:
187 | _do_set_proc_affinity(pid,affinity)
188 | return _do_get_proc_affinity(pid)
189 | process_affinity.__doc__ = t2_base.process_affinity.__doc__
190 |
191 |
192 |
--------------------------------------------------------------------------------
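
_do_set_proc_affinity and _do_get_proc_affinity above split a Python long bitmask into the fixed-width words of the _cpuset structure and reassemble it again. Below is a standalone sketch of that packing, assuming 32-bit chunks to match _cpuset_bits_t = c_int32; the helper names are hypothetical.

    # Standalone sketch of the packing performed above.
    CHUNK_BITS = 32

    def pack_bitmask(bitmask, nchunks):
        chunkmask = 2 ** CHUNK_BITS - 1
        chunks = []
        for _ in range(nchunks):
            chunks.append(bitmask & chunkmask)   # lowest 32 bits go into this word
            bitmask >>= CHUNK_BITS
        return chunks

    def unpack_bitmask(chunks):
        bitmask = 0
        for i, chunk in enumerate(chunks):
            bitmask |= chunk << (i * CHUNK_BITS)
        return bitmask

    # CPUs 0 and 33 land in two different words of the cpu_set_t bitmask.
    mask = (1 << 0) | (1 << 33)
    assert pack_bitmask(mask, 2) == [1, 2]
    assert unpack_bitmask([1, 2]) == mask
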
/threading2/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | threading2: like the standard threading module, but awesomer.
4 |
5 | This module is designed as a drop-in replacement and extension for the default
6 | "threading" module. It has two main objectives:
7 |
8 | * implement primitives using native platform functionality where possible
9 | * expose more sophisticated functionality where it can be done uniformly
10 |
11 | The following extensions are currently implemented:
12 |
13 | * ability to set (advisory) thread priority
14 | * ability to set (advisory) CPU affinity at thread and process level
15 | * thread groups for simultaneous management of multiple threads
16 | * SHLock class for shared/exclusive (also known as read/write) locks
17 |
18 | The following API niceties are also included:
19 |
20 | * all blocking methods take a "timeout" argument and return a success code
21 | * all exposed objects are actual classes and can be safely subclassed
22 |
23 | This has currently only been tested on WinXP and Ubuntu Karmic; similar
24 | platforms *should* work OK, and other platforms *should* fall back to using
25 | sensible default behaviour, but I'm not making any guarantees at this stage.
26 |
27 | Additional planned extensions include:
28 |
29 | * make stack_size a kwarg when creating a thread
30 | * native events, semaphores and timed waits on win32
31 | * native conditions and timed waits on pthreads platforms
32 | * native SHLock implementations (SRW on Win Vista+, pthread_rwlock)
33 |
34 | """
35 |
36 | from __future__ import with_statement
37 |
38 |
39 | __ver_major__ = 0
40 | __ver_minor__ = 3
41 | __ver_patch__ = 1
42 | __ver_sub__ = ""
43 | __version__ = "%d.%d.%d%s" % (__ver_major__,__ver_minor__,
44 | __ver_patch__,__ver_sub__)
45 |
46 | import sys
47 | import weakref
48 |
49 | # Expose some internal state of the threading module, for use by regr tests
50 | from threading import _active,_DummyThread
51 |
52 | # Grab the best implementation we can use on this platform
53 | try:
54 | if sys.platform == "win32":
55 | from threading2.t2_win32 import *
56 | else:
57 | from threading2.t2_posix import *
58 | except ImportError:
59 | from threading2.t2_base import *
60 | del sys
61 |
62 |
63 | __all__ = ["active_count","activeCount","Condition","current_thread",
64 | "currentThread","enumerate","Event","local","Lock","RLock",
65 | "Semaphore","BoundedSemaphore","Thread","ThreadGroup","Timer",
66 | "SHLock","setprofile","settrace","stack_size","group_local",
67 | "CPUSet","system_affinity","process_affinity"]
68 |
69 |
70 | class ThreadGroup(object):
71 | """Object for managing many threads at once.
72 |
73 | ThreadGroup objects are a simple container for a set of threads, allowing
74 | them all to be managed as a single unit. Operations that can be applied
75 | to a group include:
76 |
77 | * setting priority and affinity
78 | * joining and testing for liveness
79 |
80 | """
81 |
82 | def __init__(self,name=None):
83 | self.name = name
84 | self.__lock = Lock()
85 | self.__priority = None
86 | self.__affinity = None
87 | # Ideally we'd use a WeakSet here, but it's not available
88 | # in older versions of python.
89 | self.__threads = weakref.WeakKeyDictionary()
90 |
91 | def __str__(self):
92 | if not self.name:
93 | return super(ThreadGroup,self).__str__()
94 | return "" % (self.name,id(self),)
95 |
96 | def _add_thread(self,thread):
97 | self.__threads[thread] = True
98 |
99 | @property
100 | def priority(self):
101 | return self.__priority
102 |
103 | @priority.setter
104 | def priority(self,priority):
105 | """Set the priority for all threads in this group.
106 |
107 | If setting priority fails on any thread, the priority of all threads
108 | is restored to its previous value.
109 | """
110 | with self.__lock:
111 | old_priorities = {}
112 | try:
113 | for thread in self.__threads:
114 | old_priorities[thread] = thread.priority
115 | thread.priority = priority
116 | except Exception:
117 | for (thread,old_priority) in old_priorities.iteritems():
118 | try:
119 | thread.priority = old_priority
120 | except Exception:
121 | pass
122 | raise
123 | else:
124 | self.__priority = priority
125 |
126 | @property
127 | def affinity(self):
128 | return self.__affinity
129 |
130 | @affinity.setter
131 | def affinity(self,affinity):
132 | """Set the affinity for all threads in this group.
133 |
134 | If setting affinity fails on any thread, the affinity of all threads
135 | is restored to its previous value.
136 | """
137 | with self.__lock:
138 | old_affinities = {}
139 | try:
140 | for thread in self.__threads:
141 | old_affinities[thread] = thread.affinity
142 | thread.affinity = affinity
143 | except Exception:
144 | for (thread,old_affinity) in old_affinities.iteritems():
145 | try:
146 | thread.affinity = old_affinity
147 | except Exception:
148 | pass
149 | raise
150 | else:
151 | self.__affinity = affinity
152 |
153 | def is_alive(self):
154 | """Check whether any thread in this group is alive."""
155 | return any(thread.is_alive() for thread in self.__threads)
156 | isAlive = is_alive
157 |
158 | def join(self,timeout=None):
159 | """Join all threads in this group.
160 |
161 | If the optional "timeout" argument is given, give up after that many
162 | seconds. This method returns True if the threads were successfully
163 | joined, False if a timeout occurred.
164 | """
165 | if timeout is None:
166 | for thread in self.__threads:
167 | thread.join()
168 | else:
169 | deadline = _time() + timeout
170 | for thread in self.__threads:
171 | delay = deadline - _time()
172 | if delay <= 0:
173 | return False
174 | if not thread.join(delay):
175 | return False
176 | return True
177 |
178 |
179 | default_group = ThreadGroup()
180 |
181 |
182 | class group_local(object):
183 | """Group-local storage object.
184 |
185 | Instances of group_local behave similarly to a threading.local() instance,
186 | except that the values of their attributes are common to all threads in
187 | a single group.
188 | """
189 |
190 | def __init__(self):
191 | self.__dict__["_group_local__lock"] = Lock()          # bypass our own __setattr__
192 | self.__dict__["_group_local__group_locks"] = {}
193 | self.__dict__["_group_local__attrs"] = {}
194 |
195 | def __getattr__(self,name):
196 | group = current_thread().group
197 | try:
198 | return self.__attrs[group][name]
199 | except KeyError:
200 | raise AttributeError(name)
201 |
202 | def __setattr__(self,name,value):
203 | group = current_thread().group
204 | try:
205 | lock = self.__group_locks[group]
206 | except KeyError:
207 | with self.__lock:
208 | lock = self.__group_locks.setdefault(group,Lock())
209 | with lock:
210 | try:
211 | self.__attrs[group][name] = value
212 | except KeyError:
213 | self.__attrs[group] = {}
214 | self.__attrs[group][name] = value
215 |
216 | def __delattr__(self,name):
217 | group = current_thread().group
218 | try:
219 | lock = self.__group_locks[group]
220 | except KeyError:
221 | with self.__lock:
222 | lock = self.__group_locks.setdefault(group,Lock())
223 | with lock:
224 | try:
225 | del self.__attrs[group][name]
226 | except KeyError:
227 | raise AttributeError(name)
228 |
229 |
230 | # Patch current_thread() and enumerate() to always return instances
231 | # of our extended Thread class.
232 |
233 | _current_thread = current_thread
234 | def current_thread():
235 | thread = _current_thread()
236 | if not isinstance(thread,Thread):
237 | thread = Thread.from_thread(thread)
238 | return thread
239 | currentThread = current_thread
240 | current_thread.__doc__ = _current_thread.__doc__
241 |
242 | _enumerate = enumerate
243 | def enumerate():
244 | threads = _enumerate()
245 | for i in xrange(len(threads)):
246 | if not isinstance(threads[i],Thread):
247 | threads[i] = Thread.from_thread(threads[i])
248 | return threads
249 | enumerate.__doc__ = _enumerate.__doc__
250 |
251 |
252 |
--------------------------------------------------------------------------------
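
Because current_thread() and enumerate() are patched as shown above, threads created outside threading2 are still handed back as threading2.Thread instances, so callers can rely on the extended attributes. A small illustrative check, not part of the package:

    # Illustrative check only.
    import threading2

    t = threading2.current_thread()
    assert isinstance(t, threading2.Thread)          # wrapped via Thread.from_thread
    for other in threading2.enumerate():
        assert isinstance(other, threading2.Thread)

group_local above relies on this, since it looks up current_thread().group for every attribute access.
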
/threading2/tests/stdregr/lock_te_sts.py:
--------------------------------------------------------------------------------
1 | ###
2 | ### This is a verbatim copy of the "lock_tests" regression tests from the
3 | ### standard Python distribution (svn revision 77924 to be precise). It's
4 | ### included here to support running the standard threading test suite against
5 | ### our new objects.
6 | ###
7 | ###
8 | ### If you want to get technical, the copyright for this file is thus:
9 | ###
10 | ### Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
11 | ### Python Software Foundation; All Rights Reserved
12 | ###
13 |
14 | """
15 | Various tests for synchronization primitives.
16 | """
17 |
18 | import sys
19 | import time
20 | from thread import start_new_thread, get_ident
21 | import threading
22 | import unittest
23 |
24 | from test import test_support as support
25 |
26 |
27 | def _wait():
28 | # A crude wait/yield function not relying on synchronization primitives.
29 | time.sleep(0.01)
30 |
31 | class Bunch(object):
32 | """
33 | A bunch of threads.
34 | """
35 | def __init__(self, f, n, wait_before_exit=False):
36 | """
37 | Construct a bunch of `n` threads running the same function `f`.
38 | If `wait_before_exit` is True, the threads won't terminate until
39 | do_finish() is called.
40 | """
41 | self.f = f
42 | self.n = n
43 | self.started = []
44 | self.finished = []
45 | self._can_exit = not wait_before_exit
46 | def task():
47 | tid = get_ident()
48 | self.started.append(tid)
49 | try:
50 | f()
51 | finally:
52 | self.finished.append(tid)
53 | while not self._can_exit:
54 | _wait()
55 | for i in range(n):
56 | start_new_thread(task, ())
57 |
58 | def wait_for_started(self):
59 | while len(self.started) < self.n:
60 | _wait()
61 |
62 | def wait_for_finished(self):
63 | while len(self.finished) < self.n:
64 | _wait()
65 |
66 | def do_finish(self):
67 | self._can_exit = True
68 |
69 |
70 | class BaseTestCase(unittest.TestCase):
71 | def setUp(self):
72 | self._threads = support.threading_setup()
73 |
74 | def tearDown(self):
75 | support.threading_cleanup(*self._threads)
76 | support.reap_children()
77 |
78 |
79 | class BaseLockTests(BaseTestCase):
80 | """
81 | Tests for both recursive and non-recursive locks.
82 | """
83 |
84 | def test_constructor(self):
85 | lock = self.locktype()
86 | del lock
87 |
88 | def test_acquire_destroy(self):
89 | lock = self.locktype()
90 | lock.acquire()
91 | del lock
92 |
93 | def test_acquire_release(self):
94 | lock = self.locktype()
95 | lock.acquire()
96 | lock.release()
97 | del lock
98 |
99 | def test_try_acquire(self):
100 | lock = self.locktype()
101 | self.assertTrue(lock.acquire(False))
102 | lock.release()
103 |
104 | def test_try_acquire_contended(self):
105 | lock = self.locktype()
106 | lock.acquire()
107 | result = []
108 | def f():
109 | result.append(lock.acquire(False))
110 | Bunch(f, 1).wait_for_finished()
111 | self.assertFalse(result[0])
112 | lock.release()
113 |
114 | def test_acquire_contended(self):
115 | lock = self.locktype()
116 | lock.acquire()
117 | N = 5
118 | def f():
119 | lock.acquire()
120 | lock.release()
121 |
122 | b = Bunch(f, N)
123 | b.wait_for_started()
124 | _wait()
125 | self.assertEqual(len(b.finished), 0)
126 | lock.release()
127 | b.wait_for_finished()
128 | self.assertEqual(len(b.finished), N)
129 |
130 | def test_with(self):
131 | lock = self.locktype()
132 | def f():
133 | lock.acquire()
134 | lock.release()
135 | def _with(err=None):
136 | with lock:
137 | if err is not None:
138 | raise err
139 | _with()
140 | # Check the lock is unacquired
141 | Bunch(f, 1).wait_for_finished()
142 | self.assertRaises(TypeError, _with, TypeError)
143 | # Check the lock is unacquired
144 | Bunch(f, 1).wait_for_finished()
145 |
146 | def test_thread_leak(self):
147 | # The lock shouldn't leak a Thread instance when used from a foreign
148 | # (non-threading) thread.
149 | lock = self.locktype()
150 | def f():
151 | lock.acquire()
152 | lock.release()
153 | print threading.enumerate()
154 | n = len(threading.enumerate())
155 | # We run many threads in the hope that existing threads ids won't
156 | # be recycled.
157 | Bunch(f, 15).wait_for_finished()
158 | print threading.enumerate()
159 | self.assertEqual(n, len(threading.enumerate()))
160 |
161 |
162 | class LockTests(BaseLockTests):
163 | """
164 | Tests for non-recursive, weak locks
165 | (which can be acquired and released from different threads).
166 | """
167 | def test_reacquire(self):
168 | # Lock needs to be released before re-acquiring.
169 | lock = self.locktype()
170 | phase = []
171 | def f():
172 | lock.acquire()
173 | phase.append(None)
174 | lock.acquire()
175 | phase.append(None)
176 | start_new_thread(f, ())
177 | while len(phase) == 0:
178 | _wait()
179 | _wait()
180 | self.assertEqual(len(phase), 1)
181 | lock.release()
182 | while len(phase) == 1:
183 | _wait()
184 | self.assertEqual(len(phase), 2)
185 |
186 | def test_different_thread(self):
187 | # Lock can be released from a different thread.
188 | lock = self.locktype()
189 | lock.acquire()
190 | def f():
191 | lock.release()
192 | b = Bunch(f, 1)
193 | b.wait_for_finished()
194 | lock.acquire()
195 | lock.release()
196 |
197 |
198 | class RLockTests(BaseLockTests):
199 | """
200 | Tests for recursive locks.
201 | """
202 | def test_reacquire(self):
203 | lock = self.locktype()
204 | lock.acquire()
205 | lock.acquire()
206 | lock.release()
207 | lock.acquire()
208 | lock.release()
209 | lock.release()
210 |
211 | def test_release_unacquired(self):
212 | # Cannot release an unacquired lock
213 | lock = self.locktype()
214 | self.assertRaises(RuntimeError, lock.release)
215 | lock.acquire()
216 | lock.acquire()
217 | lock.release()
218 | lock.acquire()
219 | lock.release()
220 | lock.release()
221 | self.assertRaises(RuntimeError, lock.release)
222 |
223 | def test_different_thread(self):
224 | # Cannot release from a different thread
225 | lock = self.locktype()
226 | def f():
227 | lock.acquire()
228 | b = Bunch(f, 1, True)
229 | try:
230 | self.assertRaises(RuntimeError, lock.release)
231 | finally:
232 | b.do_finish()
233 |
234 | def test__is_owned(self):
235 | lock = self.locktype()
236 | self.assertFalse(lock._is_owned())
237 | lock.acquire()
238 | self.assertTrue(lock._is_owned())
239 | lock.acquire()
240 | self.assertTrue(lock._is_owned())
241 | result = []
242 | def f():
243 | result.append(lock._is_owned())
244 | Bunch(f, 1).wait_for_finished()
245 | self.assertFalse(result[0])
246 | lock.release()
247 | self.assertTrue(lock._is_owned())
248 | lock.release()
249 | self.assertFalse(lock._is_owned())
250 |
251 |
252 | class EventTests(BaseTestCase):
253 | """
254 | Tests for Event objects.
255 | """
256 |
257 | def test_is_set(self):
258 | evt = self.eventtype()
259 | self.assertFalse(evt.is_set())
260 | evt.set()
261 | self.assertTrue(evt.is_set())
262 | evt.set()
263 | self.assertTrue(evt.is_set())
264 | evt.clear()
265 | self.assertFalse(evt.is_set())
266 | evt.clear()
267 | self.assertFalse(evt.is_set())
268 |
269 | def _check_notify(self, evt):
270 | # All threads get notified
271 | N = 5
272 | results1 = []
273 | results2 = []
274 | def f():
275 | results1.append(evt.wait())
276 | results2.append(evt.wait())
277 | b = Bunch(f, N)
278 | b.wait_for_started()
279 | _wait()
280 | self.assertEqual(len(results1), 0)
281 | evt.set()
282 | b.wait_for_finished()
283 | self.assertEqual(results1, [True] * N)
284 | self.assertEqual(results2, [True] * N)
285 |
286 | def test_notify(self):
287 | evt = self.eventtype()
288 | self._check_notify(evt)
289 | # Another time, after an explicit clear()
290 | evt.set()
291 | evt.clear()
292 | self._check_notify(evt)
293 |
294 | def test_timeout(self):
295 | evt = self.eventtype()
296 | results1 = []
297 | results2 = []
298 | N = 5
299 | def f():
300 | results1.append(evt.wait(0.0))
301 | t1 = time.time()
302 | r = evt.wait(0.2)
303 | t2 = time.time()
304 | results2.append((r, t2 - t1))
305 | Bunch(f, N).wait_for_finished()
306 | self.assertEqual(results1, [False] * N)
307 | for r, dt in results2:
308 | self.assertFalse(r)
309 | self.assertTrue(dt >= 0.2, dt)
310 | # The event is set
311 | results1 = []
312 | results2 = []
313 | evt.set()
314 | Bunch(f, N).wait_for_finished()
315 | self.assertEqual(results1, [True] * N)
316 | for r, dt in results2:
317 | self.assertTrue(r)
318 |
319 |
320 | class ConditionTests(BaseTestCase):
321 | """
322 | Tests for condition variables.
323 | """
324 |
325 | def test_acquire(self):
326 | cond = self.condtype()
327 | # By default we have an RLock: the condition can be acquired multiple
328 | # times.
329 | cond.acquire()
330 | cond.acquire()
331 | cond.release()
332 | cond.release()
333 | lock = threading.Lock()
334 | cond = self.condtype(lock)
335 | cond.acquire()
336 | self.assertFalse(lock.acquire(False))
337 | cond.release()
338 | self.assertTrue(lock.acquire(False))
339 | self.assertFalse(cond.acquire(False))
340 | lock.release()
341 | with cond:
342 | self.assertFalse(lock.acquire(False))
343 |
344 | def test_unacquired_wait(self):
345 | cond = self.condtype()
346 | self.assertRaises(RuntimeError, cond.wait)
347 |
348 | def test_unacquired_notify(self):
349 | cond = self.condtype()
350 | self.assertRaises(RuntimeError, cond.notify)
351 |
352 | def _check_notify(self, cond):
353 | N = 5
354 | results1 = []
355 | results2 = []
356 | phase_num = 0
357 | def f():
358 | cond.acquire()
359 | cond.wait()
360 | cond.release()
361 | results1.append(phase_num)
362 | cond.acquire()
363 | cond.wait()
364 | cond.release()
365 | results2.append(phase_num)
366 | b = Bunch(f, N)
367 | b.wait_for_started()
368 | _wait()
369 | self.assertEqual(results1, [])
370 | # Notify 3 threads at first
371 | cond.acquire()
372 | cond.notify(3)
373 | _wait()
374 | phase_num = 1
375 | cond.release()
376 | while len(results1) < 3:
377 | _wait()
378 | self.assertEqual(results1, [1] * 3)
379 | self.assertEqual(results2, [])
380 | # Notify 5 threads: they might be in their first or second wait
381 | cond.acquire()
382 | cond.notify(5)
383 | _wait()
384 | phase_num = 2
385 | cond.release()
386 | while len(results1) + len(results2) < 8:
387 | _wait()
388 | self.assertEqual(results1, [1] * 3 + [2] * 2)
389 | self.assertEqual(results2, [2] * 3)
390 | # Notify all threads: they are all in their second wait
391 | cond.acquire()
392 | cond.notify_all()
393 | _wait()
394 | phase_num = 3
395 | cond.release()
396 | while len(results2) < 5:
397 | _wait()
398 | self.assertEqual(results1, [1] * 3 + [2] * 2)
399 | self.assertEqual(results2, [2] * 3 + [3] * 2)
400 | b.wait_for_finished()
401 |
402 | def test_notify(self):
403 | cond = self.condtype()
404 | self._check_notify(cond)
405 | # A second time, to check internal state is still ok.
406 | self._check_notify(cond)
407 |
408 | def test_timeout(self):
409 | cond = self.condtype()
410 | results = []
411 | N = 5
412 | def f():
413 | cond.acquire()
414 | t1 = time.time()
415 | cond.wait(0.2)
416 | t2 = time.time()
417 | cond.release()
418 | results.append(t2 - t1)
419 | Bunch(f, N).wait_for_finished()
420 | self.assertEqual(len(results), 5)
421 | for dt in results:
422 | self.assertTrue(dt >= 0.2, dt)
423 |
424 |
425 | class BaseSemaphoreTests(BaseTestCase):
426 | """
427 | Common tests for {bounded, unbounded} semaphore objects.
428 | """
429 |
430 | def test_constructor(self):
431 | self.assertRaises(ValueError, self.semtype, value = -1)
432 | self.assertRaises(ValueError, self.semtype, value = -sys.maxint)
433 |
434 | def test_acquire(self):
435 | sem = self.semtype(1)
436 | sem.acquire()
437 | sem.release()
438 | sem = self.semtype(2)
439 | sem.acquire()
440 | sem.acquire()
441 | sem.release()
442 | sem.release()
443 |
444 | def test_acquire_destroy(self):
445 | sem = self.semtype()
446 | sem.acquire()
447 | del sem
448 |
449 | def test_acquire_contended(self):
450 | sem = self.semtype(7)
451 | sem.acquire()
452 | N = 10
453 | results1 = []
454 | results2 = []
455 | phase_num = 0
456 | def f():
457 | sem.acquire()
458 | results1.append(phase_num)
459 | sem.acquire()
460 | results2.append(phase_num)
461 | b = Bunch(f, 10)
462 | b.wait_for_started()
463 | while len(results1) + len(results2) < 6:
464 | _wait()
465 | self.assertEqual(results1 + results2, [0] * 6)
466 | phase_num = 1
467 | for i in range(7):
468 | sem.release()
469 | while len(results1) + len(results2) < 13:
470 | _wait()
471 | self.assertEqual(sorted(results1 + results2), [0] * 6 + [1] * 7)
472 | phase_num = 2
473 | for i in range(6):
474 | sem.release()
475 | while len(results1) + len(results2) < 19:
476 | _wait()
477 | self.assertEqual(sorted(results1 + results2), [0] * 6 + [1] * 7 + [2] * 6)
478 | # The semaphore is still locked
479 | self.assertFalse(sem.acquire(False))
480 | # Final release, to let the last thread finish
481 | sem.release()
482 | b.wait_for_finished()
483 |
484 | def test_try_acquire(self):
485 | sem = self.semtype(2)
486 | self.assertTrue(sem.acquire(False))
487 | self.assertTrue(sem.acquire(False))
488 | self.assertFalse(sem.acquire(False))
489 | sem.release()
490 | self.assertTrue(sem.acquire(False))
491 |
492 | def test_try_acquire_contended(self):
493 | sem = self.semtype(4)
494 | sem.acquire()
495 | results = []
496 | def f():
497 | results.append(sem.acquire(False))
498 | results.append(sem.acquire(False))
499 | Bunch(f, 5).wait_for_finished()
500 | # There can be a thread switch between acquiring the semaphore and
501 | # appending the result, therefore results will not necessarily be
502 | # ordered.
503 | self.assertEqual(sorted(results), [False] * 7 + [True] * 3 )
504 |
505 | def test_default_value(self):
506 | # The default initial value is 1.
507 | sem = self.semtype()
508 | sem.acquire()
509 | def f():
510 | sem.acquire()
511 | sem.release()
512 | b = Bunch(f, 1)
513 | b.wait_for_started()
514 | _wait()
515 | self.assertFalse(b.finished)
516 | sem.release()
517 | b.wait_for_finished()
518 |
519 | def test_with(self):
520 | sem = self.semtype(2)
521 | def _with(err=None):
522 | with sem:
523 | self.assertTrue(sem.acquire(False))
524 | sem.release()
525 | with sem:
526 | self.assertFalse(sem.acquire(False))
527 | if err:
528 | raise err
529 | _with()
530 | self.assertTrue(sem.acquire(False))
531 | sem.release()
532 | self.assertRaises(TypeError, _with, TypeError)
533 | self.assertTrue(sem.acquire(False))
534 | sem.release()
535 |
536 | class SemaphoreTests(BaseSemaphoreTests):
537 | """
538 | Tests for unbounded semaphores.
539 | """
540 |
541 | def test_release_unacquired(self):
542 | # Unbounded releases are allowed and increment the semaphore's value
543 | sem = self.semtype(1)
544 | sem.release()
545 | sem.acquire()
546 | sem.acquire()
547 | sem.release()
548 |
549 |
550 | class BoundedSemaphoreTests(BaseSemaphoreTests):
551 | """
552 | Tests for bounded semaphores.
553 | """
554 |
555 | def test_release_unacquired(self):
556 | # Cannot go past the initial value
557 | sem = self.semtype()
558 | self.assertRaises(ValueError, sem.release)
559 | sem.acquire()
560 | sem.release()
561 | self.assertRaises(ValueError, sem.release)
562 |
--------------------------------------------------------------------------------
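
The test classes above are mix-ins: each expects the concrete TestCase to supply a factory attribute (locktype, eventtype, condtype or semtype). The wiring below is only a sketch of how threading2's regression suite is expected to bind them, mirroring how CPython's test_threading.py does it; the class names here are illustrative, not taken from this repository.

    # Sketch only -- the class names below are illustrative.
    import threading2 as threading
    from threading2.tests.stdregr import lock_te_sts as lock_tests

    class LockTests(lock_tests.LockTests):
        locktype = staticmethod(threading.Lock)

    class RLockTests(lock_tests.RLockTests):
        locktype = staticmethod(threading.RLock)

    class EventTests(lock_tests.EventTests):
        eventtype = staticmethod(threading.Event)

    class ConditionTests(lock_tests.ConditionTests):
        condtype = staticmethod(threading.Condition)

    class SemaphoreTests(lock_tests.SemaphoreTests):
        semtype = staticmethod(threading.Semaphore)

    class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
        semtype = staticmethod(threading.BoundedSemaphore)
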
/threading2/tests/stdregr/test_threading.py:
--------------------------------------------------------------------------------
1 | ###
2 | ### This is a verbatim copy of the "test_threading" regression tests from
3 | ### the standard Python distribution (svn revision 77924 to be precise) with
4 | ### some simple changes to make it run using threading2:
5 | ###
6 | ### * "import threading" => "import threading2 as threading"
7 | ### * "from test import lock_tests" => "from stdregr import lock_tests"
8 | ### * "self.assertIn(a,b)" => "self.assertTrue(a in b)"
9 | ### * "self.assertNotIn(a,b)" => "self.assertFalse(a in b)"
10 | ### * "self.assertIsInstance(a,b)" => "self.assertTrue(isinstance(a,b))"
11 | ### * disabled main_test so it's not detected by nose, py.test, etc.
12 | ### * disabled test_join_nondaemon_on_shutdown; it fails on old pythons
13 | ###
14 | ###
15 | ### If you want to get technical, the copyright for this file is thus:
16 | ###
17 | ### Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
18 | ### Python Software Foundation; All Rights Reserved
19 | ###
20 |
21 | # Very rudimentary test of threading module
22 |
23 | import test.test_support
24 | from test.test_support import verbose
25 | import random
26 | import re
27 | import sys
28 | import threading2 as threading
29 | import thread
30 | import time
31 | import unittest
32 | import weakref
33 |
34 | from threading2.tests.stdregr import lock_te_sts as lock_tests
35 |
36 | # A trivial mutable counter.
37 | class Counter(object):
38 | def __init__(self):
39 | self.value = 0
40 | def inc(self):
41 | self.value += 1
42 | def dec(self):
43 | self.value -= 1
44 | def get(self):
45 | return self.value
46 |
47 | class TestThread(threading.Thread):
48 | def __init__(self, name, testcase, sema, mutex, nrunning):
49 | threading.Thread.__init__(self, name=name)
50 | self.testcase = testcase
51 | self.sema = sema
52 | self.mutex = mutex
53 | self.nrunning = nrunning
54 |
55 | def run(self):
56 | delay = random.random() / 10000.0
57 | if verbose:
58 | print 'task %s will run for %.1f usec' % (
59 | self.name, delay * 1e6)
60 |
61 | with self.sema:
62 | with self.mutex:
63 | self.nrunning.inc()
64 | if verbose:
65 | print self.nrunning.get(), 'tasks are running'
66 | self.testcase.assertTrue(self.nrunning.get() <= 3)
67 |
68 | time.sleep(delay)
69 | if verbose:
70 | print 'task', self.name, 'done'
71 |
72 | with self.mutex:
73 | self.nrunning.dec()
74 | self.testcase.assertTrue(self.nrunning.get() >= 0)
75 | if verbose:
76 | print '%s is finished. %d tasks are running' % (
77 | self.name, self.nrunning.get())
78 |
79 | class BaseTestCase(unittest.TestCase):
80 | def setUp(self):
81 | self._threads = test.test_support.threading_setup()
82 |
83 | def tearDown(self):
84 | test.test_support.threading_cleanup(*self._threads)
85 | test.test_support.reap_children()
86 |
87 |
88 | class ThreadTests(BaseTestCase):
89 |
90 | # Create a bunch of threads, let each do some work, wait until all are
91 | # done.
92 | def test_various_ops(self):
93 | # This takes about n/3 seconds to run (about n/3 clumps of tasks,
94 | # times about 1 second per clump).
95 | NUMTASKS = 10
96 |
97 | # no more than 3 of the 10 can run at once
98 | sema = threading.BoundedSemaphore(value=3)
99 | mutex = threading.RLock()
100 | numrunning = Counter()
101 |
102 | threads = []
103 |
104 | for i in range(NUMTASKS):
105 | t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
106 | threads.append(t)
107 | self.assertEqual(t.ident, None)
108 | self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
109 | t.start()
110 |
111 | if verbose:
112 | print 'waiting for all tasks to complete'
113 | for t in threads:
114 | t.join(NUMTASKS)
115 | self.assertTrue(not t.is_alive())
116 | self.assertNotEqual(t.ident, 0)
117 | self.assertFalse(t.ident is None)
118 | self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
119 | if verbose:
120 | print 'all tasks done'
121 | self.assertEqual(numrunning.get(), 0)
122 |
123 | def test_ident_of_no_threading_threads(self):
124 | # The ident still must work for the main thread and dummy threads.
125 | self.assertFalse(threading.currentThread().ident is None)
126 | def f():
127 | ident.append(threading.currentThread().ident)
128 | done.set()
129 | done = threading.Event()
130 | ident = []
131 | thread.start_new_thread(f, ())
132 | done.wait()
133 | self.assertFalse(ident[0] is None)
134 | # Kill the "immortal" _DummyThread
135 | del threading._active[ident[0]]
136 |
137 | # run with a small(ish) thread stack size (256kB)
138 | def test_various_ops_small_stack(self):
139 | if verbose:
140 | print 'with 256kB thread stack size...'
141 | try:
142 | threading.stack_size(262144)
143 | except thread.error:
144 | if verbose:
145 | print 'platform does not support changing thread stack size'
146 | return
147 | self.test_various_ops()
148 | threading.stack_size(0)
149 |
150 | # run with a large thread stack size (1MB)
151 | def test_various_ops_large_stack(self):
152 | if verbose:
153 | print 'with 1MB thread stack size...'
154 | try:
155 | threading.stack_size(0x100000)
156 | except thread.error:
157 | if verbose:
158 | print 'platform does not support changing thread stack size'
159 | return
160 | self.test_various_ops()
161 | threading.stack_size(0)
162 |
163 | def test_foreign_thread(self):
164 | # Check that a "foreign" thread can use the threading module.
165 | def f(mutex):
166 | # Calling current_thread() forces an entry for the foreign
167 | # thread to get made in the threading._active map.
168 | threading.current_thread()
169 | mutex.release()
170 |
171 | mutex = threading.Lock()
172 | mutex.acquire()
173 | tid = thread.start_new_thread(f, (mutex,))
174 | # Wait for the thread to finish.
175 | mutex.acquire()
176 | self.assertTrue(tid in threading._active)
177 | self.assertTrue(isinstance(threading._active[tid],threading._DummyThread))
178 | del threading._active[tid]
179 |
180 | # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
181 | # exposed at the Python level. This test relies on ctypes to get at it.
182 | def test_PyThreadState_SetAsyncExc(self):
183 | try:
184 | import ctypes
185 | except ImportError:
186 | if verbose:
187 | print "test_PyThreadState_SetAsyncExc can't import ctypes"
188 | return # can't do anything
189 |
190 | set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
191 |
192 | class AsyncExc(Exception):
193 | pass
194 |
195 | exception = ctypes.py_object(AsyncExc)
196 |
197 | # First check it works when setting the exception from the same thread.
198 | tid = thread.get_ident()
199 |
200 | try:
201 | result = set_async_exc(ctypes.c_long(tid), exception)
202 | # The exception is async, so we might have to keep the VM busy until
203 | # it notices.
204 | while True:
205 | pass
206 | except AsyncExc:
207 | pass
208 | else:
209 | # This code is unreachable but it reflects the intent. If we wanted
210 | # to be smarter the above loop wouldn't be infinite.
211 | self.fail("AsyncExc not raised")
212 | try:
213 | self.assertEqual(result, 1) # one thread state modified
214 | except UnboundLocalError:
215 | # The exception was raised too quickly for us to get the result.
216 | pass
217 |
218 | # `worker_started` is set by the thread when it's inside a try/except
219 | # block waiting to catch the asynchronously set AsyncExc exception.
220 | # `worker_saw_exception` is set by the thread upon catching that
221 | # exception.
222 | worker_started = threading.Event()
223 | worker_saw_exception = threading.Event()
224 |
225 | class Worker(threading.Thread):
226 | def run(self):
227 | self.id = thread.get_ident()
228 | self.finished = False
229 |
230 | try:
231 | while True:
232 | worker_started.set()
233 | time.sleep(0.1)
234 | except AsyncExc:
235 | self.finished = True
236 | worker_saw_exception.set()
237 |
238 | t = Worker()
239 | t.daemon = True # so if this fails, we don't hang Python at shutdown
240 | t.start()
241 | if verbose:
242 | print " started worker thread"
243 |
244 | # Try a thread id that doesn't make sense.
245 | if verbose:
246 | print " trying nonsensical thread id"
247 | result = set_async_exc(ctypes.c_long(-1), exception)
248 | self.assertEqual(result, 0) # no thread states modified
249 |
250 | # Now raise an exception in the worker thread.
251 | if verbose:
252 | print " waiting for worker thread to get started"
253 | ret = worker_started.wait()
254 | self.assertTrue(ret)
255 | if verbose:
256 | print " verifying worker hasn't exited"
257 | self.assertTrue(not t.finished)
258 | if verbose:
259 | print " attempting to raise asynch exception in worker"
260 | result = set_async_exc(ctypes.c_long(t.id), exception)
261 | self.assertEqual(result, 1) # one thread state modified
262 | if verbose:
263 | print " waiting for worker to say it caught the exception"
264 | worker_saw_exception.wait(timeout=10)
265 | self.assertTrue(t.finished)
266 | if verbose:
267 | print " all OK -- joining worker"
268 | if t.finished:
269 | t.join()
270 | # else the thread is still running, and we have no way to kill it
271 |
272 | def test_finalize_runnning_thread(self):
273 | # Issue 1402: the PyGILState_Ensure / _Release functions may be called
274 | # very late on python exit: on deallocation of a running thread for
275 | # example.
276 | try:
277 | import ctypes
278 | except ImportError:
279 | if verbose:
280 | print("test_finalize_with_runnning_thread can't import ctypes")
281 | return # can't do anything
282 |
283 | import subprocess
284 | rc = subprocess.call([sys.executable, "-c", """if 1:
285 | import ctypes, sys, time, thread
286 |
287 | # This lock is used as a simple event variable.
288 | ready = thread.allocate_lock()
289 | ready.acquire()
290 |
291 | # Module globals are cleared before __del__ is run
292 | # So we save the functions in class dict
293 | class C:
294 | ensure = ctypes.pythonapi.PyGILState_Ensure
295 | release = ctypes.pythonapi.PyGILState_Release
296 | def __del__(self):
297 | state = self.ensure()
298 | self.release(state)
299 |
300 | def waitingThread():
301 | x = C()
302 | ready.release()
303 | time.sleep(100)
304 |
305 | thread.start_new_thread(waitingThread, ())
306 | ready.acquire() # Be sure the other thread is waiting.
307 | sys.exit(42)
308 | """])
309 | self.assertEqual(rc, 42)
310 |
311 | def test_finalize_with_trace(self):
312 | # Issue1733757
313 | # Avoid a deadlock when sys.settrace steps into threading._shutdown
314 | import subprocess
315 | rc = subprocess.call([sys.executable, "-c", """if 1:
316 | import sys, threading
317 |
318 | # A deadlock-killer, to prevent the
319 | # testsuite to hang forever
320 | def killer():
321 | import os, time
322 | time.sleep(2)
323 | print 'program blocked; aborting'
324 | os._exit(2)
325 | t = threading.Thread(target=killer)
326 | t.daemon = True
327 | t.start()
328 |
329 | # This is the trace function
330 | def func(frame, event, arg):
331 | threading.current_thread()
332 | return func
333 |
334 | sys.settrace(func)
335 | """])
336 | self.assertFalse(rc == 2, "interpreter was blocked")
337 | self.assertTrue(rc == 0, "Unexpected error")
338 |
339 | # def test_join_nondaemon_on_shutdown(self):
340 | # # Issue 1722344
341 | # # Raising SystemExit skipped threading._shutdown
342 | # import subprocess
343 | # p = subprocess.Popen([sys.executable, "-c", """if 1:
344 | # import threading
345 | # from time import sleep
346 | #
347 | # def child():
348 | # sleep(1)
349 | # # As a non-daemon thread we SHOULD wake up and nothing
350 | # # should be torn down yet
351 | # print "Woke up, sleep function is:", sleep
352 | #
353 | # threading.Thread(target=child).start()
354 | # raise SystemExit
355 | # """],
356 | # stdout=subprocess.PIPE,
357 | # stderr=subprocess.PIPE)
358 | # stdout, stderr = p.communicate()
359 | # self.assertEqual(stdout.strip(),
360 | # "Woke up, sleep function is: ")
361 | # stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
362 | # self.assertEqual(stderr, "")
363 |
364 | def test_enumerate_after_join(self):
365 | # Try hard to trigger #1703448: a thread is still returned in
366 | # threading.enumerate() after it has been join()ed.
367 | enum = threading.enumerate
368 | old_interval = sys.getcheckinterval()
369 | try:
370 | for i in xrange(1, 100):
371 | # Try a couple times at each thread-switching interval
372 | # to get more interleavings.
373 | sys.setcheckinterval(i // 5)
374 | t = threading.Thread(target=lambda: None)
375 | t.start()
376 | t.join()
377 | l = enum()
378 | self.assertFalse(t in l,
379 | "#1703448 triggered after %d trials: %s" % (i, l))
380 | finally:
381 | sys.setcheckinterval(old_interval)
382 |
383 | def test_no_refcycle_through_target(self):
384 | class RunSelfFunction(object):
385 | def __init__(self, should_raise):
386 | # The links in this refcycle from Thread back to self
387 | # should be cleaned up when the thread completes.
388 | self.should_raise = should_raise
389 | self.thread = threading.Thread(target=self._run,
390 | args=(self,),
391 | kwargs={'yet_another':self})
392 | self.thread.start()
393 |
394 | def _run(self, other_ref, yet_another):
395 | if self.should_raise:
396 | raise SystemExit
397 |
398 | cyclic_object = RunSelfFunction(should_raise=False)
399 | weak_cyclic_object = weakref.ref(cyclic_object)
400 | cyclic_object.thread.join()
401 | del cyclic_object
402 | self.assertEquals(None, weak_cyclic_object(),
403 | msg=('%d references still around' %
404 | sys.getrefcount(weak_cyclic_object())))
405 |
406 | raising_cyclic_object = RunSelfFunction(should_raise=True)
407 | weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
408 | raising_cyclic_object.thread.join()
409 | del raising_cyclic_object
410 | self.assertEquals(None, weak_raising_cyclic_object(),
411 | msg=('%d references still around' %
412 | sys.getrefcount(weak_raising_cyclic_object())))
413 |
414 |
415 | class ThreadJoinOnShutdown(BaseTestCase):
416 |
417 | def _run_and_join(self, script):
418 | script = """if 1:
419 | import sys, os, time, threading
420 |
421 | # a thread, which waits for the main program to terminate
422 | def joiningfunc(mainthread):
423 | mainthread.join()
424 | print 'end of thread'
425 | \n""" + script
426 |
427 | import subprocess
428 | p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
429 | rc = p.wait()
430 | data = p.stdout.read().replace('\r', '')
431 | self.assertEqual(data, "end of main\nend of thread\n")
432 | self.assertFalse(rc == 2, "interpreter was blocked")
433 | self.assertTrue(rc == 0, "Unexpected error")
434 |
435 | def test_1_join_on_shutdown(self):
436 | # The usual case: on exit, wait for a non-daemon thread
437 | script = """if 1:
438 | import os
439 | t = threading.Thread(target=joiningfunc,
440 | args=(threading.current_thread(),))
441 | t.start()
442 | time.sleep(0.1)
443 | print 'end of main'
444 | """
445 | self._run_and_join(script)
446 |
447 |
448 | def test_2_join_in_forked_process(self):
449 | # Like the test above, but from a forked interpreter
450 | import os
451 | if not hasattr(os, 'fork'):
452 | return
453 | script = """if 1:
454 | childpid = os.fork()
455 | if childpid != 0:
456 | os.waitpid(childpid, 0)
457 | sys.exit(0)
458 |
459 | t = threading.Thread(target=joiningfunc,
460 | args=(threading.current_thread(),))
461 | t.start()
462 | print 'end of main'
463 | """
464 | self._run_and_join(script)
465 |
466 | def test_3_join_in_forked_from_thread(self):
467 | # Like the test above, but fork() was called from a worker thread
468 | # In the forked process, the main Thread object must be marked as stopped.
469 | import os
470 | if not hasattr(os, 'fork'):
471 | return
472 | # Skip platforms with known problems forking from a worker thread.
473 | # See http://bugs.python.org/issue3863.
474 | if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
475 | print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'
476 | ' due to known OS bugs on'), sys.platform
477 | return
478 | script = """if 1:
479 | main_thread = threading.current_thread()
480 | def worker():
481 | childpid = os.fork()
482 | if childpid != 0:
483 | os.waitpid(childpid, 0)
484 | sys.exit(0)
485 |
486 | t = threading.Thread(target=joiningfunc,
487 | args=(main_thread,))
488 | print 'end of main'
489 | t.start()
490 | t.join() # Should not block: main_thread is already stopped
491 |
492 | w = threading.Thread(target=worker)
493 | w.start()
494 | """
495 | self._run_and_join(script)
496 |
497 |
498 | class ThreadingExceptionTests(BaseTestCase):
499 | # A RuntimeError should be raised if Thread.start() is called
500 | # multiple times.
501 | def test_start_thread_again(self):
502 | thread = threading.Thread()
503 | thread.start()
504 | self.assertRaises(RuntimeError, thread.start)
505 |
506 | def test_joining_current_thread(self):
507 | current_thread = threading.current_thread()
508 |         self.assertRaises(RuntimeError, current_thread.join)
509 |
510 | def test_joining_inactive_thread(self):
511 | thread = threading.Thread()
512 | self.assertRaises(RuntimeError, thread.join)
513 |
514 | def test_daemonize_active_thread(self):
515 | thread = threading.Thread()
516 | thread.start()
517 | self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
518 |
519 |
520 | class LockTests(lock_tests.LockTests):
521 | locktype = staticmethod(threading.Lock)
522 |
523 | class RLockTests(lock_tests.RLockTests):
524 | locktype = staticmethod(threading.RLock)
525 |
526 | class EventTests(lock_tests.EventTests):
527 | eventtype = staticmethod(threading.Event)
528 |
529 | class ConditionAsRLockTests(lock_tests.RLockTests):
530 |     # A Condition uses an RLock by default and exports its API.
531 | locktype = staticmethod(threading.Condition)
532 |
533 | class ConditionTests(lock_tests.ConditionTests):
534 | condtype = staticmethod(threading.Condition)
535 |
536 | class SemaphoreTests(lock_tests.SemaphoreTests):
537 | semtype = staticmethod(threading.Semaphore)
538 |
539 | class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
540 | semtype = staticmethod(threading.BoundedSemaphore)
541 |
542 |
543 | #def test_main():
544 | # test.test_support.run_unittest(LockTests, RLockTests, EventTests,
545 | # ConditionAsRLockTests, ConditionTests,
546 | # SemaphoreTests, BoundedSemaphoreTests,
547 | # ThreadTests,
548 | # ThreadJoinOnShutdown,
549 | # ThreadingExceptionTests,
550 | # )
551 | #
552 | #if __name__ == "__main__":
553 | # test_main()
554 |
--------------------------------------------------------------------------------
/threading2/t2_base.py:
--------------------------------------------------------------------------------
1 | import threading2
2 | from threading import *
3 | from threading import _RLock,_Event,_Condition,_Semaphore,_BoundedSemaphore, \
4 | _Timer,ThreadError,_time,_sleep,_get_ident,_allocate_lock
5 |
6 |
7 |
8 | __all__ = ["active_count","activeCount","Condition","current_thread",
9 | "currentThread","enumerate","Event","local","Lock","RLock",
10 | "Semaphore","BoundedSemaphore","Thread","Timer","SHLock",
11 | "setprofile","settrace","stack_size","CPUSet","system_affinity",
12 | "process_affinity"]
13 |
14 |
15 |
16 | class _ContextManagerMixin(object):
17 | """Simple mixin mapping __enter__/__exit__ to acquire/release."""
18 |
19 | def __enter__(self):
20 | self.acquire()
21 | return self
22 |
23 | def __exit__(self,exc_type,exc_value,traceback):
24 | self.release()
25 |
26 |
27 | class Lock(_ContextManagerMixin):
28 | """Class-based Lock object.
29 |
30 | This is a very thin wrapper around Python's native lock objects. It's
31 | here to provide easy subclassability and to add a "timeout" argument
32 | to Lock.acquire().
33 | """
34 |
35 | def __init__(self):
36 | self.__lock = _allocate_lock()
37 | super(Lock,self).__init__()
38 |
39 | def acquire(self,blocking=True,timeout=None):
40 | """Attempt to acquire this lock.
41 |
42 | If the optional argument "blocking" is True and "timeout" is None,
43 |         this method blocks until it successfully acquires the lock. If
44 | "blocking" is False, it returns immediately if the lock could not
45 | be acquired. Otherwise, it blocks for at most "timeout" seconds
46 | trying to acquire the lock.
47 |
48 |         In all cases, this method returns True if the lock was successfully
49 | acquired and False otherwise.
50 | """
51 | if timeout is None:
52 | return self.__lock.acquire(blocking)
53 | else:
54 | # Simulated timeout using progressively longer sleeps.
55 | # This is the same timeout scheme used in the stdlib Condition
56 | # class. If there's lots of contention on the lock then there's
57 | # a good chance you won't get it; but then again, Python doesn't
58 | # guarantee fairness anyway. We hope that platform-specific
59 | # extensions can provide a better mechanism.
60 | endtime = _time() + timeout
61 | delay = 0.0005
62 | while not self.__lock.acquire(False):
63 | remaining = endtime - _time()
64 | if remaining <= 0:
65 | return False
66 | delay = min(delay*2,remaining,0.05)
67 | _sleep(delay)
68 | return True
69 |
70 | def release(self):
71 | """Release this lock."""
72 | self.__lock.release()
73 |
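# Illustrative usage sketch of the timeout-enabled acquire() above; the helper
# name, the lock variable and the 0.5 second timeout are arbitrary and are not
# part of the library itself.
def _example_lock_timeout():
    lock = Lock()
    if lock.acquire(timeout=0.5):
        try:
            pass  # ...critical section...
        finally:
            lock.release()
    else:
        pass  # gave up waiting for the lock
    # The mixin above also makes the lock usable as a context manager:
    with lock:
        pass  # ...critical section...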
74 |
75 | class RLock(_ContextManagerMixin,_RLock):
76 | """Re-implemented RLock object.
77 |
78 | This is pretty much a direct clone of the RLock object from the standard
79 | threading module; the only difference is that it uses a custom Lock class
80 | so that acquire() has a "timeout" parameter.
81 |
82 | It also includes a fix for a memory leak present in Python 2.6 and older.
83 | """
84 |
85 | _LockClass = Lock
86 |
87 | def __init__(self):
88 | super(RLock,self).__init__()
89 | self.__block = self._LockClass()
90 | self.__owner = None
91 | self.__count = 0
92 |
93 | def acquire(self,blocking=True,timeout=None):
94 | me = _get_ident()
95 | if self.__owner == me:
96 | self.__count += 1
97 | return True
98 | if self.__block.acquire(blocking,timeout):
99 | self.__owner = me
100 | self.__count = 1
101 | return True
102 | return False
103 |
104 | def release(self):
105 | if self.__owner != _get_ident():
106 |             raise RuntimeError("cannot release un-acquired lock")
107 | self.__count -= 1
108 | if not self.__count:
109 | self.__owner = None
110 | self.__block.release()
111 |
112 | def _is_owned(self):
113 | return self.__owner == _get_ident()
114 |
115 |
116 |
117 | class Condition(_Condition):
118 | """Re-implemented Condition class.
119 |
120 | This is pretty much a direct clone of the Condition class from the standard
121 | threading module; the only difference is that it uses a custom Lock class
122 | so that acquire() has a "timeout" parameter.
123 | """
124 |
125 | _LockClass = RLock
126 | _WaiterLockClass = Lock
127 |
128 | def __init__(self,lock=None):
129 | if lock is None:
130 | lock = self._LockClass()
131 | super(Condition,self).__init__(lock)
132 |
133 | # This is essentially the same as the base version, but it returns
134 | # True if the wait was successful and False if it timed out.
135 | def wait(self,timeout=None):
136 | if not self._is_owned():
137 |             raise RuntimeError("cannot wait on un-acquired lock")
138 | waiter = self._WaiterLockClass()
139 | waiter.acquire()
140 | self.__waiters.append(waiter)
141 | saved_state = self._release_save()
142 | try:
143 | if not waiter.acquire(timeout=timeout):
144 | try:
145 | self.__waiters.remove(waiter)
146 | except ValueError:
147 | pass
148 | return False
149 | else:
150 | return True
151 | finally:
152 | self._acquire_restore(saved_state)
153 |
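# Illustrative sketch of the boolean return value of wait() above; the helper
# name, the "ready" flag and the 1.0 second timeout are made up for the example.
def _example_condition_wait():
    state = {"ready": False}
    cond = Condition()
    with cond:
        while not state["ready"]:
            if not cond.wait(timeout=1.0):
                break  # timed out; the caller can give up or retry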
154 |
155 | class Semaphore(_ContextManagerMixin):
156 | """Re-implemented Semaphore class.
157 |
158 | This is pretty much a direct clone of the Semaphore class from the standard
159 | threading module; the only difference is that it uses a custom Condition
160 | class so that acquire() has a "timeout" parameter.
161 | """
162 |
163 | _ConditionClass = Condition
164 |
165 | def __init__(self,value=1):
166 | if value < 0:
167 | raise ValueError("semaphore initial value must be >= 0")
168 | super(Semaphore,self).__init__()
169 | self.__cond = self._ConditionClass()
170 | self.__value = value
171 |
172 | def acquire(self,blocking=True,timeout=None):
173 | with self.__cond:
174 | while self.__value == 0:
175 | if not blocking:
176 | return False
177 | if not self.__cond.wait(timeout=timeout):
178 | return False
179 | self.__value = self.__value - 1
180 | return True
181 |
182 | def release(self):
183 | with self.__cond:
184 | self.__value = self.__value + 1
185 | self.__cond.notify()
186 |
187 |
188 | class BoundedSemaphore(Semaphore):
189 | """Semaphore that checks that # releases is <= # acquires"""
190 |
191 | def __init__(self,value=1):
192 | super(BoundedSemaphore,self).__init__(value)
193 | self._initial_value = value
194 |
195 | def release(self):
196 | if self._Semaphore__value >= self._initial_value:
197 | raise ValueError("Semaphore released too many times")
198 | return super(BoundedSemaphore,self).release()
199 |
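# Illustrative sketch of the timeout-aware Semaphore/BoundedSemaphore above;
# the pool size of four and the 1.0 second timeout are arbitrary.
def _example_semaphore_timeout():
    sem = BoundedSemaphore(4)  # at most four concurrent holders
    if sem.acquire(timeout=1.0):
        try:
            pass  # ...use the limited resource...
        finally:
            sem.release()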
200 |
201 | class Event(object):
202 | """Re-implemented Event class.
203 |
204 | This is pretty much a direct clone of the Event class from the standard
205 | threading module; the only difference is that it uses a custom Condition
206 | class for easy extensibility.
207 | """
208 |
209 | _ConditionClass = Condition
210 |
211 | def __init__(self):
212 | super(Event,self).__init__()
213 | self.__cond = self._ConditionClass()
214 | self.__flag = False
215 |
216 | def is_set(self):
217 | return self.__flag
218 | isSet = is_set
219 |
220 | def set(self):
221 | with self.__cond:
222 | self.__flag = True
223 | self.__cond.notify_all()
224 |
225 | def clear(self):
226 | with self.__cond:
227 | self.__flag = False
228 |
229 | def wait(self,timeout=None):
230 | with self.__cond:
231 | if self.__flag:
232 | return True
233 | return self.__cond.wait(timeout)
234 |
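# Illustrative sketch of the Event above; wait() reports whether the flag was
# set before the (arbitrary) timeout expired.
def _example_event_wait(evt):
    # evt is an Event; some other thread is expected to call evt.set().
    if evt.wait(timeout=1.0):
        return "flag was set"
    return "timed out"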
235 |
236 | class Timer(_Timer):
237 | """Re-implemented Timer class.
238 |
239 |     Actually there's nothing new here; it just exposes the Timer class from
240 | the stdlib as a normal class in case you want to extend it.
241 | """
242 | pass
243 |
244 |
245 | class Thread(Thread):
246 | """Extended Thread class.
247 |
248 | This is a subclass of the standard python Thread class, which adds support
249 | for the following new features:
250 |
251 | * a "priority" attribute, through which you can set the (advisory)
252 | priority of a thread to a float between 0 and 1.
253 | * an "affinity" attribute, through which you can set the (advisory)
254 | CPU affinity of a thread.
255 | * before_run() and after_run() methods that can be safely extended
256 | in subclasses.
257 |
258 |     It also provides some niceties over the standard Thread class:
259 |
260 | * support for thread groups using the existing "group" argument
261 | * support for "daemon" as an argument to the constructor
262 | * join() returns a bool indicating success of the join
263 |
264 | """
265 |
266 | _ConditionClass = None
267 |
268 | def __init__(self,group=None,target=None,name=None,args=(),kwargs={},
269 | daemon=None,priority=None,affinity=None):
270 | super(Thread,self).__init__(None,target,name,args,kwargs)
271 | if self._ConditionClass is not None:
272 | self.__block = self._ConditionClass()
273 | self.__ident = None
274 | if daemon is not None:
275 | self.daemon = daemon
276 | if group is None:
277 | self.group = threading2.default_group
278 | else:
279 | self.group = group
280 | if priority is not None:
281 | self.priority = priority
282 | else:
283 | self.__priority = None
284 | if affinity is not None:
285 | self.affinity = affinity
286 | else:
287 | self.__affinity = None
288 |
289 | @classmethod
290 | def from_thread(cls,thread):
291 | """Convert a vanilla thread object into an instance of this class.
292 |
293 | This method "upgrades" a vanilla thread object to an instance of this
294 | extended class. You might need to call this if you obtain a reference
295 | to a thread by some means other than (a) creating it, or (b) from the
296 | methods of the threading2 module.
297 | """
298 | new_classes = []
299 | for new_cls in cls.__mro__:
300 | if new_cls not in thread.__class__.__mro__:
301 | new_classes.append(new_cls)
302 | if isinstance(thread,cls):
303 | pass
304 | elif issubclass(cls,thread.__class__):
305 | thread.__class__ = cls
306 | else:
307 | class UpgradedThread(thread.__class__,cls):
308 | pass
309 | thread.__class__ = UpgradedThread
310 | for new_cls in new_classes:
311 | if hasattr(new_cls,"_upgrade_thread"):
312 | new_cls._upgrade_thread(thread)
313 | return thread
314 |
315 | def _upgrade_thread(self):
316 | self.__priority = None
317 | self.__affinity = None
318 | if getattr(self,"group",None) is None:
319 | self.group = threading2.default_group
320 |
321 | def join(self,timeout=None):
322 | super(Thread,self).join(timeout)
323 | return not self.is_alive()
324 |
325 | def start(self):
326 | # Trick the base class into running our wrapper methods
327 | self_run = self.run
328 | def run():
329 | self.before_run()
330 | try:
331 | self_run()
332 | finally:
333 | self.after_run()
334 | self.run = run
335 | super(Thread,self).start()
336 |
337 | def before_run(self):
338 | if self.__priority is not None:
339 | self._set_priority(self.__priority)
340 | if self.__affinity is not None:
341 | self._set_affinity(self.__affinity)
342 |
343 | def after_run(self):
344 | pass
345 |
346 | # Backport "ident" attribute for older python versions
347 | if "ident" not in Thread.__dict__:
348 | def before_run(self):
349 | self.__ident = _get_ident()
350 | if self.__priority is not None:
351 | self._set_priority(self.__priority)
352 | if self.__affinity is not None:
353 | self._set_affinity(self.__affinity)
354 | @property
355 | def ident(self):
356 | return self.__ident
357 |
358 | def _get_group(self):
359 | return self.__group
360 | def _set_group(self,group):
361 | try:
362 | self.__group
363 | except AttributeError:
364 | self.__group = group
365 | group._add_thread(self)
366 | else:
367 | raise AttributeError("cannot set group after thread creation")
368 | group = property(_get_group,_set_group)
369 |
370 | def _get_priority(self):
371 | return self.__priority
372 | def _set_priority(self,priority):
373 | if not 0 <= priority <= 1:
374 | raise ValueError("priority must be between 0 and 1")
375 | self.__priority = priority
376 | if self.is_alive():
377 |             priority = self._set_priority(priority)
378 | return priority
379 | priority = property(_get_priority,_set_priority)
380 | def _set_priority(self,priority):
381 | return priority
382 |
383 | def _get_affinity(self):
384 | return self.__affinity
385 | def _set_affinity(self,affinity):
386 | if not isinstance(affinity,CPUSet):
387 | affinity = CPUSet(affinity)
388 | self.__affinity = affinity
389 | if self.is_alive():
390 |             affinity = self._set_affinity(affinity)
391 | return affinity
392 | affinity = property(_get_affinity,_set_affinity)
393 | def _set_affinity(self,affinity):
394 | return affinity
395 |
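# Illustrative sketch of the extended constructor arguments and the boolean
# join() described in the docstring above; the target, priority, affinity and
# timeout values are arbitrary.
def _example_thread_usage():
    def work():
        pass  # ...do something...
    t = Thread(target=work, daemon=True, priority=0.2, affinity=[0])
    t.start()
    if not t.join(timeout=5):
        pass  # worker is still running after 5 seconds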
396 |
397 | class SHLock(_ContextManagerMixin):
398 | """Shareable lock class.
399 |
400 | This functions just like an RLock except that you can also request a
401 | "shared" lock mode. Shared locks can co-exist with other shared locks
402 | but block exclusive locks. You might also know this as a read/write lock.
403 |
404 | Currently attempting to upgrade or downgrade between shared and exclusive
405 | locks will cause a deadlock. This restriction may go away in future.
406 | """
407 |
408 | class Context(_ContextManagerMixin):
409 | def __init__(self, parent,
410 | blocking=True, timeout=None, shared=False):
411 | self.parent = parent
412 | self.blocking = blocking
413 | self.timeout = timeout
414 | self.shared = shared
415 |
416 | def acquire(self):
417 | self.parent.acquire(blocking=self.blocking,
418 | timeout=self.timeout,
419 | shared=self.shared)
420 |
421 | def release(self):
422 | self.parent.release()
423 |
424 | _LockClass = Lock
425 | _ConditionClass = Condition
426 |
427 | def __init__(self):
428 | self._lock = self._LockClass()
429 | # When a shared lock is held, is_shared will give the cumulative
430 | # number of locks and _shared_owners maps each owning thread to
431 |         # the number of locks it holds.
432 | self.is_shared = 0
433 | self._shared_owners = {}
434 | # When an exclusive lock is held, is_exclusive will give the number
435 | # of locks held and _exclusive_owner will give the owning thread
436 | self.is_exclusive = 0
437 | self._exclusive_owner = None
438 |         # When someone is forced to wait for a lock, they add themselves
439 | # to one of these queues along with a "waiter" condition that
440 | # is used to wake them up.
441 | self._shared_queue = []
442 | self._exclusive_queue = []
443 | # This is for recycling waiter objects.
444 | self._free_waiters = []
445 |
446 | def __call__(self,blocking=True,timeout=None,shared=False):
447 | return SHLock.Context(self, blocking=blocking,
448 | timeout=timeout, shared=shared)
449 |
450 | def acquire(self,blocking=True,timeout=None,shared=False):
451 | """Acquire the lock in shared or exclusive mode."""
452 | with self._lock:
453 | if shared:
454 | self._acquire_shared(blocking,timeout)
455 | else:
456 | self._acquire_exclusive(blocking,timeout)
457 | assert not (self.is_shared and self.is_exclusive)
458 |
459 | def release(self):
460 | """Release the lock."""
461 | # This decrements the appropriate lock counters, and if the lock
462 | # becomes free, it looks for a queued thread to hand it off to.
463 | # By doing the handoff here we ensure fairness.
464 | me = currentThread()
465 | with self._lock:
466 | if self.is_exclusive:
467 | if self._exclusive_owner is not me:
468 | raise RuntimeError("release() called on unheld lock")
469 | self.is_exclusive -= 1
470 | if not self.is_exclusive:
471 | self._exclusive_owner = None
472 | # If there are waiting shared locks, issue it to them
473 | # all and then wake everyone up.
474 | if self._shared_queue:
475 | for (thread,waiter) in self._shared_queue:
476 | self.is_shared += 1
477 | self._shared_owners[thread] = 1
478 | waiter.notify()
479 | del self._shared_queue[:]
480 | # Otherwise, if there are waiting exclusive locks,
481 |                 # they get first dibs on the lock.
482 | elif self._exclusive_queue:
483 | (thread,waiter) = self._exclusive_queue.pop(0)
484 | self._exclusive_owner = thread
485 | self.is_exclusive += 1
486 | waiter.notify()
487 | elif self.is_shared:
488 | try:
489 | self._shared_owners[me] -= 1
490 | if self._shared_owners[me] == 0:
491 | del self._shared_owners[me]
492 | except KeyError:
493 | raise RuntimeError("release() called on unheld lock")
494 | self.is_shared -= 1
495 | if not self.is_shared:
496 | # If there are waiting exclusive locks,
497 |                 # they get first dibs on the lock.
498 | if self._exclusive_queue:
499 | (thread,waiter) = self._exclusive_queue.pop(0)
500 | self._exclusive_owner = thread
501 | self.is_exclusive += 1
502 | waiter.notify()
503 | else:
504 | assert not self._shared_queue
505 | else:
506 | raise RuntimeError("release() called on unheld lock")
507 |
508 | def _acquire_shared(self,blocking=True,timeout=None):
509 | me = currentThread()
510 |         # Easy case: acquiring a lock we already hold.
511 | if self.is_shared and me in self._shared_owners:
512 | self.is_shared += 1
513 | self._shared_owners[me] += 1
514 | return True
515 | # If the lock is already spoken for by an exclusive, add us
516 | # to the shared queue and it will give us the lock eventually.
517 | if self.is_exclusive or self._exclusive_queue:
518 | if self._exclusive_owner is me:
519 | raise RuntimeError("can't downgrade SHLock object")
520 | if not blocking:
521 | return False
522 | waiter = self._take_waiter()
523 | try:
524 | self._shared_queue.append((me,waiter))
525 | if not waiter.wait(timeout=timeout):
526 | self._shared_queue.remove((me,waiter))
527 | return False
528 | assert not self.is_exclusive
529 | finally:
530 | self._return_waiter(waiter)
531 | else:
532 | self.is_shared += 1
533 | self._shared_owners[me] = 1
534 |
535 | def _acquire_exclusive(self,blocking=True,timeout=None):
536 | me = currentThread()
537 |         # Easy case: acquiring a lock we already hold.
538 | if self._exclusive_owner is me:
539 | assert self.is_exclusive
540 | self.is_exclusive += 1
541 | return True
542 | # If the lock is already spoken for, add us to the exclusive queue.
543 | # This will eventually give us the lock when it's our turn.
544 | if self.is_shared or self.is_exclusive:
545 | if not blocking:
546 | return False
547 | waiter = self._take_waiter()
548 | try:
549 | self._exclusive_queue.append((me,waiter))
550 | if not waiter.wait(timeout=timeout):
551 | self._exclusive_queue.remove((me,waiter))
552 | return False
553 | finally:
554 | self._return_waiter(waiter)
555 | else:
556 | self._exclusive_owner = me
557 | self.is_exclusive += 1
558 |
559 | def _take_waiter(self):
560 | try:
561 | return self._free_waiters.pop()
562 | except IndexError:
563 | return self._ConditionClass(self._lock)
564 |
565 | def _return_waiter(self,waiter):
566 | self._free_waiters.append(waiter)
567 |
568 |
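# Illustrative sketch of shared vs. exclusive acquisition, using the
# __call__-based context factory above; the reader/writer split is arbitrary.
def _example_shlock_usage(shlock):
    # shlock is a single SHLock instance shared by readers and writers.
    # Readers: many threads may hold the lock at once.
    with shlock(shared=True):
        pass  # ...read shared state...
    # Writers: excludes readers and other writers (the default mode).
    with shlock:
        pass  # ...mutate shared state...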
569 |
570 | # Utilities for handling CPU affinity
571 |
572 | class CPUSet(set):
573 | """Object representing a set of CPUs on which a thread is to run.
574 |
575 | This is a python-level representation of the concept of a "CPU mask" as
576 | used in various thread-affinity libraries. Each CPU in the system is
577 | represented by an integer, with the first being CPU zero.
578 | """
579 |
580 | def __init__(self,set_or_mask=None):
581 | super(CPUSet,self).__init__()
582 | if set_or_mask is not None:
583 | if isinstance(set_or_mask,(int,long)):
584 | cpu = 0
585 | cur_mask = set_or_mask
586 | while cur_mask:
587 | if cur_mask & 1:
588 | self.add(cpu)
589 | cur_mask = cur_mask >> 1
590 | cpu += 1
591 | else:
592 | for i in set_or_mask:
593 | self.add(i)
594 |
595 | def add(self,cpu):
596 | return super(CPUSet,self).add(int(cpu))
597 |
598 | def to_bitmask(self):
599 | bitmask = 0
600 | for cpu in self:
601 | bitmask |= 1 << cpu
602 | return bitmask
603 |
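# Worked example of the mask <-> set conversion above: bit N of the mask
# corresponds to CPU N, so the mask 0b101 means CPUs 0 and 2.
def _example_cpuset_roundtrip():
    assert CPUSet(0b101) == CPUSet([0, 2])
    assert CPUSet([0, 2]).to_bitmask() == 5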
604 |
605 | def system_affinity():
606 | """Get the set of CPUs available on this system."""
607 | return CPUSet((0,))
608 |
609 |
610 | def process_affinity(affinity=None):
611 | """Get or set the CPU affinity set for the current process.
612 |
613 | This will affect all future threads spawned by this process. It is
614 | implementation-defined whether it will also affect previously-spawned
615 | threads.
616 | """
617 | if affinity is not None:
618 | affinity = CPUSet(affinity)
619 | if not affinity.issubset(system_affinity()):
620 | raise ValueError("unknown cpus: %s" % affinity)
621 | return system_affinity()
622 |
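# Illustrative sketch of querying and restricting process affinity; note that
# this fallback implementation only ever reports CPU 0, and presumably the
# platform-specific modules replace it with something richer.
def _example_affinity_usage():
    available = system_affinity()   # e.g. CPUSet([0]) with this fallback
    process_affinity(available)     # restrict future threads to those CPUs
    return available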
623 |
--------------------------------------------------------------------------------