simple timerpool implementation for uncritical event-purposes. The "tick" is an abstract value and depends on the selected timebase and the environment
166 |
Its useful if you need wakeup-timers for protocol implementations or you like to calculate/interpolate something for a given timeslot
167 |
For each TimerPool object only one tickthread is spawned which handles the message-queue and the lifecycle of the TimerHandle. The maximum amount of timers is only constrained by memory and the given timebase.
168 |
The allocation of a new TimerHandle always blocks but is threadsafe. The maximum blocking-time relates directly to your given timebase of the pool
169 |
There is a blocking and a nonblocking API on the TimerHandles which can be used simultaneously from different threads at once. All actions on the TimerHandles are completely threadsafe and the ptrs itself can be shared between threads.
170 |
the following example demonstrates the basic use. For detailed api use and for multithreading examples use the tests as a starter.
171 |
import timerpool
172 |
173 | let
174 | tpRef=timerpool.newTimerPool(10.int)# timerpool with 10ms timebase
175 | timerhdl=allocTimer(tpRef)
176 |
177 | timerhdl.setAlarmCounter(5)# set expiration to 50ms (timebase * 5)
178 |
179 | while timerhdl.getAlarmCounter() > 0: # you can poll it
180 | discard
181 |
182 | timerhdl.waitForAlarm()# or sleep till timer expired
183 | timerhdl.deallocTimer()# pushes the timer back to pool
184 | tpRef.shutdownTimerPool()# shutdown the pool and blocks till all
185 | # timers are expired
235 |
236 | container type returned by waitForGetStats. the sum of runningCount,freedCount and inactiveCount is the total amount of timerhandles within the pool
237 |
238 |
257 |
258 | creator proc. The tickval is of milliseconds and the default timebase is 100 milliseconds the default of the mintimers parameter is 5 (shrink_pool leave this minimum amount of freed timers within the pool)
259 |
260 |
returns a timerhandle. the timer is always of type:oneshot but could also act as a continous one. in this case the caller needs to reset the alarm to the needed value. This threadsafe call blocks till the request was handled by the pool-tick-thread
275 |
before calling (if the pool was not spawned by the calling thread) initThreadContext() should be called
276 |
raises TPError if the pointer parameter is nil and/or the threadContext was not initialised with initThreadContext
the timer handle is pushed back to the pool. once freed it is not handled by the timerscan any more and its recycled for later use
293 |
this proc could be called from multiple threads simultaneously. if one or more threads are waiting on the timers signal all threads get informed. This call is part of the nonblocking api
shrinks the pool of freed Timers. the given minFreedTimers value at pool construction specifies the lower watermark
343 |
this is a nonblocking call. raises TPError if the pointer parameter is nil and/or the threadContext was not initialised with initThreadContext (only needed if the pool was not spawned by the caller)
362 |
363 | convenience template to get the TimerPoolPtr from the ref
364 |
365 |
366 |
367 |
368 |
369 |
370 |
371 |
372 |
373 |
378 |
379 |
380 |
381 |
382 |
383 |
384 |
--------------------------------------------------------------------------------
/tests/humanecho.nim:
--------------------------------------------------------------------------------
1 | import terminal,strutils,random,os,threadpool
2 | import ../timerpool
3 |
4 | # funstuff: human type simulator
5 | # simple showcase to demonstrate the timerpool usage
6 |
7 | # we split the string into words and calculate random
8 | # waits on each char
9 | # each word is written to the console in different threads
10 | # (for each word one thread and timer)
11 | # at different timeslots (this lead to simulated type errors)
12 |
type
  WordIndex = tuple[startidx:int,length:int]
  # each word inside the string is indexed with start and length

proc getWordIdx(p : string) : seq[WordIndex] =
  ## splits `p` at ascii whitespace and returns, per word, the index of its
  ## first character and the index of the char the word ends on (the
  ## whitespace char itself, or the last char of the string).
  result = @[]
  var current: WordIndex = (0, p.high)
  for pos in 0 .. p.high:
    # a word terminates on a whitespace char or at the very end of the string
    if p[pos].isSpaceAscii or pos == p.high:
      current.length = pos
      result.add(current)
      current = (pos + 1, p.high)
27 |
type
  StringPtr = ptr string
  # raw pointer to the payload string; shared read-only across worker threads
  WordChunk = tuple[payload : StringPtr,idxrange : WordIndex,
                    timer: TimerHandlePtr]
  # per-word work item: the shared payload, the word's index range and the
  # pool timer used to pace the character output
32 |
proc outputWord(dest : File, output:WordChunk,
                rwaits:seq[int], timeslot : int = 0) : void {.thread.} =
  ## output worker method.
  ## waits for its start-timeslot (if given), then writes the word char by
  ## char to `dest`, sleeping the precomputed random wait (in timer ticks)
  ## after each char. finally the used timer is pushed back to the pool.
  var ctr = 0
  if timeslot > 0:
    output.timer.setAlarmCounter(timeslot)
    output.timer.waitForAlarm # wait on our timeslot
  for i in output.idxrange.startidx .. output.idxrange.length:
    output.timer.setAlarmCounter(rwaits[ctr])
    # bugfix: honor the dest parameter instead of the hardcoded stdout
    # (all current callers pass stdout, so observable behavior is unchanged)
    dest.write(output.payload[i])
    output.timer.waitForAlarm
    inc(ctr)
  output.timer.deallocTimer()
46 |
proc absTypingTime(val:seq[int]) : int =
  ## total typing duration of one word: the sum of its per-char waits
  result = 0
  for waitTicks in val:
    result += waitTicks
52 |
proc generateAbsRandomWaitsPerChar(val:string, metadata: WordIndex) : seq[int] =
  ## returns one random wait (10..20 timer ticks) per character of the word
  ## described by `metadata`.
  ## note: `val` is unused here - the waits depend only on the word's length
  ## NOTE(review): rand() is used without randomize(); the sequence is
  ## deterministic per process unless seeded elsewhere - confirm intended
  result = newSeq[int](0)
  for idx in metadata.startidx..metadata.length:
    result.add(rand(range[10.int..20.int])) # TODO parameterize the behaviour
57 |
proc echoTyped*(dest : File, payload : string) =
  ## funny string output with possible errors.
  ## spawns one worker thread (and allocates one pool timer) per word; each
  ## worker starts at a precomputed timeslot, so overlapping outputs simulate
  ## typing errors. blocks (sync) until all workers are done, then shuts the
  ## pool down.
  let tp = newTimerPool(10)
  var
    pl : string = payload
    words :seq[WordIndex] = getWordIdx(pl)
    sptr : StringPtr = pl.addr
    # pl outlives every worker because of sync() below, so the addr is safe
    offset = 0.int

  for word in words:
    let waitsperchar = generateAbsRandomWaitsPerChar(pl,word)
    var chunk = (sptr,word,tp.allocTimer)
    spawn outputWord(dest,chunk,waitsperchar,offset)
    offset = offset + (absTypingTime(waitsperchar) - rand(range[1.int..25.int]))
    # simulate type error by substracting something from the timeslot-offset

  sync()
  tp.shutdownTimerPool()
76 |
when isMainModule:
  # demo: "type" three lines with simulated human timing errors
  stdout.echoTyped("Hello Nim....")
  stdout.echoTyped(" follow the white rabbit ")
  stdout.echoTyped("and dont forget to take the red pill .. :-)")
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
/tests/timerpool_test.nim:
--------------------------------------------------------------------------------
1 | import ../timerpool
2 | import times,threadpool,os
3 | import unittest
4 |
suite "general_tests":
  setup:
    # fresh pool per test: 10ms timebase, minFreedTimers watermark = 1,
    # and 10 pre-allocated timer handles
    var
      stpRef = timerpool.newTimerPool(10.int,1.int)
      timerhdls = newSeq[TimerHandlePtr](10)
      stpPtr = poolRef2Ptr(stpRef)

    for i in timerhdls.low .. timerhdls.high:
      timerhdls[i] = poolRef2Ptr(stpRef).allocTimer()

  teardown:
    stpRef.shutdownTimerPool()

  # test the timerstates: running -> inactive (fired) -> freed
  test "test_timerstate":

    # set all timers fire after 5 ticks
    for i in timerhdls.low .. timerhdls.high:
      timerhdls[i].setAlarmCounter(5.int) # run for about 50ms (timerbase*5)
    var statsBefore : PoolStats = stpPtr.waitForGetStats
    sleep(70) # wait till timer fired
    var statsAfter = stpPtr.waitForGetStats
    for i in timerhdls:
      i.deallocTimer()
    var statsFinal = stpPtr.waitForGetStats
    check:
      statsBefore.runningCount == 10
      statsBefore.freedCount == 0
      statsBefore.inactiveCount == 0
      statsAfter.runningCount == 0
      statsAfter.freedCount == 0
      statsAfter.inactiveCount == 10
      statsFinal.runningCount == 0
      statsFinal.freedCount == 10
      statsFinal.inactiveCount == 0

  test "shrinkPool":
    # free all timers, then shrink; exactly the minFreedTimers watermark (1)
    # must survive the shrink
    for i in timerhdls:
      i.deallocTimer()
    var statsAfterDealloc = stpPtr.waitForGetStats
    stpPtr.shrinkTimerPool
    sleep(500) # give the tick-thread time to process the shrink command
    var statsAfterShrink = stpPtr.waitForGetStats

    check:
      statsAfterDealloc.runningCount == 0
      statsAfterDealloc.freedCount == 10
      statsAfterDealloc.inactiveCount == 0
      statsAfterShrink.runningCount == 0
      statsAfterShrink.freedCount == 1
      statsAfterShrink.inactiveCount == 0


  test "timerExceptions":
    for i in timerhdls.low .. timerhdls.high:
      timerhdls[i].deallocTimer()
    # every call on a freed timer should throw a TPError
    expect(timerpool.TPError):
      timerhdls[timerhdls.low].setAlarmCounter(50)
    expect(timerpool.TPError):
      timerhdls[timerhdls.low].deallocTimer()
    expect(timerpool.TPError):
      discard timerhdls[timerhdls.low].getAlarmCounter()
    expect(timerpool.TPError):
      timerhdls[timerhdls.low].waitForAlarm()
70 |
suite "test_threading":
  setup:
    # pool with 10ms timebase and 5 shared timer handles per test
    var
      stpRef= timerpool.newTimerPool(10.int)
      timerhdls = newSeq[TimerHandlePtr](5)
    for i in timerhdls.low .. timerhdls.high:
      timerhdls[i] = (poolRef2Ptr(stpRef)).allocTimer()

  teardown:
    stpRef.shutdownTimerPool()

  test "one_timer_200_childthreads":
    # 200 threads block on the same timer; all must wake up when it fires
    # worker proc per thread
    proc dosomething(timerhdl :TimerHandlePtr) : int =
      result = 1
      timerhdl.waitForAlarm()

    var presults = newSeq[FlowVar[int]](200)
    timerhdls[0].setAlarmCounter(10) # 100ms (timerbase*10)

    for i in presults.low..presults.high:
      presults[i] = spawn dosomething(timerhdls[0])
    discard stpRef.poolRef2Ptr.waitForGetStats
    timerhdls[0].waitForAlarm()
    # every thread is also waiting on it. when finished the results
    # are present
    var tresult : int = 0
    for i in presults.low..presults.high:
      tresult = tresult + ^presults[i]

    check:
      tresult == 200

  test "early_wakeup":
    # multiple threads are waiting on a timer
    # we dealloc the timer before it's done
    # all threads should wakeup immediately
    proc dosomething(timerhdl :TimerHandlePtr) : int =
      result = 1
      timerhdl.waitForAlarm()
    var presults = newSeq[FlowVar[int]](250)

    timerhdls[0].setAlarmCounter(900) # 9000ms
    timerhdls[1].setAlarmCounter(50)

    # NOTE(review): cpuTime() measures per-process cpu seconds, not
    # wall-clock milliseconds, so `ctime < 500` below is a very weak
    # bound - epochTime() would measure the intended wait - TODO confirm
    var ctime = cpuTime()

    for i in presults.low..presults.high:
      presults[i] = spawn dosomething(timerhdls[0])
    timerhdls[0].deallocTimer() # dealloc before done
    # every thread is also waiting on it. if finished the results
    # are present
    var tresult : int = 0
    for i in presults.low..presults.high:
      tresult = tresult + ^presults[i]
    ctime = cpuTime() - ctime

    check:
      tresult == 250
      ctime < 500

  test "multiple_threads_alloc":
    # multiple threads requesting a new timer from the pool;
    # each worker must init/deinit its own thread context
    proc dosomething(poolhdl :TimerPoolPtr) : int =
      var timer : TimerHandlePtr = nil
      try:
        initThreadContext(poolhdl)
        timer = poolhdl.allocTimer()
        timer.setAlarmCounter(2)
        # do something till timeout reached
        while timer.getAlarmCounter() > 0:
          result = result + 1
      except:
        echo getCurrentExceptionMsg()
      finally:
        timer.deallocTimer()
        deinitThreadContext(poolhdl)

    var presults = newSeq[FlowVar[int]](250)
    for i in presults.low..presults.high:
      presults[i] = spawn dosomething(poolRef2Ptr(stpRef))

    var tresult : int = 0

    for i in presults.low..presults.high:
      tresult = tresult + ^presults[i]

    # snd run - handles recycled from the pool
    for i in presults.low..presults.high:
      presults[i] = spawn dosomething(poolRef2Ptr(stpRef))

    var tresult2 : int = 0

    for i in presults.low..presults.high:
      tresult2 = tresult2 + ^presults[i]

    #thrd run

    for i in presults.low..presults.high:
      presults[i] = spawn dosomething(poolRef2Ptr(stpRef))

    var tresult3 : int = 0

    for i in presults.low..presults.high:
      tresult3 = tresult3 + ^presults[i]
--------------------------------------------------------------------------------
/timerpool.nim:
--------------------------------------------------------------------------------
1 | # simple timerpool implementation in Nim
2 | # Copyright (c) 2017 Michael Krauter
3 | # please see the LICENSE-file for details.
4 |
5 | import times, sequtils, deques, locks, os, concurrency/atomics
6 |
7 | ## simple timerpool implementation for uncritical event-purposes.
8 | ## The "tick" is an abstract value and depends
9 | ## on the selected timebase and the environment
10 | ##
11 | ## Its useful if you need wakeup-timers for protocol implementations or you like
12 | ## to calculate/interpolate something for a given timeslot
13 | ##
14 | ## For each TimerPool object only one tickthread is spawned which handles
15 | ## the message-queue and the lifecycle of the TimerHandle.
16 | ## The maximum amount of timers is only constrained by memory
17 | ## and the given timebase.
18 | ##
19 | ## The allocation of a new TimerHandle always block but is threadsafe.
20 | ## The maximum blocking-time relates directly to your given
21 | ## timebase of the pool
22 | ##
23 | ## There is a blocking and a nonblocking API on the TimerHandles
24 | ## which can be used simulataneously from different threads at once.
25 | ## All actions on the TimerHandles are completely threadsafe
26 | ## and the ptrs itself can be shared between threads.
27 | ##
28 | ## the following example demonstrates the basic use.
29 | ## For detailed api use and for multithreading examples
30 | ## use the tests as a starter.
31 | ##
32 | ## .. code-block:: nim
33 | ## import timerpool
34 | ##
35 | ## let
36 | ## tpRef = timerpool.newTimerPool(10.int) # timerpool with 10ms timebase
37 | ## timerhdl = allocTimer(tpRef)
38 | ##
39 | ## timerhdl.setAlarmCounter(5) # set expiration to 50ms (timebase * 5)
40 | ##
41 | ## while timerhdl.getAlarmCounter() > 0: # you can poll it
42 | ## discard
43 | ##
44 | ## timerhdl.waitForAlarm() # or sleep till timer expired
45 | ## timerhdl.deallocTimer() # pushes the timer back to pool
46 | ## tpRef.shutdownTimerPool() # shutdown the pool and blocks till all
47 | ## # timers are expired
48 | ##
49 | ##
50 | # TODO: test more envs - at the moment only tested
51 | # on windows10 (Intel N3540,x64)
52 | #
53 | #
54 | # some implementation hints:
55 | # the TimerHandles are maintained and owned by the tickthread
56 | #
57 | # instead of maintaining and handling multiple
58 | # messages per thread there is only one message/action per thread possible (PMsg).
59 | # The pointer to this object is stored within the thread-local var
60 | # "threadContext" and it's initialized by calling "initThreadContext".
61 | # By calling newTimerPool this proc is called implicitly.
62 | # Due to that (and to simplify the api) the allocation of a new timer
63 | # and retrieving some pool statistics is always blocking. The maximum
64 | # idle time is related to your timebase.
65 | # Once allocated, all actions on the timer itself could be blocking
66 | # or nonblocking dependend on your needs
67 | #
68 | # tested with gcc(x64)
69 | # Thread model: posix
70 | # gcc version 5.1.0 (tdm64-1)
71 | #
72 | # cl.exe 19.11.25507.1(x64)
73 |
when not compileOption("threads"):
  {.error: "TimerPool requires --threads:on option.".}

type
  TimerHandle = object
    # the timer is active if alarmctr > 0 and not freed
    alarmctr: Atomic[int] # countdown field
    waitLock: Lock # lock used for the blocking-style alarm api
    waitCond: Cond # condition associated to the waitLock
    isFreed: Atomic[bool] # true if the owner of the handle is the pool
    waitingOnLockCount: Atomic[int]
    # counts how many threads waiting on the lock.
    # needed that no signal is lost

  TimerHandleRef = ref TimerHandle # used by the tickthread

  TimerHandlePtr* = ptr TimerHandle
    ## pointer type to the timerpoolhandle.

  SomePtr = ptr object # ugly solution cause Thread needs a concrete type
  TPError* = object of Exception
    ## generic exception
    # NOTE(review): newer Nim prefers CatchableError as the base type;
    # Exception kept here for nim >= 0.17 compatibility

type
  PoolCmd = enum requestTimer, poolStats, killPool, shrinkPool, noOp
  PoolReply = enum success, abort
  # success is the default reply; abort is always answered if the
  # pool is about to shutdown

# guard pragma not possible here because the lock and the fields
# reside within different objects
type
  PMsg = object # message which is passed to the tickthread
    cmd: PoolCmd
    reply: PoolReply
    allocTimerCompleteCond: Cond
    replyTimerHandlePtr: TimerHandlePtr
    poolStatsCompleteCond: Cond # allows waiting for the getStats
    statRunningTimers: int # alarmcounter > 0
    statInactiveTimers: int # alarmcounter == 0, prev fired
    statFreedTimers: int # hdl released back to pool

  PMsgPtr = ptr PMsg
  PMsgRef = ref PMsg
# global var which needs to be initialized with initThreadContext
# if we are not the owner of the object
var threadContext {.threadvar.}: PMsgRef

type
  # queue for emiting the pool commands to the workerthread
  # for low resource environments an array could be used instead
  CmdQueuePtr = ptr Deque[PmsgPtr]
  CmdQueue = Deque[PmsgPtr]
  ThreadArg = tuple[poolobjptr: SomePtr, minFreedTimers: int]

type
  TimerPool = object
    timebase: int # the timebase of the tickthread
    tickthread: Thread[ThreadArg]
    # Lock for accessing the cmd-queue and check for poolShutdownDone
    poolReqLock: Lock
    cmdQueue {.guard: poolReqLock.}: CmdQueue
    poolShutdownDoneCond: Cond
    spawningThreadId: int

type
  TimerPoolPtr* = ptr TimerPool
    ## used to share among threads
  TimerPoolRef* = ref TimerPool
142 |
# timer_state templates
template timerRunning(timerref: TimerHandleRef): bool =
  # owned by a caller and still counting down
  not atomics.load(timerref[].isFreed).bool and
    atomics.load(timerref[].alarmctr).int > 0

template timerDone(timerref: TimerHandleRef): bool =
  # owned by a caller and already fired (counter reached 0)
  not atomics.load(timerref[].isFreed).bool and
    atomics.load(timerref[].alarmctr).int == 0

template timerFreed(timerref: TimerHandleRef): bool =
  # handle was given back to the pool
  atomics.load(timerref.isFreed).bool

template threadWaiting(timerref: TimerHandleRef): bool =
  # at least one thread blocks in waitForAlarm on this handle
  atomics.load(timerref.waitingOnLockCount).int > 0

# api templates
template checkForValidThreadContext(): void =
  if threadContext.isNil:
    raise newException(
      TPError, " please call initThreadContext() before using the API ")

template checkForNil*(timerhdl: TimerHandlePtr,
                      callingProc: string = ""): void =
  ## checks if the timerhdl is nil. if so a TPError is raised
  if timerhdl.isNil:
    raise newException(TPError, callingProc & ": timer_handle is nil ")

template checkForNil(stpp: TimerPoolPtr, callingProc: string = ""): void =
  if stpp.isNil:
    raise newException(TPError, callingProc & ": TimerPoolPtr is nil ")

template checkIfSpawningThread(tpptr: TimerPoolPtr) =
  if tpptr.spawningThreadId == getThreadId():
    raise newException(TPError, " execution of this proc prohibited within the owning thread ")

template poolRef2Ptr*(stpp: TimerPoolRef): TimerPoolPtr =
  ## convenience template to get the TimerPoolPtr from the ref
  (cast[TimerPoolPtr](stpp))

template msgRef2Ptr(pmsgref: PMsgRef): PMsgPtr =
  (cast[PMsgPtr](pmsgref))

template abortWhenTimerFreed(timerhdl: TimerHandlePtr, p: string) =
  if atomics.load(timerhdl.isFreed).bool:
    # TODO: provide better debug info which timer was freed
    # and from which source to trackdown nasty sharing errors
    raise newException(TPError, p & " timer already freed ")

template waitOnTimerhdl(timerhdl: TimerHandlePtr) =
  # wait counter. each wait_condition is counted. this ensures
  # that the signaling side (the worker thread which calls "signal")
  # knows how many times "signal" must be called to wake up all waiting
  # threads properly (the Lock-api has no notify_all-style call at the moment)
  discard atomics.fetchAdd(timerhdl.waitingOnLockCount, 1)
  wait(timerhdl.waitCond, timerhdl.waitLock)
  discard atomics.fetchSub(timerhdl.waitingOnLockCount, 1)

template waitOnStatsComplete(stpp: TimerPoolPtr, req: PMsgRef) =
  # caller must hold poolReqLock; released while waiting
  wait(req.poolStatsCompleteCond, stpp.poolReqLock)

template validatePoolReply(rep: PMsgRef) =
  if rep.reply == PoolReply.abort:
    raise newException(TPError, " pool is about to shutdown - request aborted ")

type
  ShutdownState = enum poolRunning, shutdownRequested, doShutdown
  # once shutdown recognised, the commandqueue isn´t processed anymore
  # but the workerloop still processes the running timers (shutdownRequested)
  # once all timers are fired, the state goes to doShutdown, all resources
  # are freed and the workerthread bails out
213 |
proc findFreeTimer(sptr: seq[TimerHandleRef]): TimerHandleRef =
  ## searches for an unused timerhdl (isFreed) and returns the first match.
  ## nil is returned if no unused timerhdl present
  # improvement: the original filtered the entire seq (allocating a new seq)
  # only to take the first element; a plain scan with early return does the
  # same without the allocation
  result = nil
  for handle in sptr:
    if not handle.isNil and timerFreed(handle):
      return handle
227 |
228 |
proc timerPoolWorkLoop(startupcontext: ThreadArg) {.thread.} =
  ## the pool's single tick-thread.
  ## per tick: decrements every running timer, signals all threads waiting
  ## on fired/freed timers, consumes the command queue (requestTimer,
  ## poolStats, killPool, shrinkPool) and sleeps the remainder of the
  ## timebase. owns all TimerHandle objects.
  let
    sptr: TimerPoolPtr = cast[TimerPoolPtr](startupcontext.poolobjptr)
    mintimers: int = startupcontext.minFreedTimers
  var
    allTHandles: seq[TimerHandleRef] = newSeq[TimerHandleRef](0)
    runningTimersCount: int
    freedTimersCount: int
    inactiveTimersCount: int
    shutdownState: ShutdownState = ShutdownState.poolRunning
    currTime: float
    poolIdle: bool # true if all timers freed

  poolIdle = false

  while true:

    # measure the time we need for waiting on the lock and doing the work,
    # substract this from the given sleeping-time to get a smoothed timebase
    # NOTE(review): cpuTime() is per-process cpu time, not wall-clock time,
    # so the jitter compensation may underestimate elapsed time - confirm
    currTime = cpuTime()

    runningTimersCount = 0
    inactiveTimersCount = 0

    if not poolIdle: # perform pool scan
      freedTimersCount = 0 # preserve the last known value if poolIdle
      for i in allTHandles.low .. allTHandles.high:
        let timer = allTHandles[i]
        if not timer.isNil:
          if timerRunning(allTHandles[i]):
            # active timer: count it down by one tick
            discard atomics.fetchSub(allTHandles[i].alarmctr, 1)
            runningTimersCount = runningTimersCount + 1
          elif timerFreed(allTHandles[i]):
            freedTimersCount = freedTimersCount + 1
          else:
            inactiveTimersCount = inactiveTimersCount + 1

          if timerDone(allTHandles[i]) or timerFreed(allTHandles[i]):
            # we need also check for freed-state because the timer could
            # be freed while it's counting
            while threadWaiting(allTHandles[i]):
              signal(allTHandles[i].waitCond)
              # we call signal for each waiting thread

    poolIdle = (runningTimersCount + inactiveTimersCount) == 0
    # TODO: perform sleep if the pool stays, for given amount of cycles, idle
    # we need a new signal which must be sent every time when a new command
    # is put into the queue

    if shutdownState == ShutdownState.poolRunning:
      # read out the queue. for each run we consume the entire queue

      withLock(sptr.poolReqLock):
        # only ptr-type allowed to prevent the thread local gc
        # playing with it
        let cmdqueueptr: CmdQueuePtr =
          cast[CmdQueuePtr](sptr.cmdQueue.addr)

        while cmdqueueptr[].len > 0:
          let pmsgptr: PMsgPtr = cmdqueueptr[].popLast
          let activeCommand = pmsgptr.cmd

          case activeCommand

          of requestTimer:
            poolIdle = false
            # reuse a freed handle if possible, otherwise allocate a new one
            var timerHandle = findFreeTimer(allTHandles)
            if timerHandle.isNil:
              # initialise new handle
              # as stated here by araq https://forum.nim-lang.org/t/104
              # allocShared is not needed (also see TimerPool ctor)
              # and the gc does the job for us
              timerhandle = cast[TimerHandleRef]
                (new TimerHandle)
              initLock(timerHandle.waitLock)
              initCond(timerHandle.waitCond)
              allTHandles.add(timerHandle)
            # recycled handle found
            atomics.store(timerHandle.alarmctr, 0.int)
            atomics.store(timerHandle.isFreed, false)
            atomics.store(timerHandle.waitingOnLockCount, 0.int)
            # init defaults

            pmsgptr.reply = PoolReply.success
            pmsgptr.replyTimerHandlePtr = cast[TimerHandlePtr]
              (timerHandle)
            signal(pmsgptr.allocTimerCompleteCond)
            # send response back to calling thread

          of poolStats:
            # snapshot of the counters from the last pool scan
            pmsgptr.statRunningTimers = runningTimersCount
            pmsgptr.statFreedTimers = freedTimersCount
            pmsgptr.statInactiveTimers = inactiveTimersCount
            signal(pmsgptr.poolStatsCompleteCond)

          of killPool:
            shutdownState = ShutdownState.shutdownRequested

          of shrinkPool:
            # drop freed handles down to the minTimers watermark
            if freedTimersCount > minTimers:
              freedTimersCount = 0
              var
                newAllTHandles: seq[TimerHandleRef] = newSeq[TimerHandleRef](
                  runningTimersCount +
                  inactiveTimersCount+minTimers)
                newIdx: int = 0
                recoveredCount: int = 0

              for hdl in allTHandles:
                if not hdl.isNil:
                  # keep every non-freed handle; keep freed handles only
                  # until the watermark is reached
                  if not atomics.load(hdl.isFreed).bool or recoveredCount < minTimers:
                    newAllTHandles[newIdx] = hdl
                    inc newIdx

                    if atomics.load(hdl.isFreed).bool:
                      inc recoveredCount
                      inc freedTimersCount

              allTHandles.delete(allTHandles.low, allTHandles.high)
              allTHandles = newAllTHandles
              # FIXME: reuse seq

          else:
            discard

    else:
      if shutdownState == ShutdownState.shutdownRequested:
        # probe if all timers are done. if so, enter state doShutdown
        # do not consume command queue any more
        if runningTimersCount == 0:
          shutdownState = ShutdownState.doShutdown

      elif shutdownState == ShutdownState.doShutdown:
        # free all handle resources, notify the shutdown caller and bail out
        for i in allTHandles.low .. allTHandles.high:
          let timer = allTHandles[i]
          if not timer.isNil:
            deinitLock(allTHandles[i].waitLock)
            deinitCond(allTHandles[i].waitCond)

        allTHandles.delete(allTHandles.low, allTHandles.high)
        signal(sptr.poolShutdownDoneCond)
        break # exit worker loop

    # adjust timebase and sleep / msused is in millisecs
    # to eliminate jitter
    let msused: int = cast[int]((cpuTime() - currTime)*1_000)
    if sptr.timebase > msused:
      sleep(sptr.timebase - msused)
377 |
378 |
proc createTimerPool(tbase: int): ref TimerPool =
  ## private ctor: allocates the pool object and initialises its lock,
  ## shutdown condition and (empty) command queue. the tick-thread is
  ## spawned separately by newTimerPool
  result = new TimerPool
  result.timebase = tbase
  result.spawningThreadId = getThreadId()
  # used by checkIfSpawningThread
  initLock(result.poolReqLock)
  initCond(result.poolShutdownDoneCond)
  withLock(result.poolReqLock):
    # lock needed to make compiler happy
    result.cmdQueue = deques.initDeque[PMsgPtr](8)
389 |
# public api
type
  Tickval* = range[1..int.high]
  MinTimerval* = range[1..int.high]
  ## integer type used to initialise the timerpool and to set the
  ## timeout of the timer

proc initThreadvar(): void =
  ## initialises the thread-local PMsg used to talk to the tick-thread
  ## (exactly one pending command per thread)
  threadContext = new PMsg
  initCond(threadContext.allocTimerCompleteCond)
  initCond(threadContext.poolStatsCompleteCond)
  threadContext.cmd = PoolCmd.noOp

proc deinitThreadvar(): void =
  ## frees the conditions held by the thread-local PMsg
  deinitCond(threadContext.allocTimerCompleteCond)
  deinitCond(threadContext.poolStatsCompleteCond)
406 |
proc initThreadContext*(tpptr: TimerPoolPtr): void {.raises: [TPError].} =
  ## to be called explicit if the pool-accessing thread is not the
  ## owner of the timerpool (initialises threadvar globs)
  ##
  ## raises a TPError if called within the spawning thread
  # the owning thread already got its context in newTimerPool
  checkIfSpawningThread(tpptr)
  initThreadvar()
414 |
proc newTimerPool*(tbase_ms: Tickval = 100,
                   minFreedTimers: MinTimerval = 5): ref TimerPool =
  ## creator proc.
  ## The tickval is of milliseconds and
  ## the default timebase is 100 milliseconds
  ## the default of the mintimers parameter is 5 (shrink_pool leave this
  ## minimum amount of freed timers within the pool)
  result = createTimerPool(tbase_ms)
  initThreadvar()
  # spawn the single tick-thread which owns all timer handles
  createThread(result.tickthread, timerPoolWorkLoop, (cast[SomePtr](result),
               cast[int](minFreedTimers)))
426 |
proc deinitThreadContext*(tpptr: TimerPoolPtr): void {.raises: [TPError].} =
  ## call this proc if the pool-accessing thread should be
  ## detached from the timerpool (cleanup threadvar globs)
  ##
  ## call this proc only if the current thread is not owner of the
  ## timerpool. if not a TPError is raised
  checkIfSpawningThread(tpptr)
  deinitThreadvar()
435 |
proc shutdownTimerPool*(tpref: TimerPoolRef): void =
  ## shut down the timerpool (graceful) and frees
  ## all resources (timerHandles and the pool itself)
  ##
  ## this call blocks till all timers are fired
  ## also only the spawning/owning thread is allowed to shutdown the pool
  ## this is guarded/ensured by the ref-parameter type within the public ctor
  threadContext.cmd = PoolCmd.killPool
  withLock(tpref.poolReqLock):
    tpref.cmdqueue.addLast(cast[PMsgPtr](threadContext))
    wait(tpref.poolShutdownDoneCond, tpref.poolReqLock)
    # tick-thread has exited here; flush the queue and unblock any
    # thread that is still waiting for a reply
    while tpref.cmdqueue.len > 0:
      # flush queue and inform possible waiting threads
      let pendingcmds = tpref.cmdqueue.popLast()
      pendingcmds.reply = PoolReply.abort

      case pendingcmds.cmd

      of requestTimer:
        signal(pendingcmds.allocTimerCompleteCond)
      of poolStats:
        signal(pendingcmds.poolStatsCompleteCond)
      else:
        discard

  deinitCond(tpref.poolShutdownDoneCond)
  deinitLock(tpref.poolReqLock)
  deinitThreadvar()
464 |
proc allocTimer*(tpptr: TimerPoolPtr): TimerHandlePtr {.raises: [TPError].} =
  ## returns a timerhandle. the timer is always of type:oneshot but could
  ## also act as a continous one. in this case the caller needs to reset the
  ## alarm to the needed value. This threadsafe call blocks till the request
  ## was handled by the pool-tick-thread
  ##
  ## before calling (if the pool was not spawned by the calling thread)
  ## initThreadContext() should be called
  ##
  ## raises TPError if the pointer parameter is nil and/or the threadContext
  ## was not initialised with initThreadContext
  checkForNil(tpptr, "allocTimer")
  checkForValidThreadContext()
  threadContext.cmd = PoolCmd.requestTimer
  withLock(tpptr.poolReqLock):
    tpptr.cmdqueue.addLast(msgRef2Ptr(threadContext))
    # the tick-thread signals this condition once the handle is assigned
    wait(threadContext.allocTimerCompleteCond, tpptr.poolReqLock)

  validatePoolReply(threadContext)
  result = threadContext.replyTimerHandlePtr
485 |
proc allocTimer*(tpptr: TimerPoolRef): TimerHandlePtr {.inline, raises: [TPError].} =
  ## convenience overload: forwards to the ptr-based allocTimer
  poolRef2Ptr(tpptr).allocTimer()
488 |
proc deallocTimer*(timerhdl: TimerHandlePtr): void {.raises: [TPError].} =
  ## the timer handle is pushed back to the pool.
  ## once freed it is not handled by the timerscan any more and its recycled for later use
  ##
  ## this proc could be called from multiple threads simultaneously.
  ## if one or more threads are waiting on the timers signal all threads
  ## get informed. This call is part of the nonblocking api
  ##
  ## raises TPError if the pointer parameter is nil
  checkForNil(timerhdl, "deallocTimer")
  abortWhenTimerFreed(timerhdl, "deallocTimer")
  # flagging is sufficient: the tick-thread recycles the handle and wakes
  # all waiters on its next scan
  atomics.store[bool](timerhdl.isFreed, true)
501 |
proc setAlarmCounter*(timerhdl: TimerHandlePtr, value: Tickval): void {.
  raises: [TPError].} =
  ## sets the timers countdown alarm-value to the given one.
  ## reset the counter after it´s fired to obtain a continous timer
  ##
  ## this call is threadsafe and part of the nonblocking-api
  ##
  ## raises TPError if the pointer parameter is nil or the timer is freed
  checkForNil(timerhdl, "setAlarmCounter")
  abortWhenTimerFreed(timerhdl, "setAlarmCounter")
  # the tick-thread decrements this atomically once per tick
  atomics.store[int](timerhdl.alarmctr, value)
513 |
proc getAlarmCounter*(timerhdl: TimerHandlePtr): int {.raises: [TPError].} =
  ## returns the current value of the alarmcounter
  ## could be used for a polling-style-waiting_for_timer_fired
  ##
  ## this call is threadsafe and part of the nonblocking-api
  ##
  ## raises TPError if the pointer parameter is nil or the timer already freed
  checkForNil(timerhdl, "getAlarmCounter")
  abortWhenTimerFreed(timerhdl, "getAlarmCounter")
  result = atomics.load[int](timerhdl.alarmctr)
524 |
proc waitForAlarm*(timerhdl: TimerHandlePtr): void {.raises: [TPError].} =
  ## blocking wait till the alarmcounter is decremented to 0
  ##
  ## threadsafe impl and could be called by multiple threads simultaniously
  ##
  ## raises TPError if the pointer parameter is nil or the timer already freed
  checkForNil(timerhdl, "waitForAlarm")
  abortWhenTimerFreed(timerhdl, "waitForAlarm")
  withLock(timerhdl.waitLock):
    # counted wait (see waitOnTimerhdl): the tick-thread emits one signal
    # per waiting thread, so no wakeup is lost
    waitOnTimerhdl(timerhdl)
535 |
type
  PoolStats* = tuple[runningCount: int,
                     freedCount: int,
                     inactiveCount: int]
  ## container type returned by waitForGetStats. the sum of
  ## runningCount,freedCount and inactiveCount is the total amount
  ## of timerhandles within the pool

proc waitForGetStats*(tpptr: TimerPoolPtr): PoolStats {.raises: [TPError].} =
  ## fetches some pool statistics for debugging purposes
  ##
  ## blocks until the tick-thread answered the request
  ##
  ## raises TPError if the pointer parameter is nil or the threadContext
  ## was not initialized with initThreadContext
  checkForNil(tpptr, "waitForGetStats")
  checkForValidThreadContext()
  threadContext.cmd = PoolCmd.poolStats
  withLock(tpptr.poolReqLock):
    tpptr.cmdqueue.addLast(msgRef2Ptr(threadContext))
    waitOnStatsComplete(tpptr, threadContext)

  validatePoolReply(threadContext)
  result.runningCount = threadContext.statRunningTimers
  result.freedCount = threadContext.statFreedTimers
  result.inactiveCount = threadContext.statInactiveTimers
560 |
proc shrinkTimerPool*(tpptr: TimerPoolPtr) {.raises: [TPError].} =
  ## shrinks the pool of freed Timers.
  ## the given minFreedTimers value at pool construction specifies the lower watermark
  ##
  ## this is a nonblocking call.
  ## raises TPError if the pointer parameter is nil and/or the threadContext
  ## was not initialised with initThreadContext (only needed if the pool was not
  ## spawned by the caller)
  checkForNil(tpptr, "shrinkTimerPool")
  checkForValidThreadContext()
  threadContext.cmd = PoolCmd.shrinkPool
  withLock(tpptr.poolReqLock):
    # fire-and-forget: no completion condition for this command
    tpptr.cmdqueue.addLast(msgRef2Ptr(threadContext))
574 |
--------------------------------------------------------------------------------
/timerpool.nimble:
--------------------------------------------------------------------------------
1 | # Package
2 | version = "0.1.0"
3 | author = "Michael Krauter"
4 | description = "single thread Timerpool implementation in Nim for event purpose"
5 | license = "MIT"
6 | skipDirs = @["tests"]
7 |
8 | # Dependencies
9 | requires "nim >= 0.17.0"
10 |
11 | task test, "running tests":
12 | exec "nim timerpool_tests"
--------------------------------------------------------------------------------