├── .travis.yml ├── DESIGN.md ├── LICENSE.md ├── Makefile ├── README.md ├── equeue.c ├── equeue.h ├── equeue_freertos.c ├── equeue_mbed.cpp ├── equeue_platform.h ├── equeue_posix.c ├── equeue_windows.c └── tests ├── prof.c └── tests.c /.travis.yml: -------------------------------------------------------------------------------- 1 | # Environment variables 2 | env: 3 | global: 4 | - CFLAGS=-Werror 5 | 6 | # CI jobs 7 | jobs: 8 | include: 9 | # Test stage 10 | - stage: test 11 | env: 12 | - STAGE=test 13 | - NAME=test 14 | script: 15 | # Strict compilation of library only (tests use gcc features) 16 | - make CFLAGS+=-pedantic 17 | # Run tests 18 | - make test 19 | # Find code size with smallest configuration 20 | - make clean size OBJ=equeue.o | tee sizes 21 | 22 | # Update status with code size, compare with master if possible 23 | - | 24 | if [ "$TRAVIS_TEST_RESULT" -eq 0 ] 25 | then 26 | CURR=$(tail -n1 sizes | awk '{print $1}') 27 | PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \ 28 | | jq -re "select(.sha != \"$TRAVIS_COMMIT\") 29 | | .statuses[] | select(.context == \"$STAGE/$NAME\").description 30 | | capture(\"code size is (?<size>[0-9]+)\").size" \ 31 | || echo 0) 32 | 33 | STATUS="Passed, code size is ${CURR}B" 34 | if [ "$PREV" -ne 0 ] 35 | then 36 | STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)" 37 | fi 38 | fi 39 | 40 | # Runtime profiling stage 41 | - stage: test 42 | env: 43 | - STAGE=test 44 | - NAME=prof 45 | script: 46 | # Relative profiling against master 47 | - if ( git clone https://github.com/geky/equeue master && 48 | make -s -C master prof | tee master/runtime ) ; 49 | then 50 | cat master/runtime | make prof | tee runtime ; 51 | else 52 | make prof | tee runtime ; 53 | fi 54 | 55 | # Update status with profile results, compare with master if possible 56 | - | 57 | if [ "$TRAVIS_TEST_RESULT" -eq 0 ] 58 | then 59 | CURR=$(grep -o '[0-9]\+ cycles' runtime | \ 60 | awk '{sum += $1} END {print sum}') 61 | PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \ 62 | | jq -re "select(.sha != \"$TRAVIS_COMMIT\") 63 | | .statuses[] | select(.context == \"$STAGE/$NAME\").description 64 | | capture(\"runtime is (?<runtime>[0-9]+)\").runtime" \ 65 | || echo 0) 66 | 67 | STATUS="Passed, runtime is ${CURR} cycles" 68 | if [ "$PREV" -ne 0 ] 69 | then 70 | STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)" 71 | fi 72 | fi 73 | 74 | # Deploy stage for updating versions and tags 75 | - stage: deploy 76 | env: 77 | - STAGE=deploy 78 | - NAME=deploy 79 | script: 80 | - | 81 | bash << 'SCRIPT' 82 | set -ev 83 | # Find version defined in equeue.h 84 | EQUEUE_VERSION=$(grep -ox '#define EQUEUE_VERSION .*' equeue.h \ 85 | | cut -d ' ' -f3) 86 | EQUEUE_VERSION_MAJOR=$((0xffff & ($EQUEUE_VERSION >> 16))) 87 | EQUEUE_VERSION_MINOR=$((0xffff & ($EQUEUE_VERSION >> 0))) 88 | # Grab latest patch from repo tags, default to 0, needs finagling 89 | # to get past GitHub's pagination API 90 | PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$EQUEUE_VERSION_MAJOR.$EQUEUE_VERSION_MINOR. 
91 | PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \ 92 | | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \ 93 | || echo $PREV_URL) 94 | EQUEUE_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \ 95 | | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g") 96 | .captures[].string | tonumber) | max + 1' \ 97 | || echo 0) 98 | # We have our new version 99 | EQUEUE_VERSION="v$EQUEUE_VERSION_MAJOR.$EQUEUE_VERSION_MINOR.$EQUEUE_VERSION_PATCH" 100 | echo "VERSION $EQUEUE_VERSION" 101 | # Check that we're the most recent commit 102 | CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \ 103 | https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \ 104 | | jq -re '.sha') 105 | [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0 106 | # Create major branch (vN) 107 | git branch v$EQUEUE_VERSION_MAJOR HEAD 108 | git push https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \ 109 | v$EQUEUE_VERSION_MAJOR 110 | # Create patch version tag (vN.N.N) 111 | curl -f -u "$GEKY_BOT_RELEASES" -X POST \ 112 | https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs \ 113 | -d "{ 114 | \"ref\": \"refs/tags/$EQUEUE_VERSION\", 115 | \"sha\": \"$TRAVIS_COMMIT\" 116 | }" 117 | # Build release notes 118 | PREV=$(git tag --sort=-v:refname -l "v*" | head -1) 119 | if [ ! -z "$PREV" ] 120 | then 121 | echo "PREV $PREV" 122 | CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep) 123 | printf "CHANGES\n%s\n\n" "$CHANGES" 124 | fi 125 | # Create the release 126 | curl -f -u "$GEKY_BOT_RELEASES" -X POST \ 127 | https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \ 128 | -d "{ 129 | \"tag_name\": \"$EQUEUE_VERSION\", 130 | \"name\": \"${EQUEUE_VERSION%.0}\", 131 | \"draft\": $(jq -R 'endswith(".0")' <<< "$EQUEUE_VERSION"), 132 | \"body\": $(jq -sR '.' <<< "$CHANGES") 133 | }" #" 134 | SCRIPT 135 | 136 | # Manage statuses 137 | before_install: 138 | - | 139 | curl -u "$GEKY_BOT_STATUSES" -X POST \ 140 | https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ 141 | -d "{ 142 | \"context\": \"$STAGE/$NAME\", 143 | \"state\": \"pending\", 144 | \"description\": \"${STATUS:-In progress}\", 145 | \"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\" 146 | }" 147 | 148 | after_failure: 149 | - | 150 | curl -u "$GEKY_BOT_STATUSES" -X POST \ 151 | https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ 152 | -d "{ 153 | \"context\": \"$STAGE/$NAME\", 154 | \"state\": \"failure\", 155 | \"description\": \"${STATUS:-Failed}\", 156 | \"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\" 157 | }" 158 | 159 | after_success: 160 | - | 161 | curl -u "$GEKY_BOT_STATUSES" -X POST \ 162 | https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ 163 | -d "{ 164 | \"context\": \"$STAGE/$NAME\", 165 | \"state\": \"success\", 166 | \"description\": \"${STATUS:-Passed}\", 167 | \"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\" 168 | }" 169 | 170 | # Job control 171 | stages: 172 | - name: test 173 | - name: deploy 174 | if: branch = master AND type = push 175 | -------------------------------------------------------------------------------- /DESIGN.md: -------------------------------------------------------------------------------- 1 | ## The design of the equeue library ## 2 | 3 | The equeue library is designed to be a "Swiss Army knife" for scheduling 4 | on embedded systems. 
5 | 6 | Targeting embedded systems comes with several interesting constraints: 7 | - Low RAM footprint 8 | - Low ROM footprint 9 | - Power consumption 10 | - Interrupt contexts with jitter constraints 11 | 12 | However, the primary design goal of the equeue library is to be a reliable 13 | "Swiss Army knife", that is, provide a set of useful robust tools that come 14 | with the fewest surprises to the user, to encourage fast application 15 | development. 16 | 17 | To reach this goal, the equeue library prioritizes simplicity and puts in 18 | the extra effort to match user expectations when the behaviour may otherwise 19 | be undefined. 20 | 21 | ## Scheduler design ## 22 | 23 | The primary component of the equeue library is the scheduler itself. The 24 | scheduler went through several iterations before arriving at the current 25 | implementation. To start, here are several existing schedulers that were 26 | considered. 27 | 28 | #### Existing design - Sorted linked-list #### 29 | 30 | ``` 31 | +-----------+ +-----------+ +-----------+ +-----------+ +-----------+ 32 | ->| event t=1 |->| event t=1 |->| event t=4 |->| event t=5 |->| event t=5 | 33 | | | | | | | | | | | 34 | +-----------+ +-----------+ +-----------+ +-----------+ +-----------+ 35 | ``` 36 | 37 | Perhaps one of the simplest schedulers, a sorted linked list is difficult 38 | to beat in terms of simplicity. In fact, a sorted linked list started as the 39 | initial design of the equeue library. 40 | 41 | However, a sorted linked list has the largest cost for insertion. To maintain 42 | insertion order (what the user expects), insertion must iterate over all 43 | events in the same timeslice. For delayed events, this isn't that bad, but 44 | if the queue is used to defer events from interrupt context, non-constant 45 | jitter may be unacceptable. (Foreshadowing: if only we had some way to skip 46 | over events in the same timeslice.) 47 | 48 | #### Existing design - Unsorted linked-list #### 49 | 50 | ``` 51 | +-----------+ +-----------+ +-----------+ +-----------+ +-----------+ 52 | ->| event t=4 |->| event t=1 |->| event t=5 |->| event t=1 |->| event t=5 | 53 | --| |--| |--| |--| |->| | 54 | +-----------+ +-----------+ +-----------+ +-----------+ +-----------+ 55 | ``` 56 | 57 | The unsorted linked-list is very common in embedded systems when a queue is 58 | only used to defer work from interrupt contexts. Timing can be accomplished 59 | with a separate scheduler or hardware registers. A pointer to the end of the 60 | list allows insertions (while maintaining insertion order) in constant time. 61 | 62 | For the equeue library, an unsorted linked list could be extended to support 63 | timing by simply storing the time in each event, and iterating the entire 64 | list to find the event that will expire the soonest. This does solve the 65 | jitter problem of the sorted linked-list, but results in a large cost 66 | for dispatch, since the entire list has to be iterated over. 67 | 68 | #### Existing design - Heap #### 69 | 70 | ``` 71 | +-----------+ 72 | >| event t=5 | 73 | +-----------+/ | | 74 | ->| event t=1 | +-----------+ +-----------+ 75 | | | >| event t=5 | 76 | +-----------+\ +-----------+/ | | 77 | >| event t=1 | +-----------+ 78 | | | 79 | +-----------+\ +-----------+ 80 | >| event t=4 | 81 | | | 82 | +-----------+ 83 | ``` 84 | 85 | A very useful data structure for ordering elements, a heap is a tree 86 | where each parent is set to expire sooner than any of its children. 
87 | Consuming the next event requires iterating through the height of the 88 | tree to maintain this property. A heap provides O(log n) insertion and 89 | dispatch, which beats most other data structures in terms of algorithmic 90 | complexity. 91 | 92 | For scheduling on embedded systems, a heap has a few shortcomings. The 93 | O(log n) insertion cost is difficult to short-circuit for events without 94 | delays, putting the heap after the unsorted linked-list in terms of 95 | jitter. Additionally, a heap is inherently unstable, that is, dispatch 96 | does not maintain insertion order. These shortcomings led to the equeue 97 | library pursuing a simpler data structure. 98 | 99 | #### Existing design - Timing wheel #### 100 | 101 | ``` 102 | +-+ +-+ +-+ +-+ +-----------+ 103 | |_| |_|->|_| |_|->| event t=4 | 104 | |_| |_| |_| +-----------+ +-----------+ |_| | | 105 | |_| |_| |_|->| event t=5 |->| event t=5 | |_| +-----------+ 106 | |_| |_| |_| | | | | |_| 107 | |_|->|_| |_| +-----------+ +-----------+ |_| +-----------+ +-----------+ 108 | |_| |_| |_| |_|->| event t=1 |->| event t=1 | 109 | |_| |_| |_|------------------------------->|_| | | | | 110 | | | | | | | | | +-----------+ +-----------+ 111 | +-+ +-+ +-+ +-+ 112 | ``` 113 | 114 | Perhaps the most efficient scheduler in terms of runtime, a timing 115 | wheel is a sort of hash-table that uses the relative expiration time 116 | as an offset into an array of linked lists. A hierarchical timing 117 | wheel expands on this by maintaining layers of events at different 118 | granularities that "cascade" into the lower layers when a wheel 119 | has been exhausted. A hierarchical timing wheel is a very graceful 120 | data structure that naturally groups delays on their relative order 121 | of magnitude. 122 | 123 | Unfortunately, a timing wheel is a poor fit for embedded systems. 124 | The arrays that back the timing wheel can take a large amount of RAM, 125 | and the cascade operation can cause unexpected spikes in runtime for dispatch 126 | operations. 127 | 128 | #### The equeue scheduler #### 129 | 130 | The current scheduler prioritizes a small RAM footprint and a constant 131 | jitter for events without delays. The core data structure is the sorted 132 | linked-list, but with a nested linked list for all events in a single 133 | timeslice. An event can be inserted in a timeslice in constant-time 134 | in a similar manner to the unsorted linked-list. 135 | 136 | ``` 137 | +-----------+ +-----------+ +-----------+ 138 | ->| event t=1 |->| event t=4 |->| event t=5 | 139 | | | | | | | 140 | +-----------+ +-----------+ +-----------+ 141 | v v 142 | +-----------+ +-----------+ 143 | | event t=1 | | event t=5 | 144 | | | | | 145 | +-----------+ +-----------+ 146 | ``` 147 | 148 | This may seem like a small improvement, but this guarantees a constant-time 149 | insertion if no delays are used. This means that a system using the equeue 150 | library to simply defer events from interrupt context could find the number 151 | of instructions needed to insert an event, and put a hard upper-bound on the 152 | jitter the event queue introduces into the system. 153 | 
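To make the constant-time case concrete, here is a minimal sketch of the nested-list insert, with simplified, illustrative types (the real implementation in equeue.c additionally uses an overflow-safe tick comparison, back-references, and an irq-safe lock):

``` c
struct event {
    unsigned target;        // absolute expiration time
    struct event *next;     // next timeslice in the sorted outer list
    struct event *sibling;  // next event in this timeslice, unsorted
};

void enqueue(struct event **queue, struct event *e) {
    // walk the outer list, which holds one node per timeslice; with no
    // delays every event shares the head timeslice and this loop exits
    // immediately, giving constant-time insertion
    struct event **p = queue;
    while (*p && (*p)->target < e->target) {  // real code compares tick
        p = &(*p)->next;                      // differences to handle overflow
    }

    if (*p && (*p)->target == e->target) {
        // same timeslice: push onto the nested list in constant time
        e->next = (*p)->next;
        e->sibling = *p;
        e->sibling->next = 0;
    } else {
        // new timeslice: splice into the sorted outer list
        e->next = *p;
        e->sibling = 0;
    }
    *p = e;
}
```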
154 | A few other small improvements were added to the base design: 155 | 156 | - Each event contains a back-reference to any pointers in the data structure. 157 | This allows any event to be cancelled and removed from the data structure 158 | in constant-time, as opposed to iterating over the data structure to find 159 | which nested list the event lives in. Notice in the above diagram that 160 | there is only ever a single pointer referencing an event, so only one 161 | back-reference is needed. 162 | 163 | - Rather than store a pointer to the end of the list for each event, events 164 | are just pushed onto the head of the list. This means each timeslice is 165 | actually stored in reverse order of insertion, but the original order can 166 | be reconstructed by reversing the list during dispatch. Additionally, this 167 | reversal can be performed outside of any critical section, and only a 168 | constant-time operation is needed to get the current timeslice out of the 169 | event queue. 170 | 171 | #### Other considerations #### 172 | 173 | There were a few other considerations for the scheduler. Many features 174 | were omitted to best match the goal of stability, though it would be very 175 | interesting to see event queue designs that build on the core library with 176 | different features. 177 | 178 | - Lock-less data structures - Being primarily pointer based, it may be 179 | possible to implement the event queue using only atomic operations. While 180 | the potential improvement in contention is very appealing, lock-less 181 | algorithms are notoriously difficult to get right. The equeue library 182 | avoided lock-less data structures, prioritizing stability. 183 | 184 | - Tolerance-aware scheduling - In the context of embedded systems there has 185 | been some interesting work in schedulers that rearrange events to try to 186 | best meet the deadlines of events with different tolerances. However, this 187 | feature did not mesh well with the prioritization of stability and user 188 | experience. The equeue library does coalesce events in the same timeslice 189 | (1ms by default), but otherwise maintains insertion order. 190 | 191 | ## Allocator design ## 192 | 193 | The secondary component of the equeue library is the memory allocator. The 194 | initial uses of the event queue quickly identified the tricky problem of 195 | handling memory in interrupt contexts. Most embedded systems do not provide 196 | irq-safe synchronization primitives short of the jitter-inducing mechanism 197 | of disabling interrupts. For this reason, most system-wide heaps are 198 | off-limits in interrupt context. Other embedded systems may simply not 199 | provide a system-wide heap at all. 200 | 201 | With the goal of providing a "Swiss Army knife" for scheduling, the equeue 202 | library includes a built-in memory allocator to prevent the user from needing 203 | to roll their own. Fitting in with the rest of the event queue, the allocator 204 | is designed to be irq-safe, with the ability to provide constant jitter. The 205 | allocator can also manage variable sized events, giving the user more 206 | flexibility with the context they associate with an event. 207 | 208 | The most difficult constraint for the memory allocator is the constant 209 | runtime requirement. This leaves us with only a few allocators to work with. 210 | 211 | #### Existing design - Never free allocator #### 212 | 213 | ``` 214 | +-----------------------------------------+ 215 | | used memory | unused memory | 216 | | |-> | 217 | | | | 218 | +-----------------------------------------+ 219 | ``` 220 | 221 | Perhaps the simplest allocator, a never free heap is just a pointer into 222 | a slab of memory that indicates what has already been allocated. Allocation 223 | is just a pointer update and trivially constant-time. 
This allocator does have 224 | one glaring flaw though: you cannot free memory, making it nearly useless for 225 | an embedded system. Attempts to reclaim memory by wrapping at the slab 226 | boundary add work that breaks the constant runtime of the allocator. 227 | 228 | #### Existing design - Fixed sized allocator #### 229 | 230 | ``` 231 | +-----------+ +-----------+ +-----------+ +-----------+ +-----------+ 232 | ->| |->| |->| |->| |->| | 233 | | | | | | | | | | | 234 | +-----------+ +-----------+ +-----------+ +-----------+ +-----------+ 235 | ``` 236 | 237 | One of the most useful allocators is the fixed sized allocator. If we assume 238 | fixed-sized allocations, the chunks can be strung together in a simple 239 | linked-list that resides in unused memory. Allocation and freeing are each a 240 | single pointer update and easily constant-time. Fixed-size allocators are a 241 | fundamental building-block for designing higher-level memory allocators. The 242 | only downside is in its namesake: the user must decide the size of the 243 | memory allocations beforehand. 244 | 245 | 246 | #### The equeue allocator #### 247 | 248 | To accomplish the goal of an irq-safe allocator with variable sized events, 249 | the equeue allocator takes a hybrid approach. The primary allocator for the 250 | event queue is a set of fixed-size chunk allocators. These chunk allocators 251 | are fed by a slab of memory driven by a never-free allocator. The resulting 252 | allocator can allocate in constant-time, and provides variable sized events. 253 | 254 | ``` 255 | +-----------+ +-----------+ +-----------+ 256 | chunks ->| |->| |->| | 257 | +-----------+ | | | | 258 | v +-----------+ | | 259 | +-----------+ v | | 260 | | | +-----------+ | | 261 | +-----------+ | | +-----------+ 262 | v | | 263 | +-----------+ +-----------+ 264 | | | v 265 | +-----------+ +-----------+ 266 | | | 267 | | | 268 | +-----------+ 269 | 270 | +-----------------------------------------+ 271 | slab ->| used memory | unused memory | 272 | | |-> | 273 | | | | 274 | +-----------------------------------------+ 275 | ``` 276 | 277 | An attentive reader may note that the above allocator does, in fact, 278 | _not_ allocate in constant time. This is true, but the dependent variable 279 | is not the quantity of events, but the quantity of _sizes_ of events. 280 | This means that if the number of differently sized events is kept finite, 281 | the resulting runtime will also be finite. The best example is when there 282 | is only one event size, in which case the above allocator will devolve into 283 | a simple fixed-size allocator. This property makes this allocator unreasonable 284 | as a general purpose memory allocator, but useful for a scheduler, where most 285 | of the events are similar sizes, just unknown to the user. 286 | 
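Here is a minimal sketch of this hybrid approach, with illustrative names and assuming requests are at least sizeof(struct chunk) and suitably aligned (the real allocator in equeue.c additionally strings same-sized chunks on a nested sibling list, so the search costs one step per distinct size rather than per chunk, and protects everything with an irq-safe lock):

``` c
#include <stddef.h>

struct chunk {
    size_t size;           // chunk size, fixed when it is first carved
    struct chunk *next;
};

struct allocator {
    struct chunk *chunks;  // freed chunks, available for reuse
    unsigned char *slab;   // never-free bump pointer into unused memory
    size_t remaining;
};

void *alloc(struct allocator *a, size_t size) {
    // first try to reuse a freed chunk that is big enough
    for (struct chunk **p = &a->chunks; *p; p = &(*p)->next) {
        if ((*p)->size >= size) {
            struct chunk *c = *p;
            *p = c->next;
            return c;
        }
    }

    // otherwise carve a fresh chunk out of the slab in constant time
    if (a->remaining >= size) {
        struct chunk *c = (struct chunk *)a->slab;
        c->size = size;
        a->slab += size;
        a->remaining -= size;
        return c;
    }

    return 0;  // out of memory
}

void dealloc(struct allocator *a, struct chunk *c) {
    // chunks keep their size forever; freeing is a constant-time push
    c->next = a->chunks;
    a->chunks = c;
}
```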
287 | #### Other considerations #### 288 | 289 | There are a few other things to consider related to the memory allocator. 290 | 291 | - Fragmentation - A benefit of the equeue allocator is that there is zero 292 | internal fragmentation. Once a set of events is allocated, the events 293 | will never coalesce. This is valuable for an embedded system, where 294 | devices should run for years without accruing issues. However, it is 295 | up to the user to avoid external fragmentation. Once chunked, memory is 296 | not returned to the slab allocator, so if there is an issue with external 297 | fragmentation, it should be quickly noticeable, but this means that memory 298 | cannot be shared between events of different sizes. 299 | 300 | - Memory regions - The equeue library provides the rather useful operation of 301 | queue chaining. If a user needs more control over the memory backing events, 302 | the user can create multiple event queues with different memory regions, and 303 | chain them to a single dispatch context. 304 | 305 | - Measuring memory - Because of the nature of the allocator, the measurements 306 | that can be reported on memory usage are a bit limited. The most useful 307 | measurement is stored in the equeue struct. The `equeue.slab.size` variable 308 | contains the size of the slab that has never been touched. The internals 309 | of the event queue are subject to change, but this variable can be useful for 310 | evaluating memory consumption. 311 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 Christopher Haster 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a 4 | copy of this software and associated documentation files (the "Software"), 5 | to deal in the Software without restriction, including without limitation 6 | the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | and/or sell copies of the Software, and to permit persons to whom the 8 | Software is furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 16 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | DEALINGS IN THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGET = libequeue.a 2 | 3 | CC ?= gcc 4 | AR ?= ar 5 | SIZE ?= size 6 | 7 | SRC += $(wildcard *.c) 8 | OBJ := $(SRC:.c=.o) 9 | DEP := $(SRC:.c=.d) 10 | ASM := $(SRC:.c=.s) 11 | 12 | ifdef DEBUG 13 | override CFLAGS += -O0 -g3 14 | else 15 | override CFLAGS += -Os 16 | endif 17 | ifdef WORD 18 | override CFLAGS += -m$(WORD) 19 | endif 20 | override CFLAGS += -I. 
21 | override CFLAGS += -std=c99 22 | override CFLAGS += -Wall 23 | override CFLAGS += -D_XOPEN_SOURCE=600 24 | 25 | override LFLAGS += -pthread 26 | 27 | 28 | all: $(TARGET) 29 | 30 | test: tests/tests.o $(OBJ) 31 | $(CC) $(CFLAGS) $^ $(LFLAGS) -o tests/tests 32 | tests/tests 33 | 34 | prof: tests/prof.o $(OBJ) 35 | $(CC) $(CFLAGS) $^ $(LFLAGS) -o tests/prof 36 | tests/prof 37 | 38 | asm: $(ASM) 39 | 40 | size: $(OBJ) 41 | $(SIZE) -t $^ 42 | 43 | -include $(DEP) 44 | 45 | %.a: $(OBJ) 46 | $(AR) rcs $@ $^ 47 | 48 | %.o: %.c 49 | $(CC) -c -MMD $(CFLAGS) $< -o $@ 50 | 51 | %.s: %.c 52 | $(CC) -S $(CFLAGS) $< -o $@ 53 | 54 | clean: 55 | rm -f $(TARGET) 56 | rm -f tests/tests tests/tests.o tests/tests.d 57 | rm -f tests/prof tests/prof.o tests/prof.d 58 | rm -f $(OBJ) 59 | rm -f $(DEP) 60 | rm -f $(ASM) 61 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## The equeue library ## 2 | 3 | A "Swiss Army knife" for scheduling on embedded systems, the equeue library 4 | is a simple but powerful library for scheduling events on composable 5 | event queues. 6 | 7 | ``` c 8 | #include "equeue.h" 9 | #include <stdio.h> 10 | 11 | int main() { 12 | // creates a queue with space for 32 basic events 13 | equeue_t queue; 14 | equeue_create(&queue, 32*EQUEUE_EVENT_SIZE); 15 | 16 | // events can be simple callbacks 17 | equeue_call(&queue, print, "called immediately"); 18 | equeue_call_in(&queue, 2000, print, "called in 2 seconds"); 19 | equeue_call_every(&queue, 1000, print, "called every second"); 20 | 21 | // events are executed in equeue_dispatch 22 | equeue_dispatch(&queue, 3000); 23 | 24 | print("called after 3 seconds"); 25 | 26 | equeue_destroy(&queue); 27 | } 28 | ``` 29 | 30 | The equeue library can be used as a normal event loop, or it can be 31 | backgrounded on a single hardware timer or even another event loop. It 32 | is both thread and irq safe, and provides functions for easily composing 33 | multiple queues. 34 | 35 | The equeue library can act as a drop-in scheduler, provide synchronization 36 | between multiple threads, or just act as a mechanism for moving events 37 | out of interrupt contexts. 38 | 39 | ## Documentation ## 40 | 41 | The in-depth documentation on specific functions can be found in 42 | [equeue.h](equeue.h). 43 | 44 | The core of the equeue library is the `equeue_t` type which represents a 45 | single event queue, and the `equeue_dispatch` function which runs the equeue, 46 | providing the context for executing events. 47 | 48 | On top of this, `equeue_call`, `equeue_call_in`, and `equeue_call_every` 49 | provide easy methods for posting events to execute in the context of the 50 | `equeue_dispatch` function. 
51 | 52 | ``` c 53 | #include "equeue.h" 54 | #include "game.h" 55 | 56 | equeue_t queue; 57 | struct game game; 58 | 59 | // button_isr may be in interrupt context 60 | void button_isr(void) { 61 | equeue_call(&queue, game_button_update, &game); 62 | } 63 | 64 | // a simple user-interface framework 65 | int main() { 66 | equeue_create(&queue, 4096); 67 | game_create(&game); 68 | 69 | // call game_screen_update at 60 Hz 70 | equeue_call_every(&queue, 1000/60, game_screen_update, &game); 71 | 72 | // dispatch forever 73 | equeue_dispatch(&queue, -1); 74 | } 75 | ``` 76 | 77 | In addition to simple callbacks, an event can be manually allocated with 78 | `equeue_alloc` and posted with `equeue_post` to allow passing an arbitrary 79 | amount of context to the execution of the event. This memory is allocated out 80 | of the equeue's buffer, and dynamic memory can be completely avoided. 81 | 82 | The equeue allocator is designed to minimize jitter in interrupt contexts as 83 | well as avoid memory fragmentation on small devices. The allocator achieves 84 | both constant-runtime and zero-fragmentation for fixed-size events, however 85 | grows linearly as the quantity of differently-sized allocations increases. 86 | 87 | ``` c 88 | #include "equeue.h" 89 | 90 | equeue_t queue; 91 | 92 | // arbitrary data can be moved to a different context 93 | int enet_consume(void *buffer, int size) { 94 | if (size > 512) { 95 | size = 512; 96 | } 97 | 98 | void *data = equeue_alloc(&queue, 512); 99 | memcpy(data, buffer, size); 100 | equeue_post(&queue, handle_data_elsewhere, data); 101 | 102 | return size; 103 | } 104 | ``` 105 | 106 | Additionally, in-flight events can be cancelled with `equeue_cancel`. Events 107 | are given unique ids on post, allowing safe cancellation of expired events. 108 | 109 | ``` c 110 | #include "equeue.h" 111 | 112 | equeue_t queue; 113 | int sonar_value; 114 | int sonar_timeout_id; 115 | 116 | void sonar_isr(int value) { 117 | equeue_cancel(&queue, sonar_timeout_id); 118 | sonar_value = value; 119 | } 120 | 121 | void sonar_timeout(void *p) { 122 | sonar_value = -1; 123 | } 124 | 125 | void sonar_read(void) { 126 | sonar_timeout_id = equeue_call_in(&queue, 300, sonar_timeout, 0); 127 | sonar_start(); 128 | } 129 | ``` 130 | 131 | From an architectural standpoint, event queues easily align with module 132 | boundaries, where internal state can be implicitly synchronized through 133 | event dispatch. 134 | 135 | On platforms where multiple threads are unavailable, multiple modules 136 | can use independent event queues and still be composed through the 137 | `equeue_chain` function. 
138 | 139 | ``` c 140 | #include "equeue.h" 141 | 142 | // run a simultaneous localization and mapping loop in one queue 143 | struct slam { 144 | equeue_t queue; 145 | }; 146 | 147 | void slam_create(struct slam *s, equeue_t *target) { 148 | equeue_create(&s->queue, 4096); 149 | equeue_chain(&s->queue, target); 150 | equeue_call_every(&s->queue, 100, slam_filter, s); 151 | } 152 | 153 | // run a sonar with its own queue 154 | struct sonar { 155 | equeue_t queue; 156 | struct slam *slam; 157 | }; 158 | 159 | void sonar_create(struct sonar *s, equeue_t *target) { 160 | equeue_create(&s->queue, 64); 161 | equeue_chain(&s->queue, target); 162 | equeue_call_in(&s->queue, 5, sonar_update, s); 163 | } 164 | 165 | // all of the above queues can be combined into a single thread of execution 166 | int main() { 167 | equeue_t queue; 168 | equeue_create(&queue, 1024); 169 | 170 | struct sonar s1, s2, s3; 171 | sonar_create(&s1, &queue); 172 | sonar_create(&s2, &queue); 173 | sonar_create(&s3, &queue); 174 | 175 | struct slam slam; 176 | slam_create(&slam, &queue); 177 | 178 | // dispatches events from all of the modules 179 | equeue_dispatch(&queue, -1); 180 | } 181 | ``` 182 | 183 | ## Design ## 184 | 185 | See [DESIGN.md](DESIGN.md) for more information on the underlying design 186 | of the event queue and the tradeoffs related to trying to provide a 187 | simple and robust scheduler. 188 | 189 | ## Platform ## 190 | 191 | The equeue library has a minimal porting layer that is flexible depending 192 | on the requirements of the underlying platform. Platform specific declarations 193 | and more information can be found in [equeue_platform.h](equeue_platform.h). 194 | 195 | ## Tests ## 196 | 197 | The equeue library uses a set of local tests based on the POSIX implementation. 198 | 199 | Runtime tests are located in [tests.c](tests/tests.c): 200 | 201 | ``` bash 202 | make test 203 | ``` 204 | 205 | Profiling tests based on rdtsc are located in [prof.c](tests/prof.c): 206 | 207 | ``` bash 208 | make prof 209 | ``` 210 | 211 | To make profiling results more tangible, the profiler also supports percentage 212 | comparison with previous runs: 213 | ``` bash 214 | make prof | tee results.txt 215 | cat results.txt | make prof 216 | ``` 217 | 218 | -------------------------------------------------------------------------------- /equeue.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Flexible event queue for dispatching events 3 | * 4 | * Copyright (c) 2016 Christopher Haster 5 | * Distributed under the MIT license 6 | */ 7 | #include "equeue.h" 8 | 9 | #include <stdlib.h> 10 | #include <string.h> 11 | #include <stdint.h> 12 | 13 | // calculate the relative-difference between absolute times while 14 | // correctly handling overflow conditions 15 | static inline int equeue_tickdiff(unsigned a, unsigned b) { 16 | return (int)(unsigned)(a - b); 17 | } 18 | 19 | // calculate the relative-difference between absolute times, but 20 | // also clamp to zero, resulting in only non-negative values. 
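// (a branch-free clamp: the arithmetic shift below smears the sign bit of
// diff into a mask that is all ones when diff is negative and all zeros
// otherwise, so inverting the mask and ANDing clamps negative diffs to 0)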
21 | static inline int equeue_clampdiff(unsigned a, unsigned b) { 22 | int diff = equeue_tickdiff(a, b); 23 | return ~(diff >> (8*sizeof(int)-1)) & diff; 24 | } 25 | 26 | // Increment the unique id in an event, hiding the event from cancel 27 | static inline void equeue_incid(equeue_t *q, struct equeue_event *e) { 28 | e->id += 1; 29 | if ((e->id << q->npw2) == 0) { 30 | e->id = 1; 31 | } 32 | } 33 | 34 | 35 | // equeue lifetime management 36 | int equeue_create(equeue_t *q, size_t size) { 37 | // dynamically allocate the specified buffer 38 | void *buffer = malloc(size); 39 | if (!buffer) { 40 | return -1; 41 | } 42 | 43 | int err = equeue_create_inplace(q, size, buffer); 44 | q->allocated = buffer; 45 | return err; 46 | } 47 | 48 | int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) { 49 | // setup queue around provided buffer 50 | // ensure buffer and size are aligned 51 | q->buffer = (void *)(((uintptr_t) buffer + sizeof(void *) -1) & ~(sizeof(void *) -1)); 52 | size -= (char *) q->buffer - (char *) buffer; 53 | size &= ~(sizeof(void *) -1); 54 | 55 | q->allocated = 0; 56 | 57 | q->npw2 = 0; 58 | for (unsigned s = size; s; s >>= 1) { 59 | q->npw2++; 60 | } 61 | 62 | q->chunks = 0; 63 | q->slab.size = size; 64 | q->slab.data = q->buffer; 65 | 66 | q->queue = 0; 67 | q->tick = equeue_tick(); 68 | q->generation = 0; 69 | q->break_requested = false; 70 | 71 | q->background.active = false; 72 | q->background.update = 0; 73 | q->background.timer = 0; 74 | 75 | // initialize platform resources 76 | int err; 77 | err = equeue_sema_create(&q->eventsema); 78 | if (err < 0) { 79 | return err; 80 | } 81 | 82 | err = equeue_mutex_create(&q->queuelock); 83 | if (err < 0) { 84 | return err; 85 | } 86 | 87 | err = equeue_mutex_create(&q->memlock); 88 | if (err < 0) { 89 | return err; 90 | } 91 | 92 | return 0; 93 | } 94 | 95 | void equeue_destroy(equeue_t *q) { 96 | // call destructors on pending events 97 | for (struct equeue_event *es = q->queue; es; es = es->next) { 98 | for (struct equeue_event *e = es->sibling; e; e = e->sibling) { 99 | if (e->dtor) { 100 | e->dtor(e + 1); 101 | } 102 | } 103 | if (es->dtor) { 104 | es->dtor(es + 1); 105 | } 106 | } 107 | // notify background timer 108 | if (q->background.update) { 109 | q->background.update(q->background.timer, -1); 110 | } 111 | 112 | // clean up platform resources + memory 113 | equeue_mutex_destroy(&q->memlock); 114 | equeue_mutex_destroy(&q->queuelock); 115 | equeue_sema_destroy(&q->eventsema); 116 | free(q->allocated); 117 | } 118 | 119 | 120 | // equeue chunk allocation functions 121 | static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) { 122 | // add event overhead 123 | size += sizeof(struct equeue_event); 124 | size = (size + sizeof(void*)-1) & ~(sizeof(void*)-1); 125 | 126 | equeue_mutex_lock(&q->memlock); 127 | 128 | // check if a good chunk is available 129 | for (struct equeue_event **p = &q->chunks; *p; p = &(*p)->next) { 130 | if ((*p)->size >= size) { 131 | struct equeue_event *e = *p; 132 | if (e->sibling) { 133 | *p = e->sibling; 134 | (*p)->next = e->next; 135 | } else { 136 | *p = e->next; 137 | } 138 | 139 | equeue_mutex_unlock(&q->memlock); 140 | return e; 141 | } 142 | } 143 | 144 | // otherwise allocate a new chunk out of the slab 145 | if (q->slab.size >= size) { 146 | struct equeue_event *e = (struct equeue_event *)q->slab.data; 147 | q->slab.data += size; 148 | q->slab.size -= size; 149 | e->size = size; 150 | e->id = 1; 151 | 152 | equeue_mutex_unlock(&q->memlock); 153 | return e; 154 
| } 155 | 156 | equeue_mutex_unlock(&q->memlock); 157 | return 0; 158 | } 159 | 160 | static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) { 161 | equeue_mutex_lock(&q->memlock); 162 | 163 | // stick chunk into list of chunks 164 | struct equeue_event **p = &q->chunks; 165 | while (*p && (*p)->size < e->size) { 166 | p = &(*p)->next; 167 | } 168 | 169 | if (*p && (*p)->size == e->size) { 170 | e->sibling = *p; 171 | e->next = (*p)->next; 172 | } else { 173 | e->sibling = 0; 174 | e->next = *p; 175 | } 176 | *p = e; 177 | 178 | equeue_mutex_unlock(&q->memlock); 179 | } 180 | 181 | void *equeue_alloc(equeue_t *q, size_t size) { 182 | struct equeue_event *e = equeue_mem_alloc(q, size); 183 | if (!e) { 184 | return 0; 185 | } 186 | 187 | e->target = 0; 188 | e->period = -1; 189 | e->dtor = 0; 190 | 191 | return e + 1; 192 | } 193 | 194 | void equeue_dealloc(equeue_t *q, void *p) { 195 | struct equeue_event *e = (struct equeue_event*)p - 1; 196 | 197 | if (e->dtor) { 198 | e->dtor(e+1); 199 | } 200 | 201 | equeue_mem_dealloc(q, e); 202 | } 203 | 204 | 205 | // equeue scheduling functions 206 | static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) { 207 | // setup event and hash local id with buffer offset for unique id 208 | int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer); 209 | e->target = tick + equeue_clampdiff(e->target, tick); 210 | e->generation = q->generation; 211 | 212 | equeue_mutex_lock(&q->queuelock); 213 | 214 | // find the event slot 215 | struct equeue_event **p = &q->queue; 216 | while (*p && equeue_tickdiff((*p)->target, e->target) < 0) { 217 | p = &(*p)->next; 218 | } 219 | 220 | // insert at head in slot 221 | if (*p && (*p)->target == e->target) { 222 | e->next = (*p)->next; 223 | if (e->next) { 224 | e->next->ref = &e->next; 225 | } 226 | e->sibling = *p; 227 | e->sibling->next = 0; 228 | e->sibling->ref = &e->sibling; 229 | } else { 230 | e->next = *p; 231 | if (e->next) { 232 | e->next->ref = &e->next; 233 | } 234 | 235 | e->sibling = 0; 236 | } 237 | 238 | *p = e; 239 | e->ref = p; 240 | 241 | // notify background timer 242 | if ((q->background.update && q->background.active) && 243 | (q->queue == e && !e->sibling)) { 244 | q->background.update(q->background.timer, 245 | equeue_clampdiff(e->target, tick)); 246 | } 247 | 248 | equeue_mutex_unlock(&q->queuelock); 249 | 250 | return id; 251 | } 252 | 253 | static struct equeue_event *equeue_unqueue(equeue_t *q, int id) { 254 | // decode event from unique id and check that the local id matches 255 | struct equeue_event *e = (struct equeue_event *) 256 | &q->buffer[id & ((1 << q->npw2)-1)]; 257 | 258 | equeue_mutex_lock(&q->queuelock); 259 | if (e->id != id >> q->npw2) { 260 | equeue_mutex_unlock(&q->queuelock); 261 | return 0; 262 | } 263 | 264 | // clear the event and check if already in-flight 265 | e->cb = 0; 266 | e->period = -1; 267 | 268 | int diff = equeue_tickdiff(e->target, q->tick); 269 | if (diff < 0 || (diff == 0 && e->generation != q->generation)) { 270 | equeue_mutex_unlock(&q->queuelock); 271 | return 0; 272 | } 273 | 274 | // disentangle from queue 275 | if (e->sibling) { 276 | e->sibling->next = e->next; 277 | if (e->sibling->next) { 278 | e->sibling->next->ref = &e->sibling->next; 279 | } 280 | 281 | *e->ref = e->sibling; 282 | e->sibling->ref = e->ref; 283 | } else { 284 | *e->ref = e->next; 285 | if (e->next) { 286 | e->next->ref = e->ref; 287 | } 288 | } 289 | 290 | equeue_incid(q, e); 291 | equeue_mutex_unlock(&q->queuelock); 292 | 293 | return e; 
294 | } 295 | 296 | static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) { 297 | equeue_mutex_lock(&q->queuelock); 298 | 299 | // find all expired events and mark a new generation 300 | q->generation += 1; 301 | if (equeue_tickdiff(q->tick, target) <= 0) { 302 | q->tick = target; 303 | } 304 | 305 | struct equeue_event *head = q->queue; 306 | struct equeue_event **p = &head; 307 | while (*p && equeue_tickdiff((*p)->target, target) <= 0) { 308 | p = &(*p)->next; 309 | } 310 | 311 | q->queue = *p; 312 | if (q->queue) { 313 | q->queue->ref = &q->queue; 314 | } 315 | 316 | *p = 0; 317 | 318 | equeue_mutex_unlock(&q->queuelock); 319 | 320 | // reverse and flatten each slot to match insertion order 321 | struct equeue_event **tail = &head; 322 | struct equeue_event *ess = head; 323 | while (ess) { 324 | struct equeue_event *es = ess; 325 | ess = es->next; 326 | 327 | struct equeue_event *prev = 0; 328 | for (struct equeue_event *e = es; e; e = e->sibling) { 329 | e->next = prev; 330 | prev = e; 331 | } 332 | 333 | *tail = prev; 334 | tail = &es->next; 335 | } 336 | 337 | return head; 338 | } 339 | 340 | int equeue_post(equeue_t *q, void (*cb)(void*), void *p) { 341 | struct equeue_event *e = (struct equeue_event*)p - 1; 342 | unsigned tick = equeue_tick(); 343 | e->cb = cb; 344 | e->target = tick + e->target; 345 | 346 | int id = equeue_enqueue(q, e, tick); 347 | equeue_sema_signal(&q->eventsema); 348 | return id; 349 | } 350 | 351 | void equeue_cancel(equeue_t *q, int id) { 352 | if (!id) { 353 | return; 354 | } 355 | 356 | struct equeue_event *e = equeue_unqueue(q, id); 357 | if (e) { 358 | equeue_dealloc(q, e + 1); 359 | } 360 | } 361 | 362 | int equeue_timeleft(equeue_t *q, int id) { 363 | int ret = -1; 364 | 365 | if (!id) { 366 | return -1; 367 | } 368 | 369 | // decode event from unique id and check that the local id matches 370 | struct equeue_event *e = (struct equeue_event *) 371 | &q->buffer[id & ((1 << q->npw2)-1)]; 372 | 373 | equeue_mutex_lock(&q->queuelock); 374 | if (e->id == id >> q->npw2) { 375 | ret = equeue_clampdiff(e->target, equeue_tick()); 376 | } 377 | equeue_mutex_unlock(&q->queuelock); 378 | return ret; 379 | } 380 | 381 | void equeue_break(equeue_t *q) { 382 | equeue_mutex_lock(&q->queuelock); 383 | q->break_requested = true; 384 | equeue_mutex_unlock(&q->queuelock); 385 | equeue_sema_signal(&q->eventsema); 386 | } 387 | 388 | void equeue_dispatch(equeue_t *q, int ms) { 389 | unsigned tick = equeue_tick(); 390 | unsigned timeout = tick + ms; 391 | q->background.active = false; 392 | 393 | while (1) { 394 | // collect all the available events and next deadline 395 | struct equeue_event *es = equeue_dequeue(q, tick); 396 | 397 | // dispatch events 398 | while (es) { 399 | struct equeue_event *e = es; 400 | es = e->next; 401 | 402 | // actually dispatch the callbacks 403 | void (*cb)(void *) = e->cb; 404 | if (cb) { 405 | cb(e + 1); 406 | } 407 | 408 | // reenqueue periodic events or deallocate 409 | if (e->period >= 0) { 410 | e->target += e->period; 411 | equeue_enqueue(q, e, equeue_tick()); 412 | } else { 413 | equeue_incid(q, e); 414 | equeue_dealloc(q, e+1); 415 | } 416 | } 417 | 418 | int deadline = -1; 419 | tick = equeue_tick(); 420 | 421 | // check if we should stop dispatching soon 422 | if (ms >= 0) { 423 | deadline = equeue_tickdiff(timeout, tick); 424 | if (deadline <= 0) { 425 | // update background timer if necessary 426 | if (q->background.update) { 427 | equeue_mutex_lock(&q->queuelock); 428 | if (q->background.update && q->queue) { 
429 | q->background.update(q->background.timer, 430 | equeue_clampdiff(q->queue->target, tick)); 431 | } 432 | q->background.active = true; 433 | equeue_mutex_unlock(&q->queuelock); 434 | } 435 | q->break_requested = false; 436 | return; 437 | } 438 | } 439 | 440 | // find closest deadline 441 | equeue_mutex_lock(&q->queuelock); 442 | if (q->queue) { 443 | int diff = equeue_clampdiff(q->queue->target, tick); 444 | if ((unsigned)diff < (unsigned)deadline) { 445 | deadline = diff; 446 | } 447 | } 448 | equeue_mutex_unlock(&q->queuelock); 449 | 450 | // wait for events 451 | equeue_sema_wait(&q->eventsema, deadline); 452 | 453 | // check if we were notified to break out of dispatch 454 | if (q->break_requested) { 455 | equeue_mutex_lock(&q->queuelock); 456 | if (q->break_requested) { 457 | q->break_requested = false; 458 | equeue_mutex_unlock(&q->queuelock); 459 | return; 460 | } 461 | equeue_mutex_unlock(&q->queuelock); 462 | } 463 | 464 | // update tick for next iteration 465 | tick = equeue_tick(); 466 | } 467 | } 468 | 469 | 470 | // event functions 471 | void equeue_event_delay(void *p, int ms) { 472 | struct equeue_event *e = (struct equeue_event*)p - 1; 473 | e->target = ms; 474 | } 475 | 476 | void equeue_event_period(void *p, int ms) { 477 | struct equeue_event *e = (struct equeue_event*)p - 1; 478 | e->period = ms; 479 | } 480 | 481 | void equeue_event_dtor(void *p, void (*dtor)(void *)) { 482 | struct equeue_event *e = (struct equeue_event*)p - 1; 483 | e->dtor = dtor; 484 | } 485 | 486 | 487 | // simple callbacks 488 | struct ecallback { 489 | void (*cb)(void*); 490 | void *data; 491 | }; 492 | 493 | static void ecallback_dispatch(void *p) { 494 | struct ecallback *e = (struct ecallback*)p; 495 | e->cb(e->data); 496 | } 497 | 498 | int equeue_call(equeue_t *q, void (*cb)(void*), void *data) { 499 | struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback)); 500 | if (!e) { 501 | return 0; 502 | } 503 | 504 | e->cb = cb; 505 | e->data = data; 506 | return equeue_post(q, ecallback_dispatch, e); 507 | } 508 | 509 | int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) { 510 | struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback)); 511 | if (!e) { 512 | return 0; 513 | } 514 | 515 | equeue_event_delay(e, ms); 516 | e->cb = cb; 517 | e->data = data; 518 | return equeue_post(q, ecallback_dispatch, e); 519 | } 520 | 521 | int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) { 522 | struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback)); 523 | if (!e) { 524 | return 0; 525 | } 526 | 527 | equeue_event_delay(e, ms); 528 | equeue_event_period(e, ms); 529 | e->cb = cb; 530 | e->data = data; 531 | return equeue_post(q, ecallback_dispatch, e); 532 | } 533 | 534 | 535 | // backgrounding 536 | void equeue_background(equeue_t *q, 537 | void (*update)(void *timer, int ms), void *timer) { 538 | equeue_mutex_lock(&q->queuelock); 539 | if (q->background.update) { 540 | q->background.update(q->background.timer, -1); 541 | } 542 | 543 | q->background.update = update; 544 | q->background.timer = timer; 545 | 546 | if (q->background.update && q->queue) { 547 | q->background.update(q->background.timer, 548 | equeue_clampdiff(q->queue->target, equeue_tick())); 549 | } 550 | q->background.active = true; 551 | equeue_mutex_unlock(&q->queuelock); 552 | } 553 | 554 | struct equeue_chain_context { 555 | equeue_t *q; 556 | equeue_t *target; 557 | int id; 558 | }; 559 | 560 | static void equeue_chain_dispatch(void *p) { 561 | equeue_dispatch((equeue_t 
*)p, 0); 562 | } 563 | 564 | static void equeue_chain_update(void *p, int ms) { 565 | struct equeue_chain_context *c = (struct equeue_chain_context *)p; 566 | equeue_cancel(c->target, c->id); 567 | 568 | if (ms >= 0) { 569 | c->id = equeue_call_in(c->target, ms, equeue_chain_dispatch, c->q); 570 | } else { 571 | equeue_dealloc(c->q, c); 572 | } 573 | } 574 | 575 | int equeue_chain(equeue_t *q, equeue_t *target) { 576 | if (!target) { 577 | equeue_background(q, 0, 0); 578 | return 0; 579 | } 580 | 581 | struct equeue_chain_context *c = equeue_alloc(q, 582 | sizeof(struct equeue_chain_context)); 583 | if (!c) { 584 | return -1; 585 | } 586 | 587 | c->q = q; 588 | c->target = target; 589 | c->id = 0; 590 | 591 | equeue_background(q, equeue_chain_update, c); 592 | return 0; 593 | } 594 | -------------------------------------------------------------------------------- /equeue.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Flexible event queue for dispatching events 3 | * 4 | * Copyright (c) 2016 Christopher Haster 5 | * Distributed under the MIT license 6 | */ 7 | #ifndef EQUEUE_H 8 | #define EQUEUE_H 9 | 10 | #ifdef __cplusplus 11 | extern "C" { 12 | #endif 13 | 14 | // Platform specific files 15 | #include "equeue_platform.h" 16 | 17 | #include <stddef.h> 18 | #include <stdint.h> 19 | 20 | 21 | // Version info 22 | // Major (top 16 bits), incremented on backwards incompatible changes 23 | // Minor (bottom 16 bits), incremented on feature additions 24 | #define EQUEUE_VERSION 0x00010001 25 | #define EQUEUE_VERSION_MAJOR (0xffff & (EQUEUE_VERSION >> 16)) 26 | #define EQUEUE_VERSION_MINOR (0xffff & (EQUEUE_VERSION >> 0)) 27 | 28 | 29 | // The minimum size of an event 30 | // This size is guaranteed to fit events created by equeue_call 31 | #define EQUEUE_EVENT_SIZE (sizeof(struct equeue_event) + 2*sizeof(void*)) 32 | 33 | // Internal event structure 34 | struct equeue_event { 35 | unsigned size; 36 | uint8_t id; 37 | uint8_t generation; 38 | 39 | struct equeue_event *next; 40 | struct equeue_event *sibling; 41 | struct equeue_event **ref; 42 | 43 | unsigned target; 44 | int period; 45 | void (*dtor)(void *); 46 | 47 | void (*cb)(void *); 48 | // data follows 49 | }; 50 | 51 | // Event queue structure 52 | typedef struct equeue { 53 | struct equeue_event *queue; 54 | unsigned tick; 55 | bool break_requested; 56 | uint8_t generation; 57 | 58 | unsigned char *buffer; 59 | unsigned npw2; 60 | void *allocated; 61 | 62 | struct equeue_event *chunks; 63 | struct equeue_slab { 64 | size_t size; 65 | unsigned char *data; 66 | } slab; 67 | 68 | struct equeue_background { 69 | bool active; 70 | void (*update)(void *timer, int ms); 71 | void *timer; 72 | } background; 73 | 74 | equeue_sema_t eventsema; 75 | equeue_mutex_t queuelock; 76 | equeue_mutex_t memlock; 77 | } equeue_t; 78 | 79 | 80 | // Queue lifetime operations 81 | // 82 | // Creates and destroys an event queue. The event queue either allocates a 83 | // buffer of the specified size with malloc or uses a user provided buffer 84 | // if constructed with equeue_create_inplace. 85 | // 86 | // If the event queue creation fails, equeue_create returns a negative, 87 | // platform-specific error code. 88 | int equeue_create(equeue_t *queue, size_t size); 89 | int equeue_create_inplace(equeue_t *queue, size_t size, void *buffer); 90 | void equeue_destroy(equeue_t *queue); 91 | 
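// For example, a queue backed by a static buffer avoids malloc entirely
// (a sketch):
//
//     static unsigned char buffer[32*EQUEUE_EVENT_SIZE];
//     equeue_t queue;
//     int err = equeue_create_inplace(&queue, sizeof(buffer), buffer);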
92 | // Dispatch events 93 | // 94 | // Executes events until the specified milliseconds have passed. If ms is 95 | // negative, equeue_dispatch will dispatch events indefinitely or until 96 | // equeue_break is called on this queue. 97 | // 98 | // When called with a finite timeout, the equeue_dispatch function is 99 | // guaranteed to terminate. When called with a timeout of 0, 100 | // equeue_dispatch does not wait and is irq safe. 101 | void equeue_dispatch(equeue_t *queue, int ms); 102 | 103 | // Break out of a running event loop 104 | // 105 | // Forces the specified event queue's dispatch loop to terminate. Pending 106 | // events may finish executing, but no new events will be executed. 107 | void equeue_break(equeue_t *queue); 108 | 109 | // Simple event calls 110 | // 111 | // The specified callback will be executed in the context of the event queue's 112 | // dispatch loop. When the callback is executed depends on the call function. 113 | // 114 | // equeue_call - Immediately post an event to the queue 115 | // equeue_call_in - Post an event after a specified time in milliseconds 116 | // equeue_call_every - Post an event periodically, with a period in milliseconds 117 | // 118 | // All equeue_call functions are irq safe and can act as a mechanism for 119 | // moving events out of irq contexts. 120 | // 121 | // The return value is a unique id that represents the posted event and can 122 | // be passed to equeue_cancel. If there is not enough memory to allocate the 123 | // event, equeue_call returns an id of 0. 124 | int equeue_call(equeue_t *queue, void (*cb)(void *), void *data); 125 | int equeue_call_in(equeue_t *queue, int ms, void (*cb)(void *), void *data); 126 | int equeue_call_every(equeue_t *queue, int ms, void (*cb)(void *), void *data); 127 | 128 | // Allocate memory for events 129 | // 130 | // The equeue_alloc function allocates an event that can be manually dispatched 131 | // with equeue_post. The equeue_dealloc function may be used to free an event 132 | // that has not been posted. Once posted, an event's memory is managed by the 133 | // event queue and should not be deallocated. 134 | // 135 | // Both equeue_alloc and equeue_dealloc are irq safe. 136 | // 137 | // The equeue allocator is designed to minimize jitter in interrupt contexts as 138 | // well as avoid memory fragmentation on small devices. The allocator achieves 139 | // both constant-runtime and zero-fragmentation for fixed-size events, however 140 | // grows linearly as the quantity of differently sized allocations increases. 141 | // 142 | // The equeue_alloc function returns a pointer to the event's allocated memory 143 | // and acts as a handle to the underlying event. If there is not enough memory 144 | // to allocate the event, equeue_alloc returns null. 145 | void *equeue_alloc(equeue_t *queue, size_t size); 146 | void equeue_dealloc(equeue_t *queue, void *event); 147 | 148 | // Configure an allocated event 149 | // 150 | // equeue_event_delay - Millisecond delay before dispatching an event 151 | // equeue_event_period - Millisecond period for repeatedly dispatching an event 152 | // equeue_event_dtor - Destructor to run when the event is deallocated 153 | void equeue_event_delay(void *event, int ms); 154 | void equeue_event_period(void *event, int ms); 155 | void equeue_event_dtor(void *event, void (*dtor)(void *)); 156 | 
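// For example, allocating an event with extra context might look like this
// (a sketch; struct report, read_sensor, report_cb, and report_dtor are
// hypothetical user code, and equeue_post is described below):
//
//     struct report *r = equeue_alloc(&queue, sizeof(struct report));
//     if (r) {
//         r->value = read_sensor();
//         equeue_event_delay(r, 100);
//         equeue_event_dtor(r, report_dtor);
//         equeue_post(&queue, report_cb, r);
//     }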
157 | // Post an event onto the event queue 158 | // 159 | // The equeue_post function takes a callback and a pointer to an event 160 | // allocated by equeue_alloc. The specified callback will be executed in the 161 | // context of the event queue's dispatch loop with the allocated event 162 | // as its argument. 163 | // 164 | // The equeue_post function is irq safe and can act as a mechanism for 165 | // moving events out of irq contexts. 166 | // 167 | // The return value is a unique id that represents the posted event and can 168 | // be passed to equeue_cancel. 169 | int equeue_post(equeue_t *queue, void (*cb)(void *), void *event); 170 | 171 | // Cancel an in-flight event 172 | // 173 | // Attempts to cancel an event referenced by the unique id returned from 174 | // equeue_call or equeue_post. It is safe to call equeue_cancel after an event 175 | // has already been dispatched. 176 | // 177 | // The equeue_cancel function is irq safe. 178 | // 179 | // If called while the event queue's dispatch loop is active, equeue_cancel 180 | // does not guarantee that the event will not execute after it returns as 181 | // the event may have already begun executing. 182 | void equeue_cancel(equeue_t *queue, int id); 183 | 184 | // Query how much time is left for a delayed event 185 | // 186 | // If the event is delayed, this function can be used to query how much time 187 | // is left until the event is due to be dispatched. 188 | // 189 | // This function is irq safe. 190 | // 191 | int equeue_timeleft(equeue_t *q, int id); 192 | 193 | // Background an event queue onto a single-shot timer 194 | // 195 | // The provided update function will be called to indicate when the queue 196 | // should be dispatched. A negative timeout will be passed to the update 197 | // function when the timer is no longer needed. 198 | // 199 | // Passing a null update function disables the existing timer. 200 | // 201 | // The equeue_background function allows an event queue to take advantage 202 | // of hardware timers or even other event loops, allowing an event queue to 203 | // be effectively backgrounded. 204 | void equeue_background(equeue_t *queue, 205 | void (*update)(void *timer, int ms), void *timer); 206 | 
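// For example, backgrounding a queue onto a single-shot hardware timer might
// look like this (a sketch; timer_arm, timer_cancel, and hw_timer are
// hypothetical platform code, not part of this library):
//
//     void timer_fired(void) {
//         equeue_dispatch(&queue, 0); // non-blocking, irq safe
//     }
//
//     void timer_update(void *timer, int ms) {
//         if (ms >= 0) {
//             timer_arm(timer, ms, timer_fired);
//         } else {
//             timer_cancel(timer);
//         }
//     }
//
//     equeue_background(&queue, timer_update, &hw_timer);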
207 | // Chain an event queue onto another event queue 208 | // 209 | // After chaining a queue to a target, calling equeue_dispatch on the 210 | // target queue will also dispatch events from this queue. The queues 211 | // use their own buffers and events must be managed independently. 212 | // 213 | // Passing a null queue as the target will unchain the existing queue. 214 | // 215 | // The equeue_chain function allows multiple equeues to be composed, sharing 216 | // the context of a dispatch loop while still being managed independently. 217 | // 218 | // If the event queue chaining fails, equeue_chain returns a negative, 219 | // platform-specific error code. 220 | int equeue_chain(equeue_t *queue, equeue_t *target); 221 | 222 | 223 | #ifdef __cplusplus 224 | } 225 | #endif 226 | 227 | #endif 228 | -------------------------------------------------------------------------------- /equeue_freertos.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Implementation for FreeRTOS 3 | * https://www.freertos.org 4 | * 5 | * Copyright (c) 2016 Christopher Haster 6 | * Distributed under the MIT license 7 | */ 8 | #include "equeue_platform.h" 9 | 10 | #if defined(EQUEUE_PLATFORM_FREERTOS) 11 | 12 | #include "task.h" 13 | 14 | 15 | // Ticker operations 16 | unsigned equeue_tick(void) { 17 | return xTaskGetTickCountFromISR() * portTICK_PERIOD_MS; 18 | } 19 | 20 | 21 | // Mutex operations 22 | int equeue_mutex_create(equeue_mutex_t *m) { return 0; } 23 | void equeue_mutex_destroy(equeue_mutex_t *m) { } 24 | 25 | void equeue_mutex_lock(equeue_mutex_t *m) { 26 | *m = taskENTER_CRITICAL_FROM_ISR(); 27 | } 28 | 29 | void equeue_mutex_unlock(equeue_mutex_t *m) { 30 | taskEXIT_CRITICAL_FROM_ISR(*m); 31 | } 32 | 33 | 34 | // Semaphore operations 35 | int equeue_sema_create(equeue_sema_t *s) { 36 | s->handle = xSemaphoreCreateBinaryStatic(&s->buffer); 37 | return s->handle ? 0 : -1; 38 | } 39 | 40 | void equeue_sema_destroy(equeue_sema_t *s) { 41 | vSemaphoreDelete(s->handle); 42 | } 43 | 44 | void equeue_sema_signal(equeue_sema_t *s) { 45 | xSemaphoreGiveFromISR(s->handle, NULL); 46 | } 47 | 48 | bool equeue_sema_wait(equeue_sema_t *s, int ms) { 49 | if (ms < 0) { 50 | ms = portMAX_DELAY; 51 | } else { 52 | ms = ms / portTICK_PERIOD_MS; 53 | } 54 | 55 | return xSemaphoreTake(s->handle, ms); 56 | } 57 | 58 | 59 | #endif 60 | -------------------------------------------------------------------------------- /equeue_mbed.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Implementation for the mbed library 3 | * https://github.com/mbedmicro/mbed 4 | * 5 | * Copyright (c) 2016 Christopher Haster 6 | * Distributed under the MIT license 7 | */ 8 | #include "equeue_platform.h" 9 | 10 | #if defined(EQUEUE_PLATFORM_MBED) 11 | 12 | #include <string.h> 13 | #include <new> 14 | #include "platform/mbed_critical.h" 15 | #include "drivers/Timer.h" 16 | #include "drivers/Ticker.h" 17 | #include "drivers/Timeout.h" 18 | #include "drivers/LowPowerTimeout.h" 19 | #include "drivers/LowPowerTicker.h" 20 | #include "drivers/LowPowerTimer.h" 21 | 22 | using namespace mbed; 23 | 24 | // Ticker operations 25 | #if MBED_CONF_RTOS_API_PRESENT 26 | 27 | #include "rtos/Kernel.h" 28 | #include "platform/mbed_os_timer.h" 29 | 30 | static bool equeue_tick_inited = false; 31 | 32 | static void equeue_tick_init() { 33 | #if defined MBED_TICKLESS || !MBED_CONF_RTOS_PRESENT 34 | mbed::internal::init_os_timer(); 35 | #endif 36 | 37 | equeue_tick_inited = true; 38 | } 39 | 40 | unsigned equeue_tick() { 41 | if (!equeue_tick_inited) { 42 | equeue_tick_init(); 43 | } 44 | 45 | #if defined MBED_TICKLESS || !MBED_CONF_RTOS_PRESENT 46 | // It is not safe to call get_ms_count from ISRs, both 47 | // because documentation says so, and because it will give 48 | // a stale value from the RTOS if the interrupt has woken 49 | // us out of sleep - the RTOS will not have updated its 50 | // ticks yet. 51 | if (core_util_is_isr_active()) { 52 | // And the documentation further says that this 53 | // should not be called from critical sections, for 54 | // performance reasons, but I don't have a good 55 | // current alternative! 
56 |         return mbed::internal::os_timer->get_time() / 1000;
57 |     } else {
58 |         return rtos::Kernel::get_ms_count();
59 |     }
60 | #else
61 |     // And this is the legacy behaviour - if running in
62 |     // non-tickless mode, this works fine, despite Mbed OS
63 |     // documentation saying no. (Most recent CMSIS-RTOS
64 |     // permits `osKernelGetTickCount` from IRQ, and our
65 |     // `rtos::Kernel` wrapper copes too).
66 |     return rtos::Kernel::get_ms_count();
67 | #endif
68 | }
69 | 
70 | #else
71 | 
72 | #if MBED_CONF_EVENTS_USE_LOWPOWER_TIMER_TICKER
73 | 
74 | #define ALIAS_TIMER LowPowerTimer
75 | #define ALIAS_TICKER LowPowerTicker
76 | #define ALIAS_TIMEOUT LowPowerTimeout
77 | #else
78 | #define ALIAS_TIMER Timer
79 | #define ALIAS_TICKER Ticker
80 | #define ALIAS_TIMEOUT Timeout
81 | #endif
82 | 
83 | static bool equeue_tick_inited = false;
84 | static volatile unsigned equeue_minutes = 0;
85 | static unsigned equeue_timer[
86 |     (sizeof(ALIAS_TIMER)+sizeof(unsigned)-1)/sizeof(unsigned)];
87 | static unsigned equeue_ticker[
88 |     (sizeof(ALIAS_TICKER)+sizeof(unsigned)-1)/sizeof(unsigned)];
89 | 
90 | static void equeue_tick_update() {
91 |     equeue_minutes += reinterpret_cast<ALIAS_TIMER *>(equeue_timer)->read_ms();
92 |     reinterpret_cast<ALIAS_TIMER *>(equeue_timer)->reset();
93 | }
94 | 
95 | static void equeue_tick_init() {
96 |     MBED_STATIC_ASSERT(sizeof(equeue_timer) >= sizeof(ALIAS_TIMER),
97 |             "The equeue_timer buffer must fit the class Timer");
98 |     MBED_STATIC_ASSERT(sizeof(equeue_ticker) >= sizeof(ALIAS_TICKER),
99 |             "The equeue_ticker buffer must fit the class Ticker");
100 |     ALIAS_TIMER *timer = new (equeue_timer) ALIAS_TIMER;
101 |     ALIAS_TICKER *ticker = new (equeue_ticker) ALIAS_TICKER;
102 | 
103 |     equeue_minutes = 0;
104 |     timer->start();
105 |     ticker->attach_us(equeue_tick_update, 1000 << 16);
106 | 
107 |     equeue_tick_inited = true;
108 | }
109 | 
110 | unsigned equeue_tick() {
111 |     if (!equeue_tick_inited) {
112 |         equeue_tick_init();
113 |     }
114 | 
115 |     unsigned minutes;
116 |     unsigned ms;
117 | 
118 |     do {
119 |         minutes = equeue_minutes;
120 |         ms = reinterpret_cast<ALIAS_TIMER *>(equeue_timer)->read_ms();
121 |     } while (minutes != equeue_minutes);
122 | 
123 |     return minutes + ms;
124 | }
125 | 
126 | #endif
127 | 
128 | // Mutex operations
129 | int equeue_mutex_create(equeue_mutex_t *m) { return 0; }
130 | void equeue_mutex_destroy(equeue_mutex_t *m) { }
131 | 
132 | void equeue_mutex_lock(equeue_mutex_t *m) {
133 |     core_util_critical_section_enter();
134 | }
135 | 
136 | void equeue_mutex_unlock(equeue_mutex_t *m) {
137 |     core_util_critical_section_exit();
138 | }
139 | 
140 | 
141 | // Semaphore operations
142 | #ifdef MBED_CONF_RTOS_PRESENT
143 | 
144 | int equeue_sema_create(equeue_sema_t *s) {
145 |     osEventFlagsAttr_t attr;
146 |     memset(&attr, 0, sizeof(attr));
147 |     attr.cb_mem = &s->mem;
148 |     attr.cb_size = sizeof(s->mem);
149 | 
150 |     s->id = osEventFlagsNew(&attr);
151 |     return !s->id ? -1 : 0;
152 | }
153 | 
154 | void equeue_sema_destroy(equeue_sema_t *s) {
155 |     osEventFlagsDelete(s->id);
156 | }
157 | 
158 | void equeue_sema_signal(equeue_sema_t *s) {
159 |     osEventFlagsSet(s->id, 1);
160 | }
161 | 
162 | bool equeue_sema_wait(equeue_sema_t *s, int ms) {
163 |     if (ms < 0) {
164 |         ms = osWaitForever;
165 |     }
166 | 
167 |     return (osEventFlagsWait(s->id, 1, osFlagsWaitAny, ms) == 1);
168 | }
169 | 
170 | #else
171 | 
172 | // Semaphore operations
173 | int equeue_sema_create(equeue_sema_t *s) {
174 |     *s = false;
175 |     return 0;
176 | }
177 | 
178 | void equeue_sema_destroy(equeue_sema_t *s) {
179 | }
180 | 
181 | void equeue_sema_signal(equeue_sema_t *s) {
182 |     *s = 1;
183 | }
184 | 
185 | static void equeue_sema_timeout(equeue_sema_t *s) {
186 |     *s = -1;
187 | }
188 | 
189 | bool equeue_sema_wait(equeue_sema_t *s, int ms) {
190 |     int signal = 0;
191 |     ALIAS_TIMEOUT timeout;
192 |     if (ms == 0) {
193 |         return false;
194 |     } else if (ms > 0) {
195 |         timeout.attach_us(callback(equeue_sema_timeout, s), (us_timestamp_t)ms*1000);
196 |     }
197 | 
198 |     core_util_critical_section_enter();
199 |     while (!*s) {
200 |         sleep();
201 |         core_util_critical_section_exit();
202 |         core_util_critical_section_enter();
203 |     }
204 | 
205 |     signal = *s;
206 |     *s = false;
207 |     core_util_critical_section_exit();
208 | 
209 |     return (signal > 0);
210 | }
211 | 
212 | #endif
213 | 
214 | #endif
215 | 
--------------------------------------------------------------------------------
/equeue_platform.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * System-specific implementation
3 |  *
4 |  * Copyright (c) 2016 Christopher Haster
5 |  * Distributed under the MIT license
6 |  */
7 | #ifndef EQUEUE_PLATFORM_H
8 | #define EQUEUE_PLATFORM_H
9 | 
10 | #ifdef __cplusplus
11 | extern "C" {
12 | #endif
13 | 
14 | #include <stdbool.h>
15 | 
16 | // Currently supported platforms
17 | //
18 | // Uncomment to select a supported platform or reimplement this file
19 | // for a specific target.
20 | //#define EQUEUE_PLATFORM_POSIX
21 | //#define EQUEUE_PLATFORM_WINDOWS
22 | //#define EQUEUE_PLATFORM_MBED
23 | //#define EQUEUE_PLATFORM_FREERTOS
24 | 
25 | // Try to infer a platform if none was manually selected
26 | #if !defined(EQUEUE_PLATFORM_POSIX) \
27 |         && !defined(EQUEUE_PLATFORM_WINDOWS) \
28 |         && !defined(EQUEUE_PLATFORM_MBED) \
29 |         && !defined(EQUEUE_PLATFORM_FREERTOS)
30 | #if defined(__unix__)
31 | #define EQUEUE_PLATFORM_POSIX
32 | #elif defined(_WIN32)
33 | #define EQUEUE_PLATFORM_WINDOWS
34 | #elif defined(__MBED__)
35 | #define EQUEUE_PLATFORM_MBED
36 | #else
37 | #warning "Unknown platform! Please update equeue_platform.h"
38 | #endif
39 | #endif
40 | 
41 | // Platform includes
42 | #if defined(EQUEUE_PLATFORM_POSIX)
43 | #include <pthread.h>
44 | #elif defined(EQUEUE_PLATFORM_WINDOWS)
45 | #include <windows.h>
46 | #elif defined(EQUEUE_PLATFORM_FREERTOS)
47 | #include "FreeRTOS.h"
48 | #include "semphr.h"
49 | #elif defined(EQUEUE_PLATFORM_MBED) && defined(MBED_CONF_RTOS_PRESENT)
50 | #include "cmsis_os2.h"
51 | #include "mbed_rtos_storage.h"
52 | #endif
53 | 
54 | 
55 | // Platform millisecond counter
56 | //
57 | // Return a tick that represents the number of milliseconds that have passed
58 | // since an arbitrary point in time. The granularity does not need to be at
59 | // the millisecond level, however, the accuracy of the equeue library is
60 | // limited by the accuracy of this tick.
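// (Because the tick wraps, durations should be computed with unsigned
// subtraction, e.g. `unsigned elapsed = equeue_tick() - start;`, which
// stays correct across the 2^32 rollover.)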
61 | //
62 | // Must intentionally overflow to 0 after 2^32-1
63 | unsigned equeue_tick(void);
64 | 
65 | 
66 | // Platform mutex type
67 | //
68 | // The equeue library requires at minimum a non-recursive mutex that is
69 | // safe in interrupt contexts. The mutex section is held for a bounded
70 | // amount of time, so simply disabling interrupts is acceptable.
71 | //
72 | // If IRQ safety is not required, a regular blocking mutex can be used.
73 | #if defined(EQUEUE_PLATFORM_POSIX)
74 | typedef pthread_mutex_t equeue_mutex_t;
75 | #elif defined(EQUEUE_PLATFORM_WINDOWS)
76 | typedef CRITICAL_SECTION equeue_mutex_t;
77 | #elif defined(EQUEUE_PLATFORM_MBED)
78 | typedef unsigned equeue_mutex_t;
79 | #elif defined(EQUEUE_PLATFORM_FREERTOS)
80 | typedef UBaseType_t equeue_mutex_t;
81 | #endif
82 | 
83 | // Platform mutex operations
84 | //
85 | // The equeue_mutex_create and equeue_mutex_destroy functions manage the
86 | // lifetime of the mutex. On error, equeue_mutex_create should return a
87 | // negative error code.
88 | //
89 | // The equeue_mutex_lock and equeue_mutex_unlock functions lock and unlock
90 | // the underlying mutex.
91 | int equeue_mutex_create(equeue_mutex_t *mutex);
92 | void equeue_mutex_destroy(equeue_mutex_t *mutex);
93 | void equeue_mutex_lock(equeue_mutex_t *mutex);
94 | void equeue_mutex_unlock(equeue_mutex_t *mutex);
95 | 
96 | 
97 | // Platform semaphore type
98 | //
99 | // The equeue library requires a binary semaphore type that can be safely
100 | // signaled from interrupt contexts and from inside an equeue_mutex section.
101 | //
102 | // The equeue_sema_wait function is relied upon by the equeue library to
103 | // sleep the processor between events. Spurious wakeups have no negative effects.
104 | //
105 | // A counting semaphore will also work, however it may cause the event queue
106 | // dispatch loop to run unnecessarily. For that matter, equeue_sema_wait
107 | // may even be implemented as a single return statement.
108 | #if defined(EQUEUE_PLATFORM_POSIX)
109 | typedef struct equeue_sema {
110 |     pthread_mutex_t mutex;
111 |     pthread_cond_t cond;
112 |     bool signal;
113 | } equeue_sema_t;
114 | #elif defined(EQUEUE_PLATFORM_WINDOWS)
115 | typedef HANDLE equeue_sema_t;
116 | #elif defined(EQUEUE_PLATFORM_MBED) && defined(MBED_CONF_RTOS_PRESENT)
117 | typedef struct equeue_sema {
118 |     osEventFlagsId_t id;
119 |     mbed_rtos_storage_event_flags_t mem;
120 | } equeue_sema_t;
121 | #elif defined(EQUEUE_PLATFORM_MBED)
122 | typedef volatile int equeue_sema_t;
123 | #elif defined(EQUEUE_PLATFORM_FREERTOS)
124 | typedef struct equeue_sema {
125 |     SemaphoreHandle_t handle;
126 |     StaticSemaphore_t buffer;
127 | } equeue_sema_t;
128 | #endif
129 | 
130 | // Platform semaphore operations
131 | //
132 | // The equeue_sema_create and equeue_sema_destroy functions manage the
133 | // lifetime of the semaphore. On error, equeue_sema_create should return
134 | // a negative error code.
135 | //
136 | // The equeue_sema_signal function marks a semaphore as signalled such that
137 | // the next equeue_sema_wait will return true.
138 | //
139 | // The equeue_sema_wait function waits for a semaphore to be signalled, or
140 | // returns immediately if equeue_sema_signal has been called since the last
141 | // equeue_sema_wait. It returns true if it detected that equeue_sema_signal
142 | // had been called. If ms is negative, equeue_sema_wait waits indefinitely.
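//
// As a minimal sketch of that degenerate case (illustrative only, not one
// of the ports in this repository), a busy-polling target could define
// `typedef int equeue_sema_t;` and implement every operation as a no-op:
//
//     int equeue_sema_create(equeue_sema_t *s) { return 0; }
//     void equeue_sema_destroy(equeue_sema_t *s) { }
//     void equeue_sema_signal(equeue_sema_t *s) { }
//     bool equeue_sema_wait(equeue_sema_t *s, int ms) { return false; }
//
// Returning false reads as a spurious wakeup, so the dispatch loop simply
// polls the queue instead of sleeping.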
144 | int equeue_sema_create(equeue_sema_t *sema);
145 | void equeue_sema_destroy(equeue_sema_t *sema);
146 | void equeue_sema_signal(equeue_sema_t *sema);
147 | bool equeue_sema_wait(equeue_sema_t *sema, int ms);
148 | 
149 | 
150 | #ifdef __cplusplus
151 | }
152 | #endif
153 | 
154 | #endif
155 | 
--------------------------------------------------------------------------------
/equeue_posix.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Implementation for POSIX-compliant platforms
3 |  *
4 |  * Copyright (c) 2016 Christopher Haster
5 |  * Distributed under the MIT license
6 |  */
7 | #include "equeue_platform.h"
8 | 
9 | #if defined(EQUEUE_PLATFORM_POSIX)
10 | 
11 | #include <time.h>
12 | #include <sys/time.h>
13 | #include <errno.h>
14 | 
15 | 
16 | // Tick operations
17 | unsigned equeue_tick(void) {
18 |     struct timeval tv;
19 |     gettimeofday(&tv, 0);
20 |     return (unsigned)(tv.tv_sec*1000 + tv.tv_usec/1000);
21 | }
22 | 
23 | 
24 | // Mutex operations
25 | int equeue_mutex_create(equeue_mutex_t *m) {
26 |     return pthread_mutex_init(m, 0);
27 | }
28 | 
29 | void equeue_mutex_destroy(equeue_mutex_t *m) {
30 |     pthread_mutex_destroy(m);
31 | }
32 | 
33 | void equeue_mutex_lock(equeue_mutex_t *m) {
34 |     pthread_mutex_lock(m);
35 | }
36 | 
37 | void equeue_mutex_unlock(equeue_mutex_t *m) {
38 |     pthread_mutex_unlock(m);
39 | }
40 | 
41 | 
42 | // Semaphore operations
43 | int equeue_sema_create(equeue_sema_t *s) {
44 |     int err = pthread_mutex_init(&s->mutex, 0);
45 |     if (err) {
46 |         return err;
47 |     }
48 | 
49 |     err = pthread_cond_init(&s->cond, 0);
50 |     if (err) {
51 |         return err;
52 |     }
53 | 
54 |     s->signal = false;
55 |     return 0;
56 | }
57 | 
58 | void equeue_sema_destroy(equeue_sema_t *s) {
59 |     pthread_cond_destroy(&s->cond);
60 |     pthread_mutex_destroy(&s->mutex);
61 | }
62 | 
63 | void equeue_sema_signal(equeue_sema_t *s) {
64 |     pthread_mutex_lock(&s->mutex);
65 |     s->signal = true;
66 |     pthread_cond_signal(&s->cond);
67 |     pthread_mutex_unlock(&s->mutex);
68 | }
69 | 
70 | bool equeue_sema_wait(equeue_sema_t *s, int ms) {
71 |     pthread_mutex_lock(&s->mutex);
72 |     if (!s->signal) {
73 |         if (ms < 0) {
74 |             pthread_cond_wait(&s->cond, &s->mutex);
75 |         } else {
76 |             struct timeval tv;
77 |             gettimeofday(&tv, 0);
78 | 
79 |             // Split ms into whole seconds and a nanosecond remainder,
80 |             // normalizing so tv_nsec stays below one second
81 |             struct timespec ts = {
82 |                 .tv_sec = ms/1000 + tv.tv_sec,
83 |                 .tv_nsec = (ms % 1000)*1000000L + tv.tv_usec*1000L,
84 |             };
85 | 
86 |             if (ts.tv_nsec >= 1000000000L) {
87 |                 ts.tv_sec += 1;
88 |                 ts.tv_nsec -= 1000000000L;
89 |             }
90 | 
91 |             pthread_cond_timedwait(&s->cond, &s->mutex, &ts);
92 |         }
93 |     }
94 | 
95 |     bool signal = s->signal;
96 |     s->signal = false;
97 |     pthread_mutex_unlock(&s->mutex);
98 | 
99 |     return signal;
100 | }
101 | 
102 | #endif
103 | 
--------------------------------------------------------------------------------
/equeue_windows.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Implementation for Windows
3 |  *
4 |  * Copyright (c) 2016 Christopher Haster
5 |  * Distributed under the MIT license
6 |  */
7 | #include "equeue_platform.h"
8 | 
9 | #if defined(EQUEUE_PLATFORM_WINDOWS)
10 | 
11 | #include <windows.h>
12 | 
13 | 
14 | // Tick operations
15 | unsigned equeue_tick(void) {
16 |     return GetTickCount();
17 | }
18 | 
19 | 
20 | // Mutex operations
21 | int equeue_mutex_create(equeue_mutex_t *m) {
22 |     InitializeCriticalSection(m);
23 |     return 0;
24 | }
25 | 
26 | void equeue_mutex_destroy(equeue_mutex_t *m) {
27 |     DeleteCriticalSection(m);
28 | }
29 | 
30 | void equeue_mutex_lock(equeue_mutex_t *m) {
31 |     EnterCriticalSection(m);
32 | }
33 | 
34 | void equeue_mutex_unlock(equeue_mutex_t *m) {
35 |     LeaveCriticalSection(m);
36 | }
37 | 
38 | 
39 | // Semaphore operations
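// (Note: CreateSemaphore below is capped at a maximum count of 1, which
// gives the binary-semaphore behavior the platform layer asks for.)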
40 | int equeue_sema_create(equeue_sema_t *s) {
41 |     *s = CreateSemaphore(NULL, 0, 1, NULL);
42 |     return *s ? 0 : -1;
43 | }
44 | 
45 | void equeue_sema_destroy(equeue_sema_t *s) {
46 |     CloseHandle(*s);
47 | }
48 | 
49 | void equeue_sema_signal(equeue_sema_t *s) {
50 |     ReleaseSemaphore(*s, 1, NULL);
51 | }
52 | 
53 | bool equeue_sema_wait(equeue_sema_t *s, int ms) {
54 |     if (ms < 0) {
55 |         ms = INFINITE;
56 |     }
57 | 
58 |     return WaitForSingleObject(*s, ms) == WAIT_OBJECT_0;
59 | }
60 | 
61 | 
62 | #endif
63 | 
--------------------------------------------------------------------------------
/tests/prof.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Profiling framework for the events library
3 |  *
4 |  * Copyright (c) 2016 Christopher Haster
5 |  * Distributed under the MIT license
6 |  */
7 | #include "equeue.h"
8 | #include <unistd.h>
9 | #include <stdio.h>
10 | #include <setjmp.h>
11 | #include <stdint.h>
12 | #include <stdlib.h>
13 | #include <inttypes.h>
14 | #include <sys/time.h>
15 | 
16 | 
17 | // Performance measurement utils
18 | #define PROF_RUNS 5
19 | #define PROF_INTERVAL 100000000
20 | 
21 | #define prof_volatile(t) __attribute__((unused)) volatile t
22 | 
23 | typedef uint64_t prof_cycle_t;
24 | 
25 | static volatile prof_cycle_t prof_start_cycle;
26 | static volatile prof_cycle_t prof_stop_cycle;
27 | static prof_cycle_t prof_accum_cycle;
28 | static prof_cycle_t prof_baseline_cycle;
29 | static prof_cycle_t prof_iterations;
30 | static const char *prof_units;
31 | 
32 | #define prof_cycle() ({ \
33 |     uint32_t a, b; \
34 |     __asm__ volatile ("rdtsc" : "=a" (a), "=d" (b)); \
35 |     ((uint64_t)b << 32) | (uint64_t)a; \
36 | })
37 | 
38 | #define prof_loop() \
39 |     for (prof_iterations = 0; \
40 |          prof_accum_cycle < PROF_INTERVAL; \
41 |          prof_iterations++)
42 | 
43 | #define prof_start() ({ \
44 |     prof_start_cycle = prof_cycle(); \
45 | })
46 | 
47 | #define prof_stop() ({ \
48 |     prof_stop_cycle = prof_cycle(); \
49 |     prof_accum_cycle += prof_stop_cycle - prof_start_cycle; \
50 | })
51 | 
52 | #define prof_result(value, units) ({ \
53 |     prof_accum_cycle = value+prof_baseline_cycle; \
54 |     prof_iterations = 1; \
55 |     prof_units = units; \
56 | })
57 | 
58 | #define prof_measure(func, ...) ({ \
59 |     printf("%s: ...", #func); \
60 |     fflush(stdout); \
61 |     \
62 |     prof_units = "cycles"; \
63 |     prof_cycle_t runs[PROF_RUNS]; \
64 |     for (int i = 0; i < PROF_RUNS; i++) { \
65 |         prof_accum_cycle = 0; \
66 |         prof_iterations = 0; \
67 |         func(__VA_ARGS__); \
68 |         runs[i] = prof_accum_cycle / prof_iterations; \
69 |     } \
70 |     \
71 |     prof_cycle_t res = runs[0]; \
72 |     for (int i = 0; i < PROF_RUNS; i++) { \
73 |         if (runs[i] < res) { \
74 |             res = runs[i]; \
75 |         } \
76 |     } \
77 |     res -= prof_baseline_cycle; \
78 |     printf("\r%s: %"PRIu64" %s", #func, res, prof_units); \
79 |     \
80 |     if (!isatty(0)) { \
81 |         prof_cycle_t prev; \
82 |         while (scanf("%*[^0-9]%"PRIu64, &prev) == 0); \
83 |         int64_t perc = 100*((int64_t)prev - (int64_t)res) / (int64_t)prev; \
84 |         \
85 |         if (perc > 10) { \
86 |             printf(" (\e[32m%+"PRId64"%%\e[0m)", perc); \
87 |         } else if (perc < -10) { \
88 |             printf(" (\e[31m%+"PRId64"%%\e[0m)", perc); \
89 |         } else { \
90 |             printf(" (%+"PRId64"%%)", perc); \
91 |         } \
92 |     } \
93 |     \
94 |     printf("\n"); \
95 |     res; \
96 | })
97 | 
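// (prof_baseline measures an empty measured region; prof_measure subtracts
// the resulting prof_baseline_cycle overhead from every subsequent result.)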
98 | #define prof_baseline(func, ...) ({ \
99 |     prof_baseline_cycle = 0; \
100 |     prof_baseline_cycle = prof_measure(func, __VA_ARGS__); \
101 | })
102 | 
103 | 
104 | // Various test functions
105 | void no_func(void *eh) {
106 | }
107 | 
108 | 
109 | // Actual performance tests
110 | void baseline_prof(void) {
111 |     prof_loop() {
112 |         prof_start();
113 |         __asm__ volatile ("");
114 |         prof_stop();
115 |     }
116 | }
117 | 
118 | void equeue_tick_prof(void) {
119 |     prof_volatile(unsigned) res;
120 |     prof_loop() {
121 |         prof_start();
122 |         res = equeue_tick();
123 |         prof_stop();
124 |     }
125 | }
126 | 
127 | void equeue_alloc_prof(void) {
128 |     struct equeue q;
129 |     equeue_create(&q, 32*EQUEUE_EVENT_SIZE);
130 | 
131 |     prof_loop() {
132 |         prof_start();
133 |         void *e = equeue_alloc(&q, 8 * sizeof(int));
134 |         prof_stop();
135 | 
136 |         equeue_dealloc(&q, e);
137 |     }
138 | 
139 |     equeue_destroy(&q);
140 | }
141 | 
142 | void equeue_alloc_many_prof(int count) {
143 |     struct equeue q;
144 |     equeue_create(&q, count*EQUEUE_EVENT_SIZE);
145 | 
146 |     void *es[count];
147 | 
148 |     for (int i = 0; i < count; i++) {
149 |         es[i] = equeue_alloc(&q, (i % 4) * sizeof(int));
150 |     }
151 | 
152 |     for (int i = 0; i < count; i++) {
153 |         equeue_dealloc(&q, es[i]);
154 |     }
155 | 
156 |     prof_loop() {
157 |         prof_start();
158 |         void *e = equeue_alloc(&q, 8 * sizeof(int));
159 |         prof_stop();
160 | 
161 |         equeue_dealloc(&q, e);
162 |     }
163 | 
164 |     equeue_destroy(&q);
165 | }
166 | 
167 | void equeue_post_prof(void) {
168 |     struct equeue q;
169 |     equeue_create(&q, EQUEUE_EVENT_SIZE);
170 | 
171 |     prof_loop() {
172 |         void *e = equeue_alloc(&q, 0);
173 | 
174 |         prof_start();
175 |         int id = equeue_post(&q, no_func, e);
176 |         prof_stop();
177 | 
178 |         equeue_cancel(&q, id);
179 |     }
180 | 
181 |     equeue_destroy(&q);
182 | }
183 | 
184 | void equeue_post_many_prof(int count) {
185 |     struct equeue q;
186 |     equeue_create(&q, count*EQUEUE_EVENT_SIZE);
187 | 
188 |     for (int i = 0; i < count-1; i++) {
189 |         equeue_call(&q, no_func, 0);
190 |     }
191 | 
192 |     prof_loop() {
193 |         void *e = equeue_alloc(&q, 0);
194 | 
195 |         prof_start();
196 |         int id = equeue_post(&q, no_func, e);
197 |         prof_stop();
198 | 
199 |         equeue_cancel(&q, id);
200 |     }
201 | 
202 |     equeue_destroy(&q);
203 | }
204 | 
205 | void equeue_post_future_prof(void) {
206 |     struct equeue q;
207 |     equeue_create(&q, EQUEUE_EVENT_SIZE);
208 | 
209 |     prof_loop() {
210 |         void *e = equeue_alloc(&q, 0);
211 |         equeue_event_delay(e, 1000);
212 | 
213 |         prof_start();
214 |         int id = equeue_post(&q, no_func, e);
215 |         prof_stop();
216 | 
217 |         equeue_cancel(&q, id);
218 |     }
219 | 
220 |     equeue_destroy(&q);
221 | }
222 | 
223 | void equeue_post_future_many_prof(int count) {
224 |     struct equeue q;
225 |     equeue_create(&q, count*EQUEUE_EVENT_SIZE);
226 | 
227 |     for (int i = 0; i < count-1; i++) {
228 |         equeue_call(&q, no_func, 0);
229 |     }
230 | 
231 |     prof_loop() {
232 |         void *e = equeue_alloc(&q, 0);
233 |         equeue_event_delay(e, 1000);
234 | 
235 |         prof_start();
236 |         int id = equeue_post(&q, no_func, e);
237 |         prof_stop();
238 | 
239 |         equeue_cancel(&q, id);
240 |     }
241 | 
242 |     equeue_destroy(&q);
243 | }
244 | 
245 | void equeue_dispatch_prof(void) {
246 |     struct equeue q;
247 |     equeue_create(&q, EQUEUE_EVENT_SIZE);
248 | 
249 |     prof_loop() {
250 |         equeue_call(&q, no_func, 0);
251 | 
252 |         prof_start();
253 |         equeue_dispatch(&q, 0);
254 |         prof_stop();
255 |     }
256 | 
257 |     equeue_destroy(&q);
258 | }
259 | 
260 | void equeue_dispatch_many_prof(int count) {
261 |     struct equeue q;
262 |     equeue_create(&q, count*EQUEUE_EVENT_SIZE);
263 | 
264 |     prof_loop() {
265 |         for (int i = 0; i < count; i++) {
266 |             equeue_call(&q, no_func, 0);
267 |         }
268 | 
269 |         prof_start();
270 |         equeue_dispatch(&q, 0);
271 |         prof_stop();
272 |     }
273 | 
274 |     equeue_destroy(&q);
275 | }
276 | 
277 | void equeue_cancel_prof(void) {
278 |     struct equeue q;
279 |     equeue_create(&q, EQUEUE_EVENT_SIZE);
280 | 
281 |     prof_loop() {
282 |         int id = equeue_call(&q, no_func, 0);
283 | 
284 |         prof_start();
285 |         equeue_cancel(&q, id);
286 |         prof_stop();
287 |     }
288 | 
289 |     equeue_destroy(&q);
290 | }
291 | 
292 | void equeue_cancel_many_prof(int count) {
293 |     struct equeue q;
294 |     equeue_create(&q, count*EQUEUE_EVENT_SIZE);
295 | 
296 |     for (int i = 0; i < count-1; i++) {
297 |         equeue_call(&q, no_func, 0);
298 |     }
299 | 
300 |     prof_loop() {
301 |         int id = equeue_call(&q, no_func, 0);
302 | 
303 |         prof_start();
304 |         equeue_cancel(&q, id);
305 |         prof_stop();
306 |     }
307 | 
308 |     equeue_destroy(&q);
309 | }
310 | 
311 | void equeue_alloc_size_prof(void) {
312 |     size_t size = 32*EQUEUE_EVENT_SIZE;
313 | 
314 |     struct equeue q;
315 |     equeue_create(&q, size);
316 |     equeue_alloc(&q, 0);
317 | 
318 |     prof_result(size - q.slab.size, "bytes");
319 | 
320 |     equeue_destroy(&q);
321 | }
322 | 
323 | void equeue_alloc_many_size_prof(int count) {
324 |     size_t size = count*EQUEUE_EVENT_SIZE;
325 | 
326 |     struct equeue q;
327 |     equeue_create(&q, size);
328 | 
329 |     for (int i = 0; i < count; i++) {
330 |         equeue_alloc(&q, (i % 4) * sizeof(int));
331 |     }
332 | 
333 |     prof_result(size - q.slab.size, "bytes");
334 | 
335 |     equeue_destroy(&q);
336 | }
337 | 
338 | void equeue_alloc_fragmented_size_prof(int count) {
339 |     size_t size = count*EQUEUE_EVENT_SIZE;
340 | 
341 |     struct equeue q;
342 |     equeue_create(&q, size);
343 | 
344 |     void *es[count];
345 | 
346 |     for (int i = 0; i < count; i++) {
347 |         es[i] = equeue_alloc(&q, (i % 4) * sizeof(int));
348 |     }
349 | 
350 |     for (int i = 0; i < count; i++) {
351 |         equeue_dealloc(&q, es[i]);
352 |     }
353 | 
354 |     for (int i = count-1; i >= 0; i--) {
355 |         es[i] = equeue_alloc(&q, (i % 4) * sizeof(int));
356 |     }
357 | 
358 |     for (int i = count-1; i >= 0; i--) {
359 |         equeue_dealloc(&q, es[i]);
360 |     }
361 | 
362 |     for (int i = 0; i < count; i++) {
363 |         equeue_alloc(&q, (i % 4) * sizeof(int));
364 |     }
365 | 
366 |     prof_result(size - q.slab.size, "bytes");
367 | 
368 |     equeue_destroy(&q);
369 | }
370 | 
371 | 
372 | // Entry point
373 | int main(void) {
374 |     printf("beginning profiling...\n");
375 | 
376 |     prof_baseline(baseline_prof);
377 | 
378 |     prof_measure(equeue_tick_prof);
379 |     prof_measure(equeue_alloc_prof);
380 |     prof_measure(equeue_post_prof);
381 |     prof_measure(equeue_post_future_prof);
382 |     prof_measure(equeue_dispatch_prof);
383 |     prof_measure(equeue_cancel_prof);
384 | 
385 |     prof_measure(equeue_alloc_many_prof, 1000);
386 |     prof_measure(equeue_post_many_prof, 1000);
387 |     prof_measure(equeue_post_future_many_prof, 1000);
388 |     prof_measure(equeue_dispatch_many_prof, 100);
389 |     prof_measure(equeue_cancel_many_prof, 100);
390 | 
391 |     prof_measure(equeue_alloc_size_prof);
392 |     prof_measure(equeue_alloc_many_size_prof, 1000);
393 |     prof_measure(equeue_alloc_fragmented_size_prof, 1000);
394 | 
395 |     printf("done!\n");
396 | }
397 | 
--------------------------------------------------------------------------------
/tests/tests.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Testing framework for the events library
3 |  *
4 |  * Copyright (c) 2016 Christopher Haster
5 |  * Distributed under the MIT license
6 |  */
7 | #include "equeue.h"
8 | #include <unistd.h>
9 | #include <stdio.h>
10 | #include <setjmp.h>
11 | #include <stdint.h>
12 | #include <stdlib.h>
13 | #include <pthread.h>
14 | 
15 | 
16 | // Testing setup
17 | static jmp_buf test_buf;
18 | static int test_line;
19 | static int test_failure;
20 | 
21 | #define test_assert(test) ({ \
22 |     if (!(test)) { \
23 |         test_line = __LINE__; \
24 |         longjmp(test_buf, 1); \
25 |     } \
26 | })
27 | 
28 | #define test_run(func, ...) ({ \
29 |     printf("%s: ...", #func); \
30 |     fflush(stdout); \
31 |     \
32 |     if (!setjmp(test_buf)) { \
33 |         func(__VA_ARGS__); \
34 |         printf("\r%s: \e[32mpassed\e[0m\n", #func); \
35 |     } else { \
36 |         printf("\r%s: \e[31mfailed\e[0m at line %d\n", #func, test_line); \
37 |         test_failure = true; \
38 |     } \
39 | })
40 | 
41 | 
42 | // Test functions
43 | void pass_func(void *eh) {
44 | }
45 | 
46 | void simple_func(void *p) {
47 |     (*(int *)p)++;
48 | }
49 | 
50 | void sloth_func(void *p) {
51 |     usleep(100000);
52 |     (*(int *)p)++;
53 | }
54 | 
55 | struct indirect {
56 |     int *touched;
57 |     uint8_t buffer[7];
58 | };
59 | 
60 | void indirect_func(void *p) {
61 |     struct indirect *i = (struct indirect*)p;
62 |     (*i->touched)++;
63 | }
64 | 
65 | struct timing {
66 |     unsigned tick;
67 |     unsigned delay;
68 | };
69 | 
70 | void timing_func(void *p) {
71 |     struct timing *timing = (struct timing*)p;
72 |     unsigned tick = equeue_tick();
73 | 
74 |     unsigned t1 = timing->delay;
75 |     unsigned t2 = tick - timing->tick;
76 |     test_assert(t1 > t2 - 100 && t1 < t2 + 100);
77 | 
78 |     timing->tick = tick;
79 | }
80 | 
81 | struct fragment {
82 |     equeue_t *q;
83 |     size_t size;
84 |     struct timing timing;
85 | };
86 | 
87 | void fragment_func(void *p) {
88 |     struct fragment *fragment = (struct fragment*)p;
89 |     timing_func(&fragment->timing);
90 | 
91 |     struct fragment *nfragment = equeue_alloc(fragment->q, fragment->size);
92 |     test_assert(nfragment);
93 | 
94 |     *nfragment = *fragment;
95 |     equeue_event_delay(nfragment, fragment->timing.delay);
96 | 
97 |     int id = equeue_post(nfragment->q, fragment_func, nfragment);
98 |     test_assert(id);
99 | }
100 | 
101 | struct cancel {
102 |     equeue_t *q;
103 |     int id;
104 | };
105 | 
106 | void cancel_func(void *p) {
107 |     struct cancel *cancel = (struct cancel *)p;
108 |     equeue_cancel(cancel->q, cancel->id);
109 | }
110 | 
111 | struct nest {
112 |     equeue_t *q;
113 |     void (*cb)(void *);
114 |     void *data;
115 | };
116 | 
117 | void nest_func(void *p) {
118 |     struct nest *nest = (struct nest *)p;
119 |     equeue_call(nest->q, nest->cb, nest->data);
120 | 
121 |     usleep(100000);
122 | }
123 | 
124 | 
125 | // Simple call tests
126 | void simple_call_test(void) {
127 |     equeue_t q;
128 |     int err = equeue_create(&q, 2048);
129 |     test_assert(!err);
130 | 
131 |     bool touched = false;
132 |     equeue_call(&q, simple_func, &touched);
133 |     equeue_dispatch(&q, 0);
134 |     test_assert(touched);
135 | 
136 |     equeue_destroy(&q);
137 | }
138 | 
139 | void simple_call_in_test(void) {
140 |     equeue_t q;
141 |     int err = equeue_create(&q, 2048);
142 |     test_assert(!err);
143 | 
144 |     bool touched = false;
145 |     int id = equeue_call_in(&q, 100, simple_func, &touched);
146 |     test_assert(id);
147 | 
148 |     equeue_dispatch(&q, 150);
149 |     test_assert(touched);
150 | 
151 |     equeue_destroy(&q);
152 | }
153 | 
154 | void simple_call_every_test(void) {
155 |     equeue_t q;
156 |     int err = equeue_create(&q, 2048);
157 |     test_assert(!err);
158 | 
159 |     bool touched = false;
160 |     int id = equeue_call_every(&q, 100, simple_func, &touched);
161 |     test_assert(id);
162 | 
163 |     equeue_dispatch(&q, 150);
164 |     test_assert(touched);
165 | 
166 |     equeue_destroy(&q);
167 | }
168 | 
169 | void simple_post_test(void) {
170 |     equeue_t q;
171 |     int err = equeue_create(&q, 2048);
172 |     test_assert(!err);
173 | 
174 |     int touched = false;
175 |     struct indirect *i = equeue_alloc(&q, sizeof(struct indirect));
176 |     test_assert(i);
177 | 
178 |     i->touched = &touched;
179 |     int id = equeue_post(&q, indirect_func, i);
180 |     test_assert(id);
181 | 
182 |     equeue_dispatch(&q, 0);
183 |     test_assert(*i->touched);
184 | 
185 |     equeue_destroy(&q);
186 | }
187 | 
188 | // Misc tests
189 | void destructor_test(void) {
190 |     equeue_t q;
191 |     int err = equeue_create(&q, 2048);
192 |     test_assert(!err);
193 | 
194 |     int touched;
195 |     struct indirect *e;
196 |     int ids[3];
197 | 
198 |     touched = 0;
199 |     for (int i = 0; i < 3; i++) {
200 |         e = equeue_alloc(&q, sizeof(struct indirect));
201 |         test_assert(e);
202 | 
203 |         e->touched = &touched;
204 |         equeue_event_dtor(e, indirect_func);
205 |         int id = equeue_post(&q, pass_func, e);
206 |         test_assert(id);
207 |     }
208 | 
209 |     equeue_dispatch(&q, 0);
210 |     test_assert(touched == 3);
211 | 
212 |     touched = 0;
213 |     for (int i = 0; i < 3; i++) {
214 |         e = equeue_alloc(&q, sizeof(struct indirect));
215 |         test_assert(e);
216 | 
217 |         e->touched = &touched;
218 |         equeue_event_dtor(e, indirect_func);
219 |         ids[i] = equeue_post(&q, pass_func, e);
220 |         test_assert(ids[i]);
221 |     }
222 | 
223 |     for (int i = 0; i < 3; i++) {
224 |         equeue_cancel(&q, ids[i]);
225 |     }
226 | 
227 |     equeue_dispatch(&q, 0);
228 |     test_assert(touched == 3);
229 | 
230 |     touched = 0;
231 |     for (int i = 0; i < 3; i++) {
232 |         e = equeue_alloc(&q, sizeof(struct indirect));
233 |         test_assert(e);
234 | 
235 |         e->touched = &touched;
236 |         equeue_event_dtor(e, indirect_func);
237 |         int id = equeue_post(&q, pass_func, e);
238 |         test_assert(id);
239 |     }
240 | 
241 |     equeue_destroy(&q);
242 |     test_assert(touched == 3);
243 | }
244 | 
245 | void allocation_failure_test(void) {
246 |     equeue_t q;
247 |     int err = equeue_create(&q, 2048);
248 |     test_assert(!err);
249 | 
250 |     void *p = equeue_alloc(&q, 4096);
251 |     test_assert(!p);
252 | 
253 |     for (int i = 0; i < 100; i++) {
254 |         p = equeue_alloc(&q, 0);
255 |     }
256 |     test_assert(!p);
257 | 
258 |     equeue_destroy(&q);
259 | }
260 | 
261 | void cancel_test(int N) {
262 |     equeue_t q;
263 |     int err = equeue_create(&q, 2048);
264 |     test_assert(!err);
265 | 
266 |     bool touched = false;
267 |     int *ids = malloc(N*sizeof(int));
268 | 
269 |     for (int i = 0; i < N; i++) {
270 |         ids[i] = equeue_call(&q, simple_func, &touched);
271 |     }
272 | 
273 |     for (int i = N-1; i >= 0; i--) {
274 |         equeue_cancel(&q, ids[i]);
275 |     }
276 | 
277 |     free(ids);
278 | 
279 |     equeue_dispatch(&q, 0);
280 |     test_assert(!touched);
281 | 
282 |     equeue_destroy(&q);
283 | }
284 | 
285 | void cancel_inflight_test(void) {
286 |     equeue_t q;
287 |     int err = equeue_create(&q, 2048);
288 |     test_assert(!err);
289 | 
290 |     bool touched = false;
291 | 
292 |     int id = equeue_call(&q, simple_func, &touched);
293 |     equeue_cancel(&q, id);
294 | 
295 |     equeue_dispatch(&q, 0);
296 |     test_assert(!touched);
297 | 
298 |     id = equeue_call(&q, simple_func, &touched);
299 |     equeue_cancel(&q, id);
300 | 
301 |     equeue_dispatch(&q, 0);
302 |     test_assert(!touched);
303 | 
304 |     struct cancel *cancel = equeue_alloc(&q, sizeof(struct cancel));
305 |     test_assert(cancel);
306 |     cancel->q = &q;
307 |     cancel->id = 0;
308 | 
309 |     id = equeue_post(&q, cancel_func, cancel);
310 |     test_assert(id);
311 | 
312 |     cancel->id = equeue_call(&q, simple_func, &touched);
313 | 
314 |     equeue_dispatch(&q, 0);
315 |     test_assert(!touched);
316 | 
317 |     equeue_destroy(&q);
318 | }
319 | 
320 | void cancel_unnecessarily_test(void) {
321 |     equeue_t q;
322 |     int err = equeue_create(&q, 2048);
323 |     test_assert(!err);
324 | 
325 |     int id = equeue_call(&q, pass_func, 0);
326 |     for (int i = 0; i < 5; i++) {
327 |         equeue_cancel(&q, id);
328 |     }
329 | 
330 |     id = equeue_call(&q, pass_func, 0);
331 |     equeue_dispatch(&q, 0);
332 |     for (int i = 0; i < 5; i++) {
333 |         equeue_cancel(&q, id);
334 |     }
335 | 
336 |     bool touched = false;
337 |     equeue_call(&q, simple_func, &touched);
338 |     for (int i = 0; i < 5; i++) {
339 |         equeue_cancel(&q, id);
340 |     }
341 | 
342 |     equeue_dispatch(&q, 0);
343 |     test_assert(touched);
344 | 
345 |     equeue_destroy(&q);
346 | }
347 | 
348 | void loop_protect_test(void) {
349 |     equeue_t q;
350 |     int err = equeue_create(&q, 2048);
351 |     test_assert(!err);
352 | 
353 |     bool touched = false;
354 |     equeue_call_every(&q, 0, simple_func, &touched);
355 | 
356 |     equeue_dispatch(&q, 0);
357 |     test_assert(touched);
358 | 
359 |     touched = false;
360 |     equeue_call_every(&q, 1, simple_func, &touched);
361 | 
362 |     equeue_dispatch(&q, 0);
363 |     test_assert(touched);
364 | 
365 |     equeue_destroy(&q);
366 | }
367 | 
368 | void break_test(void) {
369 |     equeue_t q;
370 |     int err = equeue_create(&q, 2048);
371 |     test_assert(!err);
372 | 
373 |     bool touched = false;
374 |     equeue_call_every(&q, 0, simple_func, &touched);
375 | 
376 |     equeue_break(&q);
377 |     equeue_dispatch(&q, -1);
378 |     test_assert(touched);
379 | 
380 |     equeue_destroy(&q);
381 | }
382 | 
383 | void break_no_windup_test(void) {
384 |     equeue_t q;
385 |     int err = equeue_create(&q, 2048);
386 |     test_assert(!err);
387 | 
388 |     int count = 0;
389 |     equeue_call_every(&q, 0, simple_func, &count);
390 | 
391 |     equeue_break(&q);
392 |     equeue_break(&q);
393 |     equeue_dispatch(&q, -1);
394 |     test_assert(count == 1);
395 | 
396 |     count = 0;
397 |     equeue_dispatch(&q, 550);
398 |     test_assert(count > 1);
399 | 
400 |     equeue_destroy(&q);
401 | }
402 | 
403 | void period_test(void) {
404 |     equeue_t q;
405 |     int err = equeue_create(&q, 2048);
406 |     test_assert(!err);
407 | 
408 |     int count = 0;
409 |     equeue_call_every(&q, 100, simple_func, &count);
410 | 
411 |     equeue_dispatch(&q, 550);
412 |     test_assert(count == 5);
413 | 
414 |     equeue_destroy(&q);
415 | }
416 | 
417 | void nested_test(void) {
418 |     equeue_t q;
419 |     int err = equeue_create(&q, 2048);
420 |     test_assert(!err);
421 | 
422 |     int touched = 0;
423 |     struct nest *nest = equeue_alloc(&q, sizeof(struct nest));
424 |     test_assert(nest);
425 |     nest->q = &q;
426 |     nest->cb = simple_func;
427 |     nest->data = &touched;
428 | 
429 |     int id = equeue_post(&q, nest_func, nest);
430 |     test_assert(id);
431 | 
432 |     equeue_dispatch(&q, 50);
433 |     test_assert(touched == 0);
434 | 
435 |     equeue_dispatch(&q, 50);
436 |     test_assert(touched == 1);
437 | 
438 |     touched = 0;
439 |     nest = equeue_alloc(&q, sizeof(struct nest));
440 |     test_assert(nest);
441 |     nest->q = &q;
442 |     nest->cb = simple_func;
443 |     nest->data = &touched;
444 | 
445 |     id = equeue_post(&q, nest_func, nest);
446 |     test_assert(id);
447 | 
448 |     equeue_dispatch(&q, 200);
449 |     test_assert(touched == 1);
450 | 
451 |     equeue_destroy(&q);
452 | }
453 | 
454 | void sloth_test(void) {
455 |     equeue_t q;
456 |     int err = equeue_create(&q, 2048);
457 |     test_assert(!err);
458 | 
459 |     int touched = 0;
460 |     int id = equeue_call(&q, sloth_func, &touched);
461 |     test_assert(id);
462 | 
463 |     id = equeue_call_in(&q, 50, simple_func, &touched);
464 |     test_assert(id);
465 | 
466 |     id = equeue_call_in(&q, 150, simple_func, &touched);
467 |     test_assert(id);
468 | 
469 |     equeue_dispatch(&q, 200);
470 |     test_assert(touched == 3);
471 | 
472 |     equeue_destroy(&q);
473 | }
474 | 
475 | void *multithread_thread(void *p) {
476 |     equeue_t *q = (equeue_t *)p;
477 |     equeue_dispatch(q, -1);
478 |     return 0;
479 | }
480 | 
481 | void multithread_test(void) {
482 |     equeue_t q;
483 |     int err = equeue_create(&q, 2048);
484 |     test_assert(!err);
485 | 
486 |     int touched = 0;
487 |     equeue_call_every(&q, 1, simple_func, &touched);
488 | 
489 |     pthread_t thread;
490 |     err = pthread_create(&thread, 0, multithread_thread, &q);
491 |     test_assert(!err);
492 | 
493 |     usleep(100000);
494 |     equeue_break(&q);
495 |     err = pthread_join(thread, 0);
496 |     test_assert(!err);
497 | 
498 |     test_assert(touched);
499 | 
500 |     equeue_destroy(&q);
501 | }
502 | 
503 | void background_func(void *p, int ms) {
504 |     *(unsigned *)p = ms;
505 | }
506 | 
507 | void background_test(void) {
508 |     equeue_t q;
509 |     int err = equeue_create(&q, 2048);
510 |     test_assert(!err);
511 | 
512 |     int id = equeue_call_in(&q, 200, pass_func, 0);
513 |     test_assert(id);
514 | 
515 |     unsigned ms;
516 |     equeue_background(&q, background_func, &ms);
517 |     test_assert(ms == 200);
518 | 
519 |     id = equeue_call_in(&q, 100, pass_func, 0);
520 |     test_assert(id);
521 |     test_assert(ms == 100);
522 | 
523 |     id = equeue_call(&q, pass_func, 0);
524 |     test_assert(id);
525 |     test_assert(ms == 0);
526 | 
527 |     equeue_dispatch(&q, 0);
528 |     test_assert(ms == 100);
529 | 
530 |     equeue_destroy(&q);
531 |     test_assert(ms == -1);
532 | }
533 | 
534 | void chain_test(void) {
535 |     equeue_t q1;
536 |     int err = equeue_create(&q1, 2048);
537 |     test_assert(!err);
538 | 
539 |     equeue_t q2;
540 |     err = equeue_create(&q2, 2048);
541 |     test_assert(!err);
542 | 
543 |     equeue_chain(&q2, &q1);
544 | 
545 |     int touched = 0;
546 | 
547 |     int id1 = equeue_call_in(&q1, 200, simple_func, &touched);
548 |     int id2 = equeue_call_in(&q2, 200, simple_func, &touched);
549 |     test_assert(id1 && id2);
550 | 
551 |     id1 = equeue_call(&q1, simple_func, &touched);
552 |     id2 = equeue_call(&q2, simple_func, &touched);
553 |     test_assert(id1 && id2);
554 | 
555 |     id1 = equeue_call_in(&q1, 50, simple_func, &touched);
556 |     id2 = equeue_call_in(&q2, 50, simple_func, &touched);
557 |     test_assert(id1 && id2);
558 | 
559 |     equeue_cancel(&q1, id1);
560 |     equeue_cancel(&q2, id2);
561 | 
562 |     id1 = equeue_call_in(&q1, 100, simple_func, &touched);
563 |     id2 = equeue_call_in(&q2, 100, simple_func, &touched);
564 |     test_assert(id1 && id2);
565 | 
566 |     equeue_dispatch(&q1, 300);
567 | 
568 |     test_assert(touched == 6);
569 | 
570 |     equeue_destroy(&q1);
571 |     equeue_destroy(&q2);
572 | }
573 | 
574 | void unchain_test(void) {
575 |     equeue_t q1;
576 |     int err = equeue_create(&q1, 2048);
577 |     test_assert(!err);
578 | 
579 |     equeue_t q2;
580 |     err = equeue_create(&q2, 2048);
581 |     test_assert(!err);
582 | 
583 |     equeue_chain(&q2, &q1);
584 | 
585 |     int touched = 0;
586 |     int id1 = equeue_call(&q1, simple_func, &touched);
587 |     int id2 = equeue_call(&q2, simple_func, &touched);
588 |     test_assert(id1 && id2);
589 | 
590 |     equeue_dispatch(&q1, 0);
591 |     test_assert(touched == 2);
592 | 
593 |     equeue_chain(&q2, 0);
594 |     equeue_chain(&q1, &q2);
595 | 
596 |     id1 = equeue_call(&q1, simple_func, &touched);
597 |     id2 = equeue_call(&q2, simple_func, &touched);
598 |     test_assert(id1 && id2);
599 | 
600 |     equeue_dispatch(&q2, 0);
601 |     test_assert(touched == 4);
602 | 
603 |     equeue_destroy(&q1);
604 |     equeue_destroy(&q2);
605 | }
606 | 
607 | struct count_and_queue {
608 |     int p;
609 |     equeue_t* q;
610 | };
611 | 
612 | void simple_breaker(void *p) {
613 |     struct count_and_queue* caq = (struct count_and_queue*)p;
614 |     equeue_break(caq->q);
615 |     usleep(100000);
616 |     caq->p++;
617 | }
618 | 
619 | void break_request_cleared_on_timeout(void) {
620 |     equeue_t q;
621 |     int err = equeue_create(&q, 2048);
622 |     test_assert(!err);
623 | 
624 |     struct count_and_queue pq;
625 |     pq.p = 0;
626 |     pq.q = &q;
627 | 
628 |     int id = equeue_call_every(&q, 100, simple_breaker, &pq);
629 | 
630 |     equeue_dispatch(&q, 100);
631 |     test_assert(pq.p == 1);
632 | 
633 |     equeue_cancel(&q, id);
634 | 
635 |     int count = 0;
636 |     equeue_call_every(&q, 100, simple_func, &count);
637 | 
638 |     equeue_dispatch(&q, 550);
639 |     test_assert(count > 1);
640 | 
641 |     equeue_destroy(&q);
642 | }
643 | 
644 | void sibling_test(void) {
645 |     equeue_t q;
646 |     int err = equeue_create(&q, 1024);
647 |     test_assert(!err);
648 | 
649 |     int id0 = equeue_call_in(&q, 1, pass_func, 0);
650 |     int id1 = equeue_call_in(&q, 1, pass_func, 0);
651 |     int id2 = equeue_call_in(&q, 1, pass_func, 0);
652 | 
653 |     struct equeue_event *e = q.queue;
654 | 
655 |     for (; e; e = e->next) {
656 |         for (struct equeue_event *s = e->sibling; s; s = s->sibling) {
657 |             test_assert(!s->next);
658 |         }
659 |     }
660 |     equeue_cancel(&q, id0);
661 |     equeue_cancel(&q, id1);
662 |     equeue_cancel(&q, id2);
663 |     equeue_destroy(&q);
664 | }
665 | 
666 | // Barrage tests
667 | void simple_barrage_test(int N) {
668 |     equeue_t q;
669 |     int err = equeue_create(&q, N*(EQUEUE_EVENT_SIZE+sizeof(struct timing)));
670 |     test_assert(!err);
671 | 
672 |     for (int i = 0; i < N; i++) {
673 |         struct timing *timing = equeue_alloc(&q, sizeof(struct timing));
674 |         test_assert(timing);
675 | 
676 |         timing->tick = equeue_tick();
677 |         timing->delay = (i+1)*1000;
678 |         equeue_event_delay(timing, timing->delay);
679 |         equeue_event_period(timing, timing->delay);
680 | 
681 |         int id = equeue_post(&q, timing_func, timing);
682 |         test_assert(id);
683 |     }
684 | 
685 |     equeue_dispatch(&q, N*1000);
686 | 
687 |     equeue_destroy(&q);
688 | }
689 | 
690 | void fragmenting_barrage_test(int N) {
691 |     equeue_t q;
692 |     int err = equeue_create(&q,
693 |             2*N*(EQUEUE_EVENT_SIZE+sizeof(struct fragment)+N*sizeof(int)));
694 |     test_assert(!err);
695 | 
696 |     for (int i = 0; i < N; i++) {
697 |         size_t size = sizeof(struct fragment) + i*sizeof(int);
698 |         struct fragment *fragment = equeue_alloc(&q, size);
699 |         test_assert(fragment);
700 | 
701 |         fragment->q = &q;
702 |         fragment->size = size;
703 |         fragment->timing.tick = equeue_tick();
704 |         fragment->timing.delay = (i+1)*1000;
705 |         equeue_event_delay(fragment, fragment->timing.delay);
706 | 
707 |         int id = equeue_post(&q, fragment_func, fragment);
708 |         test_assert(id);
709 |     }
710 | 
711 |     equeue_dispatch(&q, N*1000);
712 | 
713 |     equeue_destroy(&q);
714 | }
715 | 
716 | struct ethread {
717 |     pthread_t thread;
718 |     equeue_t *q;
719 |     int ms;
720 | };
721 | 
722 | static void *ethread_dispatch(void *p) {
723 |     struct ethread *t = (struct ethread*)p;
724 |     equeue_dispatch(t->q, t->ms);
725 |     return 0;
726 | }
727 | 
728 | void multithreaded_barrage_test(int N) {
729 |     equeue_t q;
730 |     int err = equeue_create(&q, N*(EQUEUE_EVENT_SIZE+sizeof(struct timing)));
731 |     test_assert(!err);
732 | 
733 |     struct ethread t;
734 |     t.q = &q;
735 |     t.ms = N*1000;
736 |     err = pthread_create(&t.thread, 0, ethread_dispatch, &t);
737 |     test_assert(!err);
738 | 
739 |     for (int i = 0; i < N; i++) {
740 |         struct timing *timing = equeue_alloc(&q, sizeof(struct timing));
741 |         test_assert(timing);
742 | 
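        // These posts intentionally race against the dispatch thread,
        // which is already running at this point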
743 |         timing->tick = equeue_tick();
744 |         timing->delay = (i+1)*1000;
745 |         equeue_event_delay(timing, timing->delay);
746 |         equeue_event_period(timing, timing->delay);
747 | 
748 |         int id = equeue_post(&q, timing_func, timing);
749 |         test_assert(id);
750 |     }
751 | 
752 |     err = pthread_join(t.thread, 0);
753 |     test_assert(!err);
754 | 
755 |     equeue_destroy(&q);
756 | }
757 | 
758 | int main(void) {
759 |     printf("beginning tests...\n");
760 | 
761 |     test_run(simple_call_test);
762 |     test_run(simple_call_in_test);
763 |     test_run(simple_call_every_test);
764 |     test_run(simple_post_test);
765 |     test_run(destructor_test);
766 |     test_run(allocation_failure_test);
767 |     test_run(cancel_test, 20);
768 |     test_run(cancel_inflight_test);
769 |     test_run(cancel_unnecessarily_test);
770 |     test_run(loop_protect_test);
771 |     test_run(break_test);
772 |     test_run(break_no_windup_test);
773 |     test_run(period_test);
774 |     test_run(nested_test);
775 |     test_run(sloth_test);
776 |     test_run(background_test);
777 |     test_run(chain_test);
778 |     test_run(unchain_test);
779 |     test_run(multithread_test);
780 |     test_run(break_request_cleared_on_timeout);
781 |     test_run(sibling_test);
782 |     test_run(simple_barrage_test, 10);
783 |     test_run(fragmenting_barrage_test, 10);
784 |     test_run(multithreaded_barrage_test, 10);
785 |     printf("done!\n");
786 |     return test_failure;
787 | }
788 | 
--------------------------------------------------------------------------------
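As a quick usage sketch (assumptions: a POSIX host with gcc; the repository's
Makefile may provide equivalent targets), the test harness above can be built
and run directly against the POSIX port:

    gcc -I. equeue.c equeue_posix.c tests/tests.c -lpthread -o tests
    ./tests

Substituting tests/prof.c for tests/tests.c builds the profiling harness
instead; note that its rdtsc-based prof_cycle() assumes an x86 host.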