├── .gitignore
├── COPYING
├── FUNDING.yml
├── README.md
├── examples
│   ├── meson.build
│   ├── nested-ticker.c
│   ├── reader.c
│   └── ticker.c
├── include
│   ├── aml.h
│   ├── backend.h
│   ├── sys
│   │   └── queue.h
│   └── thread-pool.h
├── meson.build
├── meson_options.txt
└── src
    ├── aml.c
    ├── epoll.c
    ├── kqueue.c
    ├── posix.c
    └── thread-pool.c

/.gitignore:
--------------------------------------------------------------------------------
1 | build*
2 | .ycm_extra_conf.py
3 | .clang_complete
4 | 
--------------------------------------------------------------------------------
/COPYING:
--------------------------------------------------------------------------------
1 | Copyright (c) 2020 - 2022 Andri Yngvason
2 | 
3 | Permission to use, copy, modify, and/or distribute this software for any purpose
4 | with or without fee is hereby granted, provided that the above copyright notice
5 | and this permission notice appear in all copies.
6 | 
7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
8 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
9 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
10 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
11 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
12 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
13 | THIS SOFTWARE.
14 | 
--------------------------------------------------------------------------------
/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: any1
2 | patreon: andriyngvason
3 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Andri's Main Loop
2 | 
3 | Goals:
4 | * Portability
5 | * Utility
6 | * Simplicity
7 | 
8 | Non-goals:
9 | * MS Windows (TM) support
10 | * Solving the C10K problem
11 | 
12 | Features:
13 | * File descriptor event handlers
14 | * Timers
15 | * Tickers
16 | * Signal handlers
17 | * Idle dispatch callbacks
18 | * Thread pool
19 | * Interoperability with other event loops
20 | 
--------------------------------------------------------------------------------
/examples/meson.build:
--------------------------------------------------------------------------------
1 | executable(
2 | 	'ticker',
3 | 	[
4 | 		'ticker.c',
5 | 	],
6 | 	dependencies: [
7 | 		aml_dep,
8 | 		threads,
9 | 	]
10 | )
11 | 
12 | executable(
13 | 	'reader',
14 | 	[
15 | 		'reader.c',
16 | 	],
17 | 	dependencies: [
18 | 		aml_dep,
19 | 		threads,
20 | 	]
21 | )
22 | 
23 | executable(
24 | 	'nested-ticker',
25 | 	[
26 | 		'nested-ticker.c',
27 | 	],
28 | 	dependencies: [
29 | 		aml_dep,
30 | 		threads,
31 | 	]
32 | )
33 | 
--------------------------------------------------------------------------------
/examples/nested-ticker.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <signal.h>
3 | #include <assert.h>
4 | #include <poll.h>
5 | #include <aml.h>
6 | 
7 | static int do_exit = 0;
8 | 
9 | static void on_tick(struct aml_ticker* ticker)
10 | {
11 | 	int* count_ptr = aml_get_userdata(ticker);
12 | 
13 | 	*count_ptr += 1;
14 | 
15 | 	printf("tick %d!\n", *count_ptr);
16 | 
17 | 	if (*count_ptr >= 10)
18 | 		aml_exit(aml_get_default());
19 | }
20 | 
21 | static void on_sigint(struct aml_signal* sig)
22 | {
23 | 	do_exit = 1;
24 | }
25 | 
26 | int main()
27 | {
28 | 	struct aml* aml = aml_new();
29 | 	if (!aml)
30 | 		return 1;
31 | 
32 | 	aml_set_default(aml);
33 | 
34 | 	int fd = aml_get_fd(aml);
35 | 	assert(fd >= 0);
36 | 
37 | 	int count = 0;
38 | 
39 | 	struct aml_signal* sig = aml_signal_new(SIGINT, on_sigint, NULL, NULL);
40 | 	if (!sig)
41 | 		goto failure;
42 | 
43 | 	aml_start(aml, sig);
44 | 	aml_unref(sig);
45 | 
46 | 	struct aml_ticker* ticker = aml_ticker_new(1000000, on_tick, &count, NULL);
47 | 	if (!ticker)
48 | 		goto failure;
49 | 
50 | 	aml_start(aml, ticker);
51 | 	aml_unref(ticker);
52 | 
53 | 	struct pollfd pollfd = {
54 | 		.fd = fd,
55 | 		.events = POLLIN,
56 | 	};
57 | 
58 | 	while (!do_exit) {
59 | 		aml_poll(aml, 0);
60 | 		aml_dispatch(aml);
61 | 
62 | 		int nfds = poll(&pollfd, 1, -1);
63 | 		if (nfds != 1)
64 | 			continue;
65 | 	}
66 | 
67 | 	printf("Exiting...\n");
68 | 
69 | 	aml_unref(aml);
70 | 	return 0;
71 | 
72 | failure:
73 | 	aml_unref(aml);
74 | 	return 1;
75 | }
76 | 
--------------------------------------------------------------------------------
/examples/reader.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2020 Andri Yngvason
3 |  *
4 |  * Permission to use, copy, modify, and/or distribute this software for any
5 |  * purpose with or without fee is hereby granted, provided that the above
6 |  * copyright notice and this permission notice appear in all copies.
7 |  *
8 |  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 |  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
10 |  * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 |  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
12 |  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
13 |  * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
14 |  * PERFORMANCE OF THIS SOFTWARE.
15 |  */
16 | 
17 | #include <stdio.h>
18 | #include <string.h>
19 | #include <signal.h>
20 | #include <aml.h>
21 | 
22 | static void on_line(struct aml_handler* handler)
23 | {
24 | 	char line[256];
25 | 	fscanf(stdin, "%s", line);
26 | 
27 | 	printf("Got line: %s\n", line);
28 | 
29 | 	if (strncmp(line, "exit", sizeof(line)) == 0)
30 | 		aml_exit(aml_get_default());
31 | }
32 | 
33 | static void on_sigint(struct aml_signal* sig)
34 | {
35 | 	aml_exit(aml_get_default());
36 | }
37 | 
38 | int main()
39 | {
40 | 	struct aml* aml = aml_new();
41 | 	if (!aml)
42 | 		return 1;
43 | 
44 | 	aml_set_default(aml);
45 | 
46 | 	struct aml_signal* sig = aml_signal_new(SIGINT, on_sigint, NULL, NULL);
47 | 	if (!sig)
48 | 		goto failure;
49 | 
50 | 	aml_start(aml, sig);
51 | 	aml_unref(sig);
52 | 
53 | 	struct aml_handler* handler =
54 | 		aml_handler_new(fileno(stdin), on_line, NULL, NULL);
55 | 	if (!handler)
56 | 		goto failure;
57 | 
58 | 	aml_start(aml, handler);
59 | 	aml_unref(handler);
60 | 
61 | 	aml_run(aml);
62 | 
63 | 	printf("Exiting...\n");
64 | 
65 | 	aml_unref(aml);
66 | 	return 0;
67 | 
68 | failure:
69 | 	aml_unref(aml);
70 | 	return 1;
71 | }
72 | 
--------------------------------------------------------------------------------
/examples/ticker.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <signal.h>
3 | #include <aml.h>
4 | 
5 | static void on_tick(struct aml_ticker* ticker)
6 | {
7 | 	int* count_ptr = aml_get_userdata(ticker);
8 | 
9 | 	*count_ptr += 1;
10 | 
11 | 	printf("tick %d!\n", *count_ptr);
12 | 
13 | 	if (*count_ptr >= 10)
14 | 		aml_exit(aml_get_default());
15 | }
16 | 
17 | static void on_sigint(struct aml_signal* sig)
18 | {
19 | 	aml_exit(aml_get_default());
20 | }
21 | 
22 | int main()
23 | {
24 | 	struct aml* aml = aml_new();
25 | 	if (!aml)
26 | 		return 1;
27 | 
28 | 	aml_set_default(aml);
29 | 
30 | 	int count = 0;
31 | 
32 | 	struct aml_signal* sig = aml_signal_new(SIGINT, on_sigint, NULL, NULL);
33 | 	if (!sig)
34 | 		goto failure;
35 | 
36 | 	aml_start(aml, sig);
37 | 	aml_unref(sig);
38 | 
39 | 	struct aml_ticker* ticker = aml_ticker_new(1000000, on_tick, &count, NULL);
40 | 	if (!ticker)
41 | 		goto failure;
42 | 
43 | 	aml_start(aml, ticker);
44 | 	aml_unref(ticker);
45 | 
46 | 	aml_run(aml);
47 | 
48 | 	printf("Exiting...\n");
49 | 
50 | 	aml_unref(aml);
51 | 	return 0;
52 | 
53 | failure:
54 | 	aml_unref(aml);
55 | 	return 1;
56 | }
57 | 
--------------------------------------------------------------------------------
/include/aml.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2020 - 2024 Andri Yngvason
3 |  *
4 |  * Permission to use, copy, modify, and/or distribute this software for any
5 |  * purpose with or without fee is hereby granted, provided that the above
6 |  * copyright notice and this permission notice appear in all copies.
7 |  *
8 |  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 |  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
10 |  * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 |  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
12 |  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
13 |  * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
14 |  * PERFORMANCE OF THIS SOFTWARE.
15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #define aml_ref(obj) _Generic((obj), \ 24 | struct aml*: aml_loop_ref, \ 25 | struct aml_handler*: aml_handler_ref, \ 26 | struct aml_timer*: aml_timer_ref, \ 27 | struct aml_ticker*: aml_ticker_ref, \ 28 | struct aml_signal*: aml_signal_ref, \ 29 | struct aml_work*: aml_work_ref, \ 30 | struct aml_idle*: aml_idle_ref \ 31 | )(obj) 32 | 33 | #define aml_unref(obj) _Generic((obj), \ 34 | struct aml*: aml_loop_unref, \ 35 | struct aml_handler*: aml_handler_unref, \ 36 | struct aml_timer*: aml_timer_unref, \ 37 | struct aml_ticker*: aml_ticker_unref, \ 38 | struct aml_signal*: aml_signal_unref, \ 39 | struct aml_work*: aml_work_unref, \ 40 | struct aml_idle*: aml_idle_unref \ 41 | )(obj) 42 | 43 | #define aml_set_userdata(aml, obj) _Generic((obj), \ 44 | struct aml*: aml_loop_set_userdata, \ 45 | struct aml_handler*: aml_handler_set_userdata, \ 46 | struct aml_timer*: aml_timer_set_userdata, \ 47 | struct aml_ticker*: aml_ticker_set_userdata, \ 48 | struct aml_signal*: aml_signal_set_userdata, \ 49 | struct aml_work*: aml_work_set_userdata, \ 50 | struct aml_idle*: aml_idle_set_userdata \ 51 | )(aml, obj) 52 | 53 | #define aml_get_userdata(obj) _Generic((obj), \ 54 | struct aml*: aml_loop_get_userdata, \ 55 | const struct aml*: aml_loop_get_userdata, \ 56 | struct aml_handler*: aml_handler_get_userdata, \ 57 | const struct aml_handler*: aml_handler_get_userdata, \ 58 | struct aml_timer*: aml_timer_get_userdata, \ 59 | const struct aml_timer*: aml_timer_get_userdata, \ 60 | struct aml_ticker*: aml_ticker_get_userdata, \ 61 | const struct aml_ticker*: aml_ticker_get_userdata, \ 62 | struct aml_signal*: aml_signal_get_userdata, \ 63 | const struct aml_signal*: aml_signal_get_userdata, \ 64 | struct aml_work*: aml_work_get_userdata, \ 65 | const struct aml_work*: aml_work_get_userdata, \ 66 | struct aml_idle*: aml_idle_get_userdata, \ 67 | const struct aml_idle*: aml_idle_get_userdata \ 68 | )(obj) 69 | 70 | #define aml_get_fd(obj) _Generic((obj), \ 71 | struct aml*: aml_loop_get_fd, \ 72 | const struct aml*: aml_loop_get_fd, \ 73 | struct aml_handler*: aml_handler_get_fd, \ 74 | const struct aml_handler*: aml_handler_get_fd \ 75 | )(obj) 76 | 77 | #define aml_set_duration(obj, duration) _Generic((obj), \ 78 | struct aml_timer*: aml_timer_set_duration, \ 79 | struct aml_ticker*: aml_ticker_set_duration \ 80 | )(obj, duration) 81 | 82 | #define aml_start(aml, obj) _Generic((obj), \ 83 | struct aml_handler*: aml_start_handler, \ 84 | struct aml_timer*: aml_start_timer, \ 85 | struct aml_ticker*: aml_start_ticker, \ 86 | struct aml_signal*: aml_start_signal, \ 87 | struct aml_work*: aml_start_work, \ 88 | struct aml_idle*: aml_start_idle \ 89 | )(aml, obj) 90 | 91 | #define aml_stop(aml, obj) _Generic((obj), \ 92 | struct aml_handler*: aml_stop_handler, \ 93 | struct aml_timer*: aml_stop_timer, \ 94 | struct aml_ticker*: aml_stop_ticker, \ 95 | struct aml_signal*: aml_stop_signal, \ 96 | struct aml_work*: aml_stop_work, \ 97 | struct aml_idle*: aml_stop_idle \ 98 | )(aml, obj) 99 | 100 | #define aml_is_started(aml, obj) _Generic((obj), \ 101 | struct aml_handler*: aml_is_handler_started, \ 102 | struct aml_timer*: aml_is_timer_started, \ 103 | struct aml_ticker*: aml_is_ticker_started, \ 104 | struct aml_signal*: aml_is_signal_started, \ 105 | struct aml_work*: aml_is_work_started, \ 106 | struct aml_idle*: aml_is_idle_started \ 107 | )(aml, obj) 108 | 109 | struct aml; 110 | struct aml_handler; 111 | struct 
aml_timer; 112 | struct aml_ticker; 113 | struct aml_signal; 114 | struct aml_work; 115 | struct aml_idle; 116 | 117 | enum aml_event { 118 | AML_EVENT_NONE = 0, 119 | AML_EVENT_READ = 1 << 0, 120 | AML_EVENT_WRITE = 1 << 1, 121 | AML_EVENT_OOB = 1 << 2, 122 | }; 123 | 124 | typedef void (*aml_free_fn)(void*); 125 | 126 | extern const char aml_version[]; 127 | 128 | /* Create a new main loop instance */ 129 | struct aml* aml_new(void); 130 | 131 | /* The backend should supply a minimum of n worker threads in its thread pool. 132 | * 133 | * If n == -1, the backend should supply as many workers as there are available 134 | * CPU cores/threads on the system. 135 | */ 136 | int aml_require_workers(struct aml*, int n); 137 | 138 | /* Get/set the default main loop instance */ 139 | void aml_set_default(struct aml*); 140 | struct aml* aml_get_default(void); 141 | 142 | /* Check if there are pending events. The user should call aml_dispatch() 143 | * afterwards if there are any pending events. 144 | * 145 | * This function behaves like poll(): it will wait for either a timeout (in µs) 146 | * or a signal. Will block indefinitely if timeout is -1. 147 | * 148 | * Returns: -1 on timeout or signal; otherwise number of pending events. 149 | */ 150 | int aml_poll(struct aml*, int64_t timeout); 151 | 152 | /* This is a convenience function that calls aml_poll() and aml_dispatch() in 153 | * a loop until aml_exit() is called. 154 | */ 155 | int aml_run(struct aml*); 156 | 157 | /* Instruct the main loop to exit. 158 | */ 159 | void aml_exit(struct aml*); 160 | 161 | /* Dispatch pending events */ 162 | void aml_dispatch(struct aml* self); 163 | 164 | /* Trigger an immediate return from aml_poll(). 165 | */ 166 | void aml_interrupt(struct aml*); 167 | 168 | /* Increment the reference count by one. 169 | * 170 | * Returns how many references there were BEFORE the call. 171 | */ 172 | int aml_loop_ref(struct aml* loop); 173 | int aml_handler_ref(struct aml_handler* obj); 174 | int aml_timer_ref(struct aml_timer* obj); 175 | int aml_ticker_ref(struct aml_ticker* obj); 176 | int aml_signal_ref(struct aml_signal* obj); 177 | int aml_work_ref(struct aml_work* obj); 178 | int aml_idle_ref(struct aml_idle* obj); 179 | 180 | /* Decrement the reference count by one. 181 | * 182 | * Returns how many references there are AFTER the call. 183 | */ 184 | int aml_loop_unref(struct aml* loop); 185 | int aml_handler_unref(struct aml_handler* obj); 186 | int aml_timer_unref(struct aml_timer* obj); 187 | int aml_ticker_unref(struct aml_ticker* obj); 188 | int aml_signal_unref(struct aml_signal* obj); 189 | int aml_work_unref(struct aml_work* obj); 190 | int aml_idle_unref(struct aml_idle* obj); 191 | 192 | /* The following calls create event handler objects. 193 | * 194 | * An object will have a reference count of 1 upon creation and must be freed 195 | * using aml_unref(). 
196 | */ 197 | struct aml_handler* aml_handler_new(int fd, void (*cb)(struct aml_handler*), 198 | void* userdata, aml_free_fn); 199 | 200 | struct aml_timer* aml_timer_new(uint64_t timeout, void (*cb)(struct aml_timer*), 201 | void* userdata, aml_free_fn); 202 | 203 | struct aml_ticker* aml_ticker_new(uint64_t period, void (*cb)(struct aml_ticker*), 204 | void* userdata, aml_free_fn); 205 | 206 | struct aml_signal* aml_signal_new(int signo, void (*cb)(struct aml_signal*), 207 | void* userdata, aml_free_fn); 208 | 209 | struct aml_work* aml_work_new(void (*work_fn)(struct aml_work*), 210 | void (*done_fn)(struct aml_work*), void* userdata, aml_free_fn); 211 | 212 | struct aml_idle* aml_idle_new(void (*cb)(struct aml_idle*), void* userdata, 213 | aml_free_fn); 214 | 215 | /* Get the file descriptor associated with either a handler or the main loop. 216 | * 217 | * Calling this on objects of other types is illegal and may cause SIGABRT to 218 | * be raised. 219 | * 220 | * The fd returned from the main loop object can be used in other main loops to 221 | * monitor events on an aml main loop. 222 | */ 223 | int aml_loop_get_fd(const struct aml* self); 224 | int aml_handler_get_fd(const struct aml_handler* self); 225 | 226 | /* Associate random data with an object. 227 | * 228 | * If a free function is defined, it will be called to free the assigned 229 | * userdata when the object is freed as a result of aml_unref(). 230 | */ 231 | void aml_loop_set_userdata(struct aml* obj, void* userdata, aml_free_fn); 232 | void aml_handler_set_userdata(struct aml_handler* obj, void* userdata, aml_free_fn); 233 | void aml_timer_set_userdata(struct aml_timer* obj, void* userdata, aml_free_fn); 234 | void aml_ticker_set_userdata(struct aml_ticker* obj, void* userdata, aml_free_fn); 235 | void aml_signal_set_userdata(struct aml_signal* obj, void* userdata, aml_free_fn); 236 | void aml_work_set_userdata(struct aml_work* obj, void* userdata, aml_free_fn); 237 | void aml_idle_set_userdata(struct aml_idle* obj, void* userdata, aml_free_fn); 238 | 239 | void* aml_loop_get_userdata(const struct aml* obj); 240 | void* aml_handler_get_userdata(const struct aml_handler* obj); 241 | void* aml_timer_get_userdata(const struct aml_timer* obj); 242 | void* aml_ticker_get_userdata(const struct aml_ticker* obj); 243 | void* aml_signal_get_userdata(const struct aml_signal* obj); 244 | void* aml_work_get_userdata(const struct aml_work* obj); 245 | void* aml_idle_get_userdata(const struct aml_idle* obj); 246 | 247 | void aml_set_event_mask(struct aml_handler* obj, enum aml_event mask); 248 | enum aml_event aml_get_event_mask(const struct aml_handler* obj); 249 | 250 | /* Check which events are pending on an fd event handler. 251 | */ 252 | enum aml_event aml_get_revents(const struct aml_handler* obj); 253 | 254 | /* Set timeout/period of a timer/ticker in µs 255 | * 256 | * Calling this on a started timer/ticker yields undefined behaviour 257 | */ 258 | void aml_timer_set_duration(struct aml_timer* self, uint64_t value); 259 | void aml_ticker_set_duration(struct aml_ticker* self, uint64_t value); 260 | 261 | /* Start an event handler. 262 | * 263 | * This increases the reference count on the handler object. 264 | * 265 | * Returns: 0 on success, -1 if the handler is already started. 
266 | */ 267 | int aml_start_handler(struct aml*, struct aml_handler*); 268 | int aml_start_timer(struct aml*, struct aml_timer*); 269 | int aml_start_ticker(struct aml*, struct aml_ticker*); 270 | int aml_start_signal(struct aml*, struct aml_signal*); 271 | int aml_start_work(struct aml*, struct aml_work*); 272 | int aml_start_idle(struct aml*, struct aml_idle*); 273 | 274 | /* Stop an event handler. 275 | * 276 | * This decreases the reference count on a handler object. 277 | * 278 | * The callback or done function will not be run after this is called. However, 279 | * for aml_work, the work function may already be executing and it will be 280 | * allowed to complete. 281 | * 282 | * Returns: 0 on success, -1 if the handler is already stopped. 283 | */ 284 | int aml_stop_handler(struct aml*, struct aml_handler*); 285 | int aml_stop_timer(struct aml*, struct aml_timer*); 286 | int aml_stop_ticker(struct aml*, struct aml_ticker*); 287 | int aml_stop_signal(struct aml*, struct aml_signal*); 288 | int aml_stop_work(struct aml*, struct aml_work*); 289 | int aml_stop_idle(struct aml*, struct aml_idle*); 290 | 291 | /* Check if an event handler is started. 292 | * 293 | * Returns: true if it has been started, false otherwise. 294 | */ 295 | bool aml_is_handler_started(struct aml*, struct aml_handler* obj); 296 | bool aml_is_timer_started(struct aml*, struct aml_timer* obj); 297 | bool aml_is_ticker_started(struct aml*, struct aml_ticker* obj); 298 | bool aml_is_signal_started(struct aml*, struct aml_signal* obj); 299 | bool aml_is_work_started(struct aml*, struct aml_work* obj); 300 | bool aml_is_idle_started(struct aml*, struct aml_idle* obj); 301 | 302 | /* Get the signal assigned to a signal handler. 303 | */ 304 | int aml_get_signo(const struct aml_signal* sig); 305 | -------------------------------------------------------------------------------- /include/backend.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Andri Yngvason 3 | * 4 | * Permission to use, copy, modify, and/or distribute this software for any 5 | * purpose with or without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 9 | * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY 10 | * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 11 | * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 12 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 13 | * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 14 | * PERFORMANCE OF THIS SOFTWARE. 
15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | 21 | struct aml; 22 | struct aml_handler; 23 | struct aml_signal; 24 | struct aml_work; 25 | 26 | typedef void (*aml_callback_fn)(void* obj); 27 | 28 | enum { 29 | AML_BACKEND_EDGE_TRIGGERED = 1 << 0, 30 | }; 31 | 32 | struct aml_backend { 33 | uint32_t flags; 34 | uint32_t clock; 35 | void* (*new_state)(struct aml*); 36 | void (*del_state)(void* state); 37 | int (*get_fd)(const void* state); 38 | int (*poll)(void* state, int timeout); 39 | void (*exit)(void* state); 40 | int (*add_fd)(void* state, struct aml_handler*); 41 | int (*mod_fd)(void* state, struct aml_handler*); 42 | int (*del_fd)(void* state, struct aml_handler*); 43 | int (*add_signal)(void* state, struct aml_signal*); 44 | int (*del_signal)(void* state, struct aml_signal*); 45 | int (*set_deadline)(void* state, uint64_t deadline); 46 | void (*post_dispatch)(void* state); 47 | void (*interrupt)(void* state); 48 | int (*thread_pool_acquire)(struct aml*, int n_threads); 49 | void (*thread_pool_release)(struct aml*); 50 | int (*thread_pool_enqueue)(struct aml*, struct aml_work*); 51 | }; 52 | 53 | /* These are for setting random data required by the backend implementation. 54 | * 55 | * The backend implementation shall NOT use aml_set_userdata() or 56 | * aml_get_userdata(). 57 | */ 58 | void aml_set_backend_data(void* ptr, void* data); 59 | void* aml_get_backend_data(const void* ptr); 60 | 61 | void* aml_get_backend_state(const struct aml*); 62 | 63 | /* Get the work function pointer assigned to a work object. 64 | */ 65 | aml_callback_fn aml_get_work_fn(const struct aml_work*); 66 | 67 | /* revents is only used for fd events. Zero otherwise. 68 | * This function may be called inside a signal handler 69 | */ 70 | void aml_emit(struct aml* self, void* obj, uint32_t revents); 71 | 72 | /* Get time in milliseconds until the next timeout event. 73 | * 74 | * If timeout is -1, this returns: 75 | * -1 if no event is pending 76 | * 0 if a timer has already expired 77 | * time until next event, otherwise 78 | * 79 | * Otherwise, if timeout is less than the time until the next event, timeout is 80 | * returned, if it is greater, then the time until next event is returned. 81 | */ 82 | int aml_get_next_timeout(struct aml* self, int timeout); 83 | 84 | /* Create a new weak reference to the object. 85 | * 86 | * The reference object must be deleted using aml_weak_ref_del(). 87 | */ 88 | struct aml_weak_ref* aml_weak_ref_new(void* obj); 89 | 90 | /* Delete a weak reference created by aml_weak_ref_new(). 91 | */ 92 | void aml_weak_ref_del(struct aml_weak_ref* self); 93 | 94 | /* Try to get a new strong reference from a weak reference object. 95 | * 96 | * If the weak reference is still valid, the reference count on the returned 97 | * aml object will be increased by one. Otherwise NULL is returned. 98 | */ 99 | void* aml_weak_ref_read(struct aml_weak_ref* self); 100 | -------------------------------------------------------------------------------- /include/sys/queue.h: -------------------------------------------------------------------------------- 1 | /*- 2 | * SPDX-License-Identifier: BSD-3-Clause 3 | * 4 | * Copyright (c) 1991, 1993 5 | * The Regents of the University of California. All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without 8 | * modification, are permitted provided that the following conditions 9 | * are met: 10 | * 1. 
Redistributions of source code must retain the above copyright 11 | * notice, this list of conditions and the following disclaimer. 12 | * 2. Redistributions in binary form must reproduce the above copyright 13 | * notice, this list of conditions and the following disclaimer in the 14 | * documentation and/or other materials provided with the distribution. 15 | * 3. Neither the name of the University nor the names of its contributors 16 | * may be used to endorse or promote products derived from this software 17 | * without specific prior written permission. 18 | * 19 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 | * SUCH DAMAGE. 30 | * 31 | * @(#)queue.h 8.5 (Berkeley) 8/20/94 32 | * $FreeBSD$ 33 | */ 34 | 35 | #ifndef _SYS_QUEUE_H_ 36 | #define _SYS_QUEUE_H_ 37 | 38 | /* 39 | * This file defines four types of data structures: singly-linked lists, 40 | * singly-linked tail queues, lists and tail queues. 41 | * 42 | * A singly-linked list is headed by a single forward pointer. The elements 43 | * are singly linked for minimum space and pointer manipulation overhead at 44 | * the expense of O(n) removal for arbitrary elements. New elements can be 45 | * added to the list after an existing element or at the head of the list. 46 | * Elements being removed from the head of the list should use the explicit 47 | * macro for this purpose for optimum efficiency. A singly-linked list may 48 | * only be traversed in the forward direction. Singly-linked lists are ideal 49 | * for applications with large datasets and few or no removals or for 50 | * implementing a LIFO queue. 51 | * 52 | * A singly-linked tail queue is headed by a pair of pointers, one to the 53 | * head of the list and the other to the tail of the list. The elements are 54 | * singly linked for minimum space and pointer manipulation overhead at the 55 | * expense of O(n) removal for arbitrary elements. New elements can be added 56 | * to the list after an existing element, at the head of the list, or at the 57 | * end of the list. Elements being removed from the head of the tail queue 58 | * should use the explicit macro for this purpose for optimum efficiency. 59 | * A singly-linked tail queue may only be traversed in the forward direction. 60 | * Singly-linked tail queues are ideal for applications with large datasets 61 | * and few or no removals or for implementing a FIFO queue. 62 | * 63 | * A list is headed by a single forward pointer (or an array of forward 64 | * pointers for a hash table header). The elements are doubly linked 65 | * so that an arbitrary element can be removed without a need to 66 | * traverse the list. New elements can be added to the list before 67 | * or after an existing element or at the head of the list. A list 68 | * may be traversed in either direction. 
69 | * 70 | * A tail queue is headed by a pair of pointers, one to the head of the 71 | * list and the other to the tail of the list. The elements are doubly 72 | * linked so that an arbitrary element can be removed without a need to 73 | * traverse the list. New elements can be added to the list before or 74 | * after an existing element, at the head of the list, or at the end of 75 | * the list. A tail queue may be traversed in either direction. 76 | * 77 | * For details on the use of these macros, see the queue(3) manual page. 78 | * 79 | * Below is a summary of implemented functions where: 80 | * + means the macro is available 81 | * - means the macro is not available 82 | * s means the macro is available but is slow (runs in O(n) time) 83 | * 84 | * SLIST LIST STAILQ TAILQ 85 | * _HEAD + + + + 86 | * _CLASS_HEAD + + + + 87 | * _HEAD_INITIALIZER + + + + 88 | * _ENTRY + + + + 89 | * _CLASS_ENTRY + + + + 90 | * _INIT + + + + 91 | * _EMPTY + + + + 92 | * _FIRST + + + + 93 | * _NEXT + + + + 94 | * _PREV - + - + 95 | * _LAST - - + + 96 | * _LAST_FAST - - - + 97 | * _FOREACH + + + + 98 | * _FOREACH_FROM + + + + 99 | * _FOREACH_SAFE + + + + 100 | * _FOREACH_FROM_SAFE + + + + 101 | * _FOREACH_REVERSE - - - + 102 | * _FOREACH_REVERSE_FROM - - - + 103 | * _FOREACH_REVERSE_SAFE - - - + 104 | * _FOREACH_REVERSE_FROM_SAFE - - - + 105 | * _INSERT_HEAD + + + + 106 | * _INSERT_BEFORE - + - + 107 | * _INSERT_AFTER + + + + 108 | * _INSERT_TAIL - - + + 109 | * _CONCAT s s + + 110 | * _REMOVE_AFTER + - + - 111 | * _REMOVE_HEAD + - + - 112 | * _REMOVE s + s + 113 | * _SWAP + + + + 114 | * 115 | */ 116 | #ifdef QUEUE_MACRO_DEBUG 117 | #warn Use QUEUE_MACRO_DEBUG_TRACE and/or QUEUE_MACRO_DEBUG_TRASH 118 | #define QUEUE_MACRO_DEBUG_TRACE 119 | #define QUEUE_MACRO_DEBUG_TRASH 120 | #endif 121 | 122 | #ifdef QUEUE_MACRO_DEBUG_TRACE 123 | /* Store the last 2 places the queue element or head was altered */ 124 | struct qm_trace { 125 | unsigned long lastline; 126 | unsigned long prevline; 127 | const char *lastfile; 128 | const char *prevfile; 129 | }; 130 | 131 | #define TRACEBUF struct qm_trace trace; 132 | #define TRACEBUF_INITIALIZER { __LINE__, 0, __FILE__, NULL } , 133 | 134 | #define QMD_TRACE_HEAD(head) do { \ 135 | (head)->trace.prevline = (head)->trace.lastline; \ 136 | (head)->trace.prevfile = (head)->trace.lastfile; \ 137 | (head)->trace.lastline = __LINE__; \ 138 | (head)->trace.lastfile = __FILE__; \ 139 | } while (0) 140 | 141 | #define QMD_TRACE_ELEM(elem) do { \ 142 | (elem)->trace.prevline = (elem)->trace.lastline; \ 143 | (elem)->trace.prevfile = (elem)->trace.lastfile; \ 144 | (elem)->trace.lastline = __LINE__; \ 145 | (elem)->trace.lastfile = __FILE__; \ 146 | } while (0) 147 | 148 | #else /* !QUEUE_MACRO_DEBUG_TRACE */ 149 | #define QMD_TRACE_ELEM(elem) 150 | #define QMD_TRACE_HEAD(head) 151 | #define TRACEBUF 152 | #define TRACEBUF_INITIALIZER 153 | #endif /* QUEUE_MACRO_DEBUG_TRACE */ 154 | 155 | #ifdef QUEUE_MACRO_DEBUG_TRASH 156 | #define QMD_SAVELINK(name, link) void **name = (void *)&(link) 157 | #define TRASHIT(x) do {(x) = (void *)-1;} while (0) 158 | #define QMD_IS_TRASHED(x) ((x) == (void *)(intptr_t)-1) 159 | #else /* !QUEUE_MACRO_DEBUG_TRASH */ 160 | #define QMD_SAVELINK(name, link) 161 | #define TRASHIT(x) 162 | #define QMD_IS_TRASHED(x) 0 163 | #endif /* QUEUE_MACRO_DEBUG_TRASH */ 164 | 165 | #ifdef __cplusplus 166 | /* 167 | * In C++ there can be structure lists and class lists: 168 | */ 169 | #define QUEUE_TYPEOF(type) type 170 | #else 171 | #define QUEUE_TYPEOF(type) struct 
type 172 | #endif 173 | 174 | /* 175 | * Singly-linked List declarations. 176 | */ 177 | #define SLIST_HEAD(name, type) \ 178 | struct name { \ 179 | struct type *slh_first; /* first element */ \ 180 | } 181 | 182 | #define SLIST_CLASS_HEAD(name, type) \ 183 | struct name { \ 184 | class type *slh_first; /* first element */ \ 185 | } 186 | 187 | #define SLIST_HEAD_INITIALIZER(head) \ 188 | { NULL } 189 | 190 | #define SLIST_ENTRY(type) \ 191 | struct { \ 192 | struct type *sle_next; /* next element */ \ 193 | } 194 | 195 | #define SLIST_CLASS_ENTRY(type) \ 196 | struct { \ 197 | class type *sle_next; /* next element */ \ 198 | } 199 | 200 | /* 201 | * Singly-linked List functions. 202 | */ 203 | #if (defined(_KERNEL) && defined(INVARIANTS)) 204 | #define QMD_SLIST_CHECK_PREVPTR(prevp, elm) do { \ 205 | if (*(prevp) != (elm)) \ 206 | panic("Bad prevptr *(%p) == %p != %p", \ 207 | (prevp), *(prevp), (elm)); \ 208 | } while (0) 209 | #else 210 | #define QMD_SLIST_CHECK_PREVPTR(prevp, elm) 211 | #endif 212 | 213 | #define SLIST_CONCAT(head1, head2, type, field) do { \ 214 | QUEUE_TYPEOF(type) *curelm = SLIST_FIRST(head1); \ 215 | if (curelm == NULL) { \ 216 | if ((SLIST_FIRST(head1) = SLIST_FIRST(head2)) != NULL) \ 217 | SLIST_INIT(head2); \ 218 | } else if (SLIST_FIRST(head2) != NULL) { \ 219 | while (SLIST_NEXT(curelm, field) != NULL) \ 220 | curelm = SLIST_NEXT(curelm, field); \ 221 | SLIST_NEXT(curelm, field) = SLIST_FIRST(head2); \ 222 | SLIST_INIT(head2); \ 223 | } \ 224 | } while (0) 225 | 226 | #define SLIST_EMPTY(head) ((head)->slh_first == NULL) 227 | 228 | #define SLIST_FIRST(head) ((head)->slh_first) 229 | 230 | #define SLIST_FOREACH(var, head, field) \ 231 | for ((var) = SLIST_FIRST((head)); \ 232 | (var); \ 233 | (var) = SLIST_NEXT((var), field)) 234 | 235 | #define SLIST_FOREACH_FROM(var, head, field) \ 236 | for ((var) = ((var) ? (var) : SLIST_FIRST((head))); \ 237 | (var); \ 238 | (var) = SLIST_NEXT((var), field)) 239 | 240 | #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ 241 | for ((var) = SLIST_FIRST((head)); \ 242 | (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ 243 | (var) = (tvar)) 244 | 245 | #define SLIST_FOREACH_FROM_SAFE(var, head, field, tvar) \ 246 | for ((var) = ((var) ? 
(var) : SLIST_FIRST((head))); \ 247 | (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ 248 | (var) = (tvar)) 249 | 250 | #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ 251 | for ((varp) = &SLIST_FIRST((head)); \ 252 | ((var) = *(varp)) != NULL; \ 253 | (varp) = &SLIST_NEXT((var), field)) 254 | 255 | #define SLIST_INIT(head) do { \ 256 | SLIST_FIRST((head)) = NULL; \ 257 | } while (0) 258 | 259 | #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ 260 | SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ 261 | SLIST_NEXT((slistelm), field) = (elm); \ 262 | } while (0) 263 | 264 | #define SLIST_INSERT_HEAD(head, elm, field) do { \ 265 | SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ 266 | SLIST_FIRST((head)) = (elm); \ 267 | } while (0) 268 | 269 | #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) 270 | 271 | #define SLIST_REMOVE(head, elm, type, field) do { \ 272 | QMD_SAVELINK(oldnext, (elm)->field.sle_next); \ 273 | if (SLIST_FIRST((head)) == (elm)) { \ 274 | SLIST_REMOVE_HEAD((head), field); \ 275 | } \ 276 | else { \ 277 | QUEUE_TYPEOF(type) *curelm = SLIST_FIRST(head); \ 278 | while (SLIST_NEXT(curelm, field) != (elm)) \ 279 | curelm = SLIST_NEXT(curelm, field); \ 280 | SLIST_REMOVE_AFTER(curelm, field); \ 281 | } \ 282 | TRASHIT(*oldnext); \ 283 | } while (0) 284 | 285 | #define SLIST_REMOVE_AFTER(elm, field) do { \ 286 | SLIST_NEXT(elm, field) = \ 287 | SLIST_NEXT(SLIST_NEXT(elm, field), field); \ 288 | } while (0) 289 | 290 | #define SLIST_REMOVE_HEAD(head, field) do { \ 291 | SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ 292 | } while (0) 293 | 294 | #define SLIST_REMOVE_PREVPTR(prevp, elm, field) do { \ 295 | QMD_SLIST_CHECK_PREVPTR(prevp, elm); \ 296 | *(prevp) = SLIST_NEXT(elm, field); \ 297 | TRASHIT((elm)->field.sle_next); \ 298 | } while (0) 299 | 300 | #define SLIST_SWAP(head1, head2, type) do { \ 301 | QUEUE_TYPEOF(type) *swap_first = SLIST_FIRST(head1); \ 302 | SLIST_FIRST(head1) = SLIST_FIRST(head2); \ 303 | SLIST_FIRST(head2) = swap_first; \ 304 | } while (0) 305 | 306 | /* 307 | * Singly-linked Tail queue declarations. 308 | */ 309 | #define STAILQ_HEAD(name, type) \ 310 | struct name { \ 311 | struct type *stqh_first;/* first element */ \ 312 | struct type **stqh_last;/* addr of last next element */ \ 313 | } 314 | 315 | #define STAILQ_CLASS_HEAD(name, type) \ 316 | struct name { \ 317 | class type *stqh_first; /* first element */ \ 318 | class type **stqh_last; /* addr of last next element */ \ 319 | } 320 | 321 | #define STAILQ_HEAD_INITIALIZER(head) \ 322 | { NULL, &(head).stqh_first } 323 | 324 | #define STAILQ_ENTRY(type) \ 325 | struct { \ 326 | struct type *stqe_next; /* next element */ \ 327 | } 328 | 329 | #define STAILQ_CLASS_ENTRY(type) \ 330 | struct { \ 331 | class type *stqe_next; /* next element */ \ 332 | } 333 | 334 | /* 335 | * Singly-linked Tail queue functions. 336 | */ 337 | #define STAILQ_CONCAT(head1, head2) do { \ 338 | if (!STAILQ_EMPTY((head2))) { \ 339 | *(head1)->stqh_last = (head2)->stqh_first; \ 340 | (head1)->stqh_last = (head2)->stqh_last; \ 341 | STAILQ_INIT((head2)); \ 342 | } \ 343 | } while (0) 344 | 345 | #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) 346 | 347 | #define STAILQ_FIRST(head) ((head)->stqh_first) 348 | 349 | #define STAILQ_FOREACH(var, head, field) \ 350 | for((var) = STAILQ_FIRST((head)); \ 351 | (var); \ 352 | (var) = STAILQ_NEXT((var), field)) 353 | 354 | #define STAILQ_FOREACH_FROM(var, head, field) \ 355 | for ((var) = ((var) ? 
(var) : STAILQ_FIRST((head))); \ 356 | (var); \ 357 | (var) = STAILQ_NEXT((var), field)) 358 | 359 | #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ 360 | for ((var) = STAILQ_FIRST((head)); \ 361 | (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ 362 | (var) = (tvar)) 363 | 364 | #define STAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \ 365 | for ((var) = ((var) ? (var) : STAILQ_FIRST((head))); \ 366 | (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ 367 | (var) = (tvar)) 368 | 369 | #define STAILQ_INIT(head) do { \ 370 | STAILQ_FIRST((head)) = NULL; \ 371 | (head)->stqh_last = &STAILQ_FIRST((head)); \ 372 | } while (0) 373 | 374 | #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ 375 | if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ 376 | (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 377 | STAILQ_NEXT((tqelm), field) = (elm); \ 378 | } while (0) 379 | 380 | #define STAILQ_INSERT_HEAD(head, elm, field) do { \ 381 | if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ 382 | (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 383 | STAILQ_FIRST((head)) = (elm); \ 384 | } while (0) 385 | 386 | #define STAILQ_INSERT_TAIL(head, elm, field) do { \ 387 | STAILQ_NEXT((elm), field) = NULL; \ 388 | *(head)->stqh_last = (elm); \ 389 | (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 390 | } while (0) 391 | 392 | #define STAILQ_LAST(head, type, field) \ 393 | (STAILQ_EMPTY((head)) ? NULL : \ 394 | __containerof((head)->stqh_last, \ 395 | QUEUE_TYPEOF(type), field.stqe_next)) 396 | 397 | #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) 398 | 399 | #define STAILQ_REMOVE(head, elm, type, field) do { \ 400 | QMD_SAVELINK(oldnext, (elm)->field.stqe_next); \ 401 | if (STAILQ_FIRST((head)) == (elm)) { \ 402 | STAILQ_REMOVE_HEAD((head), field); \ 403 | } \ 404 | else { \ 405 | QUEUE_TYPEOF(type) *curelm = STAILQ_FIRST(head); \ 406 | while (STAILQ_NEXT(curelm, field) != (elm)) \ 407 | curelm = STAILQ_NEXT(curelm, field); \ 408 | STAILQ_REMOVE_AFTER(head, curelm, field); \ 409 | } \ 410 | TRASHIT(*oldnext); \ 411 | } while (0) 412 | 413 | #define STAILQ_REMOVE_AFTER(head, elm, field) do { \ 414 | if ((STAILQ_NEXT(elm, field) = \ 415 | STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \ 416 | (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 417 | } while (0) 418 | 419 | #define STAILQ_REMOVE_HEAD(head, field) do { \ 420 | if ((STAILQ_FIRST((head)) = \ 421 | STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ 422 | (head)->stqh_last = &STAILQ_FIRST((head)); \ 423 | } while (0) 424 | 425 | #define STAILQ_SWAP(head1, head2, type) do { \ 426 | QUEUE_TYPEOF(type) *swap_first = STAILQ_FIRST(head1); \ 427 | QUEUE_TYPEOF(type) **swap_last = (head1)->stqh_last; \ 428 | STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \ 429 | (head1)->stqh_last = (head2)->stqh_last; \ 430 | STAILQ_FIRST(head2) = swap_first; \ 431 | (head2)->stqh_last = swap_last; \ 432 | if (STAILQ_EMPTY(head1)) \ 433 | (head1)->stqh_last = &STAILQ_FIRST(head1); \ 434 | if (STAILQ_EMPTY(head2)) \ 435 | (head2)->stqh_last = &STAILQ_FIRST(head2); \ 436 | } while (0) 437 | 438 | 439 | /* 440 | * List declarations. 
441 | */ 442 | #define LIST_HEAD(name, type) \ 443 | struct name { \ 444 | struct type *lh_first; /* first element */ \ 445 | } 446 | 447 | #define LIST_CLASS_HEAD(name, type) \ 448 | struct name { \ 449 | class type *lh_first; /* first element */ \ 450 | } 451 | 452 | #define LIST_HEAD_INITIALIZER(head) \ 453 | { NULL } 454 | 455 | #define LIST_ENTRY(type) \ 456 | struct { \ 457 | struct type *le_next; /* next element */ \ 458 | struct type **le_prev; /* address of previous next element */ \ 459 | } 460 | 461 | #define LIST_CLASS_ENTRY(type) \ 462 | struct { \ 463 | class type *le_next; /* next element */ \ 464 | class type **le_prev; /* address of previous next element */ \ 465 | } 466 | 467 | /* 468 | * List functions. 469 | */ 470 | 471 | #if (defined(_KERNEL) && defined(INVARIANTS)) 472 | /* 473 | * QMD_LIST_CHECK_HEAD(LIST_HEAD *head, LIST_ENTRY NAME) 474 | * 475 | * If the list is non-empty, validates that the first element of the list 476 | * points back at 'head.' 477 | */ 478 | #define QMD_LIST_CHECK_HEAD(head, field) do { \ 479 | if (LIST_FIRST((head)) != NULL && \ 480 | LIST_FIRST((head))->field.le_prev != \ 481 | &LIST_FIRST((head))) \ 482 | panic("Bad list head %p first->prev != head", (head)); \ 483 | } while (0) 484 | 485 | /* 486 | * QMD_LIST_CHECK_NEXT(TYPE *elm, LIST_ENTRY NAME) 487 | * 488 | * If an element follows 'elm' in the list, validates that the next element 489 | * points back at 'elm.' 490 | */ 491 | #define QMD_LIST_CHECK_NEXT(elm, field) do { \ 492 | if (LIST_NEXT((elm), field) != NULL && \ 493 | LIST_NEXT((elm), field)->field.le_prev != \ 494 | &((elm)->field.le_next)) \ 495 | panic("Bad link elm %p next->prev != elm", (elm)); \ 496 | } while (0) 497 | 498 | /* 499 | * QMD_LIST_CHECK_PREV(TYPE *elm, LIST_ENTRY NAME) 500 | * 501 | * Validates that the previous element (or head of the list) points to 'elm.' 502 | */ 503 | #define QMD_LIST_CHECK_PREV(elm, field) do { \ 504 | if (*(elm)->field.le_prev != (elm)) \ 505 | panic("Bad link elm %p prev->next != elm", (elm)); \ 506 | } while (0) 507 | #else 508 | #define QMD_LIST_CHECK_HEAD(head, field) 509 | #define QMD_LIST_CHECK_NEXT(elm, field) 510 | #define QMD_LIST_CHECK_PREV(elm, field) 511 | #endif /* (_KERNEL && INVARIANTS) */ 512 | 513 | #define LIST_CONCAT(head1, head2, type, field) do { \ 514 | QUEUE_TYPEOF(type) *curelm = LIST_FIRST(head1); \ 515 | if (curelm == NULL) { \ 516 | if ((LIST_FIRST(head1) = LIST_FIRST(head2)) != NULL) { \ 517 | LIST_FIRST(head2)->field.le_prev = \ 518 | &LIST_FIRST((head1)); \ 519 | LIST_INIT(head2); \ 520 | } \ 521 | } else if (LIST_FIRST(head2) != NULL) { \ 522 | while (LIST_NEXT(curelm, field) != NULL) \ 523 | curelm = LIST_NEXT(curelm, field); \ 524 | LIST_NEXT(curelm, field) = LIST_FIRST(head2); \ 525 | LIST_FIRST(head2)->field.le_prev = &LIST_NEXT(curelm, field); \ 526 | LIST_INIT(head2); \ 527 | } \ 528 | } while (0) 529 | 530 | #define LIST_EMPTY(head) ((head)->lh_first == NULL) 531 | 532 | #define LIST_FIRST(head) ((head)->lh_first) 533 | 534 | #define LIST_FOREACH(var, head, field) \ 535 | for ((var) = LIST_FIRST((head)); \ 536 | (var); \ 537 | (var) = LIST_NEXT((var), field)) 538 | 539 | #define LIST_FOREACH_FROM(var, head, field) \ 540 | for ((var) = ((var) ? 
(var) : LIST_FIRST((head))); \ 541 | (var); \ 542 | (var) = LIST_NEXT((var), field)) 543 | 544 | #define LIST_FOREACH_SAFE(var, head, field, tvar) \ 545 | for ((var) = LIST_FIRST((head)); \ 546 | (var) && ((tvar) = LIST_NEXT((var), field), 1); \ 547 | (var) = (tvar)) 548 | 549 | #define LIST_FOREACH_FROM_SAFE(var, head, field, tvar) \ 550 | for ((var) = ((var) ? (var) : LIST_FIRST((head))); \ 551 | (var) && ((tvar) = LIST_NEXT((var), field), 1); \ 552 | (var) = (tvar)) 553 | 554 | #define LIST_INIT(head) do { \ 555 | LIST_FIRST((head)) = NULL; \ 556 | } while (0) 557 | 558 | #define LIST_INSERT_AFTER(listelm, elm, field) do { \ 559 | QMD_LIST_CHECK_NEXT(listelm, field); \ 560 | if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ 561 | LIST_NEXT((listelm), field)->field.le_prev = \ 562 | &LIST_NEXT((elm), field); \ 563 | LIST_NEXT((listelm), field) = (elm); \ 564 | (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ 565 | } while (0) 566 | 567 | #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ 568 | QMD_LIST_CHECK_PREV(listelm, field); \ 569 | (elm)->field.le_prev = (listelm)->field.le_prev; \ 570 | LIST_NEXT((elm), field) = (listelm); \ 571 | *(listelm)->field.le_prev = (elm); \ 572 | (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ 573 | } while (0) 574 | 575 | #define LIST_INSERT_HEAD(head, elm, field) do { \ 576 | QMD_LIST_CHECK_HEAD((head), field); \ 577 | if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ 578 | LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ 579 | LIST_FIRST((head)) = (elm); \ 580 | (elm)->field.le_prev = &LIST_FIRST((head)); \ 581 | } while (0) 582 | 583 | #define LIST_NEXT(elm, field) ((elm)->field.le_next) 584 | 585 | #define LIST_PREV(elm, head, type, field) \ 586 | ((elm)->field.le_prev == &LIST_FIRST((head)) ? NULL : \ 587 | __containerof((elm)->field.le_prev, \ 588 | QUEUE_TYPEOF(type), field.le_next)) 589 | 590 | #define LIST_REMOVE(elm, field) do { \ 591 | QMD_SAVELINK(oldnext, (elm)->field.le_next); \ 592 | QMD_SAVELINK(oldprev, (elm)->field.le_prev); \ 593 | QMD_LIST_CHECK_NEXT(elm, field); \ 594 | QMD_LIST_CHECK_PREV(elm, field); \ 595 | if (LIST_NEXT((elm), field) != NULL) \ 596 | LIST_NEXT((elm), field)->field.le_prev = \ 597 | (elm)->field.le_prev; \ 598 | *(elm)->field.le_prev = LIST_NEXT((elm), field); \ 599 | TRASHIT(*oldnext); \ 600 | TRASHIT(*oldprev); \ 601 | } while (0) 602 | 603 | #define LIST_SWAP(head1, head2, type, field) do { \ 604 | QUEUE_TYPEOF(type) *swap_tmp = LIST_FIRST(head1); \ 605 | LIST_FIRST((head1)) = LIST_FIRST((head2)); \ 606 | LIST_FIRST((head2)) = swap_tmp; \ 607 | if ((swap_tmp = LIST_FIRST((head1))) != NULL) \ 608 | swap_tmp->field.le_prev = &LIST_FIRST((head1)); \ 609 | if ((swap_tmp = LIST_FIRST((head2))) != NULL) \ 610 | swap_tmp->field.le_prev = &LIST_FIRST((head2)); \ 611 | } while (0) 612 | 613 | /* 614 | * Tail queue declarations. 
615 | */ 616 | #define TAILQ_HEAD(name, type) \ 617 | struct name { \ 618 | struct type *tqh_first; /* first element */ \ 619 | struct type **tqh_last; /* addr of last next element */ \ 620 | TRACEBUF \ 621 | } 622 | 623 | #define TAILQ_CLASS_HEAD(name, type) \ 624 | struct name { \ 625 | class type *tqh_first; /* first element */ \ 626 | class type **tqh_last; /* addr of last next element */ \ 627 | TRACEBUF \ 628 | } 629 | 630 | #define TAILQ_HEAD_INITIALIZER(head) \ 631 | { NULL, &(head).tqh_first, TRACEBUF_INITIALIZER } 632 | 633 | #define TAILQ_ENTRY(type) \ 634 | struct { \ 635 | struct type *tqe_next; /* next element */ \ 636 | struct type **tqe_prev; /* address of previous next element */ \ 637 | TRACEBUF \ 638 | } 639 | 640 | #define TAILQ_CLASS_ENTRY(type) \ 641 | struct { \ 642 | class type *tqe_next; /* next element */ \ 643 | class type **tqe_prev; /* address of previous next element */ \ 644 | TRACEBUF \ 645 | } 646 | 647 | /* 648 | * Tail queue functions. 649 | */ 650 | #if (defined(_KERNEL) && defined(INVARIANTS)) 651 | /* 652 | * QMD_TAILQ_CHECK_HEAD(TAILQ_HEAD *head, TAILQ_ENTRY NAME) 653 | * 654 | * If the tailq is non-empty, validates that the first element of the tailq 655 | * points back at 'head.' 656 | */ 657 | #define QMD_TAILQ_CHECK_HEAD(head, field) do { \ 658 | if (!TAILQ_EMPTY(head) && \ 659 | TAILQ_FIRST((head))->field.tqe_prev != \ 660 | &TAILQ_FIRST((head))) \ 661 | panic("Bad tailq head %p first->prev != head", (head)); \ 662 | } while (0) 663 | 664 | /* 665 | * QMD_TAILQ_CHECK_TAIL(TAILQ_HEAD *head, TAILQ_ENTRY NAME) 666 | * 667 | * Validates that the tail of the tailq is a pointer to pointer to NULL. 668 | */ 669 | #define QMD_TAILQ_CHECK_TAIL(head, field) do { \ 670 | if (*(head)->tqh_last != NULL) \ 671 | panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \ 672 | } while (0) 673 | 674 | /* 675 | * QMD_TAILQ_CHECK_NEXT(TYPE *elm, TAILQ_ENTRY NAME) 676 | * 677 | * If an element follows 'elm' in the tailq, validates that the next element 678 | * points back at 'elm.' 679 | */ 680 | #define QMD_TAILQ_CHECK_NEXT(elm, field) do { \ 681 | if (TAILQ_NEXT((elm), field) != NULL && \ 682 | TAILQ_NEXT((elm), field)->field.tqe_prev != \ 683 | &((elm)->field.tqe_next)) \ 684 | panic("Bad link elm %p next->prev != elm", (elm)); \ 685 | } while (0) 686 | 687 | /* 688 | * QMD_TAILQ_CHECK_PREV(TYPE *elm, TAILQ_ENTRY NAME) 689 | * 690 | * Validates that the previous element (or head of the tailq) points to 'elm.' 
691 | */ 692 | #define QMD_TAILQ_CHECK_PREV(elm, field) do { \ 693 | if (*(elm)->field.tqe_prev != (elm)) \ 694 | panic("Bad link elm %p prev->next != elm", (elm)); \ 695 | } while (0) 696 | #else 697 | #define QMD_TAILQ_CHECK_HEAD(head, field) 698 | #define QMD_TAILQ_CHECK_TAIL(head, headname) 699 | #define QMD_TAILQ_CHECK_NEXT(elm, field) 700 | #define QMD_TAILQ_CHECK_PREV(elm, field) 701 | #endif /* (_KERNEL && INVARIANTS) */ 702 | 703 | #define TAILQ_CONCAT(head1, head2, field) do { \ 704 | if (!TAILQ_EMPTY(head2)) { \ 705 | *(head1)->tqh_last = (head2)->tqh_first; \ 706 | (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ 707 | (head1)->tqh_last = (head2)->tqh_last; \ 708 | TAILQ_INIT((head2)); \ 709 | QMD_TRACE_HEAD(head1); \ 710 | QMD_TRACE_HEAD(head2); \ 711 | } \ 712 | } while (0) 713 | 714 | #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) 715 | 716 | #define TAILQ_FIRST(head) ((head)->tqh_first) 717 | 718 | #define TAILQ_FOREACH(var, head, field) \ 719 | for ((var) = TAILQ_FIRST((head)); \ 720 | (var); \ 721 | (var) = TAILQ_NEXT((var), field)) 722 | 723 | #define TAILQ_FOREACH_FROM(var, head, field) \ 724 | for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \ 725 | (var); \ 726 | (var) = TAILQ_NEXT((var), field)) 727 | 728 | #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ 729 | for ((var) = TAILQ_FIRST((head)); \ 730 | (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ 731 | (var) = (tvar)) 732 | 733 | #define TAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \ 734 | for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \ 735 | (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ 736 | (var) = (tvar)) 737 | 738 | #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ 739 | for ((var) = TAILQ_LAST((head), headname); \ 740 | (var); \ 741 | (var) = TAILQ_PREV((var), headname, field)) 742 | 743 | #define TAILQ_FOREACH_REVERSE_FROM(var, head, headname, field) \ 744 | for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname)); \ 745 | (var); \ 746 | (var) = TAILQ_PREV((var), headname, field)) 747 | 748 | #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ 749 | for ((var) = TAILQ_LAST((head), headname); \ 750 | (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ 751 | (var) = (tvar)) 752 | 753 | #define TAILQ_FOREACH_REVERSE_FROM_SAFE(var, head, headname, field, tvar) \ 754 | for ((var) = ((var) ? 
(var) : TAILQ_LAST((head), headname)); \ 755 | (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ 756 | (var) = (tvar)) 757 | 758 | #define TAILQ_INIT(head) do { \ 759 | TAILQ_FIRST((head)) = NULL; \ 760 | (head)->tqh_last = &TAILQ_FIRST((head)); \ 761 | QMD_TRACE_HEAD(head); \ 762 | } while (0) 763 | 764 | #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ 765 | QMD_TAILQ_CHECK_NEXT(listelm, field); \ 766 | if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ 767 | TAILQ_NEXT((elm), field)->field.tqe_prev = \ 768 | &TAILQ_NEXT((elm), field); \ 769 | else { \ 770 | (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 771 | QMD_TRACE_HEAD(head); \ 772 | } \ 773 | TAILQ_NEXT((listelm), field) = (elm); \ 774 | (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ 775 | QMD_TRACE_ELEM(&(elm)->field); \ 776 | QMD_TRACE_ELEM(&(listelm)->field); \ 777 | } while (0) 778 | 779 | #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ 780 | QMD_TAILQ_CHECK_PREV(listelm, field); \ 781 | (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ 782 | TAILQ_NEXT((elm), field) = (listelm); \ 783 | *(listelm)->field.tqe_prev = (elm); \ 784 | (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ 785 | QMD_TRACE_ELEM(&(elm)->field); \ 786 | QMD_TRACE_ELEM(&(listelm)->field); \ 787 | } while (0) 788 | 789 | #define TAILQ_INSERT_HEAD(head, elm, field) do { \ 790 | QMD_TAILQ_CHECK_HEAD(head, field); \ 791 | if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ 792 | TAILQ_FIRST((head))->field.tqe_prev = \ 793 | &TAILQ_NEXT((elm), field); \ 794 | else \ 795 | (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 796 | TAILQ_FIRST((head)) = (elm); \ 797 | (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ 798 | QMD_TRACE_HEAD(head); \ 799 | QMD_TRACE_ELEM(&(elm)->field); \ 800 | } while (0) 801 | 802 | #define TAILQ_INSERT_TAIL(head, elm, field) do { \ 803 | QMD_TAILQ_CHECK_TAIL(head, field); \ 804 | TAILQ_NEXT((elm), field) = NULL; \ 805 | (elm)->field.tqe_prev = (head)->tqh_last; \ 806 | *(head)->tqh_last = (elm); \ 807 | (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 808 | QMD_TRACE_HEAD(head); \ 809 | QMD_TRACE_ELEM(&(elm)->field); \ 810 | } while (0) 811 | 812 | #define TAILQ_LAST(head, headname) \ 813 | (*(((struct headname *)((head)->tqh_last))->tqh_last)) 814 | 815 | /* 816 | * The FAST function is fast in that it causes no data access other 817 | * then the access to the head. The standard LAST function above 818 | * will cause a data access of both the element you want and 819 | * the previous element. FAST is very useful for instances when 820 | * you may want to prefetch the last data element. 821 | */ 822 | #define TAILQ_LAST_FAST(head, type, field) \ 823 | (TAILQ_EMPTY(head) ? NULL : __containerof((head)->tqh_last, QUEUE_TYPEOF(type), field.tqe_next)) 824 | 825 | #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) 826 | 827 | #define TAILQ_PREV(elm, headname, field) \ 828 | (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) 829 | 830 | #define TAILQ_PREV_FAST(elm, head, type, field) \ 831 | ((elm)->field.tqe_prev == &(head)->tqh_first ? 
NULL : \ 832 | __containerof((elm)->field.tqe_prev, QUEUE_TYPEOF(type), field.tqe_next)) 833 | 834 | #define TAILQ_REMOVE(head, elm, field) do { \ 835 | QMD_SAVELINK(oldnext, (elm)->field.tqe_next); \ 836 | QMD_SAVELINK(oldprev, (elm)->field.tqe_prev); \ 837 | QMD_TAILQ_CHECK_NEXT(elm, field); \ 838 | QMD_TAILQ_CHECK_PREV(elm, field); \ 839 | if ((TAILQ_NEXT((elm), field)) != NULL) \ 840 | TAILQ_NEXT((elm), field)->field.tqe_prev = \ 841 | (elm)->field.tqe_prev; \ 842 | else { \ 843 | (head)->tqh_last = (elm)->field.tqe_prev; \ 844 | QMD_TRACE_HEAD(head); \ 845 | } \ 846 | *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ 847 | TRASHIT(*oldnext); \ 848 | TRASHIT(*oldprev); \ 849 | QMD_TRACE_ELEM(&(elm)->field); \ 850 | } while (0) 851 | 852 | #define TAILQ_SWAP(head1, head2, type, field) do { \ 853 | QUEUE_TYPEOF(type) *swap_first = (head1)->tqh_first; \ 854 | QUEUE_TYPEOF(type) **swap_last = (head1)->tqh_last; \ 855 | (head1)->tqh_first = (head2)->tqh_first; \ 856 | (head1)->tqh_last = (head2)->tqh_last; \ 857 | (head2)->tqh_first = swap_first; \ 858 | (head2)->tqh_last = swap_last; \ 859 | if ((swap_first = (head1)->tqh_first) != NULL) \ 860 | swap_first->field.tqe_prev = &(head1)->tqh_first; \ 861 | else \ 862 | (head1)->tqh_last = &(head1)->tqh_first; \ 863 | if ((swap_first = (head2)->tqh_first) != NULL) \ 864 | swap_first->field.tqe_prev = &(head2)->tqh_first; \ 865 | else \ 866 | (head2)->tqh_last = &(head2)->tqh_first; \ 867 | } while (0) 868 | 869 | #endif /* !_SYS_QUEUE_H_ */ 870 | -------------------------------------------------------------------------------- /include/thread-pool.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Andri Yngvason 3 | * 4 | * Permission to use, copy, modify, and/or distribute this software for any 5 | * purpose with or without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 9 | * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY 10 | * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 11 | * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 12 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 13 | * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 14 | * PERFORMANCE OF THIS SOFTWARE. 
15 | */ 16 | 17 | #pragma once 18 | 19 | struct aml; 20 | struct aml_work; 21 | 22 | int thread_pool_acquire_default(struct aml*, int n_threads); 23 | void thread_pool_release_default(struct aml*); 24 | int thread_pool_enqueue_default(struct aml*, struct aml_work*); 25 | -------------------------------------------------------------------------------- /meson.build: -------------------------------------------------------------------------------- 1 | project( 2 | 'aml', 3 | 'c', 4 | version: '1.0.0-rc0', 5 | license: 'ISC', 6 | default_options: [ 7 | 'c_std=c11', 8 | 'warning_level=2', 9 | ] 10 | ) 11 | 12 | buildtype = get_option('buildtype') 13 | default_library = get_option('default_library') 14 | is_static_subproject = meson.is_subproject() and default_library == 'static' 15 | 16 | c_args = [ 17 | '-DPROJECT_VERSION="@0@"'.format(meson.project_version()), 18 | '-D_POSIX_C_SOURCE=200809L', 19 | '-fvisibility=hidden', 20 | '-Wmissing-prototypes', 21 | '-Wno-unused-parameter', 22 | ] 23 | 24 | git = find_program('git', native: true, required: false) 25 | if git.found() 26 | git_describe = run_command([git, 'describe', '--tags', '--long'], 27 | check: false) 28 | git_branch = run_command([git, 'rev-parse', '--abbrev-ref', 'HEAD'], 29 | check: false) 30 | if git_describe.returncode() == 0 and git_branch.returncode() == 0 31 | c_args += '-DGIT_VERSION="@0@ (@1@)"'.format( 32 | git_describe.stdout().strip(), 33 | git_branch.stdout().strip(), 34 | ) 35 | endif 36 | endif 37 | 38 | if buildtype != 'debug' and buildtype != 'debugoptimized' 39 | c_args += '-DNDEBUG' 40 | endif 41 | 42 | add_project_arguments(c_args, language: 'c') 43 | 44 | cc = meson.get_compiler('c') 45 | 46 | librt = cc.find_library('rt', required: false) 47 | threads = dependency('threads') 48 | 49 | inc = include_directories('include') 50 | 51 | sources = [ 52 | 'src/aml.c', 53 | 'src/thread-pool.c', 54 | ] 55 | 56 | have_epoll = cc.has_header_symbol('sys/epoll.h', 'epoll_create') 57 | have_kqueue = cc.has_header_symbol('sys/event.h', 'kqueue') 58 | 59 | if have_epoll 60 | sources += 'src/epoll.c' 61 | message('epoll backend chosen') 62 | elif have_kqueue 63 | sources += 'src/kqueue.c' 64 | message('kqueue backend chosen') 65 | else 66 | error('Unsupported system') 67 | endif 68 | 69 | dependencies = [ 70 | librt, 71 | threads, 72 | ] 73 | 74 | aml = library( 75 | 'aml', 76 | sources, 77 | version: '1.0.0', 78 | dependencies: dependencies, 79 | include_directories: inc, 80 | install: not is_static_subproject, 81 | ) 82 | 83 | aml_dep = declare_dependency( 84 | include_directories: inc, 85 | link_with: aml, 86 | ) 87 | 88 | if get_option('examples') 89 | subdir('examples') 90 | endif 91 | 92 | if not is_static_subproject 93 | install_headers('include/aml.h') 94 | 95 | pkgconfig = import('pkgconfig') 96 | pkgconfig.generate( 97 | aml, 98 | version: meson.project_version(), 99 | filebase: meson.project_name(), 100 | name: meson.project_name(), 101 | description: 'Another main loop library', 102 | ) 103 | endif 104 | -------------------------------------------------------------------------------- /meson_options.txt: -------------------------------------------------------------------------------- 1 | option( 2 | 'examples', 3 | type: 'boolean', 4 | value: false, 5 | description: 'Build examples', 6 | ) 7 | -------------------------------------------------------------------------------- /src/aml.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 - 2024 Andri Yngvason 3 
| * 4 | * Permission to use, copy, modify, and/or distribute this software for any 5 | * purpose with or without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 9 | * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY 10 | * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 11 | * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 12 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 13 | * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 14 | * PERFORMANCE OF THIS SOFTWARE. 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | #include "aml.h" 30 | #include "backend.h" 31 | #include "sys/queue.h" 32 | #include "thread-pool.h" 33 | 34 | #define EXPORT __attribute__((visibility("default"))) 35 | 36 | #define EVENT_MASK_DEFAULT AML_EVENT_READ 37 | 38 | #ifndef MIN 39 | #define MIN(a, b) ((a) < (b) ? (a) : (b)) 40 | #endif 41 | 42 | enum aml_obj_type { 43 | AML_OBJ_UNSPEC = 0, 44 | AML_OBJ_AML, 45 | AML_OBJ_HANDLER, 46 | AML_OBJ_TIMER, 47 | AML_OBJ_TICKER, 48 | AML_OBJ_SIGNAL, 49 | AML_OBJ_WORK, 50 | AML_OBJ_IDLE, 51 | }; 52 | 53 | struct aml_weak_ref { 54 | void* obj; 55 | LIST_ENTRY(aml_weak_ref) link; 56 | }; 57 | 58 | LIST_HEAD(aml_weak_ref_list, aml_weak_ref); 59 | 60 | struct aml_obj { 61 | enum aml_obj_type type; 62 | int ref; 63 | void* userdata; 64 | aml_free_fn free_fn; 65 | aml_callback_fn cb; 66 | unsigned long long id; 67 | uint32_t n_events; 68 | struct aml_weak_ref_list weak_refs; 69 | 70 | void* backend_data; 71 | 72 | LIST_ENTRY(aml_obj) link; 73 | TAILQ_ENTRY(aml_obj) event_link; 74 | }; 75 | 76 | LIST_HEAD(aml_obj_list, aml_obj); 77 | TAILQ_HEAD(aml_obj_queue, aml_obj); 78 | 79 | struct aml_handler { 80 | struct aml_obj obj; 81 | 82 | int fd; 83 | enum aml_event event_mask; 84 | atomic_uint revents; 85 | 86 | struct aml* parent; 87 | }; 88 | 89 | struct aml_timer { 90 | struct aml_obj obj; 91 | 92 | uint64_t timeout; 93 | uint64_t deadline; 94 | bool expired; 95 | 96 | LIST_ENTRY(aml_timer) link; 97 | }; 98 | 99 | struct aml_ticker { 100 | struct aml_obj obj; 101 | 102 | uint64_t timeout; 103 | uint64_t deadline; 104 | bool expired; 105 | 106 | LIST_ENTRY(aml_timer) link; 107 | }; 108 | 109 | LIST_HEAD(aml_timer_list, aml_timer); 110 | 111 | struct aml_signal { 112 | struct aml_obj obj; 113 | 114 | int signo; 115 | }; 116 | 117 | struct aml_work { 118 | struct aml_obj obj; 119 | 120 | aml_callback_fn work_fn; 121 | }; 122 | 123 | struct aml_idle { 124 | struct aml_obj obj; 125 | 126 | LIST_ENTRY(aml_idle) link; 127 | }; 128 | 129 | LIST_HEAD(aml_idle_list, aml_idle); 130 | 131 | struct aml { 132 | struct aml_obj obj; 133 | 134 | void* state; 135 | struct aml_backend backend; 136 | 137 | int self_pipe_rfd, self_pipe_wfd; 138 | 139 | bool do_exit; 140 | 141 | struct aml_obj_list obj_list; 142 | pthread_mutex_t obj_list_mutex; 143 | 144 | struct aml_timer_list timer_list; 145 | pthread_mutex_t timer_list_mutex; 146 | 147 | struct aml_idle_list idle_list; 148 | 149 | struct aml_obj_queue event_queue; 150 | pthread_mutex_t event_queue_mutex; 151 | 152 | bool have_thread_pool; 153 | }; 154 | 155 | static struct aml* aml__default = NULL; 156 | 157 | // TODO: Properly 
initialise this? 158 | static pthread_mutex_t aml__ref_mutex; 159 | 160 | extern struct aml_backend implementation; 161 | 162 | static struct aml_timer* aml__get_timer_with_earliest_deadline(struct aml* self); 163 | 164 | #if defined(GIT_VERSION) 165 | EXPORT const char aml_version[] = GIT_VERSION; 166 | #elif defined(PROJECT_VERSION) 167 | EXPORT const char aml_version[] = PROJECT_VERSION; 168 | #else 169 | EXPORT const char aml_version[] = "UNKNOWN"; 170 | #endif 171 | 172 | static void aml__free(struct aml* self); 173 | 174 | EXPORT 175 | void aml_set_default(struct aml* aml) 176 | { 177 | aml__default = aml; 178 | } 179 | 180 | EXPORT 181 | struct aml* aml_get_default(void) 182 | { 183 | return aml__default; 184 | } 185 | 186 | static int aml__poll(struct aml* self, int timeout) 187 | { 188 | return self->backend.poll(self->state, timeout); 189 | } 190 | 191 | static int aml__add_fd(struct aml* self, struct aml_handler* handler) 192 | { 193 | return self->backend.add_fd(self->state, handler); 194 | } 195 | 196 | static int aml__del_fd(struct aml* self, struct aml_handler* handler) 197 | { 198 | return self->backend.del_fd(self->state, handler); 199 | } 200 | 201 | static int aml__mod_fd(struct aml* self, struct aml_handler* handler) 202 | { 203 | if (!self->backend.mod_fd) { 204 | aml__del_fd(self, handler); 205 | return aml__add_fd(self, handler); 206 | } 207 | 208 | return self->backend.mod_fd(self->state, handler); 209 | } 210 | 211 | static int aml__set_deadline(struct aml* self, uint64_t deadline) 212 | { 213 | return self->backend.set_deadline(self->state, deadline); 214 | } 215 | 216 | static void aml__post_dispatch(struct aml* self) 217 | { 218 | if (self->backend.post_dispatch) 219 | self->backend.post_dispatch(self->state); 220 | } 221 | 222 | static void aml__dont_block(int fd) 223 | { 224 | fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK); 225 | } 226 | 227 | static uint64_t aml__gettime_us(struct aml* self) 228 | { 229 | struct timespec ts = { 0 }; 230 | clock_gettime(self->backend.clock, &ts); 231 | return ts.tv_sec * UINT64_C(1000000) + ts.tv_nsec / UINT64_C(1000); 232 | } 233 | 234 | static void aml__ref_lock(void) 235 | { 236 | pthread_mutex_lock(&aml__ref_mutex); 237 | } 238 | 239 | static void aml__ref_unlock(void) 240 | { 241 | pthread_mutex_unlock(&aml__ref_mutex); 242 | } 243 | 244 | EXPORT 245 | struct aml_weak_ref* aml_weak_ref_new(void* obj_ptr) 246 | { 247 | struct aml_obj* obj = obj_ptr; 248 | 249 | struct aml_weak_ref* self = calloc(1, sizeof(*self)); 250 | if (!self) 251 | return NULL; 252 | 253 | self->obj = obj; 254 | aml__ref_lock(); 255 | LIST_INSERT_HEAD(&obj->weak_refs, self, link); 256 | aml__ref_unlock(); 257 | 258 | return self; 259 | } 260 | 261 | EXPORT 262 | void aml_weak_ref_del(struct aml_weak_ref* self) 263 | { 264 | if (!self) 265 | return; 266 | 267 | aml__ref_lock(); 268 | if (self->obj) 269 | LIST_REMOVE(self, link); 270 | aml__ref_unlock(); 271 | free(self); 272 | } 273 | 274 | EXPORT 275 | void* aml_weak_ref_read(struct aml_weak_ref* self) 276 | { 277 | aml__ref_lock(); 278 | struct aml_obj* obj = self->obj; 279 | if (obj) 280 | obj->ref++; 281 | aml__ref_unlock(); 282 | return obj; 283 | } 284 | 285 | static int aml_obj_ref(struct aml_obj* self) 286 | { 287 | aml__ref_lock(); 288 | assert(self->ref >= 0); 289 | int ref = self->ref++; 290 | aml__ref_unlock(); 291 | return ref; 292 | } 293 | 294 | static int aml_obj_unref(struct aml_obj* self) 295 | { 296 | aml__ref_lock(); 297 | int ref = --self->ref; 298 | aml__ref_unlock(); 299 
| 300 | assert(ref >= 0); 301 | if (ref > 0) 302 | goto done; 303 | 304 | aml__ref_lock(); 305 | while (!LIST_EMPTY(&self->weak_refs)) { 306 | struct aml_weak_ref* ref = LIST_FIRST(&self->weak_refs); 307 | ref->obj = NULL; 308 | LIST_REMOVE(ref, link); 309 | } 310 | aml__ref_unlock(); 311 | 312 | switch (self->type) { 313 | case AML_OBJ_AML: 314 | aml__free((struct aml*)self); 315 | break; 316 | case AML_OBJ_HANDLER: 317 | case AML_OBJ_TIMER: 318 | case AML_OBJ_TICKER: 319 | case AML_OBJ_SIGNAL: 320 | case AML_OBJ_WORK: 321 | case AML_OBJ_IDLE: 322 | if (self->free_fn) 323 | self->free_fn(self->userdata); 324 | free(self); 325 | break; 326 | default: 327 | abort(); 328 | break; 329 | } 330 | 331 | done: 332 | return ref; 333 | } 334 | 335 | static void* aml_obj_get_userdata(const struct aml_obj* obj) 336 | { 337 | return obj->userdata; 338 | } 339 | 340 | static void aml_obj_set_userdata(struct aml_obj* obj, void* userdata, 341 | aml_free_fn free_fn) 342 | { 343 | obj->userdata = userdata; 344 | obj->free_fn = free_fn; 345 | } 346 | 347 | #define X(type, name) \ 348 | EXPORT int aml_ ## name ## _ref(struct type* self) \ 349 | { \ 350 | return aml_obj_ref(&self->obj); \ 351 | } \ 352 | EXPORT int aml_ ## name ## _unref(struct type* self) \ 353 | { \ 354 | return aml_obj_unref(&self->obj); \ 355 | } \ 356 | EXPORT void aml_ ## name ## _set_userdata(struct type* self, void* ud, \ 357 | aml_free_fn free_fn) \ 358 | { \ 359 | aml_obj_set_userdata(&self->obj, ud, free_fn); \ 360 | } \ 361 | EXPORT void* aml_ ## name ## _get_userdata(const struct type* self) \ 362 | { \ 363 | return aml_obj_get_userdata(&self->obj); \ 364 | } 365 | 366 | X(aml, loop) 367 | X(aml_handler, handler) 368 | X(aml_timer, timer) 369 | X(aml_ticker, ticker) 370 | X(aml_signal, signal) 371 | X(aml_work, work) 372 | X(aml_idle, idle) 373 | 374 | #undef X 375 | 376 | static void on_self_pipe_read(struct aml_handler* handler) { 377 | struct aml* self = aml_get_userdata(handler); 378 | assert(self); 379 | assert(self->self_pipe_rfd == aml_get_fd(handler)); 380 | 381 | char dummy[256]; 382 | while (read(self->self_pipe_rfd, dummy, sizeof(dummy)) > 0); 383 | } 384 | 385 | static void aml__destroy_self_pipe(void* userdata) 386 | { 387 | struct aml* self = userdata; 388 | 389 | close(self->self_pipe_rfd); 390 | close(self->self_pipe_wfd); 391 | } 392 | 393 | static int aml__init_self_pipe(struct aml* self) 394 | { 395 | if (self->backend.interrupt) 396 | return 0; 397 | 398 | int fds[2]; 399 | if (pipe(fds) < 0) 400 | return -1; 401 | 402 | aml__dont_block(fds[0]); 403 | aml__dont_block(fds[1]); 404 | 405 | self->self_pipe_rfd = fds[0]; 406 | self->self_pipe_wfd = fds[1]; 407 | 408 | struct aml_handler* handler = 409 | aml_handler_new(self->self_pipe_rfd, on_self_pipe_read, self, 410 | aml__destroy_self_pipe); 411 | if (!handler) 412 | goto failure; 413 | 414 | aml_start(self, handler); 415 | aml_unref(handler); 416 | 417 | return 0; 418 | 419 | failure: 420 | close(fds[1]); 421 | close(fds[0]); 422 | return -1; 423 | } 424 | 425 | EXPORT 426 | void aml_interrupt(struct aml* self) 427 | { 428 | if (self->backend.interrupt) { 429 | self->backend.interrupt(self->state); 430 | return; 431 | } 432 | 433 | char one = 1; 434 | write(self->self_pipe_wfd, &one, sizeof(one)); 435 | } 436 | 437 | EXPORT 438 | struct aml* aml_new(void) 439 | { 440 | struct aml* self = calloc(1, sizeof(*self)); 441 | if (!self) 442 | return NULL; 443 | 444 | self->obj.type = AML_OBJ_AML; 445 | self->obj.ref = 1; 446 | LIST_INIT(&self->obj.weak_refs); 447 | 
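	/* Per-loop containers (started-object list, timer list, idle list and
	 * the pending-event queue) are initialised here, before the backend
	 * state is created further down. */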
448 | LIST_INIT(&self->obj_list); 449 | LIST_INIT(&self->timer_list); 450 | LIST_INIT(&self->idle_list); 451 | TAILQ_INIT(&self->event_queue); 452 | 453 | pthread_mutex_init(&self->event_queue_mutex, NULL); 454 | pthread_mutex_init(&self->obj_list_mutex, NULL); 455 | pthread_mutex_init(&self->timer_list_mutex, NULL); 456 | 457 | memcpy(&self->backend, &implementation, sizeof(self->backend)); 458 | 459 | if (!self->backend.thread_pool_acquire) 460 | self->backend.thread_pool_acquire = thread_pool_acquire_default; 461 | if (!self->backend.thread_pool_release) 462 | self->backend.thread_pool_release = thread_pool_release_default; 463 | if (!self->backend.thread_pool_enqueue) 464 | self->backend.thread_pool_enqueue = thread_pool_enqueue_default; 465 | 466 | self->state = self->backend.new_state(self); 467 | if (!self->state) 468 | goto failure; 469 | 470 | if (aml__init_self_pipe(self) < 0) 471 | goto pipe_failure; 472 | 473 | return self; 474 | 475 | pipe_failure: 476 | self->backend.del_state(self->state); 477 | failure: 478 | free(self); 479 | return NULL; 480 | } 481 | 482 | static int get_n_processors(void) 483 | { 484 | #ifdef _SC_NPROCESSORS_ONLN 485 | return sysconf(_SC_NPROCESSORS_ONLN); 486 | #else 487 | return 4; /* Guess */ 488 | #endif 489 | } 490 | 491 | EXPORT 492 | int aml_require_workers(struct aml* self, int n) 493 | { 494 | if (n < 0) 495 | n = get_n_processors(); 496 | 497 | if (self->backend.thread_pool_acquire(self, n) < 0) 498 | return -1; 499 | 500 | self->have_thread_pool = true; 501 | return 0; 502 | } 503 | 504 | EXPORT 505 | struct aml_handler* aml_handler_new(int fd, 506 | void (*callback)(struct aml_handler*), void* userdata, 507 | aml_free_fn free_fn) 508 | { 509 | struct aml_handler* self = calloc(1, sizeof(*self)); 510 | if (!self) 511 | return NULL; 512 | 513 | self->obj.type = AML_OBJ_HANDLER; 514 | self->obj.ref = 1; 515 | self->obj.userdata = userdata; 516 | self->obj.free_fn = free_fn; 517 | self->obj.cb = (void*)callback; 518 | LIST_INIT(&self->obj.weak_refs); 519 | 520 | self->fd = fd; 521 | self->event_mask = EVENT_MASK_DEFAULT; 522 | 523 | return self; 524 | } 525 | 526 | EXPORT 527 | struct aml_timer* aml_timer_new(uint64_t timeout, 528 | void (*callback)(struct aml_timer*), void* userdata, 529 | aml_free_fn free_fn) 530 | { 531 | struct aml_timer* self = calloc(1, sizeof(*self)); 532 | if (!self) 533 | return NULL; 534 | 535 | self->obj.type = AML_OBJ_TIMER; 536 | self->obj.ref = 1; 537 | self->obj.userdata = userdata; 538 | self->obj.free_fn = free_fn; 539 | self->obj.cb = (void*)callback; 540 | LIST_INIT(&self->obj.weak_refs); 541 | 542 | self->timeout = timeout; 543 | 544 | return self; 545 | } 546 | 547 | EXPORT 548 | struct aml_ticker* aml_ticker_new(uint64_t period, 549 | void (*callback)(struct aml_ticker*), 550 | void* userdata, aml_free_fn free_fn) 551 | { 552 | struct aml_timer* timer = 553 | aml_timer_new(period, (void*)callback, userdata, free_fn); 554 | timer->obj.type = AML_OBJ_TICKER; 555 | return (struct aml_ticker*)timer; 556 | } 557 | 558 | EXPORT 559 | struct aml_signal* aml_signal_new(int signo, 560 | void (*callback)(struct aml_signal*), void* userdata, 561 | aml_free_fn free_fn) 562 | { 563 | struct aml_signal* self = calloc(1, sizeof(*self)); 564 | if (!self) 565 | return NULL; 566 | 567 | self->obj.type = AML_OBJ_SIGNAL; 568 | self->obj.ref = 1; 569 | self->obj.userdata = userdata; 570 | self->obj.free_fn = free_fn; 571 | self->obj.cb = (void*)callback; 572 | LIST_INIT(&self->obj.weak_refs); 573 | 574 | self->signo = signo; 
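	/* Nothing is hooked into the kernel at this point; the backend's
	 * add_signal() is only invoked once the object is started on a loop. */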
575 | 576 | return self; 577 | } 578 | 579 | EXPORT 580 | struct aml_work* aml_work_new(void (*work_fn)(struct aml_work*), 581 | void (*callback)(struct aml_work*), void* userdata, 582 | aml_free_fn free_fn) 583 | { 584 | struct aml_work* self = calloc(1, sizeof(*self)); 585 | if (!self) 586 | return NULL; 587 | 588 | self->obj.type = AML_OBJ_WORK; 589 | self->obj.ref = 1; 590 | self->obj.userdata = userdata; 591 | self->obj.free_fn = free_fn; 592 | self->obj.cb = (void*)callback; 593 | LIST_INIT(&self->obj.weak_refs); 594 | 595 | self->work_fn = (void*)work_fn; 596 | 597 | return self; 598 | } 599 | 600 | EXPORT 601 | struct aml_idle* aml_idle_new(void (*callback)(struct aml_idle*), 602 | void* userdata, aml_free_fn free_fn) 603 | { 604 | struct aml_idle* self = calloc(1, sizeof(*self)); 605 | if (!self) 606 | return NULL; 607 | 608 | self->obj.type = AML_OBJ_IDLE; 609 | self->obj.ref = 1; 610 | self->obj.userdata = userdata; 611 | self->obj.free_fn = free_fn; 612 | self->obj.cb = (void*)callback; 613 | LIST_INIT(&self->obj.weak_refs); 614 | 615 | return self; 616 | } 617 | 618 | static bool aml__obj_is_single_shot(void* ptr) 619 | { 620 | struct aml_obj* obj = ptr; 621 | switch (obj->type) { 622 | case AML_OBJ_TIMER: /* fallthrough */ 623 | case AML_OBJ_WORK: 624 | return true; 625 | default:; 626 | } 627 | return false; 628 | } 629 | 630 | static bool aml__obj_is_started_unlocked(struct aml* self, void* obj) 631 | { 632 | struct aml_obj* elem; 633 | LIST_FOREACH(elem, &self->obj_list, link) 634 | if (elem == obj) 635 | return true; 636 | 637 | return false; 638 | } 639 | 640 | static bool aml_is_obj_started(struct aml* self, struct aml_obj* obj) 641 | { 642 | pthread_mutex_lock(&self->obj_list_mutex); 643 | bool result = aml__obj_is_started_unlocked(self, obj); 644 | pthread_mutex_unlock(&self->obj_list_mutex); 645 | return result; 646 | } 647 | 648 | static int aml__obj_try_add(struct aml* self, struct aml_obj* obj) 649 | { 650 | int rc = -1; 651 | 652 | pthread_mutex_lock(&self->obj_list_mutex); 653 | 654 | if (!aml__obj_is_started_unlocked(self, obj)) { 655 | aml_obj_ref(obj); 656 | LIST_INSERT_HEAD(&self->obj_list, (struct aml_obj*)obj, link); 657 | rc = 0; 658 | } 659 | 660 | pthread_mutex_unlock(&self->obj_list_mutex); 661 | 662 | return rc; 663 | } 664 | 665 | static void aml__obj_remove_unlocked(struct aml* self, void* obj) 666 | { 667 | LIST_REMOVE((struct aml_obj*)obj, link); 668 | aml_obj_unref(obj); 669 | } 670 | 671 | static void aml__obj_remove(struct aml* self, void* obj) 672 | { 673 | pthread_mutex_lock(&self->obj_list_mutex); 674 | aml__obj_remove_unlocked(self, obj); 675 | pthread_mutex_unlock(&self->obj_list_mutex); 676 | } 677 | 678 | static int aml__obj_try_remove(struct aml* self, void* obj) 679 | { 680 | int rc = -1; 681 | 682 | pthread_mutex_lock(&self->obj_list_mutex); 683 | 684 | if (aml__obj_is_started_unlocked(self, obj)) { 685 | aml__obj_remove_unlocked(self, obj); 686 | rc = 0; 687 | } 688 | 689 | pthread_mutex_unlock(&self->obj_list_mutex); 690 | 691 | return rc; 692 | } 693 | 694 | static int aml__start_handler(struct aml* self, struct aml_handler* handler) 695 | { 696 | if (aml__add_fd(self, handler) < 0) 697 | return -1; 698 | 699 | handler->parent = self; 700 | 701 | return 0; 702 | } 703 | 704 | static int aml__start_timer(struct aml* self, struct aml_timer* timer) 705 | { 706 | timer->deadline = aml__gettime_us(self) + timer->timeout; 707 | timer->expired = false; 708 | 709 | pthread_mutex_lock(&self->timer_list_mutex); 710 | 
LIST_INSERT_HEAD(&self->timer_list, timer, link); 711 | pthread_mutex_unlock(&self->timer_list_mutex); 712 | 713 | if (timer->timeout == 0) { 714 | assert(timer->obj.type != AML_OBJ_TICKER); 715 | aml_emit(self, timer, 0); 716 | aml_interrupt(self); 717 | return 0; 718 | } 719 | 720 | struct aml_timer* earliest = aml__get_timer_with_earliest_deadline(self); 721 | if (earliest == timer) 722 | aml__set_deadline(self, timer->deadline); 723 | 724 | return 0; 725 | } 726 | 727 | static int aml__start_signal(struct aml* self, struct aml_signal* sig) 728 | { 729 | return self->backend.add_signal(self->state, sig); 730 | } 731 | 732 | static int aml__start_work(struct aml* self, struct aml_work* work) 733 | { 734 | return self->backend.thread_pool_enqueue(self, work); 735 | } 736 | 737 | static int aml__start_idle(struct aml* self, struct aml_idle* idle) 738 | { 739 | LIST_INSERT_HEAD(&self->idle_list, idle, link); 740 | return 0; 741 | } 742 | 743 | static int aml__start_unchecked(struct aml* self, void* obj) 744 | { 745 | struct aml_obj* head = obj; 746 | 747 | switch (head->type) { 748 | case AML_OBJ_AML: return -1; 749 | case AML_OBJ_HANDLER: return aml__start_handler(self, obj); 750 | case AML_OBJ_TIMER: /* fallthrough */ 751 | case AML_OBJ_TICKER: return aml__start_timer(self, obj); 752 | case AML_OBJ_SIGNAL: return aml__start_signal(self, obj); 753 | case AML_OBJ_WORK: return aml__start_work(self, obj); 754 | case AML_OBJ_IDLE: return aml__start_idle(self, obj); 755 | case AML_OBJ_UNSPEC: break; 756 | } 757 | 758 | abort(); 759 | return -1; 760 | } 761 | 762 | static int aml_start_obj(struct aml* self, struct aml_obj* obj) 763 | { 764 | if (aml__obj_try_add(self, obj) < 0) 765 | return -1; 766 | 767 | if (aml__start_unchecked(self, obj) == 0) 768 | return 0; 769 | 770 | aml__obj_remove(self, obj); 771 | return -1; 772 | } 773 | 774 | static int aml__stop_handler(struct aml* self, struct aml_handler* handler) 775 | { 776 | if (aml__del_fd(self, handler) < 0) 777 | return -1; 778 | 779 | handler->parent = NULL; 780 | 781 | return 0; 782 | } 783 | 784 | static int aml__stop_timer(struct aml* self, struct aml_timer* timer) 785 | { 786 | pthread_mutex_lock(&self->timer_list_mutex); 787 | LIST_REMOVE(timer, link); 788 | pthread_mutex_unlock(&self->timer_list_mutex); 789 | return 0; 790 | } 791 | 792 | static int aml__stop_signal(struct aml* self, struct aml_signal* sig) 793 | { 794 | return self->backend.del_signal(self->state, sig); 795 | } 796 | 797 | static int aml__stop_work(struct aml* self, struct aml_work* work) 798 | { 799 | return 0; 800 | } 801 | 802 | static int aml__stop_idle(struct aml* self, struct aml_idle* idle) 803 | { 804 | LIST_REMOVE(idle, link); 805 | return 0; 806 | } 807 | 808 | static int aml__stop_unchecked(struct aml* self, void* obj) 809 | { 810 | struct aml_obj* head = obj; 811 | 812 | switch (head->type) { 813 | case AML_OBJ_AML: return -1; 814 | case AML_OBJ_HANDLER: return aml__stop_handler(self, obj); 815 | case AML_OBJ_TIMER: /* fallthrough */ 816 | case AML_OBJ_TICKER: return aml__stop_timer(self, obj); 817 | case AML_OBJ_SIGNAL: return aml__stop_signal(self, obj); 818 | case AML_OBJ_WORK: return aml__stop_work(self, obj); 819 | case AML_OBJ_IDLE: return aml__stop_idle(self, obj); 820 | case AML_OBJ_UNSPEC: break; 821 | } 822 | 823 | abort(); 824 | return -1; 825 | } 826 | 827 | static int aml_stop_obj(struct aml* self, struct aml_obj* obj) 828 | { 829 | aml_obj_ref(obj); 830 | 831 | if (aml__obj_try_remove(self, obj) >= 0) 832 | aml__stop_unchecked(self, obj); 
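	/* If the object was started, aml__obj_try_remove() dropped the reference
	 * taken at start time; the unref below only releases the temporary
	 * reference acquired at the top of this function. */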
833 | 834 | aml_obj_unref(obj); 835 | 836 | return 0; 837 | } 838 | 839 | #define X(type) \ 840 | EXPORT int aml_start_ ## type(struct aml* aml, struct aml_ ## type* obj) \ 841 | { \ 842 | return aml_start_obj(aml, &obj->obj); \ 843 | } \ 844 | EXPORT int aml_stop_ ## type(struct aml* aml, struct aml_ ## type* obj) \ 845 | { \ 846 | return aml_stop_obj(aml, &obj->obj); \ 847 | } \ 848 | EXPORT bool aml_is_ ## type ## _started(struct aml* aml, struct aml_ ## type* obj) \ 849 | { \ 850 | return aml_is_obj_started(aml, &obj->obj); \ 851 | } 852 | 853 | X(handler) 854 | X(timer) 855 | X(ticker) 856 | X(signal) 857 | X(work) 858 | X(idle) 859 | 860 | #undef X 861 | 862 | static struct aml_timer* aml__get_timer_with_earliest_deadline(struct aml* self) 863 | { 864 | uint64_t deadline = UINT64_MAX; 865 | struct aml_timer* result = NULL; 866 | 867 | struct aml_timer* timer; 868 | 869 | pthread_mutex_lock(&self->timer_list_mutex); 870 | LIST_FOREACH(timer, &self->timer_list, link) 871 | if (!timer->expired && timer->deadline < deadline) { 872 | deadline = timer->deadline; 873 | result = timer; 874 | } 875 | pthread_mutex_unlock(&self->timer_list_mutex); 876 | 877 | return result; 878 | } 879 | 880 | static bool aml__handle_timeout(struct aml* self, uint64_t now) 881 | { 882 | struct aml_timer* timer = aml__get_timer_with_earliest_deadline(self); 883 | if (!timer || timer->deadline > now) 884 | return false; 885 | 886 | aml_emit(self, timer, 0); 887 | 888 | switch (timer->obj.type) { 889 | case AML_OBJ_TIMER: 890 | timer->expired = true; 891 | break; 892 | case AML_OBJ_TICKER: 893 | timer->deadline += timer->timeout; 894 | break; 895 | default: 896 | abort(); 897 | break; 898 | } 899 | 900 | return true; 901 | } 902 | 903 | static void aml__handle_idle(struct aml* self) 904 | { 905 | struct aml_idle* idle; 906 | struct aml_idle* tmp; 907 | 908 | LIST_FOREACH_SAFE(idle, &self->idle_list, link, tmp) 909 | if (idle->obj.cb && aml_is_started(self, idle)) 910 | idle->obj.cb(idle); 911 | } 912 | 913 | static void aml__handle_event(struct aml* self, struct aml_obj* obj) 914 | { 915 | /* A reference is kept here in case an object is stopped inside the 916 | * callback. We want the object to live until we're done with it. 917 | */ 918 | aml_obj_ref(obj); 919 | 920 | if (aml_is_obj_started(self, obj)) { 921 | /* Single-shot objects must be stopped before the callback so 922 | * that they can be restarted from within the callback. 923 | */ 924 | if (aml__obj_is_single_shot(obj)) 925 | aml_stop_obj(self, obj); 926 | 927 | if (obj->cb) 928 | obj->cb(obj); 929 | } 930 | 931 | if (obj->type == AML_OBJ_HANDLER) { 932 | struct aml_handler* handler = (struct aml_handler*)obj; 933 | handler->revents = 0; 934 | 935 | if (self->backend.flags & AML_BACKEND_EDGE_TRIGGERED) 936 | aml__mod_fd(self, handler); 937 | } 938 | 939 | aml_obj_unref(obj); 940 | } 941 | 942 | /* Might exit earlier than timeout. It's up to the user to check */ 943 | EXPORT 944 | int aml_poll(struct aml* self, int64_t timeout_us) 945 | { 946 | int timeout_ms = (timeout_us == INT64_C(-1)) 947 | ? 
-1 : timeout_us / INT64_C(1000); 948 | return aml__poll(self, timeout_ms); 949 | } 950 | 951 | static struct aml_obj* aml__event_dequeue(struct aml* self) 952 | { 953 | pthread_mutex_lock(&self->event_queue_mutex); 954 | struct aml_obj* obj = TAILQ_FIRST(&self->event_queue); 955 | if (obj && --obj->n_events == 0) 956 | TAILQ_REMOVE(&self->event_queue, obj, event_link); 957 | pthread_mutex_unlock(&self->event_queue_mutex); 958 | return obj; 959 | } 960 | 961 | EXPORT 962 | void aml_dispatch(struct aml* self) 963 | { 964 | uint64_t now = aml__gettime_us(self); 965 | while (aml__handle_timeout(self, now)); 966 | 967 | struct aml_timer* earliest = aml__get_timer_with_earliest_deadline(self); 968 | if (earliest) { 969 | assert(earliest->deadline > now); 970 | aml__set_deadline(self, earliest->deadline); 971 | } 972 | 973 | sigset_t sig_old, sig_new; 974 | sigfillset(&sig_new); 975 | 976 | pthread_sigmask(SIG_BLOCK, &sig_new, &sig_old); 977 | 978 | struct aml_obj* obj; 979 | while ((obj = aml__event_dequeue(self)) != NULL) { 980 | aml__handle_event(self, obj); 981 | aml_obj_unref(obj); 982 | } 983 | 984 | pthread_sigmask(SIG_SETMASK, &sig_old, NULL); 985 | 986 | aml__handle_idle(self); 987 | aml__post_dispatch(self); 988 | } 989 | 990 | EXPORT 991 | int aml_run(struct aml* self) 992 | { 993 | self->do_exit = false; 994 | 995 | do { 996 | aml_poll(self, -1); 997 | aml_dispatch(self); 998 | } while (!self->do_exit); 999 | 1000 | return 0; 1001 | } 1002 | 1003 | EXPORT 1004 | void aml_exit(struct aml* self) 1005 | { 1006 | self->do_exit = true; 1007 | 1008 | if (self->backend.exit) 1009 | self->backend.exit(self->state); 1010 | } 1011 | 1012 | static void aml__free(struct aml* self) 1013 | { 1014 | while (!LIST_EMPTY(&self->obj_list)) { 1015 | struct aml_obj* obj = LIST_FIRST(&self->obj_list); 1016 | 1017 | aml__stop_unchecked(self, obj); 1018 | aml__obj_remove_unlocked(self, obj); 1019 | } 1020 | 1021 | if (self->have_thread_pool) 1022 | self->backend.thread_pool_release(self); 1023 | 1024 | self->backend.del_state(self->state); 1025 | 1026 | while (!TAILQ_EMPTY(&self->event_queue)) { 1027 | struct aml_obj* obj = TAILQ_FIRST(&self->event_queue); 1028 | TAILQ_REMOVE(&self->event_queue, obj, event_link); 1029 | aml_obj_unref(obj); 1030 | } 1031 | 1032 | pthread_mutex_destroy(&self->timer_list_mutex); 1033 | pthread_mutex_destroy(&self->obj_list_mutex); 1034 | pthread_mutex_destroy(&self->event_queue_mutex); 1035 | 1036 | free(self); 1037 | } 1038 | 1039 | void aml_emit(struct aml* self, void* ptr, uint32_t revents) 1040 | { 1041 | struct aml_obj* obj = ptr; 1042 | 1043 | if (obj->type == AML_OBJ_HANDLER) { 1044 | struct aml_handler* handler = ptr; 1045 | uint32_t old = atomic_fetch_or(&handler->revents, revents); 1046 | if (old != 0) 1047 | return; 1048 | } 1049 | 1050 | sigset_t sig_old, sig_new; 1051 | sigfillset(&sig_new); 1052 | 1053 | pthread_sigmask(SIG_BLOCK, &sig_new, &sig_old); 1054 | pthread_mutex_lock(&self->event_queue_mutex); 1055 | if (obj->n_events++ == 0) 1056 | TAILQ_INSERT_TAIL(&self->event_queue, obj, event_link); 1057 | aml_obj_ref(obj); 1058 | pthread_mutex_unlock(&self->event_queue_mutex); 1059 | pthread_sigmask(SIG_SETMASK, &sig_old, NULL); 1060 | } 1061 | 1062 | EXPORT 1063 | enum aml_event aml_get_event_mask(const struct aml_handler* handler) 1064 | { 1065 | return handler->event_mask; 1066 | } 1067 | 1068 | EXPORT 1069 | void aml_set_event_mask(struct aml_handler* handler, enum aml_event mask) 1070 | { 1071 | handler->event_mask = mask; 1072 | 1073 | if (handler->parent && 
aml_is_started(handler->parent, handler)) 1074 | aml__mod_fd(handler->parent, handler); 1075 | } 1076 | 1077 | EXPORT 1078 | enum aml_event aml_get_revents(const struct aml_handler* handler) 1079 | { 1080 | return handler->revents; 1081 | } 1082 | 1083 | EXPORT 1084 | int aml_loop_get_fd(const struct aml* self) 1085 | { 1086 | return self->backend.get_fd ? 1087 | self->backend.get_fd(self->state) : -1; 1088 | } 1089 | 1090 | EXPORT 1091 | int aml_handler_get_fd(const struct aml_handler* self) 1092 | { 1093 | return self->fd; 1094 | } 1095 | 1096 | EXPORT 1097 | int aml_get_signo(const struct aml_signal* sig) 1098 | { 1099 | return sig->signo; 1100 | } 1101 | 1102 | aml_callback_fn aml_get_work_fn(const struct aml_work* work) 1103 | { 1104 | return work->work_fn; 1105 | } 1106 | 1107 | void* aml_get_backend_data(const void* ptr) 1108 | { 1109 | const struct aml_obj* obj = ptr; 1110 | return obj->backend_data; 1111 | } 1112 | 1113 | void aml_set_backend_data(void* ptr, void* data) 1114 | { 1115 | struct aml_obj* obj = ptr; 1116 | obj->backend_data = data; 1117 | } 1118 | 1119 | void* aml_get_backend_state(const struct aml* self) 1120 | { 1121 | return self->state; 1122 | } 1123 | 1124 | EXPORT 1125 | void aml_timer_set_duration(struct aml_timer* self, uint64_t duration) 1126 | { 1127 | self->timeout = duration; 1128 | } 1129 | 1130 | EXPORT 1131 | void aml_ticker_set_duration(struct aml_ticker* self, uint64_t duration) 1132 | { 1133 | self->timeout = duration; 1134 | } 1135 | -------------------------------------------------------------------------------- /src/epoll.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 - 2022 Andri Yngvason 3 | * 4 | * Permission to use, copy, modify, and/or distribute this software for any 5 | * purpose with or without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 9 | * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY 10 | * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 11 | * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 12 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 13 | * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 14 | * PERFORMANCE OF THIS SOFTWARE. 
15 | */ 16 | 17 | #include "aml.h" 18 | #include "backend.h" 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | struct epoll_state { 31 | struct aml* aml; 32 | 33 | int epoll_fd; 34 | int timer_fd; 35 | }; 36 | 37 | struct epoll_signal { 38 | struct epoll_state* state; 39 | int fd; 40 | struct aml_weak_ref* ref; 41 | }; 42 | 43 | static void* epoll_new_state(struct aml* aml) 44 | { 45 | struct epoll_state* self = calloc(1, sizeof(*self)); 46 | if (!self) 47 | return NULL; 48 | 49 | self->aml = aml; 50 | 51 | self->epoll_fd = epoll_create(16); 52 | if (self->epoll_fd < 0) 53 | goto epoll_failure; 54 | 55 | self->timer_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); 56 | if (self->timer_fd < 0) 57 | goto timer_fd_failure; 58 | 59 | struct epoll_event event = { 60 | .events = EPOLLIN, 61 | }; 62 | if (epoll_ctl(self->epoll_fd, EPOLL_CTL_ADD, self->timer_fd, &event) < 0) 63 | goto timer_add_failure; 64 | 65 | return self; 66 | 67 | timer_add_failure: 68 | close(self->timer_fd); 69 | timer_fd_failure: 70 | close(self->epoll_fd); 71 | epoll_failure: 72 | free(self); 73 | return NULL; 74 | } 75 | 76 | static void epoll_del_state(void* state) 77 | { 78 | struct epoll_state* self = state; 79 | close(self->timer_fd); 80 | close(self->epoll_fd); 81 | free(self); 82 | } 83 | 84 | static int epoll_get_fd(const void* state) 85 | { 86 | const struct epoll_state* self = state; 87 | return self->epoll_fd; 88 | } 89 | 90 | static void epoll_emit_event(struct epoll_state* self, 91 | struct epoll_event* event) 92 | { 93 | if (event->data.ptr == NULL) { 94 | // Must be the timerfd 95 | uint64_t count = 0; 96 | (void)read(self->timer_fd, &count, sizeof(count)); 97 | return; 98 | } 99 | 100 | enum aml_event aml_events = AML_EVENT_NONE; 101 | if (event->events & EPOLLIN) 102 | aml_events |= AML_EVENT_READ; 103 | if (event->events & EPOLLOUT) 104 | aml_events |= AML_EVENT_WRITE; 105 | if (event->events & EPOLLPRI) 106 | aml_events |= AML_EVENT_OOB; 107 | 108 | aml_emit(self->aml, event->data.ptr, aml_events); 109 | } 110 | 111 | static int epoll_poll(void* state, int timeout) 112 | { 113 | struct epoll_state* self = state; 114 | struct epoll_event events[16]; 115 | size_t max_events = sizeof(events) / sizeof(events[0]); 116 | 117 | int nfds = epoll_wait(self->epoll_fd, events, max_events, timeout); 118 | for (int i = 0; i < nfds; ++i) 119 | epoll_emit_event(self, &events[i]); 120 | 121 | return nfds; 122 | } 123 | 124 | static void epoll_event_from_aml_handler(struct epoll_event* event, 125 | struct aml_handler* handler) 126 | { 127 | enum aml_event in = aml_get_event_mask(handler); 128 | 129 | event->events = 0; 130 | if (in & AML_EVENT_READ) 131 | event->events |= EPOLLIN; 132 | if (in & AML_EVENT_WRITE) 133 | event->events |= EPOLLOUT; 134 | if (in & AML_EVENT_OOB) 135 | event->events |= EPOLLPRI; 136 | 137 | event->data.ptr = handler; 138 | } 139 | 140 | static int epoll_add_fd(void* state, struct aml_handler* handler) 141 | { 142 | struct epoll_state* self = state; 143 | struct epoll_event event; 144 | epoll_event_from_aml_handler(&event, handler); 145 | return epoll_ctl(self->epoll_fd, EPOLL_CTL_ADD, aml_get_fd(handler), 146 | &event); 147 | } 148 | 149 | static int epoll_mod_fd(void* state, struct aml_handler* handler) 150 | { 151 | struct epoll_state* self = state; 152 | struct epoll_event event; 153 | epoll_event_from_aml_handler(&event, handler); 154 | return epoll_ctl(self->epoll_fd, EPOLL_CTL_MOD, 
aml_get_fd(handler), 155 | &event); 156 | } 157 | 158 | static int epoll_del_fd(void* state, struct aml_handler* handler) 159 | { 160 | struct epoll_state* self = state; 161 | // Dummy event to appease valgrind 162 | struct epoll_event event = { 0 }; 163 | return epoll_ctl(self->epoll_fd, EPOLL_CTL_DEL, aml_get_fd(handler), 164 | &event); 165 | } 166 | 167 | static void epoll_signal_cleanup(void* userdata) 168 | { 169 | struct epoll_signal* sig = userdata; 170 | close(sig->fd); 171 | aml_weak_ref_del(sig->ref); 172 | free(sig); 173 | } 174 | 175 | static void epoll_on_signal(struct aml_handler* handler) 176 | { 177 | struct epoll_signal* ctx = aml_get_userdata(handler); 178 | 179 | struct signalfd_siginfo fdsi; 180 | (void)read(ctx->fd, &fdsi, sizeof(fdsi)); 181 | 182 | struct aml_signal* sig = aml_weak_ref_read(ctx->ref); 183 | if (!sig) 184 | return; 185 | 186 | aml_emit(ctx->state->aml, sig, 0); 187 | aml_unref(sig); 188 | } 189 | 190 | static int epoll_add_signal(void* state, struct aml_signal* sig) 191 | { 192 | struct epoll_state* self = state; 193 | 194 | struct epoll_signal* ctx = calloc(1, sizeof(*ctx)); 195 | if (!ctx) 196 | return -1; 197 | 198 | int signo = aml_get_signo(sig); 199 | 200 | sigset_t ss; 201 | sigemptyset(&ss); 202 | sigaddset(&ss, signo); 203 | 204 | ctx->state = self; 205 | ctx->ref = aml_weak_ref_new(sig); 206 | 207 | ctx->fd = signalfd(-1, &ss, SFD_NONBLOCK | SFD_CLOEXEC); 208 | if (ctx->fd < 0) 209 | goto signalfd_failure; 210 | 211 | struct aml_handler* handler = 212 | aml_handler_new(ctx->fd, epoll_on_signal, ctx, 213 | epoll_signal_cleanup); 214 | if (!handler) 215 | goto handler_failure; 216 | 217 | if (aml_start(self->aml, handler) < 0) 218 | goto start_failure; 219 | 220 | aml_set_backend_data(sig, handler); 221 | 222 | pthread_sigmask(SIG_BLOCK, &ss, NULL); 223 | return 0; 224 | 225 | start_failure: 226 | aml_unref(handler); 227 | handler_failure: 228 | close(ctx->fd); 229 | signalfd_failure: 230 | free(ctx); 231 | return -1; 232 | } 233 | 234 | static int epoll_del_signal(void* state, struct aml_signal* sig) 235 | { 236 | struct epoll_state* self = state; 237 | 238 | struct aml_handler* handler = aml_get_backend_data(sig); 239 | assert(handler); 240 | 241 | int rc = aml_stop(self->aml, handler); 242 | if (rc >= 0) 243 | aml_unref(handler); 244 | 245 | return rc; 246 | } 247 | 248 | static int epoll_set_deadline(void* state, uint64_t deadline) 249 | { 250 | struct epoll_state* self = state; 251 | 252 | struct itimerspec it = { 253 | .it_value = { 254 | .tv_sec = (uint32_t)(deadline / UINT64_C(1000000)), 255 | .tv_nsec = (uint32_t)((deadline % UINT64_C(1000000)) * 256 | UINT64_C(1000)), 257 | }, 258 | }; 259 | 260 | return timerfd_settime(self->timer_fd, TFD_TIMER_ABSTIME, &it, NULL); 261 | } 262 | 263 | const struct aml_backend implementation = { 264 | .new_state = epoll_new_state, 265 | .del_state = epoll_del_state, 266 | .clock = CLOCK_MONOTONIC, 267 | .get_fd = epoll_get_fd, 268 | .poll = epoll_poll, 269 | .add_fd = epoll_add_fd, 270 | .mod_fd = epoll_mod_fd, 271 | .del_fd = epoll_del_fd, 272 | .add_signal = epoll_add_signal, 273 | .del_signal = epoll_del_signal, 274 | .set_deadline = epoll_set_deadline, 275 | }; 276 | -------------------------------------------------------------------------------- /src/kqueue.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Andri Yngvason 3 | * 4 | * Permission to use, copy, modify, and/or distribute this software for any 5 | * purpose with or 
without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 9 | * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY 10 | * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 11 | * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 12 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 13 | * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 14 | * PERFORMANCE OF THIS SOFTWARE. 15 | */ 16 | 17 | #include "aml.h" 18 | #include "backend.h" 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | struct kq_state { 29 | struct aml* aml; 30 | int fd; 31 | }; 32 | 33 | static void* kq_new_state(struct aml* aml) 34 | { 35 | struct kq_state* self = calloc(1, sizeof(*self)); 36 | if (!self) 37 | return NULL; 38 | 39 | self->aml = aml; 40 | 41 | self->fd = kqueue(); 42 | if (self->fd < 0) 43 | goto kqueue_failure; 44 | 45 | return self; 46 | 47 | kqueue_failure: 48 | free(self); 49 | return NULL; 50 | } 51 | 52 | static void kq_del_state(void* state) 53 | { 54 | struct kq_state* self = state; 55 | close(self->fd); 56 | free(self); 57 | } 58 | 59 | static int kq_get_fd(const void* state) 60 | { 61 | const struct kq_state* self = state; 62 | return self->fd; 63 | } 64 | 65 | static void kq_emit_event(struct kq_state* self, struct kevent* event) 66 | { 67 | // TODO: Maybe joint read/write into one for fds? 68 | switch (event->filter) { 69 | case EVFILT_READ: 70 | aml_emit(self->aml, event->udata, AML_EVENT_READ); 71 | break; 72 | case EVFILT_WRITE: 73 | aml_emit(self->aml, event->udata, AML_EVENT_WRITE); 74 | break; 75 | case EVFILT_SIGNAL: 76 | aml_emit(self->aml, event->udata, 0); 77 | break; 78 | case EVFILT_TIMER: 79 | assert(event->ident == 0); 80 | break; 81 | } 82 | } 83 | 84 | static int kq_poll(void* state, int timeout) 85 | { 86 | struct kq_state* self = state; 87 | 88 | struct timespec ts = { 89 | .tv_sec = timeout / 1000UL, 90 | .tv_nsec = (timeout % 1000UL) * 1000000UL, 91 | }; 92 | 93 | struct kevent events[16]; 94 | size_t max_events = sizeof(events) / sizeof(events[0]); 95 | 96 | int nfds = kevent(self->fd, NULL, 0, events, max_events, timeout >= 0 ? &ts : NULL); 97 | for (int i = 0; i < nfds; ++i) 98 | kq_emit_event(self, &events[i]); 99 | 100 | return nfds; 101 | } 102 | 103 | static int kq_add_fd(void* state, struct aml_handler* handler) 104 | { 105 | struct kq_state* self = state; 106 | int fd = aml_get_fd(handler); 107 | 108 | enum aml_event last_mask = (intptr_t)aml_get_backend_data(handler); 109 | enum aml_event mask = aml_get_event_mask(handler); 110 | aml_set_backend_data(handler, (void*)(intptr_t)mask); 111 | 112 | struct kevent events[2]; 113 | int n = 0; 114 | 115 | if ((mask ^ last_mask) & AML_EVENT_READ) 116 | EV_SET(&events[n++], fd, EVFILT_READ, 117 | mask & AML_EVENT_READ ? EV_ADD : EV_DELETE, 118 | 0, 0, handler); 119 | 120 | if ((mask ^ last_mask) & AML_EVENT_WRITE) 121 | EV_SET(&events[n++], fd, EVFILT_WRITE, 122 | mask & AML_EVENT_WRITE ? 
EV_ADD : EV_DELETE, 123 | 0, 0, handler); 124 | 125 | return kevent(self->fd, events, n, NULL, 0, NULL); 126 | } 127 | 128 | static int kq_del_fd(void* state, struct aml_handler* handler) 129 | { 130 | struct kq_state* self = state; 131 | int fd = aml_get_fd(handler); 132 | 133 | enum aml_event last_mask = (intptr_t)aml_get_backend_data(handler); 134 | 135 | struct kevent events[2]; 136 | int n = 0; 137 | 138 | if (last_mask & AML_EVENT_READ) 139 | EV_SET(&events[n++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); 140 | 141 | if (last_mask & AML_EVENT_WRITE) 142 | EV_SET(&events[n++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); 143 | 144 | return kevent(self->fd, events, n, NULL, 0, NULL); 145 | } 146 | 147 | static int kq_add_signal(void* state, struct aml_signal* sig) 148 | { 149 | struct kq_state* self = state; 150 | int signo = aml_get_signo(sig); 151 | 152 | struct kevent event; 153 | EV_SET(&event, signo, EVFILT_SIGNAL, EV_ADD, 0, 0, sig); 154 | 155 | int rc = kevent(self->fd, &event, 1, NULL, 0, NULL); 156 | 157 | sigset_t ss; 158 | sigemptyset(&ss); 159 | sigaddset(&ss, signo); 160 | pthread_sigmask(SIG_BLOCK, &ss, NULL); 161 | 162 | return rc; 163 | } 164 | 165 | static int kq_del_signal(void* state, struct aml_signal* sig) 166 | { 167 | struct kq_state* self = state; 168 | int signo = aml_get_signo(sig); 169 | 170 | struct kevent event; 171 | EV_SET(&event, signo, EVFILT_SIGNAL, EV_DELETE, 0, 0, NULL); 172 | 173 | // TODO: Restore signal mask 174 | 175 | return kevent(self->fd, &event, 1, NULL, 0, NULL); 176 | } 177 | 178 | static int kq_set_deadline(void* state, uint64_t deadline) 179 | { 180 | struct kq_state* self = state; 181 | 182 | struct kevent event; 183 | #ifdef __MACH__ 184 | struct timespec ts = { 0 }; 185 | clock_gettime(CLOCK_REALTIME, &ts); 186 | uint64_t current_time = ts.tv_sec * UINT64_C(1000000) + ts.tv_nsec / UINT64_C(1000); 187 | uint64_t relative_deadline = deadline > current_time ? deadline - current_time : 0; 188 | EV_SET(&event, 0, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 189 | NOTE_USECONDS, relative_deadline, NULL); 190 | #else 191 | EV_SET(&event, 0, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 192 | NOTE_USECONDS | NOTE_ABSTIME, deadline, NULL); 193 | #endif 194 | 195 | return kevent(self->fd, &event, 1, NULL, 0, NULL); 196 | } 197 | 198 | const struct aml_backend implementation = { 199 | .new_state = kq_new_state, 200 | .del_state = kq_del_state, 201 | .clock = CLOCK_REALTIME, 202 | .get_fd = kq_get_fd, 203 | .poll = kq_poll, 204 | .add_fd = kq_add_fd, 205 | .mod_fd = kq_add_fd, // Same as add_fd 206 | .del_fd = kq_del_fd, 207 | .add_signal = kq_add_signal, 208 | .del_signal = kq_del_signal, 209 | .set_deadline = kq_set_deadline, 210 | }; 211 | -------------------------------------------------------------------------------- /src/posix.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 Andri Yngvason 3 | * 4 | * Permission to use, copy, modify, and/or distribute this software for any 5 | * purpose with or without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 9 | * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY 10 | * AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 11 | * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 12 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 13 | * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 14 | * PERFORMANCE OF THIS SOFTWARE. 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | #include "aml.h" 29 | #include "backend.h" 30 | #include "sys/queue.h" 31 | 32 | struct posix_state; 33 | 34 | typedef void (*fd_op_fn)(struct posix_state*, struct aml_handler*); 35 | 36 | struct posix_fd_op { 37 | struct aml_handler* handler; 38 | fd_op_fn call; 39 | TAILQ_ENTRY(posix_fd_op) link; 40 | }; 41 | 42 | TAILQ_HEAD(posix_fd_op_queue, posix_fd_op); 43 | 44 | struct posix_state { 45 | struct aml* aml; 46 | 47 | struct pollfd* fds; 48 | struct aml_handler** handlers; 49 | 50 | uint32_t max_fds; 51 | uint32_t num_fds; 52 | 53 | pthread_t poller_thread; 54 | 55 | int event_pipe_rfd, event_pipe_wfd; 56 | 57 | struct posix_fd_op_queue fd_ops; 58 | pthread_mutex_t fd_ops_mutex; 59 | 60 | int nfds; 61 | pthread_mutex_t wait_mutex; 62 | pthread_cond_t wait_cond; 63 | 64 | bool waiting_for_dispatch; 65 | pthread_mutex_t dispatch_mutex; 66 | pthread_cond_t dispatch_cond; 67 | }; 68 | 69 | struct signal_handler { 70 | struct posix_state* state; 71 | struct aml_signal* sig; 72 | 73 | LIST_ENTRY(signal_handler) link; 74 | }; 75 | 76 | LIST_HEAD(signal_handler_list, signal_handler); 77 | 78 | static int posix_spawn_poller(struct posix_state* self); 79 | static void posix_post_dispatch(void* state); 80 | static void posix_interrupt(void* state); 81 | 82 | static struct signal_handler_list signal_handlers = LIST_HEAD_INITIALIZER(NULL); 83 | 84 | static int posix__enqueue_fd_op(struct posix_state* self, fd_op_fn call, 85 | struct aml_handler* handler) 86 | { 87 | struct posix_fd_op* op = calloc(1, sizeof(*op)); 88 | if (!op) 89 | return -1; 90 | 91 | aml_ref(handler); 92 | 93 | op->call = call; 94 | op->handler = handler; 95 | 96 | pthread_mutex_lock(&self->fd_ops_mutex); 97 | TAILQ_INSERT_TAIL(&self->fd_ops, op, link); 98 | pthread_mutex_unlock(&self->fd_ops_mutex); 99 | 100 | posix_interrupt(self); 101 | 102 | return 0; 103 | } 104 | 105 | static struct posix_fd_op* posix__dequeue_fd_op(struct posix_state* self) 106 | { 107 | pthread_mutex_lock(&self->fd_ops_mutex); 108 | struct posix_fd_op* op = TAILQ_FIRST(&self->fd_ops); 109 | if (op) 110 | TAILQ_REMOVE(&self->fd_ops, op, link); 111 | pthread_mutex_unlock(&self->fd_ops_mutex); 112 | return op; 113 | } 114 | 115 | static struct signal_handler* signal_handler_find_by_signo(int signo) 116 | { 117 | struct signal_handler* handler; 118 | 119 | LIST_FOREACH(handler, &signal_handlers, link) 120 | if (aml_get_signo(handler->sig) == signo) 121 | return handler; 122 | 123 | return NULL; 124 | } 125 | 126 | static struct signal_handler* signal_handler_find_by_obj(struct aml_signal* obj) 127 | { 128 | struct signal_handler* handler; 129 | 130 | LIST_FOREACH(handler, &signal_handlers, link) 131 | if (handler->sig == obj) 132 | return handler; 133 | 134 | return NULL; 135 | } 136 | 137 | static void posix__signal_handler(int signo) 138 | { 139 | struct signal_handler* handler; 140 | 141 | LIST_FOREACH(handler, &signal_handlers, link) 142 | if (aml_get_signo(handler->sig) == signo) 143 | aml_emit(handler->state->aml, handler->sig, 0); 144 | } 145 | 146 | 
static void dont_block(int fd) 147 | { 148 | fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK); 149 | } 150 | 151 | static int posix_init_event_pipe(struct posix_state* self) 152 | { 153 | int fds[2]; 154 | if (pipe(fds) < 0) 155 | return -1; 156 | 157 | dont_block(fds[0]); 158 | dont_block(fds[1]); 159 | 160 | self->event_pipe_rfd = fds[0]; 161 | self->event_pipe_wfd = fds[1]; 162 | 163 | return 0; 164 | } 165 | 166 | static void* posix_new_state(struct aml* aml) 167 | { 168 | struct posix_state* self = calloc(1, sizeof(*self)); 169 | if (!self) 170 | return NULL; 171 | 172 | self->aml = aml; 173 | self->max_fds = 128; 174 | self->fds = malloc(sizeof(*self->fds) * self->max_fds); 175 | self->handlers = malloc(sizeof(*self->handlers) * self->max_fds); 176 | if (!self->fds || !self->handlers) { 177 | free(self->fds); 178 | free(self->handlers); 179 | goto failure; 180 | } 181 | 182 | TAILQ_INIT(&self->fd_ops); 183 | pthread_mutex_init(&self->fd_ops_mutex, NULL); 184 | 185 | pthread_mutex_init(&self->wait_mutex, NULL); 186 | pthread_cond_init(&self->wait_cond, NULL); 187 | 188 | pthread_mutex_init(&self->dispatch_mutex, NULL); 189 | pthread_cond_init(&self->dispatch_cond, NULL); 190 | 191 | if (posix_init_event_pipe(self) < 0) 192 | goto pipe_failure; 193 | 194 | if (posix_spawn_poller(self) < 0) 195 | goto thread_failure; 196 | 197 | return self; 198 | 199 | thread_failure: 200 | close(self->event_pipe_rfd); 201 | close(self->event_pipe_wfd); 202 | pipe_failure: 203 | pthread_mutex_destroy(&self->fd_ops_mutex); 204 | failure: 205 | free(self); 206 | return NULL; 207 | } 208 | 209 | static int posix__find_handler(struct posix_state* self, 210 | struct aml_handler* handler) 211 | { 212 | for (uint32_t i = 0; i < self->num_fds; ++i) 213 | if (self->handlers[i] == handler) 214 | return i; 215 | 216 | return -1; 217 | } 218 | 219 | static void posix_del_state(void* state) 220 | { 221 | struct posix_state* self = state; 222 | 223 | posix_post_dispatch(self); 224 | 225 | pthread_cancel(self->poller_thread); 226 | pthread_join(self->poller_thread, NULL); 227 | 228 | struct posix_fd_op* op; 229 | while ((op = posix__dequeue_fd_op(self))) { 230 | aml_unref(op->handler); 231 | free(op); 232 | } 233 | 234 | close(self->event_pipe_rfd); 235 | close(self->event_pipe_wfd); 236 | 237 | pthread_cond_destroy(&self->dispatch_cond); 238 | pthread_mutex_destroy(&self->dispatch_mutex); 239 | pthread_cond_destroy(&self->wait_cond); 240 | pthread_mutex_destroy(&self->wait_mutex); 241 | pthread_mutex_destroy(&self->fd_ops_mutex); 242 | free(self->handlers); 243 | free(self->fds); 244 | free(self); 245 | } 246 | 247 | static int posix_get_fd(const void* state) 248 | { 249 | const struct posix_state* self = state; 250 | return self->event_pipe_rfd; 251 | } 252 | 253 | static void posix__apply_fd_ops(struct posix_state* self) 254 | { 255 | while (1) { 256 | struct posix_fd_op* op = posix__dequeue_fd_op(self); 257 | if (!op) 258 | break; 259 | 260 | op->call(self, op->handler); 261 | aml_unref(op->handler); 262 | free(op); 263 | } 264 | } 265 | 266 | static enum aml_event posix_poll_events_to_aml_events(uint32_t poll_events) 267 | { 268 | enum aml_event aml_events = 0; 269 | 270 | if (poll_events & (POLLIN | POLLPRI)) 271 | aml_events |= AML_EVENT_READ; 272 | if (poll_events & POLLOUT) 273 | aml_events |= AML_EVENT_READ; 274 | 275 | return aml_events; 276 | } 277 | 278 | static int posix_do_poll(struct posix_state* self, int timeout) 279 | { 280 | int nfds = poll(self->fds, self->num_fds, timeout); 281 | if (nfds 
<= 0) 282 | return nfds; 283 | 284 | for (uint32_t i = 0; i < self->num_fds; ++i) 285 | if (self->fds[i].revents) { 286 | struct pollfd* pfd = &self->fds[i]; 287 | struct aml_handler* handler = self->handlers[i]; 288 | 289 | assert(pfd->fd == aml_get_fd(handler)); 290 | enum aml_event events = 291 | posix_poll_events_to_aml_events(pfd->revents); 292 | aml_emit(self->aml, handler, events); 293 | } 294 | 295 | return nfds; 296 | } 297 | 298 | static void posix_wake_up_main(struct posix_state* self, int nfds) 299 | { 300 | pthread_mutex_lock(&self->dispatch_mutex); 301 | self->waiting_for_dispatch = true; 302 | pthread_mutex_unlock(&self->dispatch_mutex); 303 | 304 | pthread_mutex_lock(&self->wait_mutex); 305 | self->nfds = nfds; 306 | pthread_cond_signal(&self->wait_cond); 307 | pthread_mutex_unlock(&self->wait_mutex); 308 | 309 | pthread_mutex_lock(&self->dispatch_mutex); 310 | while (self->waiting_for_dispatch) 311 | pthread_cond_wait(&self->dispatch_cond, &self->dispatch_mutex); 312 | pthread_mutex_unlock(&self->dispatch_mutex); 313 | } 314 | 315 | static void dummy_handler() 316 | { 317 | } 318 | 319 | static void* posix_poll_thread(void* state) 320 | { 321 | struct posix_state* self = state; 322 | 323 | while (1) { 324 | posix__apply_fd_ops(self); 325 | 326 | int nfds = posix_do_poll(self, -1); 327 | if (nfds > 0) { 328 | char one = 1; 329 | write(self->event_pipe_wfd, &one, sizeof(one)); 330 | } 331 | 332 | if (nfds != 0) 333 | posix_wake_up_main(self, nfds); 334 | } 335 | 336 | return NULL; 337 | } 338 | 339 | static int posix_spawn_poller(struct posix_state* self) 340 | { 341 | struct sigaction sa = { .sa_handler = dummy_handler }; 342 | struct sigaction sa_old; 343 | sigaction(SIGUSR1, &sa, &sa_old); 344 | 345 | return pthread_create(&self->poller_thread, NULL, posix_poll_thread, 346 | self); 347 | 348 | sigaction(SIGUSR1, &sa_old, NULL); 349 | } 350 | 351 | static int posix_poll(void* state, int timeout) 352 | { 353 | struct posix_state* self = state; 354 | int nfds; 355 | 356 | if (timeout == 0) { 357 | pthread_mutex_lock(&self->wait_mutex); 358 | nfds = self->nfds; 359 | self->nfds = 0; 360 | pthread_mutex_unlock(&self->wait_mutex); 361 | } else if (timeout < 0) { 362 | pthread_mutex_lock(&self->wait_mutex); 363 | while (self->nfds == 0) 364 | pthread_cond_wait(&self->wait_cond, &self->wait_mutex); 365 | nfds = self->nfds; 366 | self->nfds = 0; 367 | pthread_mutex_unlock(&self->wait_mutex); 368 | } else { 369 | struct timespec ts = { 0 }; 370 | clock_gettime(CLOCK_REALTIME, &ts); 371 | uint32_t ms = timeout + ts.tv_nsec / 1000000UL; 372 | ts.tv_sec += ms / 1000UL; 373 | ts.tv_nsec = (ms % 1000UL) * 1000000UL; 374 | 375 | pthread_mutex_lock(&self->wait_mutex); 376 | while (self->nfds == 0) { 377 | int rc = pthread_cond_timedwait(&self->wait_cond, 378 | &self->wait_mutex, &ts); 379 | if (rc == ETIMEDOUT) 380 | break; 381 | } 382 | nfds = self->nfds; 383 | self->nfds = 0; 384 | pthread_mutex_unlock(&self->wait_mutex); 385 | } 386 | 387 | if (nfds > 0) { 388 | char dummy[256]; 389 | while (read(self->event_pipe_rfd, dummy, sizeof(dummy)) == sizeof(dummy)); 390 | } else if (nfds < 0) { 391 | errno = EINTR; 392 | } 393 | 394 | return nfds; 395 | } 396 | 397 | static uint32_t posix_get_event_mask(struct aml_handler* handler) 398 | { 399 | uint32_t poll_events = 0; 400 | enum aml_event aml_events = aml_get_event_mask(handler); 401 | 402 | if (aml_events & AML_EVENT_READ) 403 | poll_events |= POLLIN | POLLPRI; 404 | if (aml_events & AML_EVENT_WRITE) 405 | poll_events |= POLLOUT; 406 | 407 
| return poll_events; 408 | } 409 | 410 | static void posix_add_fd_op(struct posix_state* self, struct aml_handler* handler) 411 | { 412 | if (self->num_fds >= self->max_fds) { 413 | uint32_t new_max = self->max_fds * 2; 414 | struct pollfd* fds = realloc(self->fds, sizeof(*fds) * new_max); 415 | struct aml_handler** hds = 416 | realloc(self->handlers, sizeof(*hds) * new_max); 417 | assert(fds && hds); 418 | 419 | self->fds = fds; 420 | self->handlers = hds; 421 | self->max_fds = new_max; 422 | } 423 | 424 | struct pollfd* event = &self->fds[self->num_fds]; 425 | event->events = posix_get_event_mask(handler); 426 | event->revents = 0; 427 | event->fd = aml_get_fd(handler); 428 | 429 | self->handlers[self->num_fds] = handler; 430 | 431 | self->num_fds++; 432 | } 433 | 434 | static void posix_mod_fd_op(struct posix_state* self, struct aml_handler* handler) 435 | { 436 | int index = posix__find_handler(self, handler); 437 | if (index < 0) 438 | return; 439 | 440 | self->fds[index].fd = aml_get_fd(handler); 441 | self->fds[index].events = posix_get_event_mask(handler); 442 | } 443 | 444 | static void posix_del_fd_op(struct posix_state* self, struct aml_handler* handler) 445 | { 446 | int index = posix__find_handler(self, handler); 447 | if (index < 0) 448 | return; 449 | 450 | self->num_fds--; 451 | 452 | self->fds[index] = self->fds[self->num_fds]; 453 | self->handlers[index] = self->handlers[self->num_fds]; 454 | } 455 | 456 | static int posix_add_fd(void* state, struct aml_handler* handler) 457 | { 458 | return posix__enqueue_fd_op(state, posix_add_fd_op, handler); 459 | } 460 | 461 | static int posix_mod_fd(void* state, struct aml_handler* handler) 462 | { 463 | return posix__enqueue_fd_op(state, posix_mod_fd_op, handler); 464 | } 465 | 466 | static int posix_del_fd(void* state, struct aml_handler* handler) 467 | { 468 | return posix__enqueue_fd_op(state, posix_del_fd_op, handler); 469 | } 470 | 471 | static int posix_add_signal(void* state, struct aml_signal* sig) 472 | { 473 | int signo = aml_get_signo(sig); 474 | 475 | struct signal_handler* handler = calloc(1, sizeof(*handler)); 476 | if (!handler) 477 | return -1; 478 | 479 | handler->state = state; 480 | handler->sig = sig; 481 | 482 | if (!signal_handler_find_by_signo(signo)) { 483 | sigset_t set; 484 | sigemptyset(&set); 485 | sigaddset(&set, signo); 486 | pthread_sigmask(SIG_BLOCK, &set, NULL); 487 | 488 | struct sigaction sa = { 489 | .sa_handler = posix__signal_handler, 490 | }; 491 | 492 | if (sigaction(aml_get_signo(sig), &sa, NULL) < 0) 493 | goto failure; 494 | } 495 | 496 | LIST_INSERT_HEAD(&signal_handlers, handler, link); 497 | 498 | return 0; 499 | 500 | failure: 501 | free(handler); 502 | return -1; 503 | } 504 | 505 | static int posix_del_signal(void* state, struct aml_signal* sig) 506 | { 507 | struct signal_handler* handler = signal_handler_find_by_obj(sig); 508 | if (!handler) 509 | return -1; 510 | 511 | LIST_REMOVE(handler, link); 512 | 513 | if (!signal_handler_find_by_signo(aml_get_signo(sig))) { 514 | struct sigaction sa = { 515 | .sa_handler = SIG_DFL, 516 | }; 517 | 518 | int signo = aml_get_signo(sig); 519 | 520 | sigaction(signo, &sa, NULL); 521 | 522 | sigset_t set; 523 | sigemptyset(&set); 524 | sigaddset(&set, signo); 525 | pthread_sigmask(SIG_UNBLOCK, &set, NULL); 526 | } 527 | 528 | free(handler); 529 | return 0; 530 | } 531 | 532 | static void posix_post_dispatch(void* state) 533 | { 534 | struct posix_state* self = state; 535 | 536 | pthread_mutex_lock(&self->dispatch_mutex); 537 | 
self->waiting_for_dispatch = false; 538 | pthread_cond_signal(&self->dispatch_cond); 539 | pthread_mutex_unlock(&self->dispatch_mutex); 540 | } 541 | 542 | static void posix_interrupt(void* state) 543 | { 544 | struct posix_state* self = state; 545 | pthread_kill(self->poller_thread, SIGUSR1); 546 | } 547 | 548 | const struct aml_backend posix_backend = { 549 | .new_state = posix_new_state, 550 | .del_state = posix_del_state, 551 | .get_fd = posix_get_fd, 552 | .poll = posix_poll, 553 | .exit = NULL, 554 | .add_fd = posix_add_fd, 555 | .mod_fd = posix_mod_fd, 556 | .del_fd = posix_del_fd, 557 | .add_signal = posix_add_signal, 558 | .del_signal = posix_del_signal, 559 | .post_dispatch = posix_post_dispatch, 560 | .interrupt = posix_interrupt, 561 | }; 562 | -------------------------------------------------------------------------------- /src/thread-pool.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 - 2022 Andri Yngvason 3 | * 4 | * Permission to use, copy, modify, and/or distribute this software for any 5 | * purpose with or without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 9 | * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY 10 | * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 11 | * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 12 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 13 | * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 14 | * PERFORMANCE OF THIS SOFTWARE. 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | #include "aml.h" 27 | #include "backend.h" 28 | #include "thread-pool.h" 29 | #include "sys/queue.h" 30 | 31 | struct default_work { 32 | struct aml_weak_ref* aml_ref; 33 | struct aml_work* work; 34 | 35 | TAILQ_ENTRY(default_work) link; 36 | }; 37 | 38 | TAILQ_HEAD(default_work_queue, default_work); 39 | 40 | static struct default_work_queue default_work_queue = 41 | TAILQ_HEAD_INITIALIZER(default_work_queue); 42 | 43 | static atomic_int n_thread_pool_users = 0; 44 | 45 | static pthread_t* thread_pool = NULL; 46 | static pthread_mutex_t work_queue_mutex; 47 | static pthread_cond_t work_queue_cond; 48 | 49 | static int n_threads = 0; 50 | 51 | static int enqueue_work(struct aml* aml, struct aml_work* work, int broadcast); 52 | 53 | static void reap_threads(void) 54 | { 55 | enqueue_work(NULL, NULL, 1); 56 | 57 | for (int i = 0; i < n_threads; ++i) 58 | pthread_join(thread_pool[i], NULL); 59 | 60 | free(thread_pool); 61 | thread_pool = NULL; 62 | 63 | pthread_mutex_destroy(&work_queue_mutex); 64 | pthread_cond_destroy(&work_queue_cond); 65 | 66 | while (!TAILQ_EMPTY(&default_work_queue)) { 67 | struct default_work* work = TAILQ_FIRST(&default_work_queue); 68 | TAILQ_REMOVE(&default_work_queue, work, link); 69 | if (work->work) 70 | aml_unref(work->work); 71 | free(work); 72 | } 73 | } 74 | 75 | static struct default_work* dequeue_work(void) 76 | { 77 | struct default_work* work; 78 | 79 | pthread_mutex_lock(&work_queue_mutex); 80 | 81 | while ((work = TAILQ_FIRST(&default_work_queue)) == NULL) 82 | pthread_cond_wait(&work_queue_cond, &work_queue_mutex); 83 | 84 | if (work->work) 85 | TAILQ_REMOVE(&default_work_queue, 
work, link); 86 | 87 | pthread_mutex_unlock(&work_queue_mutex); 88 | 89 | return work; 90 | } 91 | 92 | static void* worker_fn(void* context) 93 | { 94 | (void)context; 95 | sigset_t ss; 96 | sigfillset(&ss); 97 | sigdelset(&ss, SIGCHLD); 98 | pthread_sigmask(SIG_BLOCK, &ss, NULL); 99 | #ifndef __ANDROID__ 100 | pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); 101 | #endif 102 | 103 | while (1) { 104 | struct default_work* work = dequeue_work(); 105 | assert(work); 106 | 107 | if (!work->work) 108 | break; 109 | 110 | aml_callback_fn cb = aml_get_work_fn(work->work); 111 | if (cb) 112 | cb(work->work); 113 | 114 | struct aml* aml = work->aml_ref ? 115 | aml_weak_ref_read(work->aml_ref) : NULL; 116 | if (aml) { 117 | aml_emit(aml, work->work, 0); 118 | aml_interrupt(aml); 119 | aml_unref(aml); 120 | } 121 | 122 | aml_weak_ref_del(work->aml_ref); 123 | aml_unref(work->work); 124 | free(work); 125 | } 126 | 127 | return NULL; 128 | } 129 | 130 | int thread_pool_acquire_default(struct aml* aml, int n) 131 | { 132 | (void)aml; 133 | 134 | int rc = 0; 135 | 136 | if (n_threads == 0) { 137 | pthread_mutex_init(&work_queue_mutex, NULL); 138 | pthread_cond_init(&work_queue_cond, NULL); 139 | } 140 | 141 | if (n > n_threads) { 142 | pthread_t* new_pool = 143 | realloc(thread_pool, n * sizeof(pthread_t)); 144 | if (!new_pool) 145 | return -1; 146 | 147 | thread_pool = new_pool; 148 | } 149 | 150 | int i; 151 | for (i = n_threads; i < n; ++i) { 152 | rc = pthread_create(&thread_pool[i], NULL, worker_fn, NULL); 153 | if (rc < 0) 154 | break; 155 | } 156 | 157 | n_threads = i; 158 | 159 | if (rc < 0) 160 | goto failure; 161 | 162 | ++n_thread_pool_users; 163 | 164 | return rc; 165 | 166 | failure: 167 | errno = rc; 168 | reap_threads(); 169 | return -1; 170 | } 171 | 172 | static int enqueue_work(struct aml* aml, struct aml_work* work, int broadcast) 173 | { 174 | struct default_work* default_work = calloc(1, sizeof(*default_work)); 175 | if (!default_work) 176 | return -1; 177 | 178 | if (work) 179 | aml_ref(work); 180 | 181 | default_work->work = work; 182 | default_work->aml_ref = aml ? aml_weak_ref_new(aml) : NULL; 183 | 184 | pthread_mutex_lock(&work_queue_mutex); 185 | TAILQ_INSERT_TAIL(&default_work_queue, default_work, link); 186 | 187 | if (broadcast) 188 | pthread_cond_broadcast(&work_queue_cond); 189 | else 190 | pthread_cond_signal(&work_queue_cond); 191 | 192 | pthread_mutex_unlock(&work_queue_mutex); 193 | return 0; 194 | } 195 | 196 | int thread_pool_enqueue_default(struct aml* aml, struct aml_work* work) 197 | { 198 | return enqueue_work(aml, work, 0); 199 | } 200 | 201 | void thread_pool_release_default(struct aml* aml) 202 | { 203 | if (--n_thread_pool_users == 0) 204 | reap_threads(); 205 | } 206 | --------------------------------------------------------------------------------
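A note on the poll handshake in src/posix.c above: poll() runs on a dedicated poller thread (posix_poll_thread), which publishes its result through wait_mutex/wait_cond plus a self-pipe and then blocks on dispatch_cond until the main thread has dispatched the events. posix_poll() is what the main loop calls to pick the result up, posix_post_dispatch() releases the poller afterwards, and posix_interrupt() breaks the poller out of poll() by sending it SIGUSR1 (the dummy_handler installed in posix_spawn_poller() exists only so the signal interrupts poll() with EINTR instead of terminating the process). The sketch below condenses that rendezvous into a single mutex and two condition variables; every name in it (struct handshake, produce, consume) is illustrative only and is not part of aml.

/* Hypothetical, simplified version of the poller/main-thread rendezvous in
 * src/posix.c. One mutex guards both flags here; the real backend uses two. */
#include <pthread.h>
#include <stdbool.h>

struct handshake {
	pthread_mutex_t mutex;
	pthread_cond_t ready;       /* poller -> main: a poll result is available */
	pthread_cond_t dispatched;  /* main -> poller: events have been dispatched */
	bool have_result;
	bool waiting_for_dispatch;
	int result;
};

/* Poller side, cf. posix_wake_up_main(): publish the result, then wait until
 * the main thread says it is done dispatching. */
static void produce(struct handshake* hs, int result)
{
	pthread_mutex_lock(&hs->mutex);
	hs->result = result;
	hs->have_result = true;
	hs->waiting_for_dispatch = true;
	pthread_cond_signal(&hs->ready);
	while (hs->waiting_for_dispatch)
		pthread_cond_wait(&hs->dispatched, &hs->mutex);
	pthread_mutex_unlock(&hs->mutex);
}

/* Main-loop side, cf. posix_poll() followed by posix_post_dispatch():
 * wait for a result, consume it, then release the poller thread. */
static int consume(struct handshake* hs)
{
	pthread_mutex_lock(&hs->mutex);
	while (!hs->have_result)
		pthread_cond_wait(&hs->ready, &hs->mutex);
	int result = hs->result;
	hs->have_result = false;
	hs->waiting_for_dispatch = false;
	pthread_cond_signal(&hs->dispatched);
	pthread_mutex_unlock(&hs->mutex);
	return result;
}

Here produce() plays the role of posix_wake_up_main(), while consume() folds posix_poll() and posix_post_dispatch() into one step; the real backend keeps them separate because aml_dispatch() runs in between, and it keeps the two directions on separate mutexes.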
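A note on worker shutdown in src/thread-pool.c above: the pool is a single process-wide queue (default_work_queue) protected by work_queue_mutex/work_queue_cond. dequeue_work() deliberately leaves any item whose work pointer is NULL on the queue, so when reap_threads() enqueues one such item with a broadcast, every worker wakes up, sees the sentinel, and returns from worker_fn(); the sentinel itself is freed later when reap_threads() drains the queue. The sketch below shows the same sentinel convention in isolation; the names (struct job, worker, request_shutdown) are made up for illustration and are not aml API, and "sys/queue.h" refers to the bundled BSD queue macros.

/* Illustrative stand-alone version of the NULL-sentinel shutdown used by
 * dequeue_work()/reap_threads(). Not aml API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include "sys/queue.h"

struct job {
	void (*fn)(void*);   /* NULL marks the shutdown sentinel */
	void* arg;
	TAILQ_ENTRY(job) link;
};

TAILQ_HEAD(job_queue, job);
static struct job_queue job_queue = TAILQ_HEAD_INITIALIZER(job_queue);
static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;

static void* worker(void* arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&queue_mutex);
		struct job* job;
		while ((job = TAILQ_FIRST(&job_queue)) == NULL)
			pthread_cond_wait(&queue_cond, &queue_mutex);

		bool is_sentinel = job->fn == NULL;
		if (!is_sentinel)                /* real work: take it off the queue */
			TAILQ_REMOVE(&job_queue, job, link);
		pthread_mutex_unlock(&queue_mutex);

		if (is_sentinel)                 /* sentinel stays queued for the others */
			return NULL;

		job->fn(job->arg);
		free(job);
	}
}

/* Ask all workers to exit; mirrors enqueue_work(NULL, NULL, 1) in reap_threads(). */
static int request_shutdown(void)
{
	struct job* sentinel = calloc(1, sizeof(*sentinel));
	if (!sentinel)
		return -1;
	pthread_mutex_lock(&queue_mutex);
	TAILQ_INSERT_TAIL(&job_queue, sentinel, link);
	pthread_cond_broadcast(&queue_cond);
	pthread_mutex_unlock(&queue_mutex);
	return 0;
}

Leaving the sentinel on the queue is what lets a single enqueue terminate any number of workers; the cost is that it must be freed separately once the workers have been joined, which is exactly what the drain loop at the end of reap_threads() does.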