├── README ├── cvs-vers ├── src ├── Changes ├── LICENSE ├── Makefile.am ├── README ├── README.embed ├── Symbols.ev ├── Symbols.event ├── autogen.sh ├── configure.ac ├── ev++.c ├── ev++.h ├── ev.3 ├── ev.c ├── ev.h ├── ev.html ├── ev.m4 ├── ev.pod ├── ev_epoll.c ├── ev_iouring.c ├── ev_kqueue.c ├── ev_linuxaio.c ├── ev_poll.c ├── ev_port.c ├── ev_select.c ├── ev_vars.h ├── ev_win32.c ├── ev_wrap.h ├── evdns.c ├── evdns.h ├── event.c ├── event.h ├── event_compat.h ├── import_libevent ├── libev.m4 ├── update_ev_c ├── update_ev_wrap └── update_symbols └── util └── sync-cvs /README: -------------------------------------------------------------------------------- 1 | src/README -------------------------------------------------------------------------------- /cvs-vers: -------------------------------------------------------------------------------- 1 | Changes:1.368 2 | LICENSE:1.11 3 | Makefile.am:1.9 4 | README:1.22 5 | README.embed:1.29 6 | Symbols.ev:1.15 7 | Symbols.event:1.4 8 | autogen.sh:1.3 9 | configure.ac:1.46 10 | ev++.C:1.3 11 | ev++.h:1.68 12 | ev.3:1.99 13 | ev.c:1.534 14 | ev.h:1.205 15 | ev.html:1.80 16 | ev.m4:1.2 17 | ev.pod:1.467 18 | ev_epoll.c:1.82 19 | ev_iouring.c:1.24 20 | ev_kqueue.c:1.61 21 | ev_linuxaio.c:1.53 22 | ev_poll.c:1.48 23 | ev_port.c:1.33 24 | ev_select.c:1.58 25 | ev_vars.h:1.68 26 | ev_win32.c:1.21 27 | ev_wrap.h:1.45 28 | evdns.c:1.23 29 | evdns.h:1.2 30 | event.c:1.52 31 | event.h:1.26 32 | event_compat.h:1.8 33 | import_libevent:1.29 34 | libev.m4:1.18 35 | update_ev_c:1.3 36 | update_ev_wrap:1.6 37 | update_symbols:1.1 38 | -------------------------------------------------------------------------------- /src/LICENSE: -------------------------------------------------------------------------------- 1 | All files in libev are 2 | Copyright (c)2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | Alternatively, the contents of this package may be used under the terms 29 | of the GNU General Public License ("GPL") version 2 or any later version, 30 | in which case the provisions of the GPL are applicable instead of the 31 | above. 
If you wish to allow the use of your version of this package only 32 | under the terms of the GPL and not to allow others to use your version of 33 | this file under the BSD license, indicate your decision by deleting the 34 | provisions above and replace them with the notice and other provisions 35 | required by the GPL in this and the other files of this package. If you do 36 | not delete the provisions above, a recipient may use your version of this 37 | file under either the BSD or the GPL. 38 | -------------------------------------------------------------------------------- /src/Makefile.am: -------------------------------------------------------------------------------- 1 | AUTOMAKE_OPTIONS = foreign 2 | 3 | VERSION_INFO = 4:0:0 4 | 5 | EXTRA_DIST = LICENSE Changes libev.m4 autogen.sh \ 6 | ev_vars.h ev_wrap.h \ 7 | ev_epoll.c ev_select.c ev_poll.c ev_kqueue.c ev_port.c ev_win32.c \ 8 | ev.3 ev.pod Symbols.ev Symbols.event 9 | 10 | man_MANS = ev.3 11 | 12 | include_HEADERS = ev.h ev++.h event.h 13 | 14 | lib_LTLIBRARIES = libev.la 15 | 16 | libev_la_SOURCES = ev.c event.c 17 | libev_la_LDFLAGS = -version-info $(VERSION_INFO) 18 | 19 | ev.3: ev.pod 20 | pod2man -n LIBEV -r "libev-$(VERSION)" -c "libev - high performance full featured event loop" -s3 <$< >$@ 21 | -------------------------------------------------------------------------------- /src/README: -------------------------------------------------------------------------------- 1 | libev is a high-performance event loop/event model with lots of features. 2 | (see benchmark at http://libev.schmorp.de/bench.html) 3 | 4 | 5 | ABOUT 6 | 7 | Homepage: http://software.schmorp.de/pkg/libev 8 | Mailinglist: libev@lists.schmorp.de 9 | http://lists.schmorp.de/cgi-bin/mailman/listinfo/libev 10 | Library Documentation: http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod 11 | 12 | Libev is modelled (very losely) after libevent and the Event perl 13 | module, but is faster, scales better and is more correct, and also more 14 | featureful. And also smaller. Yay. 15 | 16 | Some of the specialties of libev not commonly found elsewhere are: 17 | 18 | - extensive and detailed, readable documentation (not doxygen garbage). 19 | - fully supports fork, can detect fork in various ways and automatically 20 | re-arms kernel mechanisms that do not support fork. 21 | - highly optimised select, poll, linux epoll, linux aio, bsd kqueue 22 | and solaris event ports backends. 23 | - filesystem object (path) watching (with optional linux inotify support). 24 | - wallclock-based times (using absolute time, cron-like). 25 | - relative timers/timeouts (handle time jumps). 26 | - fast intra-thread communication between multiple 27 | event loops (with optional fast linux eventfd backend). 28 | - extremely easy to embed (fully documented, no dependencies, 29 | autoconf supported but optional). 30 | - very small codebase, no bloated library, simple code. 31 | - fully extensible by being able to plug into the event loop, 32 | integrate other event loops, integrate other event loop users. 33 | - very little memory use (small watchers, small event loop data). 34 | - optional C++ interface allowing method and function callbacks 35 | at no extra memory or runtime overhead. 36 | - optional Perl interface with similar characteristics (capable 37 | of running Glib/Gtk2 on libev). 38 | - support for other languages (multiple C++ interfaces, D, Ruby, 39 | Python) available from third-parties. 
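To make the feature list above concrete, here is a minimal, hedged sketch of typical libev usage through the plain C API (a one-shot ev_timer on the default loop); it is illustrative only and assumes the standard libev 4.x names documented in ev.pod:

    #include <stdio.h>
    #include "ev.h"

    /* called by the loop once the timer fires */
    static void
    timeout_cb (struct ev_loop *loop, ev_timer *w, int revents)
    {
      puts ("timeout");
      ev_break (loop, EVBREAK_ALL);   /* leave all nested ev_run calls */
    }

    int
    main (void)
    {
      struct ev_loop *loop = EV_DEFAULT;  /* the default event loop */
      ev_timer timeout_watcher;

      /* configure a 5.5s one-shot timer and register it with the loop */
      ev_timer_init (&timeout_watcher, timeout_cb, 5.5, 0.);
      ev_timer_start (loop, &timeout_watcher);

      ev_run (loop, 0);                   /* dispatch events until ev_break */
      return 0;
    }

The same init/start/callback pattern applies to the other watcher types listed in Symbols.ev (ev_io, ev_signal, ev_stat, ...).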
40 | 41 | Examples of programs that embed libev: the EV perl module, node.js, 42 | auditd, rxvt-unicode, gvpe (GNU Virtual Private Ethernet), the 43 | Deliantra MMORPG server (http://www.deliantra.net/), Rubinius (a 44 | next-generation Ruby VM), the Ebb web server, the Rev event toolkit. 45 | 46 | 47 | CONTRIBUTORS 48 | 49 | libev was written and designed by Marc Lehmann and Emanuele Giaquinta. 50 | 51 | The following people sent in patches or made other noteworthy 52 | contributions to the design (for minor patches, see the Changes 53 | file. If I forgot to include you, please shout at me, it was an 54 | accident): 55 | 56 | W.C.A. Wijngaards 57 | Christopher Layne 58 | Chris Brody 59 | 60 | -------------------------------------------------------------------------------- /src/README.embed: -------------------------------------------------------------------------------- 1 | This file is now included in the main libev documentation, see 2 | 3 | http://cvs.schmorp.de/libev/ev.html 4 | -------------------------------------------------------------------------------- /src/Symbols.ev: -------------------------------------------------------------------------------- 1 | ev_async_send 2 | ev_async_start 3 | ev_async_stop 4 | ev_backend 5 | ev_break 6 | ev_check_start 7 | ev_check_stop 8 | ev_child_start 9 | ev_child_stop 10 | ev_cleanup_start 11 | ev_cleanup_stop 12 | ev_clear_pending 13 | ev_default_loop 14 | ev_default_loop_ptr 15 | ev_depth 16 | ev_embeddable_backends 17 | ev_embed_start 18 | ev_embed_stop 19 | ev_embed_sweep 20 | ev_feed_event 21 | ev_feed_fd_event 22 | ev_feed_signal 23 | ev_feed_signal_event 24 | ev_fork_start 25 | ev_fork_stop 26 | ev_idle_start 27 | ev_idle_stop 28 | ev_invoke 29 | ev_invoke_pending 30 | ev_io_start 31 | ev_io_stop 32 | ev_iteration 33 | ev_loop_destroy 34 | ev_loop_fork 35 | ev_loop_new 36 | ev_now 37 | ev_now_update 38 | ev_once 39 | ev_pending_count 40 | ev_periodic_again 41 | ev_periodic_start 42 | ev_periodic_stop 43 | ev_prepare_start 44 | ev_prepare_stop 45 | ev_recommended_backends 46 | ev_ref 47 | ev_resume 48 | ev_run 49 | ev_set_allocator 50 | ev_set_invoke_pending_cb 51 | ev_set_io_collect_interval 52 | ev_set_loop_release_cb 53 | ev_set_syserr_cb 54 | ev_set_timeout_collect_interval 55 | ev_set_userdata 56 | ev_signal_start 57 | ev_signal_stop 58 | ev_sleep 59 | ev_stat_start 60 | ev_stat_stat 61 | ev_stat_stop 62 | ev_supported_backends 63 | ev_suspend 64 | ev_time 65 | ev_timer_again 66 | ev_timer_remaining 67 | ev_timer_start 68 | ev_timer_stop 69 | ev_unref 70 | ev_userdata 71 | ev_verify 72 | ev_version_major 73 | ev_version_minor 74 | -------------------------------------------------------------------------------- /src/Symbols.event: -------------------------------------------------------------------------------- 1 | event_active 2 | event_add 3 | event_base_dispatch 4 | event_base_free 5 | event_base_get_method 6 | event_base_loop 7 | event_base_loopexit 8 | event_base_new 9 | event_base_once 10 | event_base_priority_init 11 | event_base_set 12 | event_del 13 | event_dispatch 14 | event_get_callback 15 | event_get_method 16 | event_get_version 17 | event_init 18 | event_loop 19 | event_loopexit 20 | event_once 21 | event_pending 22 | event_priority_init 23 | event_priority_set 24 | event_set 25 | -------------------------------------------------------------------------------- /src/autogen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | autoreconf --install --symlink --force 4 | 
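README.embed above now just points at the main documentation; as a hedged companion to the autoconf build described next in configure.ac, the following sketch shows the documented source-level embedding alternative (EV_STANDALONE compiles ev.c without config.h; the file name and the EV_USE_EPOLL knob here are only examples, see the EMBEDDING section of ev.pod for the full set of options):

    /* myapp_ev.c - illustrative only: compile libev straight into the
     * application instead of linking against the autoconf-built libev.la */
    #define EV_STANDALONE 1   /* no config.h, use libev's own feature defaults */
    #define EV_USE_EPOLL 1    /* example override: enable the epoll backend */
    #include "ev.c"           /* pulls in ev.h plus the selected backend code */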
-------------------------------------------------------------------------------- /src/configure.ac: -------------------------------------------------------------------------------- 1 | dnl also update ev.h! 2 | AC_INIT([libev], [4.33]) 3 | 4 | orig_CFLAGS="$CFLAGS" 5 | 6 | AC_CONFIG_SRCDIR([ev_epoll.c]) 7 | AM_INIT_AUTOMAKE 8 | 9 | AC_CONFIG_HEADERS([config.h]) 10 | AM_MAINTAINER_MODE 11 | 12 | AC_PROG_CC 13 | 14 | dnl Supply default CFLAGS, if not specified 15 | if test -z "$orig_CFLAGS"; then 16 | if test x$GCC = xyes; then 17 | CFLAGS="-g -O3" 18 | fi 19 | fi 20 | 21 | AC_PROG_INSTALL 22 | AC_PROG_LIBTOOL 23 | 24 | m4_include([libev.m4]) 25 | 26 | AC_CONFIG_FILES([Makefile]) 27 | AC_OUTPUT 28 | -------------------------------------------------------------------------------- /src/ev++.c: -------------------------------------------------------------------------------- 1 | #include "ev++.h" 2 | 3 | namespace ev { 4 | extern "C" { 5 | void cb_io (struct ev_io *w, int revents) { (*static_cast(w))(revents); } 6 | void cb_timer (struct ev_timer *w, int revents) { (*static_cast(w))(revents); } 7 | #if EV_PERIODICS 8 | void cb_periodic (struct ev_periodic *w, int revents) { (*static_cast(w))(revents); } 9 | #endif 10 | void cb_idle (struct ev_idle *w, int revents) { (*static_cast(w))(revents); } 11 | void cb_prepare (struct ev_prepare *w, int revents) { (*static_cast(w))(revents); } 12 | void cb_check (struct ev_check *w, int revents) { (*static_cast(w))(revents); } 13 | void cb_sig (struct ev_signal *w, int revents) { (*static_cast(w))(revents); } 14 | void cb_child (struct ev_child *w, int revents) { (*static_cast(w))(revents); } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /src/ev++.h: -------------------------------------------------------------------------------- 1 | /* 2 | * libev simple C++ wrapper classes 3 | * 4 | * Copyright (c) 2007,2008,2010,2018,2020 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #ifndef EVPP_H__ 41 | #define EVPP_H__ 42 | 43 | #ifdef EV_H 44 | # include EV_H 45 | #else 46 | # include "ev.h" 47 | #endif 48 | 49 | #ifndef EV_USE_STDEXCEPT 50 | # define EV_USE_STDEXCEPT 1 51 | #endif 52 | 53 | #if EV_USE_STDEXCEPT 54 | # include 55 | #endif 56 | 57 | namespace ev { 58 | 59 | typedef ev_tstamp tstamp; 60 | 61 | enum { 62 | UNDEF = EV_UNDEF, 63 | NONE = EV_NONE, 64 | READ = EV_READ, 65 | WRITE = EV_WRITE, 66 | #if EV_COMPAT3 67 | TIMEOUT = EV_TIMEOUT, 68 | #endif 69 | TIMER = EV_TIMER, 70 | PERIODIC = EV_PERIODIC, 71 | SIGNAL = EV_SIGNAL, 72 | CHILD = EV_CHILD, 73 | STAT = EV_STAT, 74 | IDLE = EV_IDLE, 75 | CHECK = EV_CHECK, 76 | PREPARE = EV_PREPARE, 77 | FORK = EV_FORK, 78 | ASYNC = EV_ASYNC, 79 | EMBED = EV_EMBED, 80 | # undef ERROR // some systems stupidly #define ERROR 81 | ERROR = EV_ERROR 82 | }; 83 | 84 | enum 85 | { 86 | AUTO = EVFLAG_AUTO, 87 | NOENV = EVFLAG_NOENV, 88 | FORKCHECK = EVFLAG_FORKCHECK, 89 | 90 | SELECT = EVBACKEND_SELECT, 91 | POLL = EVBACKEND_POLL, 92 | EPOLL = EVBACKEND_EPOLL, 93 | KQUEUE = EVBACKEND_KQUEUE, 94 | DEVPOLL = EVBACKEND_DEVPOLL, 95 | PORT = EVBACKEND_PORT 96 | }; 97 | 98 | enum 99 | { 100 | #if EV_COMPAT3 101 | NONBLOCK = EVLOOP_NONBLOCK, 102 | ONESHOT = EVLOOP_ONESHOT, 103 | #endif 104 | NOWAIT = EVRUN_NOWAIT, 105 | ONCE = EVRUN_ONCE 106 | }; 107 | 108 | enum how_t 109 | { 110 | ONE = EVBREAK_ONE, 111 | ALL = EVBREAK_ALL 112 | }; 113 | 114 | struct bad_loop 115 | #if EV_USE_STDEXCEPT 116 | : std::exception 117 | #endif 118 | { 119 | #if EV_USE_STDEXCEPT 120 | const char *what () const EV_NOEXCEPT 121 | { 122 | return "libev event loop cannot be initialized, bad value of LIBEV_FLAGS?"; 123 | } 124 | #endif 125 | }; 126 | 127 | #ifdef EV_AX 128 | # undef EV_AX 129 | #endif 130 | 131 | #ifdef EV_AX_ 132 | # undef EV_AX_ 133 | #endif 134 | 135 | #if EV_MULTIPLICITY 136 | # define EV_AX raw_loop 137 | # define EV_AX_ raw_loop, 138 | #else 139 | # define EV_AX 140 | # define EV_AX_ 141 | #endif 142 | 143 | struct loop_ref 144 | { 145 | loop_ref (EV_P) EV_NOEXCEPT 146 | #if EV_MULTIPLICITY 147 | : EV_AX (EV_A) 148 | #endif 149 | { 150 | } 151 | 152 | bool operator == (const loop_ref &other) const EV_NOEXCEPT 153 | { 154 | #if EV_MULTIPLICITY 155 | return EV_AX == other.EV_AX; 156 | #else 157 | return true; 158 | #endif 159 | } 160 | 161 | bool operator != (const loop_ref &other) const EV_NOEXCEPT 162 | { 163 | #if EV_MULTIPLICITY 164 | return ! (*this == other); 165 | #else 166 | return false; 167 | #endif 168 | } 169 | 170 | #if EV_MULTIPLICITY 171 | bool operator == (const EV_P) const EV_NOEXCEPT 172 | { 173 | return this->EV_AX == EV_A; 174 | } 175 | 176 | bool operator != (const EV_P) const EV_NOEXCEPT 177 | { 178 | return ! 
(*this == EV_A); 179 | } 180 | 181 | operator struct ev_loop * () const EV_NOEXCEPT 182 | { 183 | return EV_AX; 184 | } 185 | 186 | operator const struct ev_loop * () const EV_NOEXCEPT 187 | { 188 | return EV_AX; 189 | } 190 | 191 | bool is_default () const EV_NOEXCEPT 192 | { 193 | return EV_AX == ev_default_loop (0); 194 | } 195 | #endif 196 | 197 | #if EV_COMPAT3 198 | void loop (int flags = 0) 199 | { 200 | ev_run (EV_AX_ flags); 201 | } 202 | 203 | void unloop (how_t how = ONE) EV_NOEXCEPT 204 | { 205 | ev_break (EV_AX_ how); 206 | } 207 | #endif 208 | 209 | void run (int flags = 0) 210 | { 211 | ev_run (EV_AX_ flags); 212 | } 213 | 214 | void break_loop (how_t how = ONE) EV_NOEXCEPT 215 | { 216 | ev_break (EV_AX_ how); 217 | } 218 | 219 | void post_fork () EV_NOEXCEPT 220 | { 221 | ev_loop_fork (EV_AX); 222 | } 223 | 224 | unsigned int backend () const EV_NOEXCEPT 225 | { 226 | return ev_backend (EV_AX); 227 | } 228 | 229 | tstamp now () const EV_NOEXCEPT 230 | { 231 | return ev_now (EV_AX); 232 | } 233 | 234 | void ref () EV_NOEXCEPT 235 | { 236 | ev_ref (EV_AX); 237 | } 238 | 239 | void unref () EV_NOEXCEPT 240 | { 241 | ev_unref (EV_AX); 242 | } 243 | 244 | #if EV_FEATURE_API 245 | unsigned int iteration () const EV_NOEXCEPT 246 | { 247 | return ev_iteration (EV_AX); 248 | } 249 | 250 | unsigned int depth () const EV_NOEXCEPT 251 | { 252 | return ev_depth (EV_AX); 253 | } 254 | 255 | void set_io_collect_interval (tstamp interval) EV_NOEXCEPT 256 | { 257 | ev_set_io_collect_interval (EV_AX_ interval); 258 | } 259 | 260 | void set_timeout_collect_interval (tstamp interval) EV_NOEXCEPT 261 | { 262 | ev_set_timeout_collect_interval (EV_AX_ interval); 263 | } 264 | #endif 265 | 266 | // function callback 267 | void once (int fd, int events, tstamp timeout, void (*cb)(int, void *), void *arg = 0) EV_NOEXCEPT 268 | { 269 | ev_once (EV_AX_ fd, events, timeout, cb, arg); 270 | } 271 | 272 | // method callback 273 | template 274 | void once (int fd, int events, tstamp timeout, K *object) EV_NOEXCEPT 275 | { 276 | once (fd, events, timeout, method_thunk, object); 277 | } 278 | 279 | // default method == operator () 280 | template 281 | void once (int fd, int events, tstamp timeout, K *object) EV_NOEXCEPT 282 | { 283 | once (fd, events, timeout, method_thunk, object); 284 | } 285 | 286 | template 287 | static void method_thunk (int revents, void *arg) 288 | { 289 | (static_cast(arg)->*method) 290 | (revents); 291 | } 292 | 293 | // no-argument method callback 294 | template 295 | void once (int fd, int events, tstamp timeout, K *object) EV_NOEXCEPT 296 | { 297 | once (fd, events, timeout, method_noargs_thunk, object); 298 | } 299 | 300 | template 301 | static void method_noargs_thunk (int revents, void *arg) 302 | { 303 | (static_cast(arg)->*method) 304 | (); 305 | } 306 | 307 | // simpler function callback 308 | template 309 | void once (int fd, int events, tstamp timeout) EV_NOEXCEPT 310 | { 311 | once (fd, events, timeout, simpler_func_thunk); 312 | } 313 | 314 | template 315 | static void simpler_func_thunk (int revents, void *arg) 316 | { 317 | (*cb) 318 | (revents); 319 | } 320 | 321 | // simplest function callback 322 | template 323 | void once (int fd, int events, tstamp timeout) EV_NOEXCEPT 324 | { 325 | once (fd, events, timeout, simplest_func_thunk); 326 | } 327 | 328 | template 329 | static void simplest_func_thunk (int revents, void *arg) 330 | { 331 | (*cb) 332 | (); 333 | } 334 | 335 | void feed_fd_event (int fd, int revents) EV_NOEXCEPT 336 | { 337 | ev_feed_fd_event (EV_AX_ 
fd, revents); 338 | } 339 | 340 | void feed_signal_event (int signum) EV_NOEXCEPT 341 | { 342 | ev_feed_signal_event (EV_AX_ signum); 343 | } 344 | 345 | #if EV_MULTIPLICITY 346 | struct ev_loop* EV_AX; 347 | #endif 348 | 349 | }; 350 | 351 | #if EV_MULTIPLICITY 352 | struct dynamic_loop : loop_ref 353 | { 354 | 355 | dynamic_loop (unsigned int flags = AUTO) 356 | : loop_ref (ev_loop_new (flags)) 357 | { 358 | if (!EV_AX) 359 | throw bad_loop (); 360 | } 361 | 362 | ~dynamic_loop () EV_NOEXCEPT 363 | { 364 | ev_loop_destroy (EV_AX); 365 | EV_AX = 0; 366 | } 367 | 368 | private: 369 | 370 | dynamic_loop (const dynamic_loop &); 371 | 372 | dynamic_loop & operator= (const dynamic_loop &); 373 | 374 | }; 375 | #endif 376 | 377 | struct default_loop : loop_ref 378 | { 379 | default_loop (unsigned int flags = AUTO) 380 | #if EV_MULTIPLICITY 381 | : loop_ref (ev_default_loop (flags)) 382 | #endif 383 | { 384 | if ( 385 | #if EV_MULTIPLICITY 386 | !EV_AX 387 | #else 388 | !ev_default_loop (flags) 389 | #endif 390 | ) 391 | throw bad_loop (); 392 | } 393 | 394 | private: 395 | default_loop (const default_loop &); 396 | default_loop &operator = (const default_loop &); 397 | }; 398 | 399 | inline loop_ref get_default_loop () EV_NOEXCEPT 400 | { 401 | #if EV_MULTIPLICITY 402 | return ev_default_loop (0); 403 | #else 404 | return loop_ref (); 405 | #endif 406 | } 407 | 408 | #undef EV_AX 409 | #undef EV_AX_ 410 | 411 | #undef EV_PX 412 | #undef EV_PX_ 413 | #if EV_MULTIPLICITY 414 | # define EV_PX loop_ref EV_A 415 | # define EV_PX_ loop_ref EV_A_ 416 | #else 417 | # define EV_PX 418 | # define EV_PX_ 419 | #endif 420 | 421 | template 422 | struct base : ev_watcher 423 | { 424 | // scoped pause/unpause of a watcher 425 | struct freeze_guard 426 | { 427 | watcher &w; 428 | bool active; 429 | 430 | freeze_guard (watcher *self) EV_NOEXCEPT 431 | : w (*self), active (w.is_active ()) 432 | { 433 | if (active) w.stop (); 434 | } 435 | 436 | ~freeze_guard () 437 | { 438 | if (active) w.start (); 439 | } 440 | }; 441 | 442 | #if EV_MULTIPLICITY 443 | EV_PX; 444 | 445 | // loop set 446 | void set (EV_P) EV_NOEXCEPT 447 | { 448 | this->EV_A = EV_A; 449 | } 450 | #endif 451 | 452 | base (EV_PX) EV_NOEXCEPT 453 | #if EV_MULTIPLICITY 454 | : EV_A (EV_A) 455 | #endif 456 | { 457 | ev_init (this, 0); 458 | } 459 | 460 | void set_ (const void *data, void (*cb)(EV_P_ ev_watcher *w, int revents)) EV_NOEXCEPT 461 | { 462 | this->data = (void *)data; 463 | ev_set_cb (static_cast(this), cb); 464 | } 465 | 466 | // function callback 467 | template 468 | void set (void *data = 0) EV_NOEXCEPT 469 | { 470 | set_ (data, function_thunk); 471 | } 472 | 473 | template 474 | static void function_thunk (EV_P_ ev_watcher *w, int revents) 475 | { 476 | function 477 | (*static_cast(w), revents); 478 | } 479 | 480 | // method callback 481 | template 482 | void set (K *object) EV_NOEXCEPT 483 | { 484 | set_ (object, method_thunk); 485 | } 486 | 487 | // default method == operator () 488 | template 489 | void set (K *object) EV_NOEXCEPT 490 | { 491 | set_ (object, method_thunk); 492 | } 493 | 494 | template 495 | static void method_thunk (EV_P_ ev_watcher *w, int revents) 496 | { 497 | (static_cast(w->data)->*method) 498 | (*static_cast(w), revents); 499 | } 500 | 501 | // no-argument callback 502 | template 503 | void set (K *object) EV_NOEXCEPT 504 | { 505 | set_ (object, method_noargs_thunk); 506 | } 507 | 508 | template 509 | static void method_noargs_thunk (EV_P_ ev_watcher *w, int revents) 510 | { 511 | 
(static_cast(w->data)->*method) 512 | (); 513 | } 514 | 515 | void operator ()(int events = EV_UNDEF) 516 | { 517 | return 518 | ev_cb (static_cast(this)) 519 | (static_cast(this), events); 520 | } 521 | 522 | bool is_active () const EV_NOEXCEPT 523 | { 524 | return ev_is_active (static_cast(this)); 525 | } 526 | 527 | bool is_pending () const EV_NOEXCEPT 528 | { 529 | return ev_is_pending (static_cast(this)); 530 | } 531 | 532 | void feed_event (int revents) EV_NOEXCEPT 533 | { 534 | ev_feed_event (EV_A_ static_cast(this), revents); 535 | } 536 | }; 537 | 538 | inline tstamp now (EV_P) EV_NOEXCEPT 539 | { 540 | return ev_now (EV_A); 541 | } 542 | 543 | inline void delay (tstamp interval) EV_NOEXCEPT 544 | { 545 | ev_sleep (interval); 546 | } 547 | 548 | inline int version_major () EV_NOEXCEPT 549 | { 550 | return ev_version_major (); 551 | } 552 | 553 | inline int version_minor () EV_NOEXCEPT 554 | { 555 | return ev_version_minor (); 556 | } 557 | 558 | inline unsigned int supported_backends () EV_NOEXCEPT 559 | { 560 | return ev_supported_backends (); 561 | } 562 | 563 | inline unsigned int recommended_backends () EV_NOEXCEPT 564 | { 565 | return ev_recommended_backends (); 566 | } 567 | 568 | inline unsigned int embeddable_backends () EV_NOEXCEPT 569 | { 570 | return ev_embeddable_backends (); 571 | } 572 | 573 | inline void set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT 574 | { 575 | ev_set_allocator (cb); 576 | } 577 | 578 | inline void set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT 579 | { 580 | ev_set_syserr_cb (cb); 581 | } 582 | 583 | #if EV_MULTIPLICITY 584 | #define EV_CONSTRUCT(cppstem,cstem) \ 585 | (EV_PX = get_default_loop ()) EV_NOEXCEPT \ 586 | : base (EV_A) \ 587 | { \ 588 | } 589 | #else 590 | #define EV_CONSTRUCT(cppstem,cstem) \ 591 | () EV_NOEXCEPT \ 592 | { \ 593 | } 594 | #endif 595 | 596 | /* using a template here would require quite a few more lines, 597 | * so a macro solution was chosen */ 598 | #define EV_BEGIN_WATCHER(cppstem,cstem) \ 599 | \ 600 | struct cppstem : base \ 601 | { \ 602 | void start () EV_NOEXCEPT \ 603 | { \ 604 | ev_ ## cstem ## _start (EV_A_ static_cast(this)); \ 605 | } \ 606 | \ 607 | void stop () EV_NOEXCEPT \ 608 | { \ 609 | ev_ ## cstem ## _stop (EV_A_ static_cast(this)); \ 610 | } \ 611 | \ 612 | cppstem EV_CONSTRUCT(cppstem,cstem) \ 613 | \ 614 | ~cppstem () EV_NOEXCEPT \ 615 | { \ 616 | stop (); \ 617 | } \ 618 | \ 619 | using base::set; \ 620 | \ 621 | private: \ 622 | \ 623 | cppstem (const cppstem &o); \ 624 | \ 625 | cppstem &operator =(const cppstem &o); \ 626 | \ 627 | public: 628 | 629 | #define EV_END_WATCHER(cppstem,cstem) \ 630 | }; 631 | 632 | EV_BEGIN_WATCHER (io, io) 633 | void set (int fd, int events) EV_NOEXCEPT 634 | { 635 | freeze_guard freeze (this); 636 | ev_io_set (static_cast(this), fd, events); 637 | } 638 | 639 | void set (int events) EV_NOEXCEPT 640 | { 641 | freeze_guard freeze (this); 642 | ev_io_modify (static_cast(this), events); 643 | } 644 | 645 | void start (int fd, int events) EV_NOEXCEPT 646 | { 647 | set (fd, events); 648 | start (); 649 | } 650 | EV_END_WATCHER (io, io) 651 | 652 | EV_BEGIN_WATCHER (timer, timer) 653 | void set (ev_tstamp after, ev_tstamp repeat = 0.) EV_NOEXCEPT 654 | { 655 | freeze_guard freeze (this); 656 | ev_timer_set (static_cast(this), after, repeat); 657 | } 658 | 659 | void start (ev_tstamp after, ev_tstamp repeat = 0.) 
EV_NOEXCEPT 660 | { 661 | set (after, repeat); 662 | start (); 663 | } 664 | 665 | void again () EV_NOEXCEPT 666 | { 667 | ev_timer_again (EV_A_ static_cast(this)); 668 | } 669 | 670 | ev_tstamp remaining () 671 | { 672 | return ev_timer_remaining (EV_A_ static_cast(this)); 673 | } 674 | EV_END_WATCHER (timer, timer) 675 | 676 | #if EV_PERIODIC_ENABLE 677 | EV_BEGIN_WATCHER (periodic, periodic) 678 | void set (ev_tstamp at, ev_tstamp interval = 0.) EV_NOEXCEPT 679 | { 680 | freeze_guard freeze (this); 681 | ev_periodic_set (static_cast(this), at, interval, 0); 682 | } 683 | 684 | void start (ev_tstamp at, ev_tstamp interval = 0.) EV_NOEXCEPT 685 | { 686 | set (at, interval); 687 | start (); 688 | } 689 | 690 | void again () EV_NOEXCEPT 691 | { 692 | ev_periodic_again (EV_A_ static_cast(this)); 693 | } 694 | EV_END_WATCHER (periodic, periodic) 695 | #endif 696 | 697 | #if EV_SIGNAL_ENABLE 698 | EV_BEGIN_WATCHER (sig, signal) 699 | void set (int signum) EV_NOEXCEPT 700 | { 701 | freeze_guard freeze (this); 702 | ev_signal_set (static_cast(this), signum); 703 | } 704 | 705 | void start (int signum) EV_NOEXCEPT 706 | { 707 | set (signum); 708 | start (); 709 | } 710 | EV_END_WATCHER (sig, signal) 711 | #endif 712 | 713 | #if EV_CHILD_ENABLE 714 | EV_BEGIN_WATCHER (child, child) 715 | void set (int pid, int trace = 0) EV_NOEXCEPT 716 | { 717 | freeze_guard freeze (this); 718 | ev_child_set (static_cast(this), pid, trace); 719 | } 720 | 721 | void start (int pid, int trace = 0) EV_NOEXCEPT 722 | { 723 | set (pid, trace); 724 | start (); 725 | } 726 | EV_END_WATCHER (child, child) 727 | #endif 728 | 729 | #if EV_STAT_ENABLE 730 | EV_BEGIN_WATCHER (stat, stat) 731 | void set (const char *path, ev_tstamp interval = 0.) EV_NOEXCEPT 732 | { 733 | freeze_guard freeze (this); 734 | ev_stat_set (static_cast(this), path, interval); 735 | } 736 | 737 | void start (const char *path, ev_tstamp interval = 0.) 
EV_NOEXCEPT 738 | { 739 | stop (); 740 | set (path, interval); 741 | start (); 742 | } 743 | 744 | void update () EV_NOEXCEPT 745 | { 746 | ev_stat_stat (EV_A_ static_cast(this)); 747 | } 748 | EV_END_WATCHER (stat, stat) 749 | #endif 750 | 751 | #if EV_IDLE_ENABLE 752 | EV_BEGIN_WATCHER (idle, idle) 753 | void set () EV_NOEXCEPT { } 754 | EV_END_WATCHER (idle, idle) 755 | #endif 756 | 757 | #if EV_PREPARE_ENABLE 758 | EV_BEGIN_WATCHER (prepare, prepare) 759 | void set () EV_NOEXCEPT { } 760 | EV_END_WATCHER (prepare, prepare) 761 | #endif 762 | 763 | #if EV_CHECK_ENABLE 764 | EV_BEGIN_WATCHER (check, check) 765 | void set () EV_NOEXCEPT { } 766 | EV_END_WATCHER (check, check) 767 | #endif 768 | 769 | #if EV_EMBED_ENABLE 770 | EV_BEGIN_WATCHER (embed, embed) 771 | void set_embed (struct ev_loop *embedded_loop) EV_NOEXCEPT 772 | { 773 | freeze_guard freeze (this); 774 | ev_embed_set (static_cast(this), embedded_loop); 775 | } 776 | 777 | void start (struct ev_loop *embedded_loop) EV_NOEXCEPT 778 | { 779 | set (embedded_loop); 780 | start (); 781 | } 782 | 783 | void sweep () 784 | { 785 | ev_embed_sweep (EV_A_ static_cast(this)); 786 | } 787 | EV_END_WATCHER (embed, embed) 788 | #endif 789 | 790 | #if EV_FORK_ENABLE 791 | EV_BEGIN_WATCHER (fork, fork) 792 | void set () EV_NOEXCEPT { } 793 | EV_END_WATCHER (fork, fork) 794 | #endif 795 | 796 | #if EV_ASYNC_ENABLE 797 | EV_BEGIN_WATCHER (async, async) 798 | void send () EV_NOEXCEPT 799 | { 800 | ev_async_send (EV_A_ static_cast(this)); 801 | } 802 | 803 | bool async_pending () EV_NOEXCEPT 804 | { 805 | return ev_async_pending (static_cast(this)); 806 | } 807 | EV_END_WATCHER (async, async) 808 | #endif 809 | 810 | #undef EV_PX 811 | #undef EV_PX_ 812 | #undef EV_CONSTRUCT 813 | #undef EV_BEGIN_WATCHER 814 | #undef EV_END_WATCHER 815 | } 816 | 817 | #endif 818 | 819 | -------------------------------------------------------------------------------- /src/ev.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 301 Moved Permanently 4 | 5 |

Moved Permanently 6 | The document has moved here. 7 | 8 | nethype-ews/7.3.42 Server at cvs.schmorp.de Port 80
9 | 10 | -------------------------------------------------------------------------------- /src/ev.m4: -------------------------------------------------------------------------------- 1 | dnl this file is part of libev, do not make local modifications 2 | dnl http://software.schmorp.de/pkg/libev 3 | 4 | dnl libev support 5 | AC_CHECK_HEADERS(sys/epoll.h sys/event.h sys/queue.h) 6 | 7 | AC_CHECK_FUNCS(epoll_ctl kqueue) 8 | 9 | AC_CHECK_FUNC(clock_gettime, [], [ 10 | AC_CHECK_LIB(rt, clock_gettime) 11 | ]) 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /src/ev_epoll.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev epoll fd activity backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* 41 | * general notes about epoll: 42 | * 43 | * a) epoll silently removes fds from the fd set. as nothing tells us 44 | * that an fd has been removed otherwise, we have to continually 45 | * "rearm" fds that we suspect *might* have changed (same 46 | * problem with kqueue, but much less costly there). 47 | * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) 48 | * and seems not to have any advantage. 49 | * c) the inability to handle fork or file descriptors (think dup) 50 | * limits the applicability over poll, so this is not a generic 51 | * poll replacement. 
52 | * d) epoll doesn't work the same as select with many file descriptors 53 | * (such as files). while not critical, no other advanced interface 54 | * seems to share this (rather non-unixy) limitation. 55 | * e) epoll claims to be embeddable, but in practise you never get 56 | * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32). 57 | * f) epoll_ctl returning EPERM means the fd is always ready. 58 | * 59 | * lots of "weird code" and complication handling in this file is due 60 | * to these design problems with epoll, as we try very hard to avoid 61 | * epoll_ctl syscalls for common usage patterns and handle the breakage 62 | * ensuing from receiving events for closed and otherwise long gone 63 | * file descriptors. 64 | */ 65 | 66 | #include 67 | 68 | #define EV_EMASK_EPERM 0x80 69 | 70 | static void 71 | epoll_modify (EV_P_ int fd, int oev, int nev) 72 | { 73 | struct epoll_event ev; 74 | unsigned char oldmask; 75 | 76 | /* 77 | * we handle EPOLL_CTL_DEL by ignoring it here 78 | * on the assumption that the fd is gone anyways 79 | * if that is wrong, we have to handle the spurious 80 | * event in epoll_poll. 81 | * if the fd is added again, we try to ADD it, and, if that 82 | * fails, we assume it still has the same eventmask. 83 | */ 84 | if (!nev) 85 | return; 86 | 87 | oldmask = anfds [fd].emask; 88 | anfds [fd].emask = nev; 89 | 90 | /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ 91 | ev.data.u64 = (uint64_t)(uint32_t)fd 92 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); 93 | ev.events = (nev & EV_READ ? EPOLLIN : 0) 94 | | (nev & EV_WRITE ? EPOLLOUT : 0); 95 | 96 | if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) 97 | return; 98 | 99 | if (ecb_expect_true (errno == ENOENT)) 100 | { 101 | /* if ENOENT then the fd went away, so try to do the right thing */ 102 | if (!nev) 103 | goto dec_egen; 104 | 105 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) 106 | return; 107 | } 108 | else if (ecb_expect_true (errno == EEXIST)) 109 | { 110 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ 111 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ 112 | if (oldmask == nev) 113 | goto dec_egen; 114 | 115 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) 116 | return; 117 | } 118 | else if (ecb_expect_true (errno == EPERM)) 119 | { 120 | /* EPERM means the fd is always ready, but epoll is too snobbish */ 121 | /* to handle it, unlike select or poll. 
*/ 122 | anfds [fd].emask = EV_EMASK_EPERM; 123 | 124 | /* add fd to epoll_eperms, if not already inside */ 125 | if (!(oldmask & EV_EMASK_EPERM)) 126 | { 127 | array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit); 128 | epoll_eperms [epoll_epermcnt++] = fd; 129 | } 130 | 131 | return; 132 | } 133 | else 134 | assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL)); 135 | 136 | fd_kill (EV_A_ fd); 137 | 138 | dec_egen: 139 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ 140 | --anfds [fd].egen; 141 | } 142 | 143 | static void 144 | epoll_poll (EV_P_ ev_tstamp timeout) 145 | { 146 | int i; 147 | int eventcnt; 148 | 149 | if (ecb_expect_false (epoll_epermcnt)) 150 | timeout = EV_TS_CONST (0.); 151 | 152 | /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ 153 | /* the default libev max wait time, however. */ 154 | EV_RELEASE_CB; 155 | eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MSEC (timeout)); 156 | EV_ACQUIRE_CB; 157 | 158 | if (ecb_expect_false (eventcnt < 0)) 159 | { 160 | if (errno != EINTR) 161 | ev_syserr ("(libev) epoll_wait"); 162 | 163 | return; 164 | } 165 | 166 | for (i = 0; i < eventcnt; ++i) 167 | { 168 | struct epoll_event *ev = epoll_events + i; 169 | 170 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ 171 | int want = anfds [fd].events; 172 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) 173 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); 174 | 175 | /* 176 | * check for spurious notification. 177 | * this only finds spurious notifications on egen updates 178 | * other spurious notifications will be found by epoll_ctl, below 179 | * we assume that fd is always in range, as we never shrink the anfds array 180 | */ 181 | if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) 182 | { 183 | /* recreate kernel state */ 184 | postfork |= 2; 185 | continue; 186 | } 187 | 188 | if (ecb_expect_false (got & ~want)) 189 | { 190 | anfds [fd].emask = want; 191 | 192 | /* 193 | * we received an event but are not interested in it, try mod or del 194 | * this often happens because we optimistically do not unregister fds 195 | * when we are no longer interested in them, but also when we get spurious 196 | * notifications for fds from another process. this is partially handled 197 | * above with the gencounter check (== our fd is not the event fd), and 198 | * partially here, when epoll_ctl returns an error (== a child has the fd 199 | * but we closed it). 200 | * note: for events such as POLLHUP, where we can't know whether it refers 201 | * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls. 202 | */ 203 | ev->events = (want & EV_READ ? EPOLLIN : 0) 204 | | (want & EV_WRITE ? EPOLLOUT : 0); 205 | 206 | /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ 207 | /* which is fortunately easy to do for us. */ 208 | if (epoll_ctl (backend_fd, want ? 
EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) 209 | { 210 | postfork |= 2; /* an error occurred, recreate kernel state */ 211 | continue; 212 | } 213 | } 214 | 215 | fd_event (EV_A_ fd, got); 216 | } 217 | 218 | /* if the receive array was full, increase its size */ 219 | if (ecb_expect_false (eventcnt == epoll_eventmax)) 220 | { 221 | ev_free (epoll_events); 222 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); 223 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 224 | } 225 | 226 | /* now synthesize events for all fds where epoll fails, while select works... */ 227 | for (i = epoll_epermcnt; i--; ) 228 | { 229 | int fd = epoll_eperms [i]; 230 | unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); 231 | 232 | if (anfds [fd].emask & EV_EMASK_EPERM && events) 233 | fd_event (EV_A_ fd, events); 234 | else 235 | { 236 | epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; 237 | anfds [fd].emask = 0; 238 | } 239 | } 240 | } 241 | 242 | static int 243 | epoll_epoll_create (void) 244 | { 245 | int fd; 246 | 247 | #if defined EPOLL_CLOEXEC && !defined __ANDROID__ 248 | fd = epoll_create1 (EPOLL_CLOEXEC); 249 | 250 | if (fd < 0 && (errno == EINVAL || errno == ENOSYS)) 251 | #endif 252 | { 253 | fd = epoll_create (256); 254 | 255 | if (fd >= 0) 256 | fcntl (fd, F_SETFD, FD_CLOEXEC); 257 | } 258 | 259 | return fd; 260 | } 261 | 262 | inline_size 263 | int 264 | epoll_init (EV_P_ int flags) 265 | { 266 | if ((backend_fd = epoll_epoll_create ()) < 0) 267 | return 0; 268 | 269 | backend_mintime = EV_TS_CONST (1e-3); /* epoll does sometimes return early, this is just to avoid the worst */ 270 | backend_modify = epoll_modify; 271 | backend_poll = epoll_poll; 272 | 273 | epoll_eventmax = 64; /* initial number of events receivable per poll */ 274 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 275 | 276 | return EVBACKEND_EPOLL; 277 | } 278 | 279 | inline_size 280 | void 281 | epoll_destroy (EV_P) 282 | { 283 | ev_free (epoll_events); 284 | array_free (epoll_eperm, EMPTY); 285 | } 286 | 287 | ecb_cold 288 | static void 289 | epoll_fork (EV_P) 290 | { 291 | close (backend_fd); 292 | 293 | while ((backend_fd = epoll_epoll_create ()) < 0) 294 | ev_syserr ("(libev) epoll_create"); 295 | 296 | fd_rearm_all (EV_A); 297 | } 298 | 299 | -------------------------------------------------------------------------------- /src/ev_iouring.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev linux io_uring fd activity backend 3 | * 4 | * Copyright (c) 2019-2020 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* 41 | * general notes about linux io_uring: 42 | * 43 | * a) it's the best interface I have seen so far. on linux. 44 | * b) best is not necessarily very good. 45 | * c) it's better than the aio mess, doesn't suffer from the fork problems 46 | * of linux aio or epoll and so on and so on. and you could do event stuff 47 | * without any syscalls. what's not to like? 48 | * d) ok, it's vastly more complex, but that's ok, really. 49 | * e) why two mmaps instead of one? one would be more space-efficient, 50 | * and I can't see what benefit two would have (other than being 51 | * somehow resizable/relocatable, but that's apparently not possible). 52 | * f) hmm, it's practically undebuggable (gdb can't access the memory, and 53 | * the bizarre way structure offsets are communicated makes it hard to 54 | * just print the ring buffer heads, even *iff* the memory were visible 55 | * in gdb. but then, that's also ok, really. 56 | * g) well, you cannot specify a timeout when waiting for events. no, 57 | * seriously, the interface doesn't support a timeout. never seen _that_ 58 | * before. sure, you can use a timerfd, but that's another syscall 59 | * you could have avoided. overall, this bizarre omission smells 60 | * like a µ-optimisation by the io_uring author for his personal 61 | * applications, to the detriment of everybody else who just wants 62 | * an event loop. but, umm, ok, if that's all, it could be worse. 63 | * (from what I gather from the author Jens Axboe, it simply didn't 64 | * occur to him, and he made good on it by adding an unlimited number 65 | * of timeouts later :). 66 | * h) initially there was a hardcoded limit of 4096 outstanding events. 67 | * later versions not only bump this to 32k, but also can handle 68 | * an unlimited amount of events, so this only affects the batch size. 69 | * i) unlike linux aio, you *can* register more then the limit 70 | * of fd events. while early verisons of io_uring signalled an overflow 71 | * and you ended up getting wet. 5.5+ does not do this anymore. 72 | * j) but, oh my! it had exactly the same bugs as the linux aio backend, 73 | * where some undocumented poll combinations just fail. 
fortunately, 74 | * after finally reaching the author, he was more than willing to fix 75 | * this probably in 5.6+. 76 | * k) overall, the *API* itself is, I dare to say, not a total trainwreck. 77 | * once the bugs ae fixed (probably in 5.6+), it will be without 78 | * competition. 79 | */ 80 | 81 | /* TODO: use internal TIMEOUT */ 82 | /* TODO: take advantage of single mmap, NODROP etc. */ 83 | /* TODO: resize cq/sq size independently */ 84 | 85 | #include 86 | #include 87 | #include 88 | #include 89 | 90 | #define IOURING_INIT_ENTRIES 32 91 | 92 | /*****************************************************************************/ 93 | /* syscall wrapdadoop - this section has the raw api/abi definitions */ 94 | 95 | #include 96 | #include 97 | 98 | /* mostly directly taken from the kernel or documentation */ 99 | 100 | struct io_uring_sqe 101 | { 102 | __u8 opcode; 103 | __u8 flags; 104 | __u16 ioprio; 105 | __s32 fd; 106 | union { 107 | __u64 off; 108 | __u64 addr2; 109 | }; 110 | __u64 addr; 111 | __u32 len; 112 | union { 113 | __kernel_rwf_t rw_flags; 114 | __u32 fsync_flags; 115 | __u16 poll_events; 116 | __u32 sync_range_flags; 117 | __u32 msg_flags; 118 | __u32 timeout_flags; 119 | __u32 accept_flags; 120 | __u32 cancel_flags; 121 | __u32 open_flags; 122 | __u32 statx_flags; 123 | __u32 fadvise_advice; 124 | }; 125 | __u64 user_data; 126 | union { 127 | __u16 buf_index; 128 | __u16 personality; 129 | __u64 __pad2[3]; 130 | }; 131 | }; 132 | 133 | struct io_uring_cqe 134 | { 135 | __u64 user_data; 136 | __s32 res; 137 | __u32 flags; 138 | }; 139 | 140 | struct io_sqring_offsets 141 | { 142 | __u32 head; 143 | __u32 tail; 144 | __u32 ring_mask; 145 | __u32 ring_entries; 146 | __u32 flags; 147 | __u32 dropped; 148 | __u32 array; 149 | __u32 resv1; 150 | __u64 resv2; 151 | }; 152 | 153 | struct io_cqring_offsets 154 | { 155 | __u32 head; 156 | __u32 tail; 157 | __u32 ring_mask; 158 | __u32 ring_entries; 159 | __u32 overflow; 160 | __u32 cqes; 161 | __u64 resv[2]; 162 | }; 163 | 164 | struct io_uring_params 165 | { 166 | __u32 sq_entries; 167 | __u32 cq_entries; 168 | __u32 flags; 169 | __u32 sq_thread_cpu; 170 | __u32 sq_thread_idle; 171 | __u32 features; 172 | __u32 resv[4]; 173 | struct io_sqring_offsets sq_off; 174 | struct io_cqring_offsets cq_off; 175 | }; 176 | 177 | #define IORING_FEAT_SINGLE_MMAP 0x00000001 178 | #define IORING_FEAT_NODROP 0x00000002 179 | #define IORING_FEAT_SUBMIT_STABLE 0x00000004 180 | 181 | #define IORING_SETUP_CQSIZE 0x00000008 182 | #define IORING_SETUP_CLAMP 0x00000010 183 | 184 | #define IORING_OP_POLL_ADD 6 185 | #define IORING_OP_POLL_REMOVE 7 186 | #define IORING_OP_TIMEOUT 11 187 | #define IORING_OP_TIMEOUT_REMOVE 12 188 | 189 | #define IORING_REGISTER_EVENTFD 4 190 | #define IORING_REGISTER_EVENTFD_ASYNC 7 191 | #define IORING_REGISTER_PROBE 8 192 | 193 | #define IO_URING_OP_SUPPORTED 1 194 | 195 | struct io_uring_probe_op { 196 | __u8 op; 197 | __u8 resv; 198 | __u16 flags; 199 | __u32 resv2; 200 | }; 201 | 202 | struct io_uring_probe 203 | { 204 | __u8 last_op; 205 | __u8 ops_len; 206 | __u16 resv; 207 | __u32 resv2[3]; 208 | struct io_uring_probe_op ops[0]; 209 | }; 210 | 211 | /* relative or absolute, reference clock is CLOCK_MONOTONIC */ 212 | struct iouring_kernel_timespec 213 | { 214 | int64_t tv_sec; 215 | long long tv_nsec; 216 | }; 217 | 218 | #define IORING_TIMEOUT_ABS 0x00000001 219 | 220 | #define IORING_ENTER_GETEVENTS 0x01 221 | 222 | #define IORING_OFF_SQ_RING 0x00000000ULL 223 | #define IORING_OFF_SQES 0x10000000ULL 224 | 225 | 
#define IORING_FEAT_SINGLE_MMAP 0x00000001 226 | #define IORING_FEAT_NODROP 0x00000002 227 | #define IORING_FEAT_SUBMIT_STABLE 0x00000004 228 | 229 | inline_size 230 | int 231 | evsys_io_uring_setup (unsigned entries, struct io_uring_params *params) 232 | { 233 | return ev_syscall2 (SYS_io_uring_setup, entries, params); 234 | } 235 | 236 | inline_size 237 | int 238 | evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz) 239 | { 240 | return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz); 241 | } 242 | 243 | inline_size 244 | int 245 | evsys_io_uring_register (unsigned int fd, unsigned int opcode, void *arg, unsigned int nr_args) 246 | { 247 | return ev_syscall4 (SYS_io_uring_register, fd, opcode, arg, nr_args); 248 | } 249 | 250 | /*****************************************************************************/ 251 | /* actual backend implementation */ 252 | 253 | /* we hope that volatile will make the compiler access this variables only once */ 254 | #define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_ring + iouring_sq_ ## name) 255 | #define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_ring + iouring_cq_ ## name) 256 | 257 | /* the index array */ 258 | #define EV_SQ_ARRAY ((unsigned *)((char *)iouring_ring + iouring_sq_array)) 259 | 260 | /* the submit/completion queue entries */ 261 | #define EV_SQES ((struct io_uring_sqe *) iouring_sqes) 262 | #define EV_CQES ((struct io_uring_cqe *)((char *)iouring_ring + iouring_cq_cqes)) 263 | 264 | inline_speed 265 | int 266 | iouring_enter (EV_P_ ev_tstamp timeout) 267 | { 268 | int res; 269 | 270 | EV_RELEASE_CB; 271 | 272 | res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1, 273 | timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0); 274 | 275 | assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit))); 276 | 277 | iouring_to_submit = 0; 278 | 279 | EV_ACQUIRE_CB; 280 | 281 | return res; 282 | } 283 | 284 | /* TODO: can we move things around so we don't need this forward-reference? 
*/ 285 | static void 286 | iouring_poll (EV_P_ ev_tstamp timeout); 287 | 288 | static 289 | struct io_uring_sqe * 290 | iouring_sqe_get (EV_P) 291 | { 292 | unsigned tail; 293 | 294 | for (;;) 295 | { 296 | tail = EV_SQ_VAR (tail); 297 | 298 | if (ecb_expect_true (tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries))) 299 | break; /* whats the problem, we have free sqes */ 300 | 301 | /* queue full, need to flush and possibly handle some events */ 302 | 303 | #if EV_FEATURE_CODE 304 | /* first we ask the kernel nicely, most often this frees up some sqes */ 305 | int res = iouring_enter (EV_A_ EV_TS_CONST (0.)); 306 | 307 | ECB_MEMORY_FENCE_ACQUIRE; /* better safe than sorry */ 308 | 309 | if (res >= 0) 310 | continue; /* yes, it worked, try again */ 311 | #endif 312 | 313 | /* some problem, possibly EBUSY - do the full poll and let it handle any issues */ 314 | 315 | iouring_poll (EV_A_ EV_TS_CONST (0.)); 316 | /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE for us */ 317 | } 318 | 319 | /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/ 320 | 321 | return EV_SQES + (tail & EV_SQ_VAR (ring_mask)); 322 | } 323 | 324 | inline_size 325 | struct io_uring_sqe * 326 | iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe) 327 | { 328 | unsigned idx = sqe - EV_SQES; 329 | 330 | EV_SQ_ARRAY [idx] = idx; 331 | ECB_MEMORY_FENCE_RELEASE; 332 | ++EV_SQ_VAR (tail); 333 | /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */ 334 | ++iouring_to_submit; 335 | } 336 | 337 | /*****************************************************************************/ 338 | 339 | /* when the timerfd expires we simply note the fact, 340 | * as the purpose of the timerfd is to wake us up, nothing else. 341 | * the next iteration should re-set it. 342 | */ 343 | static void 344 | iouring_tfd_cb (EV_P_ struct ev_io *w, int revents) 345 | { 346 | iouring_tfd_to = EV_TSTAMP_HUGE; 347 | } 348 | 349 | /* called for full and partial cleanup */ 350 | ecb_cold 351 | static int 352 | iouring_internal_destroy (EV_P) 353 | { 354 | close (iouring_tfd); 355 | close (iouring_fd); 356 | 357 | if (iouring_ring != MAP_FAILED) munmap (iouring_ring, iouring_ring_size); 358 | if (iouring_sqes != MAP_FAILED) munmap (iouring_sqes, iouring_sqes_size); 359 | 360 | if (ev_is_active (&iouring_tfd_w)) 361 | { 362 | ev_ref (EV_A); 363 | ev_io_stop (EV_A_ &iouring_tfd_w); 364 | } 365 | } 366 | 367 | ecb_cold 368 | static int 369 | iouring_internal_init (EV_P) 370 | { 371 | struct io_uring_params params = { 0 }; 372 | uint32_t sq_size, cq_size; 373 | 374 | params.flags = IORING_SETUP_CLAMP; 375 | 376 | iouring_to_submit = 0; 377 | 378 | iouring_tfd = -1; 379 | iouring_ring = MAP_FAILED; 380 | iouring_sqes = MAP_FAILED; 381 | 382 | if (!have_monotonic) /* cannot really happen, but what if11 */ 383 | return -1; 384 | 385 | iouring_fd = evsys_io_uring_setup (iouring_entries, ¶ms); 386 | 387 | if (iouring_fd < 0) 388 | return -1; 389 | 390 | if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEAT_SINGLE_MMAP | IORING_FEAT_SUBMIT_STABLE)) 391 | return -1; /* we require the above features */ 392 | 393 | /* TODO: remember somehow whether our queue size has been clamped */ 394 | 395 | sq_size = params.sq_off.array + params.sq_entries * sizeof (unsigned); 396 | cq_size = params.cq_off.cqes + params.cq_entries * sizeof (struct io_uring_cqe); 397 | 398 | iouring_ring_size = sq_size > cq_size ? 
sq_size : cq_size; 399 | iouring_sqes_size = params.sq_entries * sizeof (struct io_uring_sqe); 400 | 401 | iouring_ring = mmap (0, iouring_ring_size, PROT_READ | PROT_WRITE, 402 | MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING); 403 | iouring_sqes = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE, 404 | MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES); 405 | 406 | if (iouring_ring == MAP_FAILED || iouring_sqes == MAP_FAILED) 407 | return -1; 408 | 409 | iouring_sq_head = params.sq_off.head; 410 | iouring_sq_tail = params.sq_off.tail; 411 | iouring_sq_ring_mask = params.sq_off.ring_mask; 412 | iouring_sq_ring_entries = params.sq_off.ring_entries; 413 | iouring_sq_flags = params.sq_off.flags; 414 | iouring_sq_dropped = params.sq_off.dropped; 415 | iouring_sq_array = params.sq_off.array; 416 | 417 | iouring_cq_head = params.cq_off.head; 418 | iouring_cq_tail = params.cq_off.tail; 419 | iouring_cq_ring_mask = params.cq_off.ring_mask; 420 | iouring_cq_ring_entries = params.cq_off.ring_entries; 421 | iouring_cq_overflow = params.cq_off.overflow; 422 | iouring_cq_cqes = params.cq_off.cqes; 423 | 424 | iouring_tfd_to = EV_TSTAMP_HUGE; 425 | 426 | iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC); 427 | 428 | if (iouring_tfd < 0) 429 | return -1; 430 | 431 | return 0; 432 | } 433 | 434 | ecb_cold 435 | static void 436 | iouring_fork (EV_P) 437 | { 438 | iouring_internal_destroy (EV_A); 439 | 440 | while (iouring_internal_init (EV_A) < 0) 441 | ev_syserr ("(libev) io_uring_setup"); 442 | 443 | fd_rearm_all (EV_A); 444 | 445 | ev_io_stop (EV_A_ &iouring_tfd_w); 446 | ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ); 447 | ev_io_start (EV_A_ &iouring_tfd_w); 448 | } 449 | 450 | /*****************************************************************************/ 451 | 452 | static void 453 | iouring_modify (EV_P_ int fd, int oev, int nev) 454 | { 455 | if (oev) 456 | { 457 | /* we assume the sqe's are all "properly" initialised */ 458 | struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); 459 | sqe->opcode = IORING_OP_POLL_REMOVE; 460 | sqe->fd = fd; 461 | /* Jens Axboe notified me that user_data is not what is documented, but is 462 | * some kind of unique ID that has to match, otherwise the request cannot 463 | * be removed. Since we don't *really* have that, we pass in the old 464 | * generation counter - if that fails, too bad, it will hopefully be removed 465 | * at close time and then be ignored. */ 466 | sqe->addr = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); 467 | sqe->user_data = (uint64_t)-1; 468 | iouring_sqe_submit (EV_A_ sqe); 469 | 470 | /* increment generation counter to avoid handling old events */ 471 | ++anfds [fd].egen; 472 | } 473 | 474 | if (nev) 475 | { 476 | struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); 477 | sqe->opcode = IORING_OP_POLL_ADD; 478 | sqe->fd = fd; 479 | sqe->addr = 0; 480 | sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); 481 | sqe->poll_events = 482 | (nev & EV_READ ? POLLIN : 0) 483 | | (nev & EV_WRITE ? POLLOUT : 0); 484 | iouring_sqe_submit (EV_A_ sqe); 485 | } 486 | } 487 | 488 | inline_size 489 | void 490 | iouring_tfd_update (EV_P_ ev_tstamp timeout) 491 | { 492 | ev_tstamp tfd_to = mn_now + timeout; 493 | 494 | /* we assume there will be many iterations per timer change, so 495 | * we only re-set the timerfd when we have to because its expiry 496 | * is too late. 
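The sqe user_data encoding used in iouring_modify above is worth spelling out: the fd occupies the low 32 bits and the per-fd generation counter (egen) the high 32 bits, so a completion that arrives after the fd has been closed and reused can be recognised and dropped. A small self-contained sketch (not libev code) of that packing:

/* pack/unpack of the 64-bit user_data cookie, as used by this backend */
#include <stdint.h>
#include <assert.h>

static uint64_t
pack_user_data (int fd, uint32_t egen)
{
  return (uint32_t)fd | ((uint64_t)egen << 32);
}

static void
unpack_user_data (uint64_t user_data, int *fd, uint32_t *egen)
{
  *fd   = user_data & 0xffffffffU;
  *egen = user_data >> 32;
}

int
main (void)
{
  int fd;
  uint32_t egen;

  unpack_user_data (pack_user_data (7, 42), &fd, &egen);
  assert (fd == 7 && egen == 42);

  return 0;
}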
497 | */ 498 | if (ecb_expect_false (tfd_to < iouring_tfd_to)) 499 | { 500 | struct itimerspec its; 501 | 502 | iouring_tfd_to = tfd_to; 503 | EV_TS_SET (its.it_interval, 0.); 504 | EV_TS_SET (its.it_value, tfd_to); 505 | 506 | if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0) 507 | assert (("libev: iouring timerfd_settime failed", 0)); 508 | } 509 | } 510 | 511 | inline_size 512 | void 513 | iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe) 514 | { 515 | int fd = cqe->user_data & 0xffffffffU; 516 | uint32_t gen = cqe->user_data >> 32; 517 | int res = cqe->res; 518 | 519 | /* user_data -1 is a remove that we are not atm. interested in */ 520 | if (cqe->user_data == (uint64_t)-1) 521 | return; 522 | 523 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); 524 | 525 | /* documentation lies, of course. the result value is NOT like 526 | * normal syscalls, but like linux raw syscalls, i.e. negative 527 | * error numbers. fortunate, as otherwise there would be no way 528 | * to get error codes at all. still, why not document this? 529 | */ 530 | 531 | /* ignore event if generation doesn't match */ 532 | /* other than skipping removal events, */ 533 | /* this should actually be very rare */ 534 | if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) 535 | return; 536 | 537 | if (ecb_expect_false (res < 0)) 538 | { 539 | /*TODO: EINVAL handling (was something failed with this fd)*/ 540 | 541 | if (res == -EBADF) 542 | { 543 | assert (("libev: event loop rejected bad fd", res != -EBADF)); 544 | fd_kill (EV_A_ fd); 545 | } 546 | else 547 | { 548 | errno = -res; 549 | ev_syserr ("(libev) IORING_OP_POLL_ADD"); 550 | } 551 | 552 | return; 553 | } 554 | 555 | /* feed events, we do not expect or handle POLLNVAL */ 556 | fd_event ( 557 | EV_A_ 558 | fd, 559 | (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 560 | | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 561 | ); 562 | 563 | /* io_uring is oneshot, so we need to re-arm the fd next iteration */ 564 | /* this also means we usually have to do at least one syscall per iteration */ 565 | anfds [fd].events = 0; 566 | fd_change (EV_A_ fd, EV_ANFD_REIFY); 567 | } 568 | 569 | /* called when the event queue overflows */ 570 | ecb_cold 571 | static void 572 | iouring_overflow (EV_P) 573 | { 574 | /* we have two options, resize the queue (by tearing down 575 | * everything and recreating it, or living with it 576 | * and polling. 577 | * we implement this by resizing the queue, and, if that fails, 578 | * we just recreate the state on every failure, which 579 | * kind of is a very inefficient poll. 580 | * one danger is, due to the bios toward lower fds, 581 | * we will only really get events for those, so 582 | * maybe we need a poll() fallback, after all. 583 | */ 584 | /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */ 585 | 586 | fd_rearm_all (EV_A); 587 | 588 | /* we double the size until we hit the hard-to-probe maximum */ 589 | if (!iouring_max_entries) 590 | { 591 | iouring_entries <<= 1; 592 | iouring_fork (EV_A); 593 | } 594 | else 595 | { 596 | /* we hit the kernel limit, we should fall back to something else. 597 | * we can either poll() a few times and hope for the best, 598 | * poll always, or switch to epoll. 599 | * TODO: is this necessary with newer kernels? 
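The loop timeout is implemented with a timerfd whose only job is to wake up the ring (see iouring_tfd_cb above); iouring_tfd_update arms it with an absolute CLOCK_MONOTONIC deadline and only re-arms when the new deadline is earlier than the one already programmed. A self-contained sketch (not libev code) of that timerfd pattern:

/* arm an absolute CLOCK_MONOTONIC deadline and wait for it to fire */
#include <sys/timerfd.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <time.h>

int
main (void)
{
  int tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);
  struct timespec now;
  struct itimerspec its;
  uint64_t expirations;

  if (tfd < 0)
    return 1;

  clock_gettime (CLOCK_MONOTONIC, &now);

  memset (&its, 0, sizeof (its));
  its.it_value.tv_sec  = now.tv_sec + 1; /* absolute deadline: now + 1s */
  its.it_value.tv_nsec = now.tv_nsec;

  if (timerfd_settime (tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
    return 1;

  /* the read blocks until the deadline passes, then reports the expiry count */
  if (read (tfd, &expirations, sizeof (expirations)) > 0)
    printf ("timerfd expired %llu time(s)\n", (unsigned long long)expirations);

  close (tfd);
  return 0;
}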
600 | */ 601 | 602 | iouring_internal_destroy (EV_A); 603 | 604 | /* this should make it so that on return, we don't call any uring functions */ 605 | iouring_to_submit = 0; 606 | 607 | for (;;) 608 | { 609 | backend = epoll_init (EV_A_ 0); 610 | 611 | if (backend) 612 | break; 613 | 614 | ev_syserr ("(libev) iouring switch to epoll"); 615 | } 616 | } 617 | } 618 | 619 | /* handle any events in the completion queue, return true if there were any */ 620 | static int 621 | iouring_handle_cq (EV_P) 622 | { 623 | unsigned head, tail, mask; 624 | 625 | head = EV_CQ_VAR (head); 626 | ECB_MEMORY_FENCE_ACQUIRE; 627 | tail = EV_CQ_VAR (tail); 628 | 629 | if (head == tail) 630 | return 0; 631 | 632 | /* it can only overflow if we have events, yes, yes? */ 633 | if (ecb_expect_false (EV_CQ_VAR (overflow))) 634 | { 635 | iouring_overflow (EV_A); 636 | return 1; 637 | } 638 | 639 | mask = EV_CQ_VAR (ring_mask); 640 | 641 | do 642 | iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]); 643 | while (head != tail); 644 | 645 | EV_CQ_VAR (head) = head; 646 | ECB_MEMORY_FENCE_RELEASE; 647 | 648 | return 1; 649 | } 650 | 651 | static void 652 | iouring_poll (EV_P_ ev_tstamp timeout) 653 | { 654 | /* if we have events, no need for extra syscalls, but we might have to queue events */ 655 | /* we also clar the timeout if there are outstanding fdchanges */ 656 | /* the latter should only happen if both the sq and cq are full, most likely */ 657 | /* because we have a lot of event sources that immediately complete */ 658 | /* TODO: fdchacngecnt is always 0 because fd_reify does not have two buffers yet */ 659 | if (iouring_handle_cq (EV_A) || fdchangecnt) 660 | timeout = EV_TS_CONST (0.); 661 | else 662 | /* no events, so maybe wait for some */ 663 | iouring_tfd_update (EV_A_ timeout); 664 | 665 | /* only enter the kernel if we have something to submit, or we need to wait */ 666 | if (timeout || iouring_to_submit) 667 | { 668 | int res = iouring_enter (EV_A_ timeout); 669 | 670 | if (ecb_expect_false (res < 0)) 671 | if (errno == EINTR) 672 | /* ignore */; 673 | else if (errno == EBUSY) 674 | /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */; 675 | else 676 | ev_syserr ("(libev) iouring setup"); 677 | else 678 | iouring_handle_cq (EV_A); 679 | } 680 | } 681 | 682 | inline_size 683 | int 684 | iouring_init (EV_P_ int flags) 685 | { 686 | iouring_entries = IOURING_INIT_ENTRIES; 687 | iouring_max_entries = 0; 688 | 689 | if (iouring_internal_init (EV_A) < 0) 690 | { 691 | iouring_internal_destroy (EV_A); 692 | return 0; 693 | } 694 | 695 | ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ); 696 | ev_set_priority (&iouring_tfd_w, EV_MINPRI); 697 | ev_io_start (EV_A_ &iouring_tfd_w); 698 | ev_unref (EV_A); /* watcher should not keep loop alive */ 699 | 700 | backend_modify = iouring_modify; 701 | backend_poll = iouring_poll; 702 | 703 | return EVBACKEND_IOURING; 704 | } 705 | 706 | inline_size 707 | void 708 | iouring_destroy (EV_P) 709 | { 710 | iouring_internal_destroy (EV_A); 711 | } 712 | 713 | -------------------------------------------------------------------------------- /src/ev_kqueue.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev kqueue backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2016,2019 Marc Alexander Lehmann 5 | * All rights reserved. 
6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 
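The kqueue backend that follows batches all registration changes into a changelist and hands them to the kernel in the same kevent() call that also fetches pending events, so registration costs no extra syscalls. A minimal self-contained sketch (not libev code, BSD/macOS only) of that pattern:

/* register interest and poll in a single kevent() call */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  int kq = kqueue ();
  struct kevent change, event;
  struct timespec ts = { 1, 0 }; /* wait at most one second */
  int n;

  if (kq < 0)
    return 1;

  /* queue a change: watch stdin for readability */
  EV_SET (&change, 0 /* fd */, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);

  /* one syscall submits the change and fetches up to one event */
  n = kevent (kq, &change, 1, &event, 1, &ts);

  if (n > 0)
    printf ("fd %d ready (filter %d)\n", (int)event.ident, (int)event.filter);
  else if (n == 0)
    printf ("timeout\n");

  close (kq);
  return 0;
}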
38 | */ 39 | 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | 46 | inline_speed 47 | void 48 | kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) 49 | { 50 | ++kqueue_changecnt; 51 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, array_needsize_noinit); 52 | 53 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); 54 | } 55 | 56 | /* OS X at least needs this */ 57 | #ifndef EV_ENABLE 58 | # define EV_ENABLE 0 59 | #endif 60 | #ifndef NOTE_EOF 61 | # define NOTE_EOF 0 62 | #endif 63 | 64 | static void 65 | kqueue_modify (EV_P_ int fd, int oev, int nev) 66 | { 67 | if (oev != nev) 68 | { 69 | if (oev & EV_READ) 70 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0); 71 | 72 | if (oev & EV_WRITE) 73 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0); 74 | } 75 | 76 | /* to detect close/reopen reliably, we have to re-add */ 77 | /* event requests even when oev == nev */ 78 | 79 | if (nev & EV_READ) 80 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); 81 | 82 | if (nev & EV_WRITE) 83 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); 84 | } 85 | 86 | static void 87 | kqueue_poll (EV_P_ ev_tstamp timeout) 88 | { 89 | int res, i; 90 | struct timespec ts; 91 | 92 | /* need to resize so there is enough space for errors */ 93 | if (kqueue_changecnt > kqueue_eventmax) 94 | { 95 | ev_free (kqueue_events); 96 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); 97 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); 98 | } 99 | 100 | EV_RELEASE_CB; 101 | EV_TS_SET (ts, timeout); 102 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); 103 | EV_ACQUIRE_CB; 104 | kqueue_changecnt = 0; 105 | 106 | if (ecb_expect_false (res < 0)) 107 | { 108 | if (errno != EINTR) 109 | ev_syserr ("(libev) kqueue kevent"); 110 | 111 | return; 112 | } 113 | 114 | for (i = 0; i < res; ++i) 115 | { 116 | int fd = kqueue_events [i].ident; 117 | 118 | if (ecb_expect_false (kqueue_events [i].flags & EV_ERROR)) 119 | { 120 | int err = kqueue_events [i].data; 121 | 122 | /* we are only interested in errors for fds that we are interested in :) */ 123 | if (anfds [fd].events) 124 | { 125 | if (err == ENOENT) /* resubmit changes on ENOENT */ 126 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); 127 | else if (err == EBADF) /* on EBADF, we re-check the fd */ 128 | { 129 | if (fd_valid (fd)) 130 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); 131 | else 132 | { 133 | assert (("libev: kqueue found invalid fd", 0)); 134 | fd_kill (EV_A_ fd); 135 | } 136 | } 137 | else /* on all other errors, we error out on the fd */ 138 | { 139 | assert (("libev: kqueue found invalid fd", 0)); 140 | fd_kill (EV_A_ fd); 141 | } 142 | } 143 | } 144 | else 145 | fd_event ( 146 | EV_A_ 147 | fd, 148 | kqueue_events [i].filter == EVFILT_READ ? EV_READ 149 | : kqueue_events [i].filter == EVFILT_WRITE ? 
EV_WRITE 150 | : 0 151 | ); 152 | } 153 | 154 | if (ecb_expect_false (res == kqueue_eventmax)) 155 | { 156 | ev_free (kqueue_events); 157 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); 158 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); 159 | } 160 | } 161 | 162 | inline_size 163 | int 164 | kqueue_init (EV_P_ int flags) 165 | { 166 | /* initialize the kernel queue */ 167 | kqueue_fd_pid = getpid (); 168 | if ((backend_fd = kqueue ()) < 0) 169 | return 0; 170 | 171 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ 172 | 173 | backend_mintime = EV_TS_CONST (1e-9); /* apparently, they did the right thing in freebsd */ 174 | backend_modify = kqueue_modify; 175 | backend_poll = kqueue_poll; 176 | 177 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ 178 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); 179 | 180 | kqueue_changes = 0; 181 | kqueue_changemax = 0; 182 | kqueue_changecnt = 0; 183 | 184 | return EVBACKEND_KQUEUE; 185 | } 186 | 187 | inline_size 188 | void 189 | kqueue_destroy (EV_P) 190 | { 191 | ev_free (kqueue_events); 192 | ev_free (kqueue_changes); 193 | } 194 | 195 | inline_size 196 | void 197 | kqueue_fork (EV_P) 198 | { 199 | /* some BSD kernels don't just destroy the kqueue itself, 200 | * but also close the fd, which isn't documented, and 201 | * impossible to support properly. 202 | * we remember the pid of the kqueue call and only close 203 | * the fd if the pid is still the same. 204 | * this leaks fds on sane kernels, but BSD interfaces are 205 | * notoriously buggy and rarely get fixed. 206 | */ 207 | pid_t newpid = getpid (); 208 | 209 | if (newpid == kqueue_fd_pid) 210 | close (backend_fd); 211 | 212 | kqueue_fd_pid = newpid; 213 | while ((backend_fd = kqueue ()) < 0) 214 | ev_syserr ("(libev) kqueue"); 215 | 216 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 217 | 218 | /* re-register interest in fds */ 219 | fd_rearm_all (EV_A); 220 | } 221 | 222 | /* sys/event.h defines EV_ERROR */ 223 | #undef EV_ERROR 224 | 225 | -------------------------------------------------------------------------------- /src/ev_linuxaio.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev linux aio fd activity backend 3 | * 4 | * Copyright (c) 2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* 41 | * general notes about linux aio: 42 | * 43 | * a) at first, the linux aio IOCB_CMD_POLL functionality introduced in 44 | * 4.18 looks too good to be true: both watchers and events can be 45 | * batched, and events can even be handled in userspace using 46 | * a ring buffer shared with the kernel. watchers can be canceled 47 | * regardless of whether the fd has been closed. no problems with fork. 48 | * ok, the ring buffer is 200% undocumented (there isn't even a 49 | * header file), but otherwise, it's pure bliss! 50 | * b) ok, watchers are one-shot, so you have to re-arm active ones 51 | * on every iteration. so much for syscall-less event handling, 52 | * but at least these re-arms can be batched, no big deal, right? 53 | * c) well, linux as usual: the documentation lies to you: io_submit 54 | * sometimes returns EINVAL because the kernel doesn't feel like 55 | * handling your poll mask - ttys can be polled for POLLOUT, 56 | * POLLOUT|POLLIN, but polling for POLLIN fails. just great, 57 | * so we have to fall back to something else (hello, epoll), 58 | * but at least the fallback can be slow, because these are 59 | * exceptional cases, right? 60 | * d) hmm, you have to tell the kernel the maximum number of watchers 61 | * you want to queue when initialising the aio context. but of 62 | * course the real limit is magically calculated in the kernel, and 63 | * is often higher then we asked for. so we just have to destroy 64 | * the aio context and re-create it a bit larger if we hit the limit. 65 | * (starts to remind you of epoll? well, it's a bit more deterministic 66 | * and less gambling, but still ugly as hell). 67 | * e) that's when you find out you can also hit an arbitrary system-wide 68 | * limit. or the kernel simply doesn't want to handle your watchers. 69 | * what the fuck do we do then? you guessed it, in the middle 70 | * of event handling we have to switch to 100% epoll polling. and 71 | * that better is as fast as normal epoll polling, so you practically 72 | * have to use the normal epoll backend with all its quirks. 73 | * f) end result of this train wreck: it inherits all the disadvantages 74 | * from epoll, while adding a number on its own. why even bother to use 75 | * it? 
because if conditions are right and your fds are supported and you 76 | * don't hit a limit, this backend is actually faster, doesn't gamble with 77 | * your fds, batches watchers and events and doesn't require costly state 78 | * recreates. well, until it does. 79 | * g) all of this makes this backend use almost twice as much code as epoll. 80 | * which in turn uses twice as much code as poll. and that#s not counting 81 | * the fact that this backend also depends on the epoll backend, making 82 | * it three times as much code as poll, or kqueue. 83 | * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now 84 | * it's clear that whatever linux comes up with is far, far, far worse. 85 | */ 86 | 87 | #include /* actually linux/time.h, but we must assume they are compatible */ 88 | #include 89 | #include 90 | 91 | /*****************************************************************************/ 92 | /* syscall wrapdadoop - this section has the raw api/abi definitions */ 93 | 94 | #include /* no glibc wrappers */ 95 | 96 | /* aio_abi.h is not versioned in any way, so we cannot test for its existance */ 97 | #define IOCB_CMD_POLL 5 98 | 99 | /* taken from linux/fs/aio.c. yup, that's a .c file. 100 | * not only is this totally undocumented, not even the source code 101 | * can tell you what the future semantics of compat_features and 102 | * incompat_features are, or what header_length actually is for. 103 | */ 104 | #define AIO_RING_MAGIC 0xa10a10a1 105 | #define EV_AIO_RING_INCOMPAT_FEATURES 0 106 | struct aio_ring 107 | { 108 | unsigned id; /* kernel internal index number */ 109 | unsigned nr; /* number of io_events */ 110 | unsigned head; /* Written to by userland or by kernel. */ 111 | unsigned tail; 112 | 113 | unsigned magic; 114 | unsigned compat_features; 115 | unsigned incompat_features; 116 | unsigned header_length; /* size of aio_ring */ 117 | 118 | struct io_event io_events[0]; 119 | }; 120 | 121 | inline_size 122 | int 123 | evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp) 124 | { 125 | return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp); 126 | } 127 | 128 | inline_size 129 | int 130 | evsys_io_destroy (aio_context_t ctx_id) 131 | { 132 | return ev_syscall1 (SYS_io_destroy, ctx_id); 133 | } 134 | 135 | inline_size 136 | int 137 | evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[]) 138 | { 139 | return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp); 140 | } 141 | 142 | inline_size 143 | int 144 | evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result) 145 | { 146 | return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result); 147 | } 148 | 149 | inline_size 150 | int 151 | evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout) 152 | { 153 | return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout); 154 | } 155 | 156 | /*****************************************************************************/ 157 | /* actual backed implementation */ 158 | 159 | ecb_cold 160 | static int 161 | linuxaio_nr_events (EV_P) 162 | { 163 | /* we start with 16 iocbs and incraese from there 164 | * that's tiny, but the kernel has a rather low system-wide 165 | * limit that can be reached quickly, so let's be parsimonious 166 | * with this resource. 167 | * Rest assured, the kernel generously rounds up small and big numbers 168 | * in different ways (but doesn't seem to charge you for it). 
169 | * The 15 here is because the kernel usually has a power of two as aio-max-nr, 170 | * and this helps to take advantage of that limit. 171 | */ 172 | 173 | /* we try to fill 4kB pages exactly. 174 | * the ring buffer header is 32 bytes, every io event is 32 bytes. 175 | * the kernel takes the io requests number, doubles it, adds 2 176 | * and adds the ring buffer. 177 | * the way we use this is by starting low, and then roughly doubling the 178 | * size each time we hit a limit. 179 | */ 180 | 181 | int requests = 15 << linuxaio_iteration; 182 | int one_page = (4096 183 | / sizeof (struct io_event) ) / 2; /* how many fit into one page */ 184 | int first_page = ((4096 - sizeof (struct aio_ring)) 185 | / sizeof (struct io_event) - 2) / 2; /* how many fit into the first page */ 186 | 187 | /* if everything fits into one page, use count exactly */ 188 | if (requests > first_page) 189 | /* otherwise, round down to full pages and add the first page */ 190 | requests = requests / one_page * one_page + first_page; 191 | 192 | return requests; 193 | } 194 | 195 | /* we use out own wrapper structure in case we ever want to do something "clever" */ 196 | typedef struct aniocb 197 | { 198 | struct iocb io; 199 | /*int inuse;*/ 200 | } *ANIOCBP; 201 | 202 | inline_size 203 | void 204 | linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count) 205 | { 206 | while (count--) 207 | { 208 | /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */ 209 | ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb)); 210 | 211 | /* full zero initialise is probably not required at the moment, but 212 | * this is not well documented, so we better do it. 213 | */ 214 | memset (iocb, 0, sizeof (*iocb)); 215 | 216 | iocb->io.aio_lio_opcode = IOCB_CMD_POLL; 217 | iocb->io.aio_fildes = offset; 218 | 219 | base [offset++] = iocb; 220 | } 221 | } 222 | 223 | ecb_cold 224 | static void 225 | linuxaio_free_iocbp (EV_P) 226 | { 227 | while (linuxaio_iocbpmax--) 228 | ev_free (linuxaio_iocbps [linuxaio_iocbpmax]); 229 | 230 | linuxaio_iocbpmax = 0; /* next resize will completely reallocate the array, at some overhead */ 231 | } 232 | 233 | static void 234 | linuxaio_modify (EV_P_ int fd, int oev, int nev) 235 | { 236 | array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp); 237 | ANIOCBP iocb = linuxaio_iocbps [fd]; 238 | ANFD *anfd = &anfds [fd]; 239 | 240 | if (ecb_expect_false (iocb->io.aio_reqprio < 0)) 241 | { 242 | /* we handed this fd over to epoll, so undo this first */ 243 | /* we do it manually because the optimisations on epoll_modify won't do us any good */ 244 | epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0); 245 | anfd->emask = 0; 246 | iocb->io.aio_reqprio = 0; 247 | } 248 | else if (ecb_expect_false (iocb->io.aio_buf)) 249 | { 250 | /* iocb active, so cancel it first before resubmit */ 251 | /* this assumes we only ever get one call per fd per loop iteration */ 252 | for (;;) 253 | { 254 | /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */ 255 | if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0)) 256 | break; 257 | 258 | if (ecb_expect_true (errno == EINPROGRESS)) 259 | break; 260 | 261 | /* the EINPROGRESS test is for nicer error message. clumsy. 
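As a concrete check of the sizing arithmetic in linuxaio_nr_events above (assuming 4 KiB pages and, as the comments state, a 32-byte struct aio_ring header and 32-byte io_events): a full page holds 64 requests after the kernel's doubling, the first page next to the header holds 62, and a request count of 120 gets rounded to 64 + 62 = 126.

/* standalone check of the page-filling arithmetic (not libev code) */
#include <stdio.h>

int
main (void)
{
  int ring_header = 32; /* assumed sizeof (struct aio_ring) */
  int io_event    = 32; /* assumed sizeof (struct io_event) */

  int one_page   = (4096 / io_event) / 2;                     /* 64 requests per full page     */
  int first_page = ((4096 - ring_header) / io_event - 2) / 2; /* 62 requests in the first page */

  int requests = 15 << 3;                                     /* e.g. after three doublings: 120 */

  if (requests > first_page)
    requests = requests / one_page * one_page + first_page;

  printf ("one_page=%d first_page=%d requests=%d\n", one_page, first_page, requests);
  return 0;
}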
*/ 262 | if (errno != EINTR) 263 | { 264 | assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS)); 265 | break; 266 | } 267 | } 268 | 269 | /* increment generation counter to avoid handling old events */ 270 | ++anfd->egen; 271 | } 272 | 273 | iocb->io.aio_buf = (nev & EV_READ ? POLLIN : 0) 274 | | (nev & EV_WRITE ? POLLOUT : 0); 275 | 276 | if (nev) 277 | { 278 | iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32); 279 | 280 | /* queue iocb up for io_submit */ 281 | /* this assumes we only ever get one call per fd per loop iteration */ 282 | ++linuxaio_submitcnt; 283 | array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit); 284 | linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io; 285 | } 286 | } 287 | 288 | static void 289 | linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents) 290 | { 291 | epoll_poll (EV_A_ 0); 292 | } 293 | 294 | inline_speed 295 | void 296 | linuxaio_fd_rearm (EV_P_ int fd) 297 | { 298 | anfds [fd].events = 0; 299 | linuxaio_iocbps [fd]->io.aio_buf = 0; 300 | fd_change (EV_A_ fd, EV_ANFD_REIFY); 301 | } 302 | 303 | static void 304 | linuxaio_parse_events (EV_P_ struct io_event *ev, int nr) 305 | { 306 | while (nr) 307 | { 308 | int fd = ev->data & 0xffffffff; 309 | uint32_t gen = ev->data >> 32; 310 | int res = ev->res; 311 | 312 | assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax)); 313 | 314 | /* only accept events if generation counter matches */ 315 | if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen)) 316 | { 317 | /* feed events, we do not expect or handle POLLNVAL */ 318 | fd_event ( 319 | EV_A_ 320 | fd, 321 | (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 322 | | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 323 | ); 324 | 325 | /* linux aio is oneshot: rearm fd. 
TODO: this does more work than strictly needed */ 326 | linuxaio_fd_rearm (EV_A_ fd); 327 | } 328 | 329 | --nr; 330 | ++ev; 331 | } 332 | } 333 | 334 | /* get any events from ring buffer, return true if any were handled */ 335 | static int 336 | linuxaio_get_events_from_ring (EV_P) 337 | { 338 | struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; 339 | unsigned head, tail; 340 | 341 | /* the kernel reads and writes both of these variables, */ 342 | /* as a C extension, we assume that volatile use here */ 343 | /* both makes reads atomic and once-only */ 344 | head = *(volatile unsigned *)&ring->head; 345 | ECB_MEMORY_FENCE_ACQUIRE; 346 | tail = *(volatile unsigned *)&ring->tail; 347 | 348 | if (head == tail) 349 | return 0; 350 | 351 | /* parse all available events, but only once, to avoid starvation */ 352 | if (ecb_expect_true (tail > head)) /* normal case around */ 353 | linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head); 354 | else /* wrapped around */ 355 | { 356 | linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head); 357 | linuxaio_parse_events (EV_A_ ring->io_events, tail); 358 | } 359 | 360 | ECB_MEMORY_FENCE_RELEASE; 361 | /* as an extension to C, we hope that the volatile will make this atomic and once-only */ 362 | *(volatile unsigned *)&ring->head = tail; 363 | 364 | return 1; 365 | } 366 | 367 | inline_size 368 | int 369 | linuxaio_ringbuf_valid (EV_P) 370 | { 371 | struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; 372 | 373 | return ecb_expect_true (ring->magic == AIO_RING_MAGIC) 374 | && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES 375 | && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */ 376 | } 377 | 378 | /* read at least one event from kernel, or timeout */ 379 | inline_size 380 | void 381 | linuxaio_get_events (EV_P_ ev_tstamp timeout) 382 | { 383 | struct timespec ts; 384 | struct io_event ioev[8]; /* 256 octet stack space */ 385 | int want = 1; /* how many events to request */ 386 | int ringbuf_valid = linuxaio_ringbuf_valid (EV_A); 387 | 388 | if (ecb_expect_true (ringbuf_valid)) 389 | { 390 | /* if the ring buffer has any events, we don't wait or call the kernel at all */ 391 | if (linuxaio_get_events_from_ring (EV_A)) 392 | return; 393 | 394 | /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */ 395 | if (!timeout) 396 | return; 397 | } 398 | else 399 | /* no ringbuffer, request slightly larger batch */ 400 | want = sizeof (ioev) / sizeof (ioev [0]); 401 | 402 | /* no events, so wait for some 403 | * for fairness reasons, we do this in a loop, to fetch all events 404 | */ 405 | for (;;) 406 | { 407 | int res; 408 | 409 | EV_RELEASE_CB; 410 | 411 | EV_TS_SET (ts, timeout); 412 | res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts); 413 | 414 | EV_ACQUIRE_CB; 415 | 416 | if (res < 0) 417 | if (errno == EINTR) 418 | /* ignored, retry */; 419 | else 420 | ev_syserr ("(libev) linuxaio io_getevents"); 421 | else if (res) 422 | { 423 | /* at least one event available, handle them */ 424 | linuxaio_parse_events (EV_A_ ioev, res); 425 | 426 | if (ecb_expect_true (ringbuf_valid)) 427 | { 428 | /* if we have a ring buffer, handle any remaining events in it */ 429 | linuxaio_get_events_from_ring (EV_A); 430 | 431 | /* at this point, we should have handled all outstanding events */ 432 | break; 433 | } 434 | else if (res < want) 435 | /* otherwise, if there were fewere events than we wanted, we assume there are no more */ 436 | break; 
437 | } 438 | else 439 | break; /* no events from the kernel, we are done */ 440 | 441 | timeout = EV_TS_CONST (0.); /* only wait in the first iteration */ 442 | } 443 | } 444 | 445 | inline_size 446 | int 447 | linuxaio_io_setup (EV_P) 448 | { 449 | linuxaio_ctx = 0; 450 | return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx); 451 | } 452 | 453 | static void 454 | linuxaio_poll (EV_P_ ev_tstamp timeout) 455 | { 456 | int submitted; 457 | 458 | /* first phase: submit new iocbs */ 459 | 460 | /* io_submit might return less than the requested number of iocbs */ 461 | /* this is, afaics, only because of errors, but we go by the book and use a loop, */ 462 | /* which allows us to pinpoint the erroneous iocb */ 463 | for (submitted = 0; submitted < linuxaio_submitcnt; ) 464 | { 465 | int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted); 466 | 467 | if (ecb_expect_false (res < 0)) 468 | if (errno == EINVAL) 469 | { 470 | /* This happens for unsupported fds, officially, but in my testing, 471 | * also randomly happens for supported fds. We fall back to good old 472 | * poll() here, under the assumption that this is a very rare case. 473 | * See https://lore.kernel.org/patchwork/patch/1047453/ to see 474 | * discussion about such a case (ttys) where polling for POLLIN 475 | * fails but POLLIN|POLLOUT works. 476 | */ 477 | struct iocb *iocb = linuxaio_submits [submitted]; 478 | epoll_modify (EV_A_ iocb->aio_fildes, 0, anfds [iocb->aio_fildes].events); 479 | iocb->aio_reqprio = -1; /* mark iocb as epoll */ 480 | 481 | res = 1; /* skip this iocb - another iocb, another chance */ 482 | } 483 | else if (errno == EAGAIN) 484 | { 485 | /* This happens when the ring buffer is full, or some other shit we 486 | * don't know and isn't documented. Most likely because we have too 487 | * many requests and linux aio can't be assed to handle them. 488 | * In this case, we try to allocate a larger ring buffer, freeing 489 | * ours first. This might fail, in which case we have to fall back to 100% 490 | * epoll. 491 | * God, how I hate linux not getting its act together. Ever. 492 | */ 493 | evsys_io_destroy (linuxaio_ctx); 494 | linuxaio_submitcnt = 0; 495 | 496 | /* rearm all fds with active iocbs */ 497 | { 498 | int fd; 499 | for (fd = 0; fd < linuxaio_iocbpmax; ++fd) 500 | if (linuxaio_iocbps [fd]->io.aio_buf) 501 | linuxaio_fd_rearm (EV_A_ fd); 502 | } 503 | 504 | ++linuxaio_iteration; 505 | if (linuxaio_io_setup (EV_A) < 0) 506 | { 507 | /* TODO: rearm all and recreate epoll backend from scratch */ 508 | /* TODO: might be more prudent? 
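Stripped of libev's bookkeeping, the aio calls used above reduce to raw syscalls, since glibc provides no wrappers for them. A self-contained sketch (not libev code) that polls stdin once via IOCB_CMD_POLL, assuming a >= 4.18 kernel and headers that ship <linux/aio_abi.h>:

/* minimal io_setup / io_submit / io_getevents round trip for one poll request */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <poll.h>

#ifndef IOCB_CMD_POLL
# define IOCB_CMD_POLL 5 /* not exported by older aio_abi.h */
#endif

int
main (void)
{
  aio_context_t ctx = 0; /* must be zero before io_setup */
  struct iocb cb;
  struct iocb *cbs[1] = { &cb };
  struct io_event ev;

  if (syscall (SYS_io_setup, 8, &ctx) < 0)
    { perror ("io_setup"); return 1; }

  memset (&cb, 0, sizeof (cb));
  cb.aio_lio_opcode = IOCB_CMD_POLL;
  cb.aio_fildes     = 0;      /* stdin */
  cb.aio_buf        = POLLIN; /* the poll mask goes into aio_buf */
  cb.aio_data       = 0;      /* free-form cookie, echoed back in ev.data */

  if (syscall (SYS_io_submit, ctx, 1, cbs) != 1)
    { perror ("io_submit"); return 1; }

  /* block until stdin becomes readable (or errors) */
  if (syscall (SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
    printf ("res (poll revents) = %lld\n", (long long)ev.res);

  syscall (SYS_io_destroy, ctx);
  return 0;
}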
*/ 509 | 510 | /* to bad, we can't get a new aio context, go 100% epoll */ 511 | linuxaio_free_iocbp (EV_A); 512 | ev_io_stop (EV_A_ &linuxaio_epoll_w); 513 | ev_ref (EV_A); 514 | linuxaio_ctx = 0; 515 | 516 | backend = EVBACKEND_EPOLL; 517 | backend_modify = epoll_modify; 518 | backend_poll = epoll_poll; 519 | } 520 | 521 | timeout = EV_TS_CONST (0.); 522 | /* it's easiest to handle this mess in another iteration */ 523 | return; 524 | } 525 | else if (errno == EBADF) 526 | { 527 | assert (("libev: event loop rejected bad fd", errno != EBADF)); 528 | fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes); 529 | 530 | res = 1; /* skip this iocb */ 531 | } 532 | else if (errno == EINTR) /* not seen in reality, not documented */ 533 | res = 0; /* silently ignore and retry */ 534 | else 535 | { 536 | ev_syserr ("(libev) linuxaio io_submit"); 537 | res = 0; 538 | } 539 | 540 | submitted += res; 541 | } 542 | 543 | linuxaio_submitcnt = 0; 544 | 545 | /* second phase: fetch and parse events */ 546 | 547 | linuxaio_get_events (EV_A_ timeout); 548 | } 549 | 550 | inline_size 551 | int 552 | linuxaio_init (EV_P_ int flags) 553 | { 554 | /* would be great to have a nice test for IOCB_CMD_POLL instead */ 555 | /* also: test some semi-common fd types, such as files and ttys in recommended_backends */ 556 | /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */ 557 | if (ev_linux_version () < 0x041300) 558 | return 0; 559 | 560 | if (!epoll_init (EV_A_ 0)) 561 | return 0; 562 | 563 | linuxaio_iteration = 0; 564 | 565 | if (linuxaio_io_setup (EV_A) < 0) 566 | { 567 | epoll_destroy (EV_A); 568 | return 0; 569 | } 570 | 571 | ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ); 572 | ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI); 573 | ev_io_start (EV_A_ &linuxaio_epoll_w); 574 | ev_unref (EV_A); /* watcher should not keep loop alive */ 575 | 576 | backend_modify = linuxaio_modify; 577 | backend_poll = linuxaio_poll; 578 | 579 | linuxaio_iocbpmax = 0; 580 | linuxaio_iocbps = 0; 581 | 582 | linuxaio_submits = 0; 583 | linuxaio_submitmax = 0; 584 | linuxaio_submitcnt = 0; 585 | 586 | return EVBACKEND_LINUXAIO; 587 | } 588 | 589 | inline_size 590 | void 591 | linuxaio_destroy (EV_P) 592 | { 593 | epoll_destroy (EV_A); 594 | linuxaio_free_iocbp (EV_A); 595 | evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */ 596 | } 597 | 598 | ecb_cold 599 | static void 600 | linuxaio_fork (EV_P) 601 | { 602 | linuxaio_submitcnt = 0; /* all pointers were invalidated */ 603 | linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */ 604 | evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */ 605 | 606 | linuxaio_iteration = 0; /* we start over in the child */ 607 | 608 | while (linuxaio_io_setup (EV_A) < 0) 609 | ev_syserr ("(libev) linuxaio io_setup"); 610 | 611 | /* forking epoll should also effectively unregister all fds from the backend */ 612 | epoll_fork (EV_A); 613 | /* epoll_fork already did this. 
hopefully */ 614 | /*fd_rearm_all (EV_A);*/ 615 | 616 | ev_io_stop (EV_A_ &linuxaio_epoll_w); 617 | ev_io_set (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ); 618 | ev_io_start (EV_A_ &linuxaio_epoll_w); 619 | } 620 | 621 | -------------------------------------------------------------------------------- /src/ev_poll.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev poll fd activity backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2016,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #include 41 | 42 | inline_size 43 | void 44 | array_needsize_pollidx (int *base, int offset, int count) 45 | { 46 | /* using memset (.., -1, ...) is tempting, we we try 47 | * to be ultraportable 48 | */ 49 | base += offset; 50 | while (count--) 51 | *base++ = -1; 52 | } 53 | 54 | static void 55 | poll_modify (EV_P_ int fd, int oev, int nev) 56 | { 57 | int idx; 58 | 59 | if (oev == nev) 60 | return; 61 | 62 | array_needsize (int, pollidxs, pollidxmax, fd + 1, array_needsize_pollidx); 63 | 64 | idx = pollidxs [fd]; 65 | 66 | if (idx < 0) /* need to allocate a new pollfd */ 67 | { 68 | pollidxs [fd] = idx = pollcnt++; 69 | array_needsize (struct pollfd, polls, pollmax, pollcnt, array_needsize_noinit); 70 | polls [idx].fd = fd; 71 | } 72 | 73 | assert (polls [idx].fd == fd); 74 | 75 | if (nev) 76 | polls [idx].events = 77 | (nev & EV_READ ? POLLIN : 0) 78 | | (nev & EV_WRITE ? 
POLLOUT : 0); 79 | else /* remove pollfd */ 80 | { 81 | pollidxs [fd] = -1; 82 | 83 | if (ecb_expect_true (idx < --pollcnt)) 84 | { 85 | polls [idx] = polls [pollcnt]; 86 | pollidxs [polls [idx].fd] = idx; 87 | } 88 | } 89 | } 90 | 91 | static void 92 | poll_poll (EV_P_ ev_tstamp timeout) 93 | { 94 | struct pollfd *p; 95 | int res; 96 | 97 | EV_RELEASE_CB; 98 | res = poll (polls, pollcnt, EV_TS_TO_MSEC (timeout)); 99 | EV_ACQUIRE_CB; 100 | 101 | if (ecb_expect_false (res < 0)) 102 | { 103 | if (errno == EBADF) 104 | fd_ebadf (EV_A); 105 | else if (errno == ENOMEM && !syserr_cb) 106 | fd_enomem (EV_A); 107 | else if (errno != EINTR) 108 | ev_syserr ("(libev) poll"); 109 | } 110 | else 111 | for (p = polls; res; ++p) 112 | { 113 | assert (("libev: poll returned illegal result, broken BSD kernel?", p < polls + pollcnt)); 114 | 115 | if (ecb_expect_false (p->revents)) /* this expect is debatable */ 116 | { 117 | --res; 118 | 119 | if (ecb_expect_false (p->revents & POLLNVAL)) 120 | { 121 | assert (("libev: poll found invalid fd in poll set", 0)); 122 | fd_kill (EV_A_ p->fd); 123 | } 124 | else 125 | fd_event ( 126 | EV_A_ 127 | p->fd, 128 | (p->revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 129 | | (p->revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 130 | ); 131 | } 132 | } 133 | } 134 | 135 | inline_size 136 | int 137 | poll_init (EV_P_ int flags) 138 | { 139 | backend_mintime = EV_TS_CONST (1e-3); 140 | backend_modify = poll_modify; 141 | backend_poll = poll_poll; 142 | 143 | pollidxs = 0; pollidxmax = 0; 144 | polls = 0; pollmax = 0; pollcnt = 0; 145 | 146 | return EVBACKEND_POLL; 147 | } 148 | 149 | inline_size 150 | void 151 | poll_destroy (EV_P) 152 | { 153 | ev_free (pollidxs); 154 | ev_free (polls); 155 | } 156 | 157 | -------------------------------------------------------------------------------- /src/ev_port.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev solaris event port backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* useful reading: 41 | * 42 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6268715 (random results) 43 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6455223 (just totally broken) 44 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6873782 (manpage ETIME) 45 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6874410 (implementation ETIME) 46 | * http://www.mail-archive.com/networking-discuss@opensolaris.org/msg11898.html ETIME vs. nget 47 | * http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/event_port.c (libc) 48 | * http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/fs/portfs/port.c#1325 (kernel) 49 | */ 50 | 51 | #include 52 | #include 53 | #include 54 | #include 55 | #include 56 | #include 57 | 58 | inline_speed 59 | void 60 | port_associate_and_check (EV_P_ int fd, int ev) 61 | { 62 | if (0 > 63 | port_associate ( 64 | backend_fd, PORT_SOURCE_FD, fd, 65 | (ev & EV_READ ? POLLIN : 0) 66 | | (ev & EV_WRITE ? POLLOUT : 0), 67 | 0 68 | ) 69 | ) 70 | { 71 | if (errno == EBADFD) 72 | { 73 | assert (("libev: port_associate found invalid fd", errno != EBADFD)); 74 | fd_kill (EV_A_ fd); 75 | } 76 | else 77 | ev_syserr ("(libev) port_associate"); 78 | } 79 | } 80 | 81 | static void 82 | port_modify (EV_P_ int fd, int oev, int nev) 83 | { 84 | /* we need to reassociate no matter what, as closes are 85 | * once more silently being discarded. 86 | */ 87 | if (!nev) 88 | { 89 | if (oev) 90 | port_dissociate (backend_fd, PORT_SOURCE_FD, fd); 91 | } 92 | else 93 | port_associate_and_check (EV_A_ fd, nev); 94 | } 95 | 96 | static void 97 | port_poll (EV_P_ ev_tstamp timeout) 98 | { 99 | int res, i; 100 | struct timespec ts; 101 | uint_t nget = 1; 102 | 103 | /* we initialise this to something we will skip in the loop, as */ 104 | /* port_getn can return with nget unchanged, but no indication */ 105 | /* whether it was the original value or has been updated :/ */ 106 | port_events [0].portev_source = 0; 107 | 108 | EV_RELEASE_CB; 109 | EV_TS_SET (ts, timeout); 110 | res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts); 111 | EV_ACQUIRE_CB; 112 | 113 | /* port_getn may or may not set nget on error */ 114 | /* so we rely on port_events [0].portev_source not being updated */ 115 | if (res == -1 && errno != ETIME && errno != EINTR) 116 | ev_syserr ("(libev) port_getn (see http://bugs.opensolaris.org/view_bug.do?bug_id=6268715, try LIBEV_FLAGS=3 env variable)"); 117 | 118 | for (i = 0; i < nget; ++i) 119 | { 120 | if (port_events [i].portev_source == PORT_SOURCE_FD) 121 | { 122 | int fd = port_events [i].portev_object; 123 | 124 | fd_event ( 125 | EV_A_ 126 | fd, 127 | (port_events [i].portev_events & (POLLOUT | POLLERR | POLLHUP) ? 
EV_WRITE : 0) 128 | | (port_events [i].portev_events & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 129 | ); 130 | 131 | fd_change (EV_A_ fd, EV__IOFDSET); 132 | } 133 | } 134 | 135 | if (ecb_expect_false (nget == port_eventmax)) 136 | { 137 | ev_free (port_events); 138 | port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1); 139 | port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); 140 | } 141 | } 142 | 143 | inline_size 144 | int 145 | port_init (EV_P_ int flags) 146 | { 147 | /* Initialize the kernel queue */ 148 | if ((backend_fd = port_create ()) < 0) 149 | return 0; 150 | 151 | assert (("libev: PORT_SOURCE_FD must not be zero", PORT_SOURCE_FD)); 152 | 153 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ 154 | 155 | /* if my reading of the opensolaris kernel sources are correct, then 156 | * opensolaris does something very stupid: it checks if the time has already 157 | * elapsed and doesn't round up if that is the case, otherwise it DOES round 158 | * up. Since we can't know what the case is, we need to guess by using a 159 | * "large enough" timeout. Normally, 1e-9 would be correct. 160 | */ 161 | backend_mintime = EV_TS_CONST (1e-3); /* needed to compensate for port_getn returning early */ 162 | backend_modify = port_modify; 163 | backend_poll = port_poll; 164 | 165 | port_eventmax = 64; /* initial number of events receivable per poll */ 166 | port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); 167 | 168 | return EVBACKEND_PORT; 169 | } 170 | 171 | inline_size 172 | void 173 | port_destroy (EV_P) 174 | { 175 | ev_free (port_events); 176 | } 177 | 178 | inline_size 179 | void 180 | port_fork (EV_P) 181 | { 182 | close (backend_fd); 183 | 184 | while ((backend_fd = port_create ()) < 0) 185 | ev_syserr ("(libev) port"); 186 | 187 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 188 | 189 | /* re-register interest in fds */ 190 | fd_rearm_all (EV_A); 191 | } 192 | 193 | -------------------------------------------------------------------------------- /src/ev_select.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev select fd activity backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #ifndef _WIN32 41 | /* for unix systems */ 42 | # include 43 | # ifndef __hpux 44 | /* for REAL unix systems */ 45 | # include 46 | # endif 47 | #endif 48 | 49 | #ifndef EV_SELECT_USE_FD_SET 50 | # ifdef NFDBITS 51 | # define EV_SELECT_USE_FD_SET 0 52 | # else 53 | # define EV_SELECT_USE_FD_SET 1 54 | # endif 55 | #endif 56 | 57 | #if EV_SELECT_IS_WINSOCKET 58 | # undef EV_SELECT_USE_FD_SET 59 | # define EV_SELECT_USE_FD_SET 1 60 | # undef NFDBITS 61 | # define NFDBITS 0 62 | #endif 63 | 64 | #if !EV_SELECT_USE_FD_SET 65 | # define NFDBYTES (NFDBITS / 8) 66 | #endif 67 | 68 | #include 69 | 70 | static void 71 | select_modify (EV_P_ int fd, int oev, int nev) 72 | { 73 | if (oev == nev) 74 | return; 75 | 76 | { 77 | #if EV_SELECT_USE_FD_SET 78 | 79 | #if EV_SELECT_IS_WINSOCKET 80 | SOCKET handle = anfds [fd].handle; 81 | #else 82 | int handle = fd; 83 | #endif 84 | 85 | assert (("libev: fd >= FD_SETSIZE passed to fd_set-based select backend", fd < FD_SETSIZE)); 86 | 87 | /* FD_SET is broken on windows (it adds the fd to a set twice or more, 88 | * which eventually leads to overflows). Need to call it only on changes. 
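On the non-fd_set path further below, the backend keeps its own packed bit vectors and locates an fd by plain division and shifting: bit (fd % NFDBITS) of word (fd / NFDBITS). A tiny self-contained sketch of that arithmetic (not libev code; it assumes <sys/select.h> exposes NFDBITS, as the real code does):

/* which word and bit of a packed fd vector a given fd lands in */
#include <sys/select.h>
#include <stdio.h>

int
main (void)
{
  int fd = 100;

  int word = fd / NFDBITS;
  unsigned long mask = 1UL << (fd % NFDBITS); /* the real code uses fd_mask */

  printf ("NFDBITS=%d: fd %d -> word %d, mask 0x%lx\n", (int)NFDBITS, fd, word, mask);
  return 0;
}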
89 | */ 90 | #if EV_SELECT_IS_WINSOCKET 91 | if ((oev ^ nev) & EV_READ) 92 | #endif 93 | if (nev & EV_READ) 94 | FD_SET (handle, (fd_set *)vec_ri); 95 | else 96 | FD_CLR (handle, (fd_set *)vec_ri); 97 | 98 | #if EV_SELECT_IS_WINSOCKET 99 | if ((oev ^ nev) & EV_WRITE) 100 | #endif 101 | if (nev & EV_WRITE) 102 | FD_SET (handle, (fd_set *)vec_wi); 103 | else 104 | FD_CLR (handle, (fd_set *)vec_wi); 105 | 106 | #else 107 | 108 | int word = fd / NFDBITS; 109 | fd_mask mask = 1UL << (fd % NFDBITS); 110 | 111 | if (ecb_expect_false (vec_max <= word)) 112 | { 113 | int new_max = word + 1; 114 | 115 | vec_ri = ev_realloc (vec_ri, new_max * NFDBYTES); 116 | vec_ro = ev_realloc (vec_ro, new_max * NFDBYTES); /* could free/malloc */ 117 | vec_wi = ev_realloc (vec_wi, new_max * NFDBYTES); 118 | vec_wo = ev_realloc (vec_wo, new_max * NFDBYTES); /* could free/malloc */ 119 | #ifdef _WIN32 120 | vec_eo = ev_realloc (vec_eo, new_max * NFDBYTES); /* could free/malloc */ 121 | #endif 122 | 123 | for (; vec_max < new_max; ++vec_max) 124 | ((fd_mask *)vec_ri) [vec_max] = 125 | ((fd_mask *)vec_wi) [vec_max] = 0; 126 | } 127 | 128 | ((fd_mask *)vec_ri) [word] |= mask; 129 | if (!(nev & EV_READ)) 130 | ((fd_mask *)vec_ri) [word] &= ~mask; 131 | 132 | ((fd_mask *)vec_wi) [word] |= mask; 133 | if (!(nev & EV_WRITE)) 134 | ((fd_mask *)vec_wi) [word] &= ~mask; 135 | #endif 136 | } 137 | } 138 | 139 | static void 140 | select_poll (EV_P_ ev_tstamp timeout) 141 | { 142 | struct timeval tv; 143 | int res; 144 | int fd_setsize; 145 | 146 | EV_RELEASE_CB; 147 | EV_TV_SET (tv, timeout); 148 | 149 | #if EV_SELECT_USE_FD_SET 150 | fd_setsize = sizeof (fd_set); 151 | #else 152 | fd_setsize = vec_max * NFDBYTES; 153 | #endif 154 | 155 | memcpy (vec_ro, vec_ri, fd_setsize); 156 | memcpy (vec_wo, vec_wi, fd_setsize); 157 | 158 | #ifdef _WIN32 159 | /* pass in the write set as except set. 160 | * the idea behind this is to work around a windows bug that causes 161 | * errors to be reported as an exception and not by setting 162 | * the writable bit. this is so uncontrollably lame. 163 | */ 164 | memcpy (vec_eo, vec_wi, fd_setsize); 165 | res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, (fd_set *)vec_eo, &tv); 166 | #elif EV_SELECT_USE_FD_SET 167 | fd_setsize = anfdmax < FD_SETSIZE ? anfdmax : FD_SETSIZE; 168 | res = select (fd_setsize, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); 169 | #else 170 | res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); 171 | #endif 172 | EV_ACQUIRE_CB; 173 | 174 | if (ecb_expect_false (res < 0)) 175 | { 176 | #if EV_SELECT_IS_WINSOCKET 177 | errno = WSAGetLastError (); 178 | #endif 179 | #ifdef WSABASEERR 180 | /* on windows, select returns incompatible error codes, fix this */ 181 | if (errno >= WSABASEERR && errno < WSABASEERR + 1000) 182 | if (errno == WSAENOTSOCK) 183 | errno = EBADF; 184 | else 185 | errno -= WSABASEERR; 186 | #endif 187 | 188 | #ifdef _WIN32 189 | /* select on windows erroneously returns EINVAL when no fd sets have been 190 | * provided (this is documented). what microsoft doesn't tell you that this bug 191 | * exists even when the fd sets _are_ provided, so we have to check for this bug 192 | * here and emulate by sleeping manually. 193 | * we also get EINVAL when the timeout is invalid, but we ignore this case here 194 | * and assume that EINVAL always means: you have to wait manually. 195 | */ 196 | if (errno == EINVAL) 197 | { 198 | if (timeout) 199 | { 200 | unsigned long ms = EV_TS_TO_MSEC (timeout); 201 | Sleep (ms ? 
ms : 1); 202 | } 203 | 204 | return; 205 | } 206 | #endif 207 | 208 | if (errno == EBADF) 209 | fd_ebadf (EV_A); 210 | else if (errno == ENOMEM && !syserr_cb) 211 | fd_enomem (EV_A); 212 | else if (errno != EINTR) 213 | ev_syserr ("(libev) select"); 214 | 215 | return; 216 | } 217 | 218 | #if EV_SELECT_USE_FD_SET 219 | 220 | { 221 | int fd; 222 | 223 | for (fd = 0; fd < anfdmax; ++fd) 224 | if (anfds [fd].events) 225 | { 226 | int events = 0; 227 | #if EV_SELECT_IS_WINSOCKET 228 | SOCKET handle = anfds [fd].handle; 229 | #else 230 | int handle = fd; 231 | #endif 232 | 233 | if (FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ; 234 | if (FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE; 235 | #ifdef _WIN32 236 | if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE; 237 | #endif 238 | 239 | if (ecb_expect_true (events)) 240 | fd_event (EV_A_ fd, events); 241 | } 242 | } 243 | 244 | #else 245 | 246 | { 247 | int word, bit; 248 | for (word = vec_max; word--; ) 249 | { 250 | fd_mask word_r = ((fd_mask *)vec_ro) [word]; 251 | fd_mask word_w = ((fd_mask *)vec_wo) [word]; 252 | #ifdef _WIN32 253 | word_w |= ((fd_mask *)vec_eo) [word]; 254 | #endif 255 | 256 | if (word_r || word_w) 257 | for (bit = NFDBITS; bit--; ) 258 | { 259 | fd_mask mask = 1UL << bit; 260 | int events = 0; 261 | 262 | events |= word_r & mask ? EV_READ : 0; 263 | events |= word_w & mask ? EV_WRITE : 0; 264 | 265 | if (ecb_expect_true (events)) 266 | fd_event (EV_A_ word * NFDBITS + bit, events); 267 | } 268 | } 269 | } 270 | 271 | #endif 272 | } 273 | 274 | inline_size 275 | int 276 | select_init (EV_P_ int flags) 277 | { 278 | backend_mintime = EV_TS_CONST (1e-6); 279 | backend_modify = select_modify; 280 | backend_poll = select_poll; 281 | 282 | #if EV_SELECT_USE_FD_SET 283 | vec_ri = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_ri); 284 | vec_ro = ev_malloc (sizeof (fd_set)); 285 | vec_wi = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_wi); 286 | vec_wo = ev_malloc (sizeof (fd_set)); 287 | #ifdef _WIN32 288 | vec_eo = ev_malloc (sizeof (fd_set)); 289 | #endif 290 | #else 291 | vec_max = 0; 292 | vec_ri = 0; 293 | vec_ro = 0; 294 | vec_wi = 0; 295 | vec_wo = 0; 296 | #ifdef _WIN32 297 | vec_eo = 0; 298 | #endif 299 | #endif 300 | 301 | return EVBACKEND_SELECT; 302 | } 303 | 304 | inline_size 305 | void 306 | select_destroy (EV_P) 307 | { 308 | ev_free (vec_ri); 309 | ev_free (vec_ro); 310 | ev_free (vec_wi); 311 | ev_free (vec_wo); 312 | #ifdef _WIN32 313 | ev_free (vec_eo); 314 | #endif 315 | } 316 | 317 | -------------------------------------------------------------------------------- /src/ev_vars.h: -------------------------------------------------------------------------------- 1 | /* 2 | * loop member variable declarations 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 
16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #define VARx(type,name) VAR(name, type name) 41 | 42 | VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */ 43 | VARx(ev_tstamp, mn_now) /* monotonic clock "now" */ 44 | VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */ 45 | 46 | /* for reverse feeding of events */ 47 | VARx(W *, rfeeds) 48 | VARx(int, rfeedmax) 49 | VARx(int, rfeedcnt) 50 | 51 | VAR (pendings, ANPENDING *pendings [NUMPRI]) 52 | VAR (pendingmax, int pendingmax [NUMPRI]) 53 | VAR (pendingcnt, int pendingcnt [NUMPRI]) 54 | VARx(int, pendingpri) /* highest priority currently pending */ 55 | VARx(ev_prepare, pending_w) /* dummy pending watcher */ 56 | 57 | VARx(ev_tstamp, io_blocktime) 58 | VARx(ev_tstamp, timeout_blocktime) 59 | 60 | VARx(int, backend) 61 | VARx(int, activecnt) /* total number of active events ("refcount") */ 62 | VARx(EV_ATOMIC_T, loop_done) /* signal by ev_break */ 63 | 64 | VARx(int, backend_fd) 65 | VARx(ev_tstamp, backend_mintime) /* assumed typical timer resolution */ 66 | VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev)) 67 | VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout)) 68 | 69 | VARx(ANFD *, anfds) 70 | VARx(int, anfdmax) 71 | 72 | VAR (evpipe, int evpipe [2]) 73 | VARx(ev_io, pipe_w) 74 | VARx(EV_ATOMIC_T, pipe_write_wanted) 75 | VARx(EV_ATOMIC_T, pipe_write_skipped) 76 | 77 | #if !defined(_WIN32) || EV_GENWRAP 78 | VARx(pid_t, curpid) 79 | #endif 80 | 81 | VARx(char, postfork) /* true if we need to recreate kernel state after fork */ 82 | 83 | #if EV_USE_SELECT || EV_GENWRAP 84 | VARx(void *, vec_ri) 85 | VARx(void *, vec_ro) 86 | VARx(void *, vec_wi) 87 | VARx(void *, vec_wo) 88 | #if defined(_WIN32) || EV_GENWRAP 89 | VARx(void *, vec_eo) 90 | #endif 91 | VARx(int, vec_max) 92 | #endif 93 | 94 | #if EV_USE_POLL || EV_GENWRAP 95 | VARx(struct pollfd *, polls) 96 | VARx(int, pollmax) 97 | VARx(int, pollcnt) 98 | VARx(int *, pollidxs) /* maps fds into structure indices */ 99 | VARx(int, pollidxmax) 100 | #endif 101 | 102 | #if 
EV_USE_EPOLL || EV_GENWRAP 103 | VARx(struct epoll_event *, epoll_events) 104 | VARx(int, epoll_eventmax) 105 | VARx(int *, epoll_eperms) 106 | VARx(int, epoll_epermcnt) 107 | VARx(int, epoll_epermmax) 108 | #endif 109 | 110 | #if EV_USE_LINUXAIO || EV_GENWRAP 111 | VARx(aio_context_t, linuxaio_ctx) 112 | VARx(int, linuxaio_iteration) 113 | VARx(struct aniocb **, linuxaio_iocbps) 114 | VARx(int, linuxaio_iocbpmax) 115 | VARx(struct iocb **, linuxaio_submits) 116 | VARx(int, linuxaio_submitcnt) 117 | VARx(int, linuxaio_submitmax) 118 | VARx(ev_io, linuxaio_epoll_w) 119 | #endif 120 | 121 | #if EV_USE_IOURING || EV_GENWRAP 122 | VARx(int, iouring_fd) 123 | VARx(unsigned, iouring_to_submit); 124 | VARx(int, iouring_entries) 125 | VARx(int, iouring_max_entries) 126 | VARx(void *, iouring_ring) 127 | VARx(void *, iouring_sqes) 128 | VARx(uint32_t, iouring_ring_size) 129 | VARx(uint32_t, iouring_sqes_size) 130 | VARx(uint32_t, iouring_sq_head) 131 | VARx(uint32_t, iouring_sq_tail) 132 | VARx(uint32_t, iouring_sq_ring_mask) 133 | VARx(uint32_t, iouring_sq_ring_entries) 134 | VARx(uint32_t, iouring_sq_flags) 135 | VARx(uint32_t, iouring_sq_dropped) 136 | VARx(uint32_t, iouring_sq_array) 137 | VARx(uint32_t, iouring_cq_head) 138 | VARx(uint32_t, iouring_cq_tail) 139 | VARx(uint32_t, iouring_cq_ring_mask) 140 | VARx(uint32_t, iouring_cq_ring_entries) 141 | VARx(uint32_t, iouring_cq_overflow) 142 | VARx(uint32_t, iouring_cq_cqes) 143 | VARx(ev_tstamp, iouring_tfd_to) 144 | VARx(int, iouring_tfd) 145 | VARx(ev_io, iouring_tfd_w) 146 | #endif 147 | 148 | #if EV_USE_KQUEUE || EV_GENWRAP 149 | VARx(pid_t, kqueue_fd_pid) 150 | VARx(struct kevent *, kqueue_changes) 151 | VARx(int, kqueue_changemax) 152 | VARx(int, kqueue_changecnt) 153 | VARx(struct kevent *, kqueue_events) 154 | VARx(int, kqueue_eventmax) 155 | #endif 156 | 157 | #if EV_USE_PORT || EV_GENWRAP 158 | VARx(struct port_event *, port_events) 159 | VARx(int, port_eventmax) 160 | #endif 161 | 162 | #if EV_USE_IOCP || EV_GENWRAP 163 | VARx(HANDLE, iocp) 164 | #endif 165 | 166 | VARx(int *, fdchanges) 167 | VARx(int, fdchangemax) 168 | VARx(int, fdchangecnt) 169 | 170 | VARx(ANHE *, timers) 171 | VARx(int, timermax) 172 | VARx(int, timercnt) 173 | 174 | #if EV_PERIODIC_ENABLE || EV_GENWRAP 175 | VARx(ANHE *, periodics) 176 | VARx(int, periodicmax) 177 | VARx(int, periodiccnt) 178 | #endif 179 | 180 | #if EV_IDLE_ENABLE || EV_GENWRAP 181 | VAR (idles, ev_idle **idles [NUMPRI]) 182 | VAR (idlemax, int idlemax [NUMPRI]) 183 | VAR (idlecnt, int idlecnt [NUMPRI]) 184 | #endif 185 | VARx(int, idleall) /* total number */ 186 | 187 | VARx(struct ev_prepare **, prepares) 188 | VARx(int, preparemax) 189 | VARx(int, preparecnt) 190 | 191 | VARx(struct ev_check **, checks) 192 | VARx(int, checkmax) 193 | VARx(int, checkcnt) 194 | 195 | #if EV_FORK_ENABLE || EV_GENWRAP 196 | VARx(struct ev_fork **, forks) 197 | VARx(int, forkmax) 198 | VARx(int, forkcnt) 199 | #endif 200 | 201 | #if EV_CLEANUP_ENABLE || EV_GENWRAP 202 | VARx(struct ev_cleanup **, cleanups) 203 | VARx(int, cleanupmax) 204 | VARx(int, cleanupcnt) 205 | #endif 206 | 207 | #if EV_ASYNC_ENABLE || EV_GENWRAP 208 | VARx(EV_ATOMIC_T, async_pending) 209 | VARx(struct ev_async **, asyncs) 210 | VARx(int, asyncmax) 211 | VARx(int, asynccnt) 212 | #endif 213 | 214 | #if EV_USE_INOTIFY || EV_GENWRAP 215 | VARx(int, fs_fd) 216 | VARx(ev_io, fs_w) 217 | VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */ 218 | VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE]) 219 | #endif 220 | 221 | 
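/* [Editor's note -- illustrative sketch, not part of the original ev_vars.h.
 * Every VAR/VARx line in this file is an X-macro entry: the file never decides
 * what a loop member "is"; the includer does, by defining VAR before including
 * it. Roughly (simplified, assumed consumer code -- see ev.c for the real
 * mechanism), a multi-loop build turns the list into struct members, while a
 * single-loop build turns it into plain statics:
 *
 *   // struct-member expansion
 *   struct ev_loop
 *   {
 *     #define VAR(name,decl) decl;
 *     #include "ev_vars.h"
 *     #undef VAR
 *   };
 *
 *   // global-variable expansion
 *   #define VAR(name,decl) static decl;
 *   #include "ev_vars.h"
 *   #undef VAR
 *
 * ev_wrap.h (later in this dump) then maps each bare name onto
 * ((loop)->name), so the same function bodies compile either way. ]
 */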
VARx(EV_ATOMIC_T, sig_pending) 222 | #if EV_USE_SIGNALFD || EV_GENWRAP 223 | VARx(int, sigfd) 224 | VARx(ev_io, sigfd_w) 225 | VARx(sigset_t, sigfd_set) 226 | #endif 227 | 228 | #if EV_USE_TIMERFD || EV_GENWRAP 229 | VARx(int, timerfd) /* timerfd for time jump detection */ 230 | VARx(ev_io, timerfd_w) 231 | #endif 232 | 233 | VARx(unsigned int, origflags) /* original loop flags */ 234 | 235 | #if EV_FEATURE_API || EV_GENWRAP 236 | VARx(unsigned int, loop_count) /* total number of loop iterations/blocks */ 237 | VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */ 238 | 239 | VARx(void *, userdata) 240 | /* C++ doesn't support the ev_loop_callback typedef here. stinks. */ 241 | VAR (release_cb, void (*release_cb)(EV_P) EV_NOEXCEPT) 242 | VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_NOEXCEPT) 243 | VAR (invoke_cb , ev_loop_callback invoke_cb) 244 | #endif 245 | 246 | #undef VARx 247 | 248 | -------------------------------------------------------------------------------- /src/ev_win32.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev win32 compatibility cruft (_not_ a backend) 3 | * 4 | * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 
38 | */ 39 | 40 | #ifdef _WIN32 41 | 42 | /* note: the comment below could not be substantiated, but what would I care */ 43 | /* MSDN says this is required to handle SIGFPE */ 44 | /* my wild guess would be that using something floating-pointy is required */ 45 | /* for the crt to do something about it */ 46 | volatile double SIGFPE_REQ = 0.0f; 47 | 48 | static SOCKET 49 | ev_tcp_socket (void) 50 | { 51 | #if EV_USE_WSASOCKET 52 | return WSASocket (AF_INET, SOCK_STREAM, 0, 0, 0, 0); 53 | #else 54 | return socket (AF_INET, SOCK_STREAM, 0); 55 | #endif 56 | } 57 | 58 | /* oh, the humanity! */ 59 | static int 60 | ev_pipe (int filedes [2]) 61 | { 62 | struct sockaddr_in addr = { 0 }; 63 | int addr_size = sizeof (addr); 64 | struct sockaddr_in adr2; 65 | int adr2_size = sizeof (adr2); 66 | SOCKET listener; 67 | SOCKET sock [2] = { -1, -1 }; 68 | 69 | if ((listener = ev_tcp_socket ()) == INVALID_SOCKET) 70 | return -1; 71 | 72 | addr.sin_family = AF_INET; 73 | addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK); 74 | addr.sin_port = 0; 75 | 76 | if (bind (listener, (struct sockaddr *)&addr, addr_size)) 77 | goto fail; 78 | 79 | if (getsockname (listener, (struct sockaddr *)&addr, &addr_size)) 80 | goto fail; 81 | 82 | if (listen (listener, 1)) 83 | goto fail; 84 | 85 | if ((sock [0] = ev_tcp_socket ()) == INVALID_SOCKET) 86 | goto fail; 87 | 88 | if (connect (sock [0], (struct sockaddr *)&addr, addr_size)) 89 | goto fail; 90 | 91 | /* TODO: returns INVALID_SOCKET on winsock accept, not < 0. fix it */ 92 | /* when convenient, probably by just removing error checking altogether? */ 93 | if ((sock [1] = accept (listener, 0, 0)) < 0) 94 | goto fail; 95 | 96 | /* windows vista returns fantasy port numbers for sockets: 97 | * example for two interconnected tcp sockets: 98 | * 99 | * (Socket::unpack_sockaddr_in getsockname $sock0)[0] == 53364 100 | * (Socket::unpack_sockaddr_in getpeername $sock0)[0] == 53363 101 | * (Socket::unpack_sockaddr_in getsockname $sock1)[0] == 53363 102 | * (Socket::unpack_sockaddr_in getpeername $sock1)[0] == 53365 103 | * 104 | * wow! tridirectional sockets! 105 | * 106 | * this way of checking ports seems to work: 107 | */ 108 | if (getpeername (sock [0], (struct sockaddr *)&addr, &addr_size)) 109 | goto fail; 110 | 111 | if (getsockname (sock [1], (struct sockaddr *)&adr2, &adr2_size)) 112 | goto fail; 113 | 114 | errno = WSAEINVAL; 115 | if (addr_size != adr2_size 116 | || addr.sin_addr.s_addr != adr2.sin_addr.s_addr /* just to be sure, I mean, it's windows */ 117 | || addr.sin_port != adr2.sin_port) 118 | goto fail; 119 | 120 | closesocket (listener); 121 | 122 | #if EV_SELECT_IS_WINSOCKET 123 | filedes [0] = EV_WIN32_HANDLE_TO_FD (sock [0]); 124 | filedes [1] = EV_WIN32_HANDLE_TO_FD (sock [1]); 125 | #else 126 | /* when select isn't winsocket, we also expect socket, connect, accept etc. 
127 | * to work on fds */ 128 | filedes [0] = sock [0]; 129 | filedes [1] = sock [1]; 130 | #endif 131 | 132 | return 0; 133 | 134 | fail: 135 | closesocket (listener); 136 | 137 | if (sock [0] != INVALID_SOCKET) closesocket (sock [0]); 138 | if (sock [1] != INVALID_SOCKET) closesocket (sock [1]); 139 | 140 | return -1; 141 | } 142 | 143 | #undef pipe 144 | #define pipe(filedes) ev_pipe (filedes) 145 | 146 | #define EV_HAVE_EV_TIME 1 147 | ev_tstamp 148 | ev_time (void) 149 | { 150 | FILETIME ft; 151 | ULARGE_INTEGER ui; 152 | 153 | GetSystemTimeAsFileTime (&ft); 154 | ui.u.LowPart = ft.dwLowDateTime; 155 | ui.u.HighPart = ft.dwHighDateTime; 156 | 157 | /* also, msvc cannot convert ulonglong to double... yes, it is that sucky */ 158 | return EV_TS_FROM_USEC (((LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-1)); 159 | } 160 | 161 | #endif 162 | 163 | -------------------------------------------------------------------------------- /src/ev_wrap.h: -------------------------------------------------------------------------------- 1 | /* DO NOT EDIT, automatically generated by update_ev_wrap */ 2 | #ifndef EV_WRAP_H 3 | #define EV_WRAP_H 4 | #define acquire_cb ((loop)->acquire_cb) 5 | #define activecnt ((loop)->activecnt) 6 | #define anfdmax ((loop)->anfdmax) 7 | #define anfds ((loop)->anfds) 8 | #define async_pending ((loop)->async_pending) 9 | #define asynccnt ((loop)->asynccnt) 10 | #define asyncmax ((loop)->asyncmax) 11 | #define asyncs ((loop)->asyncs) 12 | #define backend ((loop)->backend) 13 | #define backend_fd ((loop)->backend_fd) 14 | #define backend_mintime ((loop)->backend_mintime) 15 | #define backend_modify ((loop)->backend_modify) 16 | #define backend_poll ((loop)->backend_poll) 17 | #define checkcnt ((loop)->checkcnt) 18 | #define checkmax ((loop)->checkmax) 19 | #define checks ((loop)->checks) 20 | #define cleanupcnt ((loop)->cleanupcnt) 21 | #define cleanupmax ((loop)->cleanupmax) 22 | #define cleanups ((loop)->cleanups) 23 | #define curpid ((loop)->curpid) 24 | #define epoll_epermcnt ((loop)->epoll_epermcnt) 25 | #define epoll_epermmax ((loop)->epoll_epermmax) 26 | #define epoll_eperms ((loop)->epoll_eperms) 27 | #define epoll_eventmax ((loop)->epoll_eventmax) 28 | #define epoll_events ((loop)->epoll_events) 29 | #define evpipe ((loop)->evpipe) 30 | #define fdchangecnt ((loop)->fdchangecnt) 31 | #define fdchangemax ((loop)->fdchangemax) 32 | #define fdchanges ((loop)->fdchanges) 33 | #define forkcnt ((loop)->forkcnt) 34 | #define forkmax ((loop)->forkmax) 35 | #define forks ((loop)->forks) 36 | #define fs_2625 ((loop)->fs_2625) 37 | #define fs_fd ((loop)->fs_fd) 38 | #define fs_hash ((loop)->fs_hash) 39 | #define fs_w ((loop)->fs_w) 40 | #define idleall ((loop)->idleall) 41 | #define idlecnt ((loop)->idlecnt) 42 | #define idlemax ((loop)->idlemax) 43 | #define idles ((loop)->idles) 44 | #define invoke_cb ((loop)->invoke_cb) 45 | #define io_blocktime ((loop)->io_blocktime) 46 | #define iocp ((loop)->iocp) 47 | #define iouring_cq_cqes ((loop)->iouring_cq_cqes) 48 | #define iouring_cq_head ((loop)->iouring_cq_head) 49 | #define iouring_cq_overflow ((loop)->iouring_cq_overflow) 50 | #define iouring_cq_ring_entries ((loop)->iouring_cq_ring_entries) 51 | #define iouring_cq_ring_mask ((loop)->iouring_cq_ring_mask) 52 | #define iouring_cq_tail ((loop)->iouring_cq_tail) 53 | #define iouring_entries ((loop)->iouring_entries) 54 | #define iouring_fd ((loop)->iouring_fd) 55 | #define iouring_max_entries ((loop)->iouring_max_entries) 56 | #define iouring_ring ((loop)->iouring_ring) 57 | 
#define iouring_ring_size ((loop)->iouring_ring_size) 58 | #define iouring_sq_array ((loop)->iouring_sq_array) 59 | #define iouring_sq_dropped ((loop)->iouring_sq_dropped) 60 | #define iouring_sq_flags ((loop)->iouring_sq_flags) 61 | #define iouring_sq_head ((loop)->iouring_sq_head) 62 | #define iouring_sq_ring_entries ((loop)->iouring_sq_ring_entries) 63 | #define iouring_sq_ring_mask ((loop)->iouring_sq_ring_mask) 64 | #define iouring_sq_tail ((loop)->iouring_sq_tail) 65 | #define iouring_sqes ((loop)->iouring_sqes) 66 | #define iouring_sqes_size ((loop)->iouring_sqes_size) 67 | #define iouring_tfd ((loop)->iouring_tfd) 68 | #define iouring_tfd_to ((loop)->iouring_tfd_to) 69 | #define iouring_tfd_w ((loop)->iouring_tfd_w) 70 | #define iouring_to_submit ((loop)->iouring_to_submit) 71 | #define kqueue_changecnt ((loop)->kqueue_changecnt) 72 | #define kqueue_changemax ((loop)->kqueue_changemax) 73 | #define kqueue_changes ((loop)->kqueue_changes) 74 | #define kqueue_eventmax ((loop)->kqueue_eventmax) 75 | #define kqueue_events ((loop)->kqueue_events) 76 | #define kqueue_fd_pid ((loop)->kqueue_fd_pid) 77 | #define linuxaio_ctx ((loop)->linuxaio_ctx) 78 | #define linuxaio_epoll_w ((loop)->linuxaio_epoll_w) 79 | #define linuxaio_iocbpmax ((loop)->linuxaio_iocbpmax) 80 | #define linuxaio_iocbps ((loop)->linuxaio_iocbps) 81 | #define linuxaio_iteration ((loop)->linuxaio_iteration) 82 | #define linuxaio_submitcnt ((loop)->linuxaio_submitcnt) 83 | #define linuxaio_submitmax ((loop)->linuxaio_submitmax) 84 | #define linuxaio_submits ((loop)->linuxaio_submits) 85 | #define loop_count ((loop)->loop_count) 86 | #define loop_depth ((loop)->loop_depth) 87 | #define loop_done ((loop)->loop_done) 88 | #define mn_now ((loop)->mn_now) 89 | #define now_floor ((loop)->now_floor) 90 | #define origflags ((loop)->origflags) 91 | #define pending_w ((loop)->pending_w) 92 | #define pendingcnt ((loop)->pendingcnt) 93 | #define pendingmax ((loop)->pendingmax) 94 | #define pendingpri ((loop)->pendingpri) 95 | #define pendings ((loop)->pendings) 96 | #define periodiccnt ((loop)->periodiccnt) 97 | #define periodicmax ((loop)->periodicmax) 98 | #define periodics ((loop)->periodics) 99 | #define pipe_w ((loop)->pipe_w) 100 | #define pipe_write_skipped ((loop)->pipe_write_skipped) 101 | #define pipe_write_wanted ((loop)->pipe_write_wanted) 102 | #define pollcnt ((loop)->pollcnt) 103 | #define pollidxmax ((loop)->pollidxmax) 104 | #define pollidxs ((loop)->pollidxs) 105 | #define pollmax ((loop)->pollmax) 106 | #define polls ((loop)->polls) 107 | #define port_eventmax ((loop)->port_eventmax) 108 | #define port_events ((loop)->port_events) 109 | #define postfork ((loop)->postfork) 110 | #define preparecnt ((loop)->preparecnt) 111 | #define preparemax ((loop)->preparemax) 112 | #define prepares ((loop)->prepares) 113 | #define release_cb ((loop)->release_cb) 114 | #define rfeedcnt ((loop)->rfeedcnt) 115 | #define rfeedmax ((loop)->rfeedmax) 116 | #define rfeeds ((loop)->rfeeds) 117 | #define rtmn_diff ((loop)->rtmn_diff) 118 | #define sig_pending ((loop)->sig_pending) 119 | #define sigfd ((loop)->sigfd) 120 | #define sigfd_set ((loop)->sigfd_set) 121 | #define sigfd_w ((loop)->sigfd_w) 122 | #define timeout_blocktime ((loop)->timeout_blocktime) 123 | #define timercnt ((loop)->timercnt) 124 | #define timerfd ((loop)->timerfd) 125 | #define timerfd_w ((loop)->timerfd_w) 126 | #define timermax ((loop)->timermax) 127 | #define timers ((loop)->timers) 128 | #define userdata ((loop)->userdata) 129 | #define vec_eo ((loop)->vec_eo) 
130 | #define vec_max ((loop)->vec_max) 131 | #define vec_ri ((loop)->vec_ri) 132 | #define vec_ro ((loop)->vec_ro) 133 | #define vec_wi ((loop)->vec_wi) 134 | #define vec_wo ((loop)->vec_wo) 135 | #else 136 | #undef EV_WRAP_H 137 | #undef acquire_cb 138 | #undef activecnt 139 | #undef anfdmax 140 | #undef anfds 141 | #undef async_pending 142 | #undef asynccnt 143 | #undef asyncmax 144 | #undef asyncs 145 | #undef backend 146 | #undef backend_fd 147 | #undef backend_mintime 148 | #undef backend_modify 149 | #undef backend_poll 150 | #undef checkcnt 151 | #undef checkmax 152 | #undef checks 153 | #undef cleanupcnt 154 | #undef cleanupmax 155 | #undef cleanups 156 | #undef curpid 157 | #undef epoll_epermcnt 158 | #undef epoll_epermmax 159 | #undef epoll_eperms 160 | #undef epoll_eventmax 161 | #undef epoll_events 162 | #undef evpipe 163 | #undef fdchangecnt 164 | #undef fdchangemax 165 | #undef fdchanges 166 | #undef forkcnt 167 | #undef forkmax 168 | #undef forks 169 | #undef fs_2625 170 | #undef fs_fd 171 | #undef fs_hash 172 | #undef fs_w 173 | #undef idleall 174 | #undef idlecnt 175 | #undef idlemax 176 | #undef idles 177 | #undef invoke_cb 178 | #undef io_blocktime 179 | #undef iocp 180 | #undef iouring_cq_cqes 181 | #undef iouring_cq_head 182 | #undef iouring_cq_overflow 183 | #undef iouring_cq_ring_entries 184 | #undef iouring_cq_ring_mask 185 | #undef iouring_cq_tail 186 | #undef iouring_entries 187 | #undef iouring_fd 188 | #undef iouring_max_entries 189 | #undef iouring_ring 190 | #undef iouring_ring_size 191 | #undef iouring_sq_array 192 | #undef iouring_sq_dropped 193 | #undef iouring_sq_flags 194 | #undef iouring_sq_head 195 | #undef iouring_sq_ring_entries 196 | #undef iouring_sq_ring_mask 197 | #undef iouring_sq_tail 198 | #undef iouring_sqes 199 | #undef iouring_sqes_size 200 | #undef iouring_tfd 201 | #undef iouring_tfd_to 202 | #undef iouring_tfd_w 203 | #undef iouring_to_submit 204 | #undef kqueue_changecnt 205 | #undef kqueue_changemax 206 | #undef kqueue_changes 207 | #undef kqueue_eventmax 208 | #undef kqueue_events 209 | #undef kqueue_fd_pid 210 | #undef linuxaio_ctx 211 | #undef linuxaio_epoll_w 212 | #undef linuxaio_iocbpmax 213 | #undef linuxaio_iocbps 214 | #undef linuxaio_iteration 215 | #undef linuxaio_submitcnt 216 | #undef linuxaio_submitmax 217 | #undef linuxaio_submits 218 | #undef loop_count 219 | #undef loop_depth 220 | #undef loop_done 221 | #undef mn_now 222 | #undef now_floor 223 | #undef origflags 224 | #undef pending_w 225 | #undef pendingcnt 226 | #undef pendingmax 227 | #undef pendingpri 228 | #undef pendings 229 | #undef periodiccnt 230 | #undef periodicmax 231 | #undef periodics 232 | #undef pipe_w 233 | #undef pipe_write_skipped 234 | #undef pipe_write_wanted 235 | #undef pollcnt 236 | #undef pollidxmax 237 | #undef pollidxs 238 | #undef pollmax 239 | #undef polls 240 | #undef port_eventmax 241 | #undef port_events 242 | #undef postfork 243 | #undef preparecnt 244 | #undef preparemax 245 | #undef prepares 246 | #undef release_cb 247 | #undef rfeedcnt 248 | #undef rfeedmax 249 | #undef rfeeds 250 | #undef rtmn_diff 251 | #undef sig_pending 252 | #undef sigfd 253 | #undef sigfd_set 254 | #undef sigfd_w 255 | #undef timeout_blocktime 256 | #undef timercnt 257 | #undef timerfd 258 | #undef timerfd_w 259 | #undef timermax 260 | #undef timers 261 | #undef userdata 262 | #undef vec_eo 263 | #undef vec_max 264 | #undef vec_ri 265 | #undef vec_ro 266 | #undef vec_wi 267 | #undef vec_wo 268 | #endif 269 | 
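/* [Editor's note -- the block below is an illustrative, self-contained sketch
 * of the wrapping pattern implemented by ev_wrap.h above; it is NOT part of
 * libev and is kept behind "#if 0" so it can never interfere with the real
 * header. All names in it (demo_loop, demo_count, DEMO_P_, DEMO_A_, bump) are
 * invented for the example. ]
 */
#if 0
#include <stdio.h>

/* a toy "loop" structure with a single member */
struct demo_loop { int demo_count; };

/* the ev_wrap.h trick: the bare member name expands to an access through
 * the local variable/parameter called "loop" */
#define demo_count ((loop)->demo_count)

/* analogues of libev's EV_P_ / EV_A_: parameter list and argument list */
#define DEMO_P_ struct demo_loop *loop,
#define DEMO_A_ loop,

/* the function body can use "demo_count" as if it were a global ... */
static void bump (DEMO_P_ int by)
{
  demo_count += by;              /* expands to ((loop)->demo_count) += by */
}

int main (void)
{
  struct demo_loop loop_storage = { 0 };
  struct demo_loop *loop = &loop_storage;   /* the name the macros rely on */

  bump (DEMO_A_ 3);
  printf ("%d\n", demo_count);   /* prints 3 */
  return 0;
}
#endif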
-------------------------------------------------------------------------------- /src/evdns.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2006 Niels Provos 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 3. The name of the author may not be used to endorse or promote products 14 | * derived from this software without specific prior written permission. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | */ 27 | 28 | /* 29 | * The original DNS code is due to Adam Langley with heavy 30 | * modifications by Nick Mathewson. Adam put his DNS software in the 31 | * public domain. You can find his original copyright below. Please, 32 | * aware that the code as part of libevent is governed by the 3-clause 33 | * BSD license above. 34 | * 35 | * This software is Public Domain. To view a copy of the public domain dedication, 36 | * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to 37 | * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA. 38 | * 39 | * I ask and expect, but do not require, that all derivative works contain an 40 | * attribution similar to: 41 | * Parts developed by Adam Langley 42 | * 43 | * You may wish to replace the word "Parts" with something else depending on 44 | * the amount of original code. 45 | * 46 | * (Derivative works does not include programs which link against, run or include 47 | * the source verbatim in their source distributions) 48 | */ 49 | 50 | /* 51 | * Welcome, gentle reader 52 | * 53 | * Async DNS lookups are really a whole lot harder than they should be, 54 | * mostly stemming from the fact that the libc resolver has never been 55 | * very good at them. Before you use this library you should see if libc 56 | * can do the job for you with the modern async call getaddrinfo_a 57 | * (see http://www.imperialviolet.org/page25.html#e498). Otherwise, 58 | * please continue. 59 | * 60 | * This code is based on libevent and you must call event_init before 61 | * any of the APIs in this file. You must also seed the OpenSSL random 62 | * source if you are using OpenSSL for ids (see below). 63 | * 64 | * This library is designed to be included and shipped with your source 65 | * code. You statically link with it. 
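 *
 * [Editor's note -- illustrative sketch, not part of the original header: the
 *  quick start further below, restated using only calls that are actually
 *  declared in this file (evdns_resolve_ipv4 rather than the plain
 *  evdns_resolve used in the prose) plus event_init/event_dispatch from
 *  event.h. "my_callback" is a placeholder name.
 *
 *    #include <stdint.h>
 *    #include "event.h"
 *    #include "evdns.h"
 *
 *    static void my_callback (int result, char type, int count, int ttl,
 *                             void *addresses, void *arg)
 *    {
 *      if (result == DNS_ERR_NONE && type == DNS_IPv4_A && count > 0)
 *        {
 *          uint32_t *addrs = addresses;   // IPv4 addresses, network byte order
 *          // ... use addrs [0] ...
 *        }
 *    }
 *
 *    int main (void)
 *    {
 *      event_init ();                     // evdns is driven by the event loop
 *      evdns_resolv_conf_parse (DNS_OPTIONS_ALL, "/etc/resolv.conf");
 *      evdns_resolve_ipv4 ("www.hostname.com", 0, my_callback, NULL);
 *      return event_dispatch ();
 *    }
 *  ]
 *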
You should also test for the 66 | * existence of strtok_r and define HAVE_STRTOK_R if you have it. 67 | * 68 | * The DNS protocol requires a good source of id numbers and these 69 | * numbers should be unpredictable for spoofing reasons. There are 70 | * three methods for generating them here and you must define exactly 71 | * one of them. In increasing order of preference: 72 | * 73 | * DNS_USE_GETTIMEOFDAY_FOR_ID: 74 | * Using the bottom 16 bits of the usec result from gettimeofday. This 75 | * is a pretty poor solution but should work anywhere. 76 | * DNS_USE_CPU_CLOCK_FOR_ID: 77 | * Using the bottom 16 bits of the nsec result from the CPU's time 78 | * counter. This is better, but may not work everywhere. Requires 79 | * POSIX realtime support and you'll need to link against -lrt on 80 | * glibc systems at least. 81 | * DNS_USE_OPENSSL_FOR_ID: 82 | * Uses the OpenSSL RAND_bytes call to generate the data. You must 83 | * have seeded the pool before making any calls to this library. 84 | * 85 | * The library keeps track of the state of nameservers and will avoid 86 | * them when they go down. Otherwise it will round robin between them. 87 | * 88 | * Quick start guide: 89 | * #include "evdns.h" 90 | * void callback(int result, char type, int count, int ttl, 91 | * void *addresses, void *arg); 92 | * evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf"); 93 | * evdns_resolve("www.hostname.com", 0, callback, NULL); 94 | * 95 | * When the lookup is complete the callback function is called. The 96 | * first argument will be one of the DNS_ERR_* defines in evdns.h. 97 | * Hopefully it will be DNS_ERR_NONE, in which case type will be 98 | * DNS_IPv4_A, count will be the number of IP addresses, ttl is the time 99 | * which the data can be cached for (in seconds), addresses will point 100 | * to an array of uint32_t's and arg will be whatever you passed to 101 | * evdns_resolve. 102 | * 103 | * Searching: 104 | * 105 | * In order for this library to be a good replacement for glibc's resolver it 106 | * supports searching. This involves setting a list of default domains, in 107 | * which names will be queried for. The number of dots in the query name 108 | * determines the order in which this list is used. 109 | * 110 | * Searching appears to be a single lookup from the point of view of the API, 111 | * although many DNS queries may be generated from a single call to 112 | * evdns_resolve. Searching can also drastically slow down the resolution 113 | * of names. 114 | * 115 | * To disable searching: 116 | * 1. Never set it up. If you never call evdns_resolv_conf_parse or 117 | * evdns_search_add then no searching will occur. 118 | * 119 | * 2. If you do call evdns_resolv_conf_parse then don't pass 120 | * DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it). 121 | * 122 | * 3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag. 123 | * 124 | * The order of searches depends on the number of dots in the name. If the 125 | * number is greater than the ndots setting then the names is first tried 126 | * globally. Otherwise each search domain is appended in turn. 127 | * 128 | * The ndots setting can either be set from a resolv.conf, or by calling 129 | * evdns_search_ndots_set. 130 | * 131 | * For example, with ndots set to 1 (the default) and a search domain list of 132 | * ["myhome.net"]: 133 | * Query: www 134 | * Order: www.myhome.net, www. 
135 | * 136 | * Query: www.abc 137 | * Order: www.abc., www.abc.myhome.net 138 | * 139 | * API reference: 140 | * 141 | * int evdns_nameserver_add(unsigned long int address) 142 | * Add a nameserver. The address should be an IP address in 143 | * network byte order. The type of address is chosen so that 144 | * it matches in_addr.s_addr. 145 | * Returns non-zero on error. 146 | * 147 | * int evdns_nameserver_ip_add(const char *ip_as_string) 148 | * This wraps the above function by parsing a string as an IP 149 | * address and adds it as a nameserver. 150 | * Returns non-zero on error 151 | * 152 | * int evdns_resolve(const char *name, int flags, 153 | * evdns_callback_type callback, 154 | * void *ptr) 155 | * Resolve a name. The name parameter should be a DNS name. 156 | * The flags parameter should be 0, or DNS_QUERY_NO_SEARCH 157 | * which disables searching for this query. (see defn of 158 | * searching above). 159 | * 160 | * The callback argument is a function which is called when 161 | * this query completes and ptr is an argument which is passed 162 | * to that callback function. 163 | * 164 | * Returns non-zero on error 165 | * 166 | * void evdns_search_clear() 167 | * Clears the list of search domains 168 | * 169 | * void evdns_search_add(const char *domain) 170 | * Add a domain to the list of search domains 171 | * 172 | * void evdns_search_ndots_set(int ndots) 173 | * Set the number of dots which, when found in a name, causes 174 | * the first query to be without any search domain. 175 | * 176 | * int evdns_count_nameservers(void) 177 | * Return the number of configured nameservers (not necessarily the 178 | * number of running nameservers). This is useful for double-checking 179 | * whether our calls to the various nameserver configuration functions 180 | * have been successful. 181 | * 182 | * int evdns_clear_nameservers_and_suspend(void) 183 | * Remove all currently configured nameservers, and suspend all pending 184 | * resolves. Resolves will not necessarily be re-attempted until 185 | * evdns_resume() is called. 186 | * 187 | * int evdns_resume(void) 188 | * Re-attempt resolves left in limbo after an earlier call to 189 | * evdns_clear_nameservers_and_suspend(). 190 | * 191 | * int evdns_config_windows_nameservers(void) 192 | * Attempt to configure a set of nameservers based on platform settings on 193 | * a win32 host. Preferentially tries to use GetNetworkParams; if that fails, 194 | * looks in the registry. Returns 0 on success, nonzero on failure. 195 | * 196 | * int evdns_resolv_conf_parse(int flags, const char *filename) 197 | * Parse a resolv.conf like file from the given filename. 198 | * 199 | * See the man page for resolv.conf for the format of this file. 200 | * The flags argument determines what information is parsed from 201 | * this file: 202 | * DNS_OPTION_SEARCH - domain, search and ndots options 203 | * DNS_OPTION_NAMESERVERS - nameserver lines 204 | * DNS_OPTION_MISC - timeout and attempts options 205 | * DNS_OPTIONS_ALL - all of the above 206 | * The following directives are not parsed from the file: 207 | * sortlist, rotate, no-check-names, inet6, debug 208 | * 209 | * Returns non-zero on error: 210 | * 0 no errors 211 | * 1 failed to open file 212 | * 2 failed to stat file 213 | * 3 file too large 214 | * 4 out of memory 215 | * 5 short read from file 216 | * 6 no nameservers in file 217 | * 218 | * Internals: 219 | * 220 | * Requests are kept in two queues. The first is the inflight queue. 
In 221 | * this queue requests have an allocated transaction id and nameserver. 222 | * They will soon be transmitted if they haven't already been. 223 | * 224 | * The second is the waiting queue. The size of the inflight ring is 225 | * limited and all other requests wait in waiting queue for space. This 226 | * bounds the number of concurrent requests so that we don't flood the 227 | * nameserver. Several algorithms require a full walk of the inflight 228 | * queue and so bounding its size keeps thing going nicely under huge 229 | * (many thousands of requests) loads. 230 | * 231 | * If a nameserver loses too many requests it is considered down and we 232 | * try not to use it. After a while we send a probe to that nameserver 233 | * (a lookup for google.com) and, if it replies, we consider it working 234 | * again. If the nameserver fails a probe we wait longer to try again 235 | * with the next probe. 236 | */ 237 | 238 | #ifndef EVENTDNS_H 239 | #define EVENTDNS_H 240 | 241 | #ifdef __cplusplus 242 | extern "C" { 243 | #endif 244 | 245 | /* Error codes 0-5 are as described in RFC 1035. */ 246 | #define DNS_ERR_NONE 0 247 | /* The name server was unable to interpret the query */ 248 | #define DNS_ERR_FORMAT 1 249 | /* The name server was unable to process this query due to a problem with the 250 | * name server */ 251 | #define DNS_ERR_SERVERFAILED 2 252 | /* The domain name does not exist */ 253 | #define DNS_ERR_NOTEXIST 3 254 | /* The name server does not support the requested kind of query */ 255 | #define DNS_ERR_NOTIMPL 4 256 | /* The name server refuses to reform the specified operation for policy 257 | * reasons */ 258 | #define DNS_ERR_REFUSED 5 259 | /* The reply was truncated or ill-formated */ 260 | #define DNS_ERR_TRUNCATED 65 261 | /* An unknown error occurred */ 262 | #define DNS_ERR_UNKNOWN 66 263 | /* Communication with the server timed out */ 264 | #define DNS_ERR_TIMEOUT 67 265 | /* The request was canceled because the DNS subsystem was shut down. */ 266 | #define DNS_ERR_SHUTDOWN 68 267 | 268 | #define DNS_IPv4_A 1 269 | #define DNS_PTR 2 270 | #define DNS_IPv6_AAAA 3 271 | 272 | #define DNS_QUERY_NO_SEARCH 1 273 | 274 | #define DNS_OPTION_SEARCH 1 275 | #define DNS_OPTION_NAMESERVERS 2 276 | #define DNS_OPTION_MISC 4 277 | #define DNS_OPTIONS_ALL 7 278 | 279 | /* 280 | * The callback that contains the results from a lookup. 281 | * - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA 282 | * - count contains the number of addresses of form type 283 | * - ttl is the number of seconds the resolution may be cached for. 
284 | * - addresses needs to be cast according to type 285 | */ 286 | typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg); 287 | 288 | int evdns_init(void); 289 | void evdns_shutdown(int fail_requests); 290 | const char *evdns_err_to_string(int err); 291 | int evdns_nameserver_add(unsigned long int address); 292 | int evdns_count_nameservers(void); 293 | int evdns_clear_nameservers_and_suspend(void); 294 | int evdns_resume(void); 295 | int evdns_nameserver_ip_add(const char *ip_as_string); 296 | int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr); 297 | int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr); 298 | struct in_addr; 299 | struct in6_addr; 300 | int evdns_resolve_reverse(struct in_addr *in, int flags, evdns_callback_type callback, void *ptr); 301 | int evdns_resolve_reverse_ipv6(struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr); 302 | int evdns_set_option(const char *option, const char *val, int flags); 303 | int evdns_resolv_conf_parse(int flags, const char *); 304 | #ifdef MS_WINDOWS 305 | int evdns_config_windows_nameservers(void); 306 | #endif 307 | void evdns_search_clear(void); 308 | void evdns_search_add(const char *domain); 309 | void evdns_search_ndots_set(const int ndots); 310 | 311 | typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg); 312 | void evdns_set_log_fn(evdns_debug_log_fn_type fn); 313 | 314 | #define DNS_NO_SEARCH 1 315 | 316 | #ifdef __cplusplus 317 | } 318 | #endif 319 | 320 | /* 321 | * Structures and functions used to implement a DNS server. 322 | */ 323 | 324 | struct evdns_server_request { 325 | int flags; 326 | int nquestions; 327 | struct evdns_server_question **questions; 328 | }; 329 | struct evdns_server_question { 330 | int type; 331 | int class; 332 | char name[1]; 333 | }; 334 | typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *); 335 | #define EVDNS_ANSWER_SECTION 0 336 | #define EVDNS_AUTHORITY_SECTION 1 337 | #define EVDNS_ADDITIONAL_SECTION 2 338 | 339 | #define EVDNS_TYPE_A 1 340 | #define EVDNS_TYPE_NS 2 341 | #define EVDNS_TYPE_CNAME 5 342 | #define EVDNS_TYPE_SOA 6 343 | #define EVDNS_TYPE_PTR 12 344 | #define EVDNS_TYPE_MX 15 345 | #define EVDNS_TYPE_TXT 16 346 | #define EVDNS_TYPE_AAAA 28 347 | 348 | #define EVDNS_QTYPE_AXFR 252 349 | #define EVDNS_QTYPE_ALL 255 350 | 351 | #define EVDNS_CLASS_INET 1 352 | 353 | struct evdns_server_port *evdns_add_server_port(int socket, int is_tcp, evdns_request_callback_fn_type callback, void *user_data); 354 | void evdns_close_server_port(struct evdns_server_port *port); 355 | 356 | int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int class, int ttl, int datalen, int is_name, const char *data); 357 | int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl); 358 | int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl); 359 | int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl); 360 | int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl); 361 | 362 | int evdns_server_request_respond(struct evdns_server_request *req, int err); 363 | int 
evdns_server_request_drop(struct evdns_server_request *req); 364 | struct sockaddr; 365 | int evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len); 366 | 367 | #endif /* !EVENTDNS_H */ 368 | -------------------------------------------------------------------------------- /src/event.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libevent compatibility layer 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2012 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #include 41 | #include 42 | #include 43 | 44 | #ifdef EV_EVENT_H 45 | # include EV_EVENT_H 46 | #else 47 | # include "event.h" 48 | #endif 49 | 50 | #if EV_MULTIPLICITY 51 | # define dLOOPev struct ev_loop *loop = (struct ev_loop *)ev->ev_base 52 | # define dLOOPbase struct ev_loop *loop = (struct ev_loop *)base 53 | #else 54 | # define dLOOPev 55 | # define dLOOPbase 56 | #endif 57 | 58 | /* never accessed, will always be cast from/to ev_loop */ 59 | struct event_base 60 | { 61 | int dummy; 62 | }; 63 | 64 | static struct event_base *ev_x_cur; 65 | 66 | static ev_tstamp 67 | ev_tv_get (struct timeval *tv) 68 | { 69 | if (tv) 70 | { 71 | ev_tstamp after = tv->tv_sec + tv->tv_usec * 1e-6; 72 | return after ? after : 1e-6; 73 | } 74 | else 75 | return -1.; 76 | } 77 | 78 | #define EVENT_STRINGIFY(s) # s 79 | #define EVENT_VERSION(a,b) EVENT_STRINGIFY (a) "." 
EVENT_STRINGIFY (b) 80 | 81 | const char * 82 | event_get_version (void) 83 | { 84 | /* returns ABI, not API or library, version */ 85 | return EVENT_VERSION (EV_VERSION_MAJOR, EV_VERSION_MINOR); 86 | } 87 | 88 | const char * 89 | event_get_method (void) 90 | { 91 | return "libev"; 92 | } 93 | 94 | void *event_init (void) 95 | { 96 | #if EV_MULTIPLICITY 97 | if (ev_x_cur) 98 | ev_x_cur = (struct event_base *)ev_loop_new (EVFLAG_AUTO); 99 | else 100 | ev_x_cur = (struct event_base *)ev_default_loop (EVFLAG_AUTO); 101 | #else 102 | assert (("libev: multiple event bases not supported when not compiled with EV_MULTIPLICITY", !ev_x_cur)); 103 | 104 | ev_x_cur = (struct event_base *)(long)ev_default_loop (EVFLAG_AUTO); 105 | #endif 106 | 107 | return ev_x_cur; 108 | } 109 | 110 | const char * 111 | event_base_get_method (const struct event_base *base) 112 | { 113 | return "libev"; 114 | } 115 | 116 | struct event_base * 117 | event_base_new (void) 118 | { 119 | #if EV_MULTIPLICITY 120 | return (struct event_base *)ev_loop_new (EVFLAG_AUTO); 121 | #else 122 | assert (("libev: multiple event bases not supported when not compiled with EV_MULTIPLICITY")); 123 | return NULL; 124 | #endif 125 | } 126 | 127 | void event_base_free (struct event_base *base) 128 | { 129 | dLOOPbase; 130 | 131 | #if EV_MULTIPLICITY 132 | if (!ev_is_default_loop (loop)) 133 | ev_loop_destroy (loop); 134 | #endif 135 | } 136 | 137 | int event_dispatch (void) 138 | { 139 | return event_base_dispatch (ev_x_cur); 140 | } 141 | 142 | #ifdef EV_STANDALONE 143 | void event_set_log_callback (event_log_cb cb) 144 | { 145 | /* nop */ 146 | } 147 | #endif 148 | 149 | int event_loop (int flags) 150 | { 151 | return event_base_loop (ev_x_cur, flags); 152 | } 153 | 154 | int event_loopexit (struct timeval *tv) 155 | { 156 | return event_base_loopexit (ev_x_cur, tv); 157 | } 158 | 159 | event_callback_fn event_get_callback 160 | (const struct event *ev) 161 | { 162 | return ev->ev_callback; 163 | } 164 | 165 | static void 166 | ev_x_cb (struct event *ev, int revents) 167 | { 168 | revents &= EV_READ | EV_WRITE | EV_TIMER | EV_SIGNAL; 169 | 170 | ev->ev_res = revents; 171 | ev->ev_callback (ev->ev_fd, (short)revents, ev->ev_arg); 172 | } 173 | 174 | static void 175 | ev_x_cb_sig (EV_P_ struct ev_signal *w, int revents) 176 | { 177 | struct event *ev = (struct event *)(((char *)w) - offsetof (struct event, iosig.sig)); 178 | 179 | if (revents & EV_ERROR) 180 | event_del (ev); 181 | 182 | ev_x_cb (ev, revents); 183 | } 184 | 185 | static void 186 | ev_x_cb_io (EV_P_ struct ev_io *w, int revents) 187 | { 188 | struct event *ev = (struct event *)(((char *)w) - offsetof (struct event, iosig.io)); 189 | 190 | if ((revents & EV_ERROR) || !(ev->ev_events & EV_PERSIST)) 191 | event_del (ev); 192 | 193 | ev_x_cb (ev, revents); 194 | } 195 | 196 | static void 197 | ev_x_cb_to (EV_P_ struct ev_timer *w, int revents) 198 | { 199 | struct event *ev = (struct event *)(((char *)w) - offsetof (struct event, to)); 200 | 201 | event_del (ev); 202 | 203 | ev_x_cb (ev, revents); 204 | } 205 | 206 | void event_set (struct event *ev, int fd, short events, void (*cb)(int, short, void *), void *arg) 207 | { 208 | if (events & EV_SIGNAL) 209 | ev_init (&ev->iosig.sig, ev_x_cb_sig); 210 | else 211 | ev_init (&ev->iosig.io, ev_x_cb_io); 212 | 213 | ev_init (&ev->to, ev_x_cb_to); 214 | 215 | ev->ev_base = ev_x_cur; /* not threadsafe, but it's how libevent works */ 216 | ev->ev_fd = fd; 217 | ev->ev_events = events; 218 | ev->ev_pri = 0; 219 | ev->ev_callback = cb; 220 
| ev->ev_arg = arg; 221 | ev->ev_res = 0; 222 | ev->ev_flags = EVLIST_INIT; 223 | } 224 | 225 | int event_once (int fd, short events, void (*cb)(int, short, void *), void *arg, struct timeval *tv) 226 | { 227 | return event_base_once (ev_x_cur, fd, events, cb, arg, tv); 228 | } 229 | 230 | int event_add (struct event *ev, struct timeval *tv) 231 | { 232 | dLOOPev; 233 | 234 | if (ev->ev_events & EV_SIGNAL) 235 | { 236 | if (!ev_is_active (&ev->iosig.sig)) 237 | { 238 | ev_signal_set (&ev->iosig.sig, ev->ev_fd); 239 | ev_signal_start (EV_A_ &ev->iosig.sig); 240 | 241 | ev->ev_flags |= EVLIST_SIGNAL; 242 | } 243 | } 244 | else if (ev->ev_events & (EV_READ | EV_WRITE)) 245 | { 246 | if (!ev_is_active (&ev->iosig.io)) 247 | { 248 | ev_io_set (&ev->iosig.io, ev->ev_fd, ev->ev_events & (EV_READ | EV_WRITE)); 249 | ev_io_start (EV_A_ &ev->iosig.io); 250 | 251 | ev->ev_flags |= EVLIST_INSERTED; 252 | } 253 | } 254 | 255 | if (tv) 256 | { 257 | ev->to.repeat = ev_tv_get (tv); 258 | ev_timer_again (EV_A_ &ev->to); 259 | ev->ev_flags |= EVLIST_TIMEOUT; 260 | } 261 | else 262 | { 263 | ev_timer_stop (EV_A_ &ev->to); 264 | ev->ev_flags &= ~EVLIST_TIMEOUT; 265 | } 266 | 267 | ev->ev_flags |= EVLIST_ACTIVE; 268 | 269 | return 0; 270 | } 271 | 272 | int event_del (struct event *ev) 273 | { 274 | dLOOPev; 275 | 276 | if (ev->ev_events & EV_SIGNAL) 277 | ev_signal_stop (EV_A_ &ev->iosig.sig); 278 | else if (ev->ev_events & (EV_READ | EV_WRITE)) 279 | ev_io_stop (EV_A_ &ev->iosig.io); 280 | 281 | if (ev_is_active (&ev->to)) 282 | ev_timer_stop (EV_A_ &ev->to); 283 | 284 | ev->ev_flags = EVLIST_INIT; 285 | 286 | return 0; 287 | } 288 | 289 | void event_active (struct event *ev, int res, short ncalls) 290 | { 291 | dLOOPev; 292 | 293 | if (res & EV_TIMEOUT) 294 | ev_feed_event (EV_A_ &ev->to, res & EV_TIMEOUT); 295 | 296 | if (res & EV_SIGNAL) 297 | ev_feed_event (EV_A_ &ev->iosig.sig, res & EV_SIGNAL); 298 | 299 | if (res & (EV_READ | EV_WRITE)) 300 | ev_feed_event (EV_A_ &ev->iosig.io, res & (EV_READ | EV_WRITE)); 301 | } 302 | 303 | int event_pending (struct event *ev, short events, struct timeval *tv) 304 | { 305 | short revents = 0; 306 | dLOOPev; 307 | 308 | if (ev->ev_events & EV_SIGNAL) 309 | { 310 | /* sig */ 311 | if (ev_is_active (&ev->iosig.sig) || ev_is_pending (&ev->iosig.sig)) 312 | revents |= EV_SIGNAL; 313 | } 314 | else if (ev->ev_events & (EV_READ | EV_WRITE)) 315 | { 316 | /* io */ 317 | if (ev_is_active (&ev->iosig.io) || ev_is_pending (&ev->iosig.io)) 318 | revents |= ev->ev_events & (EV_READ | EV_WRITE); 319 | } 320 | 321 | if (ev->ev_events & EV_TIMEOUT || ev_is_active (&ev->to) || ev_is_pending (&ev->to)) 322 | { 323 | revents |= EV_TIMEOUT; 324 | 325 | if (tv) 326 | { 327 | ev_tstamp at = ev_now (EV_A); 328 | 329 | tv->tv_sec = (long)at; 330 | tv->tv_usec = (long)((at - (ev_tstamp)tv->tv_sec) * 1e6); 331 | } 332 | } 333 | 334 | return events & revents; 335 | } 336 | 337 | int event_priority_init (int npri) 338 | { 339 | return event_base_priority_init (ev_x_cur, npri); 340 | } 341 | 342 | int event_priority_set (struct event *ev, int pri) 343 | { 344 | ev->ev_pri = pri; 345 | 346 | return 0; 347 | } 348 | 349 | int event_base_set (struct event_base *base, struct event *ev) 350 | { 351 | ev->ev_base = base; 352 | 353 | return 0; 354 | } 355 | 356 | int event_base_loop (struct event_base *base, int flags) 357 | { 358 | dLOOPbase; 359 | 360 | return !ev_run (EV_A_ flags); 361 | } 362 | 363 | int event_base_dispatch (struct event_base *base) 364 | { 365 | return event_base_loop (base, 0); 
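/* [Editor's note -- illustrative sketch, not part of the original event.c.
 * Putting this compatibility layer together, classic libevent-style code keeps
 * working on top of libev roughly like this (the fd, "on_readable" and the
 * EV_PERSIST usage are invented for the example):
 *
 *   #include "event.h"
 *
 *   static struct event ev;
 *
 *   static void on_readable (int fd, short revents, void *arg)
 *   {
 *     // read from fd; with EV_PERSIST the event stays registered
 *   }
 *
 *   int main (void)
 *   {
 *     event_init ();                                      // default base == default libev loop
 *     event_set (&ev, 0, EV_READ | EV_PERSIST, on_readable, NULL);   // fd 0 = stdin
 *     event_add (&ev, NULL);                              // NULL timeval: no timeout
 *     return event_dispatch ();                           // event_base_loop -> ev_run
 *   }
 * ]
 */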
366 | } 367 | 368 | static void 369 | ev_x_loopexit_cb (int revents, void *base) 370 | { 371 | dLOOPbase; 372 | 373 | ev_break (EV_A_ EVBREAK_ONE); 374 | } 375 | 376 | int event_base_loopexit (struct event_base *base, struct timeval *tv) 377 | { 378 | ev_tstamp after = ev_tv_get (tv); 379 | dLOOPbase; 380 | 381 | ev_once (EV_A_ -1, 0, after >= 0. ? after : 0., ev_x_loopexit_cb, (void *)base); 382 | 383 | return 0; 384 | } 385 | 386 | struct ev_x_once 387 | { 388 | int fd; 389 | void (*cb)(int, short, void *); 390 | void *arg; 391 | }; 392 | 393 | static void 394 | ev_x_once_cb (int revents, void *arg) 395 | { 396 | struct ev_x_once *once = (struct ev_x_once *)arg; 397 | 398 | once->cb (once->fd, (short)revents, once->arg); 399 | free (once); 400 | } 401 | 402 | int event_base_once (struct event_base *base, int fd, short events, void (*cb)(int, short, void *), void *arg, struct timeval *tv) 403 | { 404 | struct ev_x_once *once = (struct ev_x_once *)malloc (sizeof (struct ev_x_once)); 405 | dLOOPbase; 406 | 407 | if (!once) 408 | return -1; 409 | 410 | once->fd = fd; 411 | once->cb = cb; 412 | once->arg = arg; 413 | 414 | ev_once (EV_A_ fd, events & (EV_READ | EV_WRITE), ev_tv_get (tv), ev_x_once_cb, (void *)once); 415 | 416 | return 0; 417 | } 418 | 419 | int event_base_priority_init (struct event_base *base, int npri) 420 | { 421 | /*dLOOPbase;*/ 422 | 423 | return 0; 424 | } 425 | 426 | -------------------------------------------------------------------------------- /src/event.h: -------------------------------------------------------------------------------- 1 | /* 2 | * libevent compatibility header, only core events supported 3 | * 4 | * Copyright (c) 2007,2008,2010,2012 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. 
If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #ifndef EVENT_H_ 41 | #define EVENT_H_ 42 | 43 | #ifdef EV_H 44 | # include EV_H 45 | #else 46 | # include "ev.h" 47 | #endif 48 | 49 | #ifndef EVLOOP_NONBLOCK 50 | # define EVLOOP_NONBLOCK EVRUN_NOWAIT 51 | #endif 52 | #ifndef EVLOOP_ONESHOT 53 | # define EVLOOP_ONESHOT EVRUN_ONCE 54 | #endif 55 | #ifndef EV_TIMEOUT 56 | # define EV_TIMEOUT EV_TIMER 57 | #endif 58 | 59 | #ifdef __cplusplus 60 | extern "C" { 61 | #endif 62 | 63 | /* we need sys/time.h for struct timeval only */ 64 | #if !defined (WIN32) || defined (__MINGW32__) 65 | # include /* mingw seems to need this, for whatever reason */ 66 | # include 67 | #endif 68 | 69 | struct event_base; 70 | 71 | #define EVLIST_TIMEOUT 0x01 72 | #define EVLIST_INSERTED 0x02 73 | #define EVLIST_SIGNAL 0x04 74 | #define EVLIST_ACTIVE 0x08 75 | #define EVLIST_INTERNAL 0x10 76 | #define EVLIST_INIT 0x80 77 | 78 | typedef void (*event_callback_fn)(int, short, void *); 79 | 80 | struct event 81 | { 82 | /* libev watchers we map onto */ 83 | union { 84 | struct ev_io io; 85 | struct ev_signal sig; 86 | } iosig; 87 | struct ev_timer to; 88 | 89 | /* compatibility slots */ 90 | struct event_base *ev_base; 91 | event_callback_fn ev_callback; 92 | void *ev_arg; 93 | int ev_fd; 94 | int ev_pri; 95 | int ev_res; 96 | int ev_flags; 97 | short ev_events; 98 | }; 99 | 100 | event_callback_fn event_get_callback (const struct event *ev); 101 | 102 | #define EV_READ EV_READ 103 | #define EV_WRITE EV_WRITE 104 | #define EV_PERSIST 0x10 105 | #define EV_ET 0x20 /* nop */ 106 | 107 | #define EVENT_SIGNAL(ev) ((int) (ev)->ev_fd) 108 | #define EVENT_FD(ev) ((int) (ev)->ev_fd) 109 | 110 | #define event_initialized(ev) ((ev)->ev_flags & EVLIST_INIT) 111 | 112 | #define evtimer_add(ev,tv) event_add (ev, tv) 113 | #define evtimer_set(ev,cb,data) event_set (ev, -1, 0, cb, data) 114 | #define evtimer_del(ev) event_del (ev) 115 | #define evtimer_pending(ev,tv) event_pending (ev, EV_TIMEOUT, tv) 116 | #define evtimer_initialized(ev) event_initialized (ev) 117 | 118 | #define timeout_add(ev,tv) evtimer_add (ev, tv) 119 | #define timeout_set(ev,cb,data) evtimer_set (ev, cb, data) 120 | #define timeout_del(ev) evtimer_del (ev) 121 | #define timeout_pending(ev,tv) evtimer_pending (ev, tv) 122 | #define timeout_initialized(ev) evtimer_initialized (ev) 123 | 124 | #define signal_add(ev,tv) event_add (ev, tv) 125 | #define signal_set(ev,sig,cb,data) event_set (ev, sig, EV_SIGNAL | EV_PERSIST, cb, data) 126 | #define signal_del(ev) event_del (ev) 127 | #define signal_pending(ev,tv) event_pending (ev, EV_SIGNAL, tv) 128 | #define signal_initialized(ev) event_initialized (ev) 129 | 130 | const char *event_get_version (void); 131 | const char *event_get_method (void); 132 | 133 | void *event_init (void); 134 | void event_base_free (struct event_base *base); 135 | 136 | #define EVLOOP_ONCE EVLOOP_ONESHOT 137 | int event_loop (int); 138 | int event_loopexit (struct timeval *tv); 139 | int event_dispatch (void); 140 | 141 | #define _EVENT_LOG_DEBUG 0 142 | #define _EVENT_LOG_MSG 1 143 | #define _EVENT_LOG_WARN 2 144 
| #define _EVENT_LOG_ERR 3 145 | typedef void (*event_log_cb)(int severity, const char *msg); 146 | void event_set_log_callback(event_log_cb cb); 147 | 148 | void event_set (struct event *ev, int fd, short events, void (*cb)(int, short, void *), void *arg); 149 | int event_once (int fd, short events, void (*cb)(int, short, void *), void *arg, struct timeval *tv); 150 | 151 | int event_add (struct event *ev, struct timeval *tv); 152 | int event_del (struct event *ev); 153 | void event_active (struct event *ev, int res, short ncalls); /* ncalls is being ignored */ 154 | 155 | int event_pending (struct event *ev, short, struct timeval *tv); 156 | 157 | int event_priority_init (int npri); 158 | int event_priority_set (struct event *ev, int pri); 159 | 160 | struct event_base *event_base_new (void); 161 | const char *event_base_get_method (const struct event_base *); 162 | int event_base_set (struct event_base *base, struct event *ev); 163 | int event_base_loop (struct event_base *base, int); 164 | int event_base_loopexit (struct event_base *base, struct timeval *tv); 165 | int event_base_dispatch (struct event_base *base); 166 | int event_base_once (struct event_base *base, int fd, short events, void (*cb)(int, short, void *), void *arg, struct timeval *tv); 167 | int event_base_priority_init (struct event_base *base, int fd); 168 | 169 | /* next line is different in the libevent+libev version */ 170 | /*libevent-include*/ 171 | 172 | #ifdef __cplusplus 173 | } 174 | #endif 175 | 176 | #endif 177 | 178 | -------------------------------------------------------------------------------- /src/event_compat.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2000-2004 Niels Provos 3 | * Copyright (c) 2008 Marc Alexander Lehmann 4 | * All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions 8 | * are met: 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 2. Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * 3. The name of the author may not be used to endorse or promote products 15 | * derived from this software without specific prior written permission. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | */ 28 | #ifdef __cplusplus 29 | extern "C" { 30 | #endif 31 | 32 | #ifdef _WIN32 33 | # define WIN32_LEAN_AND_MEAN 34 | # include 35 | # undef WIN32_LEAN_AND_MEAN 36 | typedef unsigned char u_char; 37 | typedef unsigned short u_short; 38 | #else 39 | # include 40 | # include 41 | # include 42 | #endif 43 | 44 | #include 45 | 46 | /* Fix so that ppl dont have to run with */ 47 | #ifndef TAILQ_ENTRY 48 | #define _EVENT_DEFINED_TQENTRY 49 | #define TAILQ_ENTRY(type) \ 50 | struct { \ 51 | struct type *tqe_next; /* next element */ \ 52 | struct type **tqe_prev; /* address of previous next element */ \ 53 | } 54 | #endif /* !TAILQ_ENTRY */ 55 | #ifndef RB_ENTRY 56 | #define _EVENT_DEFINED_RBENTRY 57 | #define RB_ENTRY(type) \ 58 | struct { \ 59 | struct type *rbe_left; /* left element */ \ 60 | struct type *rbe_right; /* right element */ \ 61 | struct type *rbe_parent; /* parent element */ \ 62 | int rbe_color; /* node color */ \ 63 | } 64 | #endif /* !RB_ENTRY */ 65 | 66 | /* 67 | * Key-Value pairs. Can be used for HTTP headers but also for 68 | * query argument parsing. 69 | */ 70 | struct evkeyval { 71 | TAILQ_ENTRY(evkeyval) next; 72 | 73 | char *key; 74 | char *value; 75 | }; 76 | 77 | #ifdef _EVENT_DEFINED_TQENTRY 78 | #undef TAILQ_ENTRY 79 | struct event_list; 80 | struct evkeyvalq; 81 | #undef _EVENT_DEFINED_TQENTRY 82 | #else 83 | TAILQ_HEAD (event_list, event); 84 | TAILQ_HEAD (evkeyvalq, evkeyval); 85 | #endif /* _EVENT_DEFINED_TQENTRY */ 86 | #ifdef _EVENT_DEFINED_RBENTRY 87 | #undef RB_ENTRY 88 | #undef _EVENT_DEFINED_RBENTRY 89 | #endif /* _EVENT_DEFINED_RBENTRY */ 90 | 91 | struct eventop { 92 | char *name; 93 | void *(*init)(struct event_base *); 94 | int (*add)(void *, struct event *); 95 | int (*del)(void *, struct event *); 96 | int (*recalc)(struct event_base *, void *, int); 97 | int (*dispatch)(struct event_base *, void *, struct timeval *); 98 | void (*dealloc)(struct event_base *, void *); 99 | }; 100 | 101 | /* These functions deal with buffering input and output */ 102 | 103 | struct evbuffer { 104 | u_char *buffer; 105 | u_char *orig_buffer; 106 | 107 | size_t misalign; 108 | size_t totallen; 109 | size_t off; 110 | 111 | void (*cb)(struct evbuffer *, size_t, size_t, void *); 112 | void *cbarg; 113 | }; 114 | 115 | /* Just for error reporting - use other constants otherwise */ 116 | #define EVBUFFER_READ 0x01 117 | #define EVBUFFER_WRITE 0x02 118 | #define EVBUFFER_EOF 0x10 119 | #define EVBUFFER_ERROR 0x20 120 | #define EVBUFFER_TIMEOUT 0x40 121 | 122 | struct bufferevent; 123 | typedef void (*evbuffercb)(struct bufferevent *, void *); 124 | typedef void (*everrorcb)(struct bufferevent *, short what, void *); 125 | 126 | struct event_watermark { 127 | size_t low; 128 | size_t high; 129 | }; 130 | 131 | struct bufferevent { 132 | struct event ev_read; 133 | struct event ev_write; 134 | 135 | struct evbuffer *input; 136 | struct evbuffer *output; 137 | 138 | struct event_watermark wm_read; 139 | struct event_watermark wm_write; 140 | 141 | evbuffercb readcb; 142 | evbuffercb writecb; 143 | everrorcb errorcb; 144 | void *cbarg; 145 | 146 | int timeout_read; /* in seconds */ 147 | int timeout_write; /* in seconds */ 148 | 149 | short enabled; /* events that are currently enabled */ 150 | }; 151 | 152 | struct bufferevent *bufferevent_new(int fd, 153 | evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg); 154 | int bufferevent_base_set(struct event_base *base, struct bufferevent *bufev); 155 | int bufferevent_priority_set(struct bufferevent *bufev, 
int pri); 156 | void bufferevent_free(struct bufferevent *bufev); 157 | int bufferevent_write(struct bufferevent *bufev, const void *data, size_t size); 158 | int bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf); 159 | size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size); 160 | int bufferevent_enable(struct bufferevent *bufev, short event); 161 | int bufferevent_disable(struct bufferevent *bufev, short event); 162 | void bufferevent_settimeout(struct bufferevent *bufev, 163 | int timeout_read, int timeout_write); 164 | 165 | #define EVBUFFER_LENGTH(x) (x)->off 166 | #define EVBUFFER_DATA(x) (x)->buffer 167 | #define EVBUFFER_INPUT(x) (x)->input 168 | #define EVBUFFER_OUTPUT(x) (x)->output 169 | 170 | struct evbuffer *evbuffer_new(void); 171 | void evbuffer_free(struct evbuffer *); 172 | int evbuffer_expand(struct evbuffer *, size_t); 173 | int evbuffer_add(struct evbuffer *, const void *, size_t); 174 | int evbuffer_remove(struct evbuffer *, void *, size_t); 175 | char *evbuffer_readline(struct evbuffer *); 176 | int evbuffer_add_buffer(struct evbuffer *, struct evbuffer *); 177 | int evbuffer_add_printf(struct evbuffer *, const char *fmt, ...); 178 | int evbuffer_add_vprintf(struct evbuffer *, const char *fmt, va_list ap); 179 | void evbuffer_drain(struct evbuffer *, size_t); 180 | int evbuffer_write(struct evbuffer *, int); 181 | int evbuffer_read(struct evbuffer *, int, int); 182 | u_char *evbuffer_find(struct evbuffer *, const u_char *, size_t); 183 | void evbuffer_setcb(struct evbuffer *, void (*)(struct evbuffer *, size_t, size_t, void *), void *); 184 | 185 | /* 186 | * Marshaling tagged data - We assume that all tags are inserted in their 187 | * numeric order - so that unknown tags will always be higher than the 188 | * known ones - and we can just ignore the end of an event buffer. 189 | */ 190 | 191 | void evtag_init(void); 192 | 193 | void evtag_marshal(struct evbuffer *evbuf, uint32_t tag, const void *data, 194 | uint32_t len); 195 | 196 | void encode_int(struct evbuffer *evbuf, uint32_t number); 197 | 198 | void evtag_marshal_int(struct evbuffer *evbuf, uint32_t tag, uint32_t integer); 199 | 200 | void evtag_marshal_string(struct evbuffer *buf, uint32_t tag, 201 | const char *string); 202 | 203 | void evtag_marshal_timeval(struct evbuffer *evbuf, uint32_t tag, 204 | struct timeval *tv); 205 | 206 | int evtag_unmarshal(struct evbuffer *src, uint32_t *ptag, struct evbuffer *dst); 207 | int evtag_peek(struct evbuffer *evbuf, uint32_t *ptag); 208 | int evtag_peek_length(struct evbuffer *evbuf, uint32_t *plength); 209 | int evtag_payload_length(struct evbuffer *evbuf, uint32_t *plength); 210 | int evtag_consume(struct evbuffer *evbuf); 211 | 212 | int evtag_unmarshal_int(struct evbuffer *evbuf, uint32_t need_tag, 213 | uint32_t *pinteger); 214 | 215 | int evtag_unmarshal_fixed(struct evbuffer *src, uint32_t need_tag, void *data, 216 | size_t len); 217 | 218 | int evtag_unmarshal_string(struct evbuffer *evbuf, uint32_t need_tag, 219 | char **pstring); 220 | 221 | int evtag_unmarshal_timeval(struct evbuffer *evbuf, uint32_t need_tag, 222 | struct timeval *ptv); 223 | 224 | #ifdef __cplusplus 225 | } 226 | #endif 227 | -------------------------------------------------------------------------------- /src/import_libevent: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | LE=../libevent-1.4.3-stable 4 | 5 | if ! 
[ -e evbuffer.c ]; then 6 | echo do not run this programm unless you know what you are doing 7 | exit 1 8 | fi 9 | 10 | # this program combines libev and libevent into a single package 11 | 12 | cvs update -AdP libev 13 | rsync -avP libev/. . --exclude CVS 14 | 15 | rm -f configure.ac 16 | 17 | cp $LE/evdns.h . 18 | 19 | perl -i -pe 's%^/.libevent-include./%#include "event_compat.h"%' event.h 20 | 21 | perl -ne ' 22 | s/\s+char buf\[64\];/\tchar buf[96];/; 23 | if (/#include "event.h"/) { 24 | print "#ifndef EV_STANDALONE\n$_#endif\n"; 25 | next; 26 | } 27 | if (/#include "misc.h"/) { 28 | print "#ifndef EV_STANDALONE\n$_#endif\n"; 29 | next; 30 | } 31 | if (/#include "(unistd.h|sys\/time.h)"/) { 32 | print "#ifndef WIN32\n$_#endif\n"; 33 | next; 34 | } 35 | next if /#include "log.h"/; 36 | 37 | print; 38 | ' <$LE/evdns.c >evdns.c 39 | 40 | cp $LE/autogen.sh . 41 | cp $LE/epoll_sub.c . 42 | cp $LE/evbuffer.c . 43 | cp $LE/buffer.c . 44 | cp $LE/evhttp.h . 45 | cp $LE/evutil.h . 46 | cp $LE/evutil.c . 47 | cp $LE/event-config.h . 48 | cp $LE/event-internal.h . 49 | cp $LE/evrpc.h . 50 | cp $LE/evrpc.c . 51 | cp $LE/evrpc-internal.h . 52 | cp $LE/http.c . 53 | cp $LE/event_tagging.c . 54 | cp $LE/http-internal.h . 55 | cp $LE/strlcpy-internal.h . 56 | cp $LE/log.c . 57 | cp $LE/log.h . 58 | cp $LE/strlcpy.c . 59 | rsync -a $LE/WIN32* $LE/sample $LE/test $LE/compat . --del 60 | #rename 's/libevent/libev/' WIN32-Prj/lib* 61 | cp $LE/aclocal.m4 . 62 | #cp $LE/acconfig.h . 63 | cp $LE/config.h.in . 64 | cp $LE/event_rpcgen.py . 65 | cp $LE/*.3 . 66 | 67 | #perl -i -pe 's/libevent/libev/g' sample/Makefile.am 68 | #perl -i -pe 's/libevent/libev/g' test/Makefile.am 69 | 70 | perl -i -pe 's/#include $/#include "event.h"/' test/*.c 71 | 72 | perl -i -ne ' 73 | next if /"event-internal.h"/; 74 | s/base\d?->sig.ev_signal_added/0/; 75 | s/base\d?->sig.ev_signal_pair\[0\]/-1/; 76 | s/base->sig.evsignal_caught/0/; 77 | next if /^\ttest_signal_(dealloc|pipeloss|switchbase|assert|restore)\(\)/; 78 | next if /^\ttest_simplesignal\(\)/; # non-default-loop 79 | next if /^\ttest_immediatesignal\(\)/; # non-default-loop 80 | next if /test_priorities\(\d\)/; 81 | print; 82 | ' test/regress.c 83 | 84 | perl -ne ' 85 | s/\bmin_heap.h\b//g; 86 | s/\bsignal.c\b//g; 87 | s/\bevport.c\b//g; 88 | s/\bkqueue.c\b//g; 89 | s/\bdevpoll.c\b//g; 90 | s/\brtsig.c\b//g; 91 | s/\bselect.c\b//g; 92 | s/\bpoll.c\b//g; 93 | s/\bepoll.c\b//g; 94 | s/\bepoll_sub.c\b//g; 95 | s/\bevent-internal.h\b//g; 96 | s/\bevsignal.h\b//g; 97 | s/^(man_MANS\s*=)/$1 ev.3 /; 98 | s/^(EXTRA_DIST\s*=)/$1 libev.m4 ev.h ev_vars.h ev_wrap.h event_compat.h ev++.h ev_epoll.c ev_select.c ev_poll.c ev_kqueue.c ev_port.c ev_win32.c ev.3 ev.pod /; 99 | s/^(include_HEADERS\s*=)/$1 ev.h event_compat.h ev++.h /; 100 | s/^(CORE_SRC\s*=)/$1 ev.c /; 101 | s/^(SYS_LIBS\s*=)/$1 -lm /; 102 | #s/libevent/libev/g; 103 | print; 104 | ' <$LE/Makefile.am >Makefile.am 105 | 106 | perl -ne ' 107 | #s/-Wall/-Wall -Wno-comment -Wunused-function -Wno-unused-value/; 108 | s/-Wall//g; 109 | #s/libevent/libev/g; 110 | #VERSION 111 | s/AM_INIT_AUTOMAKE\s*\(.*,(.*)\)/AM_INIT_AUTOMAKE(libevent-$1+libev,3.1)/; 112 | s/AC_LIBOBJ\(select\)/: ;/g; 113 | s/AC_LIBOBJ\(poll\)/: ;/g; 114 | s/AC_LIBOBJ\(kqueue\)/: ;/g; 115 | s/AC_LIBOBJ\(epoll\)/: ;/g; 116 | s/AC_LIBOBJ\(devpoll\)/: ;/g; 117 | s/AC_LIBOBJ\(evport\)/: ;/g; 118 | s/AC_LIBOBJ\(signal\)/: ;/g; 119 | s/AC_LIBOBJ\(rtsig\)/: ;/g; 120 | print "m4_include([libev.m4])\n" if /^AC_OUTPUT/; 121 | print; 122 | ' <$LE/configure.in 
>configure.in 123 | 124 | aclocal-1.7 125 | automake-1.7 --add-missing 126 | autoconf 127 | autoheader 128 | libtoolize 129 | CC="ccache gcc" ./configure --prefix=/opt/libev --disable-shared "$@" 130 | 131 | 132 | -------------------------------------------------------------------------------- /src/libev.m4: -------------------------------------------------------------------------------- 1 | dnl this file is part of libev, do not make local modifications 2 | dnl http://software.schmorp.de/pkg/libev 3 | 4 | dnl libev support 5 | AC_CHECK_HEADERS(sys/inotify.h sys/epoll.h sys/event.h port.h poll.h sys/timerfd.h) 6 | AC_CHECK_HEADERS(sys/select.h sys/eventfd.h sys/signalfd.h linux/aio_abi.h linux/fs.h) 7 | 8 | AC_CHECK_FUNCS(inotify_init epoll_ctl kqueue port_create poll select eventfd signalfd) 9 | 10 | AC_CHECK_FUNCS(clock_gettime, [], [ 11 | dnl on linux, try syscall wrapper first 12 | if test $(uname) = Linux; then 13 | AC_MSG_CHECKING(for clock_gettime syscall) 14 | AC_LINK_IFELSE([AC_LANG_PROGRAM( 15 | [#include 16 | #include 17 | #include ], 18 | [struct timespec ts; int status = syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts)])], 19 | [ac_have_clock_syscall=1 20 | AC_DEFINE(HAVE_CLOCK_SYSCALL, 1, Define to 1 to use the syscall interface for clock_gettime) 21 | AC_MSG_RESULT(yes)], 22 | [AC_MSG_RESULT(no)]) 23 | fi 24 | if test -z "$LIBEV_M4_AVOID_LIBRT" && test -z "$ac_have_clock_syscall"; then 25 | AC_CHECK_LIB(rt, clock_gettime) 26 | unset ac_cv_func_clock_gettime 27 | AC_CHECK_FUNCS(clock_gettime) 28 | fi 29 | ]) 30 | 31 | AC_CHECK_FUNCS(nanosleep, [], [ 32 | if test -z "$LIBEV_M4_AVOID_LIBRT"; then 33 | AC_CHECK_LIB(rt, nanosleep) 34 | unset ac_cv_func_nanosleep 35 | AC_CHECK_FUNCS(nanosleep) 36 | fi 37 | ]) 38 | 39 | AC_CHECK_TYPE(__kernel_rwf_t, [ 40 | AC_DEFINE(HAVE_KERNEL_RWF_T, 1, Define to 1 if linux/fs.h defined kernel_rwf_t) 41 | ], [], [#include ]) 42 | 43 | if test -z "$LIBEV_M4_AVOID_LIBM"; then 44 | LIBM=m 45 | fi 46 | AC_SEARCH_LIBS(floor, $LIBM, [AC_DEFINE(HAVE_FLOOR, 1, Define to 1 if the floor function is available)]) 47 | 48 | -------------------------------------------------------------------------------- /src/update_ev_c: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | ( 4 | sed -ne '1,\%/\* ECB.H BEGIN \*/%p' ev.c 5 | #perl -ne 'print unless /^#if ECB_CPP/ .. 
/^#endif/' <~/src/libecb/ecb.h 6 | cat ~/src/libecb/ecb.h 7 | sed -ne '\%/\* ECB.H END \*/%,$p' ev.c 8 | ) >ev.c~ && mv ev.c~ ev.c 9 | 10 | -------------------------------------------------------------------------------- /src/update_ev_wrap: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ( 4 | echo '#define VAR(name,decl) name' 5 | echo '#define EV_GENWRAP 1' 6 | cat ev_vars.h 7 | ) | cc -E -o - - | perl -ne ' 8 | while (<>) { 9 | push @syms, $1 if /(^\w+)/; 10 | } 11 | print "/* DO NOT EDIT, automatically generated by update_ev_wrap */\n", 12 | "#ifndef EV_WRAP_H\n", 13 | "#define EV_WRAP_H\n", 14 | (map "#define $_ ((loop)->$_)\n", sort @syms), 15 | "#else\n", 16 | "#undef EV_WRAP_H\n", 17 | (map "#undef $_\n", sort @syms), 18 | "#endif\n"; 19 | ' >ev_wrap.h 20 | -------------------------------------------------------------------------------- /src/update_symbols: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | make ev.o event.o || exit 4 | 5 | nm ev.o | perl -ne 'print "$1\n" if /\S+ [A-Z] (\S+)/' > Symbols.ev 6 | nm event.o | perl -ne 'print "$1\n" if /\S+ [A-Z] (\S+)/' > Symbols.event 7 | 8 | -------------------------------------------------------------------------------- /util/sync-cvs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | 3 | my $verfile = 'cvs-vers'; 4 | 5 | my %vers = (); 6 | 7 | if (-f $verfile) { 8 | open(my $f, '<', $verfile) or die $!; 9 | while (<$f>) { 10 | if (m/^(.+):([\d.]+)$/) { 11 | $vers{$1} = $2; 12 | } 13 | } 14 | close $f; 15 | } 16 | 17 | my $rpath = "http://cvs.schmorp.de/libev"; 18 | open(my $in, '-|', "curl -s $rpath/") or die $!; 19 | 20 | my %newf = (); 21 | 22 | while (<$in>) { 23 | if (m/href="\/libev\/([^?]+)\?revision=([\d.]+)/) { 24 | my $f = $1; 25 | $v = $2; 26 | $f =~ s/%2B/+/g; 27 | $newf{$f} = $v; 28 | } 29 | } 30 | 31 | my $w = 0; 32 | 33 | for my $key (keys %newf) { 34 | my $v = $newf{$key}; 35 | my $oldv = $vers{$key}; 36 | 37 | print "file \"$key\" $oldv -> $v\n"; 38 | if ((!$oldv) || $v > $oldv) { 39 | print "downloading $key:\n"; 40 | @args = ('curl', '-m', '30', "$rpath/$key", '-o', "src/$key"); 41 | if (system(@args) == 0) { 42 | $w = 1; 43 | $vers{$key} = $v; 44 | } else { 45 | print "download $key fail.\n"; 46 | } 47 | } 48 | } 49 | 50 | if ($w > 0) { 51 | open(my $f, '>', $verfile) or die $!; 52 | for my $key (sort keys %vers) { 53 | printf $f "%s:%s\n", $key, $vers{$key}; 54 | } 55 | close $f; 56 | 57 | print "file change\n"; 58 | } else { 59 | print "all file unchange\n"; 60 | } 61 | 62 | print "done\n"; 63 | 64 | --------------------------------------------------------------------------------
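
The two compatibility files above (src/event.h declaring the core libevent API, src/event.c mapping it onto ev_io/ev_signal/ev_timer watchers) are easiest to follow from the caller's side. The sketch below is a minimal, hypothetical program written against that API; the stdin descriptor, the 2.5-second timeout, the file name demo.c and the build command are illustrative assumptions, not taken from the repository.

#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include "event.h"                      /* compatibility header from src/ */

static struct event ev_stdin;
static struct event ev_timeout;

/* libevent-style callback: fd, triggered events, user argument */
static void on_stdin (int fd, short events, void *arg)
{
  char buf[256];
  ssize_t len = read (fd, buf, sizeof buf);
  printf ("fd %d readable (events=0x%x), read %ld bytes\n",
          fd, (unsigned)events, (long)len);
}

static void on_timeout (int fd, short events, void *arg)
{
  /* fd is -1 for pure timers; events contains EV_TIMEOUT */
  printf ("timeout fired (events=0x%x)\n", (unsigned)events);
}

int main (void)
{
  struct timeval tv = { 2, 500000 };    /* 2.5 s, purely illustrative */

  event_init ();                        /* sets up the default libev loop */

  /* persistent read watcher on stdin: event_add maps it onto an ev_io */
  event_set (&ev_stdin, 0, EV_READ | EV_PERSIST, on_stdin, NULL);
  event_add (&ev_stdin, NULL);          /* NULL timeout: no ev_timer armed */

  /* one-shot relative timeout: mapped onto the embedded ev_timer */
  evtimer_set (&ev_timeout, on_timeout, NULL);
  evtimer_add (&ev_timeout, &tv);

  return event_dispatch ();             /* event_base_loop (.., 0), i.e. ev_run */
}

Compiled together with the compatibility sources (for example cc demo.c ev.c event.c -lm; the file name and paths are assumed), this behaves like an ordinary libevent program while running on whichever libev backend was configured.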
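
One detail of util/sync-cvs worth flagging: it decides whether a file needs re-downloading with a plain numeric comparison ($v > $oldv), which mis-orders dotted CVS revisions once a component gains a digit (for example, "1.10" numifies to 1.1 and so compares as older than "1.9", and such an update would be skipped). Comparing the revisions component by component avoids this. The sketch below shows the idea in C; the function name rev_cmp, its guard against malformed input and the sample revisions are illustrative, not part of the repository, and the same component-wise comparison could equally be written directly in the Perl script.

#include <stdio.h>
#include <stdlib.h>

/* Compare two dotted revision strings component by component.
   Returns <0, 0 or >0, strcmp-style.  Sketch only: assumes the
   inputs look like CVS revisions such as "1.9" or "1.534". */
static int rev_cmp (const char *a, const char *b)
{
  while (*a || *b)
    {
      char *ea, *eb;
      long x = strtol (a, &ea, 10);     /* next numeric component, 0 if missing */
      long y = strtol (b, &eb, 10);

      if (x != y)
        return x < y ? -1 : 1;

      if (ea == a && eb == b)
        break;                          /* neither string has further components */

      a = *ea == '.' ? ea + 1 : ea;     /* skip the dot separating components */
      b = *eb == '.' ? eb + 1 : eb;
    }

  return 0;
}

int main (void)
{
  /* 1.10 is the newer revision, although 1.10 < 1.9 when compared as numbers */
  printf ("%d\n", rev_cmp ("1.10", "1.9") > 0);   /* prints 1 */
  return 0;
}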