├── .gitmodules ├── .travis.yml ├── CMakeLists.txt ├── LICENSE ├── README.md ├── h3cli.c ├── libev ├── ev.c ├── ev.h ├── ev_epoll.c ├── ev_iouring.c ├── ev_kqueue.c ├── ev_linuxaio.c ├── ev_poll.c ├── ev_port.c ├── ev_select.c ├── ev_vars.h ├── ev_win32.c └── ev_wrap.h ├── mycert-cert.pem ├── mycert-key.pem ├── tools ├── gen-tags.sh └── print-glibc-version.sh └── tut.c /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "boringssl"] 2 | path = boringssl 3 | url = https://boringssl.googlesource.com/boringssl 4 | [submodule "lsquic"] 5 | path = lsquic 6 | url = https://github.com/dtikhonov/lsquic 7 | branch = 202002141134-tutorial-mods 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: c 2 | os: linux 3 | dist: xenial 4 | jobs: 5 | include: 6 | - name: Linux (gcc) 7 | os: linux 8 | compiler: gcc 9 | - name: Linux (clang) 10 | os: linux 11 | compiler: clang 12 | before_install: 13 | - if [ $TRAVIS_OS_NAME = linux ]; then sudo add-apt-repository -y ppa:longsleep/golang-backports; fi 14 | - if [ $TRAVIS_OS_NAME = linux ]; then sudo apt-get update; fi 15 | - if [ $TRAVIS_OS_NAME = linux ]; then sudo apt-get install golang-1.13-go; fi 16 | install: 17 | - if [ $TRAVIS_OS_NAME = linux ]; then export PATH=/usr/lib/go-1.13/bin:$PATH; fi 18 | - if [ $TRAVIS_OS_NAME = linux ]; then export GOPATH=/usr/lib/go-1.13:$GOPATH; fi 19 | - if [ $TRAVIS_OS_NAME = linux ]; then export GOROOT=/usr/lib/go-1.13; fi 20 | before_script: 21 | - git submodule init 22 | - git submodule update --recursive 23 | - cmake . 
24 | script: 25 | - make 26 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 3.5) 2 | PROJECT(lsquic-tutorial C) 3 | 4 | IF(CMAKE_BUILD_TYPE STREQUAL "") 5 | SET(CMAKE_BUILD_TYPE Debug) 6 | ENDIF() 7 | MESSAGE(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 8 | IF (CMAKE_BUILD_TYPE STREQUAL Debug) 9 | SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0") 10 | ELSEIF (CMAKE_BUILD_TYPE STREQUAL Release) # BUGFIX: was a duplicated "Debug" test, leaving the -O2 branch unreachable 11 | SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O2") 12 | ENDIF() 13 | SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} $ENV{EXTRA_CFLAGS}") 14 | 15 | MESSAGE(STATUS "Compiler flags: ${CMAKE_C_FLAGS}") 16 | 17 | ADD_SUBDIRECTORY(boringssl) 18 | 19 | INCLUDE_DIRECTORIES(boringssl/include) 20 | INCLUDE_DIRECTORIES(lsquic/include) 21 | INCLUDE_DIRECTORIES(lsquic/src/liblsquic) # For lsquic_xxhash.h 22 | ADD_SUBDIRECTORY(lsquic/src/liblsquic) 23 | 24 | INCLUDE_DIRECTORIES(libev) 25 | ADD_EXECUTABLE(tut tut.c) 26 | ADD_EXECUTABLE(h3cli h3cli.c) 27 | SET(LIBS lsquic crypto ssl z m) 28 | IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") 29 | # If using older glibc, need to link with -lrt. See clock_getres(2). 
30 | EXECUTE_PROCESS( 31 | # BUGFIX: script is located under tools/ per the repository layout; strip the trailing newline so VERSION_LESS compares cleanly 32 | COMMAND ${PROJECT_SOURCE_DIR}/tools/print-glibc-version.sh ${CMAKE_C_COMPILER} 33 | OUTPUT_VARIABLE GLIBC_VERSION 34 | OUTPUT_STRIP_TRAILING_WHITESPACE) 35 | # BUGFIX: STREQUAL (string compare) instead of EQUAL (numeric compare) for the emptiness check 36 | IF(NOT GLIBC_VERSION STREQUAL "" AND GLIBC_VERSION VERSION_LESS 2.17) 37 | SET(LIBS ${LIBS} rt) 38 | ENDIF() 39 | ENDIF() 40 | TARGET_LINK_LIBRARIES(tut ${LIBS}) 41 | TARGET_LINK_LIBRARIES(h3cli ${LIBS}) 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Dmitri Tikhonov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/dtikhonov/lsquic-tutorial.svg?branch=master)](https://travis-ci.org/dtikhonov/lsquic-tutorial) 2 | 3 | # lsquic-tutorial 4 | lsquic tutorial teaches one how to build an application using [lsquic](https://github.com/litespeedtech/lsquic). 5 | 6 | ## Description 7 | The tutorial program, tut.c, contains client and server logic for a simple echo program. 8 | The client connects to the server and sends lines of text; the server reverses the lines of text and sends them back. 9 | 10 | The tutorial program was written as an educational aid. Various aspects of the ways LSQUIC 11 | is used in it are considered in [Netdev 0x14 slides](https://github.com/dtikhonov/talks/blob/master/netdev-0x14/lsquic-slides.md). 12 | In addition to the slides, please refer to the [LSQUIC API Reference](https://lsquic.readthedocs.io/en/latest/apiref.html). 13 | 14 | tut.c contains several versions of reading and writing from stream to illustrate different ways of 15 | using the library. There are also two ways to send packets. 16 | 17 | ## Building 18 | To build the tutorial: 19 | ```bash 20 | git submodule update --init --recursive 21 | cmake . 22 | make 23 | ``` 24 | 25 | This clones and builds BoringSSL, so it may take a few minutes. 26 | 27 | ## Usage 28 | 29 | ### All options 30 | ```bash 31 | sh$ ./tut -h 32 | Usage: tut [-c cert -k key] [options] IP port 33 | 34 | -c cert.file Certificate. 35 | -k key.file Key file. 36 | -f log.file Log message to this log file. If not specified, the 37 | are printed to stderr. 38 | -L level Set library-wide log level. Defaults to 'warn'. 39 | -l module=level Set log level of specific module. Several of these 40 | can be specified via multiple -l flags or by combining 41 | these with comma, e.g. -l event=debug,conn=info. 
42 | -v Verbose: log program messages as well. 43 | -b VERSION Use callbacks version VERSION. 44 | -p VERSION Use packets_out version VERSION. 45 | -w VERSION Use server write callback version VERSION. 46 | -o opt=val Set lsquic engine setting to some value, overriding the 47 | defaults. For example, 48 | -o version=ff00001c -o cc_algo=2 49 | -G DIR Log TLS secrets to a file in directory DIR. 50 | -h Print this help screen and exit. 51 | ``` 52 | 53 | ### Running the server 54 | Both client and server logic are contained in the `tut` program. It knows it is meant to run in the server 55 | mode when `-c` and `-k` options are specified: 56 | 57 | ```bash 58 | sh$ ./tut -c mycert-cert.pem -k mycert-key.pem ::0 12345 -p 1 -L debug -f server.log 59 | ``` 60 | 61 | The server can select one of two versions of "on stream write" callbacks. Use `-w` command-line option for that. 62 | 63 | ### Running the client 64 | ```bash 65 | sh$ ./tut ::1 12345 -L debug -f client.log 66 | Hello! 67 | !olleH 68 | ^D 69 | sh$ 70 | ``` 71 | 72 | The server can select one of three versions of "on stream read" callbacks. Use `-b` command-line option for that. 73 | 74 | Both client and server can use the `-p` option to select one of two "send packets out" callbacks. 75 | 76 | ## HTTP/3 Client 77 | As a bonus, a simple [HTTP/3](https://en.wikipedia.org/wiki/HTTP/3) client is provided. 
Example: 78 | 79 | ```bash 80 | sh$ ./h3cli -M HEAD www.litespeedtech.com 443 / 81 | HTTP/1.1 200 OK 82 | x-powered-by: PHP/7.3.5 83 | x-logged-in: False 84 | x-content-powered-by: K2 v2.7.1 (by JoomlaWorks) 85 | content-type: text/html; charset=utf-8 86 | expires: Wed, 17 Aug 2005 00:00:00 GMT 87 | last-modified: Wed, 12 Aug 2020 18:54:05 GMT 88 | cache-control: no-store, no-cache, must-revalidate, post-check=0, pre-check=0 89 | pragma: no-cache 90 | etag: "23485-1597258445;gz" 91 | vary: Accept-Encoding 92 | x-frame-options: SAMEORIGIN 93 | x-lsadc-cache: hit 94 | date: Thu, 13 Aug 2020 02:48:06 GMT 95 | server: LiteSpeed 96 | ``` 97 | 98 | Besides www.litespeedtech.com, other websites to try are www.facebook.com and www.google.com. 99 | 100 | ## More Information 101 | Latest QUIC and HTTP/3 GitHub artefacts can be found [here](https://github.com/quicwg/base-drafts). 102 | The QUIC IETF Working Group materials are [here](https://datatracker.ietf.org/wg/quic/about/). 103 | 104 | The [LSQUIC GitHub Repo](https://github.com/litespeedtech/lsquic) contains several more advanced examples, among 105 | them HTTP/3 client and server programs. 106 | -------------------------------------------------------------------------------- /h3cli.c: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2020 LiteSpeed Technologies */ 2 | /* 3 | * h3cli.c is a simple HTTP/3 client. It is used to illustrate how to use 4 | * lsquic HTTP/3 API. 5 | * 6 | * Example: h3cli www.litespeedtech.com 443 / -M HEAD 7 | */ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | 25 | #define EV_STANDALONE 1 26 | #define EV_API_STATIC 1 27 | #include "ev.c" 28 | 29 | #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) 30 | 31 | #include "lsquic.h" 32 | #include "lsxpack_header.h" 33 | 34 | 35 | static FILE *s_log_fh; 36 | 37 | 38 | struct h3cli 39 | { 40 | int h3cli_sock_fd; /* socket */ 41 | ev_io h3cli_sock_w; /* socket watcher */ 42 | ev_timer h3cli_timer; 43 | struct ev_loop *h3cli_loop; 44 | lsquic_engine_t *h3cli_engine; 45 | const char *h3cli_method; 46 | const char *h3cli_path; 47 | const char *h3cli_hostname; 48 | lsquic_conn_t *h3cli_conn; 49 | struct sockaddr_storage h3cli_local_sas; 50 | }; 51 | 52 | static void h3cli_process_conns (struct h3cli *); 53 | 54 | static int 55 | h3cli_log_buf (void *ctx, const char *buf, size_t len) 56 | { 57 | FILE *out = ctx; 58 | fwrite(buf, 1, len, out); 59 | fflush(out); 60 | return 0; 61 | } 62 | static const struct lsquic_logger_if logger_if = { h3cli_log_buf, }; 63 | 64 | 65 | static int s_verbose; 66 | static void 67 | LOG (const char *fmt, ...) 68 | { 69 | if (s_verbose) 70 | { 71 | va_list ap; 72 | fprintf(s_log_fh, "LOG: "); 73 | va_start(ap, fmt); 74 | (void) vfprintf(s_log_fh, fmt, ap); 75 | va_end(ap); 76 | fprintf(s_log_fh, "\n"); 77 | } 78 | } 79 | 80 | 81 | static int 82 | h3cli_packets_out (void *packets_out_ctx, const struct lsquic_out_spec *specs, 83 | unsigned count) 84 | { 85 | unsigned n; 86 | int fd, s = 0; 87 | struct msghdr msg; 88 | 89 | if (0 == count) 90 | return 0; 91 | 92 | n = 0; 93 | msg.msg_flags = 0; 94 | msg.msg_control = NULL; 95 | msg.msg_controllen = 0; 96 | do 97 | { 98 | fd = (int) (uint64_t) specs[n].peer_ctx; 99 | msg.msg_name = (void *) specs[n].dest_sa; 100 | msg.msg_namelen = (AF_INET == specs[n].dest_sa->sa_family ? 
101 | sizeof(struct sockaddr_in) : 102 | sizeof(struct sockaddr_in6)), 103 | msg.msg_iov = specs[n].iov; 104 | msg.msg_iovlen = specs[n].iovlen; 105 | s = sendmsg(fd, &msg, 0); 106 | if (s < 0) 107 | { 108 | LOG("sendmsg failed: %s", strerror(errno)); 109 | break; 110 | } 111 | ++n; 112 | } 113 | while (n < count); 114 | 115 | if (n < count) 116 | LOG("could not send all of them"); /* TODO */ 117 | 118 | if (n > 0) 119 | return n; 120 | else 121 | { 122 | assert(s < 0); 123 | return -1; 124 | } 125 | } 126 | 127 | 128 | static void 129 | h3cli_usage (const char *argv0) 130 | { 131 | const char *name; 132 | 133 | name = strchr(argv0, '/'); 134 | if (name) 135 | ++name; 136 | else 137 | name = argv0; 138 | 139 | fprintf(stdout, 140 | "Usage: %s [options] hostname port path\n" 141 | "\n" 142 | " -L level Set library-wide log level. Defaults to 'warn'.\n" 143 | " -l module=level Set log level of specific module. Several of these\n" 144 | " can be specified via multiple -l flags or by combining\n" 145 | " these with comma, e.g. -l event=debug,conn=info.\n" 146 | " -v Verbose: log program messages as well.\n" 147 | " -M METHOD Method. GET by default.\n" 148 | " -o opt=val Set lsquic engine setting to some value, overriding the\n" 149 | " defaults. 
For example,\n" 150 | " -o version=ff00001c -o cc_algo=2\n" 151 | " -G DIR Log TLS secrets to a file in directory DIR.\n" 152 | " -h Print this help screen and exit.\n" 153 | , name); 154 | } 155 | 156 | 157 | static lsquic_conn_ctx_t * 158 | h3cli_client_on_new_conn (void *stream_if_ctx, struct lsquic_conn *conn) 159 | { 160 | struct h3cli *const h3cli = stream_if_ctx; 161 | LOG("created connection"); 162 | lsquic_conn_make_stream(conn); 163 | return (void *) h3cli; 164 | } 165 | 166 | 167 | static void 168 | h3cli_client_on_conn_closed (struct lsquic_conn *conn) 169 | { 170 | struct h3cli *const h3cli = (void *) lsquic_conn_get_ctx(conn); 171 | 172 | LOG("client connection closed -- stop reading from socket"); 173 | ev_io_stop(h3cli->h3cli_loop, &h3cli->h3cli_sock_w); 174 | } 175 | 176 | 177 | static lsquic_stream_ctx_t * 178 | h3cli_client_on_new_stream (void *stream_if_ctx, struct lsquic_stream *stream) 179 | { 180 | struct h3cli *h3cli = stream_if_ctx; 181 | LOG("created new stream, we want to write"); 182 | lsquic_stream_wantwrite(stream, 1); 183 | /* return h3cli: we don't have any stream-specific context */ 184 | return (void *) h3cli; 185 | } 186 | 187 | 188 | static void 189 | h3cli_client_on_read (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 190 | { 191 | struct h3cli *h3cli = (struct h3cli *) h; 192 | ssize_t nread; 193 | unsigned char buf[0x1000]; 194 | 195 | nread = lsquic_stream_read(stream, buf, sizeof(buf)); 196 | if (nread > 0) 197 | { 198 | fwrite(buf, 1, nread, stdout); 199 | fflush(stdout); 200 | } 201 | else if (nread == 0) 202 | { 203 | LOG("read to end-of-stream: close connection"); 204 | lsquic_stream_shutdown(stream, 0); 205 | lsquic_conn_close( lsquic_stream_conn(stream) ); 206 | } 207 | else 208 | { 209 | LOG("error reading from stream (%s) -- exit loop"); 210 | ev_break(h3cli->h3cli_loop, EVBREAK_ONE); 211 | } 212 | } 213 | 214 | 215 | struct header_buf 216 | { 217 | unsigned off; 218 | char buf[UINT16_MAX]; 219 | }; 220 | 221 
| 222 | /* Convenience wrapper around somewhat involved lsxpack APIs */ 223 | int 224 | h3cli_set_header (struct lsxpack_header *hdr, struct header_buf *header_buf, 225 | const char *name, size_t name_len, const char *val, size_t val_len) 226 | { 227 | if (header_buf->off + name_len + val_len <= sizeof(header_buf->buf)) 228 | { 229 | memcpy(header_buf->buf + header_buf->off, name, name_len); 230 | memcpy(header_buf->buf + header_buf->off + name_len, val, val_len); 231 | lsxpack_header_set_offset2(hdr, header_buf->buf + header_buf->off, 232 | 0, name_len, name_len, val_len); 233 | header_buf->off += name_len + val_len; 234 | return 0; 235 | } 236 | else 237 | return -1; 238 | } 239 | 240 | 241 | /* Send HTTP/3 request. We don't support payload, just send the headers. */ 242 | static void 243 | h3cli_client_on_write (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 244 | { 245 | struct h3cli *const h3cli = (void *) h; 246 | struct header_buf hbuf; 247 | struct lsxpack_header harray[5]; 248 | struct lsquic_http_headers headers = { 5, harray, }; 249 | 250 | hbuf.off = 0; 251 | #define V(v) (v), strlen(v) 252 | h3cli_set_header(&harray[0], &hbuf, V(":method"), V(h3cli->h3cli_method)); 253 | h3cli_set_header(&harray[1], &hbuf, V(":scheme"), V("https")); 254 | h3cli_set_header(&harray[2], &hbuf, V(":path"), V(h3cli->h3cli_path)); 255 | h3cli_set_header(&harray[3], &hbuf, V(":authority"), 256 | V(h3cli->h3cli_hostname)); 257 | h3cli_set_header(&harray[4], &hbuf, V("user-agent"), V("h3cli/lsquic")); 258 | 259 | if (0 == lsquic_stream_send_headers(stream, &headers, 0)) 260 | { 261 | lsquic_stream_shutdown(stream, 1); 262 | lsquic_stream_wantread(stream, 1); 263 | } 264 | else 265 | { 266 | LOG("ERROR: lsquic_stream_send_headers failed: %s", strerror(errno)); 267 | lsquic_conn_abort(lsquic_stream_conn(stream)); 268 | } 269 | } 270 | 271 | 272 | static void 273 | h3cli_client_on_close (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 274 | { 275 | LOG("stream 
closed"); 276 | } 277 | 278 | 279 | static struct lsquic_stream_if h3cli_client_callbacks = 280 | { 281 | .on_new_conn = h3cli_client_on_new_conn, 282 | .on_conn_closed = h3cli_client_on_conn_closed, 283 | .on_new_stream = h3cli_client_on_new_stream, 284 | .on_read = h3cli_client_on_read, 285 | .on_write = h3cli_client_on_write, 286 | .on_close = h3cli_client_on_close, 287 | }; 288 | 289 | 290 | static int 291 | h3cli_set_nonblocking (int fd) 292 | { 293 | int flags; 294 | 295 | flags = fcntl(fd, F_GETFL); 296 | if (-1 == flags) 297 | return -1; 298 | flags |= O_NONBLOCK; 299 | if (0 != fcntl(fd, F_SETFL, flags)) 300 | return -1; 301 | 302 | return 0; 303 | } 304 | 305 | 306 | static void 307 | h3cli_timer_expired (EV_P_ ev_timer *timer, int revents) 308 | { 309 | h3cli_process_conns(timer->data); 310 | } 311 | 312 | 313 | static void 314 | h3cli_process_conns (struct h3cli *h3cli) 315 | { 316 | int diff; 317 | ev_tstamp timeout; 318 | 319 | ev_timer_stop(h3cli->h3cli_loop, &h3cli->h3cli_timer); 320 | lsquic_engine_process_conns(h3cli->h3cli_engine); 321 | 322 | if (lsquic_engine_earliest_adv_tick(h3cli->h3cli_engine, &diff)) 323 | { 324 | if (diff >= LSQUIC_DF_CLOCK_GRANULARITY) 325 | /* Expected case: convert to seconds */ 326 | timeout = (ev_tstamp) diff / 1000000; 327 | else if (diff <= 0) 328 | /* It should not happen often that the next tick is in the past 329 | * as we just processed connections. 
Avoid a busy loop by 330 | * scheduling an event: 331 | */ 332 | timeout = 0.0; 333 | else 334 | /* Round up to granularity */ 335 | timeout = (ev_tstamp) LSQUIC_DF_CLOCK_GRANULARITY / 1000000; 336 | LOG("converted diff %d usec to %.4lf seconds", diff, timeout); 337 | ev_timer_init(&h3cli->h3cli_timer, h3cli_timer_expired, timeout, 0.); 338 | ev_timer_start(h3cli->h3cli_loop, &h3cli->h3cli_timer); 339 | } 340 | } 341 | 342 | 343 | static void 344 | h3cli_proc_ancillary (struct msghdr *msg, struct sockaddr_storage *storage, 345 | int *ecn) 346 | { 347 | const struct in6_pktinfo *in6_pkt; 348 | struct cmsghdr *cmsg; 349 | 350 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) 351 | { 352 | if ((cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_TOS) 353 | || (cmsg->cmsg_level == IPPROTO_IPV6 354 | && cmsg->cmsg_type == IPV6_TCLASS)) 355 | { 356 | memcpy(ecn, CMSG_DATA(cmsg), sizeof(*ecn)); 357 | *ecn &= IPTOS_ECN_MASK; 358 | } 359 | } 360 | } 361 | 362 | 363 | #if defined(IP_RECVORIGDSTADDR) 364 | # define DST_MSG_SZ sizeof(struct sockaddr_in) 365 | #else 366 | # define DST_MSG_SZ sizeof(struct in_pktinfo) 367 | #endif 368 | 369 | #define ECN_SZ CMSG_SPACE(sizeof(int)) 370 | 371 | /* Amount of space required for incoming ancillary data */ 372 | #define CTL_SZ ECN_SZ 373 | 374 | 375 | static void 376 | h3cli_read_socket (EV_P_ ev_io *w, int revents) 377 | { 378 | struct h3cli *const h3cli = w->data; 379 | ssize_t nread; 380 | int ecn; 381 | struct sockaddr_storage peer_sas, local_sas; 382 | unsigned char buf[0x1000]; 383 | struct iovec vec[1] = {{ buf, sizeof(buf) }}; 384 | unsigned char ctl_buf[CTL_SZ]; 385 | 386 | struct msghdr msg = { 387 | .msg_name = &peer_sas, 388 | .msg_namelen = sizeof(peer_sas), 389 | .msg_iov = vec, 390 | .msg_iovlen = 1, 391 | .msg_control = ctl_buf, 392 | .msg_controllen = sizeof(ctl_buf), 393 | }; 394 | nread = recvmsg(w->fd, &msg, 0); 395 | if (-1 == nread) { 396 | if (!(EAGAIN == errno || EWOULDBLOCK == errno)) 
397 | LOG("recvmsg: %s", strerror(errno)); 398 | return; 399 | } 400 | 401 | local_sas = h3cli->h3cli_local_sas; 402 | ecn = 0; 403 | h3cli_proc_ancillary(&msg, &local_sas, &ecn); 404 | 405 | (void) lsquic_engine_packet_in(h3cli->h3cli_engine, buf, nread, 406 | (struct sockaddr *) &local_sas, 407 | (struct sockaddr *) &peer_sas, 408 | (void *) (uintptr_t) w->fd, ecn); 409 | 410 | h3cli_process_conns(h3cli); 411 | } 412 | 413 | 414 | static void * 415 | keylog_open (void *ctx, lsquic_conn_t *conn) 416 | { 417 | const char *const dir = ctx ? ctx : "."; 418 | const lsquic_cid_t *cid; 419 | FILE *fh; 420 | int sz; 421 | unsigned i; 422 | char id_str[MAX_CID_LEN * 2 + 1]; 423 | char path[PATH_MAX]; 424 | static const char b2c[16] = "0123456789ABCDEF"; 425 | 426 | cid = lsquic_conn_id(conn); 427 | for (i = 0; i < cid->len; ++i) 428 | { 429 | id_str[i * 2 + 0] = b2c[ cid->idbuf[i] >> 4 ]; 430 | id_str[i * 2 + 1] = b2c[ cid->idbuf[i] & 0xF ]; 431 | } 432 | id_str[i * 2] = '\0'; 433 | sz = snprintf(path, sizeof(path), "%s/%s.keys", dir, id_str); 434 | if ((size_t) sz >= sizeof(path)) 435 | { 436 | LOG("WARN: %s: file too long", __func__); 437 | return NULL; 438 | } 439 | fh = fopen(path, "wb"); 440 | if (!fh) 441 | LOG("WARN: could not open %s for writing: %s", path, strerror(errno)); 442 | return fh; 443 | } 444 | 445 | 446 | static void 447 | keylog_log_line (void *handle, const char *line) 448 | { 449 | fputs(line, handle); 450 | fputs("\n", handle); 451 | fflush(handle); 452 | } 453 | 454 | 455 | static void 456 | keylog_close (void *handle) 457 | { 458 | fclose(handle); 459 | } 460 | 461 | 462 | static const struct lsquic_keylog_if keylog_if = 463 | { 464 | .kli_open = keylog_open, 465 | .kli_log_line = keylog_log_line, 466 | .kli_close = keylog_close, 467 | }; 468 | 469 | 470 | int 471 | main (int argc, char **argv) 472 | { 473 | struct lsquic_engine_api eapi; 474 | const char *cert_file = NULL, *key_file = NULL, *val, *port_str; 475 | int opt, version_cleared = 0, 
settings_initialized = 0; 476 | struct addrinfo hints, *res = NULL; 477 | socklen_t socklen; 478 | struct lsquic_engine_settings settings; 479 | struct h3cli h3cli; 480 | union { 481 | struct sockaddr sa; 482 | struct sockaddr_in addr4; 483 | struct sockaddr_in6 addr6; 484 | } addr; 485 | const char *key_log_dir = NULL; 486 | char errbuf[0x100]; 487 | 488 | s_log_fh = stderr; 489 | 490 | if (0 != lsquic_global_init(LSQUIC_GLOBAL_CLIENT)) 491 | { 492 | fprintf(stderr, "global initialization failed\n"); 493 | exit(EXIT_FAILURE); 494 | } 495 | 496 | memset(&h3cli, 0, sizeof(h3cli)); 497 | h3cli.h3cli_method = "GET"; 498 | 499 | while (opt = getopt(argc, argv, "f:l:o:G:L:M:hv"), opt != -1) 500 | { 501 | switch (opt) 502 | { 503 | case 'c': 504 | if (settings_initialized) 505 | { 506 | fprintf(stderr, "-c and -k should precede -o flags\n"); 507 | exit(EXIT_FAILURE); 508 | } 509 | cert_file = optarg; 510 | break; 511 | case 'f': 512 | s_log_fh = fopen(optarg, "ab"); 513 | if (!s_log_fh) 514 | { 515 | perror("cannot open log file for writing"); 516 | exit(EXIT_FAILURE); 517 | } 518 | break; 519 | case 'k': 520 | if (settings_initialized) 521 | { 522 | fprintf(stderr, "-c and -k should precede -o flags\n"); 523 | exit(EXIT_FAILURE); 524 | } 525 | key_file = optarg; 526 | break; 527 | case 'l': 528 | if (0 != lsquic_logger_lopt(optarg)) 529 | { 530 | fprintf(stderr, "error processing -l option\n"); 531 | exit(EXIT_FAILURE); 532 | } 533 | break; 534 | case 'G': 535 | key_log_dir = optarg; 536 | break; 537 | case 'L': 538 | if (0 != lsquic_set_log_level(optarg)) 539 | { 540 | fprintf(stderr, "error processing -L option\n"); 541 | exit(EXIT_FAILURE); 542 | } 543 | break; 544 | case 'M': 545 | h3cli.h3cli_method = optarg; 546 | break; 547 | case 'v': 548 | ++s_verbose; 549 | break; 550 | case 'o': /* For example: -o version=h3-27 */ 551 | if (!settings_initialized) 552 | { 553 | lsquic_engine_init_settings(&settings, LSENG_HTTP); 554 | settings_initialized = 1; 555 | } 556 | 
val = strchr(optarg, '='); 557 | if (!val) 558 | { 559 | fprintf(stderr, "error processing -o: no equal sign\n"); 560 | exit(EXIT_FAILURE); 561 | } 562 | ++val; 563 | if (0 == strncmp(optarg, "version=", val - optarg)) 564 | { 565 | if (!version_cleared) 566 | { 567 | /* Clear all version on first -o version= */ 568 | version_cleared = 1; 569 | settings.es_versions = 0; 570 | } 571 | enum lsquic_version ver = lsquic_str2ver(val, strlen(val)); 572 | if ((unsigned) ver < N_LSQVER) 573 | { 574 | settings.es_versions |= 1 << ver; 575 | break; 576 | } 577 | ver = lsquic_alpn2ver(val, strlen(val)); 578 | if ((unsigned) ver < N_LSQVER) 579 | { 580 | settings.es_versions |= 1 << ver; 581 | break; 582 | } 583 | fprintf(stderr, "error: unknown version `%s'\n", val); 584 | exit(EXIT_FAILURE); 585 | } 586 | else if (0 == strncmp(optarg, "cc_algo=", val - optarg)) 587 | settings.es_cc_algo = atoi(val); 588 | /* ...and so on: add more options here as necessary */ 589 | else 590 | { 591 | fprintf(stderr, "error: unknown option `%.*s'\n", 592 | (int) (val - 1 - optarg), optarg); 593 | exit(EXIT_FAILURE); 594 | } 595 | break; 596 | case 'h': 597 | h3cli_usage(argv[0]); 598 | exit(EXIT_SUCCESS); 599 | break; 600 | default: 601 | exit(EXIT_FAILURE); 602 | break; 603 | } 604 | } 605 | 606 | /* Need hostname, port, and path */ 607 | if (optind + 2 >= argc) 608 | { 609 | LOG("please specify hostname, port, and path"); 610 | exit(EXIT_FAILURE); 611 | } 612 | h3cli.h3cli_hostname = argv[optind]; 613 | port_str = argv[optind + 1]; 614 | h3cli.h3cli_path = argv[optind + 2]; 615 | 616 | /* Resolve hostname */ 617 | memset(&hints, 0, sizeof(hints)); 618 | hints.ai_flags = AI_NUMERICSERV; 619 | if (0 != getaddrinfo(h3cli.h3cli_hostname, port_str, &hints, &res)) 620 | { 621 | perror("getaddrinfo"); 622 | exit(EXIT_FAILURE); 623 | } 624 | memcpy(&addr.sa, res->ai_addr, res->ai_addrlen); 625 | 626 | if (!settings_initialized) 627 | lsquic_engine_init_settings(&settings, LSENG_HTTP); 628 | 629 | 
/* At the time of this writing, using the loss bits extension causes 630 | * decryption failures in Wireshark. For the purposes of the demo, we 631 | * override the default. 632 | */ 633 | settings.es_ql_bits = 0; 634 | 635 | /* Check settings */ 636 | if (0 != lsquic_engine_check_settings(&settings, LSENG_HTTP, 637 | errbuf, sizeof(errbuf))) 638 | { 639 | LOG("invalid settings: %s", errbuf); 640 | exit(EXIT_FAILURE); 641 | } 642 | 643 | /* Initialize event loop */ 644 | h3cli.h3cli_loop = EV_DEFAULT; 645 | h3cli.h3cli_sock_fd = socket(addr.sa.sa_family, SOCK_DGRAM, 0); 646 | 647 | /* Set up socket */ 648 | if (h3cli.h3cli_sock_fd < 0) 649 | { 650 | perror("socket"); 651 | exit(EXIT_FAILURE); 652 | } 653 | if (0 != h3cli_set_nonblocking(h3cli.h3cli_sock_fd)) 654 | { 655 | perror("fcntl"); 656 | exit(EXIT_FAILURE); 657 | } 658 | 659 | h3cli.h3cli_local_sas.ss_family = addr.sa.sa_family; 660 | socklen = sizeof(h3cli.h3cli_local_sas); 661 | if (0 != bind(h3cli.h3cli_sock_fd, 662 | (struct sockaddr *) &h3cli.h3cli_local_sas, socklen)) 663 | { 664 | perror("bind"); 665 | exit(EXIT_FAILURE); 666 | } 667 | ev_init(&h3cli.h3cli_timer, h3cli_timer_expired); 668 | ev_io_init(&h3cli.h3cli_sock_w, h3cli_read_socket, h3cli.h3cli_sock_fd, EV_READ); 669 | ev_io_start(h3cli.h3cli_loop, &h3cli.h3cli_sock_w); 670 | 671 | /* Initialize logging */ 672 | setvbuf(s_log_fh, NULL, _IOLBF, 0); 673 | lsquic_logger_init(&logger_if, s_log_fh, LLTS_HHMMSSUS); 674 | 675 | /* Initialize callbacks */ 676 | memset(&eapi, 0, sizeof(eapi)); 677 | eapi.ea_packets_out = h3cli_packets_out; 678 | eapi.ea_packets_out_ctx = &h3cli; 679 | eapi.ea_stream_if = &h3cli_client_callbacks; 680 | eapi.ea_stream_if_ctx = &h3cli; 681 | if (key_log_dir) 682 | { 683 | eapi.ea_keylog_if = &keylog_if; 684 | eapi.ea_keylog_ctx = (void *) key_log_dir; 685 | } 686 | eapi.ea_settings = &settings; 687 | 688 | h3cli.h3cli_engine = lsquic_engine_new(LSENG_HTTP, &eapi); 689 | if (!h3cli.h3cli_engine) 690 | { 691 | LOG("cannot 
create engine"); 692 | exit(EXIT_FAILURE); 693 | } 694 | 695 | h3cli.h3cli_timer.data = &h3cli; 696 | h3cli.h3cli_sock_w.data = &h3cli; 697 | h3cli.h3cli_conn = lsquic_engine_connect( 698 | h3cli.h3cli_engine, N_LSQVER, 699 | (struct sockaddr *) &h3cli.h3cli_local_sas, &addr.sa, 700 | (void *) (uintptr_t) h3cli.h3cli_sock_fd, /* Peer ctx */ 701 | NULL, h3cli.h3cli_hostname, 0, NULL, 0, NULL, 0); 702 | if (!h3cli.h3cli_conn) 703 | { 704 | LOG("cannot create connection"); 705 | exit(EXIT_FAILURE); 706 | } 707 | h3cli_process_conns(&h3cli); 708 | ev_run(h3cli.h3cli_loop, 0); 709 | 710 | lsquic_engine_destroy(h3cli.h3cli_engine); 711 | lsquic_global_cleanup(); 712 | exit(EXIT_SUCCESS); 713 | } 714 | -------------------------------------------------------------------------------- /libev/ev.h: -------------------------------------------------------------------------------- 1 | /* 2 | * libev native API header 3 | * 4 | * Copyright (c) 2007-2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 
38 | */ 39 | 40 | #ifndef EV_H_ 41 | #define EV_H_ 42 | 43 | #ifdef __cplusplus 44 | # define EV_CPP(x) x 45 | # if __cplusplus >= 201103L 46 | # define EV_NOEXCEPT noexcept 47 | # else 48 | # define EV_NOEXCEPT 49 | # endif 50 | #else 51 | # define EV_CPP(x) 52 | # define EV_NOEXCEPT 53 | #endif 54 | #define EV_THROW EV_NOEXCEPT /* pre-4.25, do not use in new code */ 55 | 56 | EV_CPP(extern "C" {) 57 | 58 | /*****************************************************************************/ 59 | 60 | /* pre-4.0 compatibility */ 61 | #ifndef EV_COMPAT3 62 | # define EV_COMPAT3 1 63 | #endif 64 | 65 | #ifndef EV_FEATURES 66 | # if defined __OPTIMIZE_SIZE__ 67 | # define EV_FEATURES 0x7c 68 | # else 69 | # define EV_FEATURES 0x7f 70 | # endif 71 | #endif 72 | 73 | #define EV_FEATURE_CODE ((EV_FEATURES) & 1) 74 | #define EV_FEATURE_DATA ((EV_FEATURES) & 2) 75 | #define EV_FEATURE_CONFIG ((EV_FEATURES) & 4) 76 | #define EV_FEATURE_API ((EV_FEATURES) & 8) 77 | #define EV_FEATURE_WATCHERS ((EV_FEATURES) & 16) 78 | #define EV_FEATURE_BACKENDS ((EV_FEATURES) & 32) 79 | #define EV_FEATURE_OS ((EV_FEATURES) & 64) 80 | 81 | /* these priorities are inclusive, higher priorities will be invoked earlier */ 82 | #ifndef EV_MINPRI 83 | # define EV_MINPRI (EV_FEATURE_CONFIG ? -2 : 0) 84 | #endif 85 | #ifndef EV_MAXPRI 86 | # define EV_MAXPRI (EV_FEATURE_CONFIG ? 
+2 : 0) 87 | #endif 88 | 89 | #ifndef EV_MULTIPLICITY 90 | # define EV_MULTIPLICITY EV_FEATURE_CONFIG 91 | #endif 92 | 93 | #ifndef EV_PERIODIC_ENABLE 94 | # define EV_PERIODIC_ENABLE EV_FEATURE_WATCHERS 95 | #endif 96 | 97 | #ifndef EV_STAT_ENABLE 98 | # define EV_STAT_ENABLE EV_FEATURE_WATCHERS 99 | #endif 100 | 101 | #ifndef EV_PREPARE_ENABLE 102 | # define EV_PREPARE_ENABLE EV_FEATURE_WATCHERS 103 | #endif 104 | 105 | #ifndef EV_CHECK_ENABLE 106 | # define EV_CHECK_ENABLE EV_FEATURE_WATCHERS 107 | #endif 108 | 109 | #ifndef EV_IDLE_ENABLE 110 | # define EV_IDLE_ENABLE EV_FEATURE_WATCHERS 111 | #endif 112 | 113 | #ifndef EV_FORK_ENABLE 114 | # define EV_FORK_ENABLE EV_FEATURE_WATCHERS 115 | #endif 116 | 117 | #ifndef EV_CLEANUP_ENABLE 118 | # define EV_CLEANUP_ENABLE EV_FEATURE_WATCHERS 119 | #endif 120 | 121 | #ifndef EV_SIGNAL_ENABLE 122 | # define EV_SIGNAL_ENABLE EV_FEATURE_WATCHERS 123 | #endif 124 | 125 | #ifndef EV_CHILD_ENABLE 126 | # ifdef _WIN32 127 | # define EV_CHILD_ENABLE 0 128 | # else 129 | # define EV_CHILD_ENABLE EV_FEATURE_WATCHERS 130 | #endif 131 | #endif 132 | 133 | #ifndef EV_ASYNC_ENABLE 134 | # define EV_ASYNC_ENABLE EV_FEATURE_WATCHERS 135 | #endif 136 | 137 | #ifndef EV_EMBED_ENABLE 138 | # define EV_EMBED_ENABLE EV_FEATURE_WATCHERS 139 | #endif 140 | 141 | #ifndef EV_WALK_ENABLE 142 | # define EV_WALK_ENABLE 0 /* not yet */ 143 | #endif 144 | 145 | /*****************************************************************************/ 146 | 147 | #if EV_CHILD_ENABLE && !EV_SIGNAL_ENABLE 148 | # undef EV_SIGNAL_ENABLE 149 | # define EV_SIGNAL_ENABLE 1 150 | #endif 151 | 152 | /*****************************************************************************/ 153 | 154 | #ifndef EV_TSTAMP_T 155 | # define EV_TSTAMP_T double 156 | #endif 157 | typedef EV_TSTAMP_T ev_tstamp; 158 | 159 | #include <string.h> /* for memmove */ 160 | 161 | #ifndef EV_ATOMIC_T 162 | # include <signal.h> 163 | # define EV_ATOMIC_T sig_atomic_t volatile 164 | #endif 165 | 166 | #if
EV_STAT_ENABLE 167 | # ifdef _WIN32 168 | # include <time.h> 169 | # include <sys/types.h> 170 | # endif 171 | # include <sys/stat.h> 172 | #endif 173 | 174 | /* support multiple event loops? */ 175 | #if EV_MULTIPLICITY 176 | struct ev_loop; 177 | # define EV_P struct ev_loop *loop /* a loop as sole parameter in a declaration */ 178 | # define EV_P_ EV_P, /* a loop as first of multiple parameters */ 179 | # define EV_A loop /* a loop as sole argument to a function call */ 180 | # define EV_A_ EV_A, /* a loop as first of multiple arguments */ 181 | # define EV_DEFAULT_UC ev_default_loop_uc_ () /* the default loop, if initialised, as sole arg */ 182 | # define EV_DEFAULT_UC_ EV_DEFAULT_UC, /* the default loop as first of multiple arguments */ 183 | # define EV_DEFAULT ev_default_loop (0) /* the default loop as sole arg */ 184 | # define EV_DEFAULT_ EV_DEFAULT, /* the default loop as first of multiple arguments */ 185 | #else 186 | # define EV_P void 187 | # define EV_P_ 188 | # define EV_A 189 | # define EV_A_ 190 | # define EV_DEFAULT 191 | # define EV_DEFAULT_ 192 | # define EV_DEFAULT_UC 193 | # define EV_DEFAULT_UC_ 194 | # undef EV_EMBED_ENABLE 195 | #endif 196 | 197 | /* EV_INLINE is used for functions in header files */ 198 | #if __STDC_VERSION__ >= 199901L || __GNUC__ >= 3 199 | # define EV_INLINE static inline 200 | #else 201 | # define EV_INLINE static 202 | #endif 203 | 204 | #ifdef EV_API_STATIC 205 | # define EV_API_DECL static 206 | #else 207 | # define EV_API_DECL extern 208 | #endif 209 | 210 | /* EV_PROTOTYPES can be used to switch off prototype declarations */ 211 | #ifndef EV_PROTOTYPES 212 | # define EV_PROTOTYPES 1 213 | #endif 214 | 215 | /*****************************************************************************/ 216 | 217 | #define EV_VERSION_MAJOR 4 218 | #define EV_VERSION_MINOR 31 219 | 220 | /* eventmask, revents, events...
*/ 221 | enum { 222 | EV_UNDEF = (int)0xFFFFFFFF, /* guaranteed to be invalid */ 223 | EV_NONE = 0x00, /* no events */ 224 | EV_READ = 0x01, /* ev_io detected read will not block */ 225 | EV_WRITE = 0x02, /* ev_io detected write will not block */ 226 | EV__IOFDSET = 0x80, /* internal use only */ 227 | EV_IO = EV_READ, /* alias for type-detection */ 228 | EV_TIMER = 0x00000100, /* timer timed out */ 229 | #if EV_COMPAT3 230 | EV_TIMEOUT = EV_TIMER, /* pre 4.0 API compatibility */ 231 | #endif 232 | EV_PERIODIC = 0x00000200, /* periodic timer timed out */ 233 | EV_SIGNAL = 0x00000400, /* signal was received */ 234 | EV_CHILD = 0x00000800, /* child/pid had status change */ 235 | EV_STAT = 0x00001000, /* stat data changed */ 236 | EV_IDLE = 0x00002000, /* event loop is idling */ 237 | EV_PREPARE = 0x00004000, /* event loop about to poll */ 238 | EV_CHECK = 0x00008000, /* event loop finished poll */ 239 | EV_EMBED = 0x00010000, /* embedded event loop needs sweep */ 240 | EV_FORK = 0x00020000, /* event loop resumed in child */ 241 | EV_CLEANUP = 0x00040000, /* event loop resumed in child */ 242 | EV_ASYNC = 0x00080000, /* async intra-loop signal */ 243 | EV_CUSTOM = 0x01000000, /* for use by user code */ 244 | EV_ERROR = (int)0x80000000 /* sent when an error occurs */ 245 | }; 246 | 247 | /* can be used to add custom fields to all watchers, while losing binary compatibility */ 248 | #ifndef EV_COMMON 249 | # define EV_COMMON void *data; 250 | #endif 251 | 252 | #ifndef EV_CB_DECLARE 253 | # define EV_CB_DECLARE(type) void (*cb)(EV_P_ struct type *w, int revents); 254 | #endif 255 | #ifndef EV_CB_INVOKE 256 | # define EV_CB_INVOKE(watcher,revents) (watcher)->cb (EV_A_ (watcher), (revents)) 257 | #endif 258 | 259 | /* not official, do not use */ 260 | #define EV_CB(type,name) void name (EV_P_ struct ev_ ## type *w, int revents) 261 | 262 | /* 263 | * struct member types: 264 | * private: you may look at them, but not change them, 265 | * and they might not mean anything to 
you. 266 | * ro: can be read anytime, but only changed when the watcher isn't active. 267 | * rw: can be read and modified anytime, even when the watcher is active. 268 | * 269 | * some internal details that might be helpful for debugging: 270 | * 271 | * active is either 0, which means the watcher is not active, 272 | * or the array index of the watcher (periodics, timers) 273 | * or the array index + 1 (most other watchers) 274 | * or simply 1 for watchers that aren't in some array. 275 | * pending is either 0, in which case the watcher isn't, 276 | * or the array index + 1 in the pendings array. 277 | */ 278 | 279 | #if EV_MINPRI == EV_MAXPRI 280 | # define EV_DECL_PRIORITY 281 | #elif !defined (EV_DECL_PRIORITY) 282 | # define EV_DECL_PRIORITY int priority; 283 | #endif 284 | 285 | /* shared by all watchers */ 286 | #define EV_WATCHER(type) \ 287 | int active; /* private */ \ 288 | int pending; /* private */ \ 289 | EV_DECL_PRIORITY /* private */ \ 290 | EV_COMMON /* rw */ \ 291 | EV_CB_DECLARE (type) /* private */ 292 | 293 | #define EV_WATCHER_LIST(type) \ 294 | EV_WATCHER (type) \ 295 | struct ev_watcher_list *next; /* private */ 296 | 297 | #define EV_WATCHER_TIME(type) \ 298 | EV_WATCHER (type) \ 299 | ev_tstamp at; /* private */ 300 | 301 | /* base class, nothing to see here unless you subclass */ 302 | typedef struct ev_watcher 303 | { 304 | EV_WATCHER (ev_watcher) 305 | } ev_watcher; 306 | 307 | /* base class, nothing to see here unless you subclass */ 308 | typedef struct ev_watcher_list 309 | { 310 | EV_WATCHER_LIST (ev_watcher_list) 311 | } ev_watcher_list; 312 | 313 | /* base class, nothing to see here unless you subclass */ 314 | typedef struct ev_watcher_time 315 | { 316 | EV_WATCHER_TIME (ev_watcher_time) 317 | } ev_watcher_time; 318 | 319 | /* invoked when fd is either EV_READable or EV_WRITEable */ 320 | /* revent EV_READ, EV_WRITE */ 321 | typedef struct ev_io 322 | { 323 | EV_WATCHER_LIST (ev_io) 324 | 325 | int fd; /* ro */ 326 | int events; 
/* ro */ 327 | } ev_io; 328 | 329 | /* invoked after a specific time, repeatable (based on monotonic clock) */ 330 | /* revent EV_TIMEOUT */ 331 | typedef struct ev_timer 332 | { 333 | EV_WATCHER_TIME (ev_timer) 334 | 335 | ev_tstamp repeat; /* rw */ 336 | } ev_timer; 337 | 338 | /* invoked at some specific time, possibly repeating at regular intervals (based on UTC) */ 339 | /* revent EV_PERIODIC */ 340 | typedef struct ev_periodic 341 | { 342 | EV_WATCHER_TIME (ev_periodic) 343 | 344 | ev_tstamp offset; /* rw */ 345 | ev_tstamp interval; /* rw */ 346 | ev_tstamp (*reschedule_cb)(struct ev_periodic *w, ev_tstamp now) EV_NOEXCEPT; /* rw */ 347 | } ev_periodic; 348 | 349 | /* invoked when the given signal has been received */ 350 | /* revent EV_SIGNAL */ 351 | typedef struct ev_signal 352 | { 353 | EV_WATCHER_LIST (ev_signal) 354 | 355 | int signum; /* ro */ 356 | } ev_signal; 357 | 358 | /* invoked when sigchld is received and waitpid indicates the given pid */ 359 | /* revent EV_CHILD */ 360 | /* does not support priorities */ 361 | typedef struct ev_child 362 | { 363 | EV_WATCHER_LIST (ev_child) 364 | 365 | int flags; /* private */ 366 | int pid; /* ro */ 367 | int rpid; /* rw, holds the received pid */ 368 | int rstatus; /* rw, holds the exit status, use the macros from sys/wait.h */ 369 | } ev_child; 370 | 371 | #if EV_STAT_ENABLE 372 | /* st_nlink = 0 means missing file or other error */ 373 | # ifdef _WIN32 374 | typedef struct _stati64 ev_statdata; 375 | # else 376 | typedef struct stat ev_statdata; 377 | # endif 378 | 379 | /* invoked each time the stat data changes for a given path */ 380 | /* revent EV_STAT */ 381 | typedef struct ev_stat 382 | { 383 | EV_WATCHER_LIST (ev_stat) 384 | 385 | ev_timer timer; /* private */ 386 | ev_tstamp interval; /* ro */ 387 | const char *path; /* ro */ 388 | ev_statdata prev; /* ro */ 389 | ev_statdata attr; /* ro */ 390 | 391 | int wd; /* wd for inotify, fd for kqueue */ 392 | } ev_stat; 393 | #endif 394 | 395 | #if 
EV_IDLE_ENABLE 396 | /* invoked when the nothing else needs to be done, keeps the process from blocking */ 397 | /* revent EV_IDLE */ 398 | typedef struct ev_idle 399 | { 400 | EV_WATCHER (ev_idle) 401 | } ev_idle; 402 | #endif 403 | 404 | /* invoked for each run of the mainloop, just before the blocking call */ 405 | /* you can still change events in any way you like */ 406 | /* revent EV_PREPARE */ 407 | typedef struct ev_prepare 408 | { 409 | EV_WATCHER (ev_prepare) 410 | } ev_prepare; 411 | 412 | /* invoked for each run of the mainloop, just after the blocking call */ 413 | /* revent EV_CHECK */ 414 | typedef struct ev_check 415 | { 416 | EV_WATCHER (ev_check) 417 | } ev_check; 418 | 419 | #if EV_FORK_ENABLE 420 | /* the callback gets invoked before check in the child process when a fork was detected */ 421 | /* revent EV_FORK */ 422 | typedef struct ev_fork 423 | { 424 | EV_WATCHER (ev_fork) 425 | } ev_fork; 426 | #endif 427 | 428 | #if EV_CLEANUP_ENABLE 429 | /* is invoked just before the loop gets destroyed */ 430 | /* revent EV_CLEANUP */ 431 | typedef struct ev_cleanup 432 | { 433 | EV_WATCHER (ev_cleanup) 434 | } ev_cleanup; 435 | #endif 436 | 437 | #if EV_EMBED_ENABLE 438 | /* used to embed an event loop inside another */ 439 | /* the callback gets invoked when the event loop has handled events, and can be 0 */ 440 | typedef struct ev_embed 441 | { 442 | EV_WATCHER (ev_embed) 443 | 444 | struct ev_loop *other; /* ro */ 445 | ev_io io; /* private */ 446 | ev_prepare prepare; /* private */ 447 | ev_check check; /* unused */ 448 | ev_timer timer; /* unused */ 449 | ev_periodic periodic; /* unused */ 450 | ev_idle idle; /* unused */ 451 | ev_fork fork; /* private */ 452 | #if EV_CLEANUP_ENABLE 453 | ev_cleanup cleanup; /* unused */ 454 | #endif 455 | } ev_embed; 456 | #endif 457 | 458 | #if EV_ASYNC_ENABLE 459 | /* invoked when somebody calls ev_async_send on the watcher */ 460 | /* revent EV_ASYNC */ 461 | typedef struct ev_async 462 | { 463 | EV_WATCHER 
(ev_async) 464 | 465 | EV_ATOMIC_T sent; /* private */ 466 | } ev_async; 467 | 468 | # define ev_async_pending(w) (+(w)->sent) 469 | #endif 470 | 471 | /* the presence of this union forces similar struct layout */ 472 | union ev_any_watcher 473 | { 474 | struct ev_watcher w; 475 | struct ev_watcher_list wl; 476 | 477 | struct ev_io io; 478 | struct ev_timer timer; 479 | struct ev_periodic periodic; 480 | struct ev_signal signal; 481 | struct ev_child child; 482 | #if EV_STAT_ENABLE 483 | struct ev_stat stat; 484 | #endif 485 | #if EV_IDLE_ENABLE 486 | struct ev_idle idle; 487 | #endif 488 | struct ev_prepare prepare; 489 | struct ev_check check; 490 | #if EV_FORK_ENABLE 491 | struct ev_fork fork; 492 | #endif 493 | #if EV_CLEANUP_ENABLE 494 | struct ev_cleanup cleanup; 495 | #endif 496 | #if EV_EMBED_ENABLE 497 | struct ev_embed embed; 498 | #endif 499 | #if EV_ASYNC_ENABLE 500 | struct ev_async async; 501 | #endif 502 | }; 503 | 504 | /* flag bits for ev_default_loop and ev_loop_new */ 505 | enum { 506 | /* the default */ 507 | EVFLAG_AUTO = 0x00000000U, /* not quite a mask */ 508 | /* flag bits */ 509 | EVFLAG_NOENV = 0x01000000U, /* do NOT consult environment */ 510 | EVFLAG_FORKCHECK = 0x02000000U, /* check for a fork in each iteration */ 511 | /* debugging/feature disable */ 512 | EVFLAG_NOINOTIFY = 0x00100000U, /* do not attempt to use inotify */ 513 | #if EV_COMPAT3 514 | EVFLAG_NOSIGFD = 0, /* compatibility to pre-3.9 */ 515 | #endif 516 | EVFLAG_SIGNALFD = 0x00200000U, /* attempt to use signalfd */ 517 | EVFLAG_NOSIGMASK = 0x00400000U, /* avoid modifying the signal mask */ 518 | EVFLAG_NOTIMERFD = 0x00800000U /* avoid creating a timerfd */ 519 | }; 520 | 521 | /* method bits to be ored together */ 522 | enum { 523 | EVBACKEND_SELECT = 0x00000001U, /* available just about anywhere */ 524 | EVBACKEND_POLL = 0x00000002U, /* !win, !aix, broken on osx */ 525 | EVBACKEND_EPOLL = 0x00000004U, /* linux */ 526 | EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx 
*/ 527 | EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */ 528 | EVBACKEND_PORT = 0x00000020U, /* solaris 10 */ 529 | EVBACKEND_LINUXAIO = 0x00000040U, /* linuix AIO, 4.19+ */ 530 | EVBACKEND_IOURING = 0x00000080U, /* linux io_uring, 5.1+ */ 531 | EVBACKEND_ALL = 0x000000FFU, /* all known backends */ 532 | EVBACKEND_MASK = 0x0000FFFFU /* all future backends */ 533 | }; 534 | 535 | #if EV_PROTOTYPES 536 | EV_API_DECL int ev_version_major (void) EV_NOEXCEPT; 537 | EV_API_DECL int ev_version_minor (void) EV_NOEXCEPT; 538 | 539 | EV_API_DECL unsigned int ev_supported_backends (void) EV_NOEXCEPT; 540 | EV_API_DECL unsigned int ev_recommended_backends (void) EV_NOEXCEPT; 541 | EV_API_DECL unsigned int ev_embeddable_backends (void) EV_NOEXCEPT; 542 | 543 | EV_API_DECL ev_tstamp ev_time (void) EV_NOEXCEPT; 544 | EV_API_DECL void ev_sleep (ev_tstamp delay) EV_NOEXCEPT; /* sleep for a while */ 545 | 546 | /* Sets the allocation function to use, works like realloc. 547 | * It is used to allocate and free memory. 548 | * If it returns zero when memory needs to be allocated, the library might abort 549 | * or take some potentially destructive action. 550 | * The default is your system realloc function. 
551 | */ 552 | EV_API_DECL void ev_set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT; 553 | 554 | /* set the callback function to call on a 555 | * retryable syscall error 556 | * (such as failed select, poll, epoll_wait) 557 | */ 558 | EV_API_DECL void ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT; 559 | 560 | #if EV_MULTIPLICITY 561 | 562 | /* the default loop is the only one that handles signals and child watchers */ 563 | /* you can call this as often as you like */ 564 | EV_API_DECL struct ev_loop *ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; 565 | 566 | #ifdef EV_API_STATIC 567 | EV_API_DECL struct ev_loop *ev_default_loop_ptr; 568 | #endif 569 | 570 | EV_INLINE struct ev_loop * 571 | ev_default_loop_uc_ (void) EV_NOEXCEPT 572 | { 573 | extern struct ev_loop *ev_default_loop_ptr; 574 | 575 | return ev_default_loop_ptr; 576 | } 577 | 578 | EV_INLINE int 579 | ev_is_default_loop (EV_P) EV_NOEXCEPT 580 | { 581 | return EV_A == EV_DEFAULT_UC; 582 | } 583 | 584 | /* create and destroy alternative loops that don't handle signals */ 585 | EV_API_DECL struct ev_loop *ev_loop_new (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; 586 | 587 | EV_API_DECL ev_tstamp ev_now (EV_P) EV_NOEXCEPT; /* time w.r.t. 
timers and the eventloop, updated after each poll */ 588 | 589 | #else 590 | 591 | EV_API_DECL int ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; /* returns true when successful */ 592 | 593 | EV_API_DECL ev_tstamp ev_rt_now; 594 | 595 | EV_INLINE ev_tstamp 596 | ev_now (void) EV_NOEXCEPT 597 | { 598 | return ev_rt_now; 599 | } 600 | 601 | /* looks weird, but ev_is_default_loop (EV_A) still works if this exists */ 602 | EV_INLINE int 603 | ev_is_default_loop (void) EV_NOEXCEPT 604 | { 605 | return 1; 606 | } 607 | 608 | #endif /* multiplicity */ 609 | 610 | /* destroy event loops, also works for the default loop */ 611 | EV_API_DECL void ev_loop_destroy (EV_P); 612 | 613 | /* this needs to be called after fork, to duplicate the loop */ 614 | /* when you want to re-use it in the child */ 615 | /* you can call it in either the parent or the child */ 616 | /* you can actually call it at any time, anywhere :) */ 617 | EV_API_DECL void ev_loop_fork (EV_P) EV_NOEXCEPT; 618 | 619 | EV_API_DECL unsigned int ev_backend (EV_P) EV_NOEXCEPT; /* backend in use by loop */ 620 | 621 | EV_API_DECL void ev_now_update (EV_P) EV_NOEXCEPT; /* update event loop time */ 622 | 623 | #if EV_WALK_ENABLE 624 | /* walk (almost) all watchers in the loop of a given type, invoking the */ 625 | /* callback on every such watcher. 
The callback might stop the watcher, */ 626 | /* but do nothing else with the loop */ 627 | EV_API_DECL void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT; 628 | #endif 629 | 630 | #endif /* prototypes */ 631 | 632 | /* ev_run flags values */ 633 | enum { 634 | EVRUN_NOWAIT = 1, /* do not block/wait */ 635 | EVRUN_ONCE = 2 /* block *once* only */ 636 | }; 637 | 638 | /* ev_break how values */ 639 | enum { 640 | EVBREAK_CANCEL = 0, /* undo unloop */ 641 | EVBREAK_ONE = 1, /* unloop once */ 642 | EVBREAK_ALL = 2 /* unloop all loops */ 643 | }; 644 | 645 | #if EV_PROTOTYPES 646 | EV_API_DECL int ev_run (EV_P_ int flags EV_CPP (= 0)); 647 | EV_API_DECL void ev_break (EV_P_ int how EV_CPP (= EVBREAK_ONE)) EV_NOEXCEPT; /* break out of the loop */ 648 | 649 | /* 650 | * ref/unref can be used to add or remove a refcount on the mainloop. every watcher 651 | * keeps one reference. if you have a long-running watcher you never unregister that 652 | * should not keep ev_loop from running, unref() after starting, and ref() before stopping. 
653 | */ 654 | EV_API_DECL void ev_ref (EV_P) EV_NOEXCEPT; 655 | EV_API_DECL void ev_unref (EV_P) EV_NOEXCEPT; 656 | 657 | /* 658 | * convenience function, wait for a single event, without registering an event watcher 659 | * if timeout is < 0, do wait indefinitely 660 | */ 661 | EV_API_DECL void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT; 662 | 663 | # if EV_FEATURE_API 664 | EV_API_DECL unsigned int ev_iteration (EV_P) EV_NOEXCEPT; /* number of loop iterations */ 665 | EV_API_DECL unsigned int ev_depth (EV_P) EV_NOEXCEPT; /* #ev_loop enters - #ev_loop leaves */ 666 | EV_API_DECL void ev_verify (EV_P) EV_NOEXCEPT; /* abort if loop data corrupted */ 667 | 668 | EV_API_DECL void ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT; /* sleep at least this time, default 0 */ 669 | EV_API_DECL void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT; /* sleep at least this time, default 0 */ 670 | 671 | /* advanced stuff for threading etc. support, see docs */ 672 | EV_API_DECL void ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT; 673 | EV_API_DECL void *ev_userdata (EV_P) EV_NOEXCEPT; 674 | typedef void (*ev_loop_callback)(EV_P); 675 | EV_API_DECL void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT; 676 | /* C++ doesn't allow the use of the ev_loop_callback typedef here, so we need to spell it out */ 677 | EV_API_DECL void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT; 678 | 679 | EV_API_DECL unsigned int ev_pending_count (EV_P) EV_NOEXCEPT; /* number of pending events, if any */ 680 | EV_API_DECL void ev_invoke_pending (EV_P); /* invoke all pending watchers */ 681 | 682 | /* 683 | * stop/start the timer handling. 
684 | */ 685 | EV_API_DECL void ev_suspend (EV_P) EV_NOEXCEPT; 686 | EV_API_DECL void ev_resume (EV_P) EV_NOEXCEPT; 687 | #endif 688 | 689 | #endif 690 | 691 | /* these may evaluate ev multiple times, and the other arguments at most once */ 692 | /* either use ev_init + ev_TYPE_set, or the ev_TYPE_init macro, below, to first initialise a watcher */ 693 | #define ev_init(ev,cb_) do { \ 694 | ((ev_watcher *)(void *)(ev))->active = \ 695 | ((ev_watcher *)(void *)(ev))->pending = 0; \ 696 | ev_set_priority ((ev), 0); \ 697 | ev_set_cb ((ev), cb_); \ 698 | } while (0) 699 | 700 | #define ev_io_set(ev,fd_,events_) do { (ev)->fd = (fd_); (ev)->events = (events_) | EV__IOFDSET; } while (0) 701 | #define ev_timer_set(ev,after_,repeat_) do { ((ev_watcher_time *)(ev))->at = (after_); (ev)->repeat = (repeat_); } while (0) 702 | #define ev_periodic_set(ev,ofs_,ival_,rcb_) do { (ev)->offset = (ofs_); (ev)->interval = (ival_); (ev)->reschedule_cb = (rcb_); } while (0) 703 | #define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0) 704 | #define ev_child_set(ev,pid_,trace_) do { (ev)->pid = (pid_); (ev)->flags = !!(trace_); } while (0) 705 | #define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0) 706 | #define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */ 707 | #define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */ 708 | #define ev_check_set(ev) /* nop, yes, this is a serious in-joke */ 709 | #define ev_embed_set(ev,other_) do { (ev)->other = (other_); } while (0) 710 | #define ev_fork_set(ev) /* nop, yes, this is a serious in-joke */ 711 | #define ev_cleanup_set(ev) /* nop, yes, this is a serious in-joke */ 712 | #define ev_async_set(ev) /* nop, yes, this is a serious in-joke */ 713 | 714 | #define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0) 715 | #define ev_timer_init(ev,cb,after,repeat) do { ev_init ((ev), (cb)); 
ev_timer_set ((ev),(after),(repeat)); } while (0) 716 | #define ev_periodic_init(ev,cb,ofs,ival,rcb) do { ev_init ((ev), (cb)); ev_periodic_set ((ev),(ofs),(ival),(rcb)); } while (0) 717 | #define ev_signal_init(ev,cb,signum) do { ev_init ((ev), (cb)); ev_signal_set ((ev), (signum)); } while (0) 718 | #define ev_child_init(ev,cb,pid,trace) do { ev_init ((ev), (cb)); ev_child_set ((ev),(pid),(trace)); } while (0) 719 | #define ev_stat_init(ev,cb,path,interval) do { ev_init ((ev), (cb)); ev_stat_set ((ev),(path),(interval)); } while (0) 720 | #define ev_idle_init(ev,cb) do { ev_init ((ev), (cb)); ev_idle_set ((ev)); } while (0) 721 | #define ev_prepare_init(ev,cb) do { ev_init ((ev), (cb)); ev_prepare_set ((ev)); } while (0) 722 | #define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0) 723 | #define ev_embed_init(ev,cb,other) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(other)); } while (0) 724 | #define ev_fork_init(ev,cb) do { ev_init ((ev), (cb)); ev_fork_set ((ev)); } while (0) 725 | #define ev_cleanup_init(ev,cb) do { ev_init ((ev), (cb)); ev_cleanup_set ((ev)); } while (0) 726 | #define ev_async_init(ev,cb) do { ev_init ((ev), (cb)); ev_async_set ((ev)); } while (0) 727 | 728 | #define ev_is_pending(ev) (0 + ((ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */ 729 | #define ev_is_active(ev) (0 + ((ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */ 730 | 731 | #define ev_cb_(ev) (ev)->cb /* rw */ 732 | #define ev_cb(ev) (memmove (&ev_cb_ (ev), &((ev_watcher *)(ev))->cb, sizeof (ev_cb_ (ev))), (ev)->cb) 733 | 734 | #if EV_MINPRI == EV_MAXPRI 735 | # define ev_priority(ev) ((ev), EV_MINPRI) 736 | # define ev_set_priority(ev,pri) ((ev), (pri)) 737 | #else 738 | # define ev_priority(ev) (+(((ev_watcher *)(void *)(ev))->priority)) 739 | # define ev_set_priority(ev,pri) ( (ev_watcher *)(void *)(ev))->priority = (pri) 740 | #endif 741 | 742 | #define 
ev_periodic_at(ev) (+((ev_watcher_time *)(ev))->at) 743 | 744 | #ifndef ev_set_cb 745 | # define ev_set_cb(ev,cb_) (ev_cb_ (ev) = (cb_), memmove (&((ev_watcher *)(ev))->cb, &ev_cb_ (ev), sizeof (ev_cb_ (ev)))) 746 | #endif 747 | 748 | /* stopping (enabling, adding) a watcher does nothing if it is already running */ 749 | /* stopping (disabling, deleting) a watcher does nothing unless it's already running */ 750 | #if EV_PROTOTYPES 751 | 752 | /* feeds an event into a watcher as if the event actually occurred */ 753 | /* accepts any ev_watcher type */ 754 | EV_API_DECL void ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT; 755 | EV_API_DECL void ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT; 756 | #if EV_SIGNAL_ENABLE 757 | EV_API_DECL void ev_feed_signal (int signum) EV_NOEXCEPT; 758 | EV_API_DECL void ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT; 759 | #endif 760 | EV_API_DECL void ev_invoke (EV_P_ void *w, int revents); 761 | EV_API_DECL int ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT; 762 | 763 | EV_API_DECL void ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT; 764 | EV_API_DECL void ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT; 765 | 766 | EV_API_DECL void ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT; 767 | EV_API_DECL void ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT; 768 | /* stops if active and no repeat, restarts if active and repeating, starts if inactive and repeating */ 769 | EV_API_DECL void ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT; 770 | /* return remaining time */ 771 | EV_API_DECL ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT; 772 | 773 | #if EV_PERIODIC_ENABLE 774 | EV_API_DECL void ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT; 775 | EV_API_DECL void ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT; 776 | EV_API_DECL void ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT; 777 | #endif 778 | 779 | /* only supported in the default loop */ 780 | #if EV_SIGNAL_ENABLE 781 | EV_API_DECL void 
ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT; 782 | EV_API_DECL void ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT; 783 | #endif 784 | 785 | /* only supported in the default loop */ 786 | # if EV_CHILD_ENABLE 787 | EV_API_DECL void ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT; 788 | EV_API_DECL void ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT; 789 | # endif 790 | 791 | # if EV_STAT_ENABLE 792 | EV_API_DECL void ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT; 793 | EV_API_DECL void ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT; 794 | EV_API_DECL void ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT; 795 | # endif 796 | 797 | # if EV_IDLE_ENABLE 798 | EV_API_DECL void ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT; 799 | EV_API_DECL void ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT; 800 | # endif 801 | 802 | #if EV_PREPARE_ENABLE 803 | EV_API_DECL void ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT; 804 | EV_API_DECL void ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT; 805 | #endif 806 | 807 | #if EV_CHECK_ENABLE 808 | EV_API_DECL void ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT; 809 | EV_API_DECL void ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT; 810 | #endif 811 | 812 | # if EV_FORK_ENABLE 813 | EV_API_DECL void ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT; 814 | EV_API_DECL void ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT; 815 | # endif 816 | 817 | # if EV_CLEANUP_ENABLE 818 | EV_API_DECL void ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT; 819 | EV_API_DECL void ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT; 820 | # endif 821 | 822 | # if EV_EMBED_ENABLE 823 | /* only supported when loop to be embedded is in fact embeddable */ 824 | EV_API_DECL void ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT; 825 | EV_API_DECL void ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT; 826 | EV_API_DECL void ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT; 827 | # endif 828 | 829 | # if EV_ASYNC_ENABLE 830 | EV_API_DECL void ev_async_start (EV_P_ ev_async *w) 
EV_NOEXCEPT; 831 | EV_API_DECL void ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT; 832 | EV_API_DECL void ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT; 833 | # endif 834 | 835 | #if EV_COMPAT3 836 | #define EVLOOP_NONBLOCK EVRUN_NOWAIT 837 | #define EVLOOP_ONESHOT EVRUN_ONCE 838 | #define EVUNLOOP_CANCEL EVBREAK_CANCEL 839 | #define EVUNLOOP_ONE EVBREAK_ONE 840 | #define EVUNLOOP_ALL EVBREAK_ALL 841 | #if EV_PROTOTYPES 842 | EV_INLINE void ev_loop (EV_P_ int flags) { ev_run (EV_A_ flags); } 843 | EV_INLINE void ev_unloop (EV_P_ int how ) { ev_break (EV_A_ how ); } 844 | EV_INLINE void ev_default_destroy (void) { ev_loop_destroy (EV_DEFAULT); } 845 | EV_INLINE void ev_default_fork (void) { ev_loop_fork (EV_DEFAULT); } 846 | #if EV_FEATURE_API 847 | EV_INLINE unsigned int ev_loop_count (EV_P) { return ev_iteration (EV_A); } 848 | EV_INLINE unsigned int ev_loop_depth (EV_P) { return ev_depth (EV_A); } 849 | EV_INLINE void ev_loop_verify (EV_P) { ev_verify (EV_A); } 850 | #endif 851 | #endif 852 | #else 853 | typedef struct ev_loop ev_loop; 854 | #endif 855 | 856 | #endif 857 | 858 | EV_CPP(}) 859 | 860 | #endif 861 | 862 | -------------------------------------------------------------------------------- /libev/ev_epoll.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev epoll fd activity backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. 
Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* 41 | * general notes about epoll: 42 | * 43 | * a) epoll silently removes fds from the fd set. 
as nothing tells us 44 | * that an fd has been removed otherwise, we have to continually 45 | * "rearm" fds that we suspect *might* have changed (same 46 | * problem with kqueue, but much less costly there). 47 | * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) 48 | * and seems not to have any advantage. 49 | * c) the inability to handle fork or file descriptors (think dup) 50 | * limits the applicability over poll, so this is not a generic 51 | * poll replacement. 52 | * d) epoll doesn't work the same as select with many file descriptors 53 | * (such as files). while not critical, no other advanced interface 54 | * seems to share this (rather non-unixy) limitation. 55 | * e) epoll claims to be embeddable, but in practise you never get 56 | * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32). 57 | * f) epoll_ctl returning EPERM means the fd is always ready. 58 | * 59 | * lots of "weird code" and complication handling in this file is due 60 | * to these design problems with epoll, as we try very hard to avoid 61 | * epoll_ctl syscalls for common usage patterns and handle the breakage 62 | * ensuing from receiving events for closed and otherwise long gone 63 | * file descriptors. 64 | */ 65 | 66 | #include 67 | 68 | #define EV_EMASK_EPERM 0x80 69 | 70 | static void 71 | epoll_modify (EV_P_ int fd, int oev, int nev) 72 | { 73 | struct epoll_event ev; 74 | unsigned char oldmask; 75 | 76 | /* 77 | * we handle EPOLL_CTL_DEL by ignoring it here 78 | * on the assumption that the fd is gone anyways 79 | * if that is wrong, we have to handle the spurious 80 | * event in epoll_poll. 81 | * if the fd is added again, we try to ADD it, and, if that 82 | * fails, we assume it still has the same eventmask. 
83 | */ 84 | if (!nev) 85 | return; 86 | 87 | oldmask = anfds [fd].emask; 88 | anfds [fd].emask = nev; 89 | 90 | /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ 91 | ev.data.u64 = (uint64_t)(uint32_t)fd 92 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); 93 | ev.events = (nev & EV_READ ? EPOLLIN : 0) 94 | | (nev & EV_WRITE ? EPOLLOUT : 0); 95 | 96 | if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) 97 | return; 98 | 99 | if (ecb_expect_true (errno == ENOENT)) 100 | { 101 | /* if ENOENT then the fd went away, so try to do the right thing */ 102 | if (!nev) 103 | goto dec_egen; 104 | 105 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) 106 | return; 107 | } 108 | else if (ecb_expect_true (errno == EEXIST)) 109 | { 110 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ 111 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ 112 | if (oldmask == nev) 113 | goto dec_egen; 114 | 115 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) 116 | return; 117 | } 118 | else if (ecb_expect_true (errno == EPERM)) 119 | { 120 | /* EPERM means the fd is always ready, but epoll is too snobbish */ 121 | /* to handle it, unlike select or poll. 
*/ 122 | anfds [fd].emask = EV_EMASK_EPERM; 123 | 124 | /* add fd to epoll_eperms, if not already inside */ 125 | if (!(oldmask & EV_EMASK_EPERM)) 126 | { 127 | array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit); 128 | epoll_eperms [epoll_epermcnt++] = fd; 129 | } 130 | 131 | return; 132 | } 133 | else 134 | assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL)); 135 | 136 | fd_kill (EV_A_ fd); 137 | 138 | dec_egen: 139 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ 140 | --anfds [fd].egen; 141 | } 142 | 143 | static void 144 | epoll_poll (EV_P_ ev_tstamp timeout) 145 | { 146 | int i; 147 | int eventcnt; 148 | 149 | if (ecb_expect_false (epoll_epermcnt)) 150 | timeout = EV_TS_CONST (0.); 151 | 152 | /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ 153 | /* the default libev max wait time, however. */ 154 | EV_RELEASE_CB; 155 | eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MSEC (timeout)); 156 | EV_ACQUIRE_CB; 157 | 158 | if (ecb_expect_false (eventcnt < 0)) 159 | { 160 | if (errno != EINTR) 161 | ev_syserr ("(libev) epoll_wait"); 162 | 163 | return; 164 | } 165 | 166 | for (i = 0; i < eventcnt; ++i) 167 | { 168 | struct epoll_event *ev = epoll_events + i; 169 | 170 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ 171 | int want = anfds [fd].events; 172 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) 173 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); 174 | 175 | /* 176 | * check for spurious notification. 
177 | * this only finds spurious notifications on egen updates 178 | * other spurious notifications will be found by epoll_ctl, below 179 | * we assume that fd is always in range, as we never shrink the anfds array 180 | */ 181 | if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) 182 | { 183 | /* recreate kernel state */ 184 | postfork |= 2; 185 | continue; 186 | } 187 | 188 | if (ecb_expect_false (got & ~want)) 189 | { 190 | anfds [fd].emask = want; 191 | 192 | /* 193 | * we received an event but are not interested in it, try mod or del 194 | * this often happens because we optimistically do not unregister fds 195 | * when we are no longer interested in them, but also when we get spurious 196 | * notifications for fds from another process. this is partially handled 197 | * above with the gencounter check (== our fd is not the event fd), and 198 | * partially here, when epoll_ctl returns an error (== a child has the fd 199 | * but we closed it). 200 | * note: for events such as POLLHUP, where we can't know whether it refers 201 | * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls. 202 | */ 203 | ev->events = (want & EV_READ ? EPOLLIN : 0) 204 | | (want & EV_WRITE ? EPOLLOUT : 0); 205 | 206 | /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ 207 | /* which is fortunately easy to do for us. */ 208 | if (epoll_ctl (backend_fd, want ? 
EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) 209 | { 210 | postfork |= 2; /* an error occurred, recreate kernel state */ 211 | continue; 212 | } 213 | } 214 | 215 | fd_event (EV_A_ fd, got); 216 | } 217 | 218 | /* if the receive array was full, increase its size */ 219 | if (ecb_expect_false (eventcnt == epoll_eventmax)) 220 | { 221 | ev_free (epoll_events); 222 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); 223 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 224 | } 225 | 226 | /* now synthesize events for all fds where epoll fails, while select works... */ 227 | for (i = epoll_epermcnt; i--; ) 228 | { 229 | int fd = epoll_eperms [i]; 230 | unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); 231 | 232 | if (anfds [fd].emask & EV_EMASK_EPERM && events) 233 | fd_event (EV_A_ fd, events); 234 | else 235 | { 236 | epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; 237 | anfds [fd].emask = 0; 238 | } 239 | } 240 | } 241 | 242 | static int 243 | epoll_epoll_create (void) 244 | { 245 | int fd; 246 | 247 | #if defined EPOLL_CLOEXEC && !defined __ANDROID__ 248 | fd = epoll_create1 (EPOLL_CLOEXEC); 249 | 250 | if (fd < 0 && (errno == EINVAL || errno == ENOSYS)) 251 | #endif 252 | { 253 | fd = epoll_create (256); 254 | 255 | if (fd >= 0) 256 | fcntl (fd, F_SETFD, FD_CLOEXEC); 257 | } 258 | 259 | return fd; 260 | } 261 | 262 | inline_size 263 | int 264 | epoll_init (EV_P_ int flags) 265 | { 266 | if ((backend_fd = epoll_epoll_create ()) < 0) 267 | return 0; 268 | 269 | backend_mintime = EV_TS_CONST (1e-3); /* epoll does sometimes return early, this is just to avoid the worst */ 270 | backend_modify = epoll_modify; 271 | backend_poll = epoll_poll; 272 | 273 | epoll_eventmax = 64; /* initial number of events receivable per poll */ 274 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 275 | 276 | return EVBACKEND_EPOLL; 
277 | } 278 | 279 | inline_size 280 | void 281 | epoll_destroy (EV_P) 282 | { 283 | ev_free (epoll_events); 284 | array_free (epoll_eperm, EMPTY); 285 | } 286 | 287 | ecb_cold 288 | static void 289 | epoll_fork (EV_P) 290 | { 291 | close (backend_fd); 292 | 293 | while ((backend_fd = epoll_epoll_create ()) < 0) 294 | ev_syserr ("(libev) epoll_create"); 295 | 296 | fd_rearm_all (EV_A); 297 | } 298 | 299 | -------------------------------------------------------------------------------- /libev/ev_iouring.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev linux io_uring fd activity backend 3 | * 4 | * Copyright (c) 2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* 41 | * general notes about linux io_uring: 42 | * 43 | * a) it's the best interface I have seen so far. on linux. 44 | * b) best is not necessarily very good. 45 | * c) it's better than the aio mess, doesn't suffer from the fork problems 46 | * of linux aio or epoll and so on and so on. and you could do event stuff 47 | * without any syscalls. what's not to like? 48 | * d) ok, it's vastly more complex, but that's ok, really. 49 | * e) why 3 mmaps instead of one? one would be more space-efficient, 50 | * and I can't see what benefit three would have (other than being 51 | * somehow resizable/relocatable, but that's apparently not possible). 
52 | * f) hmm, it's practically undebuggable (gdb can't access the memory, and 53 | * the bizarre way structure offsets are communicated makes it hard to 54 | * just print the ring buffer heads, even *iff* the memory were visible 55 | * in gdb. but then, that's also ok, really. 56 | * g) well, you cannot specify a timeout when waiting for events. no, 57 | * seriously, the interface doesn't support a timeout. never seen _that_ 58 | * before. sure, you can use a timerfd, but that's another syscall 59 | * you could have avoided. overall, this bizarre omission smells 60 | * like a µ-optimisation by the io_uring author for his personal 61 | * applications, to the detriment of everybody else who just wants 62 | * an event loop. but, umm, ok, if that's all, it could be worse. 63 | * h) there is a hardcoded limit of 4096 outstanding events. okay, 64 | * at least there is no arbitrary low system-wide limit... 65 | * i) unlike linux aio, you *can* register more than the limit 66 | * of fd events, and the kernel will "gracefully" signal an 67 | * overflow, after which you could destroy and recreate the kernel 68 | * state, a bit bigger, or fall back to e.g. poll. that's not 69 | * totally insane, but kind of questions the point of a high 70 | * performance I/O framework when it doesn't really work 71 | * under stress. 72 | * j) but, oh my! it has exactly the same bugs as the linux aio backend, 73 | * where some undocumented poll combinations just fail. 74 | * so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course, 75 | * this is completely undocumented, have I mentioned this already? 76 | * k) overall, the *API* itself is, I dare to say, not a total trainwreck. 77 | * the big issues with it are the bugs requiring epoll, which might 78 | * or might not get fixed (do I hold my breath?). 
79 | */ 80 | 81 | #include 82 | #include 83 | #include 84 | 85 | #define IOURING_INIT_ENTRIES 32 86 | 87 | /*****************************************************************************/ 88 | /* syscall wrapdadoop - this section has the raw api/abi definitions */ 89 | 90 | #include 91 | #include 92 | 93 | /* mostly directly taken from the kernel or documentation */ 94 | 95 | struct io_uring_sqe 96 | { 97 | __u8 opcode; 98 | __u8 flags; 99 | __u16 ioprio; 100 | __s32 fd; 101 | __u64 off; 102 | __u64 addr; 103 | __u32 len; 104 | union { 105 | __kernel_rwf_t rw_flags; 106 | __u32 fsync_flags; 107 | __u16 poll_events; 108 | __u32 sync_range_flags; 109 | __u32 msg_flags; 110 | }; 111 | __u64 user_data; 112 | union { 113 | __u16 buf_index; 114 | __u64 __pad2[3]; 115 | }; 116 | }; 117 | 118 | struct io_uring_cqe 119 | { 120 | __u64 user_data; 121 | __s32 res; 122 | __u32 flags; 123 | }; 124 | 125 | struct io_sqring_offsets 126 | { 127 | __u32 head; 128 | __u32 tail; 129 | __u32 ring_mask; 130 | __u32 ring_entries; 131 | __u32 flags; 132 | __u32 dropped; 133 | __u32 array; 134 | __u32 resv1; 135 | __u64 resv2; 136 | }; 137 | 138 | struct io_cqring_offsets 139 | { 140 | __u32 head; 141 | __u32 tail; 142 | __u32 ring_mask; 143 | __u32 ring_entries; 144 | __u32 overflow; 145 | __u32 cqes; 146 | __u64 resv[2]; 147 | }; 148 | 149 | struct io_uring_params 150 | { 151 | __u32 sq_entries; 152 | __u32 cq_entries; 153 | __u32 flags; 154 | __u32 sq_thread_cpu; 155 | __u32 sq_thread_idle; 156 | __u32 resv[5]; 157 | struct io_sqring_offsets sq_off; 158 | struct io_cqring_offsets cq_off; 159 | }; 160 | 161 | #define IORING_OP_POLL_ADD 6 162 | #define IORING_OP_POLL_REMOVE 7 163 | 164 | #define IORING_ENTER_GETEVENTS 0x01 165 | 166 | #define IORING_OFF_SQ_RING 0x00000000ULL 167 | #define IORING_OFF_CQ_RING 0x08000000ULL 168 | #define IORING_OFF_SQES 0x10000000ULL 169 | 170 | inline_size 171 | int 172 | evsys_io_uring_setup (unsigned entries, struct io_uring_params *params) 173 | { 174 | 
return ev_syscall2 (SYS_io_uring_setup, entries, params); 175 | } 176 | 177 | inline_size 178 | int 179 | evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz) 180 | { 181 | return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz); 182 | } 183 | 184 | /*****************************************************************************/ 185 | /* actual backed implementation */ 186 | 187 | /* we hope that volatile will make the compiler access this variables only once */ 188 | #define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name) 189 | #define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name) 190 | 191 | /* the index array */ 192 | #define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array)) 193 | 194 | /* the submit/completion queue entries */ 195 | #define EV_SQES ((struct io_uring_sqe *) iouring_sqes) 196 | #define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes)) 197 | 198 | static 199 | struct io_uring_sqe * 200 | iouring_sqe_get (EV_P) 201 | { 202 | unsigned tail = EV_SQ_VAR (tail); 203 | 204 | if (tail + 1 - EV_SQ_VAR (head) > EV_SQ_VAR (ring_entries)) 205 | { 206 | /* queue full, flush */ 207 | evsys_io_uring_enter (iouring_fd, iouring_to_submit, 0, 0, 0, 0); 208 | iouring_to_submit = 0; 209 | } 210 | 211 | assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries))); 212 | 213 | return EV_SQES + (tail & EV_SQ_VAR (ring_mask)); 214 | } 215 | 216 | inline_size 217 | struct io_uring_sqe * 218 | iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe) 219 | { 220 | unsigned idx = sqe - EV_SQES; 221 | 222 | EV_SQ_ARRAY [idx] = idx; 223 | ECB_MEMORY_FENCE_RELEASE; 224 | ++EV_SQ_VAR (tail); 225 | /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */ 226 | ++iouring_to_submit; 227 | } 
228 | 229 | /*****************************************************************************/ 230 | 231 | /* when the timerfd expires we simply note the fact, 232 | * as the purpose of the timerfd is to wake us up, nothing else. 233 | * the next iteration should re-set it. 234 | */ 235 | static void 236 | iouring_tfd_cb (EV_P_ struct ev_io *w, int revents) 237 | { 238 | iouring_tfd_to = EV_TSTAMP_HUGE; 239 | } 240 | 241 | static void 242 | iouring_epoll_cb (EV_P_ struct ev_io *w, int revents) 243 | { 244 | epoll_poll (EV_A_ 0); 245 | } 246 | 247 | /* called for full and partial cleanup */ 248 | ecb_cold 249 | static int 250 | iouring_internal_destroy (EV_P) 251 | { 252 | close (iouring_tfd); 253 | close (iouring_fd); 254 | 255 | if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size); 256 | if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size); 257 | if (iouring_sqes != MAP_FAILED) munmap (iouring_sqes , iouring_sqes_size ); 258 | 259 | if (ev_is_active (&iouring_epoll_w)) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_epoll_w); 260 | if (ev_is_active (&iouring_tfd_w )) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w ); 261 | } 262 | 263 | ecb_cold 264 | static int 265 | iouring_internal_init (EV_P) 266 | { 267 | struct io_uring_params params = { 0 }; 268 | 269 | iouring_to_submit = 0; 270 | 271 | iouring_tfd = -1; 272 | iouring_sq_ring = MAP_FAILED; 273 | iouring_cq_ring = MAP_FAILED; 274 | iouring_sqes = MAP_FAILED; 275 | 276 | for (;;) 277 | { 278 | iouring_fd = evsys_io_uring_setup (iouring_entries, ¶ms); 279 | 280 | if (iouring_fd >= 0) 281 | break; /* yippie */ 282 | 283 | if (errno != EINVAL) 284 | return -1; /* we failed */ 285 | 286 | /* EINVAL: lots of possible reasons, but maybe 287 | * it is because we hit the unqueryable hardcoded size limit 288 | */ 289 | 290 | /* we hit the limit already, give up */ 291 | if (iouring_max_entries) 292 | return -1; 293 | 294 | /* first time we hit EINVAL? 
assume we hit the limit, so go back and retry */ 295 | iouring_entries >>= 1; 296 | iouring_max_entries = iouring_entries; 297 | } 298 | 299 | iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned); 300 | iouring_cq_ring_size = params.cq_off.cqes + params.cq_entries * sizeof (struct io_uring_cqe); 301 | iouring_sqes_size = params.sq_entries * sizeof (struct io_uring_sqe); 302 | 303 | iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE, 304 | MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING); 305 | iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE, 306 | MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING); 307 | iouring_sqes = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE, 308 | MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES); 309 | 310 | if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED) 311 | return -1; 312 | 313 | iouring_sq_head = params.sq_off.head; 314 | iouring_sq_tail = params.sq_off.tail; 315 | iouring_sq_ring_mask = params.sq_off.ring_mask; 316 | iouring_sq_ring_entries = params.sq_off.ring_entries; 317 | iouring_sq_flags = params.sq_off.flags; 318 | iouring_sq_dropped = params.sq_off.dropped; 319 | iouring_sq_array = params.sq_off.array; 320 | 321 | iouring_cq_head = params.cq_off.head; 322 | iouring_cq_tail = params.cq_off.tail; 323 | iouring_cq_ring_mask = params.cq_off.ring_mask; 324 | iouring_cq_ring_entries = params.cq_off.ring_entries; 325 | iouring_cq_overflow = params.cq_off.overflow; 326 | iouring_cq_cqes = params.cq_off.cqes; 327 | 328 | iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC); 329 | 330 | if (iouring_tfd < 0) 331 | return iouring_tfd; 332 | 333 | iouring_tfd_to = EV_TSTAMP_HUGE; 334 | 335 | return 0; 336 | } 337 | 338 | ecb_cold 339 | static void 340 | iouring_fork (EV_P) 341 | { 342 | iouring_internal_destroy (EV_A); 343 | 344 | while (iouring_internal_init (EV_A) < 0) 345 | 
ev_syserr ("(libev) io_uring_setup"); 346 | 347 | /* forking epoll should also effectively unregister all fds from the backend */ 348 | epoll_fork (EV_A); 349 | /* epoll_fork already did this. hopefully */ 350 | /*fd_rearm_all (EV_A);*/ 351 | 352 | ev_io_stop (EV_A_ &iouring_epoll_w); 353 | ev_io_set (EV_A_ &iouring_epoll_w, backend_fd, EV_READ); 354 | ev_io_start (EV_A_ &iouring_epoll_w); 355 | 356 | ev_io_stop (EV_A_ &iouring_tfd_w); 357 | ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ); 358 | ev_io_start (EV_A_ &iouring_tfd_w); 359 | } 360 | 361 | /*****************************************************************************/ 362 | 363 | static void 364 | iouring_modify (EV_P_ int fd, int oev, int nev) 365 | { 366 | if (ecb_expect_false (anfds [fd].eflags)) 367 | { 368 | /* we handed this fd over to epoll, so undo this first */ 369 | /* we do it manually because the optimisations on epoll_modify won't do us any good */ 370 | epoll_ctl (iouring_fd, EPOLL_CTL_DEL, fd, 0); 371 | anfds [fd].eflags = 0; 372 | oev = 0; 373 | } 374 | 375 | if (oev) 376 | { 377 | /* we assume the sqe's are all "properly" initialised */ 378 | struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); 379 | sqe->opcode = IORING_OP_POLL_REMOVE; 380 | sqe->fd = fd; 381 | sqe->user_data = -1; 382 | iouring_sqe_submit (EV_A_ sqe); 383 | 384 | /* increment generation counter to avoid handling old events */ 385 | ++anfds [fd].egen; 386 | } 387 | 388 | if (nev) 389 | { 390 | struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); 391 | sqe->opcode = IORING_OP_POLL_ADD; 392 | sqe->fd = fd; 393 | sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); 394 | sqe->poll_events = 395 | (nev & EV_READ ? POLLIN : 0) 396 | | (nev & EV_WRITE ? 
POLLOUT : 0); 397 | iouring_sqe_submit (EV_A_ sqe); 398 | } 399 | } 400 | 401 | inline_size 402 | void 403 | iouring_tfd_update (EV_P_ ev_tstamp timeout) 404 | { 405 | ev_tstamp tfd_to = mn_now + timeout; 406 | 407 | /* we assume there will be many iterations per timer change, so 408 | * we only re-set the timerfd when we have to because its expiry 409 | * is too late. 410 | */ 411 | if (ecb_expect_false (tfd_to < iouring_tfd_to)) 412 | { 413 | struct itimerspec its; 414 | 415 | iouring_tfd_to = tfd_to; 416 | EV_TS_SET (its.it_interval, 0.); 417 | EV_TS_SET (its.it_value, tfd_to); 418 | 419 | if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0) 420 | assert (("libev: iouring timerfd_settime failed", 0)); 421 | } 422 | } 423 | 424 | inline_size 425 | void 426 | iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe) 427 | { 428 | int fd = cqe->user_data & 0xffffffffU; 429 | uint32_t gen = cqe->user_data >> 32; 430 | int res = cqe->res; 431 | 432 | /* ignore fd removal events, if there are any. TODO: verify */ 433 | if (cqe->user_data == (__u64)-1) 434 | abort ();//D 435 | 436 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); 437 | 438 | /* documentation lies, of course. the result value is NOT like 439 | * normal syscalls, but like linux raw syscalls, i.e. negative 440 | * error numbers. fortunate, as otherwise there would be no way 441 | * to get error codes at all. still, why not document this? 442 | */ 443 | 444 | /* ignore event if generation doesn't match */ 445 | /* this should actually be very rare */ 446 | if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) 447 | return; 448 | 449 | if (ecb_expect_false (res < 0)) 450 | { 451 | if (res == -EINVAL) 452 | { 453 | /* we assume this error code means the fd/poll combination is buggy 454 | * and fall back to epoll. 455 | * this error code might also indicate a bug, but the kernel doesn't 456 | * distinguish between those two conditions, so... sigh... 
457 | */ 458 | 459 | epoll_modify (EV_A_ fd, 0, anfds [fd].events); 460 | } 461 | else if (res == -EBADF) 462 | { 463 | assert (("libev: event loop rejected bad fd", res != -EBADF)); 464 | fd_kill (EV_A_ fd); 465 | } 466 | else 467 | { 468 | errno = -res; 469 | ev_syserr ("(libev) IORING_OP_POLL_ADD"); 470 | } 471 | 472 | return; 473 | } 474 | 475 | /* feed events, we do not expect or handle POLLNVAL */ 476 | fd_event ( 477 | EV_A_ 478 | fd, 479 | (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 480 | | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 481 | ); 482 | 483 | /* io_uring is oneshot, so we need to re-arm the fd next iteration */ 484 | /* this also means we usually have to do at least one syscall per iteration */ 485 | anfds [fd].events = 0; 486 | fd_change (EV_A_ fd, EV_ANFD_REIFY); 487 | } 488 | 489 | /* called when the event queue overflows */ 490 | ecb_cold 491 | static void 492 | iouring_overflow (EV_P) 493 | { 494 | /* we have two options, resize the queue (by tearing down 495 | * everything and recreating it, or living with it 496 | * and polling. 497 | * we implement this by resizing the queue, and, if that fails, 498 | * we just recreate the state on every failure, which 499 | * kind of is a very inefficient poll. 500 | * one danger is, due to the bias toward lower fds, 501 | * we will only really get events for those, so 502 | * maybe we need a poll() fallback, after all. 503 | */ 504 | /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */ 505 | 506 | fd_rearm_all (EV_A); 507 | 508 | /* we double the size until we hit the hard-to-probe maximum */ 509 | if (!iouring_max_entries) 510 | { 511 | iouring_entries <<= 1; 512 | iouring_fork (EV_A); 513 | } 514 | else 515 | { 516 | /* we hit the kernel limit, we should fall back to something else. 517 | * we can either poll() a few times and hope for the best, 518 | * poll always, or switch to epoll. 519 | * since we use epoll anyways, go epoll. 
520 | */ 521 | 522 | iouring_internal_destroy (EV_A); 523 | 524 | /* this should make it so that on return, we don'T call any uring functions */ 525 | iouring_to_submit = 0; 526 | 527 | for (;;) 528 | { 529 | backend = epoll_init (EV_A_ 0); 530 | 531 | if (backend) 532 | break; 533 | 534 | ev_syserr ("(libev) iouring switch to epoll"); 535 | } 536 | } 537 | } 538 | 539 | /* handle any events in the completion queue, return true if there were any */ 540 | static int 541 | iouring_handle_cq (EV_P) 542 | { 543 | unsigned head, tail, mask; 544 | 545 | head = EV_CQ_VAR (head); 546 | ECB_MEMORY_FENCE_ACQUIRE; 547 | tail = EV_CQ_VAR (tail); 548 | 549 | if (head == tail) 550 | return 0; 551 | 552 | /* it can only overflow if we have events, yes, yes? */ 553 | if (ecb_expect_false (EV_CQ_VAR (overflow))) 554 | { 555 | iouring_overflow (EV_A); 556 | return 1; 557 | } 558 | 559 | mask = EV_CQ_VAR (ring_mask); 560 | 561 | do 562 | iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]); 563 | while (head != tail); 564 | 565 | EV_CQ_VAR (head) = head; 566 | ECB_MEMORY_FENCE_RELEASE; 567 | 568 | return 1; 569 | } 570 | 571 | static void 572 | iouring_poll (EV_P_ ev_tstamp timeout) 573 | { 574 | /* if we have events, no need for extra syscalls, but we might have to queue events */ 575 | if (iouring_handle_cq (EV_A)) 576 | timeout = EV_TS_CONST (0.); 577 | else 578 | /* no events, so maybe wait for some */ 579 | iouring_tfd_update (EV_A_ timeout); 580 | 581 | /* only enter the kernel if we have something to submit, or we need to wait */ 582 | if (timeout || iouring_to_submit) 583 | { 584 | int res; 585 | 586 | EV_RELEASE_CB; 587 | 588 | res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1, 589 | timeout > EV_TS_CONST (0.) ? 
IORING_ENTER_GETEVENTS : 0, 0, 0); 590 | iouring_to_submit = 0; 591 | 592 | EV_ACQUIRE_CB; 593 | 594 | if (ecb_expect_false (res < 0)) 595 | if (errno == EINTR) 596 | /* ignore */; 597 | else 598 | ev_syserr ("(libev) iouring setup"); 599 | else 600 | iouring_handle_cq (EV_A); 601 | } 602 | } 603 | 604 | inline_size 605 | int 606 | iouring_init (EV_P_ int flags) 607 | { 608 | if (!epoll_init (EV_A_ 0)) 609 | return 0; 610 | 611 | iouring_entries = IOURING_INIT_ENTRIES; 612 | iouring_max_entries = 0; 613 | 614 | if (iouring_internal_init (EV_A) < 0) 615 | { 616 | iouring_internal_destroy (EV_A); 617 | return 0; 618 | } 619 | 620 | ev_io_init (&iouring_epoll_w, iouring_epoll_cb, backend_fd, EV_READ); 621 | ev_set_priority (&iouring_epoll_w, EV_MAXPRI); 622 | 623 | ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ); 624 | ev_set_priority (&iouring_tfd_w, EV_MAXPRI); 625 | 626 | ev_io_start (EV_A_ &iouring_epoll_w); 627 | ev_unref (EV_A); /* watcher should not keep loop alive */ 628 | 629 | ev_io_start (EV_A_ &iouring_tfd_w); 630 | ev_unref (EV_A); /* watcher should not keep loop alive */ 631 | 632 | backend_modify = iouring_modify; 633 | backend_poll = iouring_poll; 634 | 635 | return EVBACKEND_IOURING; 636 | } 637 | 638 | inline_size 639 | void 640 | iouring_destroy (EV_P) 641 | { 642 | iouring_internal_destroy (EV_A); 643 | epoll_destroy (EV_A); 644 | } 645 | 646 | -------------------------------------------------------------------------------- /libev/ev_kqueue.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev kqueue backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2016,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. 
Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 
38 | */ 39 | 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | 46 | inline_speed 47 | void 48 | kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) 49 | { 50 | ++kqueue_changecnt; 51 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, array_needsize_noinit); 52 | 53 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); 54 | } 55 | 56 | /* OS X at least needs this */ 57 | #ifndef EV_ENABLE 58 | # define EV_ENABLE 0 59 | #endif 60 | #ifndef NOTE_EOF 61 | # define NOTE_EOF 0 62 | #endif 63 | 64 | static void 65 | kqueue_modify (EV_P_ int fd, int oev, int nev) 66 | { 67 | if (oev != nev) 68 | { 69 | if (oev & EV_READ) 70 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0); 71 | 72 | if (oev & EV_WRITE) 73 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0); 74 | } 75 | 76 | /* to detect close/reopen reliably, we have to re-add */ 77 | /* event requests even when oev == nev */ 78 | 79 | if (nev & EV_READ) 80 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); 81 | 82 | if (nev & EV_WRITE) 83 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); 84 | } 85 | 86 | static void 87 | kqueue_poll (EV_P_ ev_tstamp timeout) 88 | { 89 | int res, i; 90 | struct timespec ts; 91 | 92 | /* need to resize so there is enough space for errors */ 93 | if (kqueue_changecnt > kqueue_eventmax) 94 | { 95 | ev_free (kqueue_events); 96 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); 97 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); 98 | } 99 | 100 | EV_RELEASE_CB; 101 | EV_TS_SET (ts, timeout); 102 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); 103 | EV_ACQUIRE_CB; 104 | kqueue_changecnt = 0; 105 | 106 | if (ecb_expect_false (res < 0)) 107 | { 108 | if (errno != EINTR) 109 | ev_syserr ("(libev) kqueue kevent"); 
110 | 111 | return; 112 | } 113 | 114 | for (i = 0; i < res; ++i) 115 | { 116 | int fd = kqueue_events [i].ident; 117 | 118 | if (ecb_expect_false (kqueue_events [i].flags & EV_ERROR)) 119 | { 120 | int err = kqueue_events [i].data; 121 | 122 | /* we are only interested in errors for fds that we are interested in :) */ 123 | if (anfds [fd].events) 124 | { 125 | if (err == ENOENT) /* resubmit changes on ENOENT */ 126 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); 127 | else if (err == EBADF) /* on EBADF, we re-check the fd */ 128 | { 129 | if (fd_valid (fd)) 130 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); 131 | else 132 | { 133 | assert (("libev: kqueue found invalid fd", 0)); 134 | fd_kill (EV_A_ fd); 135 | } 136 | } 137 | else /* on all other errors, we error out on the fd */ 138 | { 139 | assert (("libev: kqueue found invalid fd", 0)); 140 | fd_kill (EV_A_ fd); 141 | } 142 | } 143 | } 144 | else 145 | fd_event ( 146 | EV_A_ 147 | fd, 148 | kqueue_events [i].filter == EVFILT_READ ? EV_READ 149 | : kqueue_events [i].filter == EVFILT_WRITE ? 
EV_WRITE 150 | : 0 151 | ); 152 | } 153 | 154 | if (ecb_expect_false (res == kqueue_eventmax)) 155 | { 156 | ev_free (kqueue_events); 157 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); 158 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); 159 | } 160 | } 161 | 162 | inline_size 163 | int 164 | kqueue_init (EV_P_ int flags) 165 | { 166 | /* initialize the kernel queue */ 167 | kqueue_fd_pid = getpid (); 168 | if ((backend_fd = kqueue ()) < 0) 169 | return 0; 170 | 171 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ 172 | 173 | backend_mintime = EV_TS_CONST (1e-9); /* apparently, they did the right thing in freebsd */ 174 | backend_modify = kqueue_modify; 175 | backend_poll = kqueue_poll; 176 | 177 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ 178 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); 179 | 180 | kqueue_changes = 0; 181 | kqueue_changemax = 0; 182 | kqueue_changecnt = 0; 183 | 184 | return EVBACKEND_KQUEUE; 185 | } 186 | 187 | inline_size 188 | void 189 | kqueue_destroy (EV_P) 190 | { 191 | ev_free (kqueue_events); 192 | ev_free (kqueue_changes); 193 | } 194 | 195 | inline_size 196 | void 197 | kqueue_fork (EV_P) 198 | { 199 | /* some BSD kernels don't just destroy the kqueue itself, 200 | * but also close the fd, which isn't documented, and 201 | * impossible to support properly. 202 | * we remember the pid of the kqueue call and only close 203 | * the fd if the pid is still the same. 204 | * this leaks fds on sane kernels, but BSD interfaces are 205 | * notoriously buggy and rarely get fixed. 
206 | */ 207 | pid_t newpid = getpid (); 208 | 209 | if (newpid == kqueue_fd_pid) 210 | close (backend_fd); 211 | 212 | kqueue_fd_pid = newpid; 213 | while ((backend_fd = kqueue ()) < 0) 214 | ev_syserr ("(libev) kqueue"); 215 | 216 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 217 | 218 | /* re-register interest in fds */ 219 | fd_rearm_all (EV_A); 220 | } 221 | 222 | /* sys/event.h defines EV_ERROR */ 223 | #undef EV_ERROR 224 | 225 | -------------------------------------------------------------------------------- /libev/ev_linuxaio.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev linux aio fd activity backend 3 | * 4 | * Copyright (c) 2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* 41 | * general notes about linux aio: 42 | * 43 | * a) at first, the linux aio IOCB_CMD_POLL functionality introduced in 44 | * 4.18 looks too good to be true: both watchers and events can be 45 | * batched, and events can even be handled in userspace using 46 | * a ring buffer shared with the kernel. watchers can be canceled 47 | * regardless of whether the fd has been closed. no problems with fork. 48 | * ok, the ring buffer is 200% undocumented (there isn't even a 49 | * header file), but otherwise, it's pure bliss! 50 | * b) ok, watchers are one-shot, so you have to re-arm active ones 51 | * on every iteration. 
so much for syscall-less event handling, 52 | * but at least these re-arms can be batched, no big deal, right? 53 | * c) well, linux as usual: the documentation lies to you: io_submit 54 | * sometimes returns EINVAL because the kernel doesn't feel like 55 | * handling your poll mask - ttys can be polled for POLLOUT, 56 | * POLLOUT|POLLIN, but polling for POLLIN fails. just great, 57 | * so we have to fall back to something else (hello, epoll), 58 | * but at least the fallback can be slow, because these are 59 | * exceptional cases, right? 60 | * d) hmm, you have to tell the kernel the maximum number of watchers 61 | * you want to queue when initialising the aio context. but of 62 | * course the real limit is magically calculated in the kernel, and 63 | * is often higher then we asked for. so we just have to destroy 64 | * the aio context and re-create it a bit larger if we hit the limit. 65 | * (starts to remind you of epoll? well, it's a bit more deterministic 66 | * and less gambling, but still ugly as hell). 67 | * e) that's when you find out you can also hit an arbitrary system-wide 68 | * limit. or the kernel simply doesn't want to handle your watchers. 69 | * what the fuck do we do then? you guessed it, in the middle 70 | * of event handling we have to switch to 100% epoll polling. and 71 | * that better is as fast as normal epoll polling, so you practically 72 | * have to use the normal epoll backend with all its quirks. 73 | * f) end result of this train wreck: it inherits all the disadvantages 74 | * from epoll, while adding a number on its own. why even bother to use 75 | * it? because if conditions are right and your fds are supported and you 76 | * don't hit a limit, this backend is actually faster, doesn't gamble with 77 | * your fds, batches watchers and events and doesn't require costly state 78 | * recreates. well, until it does. 79 | * g) all of this makes this backend use almost twice as much code as epoll. 
80 | * which in turn uses twice as much code as poll. and that's not counting 81 | * the fact that this backend also depends on the epoll backend, making 82 | * it three times as much code as poll, or kqueue. 83 | * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now 84 | * it's clear that whatever linux comes up with is far, far, far worse. 85 | */ 86 | 87 | #include /* actually linux/time.h, but we must assume they are compatible */ 88 | #include 89 | #include 90 | 91 | /*****************************************************************************/ 92 | /* syscall wrapdadoop - this section has the raw api/abi definitions */ 93 | 94 | #include /* no glibc wrappers */ 95 | 96 | /* aio_abi.h is not versioned in any way, so we cannot test for its existence */ 97 | #define IOCB_CMD_POLL 5 98 | 99 | /* taken from linux/fs/aio.c. yup, that's a .c file. 100 | * not only is this totally undocumented, not even the source code 101 | * can tell you what the future semantics of compat_features and 102 | * incompat_features are, or what header_length actually is for. 103 | */ 104 | #define AIO_RING_MAGIC 0xa10a10a1 105 | #define EV_AIO_RING_INCOMPAT_FEATURES 0 106 | struct aio_ring 107 | { 108 | unsigned id; /* kernel internal index number */ 109 | unsigned nr; /* number of io_events */ 110 | unsigned head; /* Written to by userland or by kernel. 
*/ 111 | unsigned tail; 112 | 113 | unsigned magic; 114 | unsigned compat_features; 115 | unsigned incompat_features; 116 | unsigned header_length; /* size of aio_ring */ 117 | 118 | struct io_event io_events[0]; 119 | }; 120 | 121 | inline_size 122 | int 123 | evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp) 124 | { 125 | return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp); 126 | } 127 | 128 | inline_size 129 | int 130 | evsys_io_destroy (aio_context_t ctx_id) 131 | { 132 | return ev_syscall1 (SYS_io_destroy, ctx_id); 133 | } 134 | 135 | inline_size 136 | int 137 | evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[]) 138 | { 139 | return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp); 140 | } 141 | 142 | inline_size 143 | int 144 | evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result) 145 | { 146 | return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result); 147 | } 148 | 149 | inline_size 150 | int 151 | evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout) 152 | { 153 | return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout); 154 | } 155 | 156 | /*****************************************************************************/ 157 | /* actual backed implementation */ 158 | 159 | ecb_cold 160 | static int 161 | linuxaio_nr_events (EV_P) 162 | { 163 | /* we start with 16 iocbs and incraese from there 164 | * that's tiny, but the kernel has a rather low system-wide 165 | * limit that can be reached quickly, so let's be parsimonious 166 | * with this resource. 167 | * Rest assured, the kernel generously rounds up small and big numbers 168 | * in different ways (but doesn't seem to charge you for it). 169 | * The 15 here is because the kernel usually has a power of two as aio-max-nr, 170 | * and this helps to take advantage of that limit. 171 | */ 172 | 173 | /* we try to fill 4kB pages exactly. 
174 | * the ring buffer header is 32 bytes, every io event is 32 bytes. 175 | * the kernel takes the io requests number, doubles it, adds 2 176 | * and adds the ring buffer. 177 | * the way we use this is by starting low, and then roughly doubling the 178 | * size each time we hit a limit. 179 | */ 180 | 181 | int requests = 15 << linuxaio_iteration; 182 | int one_page = (4096 183 | / sizeof (struct io_event) ) / 2; /* how many fit into one page */ 184 | int first_page = ((4096 - sizeof (struct aio_ring)) 185 | / sizeof (struct io_event) - 2) / 2; /* how many fit into the first page */ 186 | 187 | /* if everything fits into one page, use count exactly */ 188 | if (requests > first_page) 189 | /* otherwise, round down to full pages and add the first page */ 190 | requests = requests / one_page * one_page + first_page; 191 | 192 | return requests; 193 | } 194 | 195 | /* we use our own wrapper structure in case we ever want to do something "clever" */ 196 | typedef struct aniocb 197 | { 198 | struct iocb io; 199 | /*int inuse;*/ 200 | } *ANIOCBP; 201 | 202 | inline_size 203 | void 204 | linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count) 205 | { 206 | while (count--) 207 | { 208 | /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */ 209 | ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb)); 210 | 211 | /* full zero initialise is probably not required at the moment, but 212 | * this is not well documented, so we better do it. 
213 | */ 214 | memset (iocb, 0, sizeof (*iocb)); 215 | 216 | iocb->io.aio_lio_opcode = IOCB_CMD_POLL; 217 | iocb->io.aio_fildes = offset; 218 | 219 | base [offset++] = iocb; 220 | } 221 | } 222 | 223 | ecb_cold 224 | static void 225 | linuxaio_free_iocbp (EV_P) 226 | { 227 | while (linuxaio_iocbpmax--) 228 | ev_free (linuxaio_iocbps [linuxaio_iocbpmax]); 229 | 230 | linuxaio_iocbpmax = 0; /* next resize will completely reallocate the array, at some overhead */ 231 | } 232 | 233 | static void 234 | linuxaio_modify (EV_P_ int fd, int oev, int nev) 235 | { 236 | array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp); 237 | ANIOCBP iocb = linuxaio_iocbps [fd]; 238 | ANFD *anfd = &anfds [fd]; 239 | 240 | if (ecb_expect_false (iocb->io.aio_reqprio < 0)) 241 | { 242 | /* we handed this fd over to epoll, so undo this first */ 243 | /* we do it manually because the optimisations on epoll_modify won't do us any good */ 244 | epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0); 245 | anfd->emask = 0; 246 | iocb->io.aio_reqprio = 0; 247 | } 248 | else if (ecb_expect_false (iocb->io.aio_buf)) 249 | { 250 | /* iocb active, so cancel it first before resubmit */ 251 | /* this assumes we only ever get one call per fd per loop iteration */ 252 | for (;;) 253 | { 254 | /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */ 255 | if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0)) 256 | break; 257 | 258 | if (ecb_expect_true (errno == EINPROGRESS)) 259 | break; 260 | 261 | /* the EINPROGRESS test is for nicer error message. clumsy. */ 262 | if (errno != EINTR) 263 | { 264 | assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS)); 265 | break; 266 | } 267 | } 268 | 269 | /* increment generation counter to avoid handling old events */ 270 | ++anfd->egen; 271 | } 272 | 273 | iocb->io.aio_buf = 274 | (nev & EV_READ ? 
POLLIN : 0) 275 | | (nev & EV_WRITE ? POLLOUT : 0); 276 | 277 | if (nev) 278 | { 279 | iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32); 280 | 281 | /* queue iocb up for io_submit */ 282 | /* this assumes we only ever get one call per fd per loop iteration */ 283 | ++linuxaio_submitcnt; 284 | array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit); 285 | linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io; 286 | } 287 | } 288 | 289 | static void 290 | linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents) 291 | { 292 | epoll_poll (EV_A_ 0); 293 | } 294 | 295 | inline_speed 296 | void 297 | linuxaio_fd_rearm (EV_P_ int fd) 298 | { 299 | anfds [fd].events = 0; 300 | linuxaio_iocbps [fd]->io.aio_buf = 0; 301 | fd_change (EV_A_ fd, EV_ANFD_REIFY); 302 | } 303 | 304 | static void 305 | linuxaio_parse_events (EV_P_ struct io_event *ev, int nr) 306 | { 307 | while (nr) 308 | { 309 | int fd = ev->data & 0xffffffff; 310 | uint32_t gen = ev->data >> 32; 311 | int res = ev->res; 312 | 313 | assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax)); 314 | 315 | /* only accept events if generation counter matches */ 316 | if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen)) 317 | { 318 | /* feed events, we do not expect or handle POLLNVAL */ 319 | fd_event ( 320 | EV_A_ 321 | fd, 322 | (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 323 | | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 324 | ); 325 | 326 | /* linux aio is oneshot: rearm fd. 
TODO: this does more work than strictly needed */ 327 | linuxaio_fd_rearm (EV_A_ fd); 328 | } 329 | 330 | --nr; 331 | ++ev; 332 | } 333 | } 334 | 335 | /* get any events from ring buffer, return true if any were handled */ 336 | static int 337 | linuxaio_get_events_from_ring (EV_P) 338 | { 339 | struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; 340 | unsigned head, tail; 341 | 342 | /* the kernel reads and writes both of these variables, */ 343 | /* as a C extension, we assume that volatile use here */ 344 | /* both makes reads atomic and once-only */ 345 | head = *(volatile unsigned *)&ring->head; 346 | ECB_MEMORY_FENCE_ACQUIRE; 347 | tail = *(volatile unsigned *)&ring->tail; 348 | 349 | if (head == tail) 350 | return 0; 351 | 352 | /* parse all available events, but only once, to avoid starvation */ 353 | if (ecb_expect_true (tail > head)) /* normal case around */ 354 | linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head); 355 | else /* wrapped around */ 356 | { 357 | linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head); 358 | linuxaio_parse_events (EV_A_ ring->io_events, tail); 359 | } 360 | 361 | ECB_MEMORY_FENCE_RELEASE; 362 | /* as an extension to C, we hope that the volatile will make this atomic and once-only */ 363 | *(volatile unsigned *)&ring->head = tail; 364 | 365 | return 1; 366 | } 367 | 368 | inline_size 369 | int 370 | linuxaio_ringbuf_valid (EV_P) 371 | { 372 | struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; 373 | 374 | return ecb_expect_true (ring->magic == AIO_RING_MAGIC) 375 | && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES 376 | && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? 
*/ 377 | } 378 | 379 | /* read at least one event from kernel, or timeout */ 380 | inline_size 381 | void 382 | linuxaio_get_events (EV_P_ ev_tstamp timeout) 383 | { 384 | struct timespec ts; 385 | struct io_event ioev[8]; /* 256 octet stack space */ 386 | int want = 1; /* how many events to request */ 387 | int ringbuf_valid = linuxaio_ringbuf_valid (EV_A); 388 | 389 | if (ecb_expect_true (ringbuf_valid)) 390 | { 391 | /* if the ring buffer has any events, we don't wait or call the kernel at all */ 392 | if (linuxaio_get_events_from_ring (EV_A)) 393 | return; 394 | 395 | /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */ 396 | if (!timeout) 397 | return; 398 | } 399 | else 400 | /* no ringbuffer, request slightly larger batch */ 401 | want = sizeof (ioev) / sizeof (ioev [0]); 402 | 403 | /* no events, so wait for some 404 | * for fairness reasons, we do this in a loop, to fetch all events 405 | */ 406 | for (;;) 407 | { 408 | int res; 409 | 410 | EV_RELEASE_CB; 411 | 412 | EV_TS_SET (ts, timeout); 413 | res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts); 414 | 415 | EV_ACQUIRE_CB; 416 | 417 | if (res < 0) 418 | if (errno == EINTR) 419 | /* ignored, retry */; 420 | else 421 | ev_syserr ("(libev) linuxaio io_getevents"); 422 | else if (res) 423 | { 424 | /* at least one event available, handle them */ 425 | linuxaio_parse_events (EV_A_ ioev, res); 426 | 427 | if (ecb_expect_true (ringbuf_valid)) 428 | { 429 | /* if we have a ring buffer, handle any remaining events in it */ 430 | linuxaio_get_events_from_ring (EV_A); 431 | 432 | /* at this point, we should have handled all outstanding events */ 433 | break; 434 | } 435 | else if (res < want) 436 | /* otherwise, if there were fewere events than we wanted, we assume there are no more */ 437 | break; 438 | } 439 | else 440 | break; /* no events from the kernel, we are done */ 441 | 442 | timeout = EV_TS_CONST (0.); /* only wait in the first iteration */ 443 | } 444 | } 
445 | 446 | inline_size 447 | int 448 | linuxaio_io_setup (EV_P) 449 | { 450 | linuxaio_ctx = 0; 451 | return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx); 452 | } 453 | 454 | static void 455 | linuxaio_poll (EV_P_ ev_tstamp timeout) 456 | { 457 | int submitted; 458 | 459 | /* first phase: submit new iocbs */ 460 | 461 | /* io_submit might return less than the requested number of iocbs */ 462 | /* this is, afaics, only because of errors, but we go by the book and use a loop, */ 463 | /* which allows us to pinpoint the erroneous iocb */ 464 | for (submitted = 0; submitted < linuxaio_submitcnt; ) 465 | { 466 | int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted); 467 | 468 | if (ecb_expect_false (res < 0)) 469 | if (errno == EINVAL) 470 | { 471 | /* This happens for unsupported fds, officially, but in my testing, 472 | * also randomly happens for supported fds. We fall back to good old 473 | * poll() here, under the assumption that this is a very rare case. 474 | * See https://lore.kernel.org/patchwork/patch/1047453/ to see 475 | * discussion about such a case (ttys) where polling for POLLIN 476 | * fails but POLLIN|POLLOUT works. 477 | */ 478 | struct iocb *iocb = linuxaio_submits [submitted]; 479 | epoll_modify (EV_A_ iocb->aio_fildes, 0, anfds [iocb->aio_fildes].events); 480 | iocb->aio_reqprio = -1; /* mark iocb as epoll */ 481 | 482 | res = 1; /* skip this iocb - another iocb, another chance */ 483 | } 484 | else if (errno == EAGAIN) 485 | { 486 | /* This happens when the ring buffer is full, or some other shit we 487 | * don't know and isn't documented. Most likely because we have too 488 | * many requests and linux aio can't be assed to handle them. 489 | * In this case, we try to allocate a larger ring buffer, freeing 490 | * ours first. This might fail, in which case we have to fall back to 100% 491 | * epoll. 492 | * God, how I hate linux not getting its act together. Ever. 
493 | */ 494 | evsys_io_destroy (linuxaio_ctx); 495 | linuxaio_submitcnt = 0; 496 | 497 | /* rearm all fds with active iocbs */ 498 | { 499 | int fd; 500 | for (fd = 0; fd < linuxaio_iocbpmax; ++fd) 501 | if (linuxaio_iocbps [fd]->io.aio_buf) 502 | linuxaio_fd_rearm (EV_A_ fd); 503 | } 504 | 505 | ++linuxaio_iteration; 506 | if (linuxaio_io_setup (EV_A) < 0) 507 | { 508 | /* TODO: rearm all and recreate epoll backend from scratch */ 509 | /* TODO: might be more prudent? */ 510 | 511 | /* to bad, we can't get a new aio context, go 100% epoll */ 512 | linuxaio_free_iocbp (EV_A); 513 | ev_io_stop (EV_A_ &linuxaio_epoll_w); 514 | ev_ref (EV_A); 515 | linuxaio_ctx = 0; 516 | 517 | backend = EVBACKEND_EPOLL; 518 | backend_modify = epoll_modify; 519 | backend_poll = epoll_poll; 520 | } 521 | 522 | timeout = EV_TS_CONST (0.); 523 | /* it's easiest to handle this mess in another iteration */ 524 | return; 525 | } 526 | else if (errno == EBADF) 527 | { 528 | assert (("libev: event loop rejected bad fd", errno != EBADF)); 529 | fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes); 530 | 531 | res = 1; /* skip this iocb */ 532 | } 533 | else if (errno == EINTR) /* not seen in reality, not documented */ 534 | res = 0; /* silently ignore and retry */ 535 | else 536 | { 537 | ev_syserr ("(libev) linuxaio io_submit"); 538 | res = 0; 539 | } 540 | 541 | submitted += res; 542 | } 543 | 544 | linuxaio_submitcnt = 0; 545 | 546 | /* second phase: fetch and parse events */ 547 | 548 | linuxaio_get_events (EV_A_ timeout); 549 | } 550 | 551 | inline_size 552 | int 553 | linuxaio_init (EV_P_ int flags) 554 | { 555 | /* would be great to have a nice test for IOCB_CMD_POLL instead */ 556 | /* also: test some semi-common fd types, such as files and ttys in recommended_backends */ 557 | /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */ 558 | if (ev_linux_version () < 0x041300) 559 | return 0; 560 | 561 | if (!epoll_init (EV_A_ 0)) 562 | return 0; 563 | 564 | 
linuxaio_iteration = 0; 565 | 566 | if (linuxaio_io_setup (EV_A) < 0) 567 | { 568 | epoll_destroy (EV_A); 569 | return 0; 570 | } 571 | 572 | ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ); 573 | ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI); 574 | ev_io_start (EV_A_ &linuxaio_epoll_w); 575 | ev_unref (EV_A); /* watcher should not keep loop alive */ 576 | 577 | backend_modify = linuxaio_modify; 578 | backend_poll = linuxaio_poll; 579 | 580 | linuxaio_iocbpmax = 0; 581 | linuxaio_iocbps = 0; 582 | 583 | linuxaio_submits = 0; 584 | linuxaio_submitmax = 0; 585 | linuxaio_submitcnt = 0; 586 | 587 | return EVBACKEND_LINUXAIO; 588 | } 589 | 590 | inline_size 591 | void 592 | linuxaio_destroy (EV_P) 593 | { 594 | epoll_destroy (EV_A); 595 | linuxaio_free_iocbp (EV_A); 596 | evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */ 597 | } 598 | 599 | ecb_cold 600 | static void 601 | linuxaio_fork (EV_P) 602 | { 603 | linuxaio_submitcnt = 0; /* all pointers were invalidated */ 604 | linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */ 605 | evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */ 606 | 607 | linuxaio_iteration = 0; /* we start over in the child */ 608 | 609 | while (linuxaio_io_setup (EV_A) < 0) 610 | ev_syserr ("(libev) linuxaio io_setup"); 611 | 612 | /* forking epoll should also effectively unregister all fds from the backend */ 613 | epoll_fork (EV_A); 614 | /* epoll_fork already did this. 
hopefully */ 615 | /*fd_rearm_all (EV_A);*/ 616 | 617 | ev_io_stop (EV_A_ &linuxaio_epoll_w); 618 | ev_io_set (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ); 619 | ev_io_start (EV_A_ &linuxaio_epoll_w); 620 | } 621 | 622 | -------------------------------------------------------------------------------- /libev/ev_poll.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev poll fd activity backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2016,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #include 41 | 42 | inline_size 43 | void 44 | array_needsize_pollidx (int *base, int offset, int count) 45 | { 46 | /* using memset (.., -1, ...) is tempting, we we try 47 | * to be ultraportable 48 | */ 49 | base += offset; 50 | while (count--) 51 | *base++ = -1; 52 | } 53 | 54 | static void 55 | poll_modify (EV_P_ int fd, int oev, int nev) 56 | { 57 | int idx; 58 | 59 | if (oev == nev) 60 | return; 61 | 62 | array_needsize (int, pollidxs, pollidxmax, fd + 1, array_needsize_pollidx); 63 | 64 | idx = pollidxs [fd]; 65 | 66 | if (idx < 0) /* need to allocate a new pollfd */ 67 | { 68 | pollidxs [fd] = idx = pollcnt++; 69 | array_needsize (struct pollfd, polls, pollmax, pollcnt, array_needsize_noinit); 70 | polls [idx].fd = fd; 71 | } 72 | 73 | assert (polls [idx].fd == fd); 74 | 75 | if (nev) 76 | polls [idx].events = 77 | (nev & EV_READ ? POLLIN : 0) 78 | | (nev & EV_WRITE ? 
POLLOUT : 0); 79 | else /* remove pollfd */ 80 | { 81 | pollidxs [fd] = -1; 82 | 83 | if (ecb_expect_true (idx < --pollcnt)) 84 | { 85 | polls [idx] = polls [pollcnt]; 86 | pollidxs [polls [idx].fd] = idx; 87 | } 88 | } 89 | } 90 | 91 | static void 92 | poll_poll (EV_P_ ev_tstamp timeout) 93 | { 94 | struct pollfd *p; 95 | int res; 96 | 97 | EV_RELEASE_CB; 98 | res = poll (polls, pollcnt, EV_TS_TO_MSEC (timeout)); 99 | EV_ACQUIRE_CB; 100 | 101 | if (ecb_expect_false (res < 0)) 102 | { 103 | if (errno == EBADF) 104 | fd_ebadf (EV_A); 105 | else if (errno == ENOMEM && !syserr_cb) 106 | fd_enomem (EV_A); 107 | else if (errno != EINTR) 108 | ev_syserr ("(libev) poll"); 109 | } 110 | else 111 | for (p = polls; res; ++p) 112 | { 113 | assert (("libev: poll returned illegal result, broken BSD kernel?", p < polls + pollcnt)); 114 | 115 | if (ecb_expect_false (p->revents)) /* this expect is debatable */ 116 | { 117 | --res; 118 | 119 | if (ecb_expect_false (p->revents & POLLNVAL)) 120 | { 121 | assert (("libev: poll found invalid fd in poll set", 0)); 122 | fd_kill (EV_A_ p->fd); 123 | } 124 | else 125 | fd_event ( 126 | EV_A_ 127 | p->fd, 128 | (p->revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 129 | | (p->revents & (POLLIN | POLLERR | POLLHUP) ? 
EV_READ : 0) 130 | ); 131 | } 132 | } 133 | } 134 | 135 | inline_size 136 | int 137 | poll_init (EV_P_ int flags) 138 | { 139 | backend_mintime = EV_TS_CONST (1e-3); 140 | backend_modify = poll_modify; 141 | backend_poll = poll_poll; 142 | 143 | pollidxs = 0; pollidxmax = 0; 144 | polls = 0; pollmax = 0; pollcnt = 0; 145 | 146 | return EVBACKEND_POLL; 147 | } 148 | 149 | inline_size 150 | void 151 | poll_destroy (EV_P) 152 | { 153 | ev_free (pollidxs); 154 | ev_free (polls); 155 | } 156 | 157 | -------------------------------------------------------------------------------- /libev/ev_port.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev solaris event port backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | /* useful reading: 41 | * 42 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6268715 (random results) 43 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6455223 (just totally broken) 44 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6873782 (manpage ETIME) 45 | * http://bugs.opensolaris.org/view_bug.do?bug_id=6874410 (implementation ETIME) 46 | * http://www.mail-archive.com/networking-discuss@opensolaris.org/msg11898.html ETIME vs. 
nget 47 | * http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/event_port.c (libc) 48 | * http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/fs/portfs/port.c#1325 (kernel) 49 | */ 50 | 51 | #include 52 | #include 53 | #include 54 | #include 55 | #include 56 | #include 57 | 58 | inline_speed 59 | void 60 | port_associate_and_check (EV_P_ int fd, int ev) 61 | { 62 | if (0 > 63 | port_associate ( 64 | backend_fd, PORT_SOURCE_FD, fd, 65 | (ev & EV_READ ? POLLIN : 0) 66 | | (ev & EV_WRITE ? POLLOUT : 0), 67 | 0 68 | ) 69 | ) 70 | { 71 | if (errno == EBADFD) 72 | { 73 | assert (("libev: port_associate found invalid fd", errno != EBADFD)); 74 | fd_kill (EV_A_ fd); 75 | } 76 | else 77 | ev_syserr ("(libev) port_associate"); 78 | } 79 | } 80 | 81 | static void 82 | port_modify (EV_P_ int fd, int oev, int nev) 83 | { 84 | /* we need to reassociate no matter what, as closes are 85 | * once more silently being discarded. 86 | */ 87 | if (!nev) 88 | { 89 | if (oev) 90 | port_dissociate (backend_fd, PORT_SOURCE_FD, fd); 91 | } 92 | else 93 | port_associate_and_check (EV_A_ fd, nev); 94 | } 95 | 96 | static void 97 | port_poll (EV_P_ ev_tstamp timeout) 98 | { 99 | int res, i; 100 | struct timespec ts; 101 | uint_t nget = 1; 102 | 103 | /* we initialise this to something we will skip in the loop, as */ 104 | /* port_getn can return with nget unchanged, but no indication */ 105 | /* whether it was the original value or has been updated :/ */ 106 | port_events [0].portev_source = 0; 107 | 108 | EV_RELEASE_CB; 109 | EV_TS_SET (ts, timeout); 110 | res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts); 111 | EV_ACQUIRE_CB; 112 | 113 | /* port_getn may or may not set nget on error */ 114 | /* so we rely on port_events [0].portev_source not being updated */ 115 | if (res == -1 && errno != ETIME && errno != EINTR) 116 | ev_syserr ("(libev) port_getn (see http://bugs.opensolaris.org/view_bug.do?bug_id=6268715, try 
LIBEV_FLAGS=3 env variable)"); 117 | 118 | for (i = 0; i < nget; ++i) 119 | { 120 | if (port_events [i].portev_source == PORT_SOURCE_FD) 121 | { 122 | int fd = port_events [i].portev_object; 123 | 124 | fd_event ( 125 | EV_A_ 126 | fd, 127 | (port_events [i].portev_events & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 128 | | (port_events [i].portev_events & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 129 | ); 130 | 131 | fd_change (EV_A_ fd, EV__IOFDSET); 132 | } 133 | } 134 | 135 | if (ecb_expect_false (nget == port_eventmax)) 136 | { 137 | ev_free (port_events); 138 | port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1); 139 | port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); 140 | } 141 | } 142 | 143 | inline_size 144 | int 145 | port_init (EV_P_ int flags) 146 | { 147 | /* Initialize the kernel queue */ 148 | if ((backend_fd = port_create ()) < 0) 149 | return 0; 150 | 151 | assert (("libev: PORT_SOURCE_FD must not be zero", PORT_SOURCE_FD)); 152 | 153 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ 154 | 155 | /* if my reading of the opensolaris kernel sources are correct, then 156 | * opensolaris does something very stupid: it checks if the time has already 157 | * elapsed and doesn't round up if that is the case, otherwise it DOES round 158 | * up. Since we can't know what the case is, we need to guess by using a 159 | * "large enough" timeout. Normally, 1e-9 would be correct. 
160 | */ 161 | backend_mintime = EV_TS_CONST (1e-3); /* needed to compensate for port_getn returning early */ 162 | backend_modify = port_modify; 163 | backend_poll = port_poll; 164 | 165 | port_eventmax = 64; /* initial number of events receivable per poll */ 166 | port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); 167 | 168 | return EVBACKEND_PORT; 169 | } 170 | 171 | inline_size 172 | void 173 | port_destroy (EV_P) 174 | { 175 | ev_free (port_events); 176 | } 177 | 178 | inline_size 179 | void 180 | port_fork (EV_P) 181 | { 182 | close (backend_fd); 183 | 184 | while ((backend_fd = port_create ()) < 0) 185 | ev_syserr ("(libev) port"); 186 | 187 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 188 | 189 | /* re-register interest in fds */ 190 | fd_rearm_all (EV_A); 191 | } 192 | 193 | -------------------------------------------------------------------------------- /libev/ev_select.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev select fd activity backend 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 
38 | */ 39 | 40 | #ifndef _WIN32 41 | /* for unix systems */ 42 | # include 43 | # ifndef __hpux 44 | /* for REAL unix systems */ 45 | # include 46 | # endif 47 | #endif 48 | 49 | #ifndef EV_SELECT_USE_FD_SET 50 | # ifdef NFDBITS 51 | # define EV_SELECT_USE_FD_SET 0 52 | # else 53 | # define EV_SELECT_USE_FD_SET 1 54 | # endif 55 | #endif 56 | 57 | #if EV_SELECT_IS_WINSOCKET 58 | # undef EV_SELECT_USE_FD_SET 59 | # define EV_SELECT_USE_FD_SET 1 60 | # undef NFDBITS 61 | # define NFDBITS 0 62 | #endif 63 | 64 | #if !EV_SELECT_USE_FD_SET 65 | # define NFDBYTES (NFDBITS / 8) 66 | #endif 67 | 68 | #include 69 | 70 | static void 71 | select_modify (EV_P_ int fd, int oev, int nev) 72 | { 73 | if (oev == nev) 74 | return; 75 | 76 | { 77 | #if EV_SELECT_USE_FD_SET 78 | 79 | #if EV_SELECT_IS_WINSOCKET 80 | SOCKET handle = anfds [fd].handle; 81 | #else 82 | int handle = fd; 83 | #endif 84 | 85 | assert (("libev: fd >= FD_SETSIZE passed to fd_set-based select backend", fd < FD_SETSIZE)); 86 | 87 | /* FD_SET is broken on windows (it adds the fd to a set twice or more, 88 | * which eventually leads to overflows). Need to call it only on changes. 
89 | */ 90 | #if EV_SELECT_IS_WINSOCKET 91 | if ((oev ^ nev) & EV_READ) 92 | #endif 93 | if (nev & EV_READ) 94 | FD_SET (handle, (fd_set *)vec_ri); 95 | else 96 | FD_CLR (handle, (fd_set *)vec_ri); 97 | 98 | #if EV_SELECT_IS_WINSOCKET 99 | if ((oev ^ nev) & EV_WRITE) 100 | #endif 101 | if (nev & EV_WRITE) 102 | FD_SET (handle, (fd_set *)vec_wi); 103 | else 104 | FD_CLR (handle, (fd_set *)vec_wi); 105 | 106 | #else 107 | 108 | int word = fd / NFDBITS; 109 | fd_mask mask = 1UL << (fd % NFDBITS); 110 | 111 | if (ecb_expect_false (vec_max <= word)) 112 | { 113 | int new_max = word + 1; 114 | 115 | vec_ri = ev_realloc (vec_ri, new_max * NFDBYTES); 116 | vec_ro = ev_realloc (vec_ro, new_max * NFDBYTES); /* could free/malloc */ 117 | vec_wi = ev_realloc (vec_wi, new_max * NFDBYTES); 118 | vec_wo = ev_realloc (vec_wo, new_max * NFDBYTES); /* could free/malloc */ 119 | #ifdef _WIN32 120 | vec_eo = ev_realloc (vec_eo, new_max * NFDBYTES); /* could free/malloc */ 121 | #endif 122 | 123 | for (; vec_max < new_max; ++vec_max) 124 | ((fd_mask *)vec_ri) [vec_max] = 125 | ((fd_mask *)vec_wi) [vec_max] = 0; 126 | } 127 | 128 | ((fd_mask *)vec_ri) [word] |= mask; 129 | if (!(nev & EV_READ)) 130 | ((fd_mask *)vec_ri) [word] &= ~mask; 131 | 132 | ((fd_mask *)vec_wi) [word] |= mask; 133 | if (!(nev & EV_WRITE)) 134 | ((fd_mask *)vec_wi) [word] &= ~mask; 135 | #endif 136 | } 137 | } 138 | 139 | static void 140 | select_poll (EV_P_ ev_tstamp timeout) 141 | { 142 | struct timeval tv; 143 | int res; 144 | int fd_setsize; 145 | 146 | EV_RELEASE_CB; 147 | EV_TV_SET (tv, timeout); 148 | 149 | #if EV_SELECT_USE_FD_SET 150 | fd_setsize = sizeof (fd_set); 151 | #else 152 | fd_setsize = vec_max * NFDBYTES; 153 | #endif 154 | 155 | memcpy (vec_ro, vec_ri, fd_setsize); 156 | memcpy (vec_wo, vec_wi, fd_setsize); 157 | 158 | #ifdef _WIN32 159 | /* pass in the write set as except set. 
160 | * the idea behind this is to work around a windows bug that causes 161 | * errors to be reported as an exception and not by setting 162 | * the writable bit. this is so uncontrollably lame. 163 | */ 164 | memcpy (vec_eo, vec_wi, fd_setsize); 165 | res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, (fd_set *)vec_eo, &tv); 166 | #elif EV_SELECT_USE_FD_SET 167 | fd_setsize = anfdmax < FD_SETSIZE ? anfdmax : FD_SETSIZE; 168 | res = select (fd_setsize, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); 169 | #else 170 | res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); 171 | #endif 172 | EV_ACQUIRE_CB; 173 | 174 | if (ecb_expect_false (res < 0)) 175 | { 176 | #if EV_SELECT_IS_WINSOCKET 177 | errno = WSAGetLastError (); 178 | #endif 179 | #ifdef WSABASEERR 180 | /* on windows, select returns incompatible error codes, fix this */ 181 | if (errno >= WSABASEERR && errno < WSABASEERR + 1000) 182 | if (errno == WSAENOTSOCK) 183 | errno = EBADF; 184 | else 185 | errno -= WSABASEERR; 186 | #endif 187 | 188 | #ifdef _WIN32 189 | /* select on windows erroneously returns EINVAL when no fd sets have been 190 | * provided (this is documented). what microsoft doesn't tell you that this bug 191 | * exists even when the fd sets _are_ provided, so we have to check for this bug 192 | * here and emulate by sleeping manually. 193 | * we also get EINVAL when the timeout is invalid, but we ignore this case here 194 | * and assume that EINVAL always means: you have to wait manually. 195 | */ 196 | if (errno == EINVAL) 197 | { 198 | if (timeout) 199 | { 200 | unsigned long ms = EV_TS_TO_MSEC (timeout); 201 | Sleep (ms ? 
ms : 1); 202 | } 203 | 204 | return; 205 | } 206 | #endif 207 | 208 | if (errno == EBADF) 209 | fd_ebadf (EV_A); 210 | else if (errno == ENOMEM && !syserr_cb) 211 | fd_enomem (EV_A); 212 | else if (errno != EINTR) 213 | ev_syserr ("(libev) select"); 214 | 215 | return; 216 | } 217 | 218 | #if EV_SELECT_USE_FD_SET 219 | 220 | { 221 | int fd; 222 | 223 | for (fd = 0; fd < anfdmax; ++fd) 224 | if (anfds [fd].events) 225 | { 226 | int events = 0; 227 | #if EV_SELECT_IS_WINSOCKET 228 | SOCKET handle = anfds [fd].handle; 229 | #else 230 | int handle = fd; 231 | #endif 232 | 233 | if (FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ; 234 | if (FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE; 235 | #ifdef _WIN32 236 | if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE; 237 | #endif 238 | 239 | if (ecb_expect_true (events)) 240 | fd_event (EV_A_ fd, events); 241 | } 242 | } 243 | 244 | #else 245 | 246 | { 247 | int word, bit; 248 | for (word = vec_max; word--; ) 249 | { 250 | fd_mask word_r = ((fd_mask *)vec_ro) [word]; 251 | fd_mask word_w = ((fd_mask *)vec_wo) [word]; 252 | #ifdef _WIN32 253 | word_w |= ((fd_mask *)vec_eo) [word]; 254 | #endif 255 | 256 | if (word_r || word_w) 257 | for (bit = NFDBITS; bit--; ) 258 | { 259 | fd_mask mask = 1UL << bit; 260 | int events = 0; 261 | 262 | events |= word_r & mask ? EV_READ : 0; 263 | events |= word_w & mask ? 
EV_WRITE : 0; 264 | 265 | if (ecb_expect_true (events)) 266 | fd_event (EV_A_ word * NFDBITS + bit, events); 267 | } 268 | } 269 | } 270 | 271 | #endif 272 | } 273 | 274 | inline_size 275 | int 276 | select_init (EV_P_ int flags) 277 | { 278 | backend_mintime = EV_TS_CONST (1e-6); 279 | backend_modify = select_modify; 280 | backend_poll = select_poll; 281 | 282 | #if EV_SELECT_USE_FD_SET 283 | vec_ri = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_ri); 284 | vec_ro = ev_malloc (sizeof (fd_set)); 285 | vec_wi = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_wi); 286 | vec_wo = ev_malloc (sizeof (fd_set)); 287 | #ifdef _WIN32 288 | vec_eo = ev_malloc (sizeof (fd_set)); 289 | #endif 290 | #else 291 | vec_max = 0; 292 | vec_ri = 0; 293 | vec_ro = 0; 294 | vec_wi = 0; 295 | vec_wo = 0; 296 | #ifdef _WIN32 297 | vec_eo = 0; 298 | #endif 299 | #endif 300 | 301 | return EVBACKEND_SELECT; 302 | } 303 | 304 | inline_size 305 | void 306 | select_destroy (EV_P) 307 | { 308 | ev_free (vec_ri); 309 | ev_free (vec_ro); 310 | ev_free (vec_wi); 311 | ev_free (vec_wo); 312 | #ifdef _WIN32 313 | ev_free (vec_eo); 314 | #endif 315 | } 316 | 317 | -------------------------------------------------------------------------------- /libev/ev_vars.h: -------------------------------------------------------------------------------- 1 | /* 2 | * loop member variable declarations 3 | * 4 | * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2019 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. 
Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 
38 | */ 39 | 40 | #define VARx(type,name) VAR(name, type name) 41 | 42 | VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */ 43 | VARx(ev_tstamp, mn_now) /* monotonic clock "now" */ 44 | VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */ 45 | 46 | /* for reverse feeding of events */ 47 | VARx(W *, rfeeds) 48 | VARx(int, rfeedmax) 49 | VARx(int, rfeedcnt) 50 | 51 | VAR (pendings, ANPENDING *pendings [NUMPRI]) 52 | VAR (pendingmax, int pendingmax [NUMPRI]) 53 | VAR (pendingcnt, int pendingcnt [NUMPRI]) 54 | VARx(int, pendingpri) /* highest priority currently pending */ 55 | VARx(ev_prepare, pending_w) /* dummy pending watcher */ 56 | 57 | VARx(ev_tstamp, io_blocktime) 58 | VARx(ev_tstamp, timeout_blocktime) 59 | 60 | VARx(int, backend) 61 | VARx(int, activecnt) /* total number of active events ("refcount") */ 62 | VARx(EV_ATOMIC_T, loop_done) /* signal by ev_break */ 63 | 64 | VARx(int, backend_fd) 65 | VARx(ev_tstamp, backend_mintime) /* assumed typical timer resolution */ 66 | VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev)) 67 | VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout)) 68 | 69 | VARx(ANFD *, anfds) 70 | VARx(int, anfdmax) 71 | 72 | VAR (evpipe, int evpipe [2]) 73 | VARx(ev_io, pipe_w) 74 | VARx(EV_ATOMIC_T, pipe_write_wanted) 75 | VARx(EV_ATOMIC_T, pipe_write_skipped) 76 | 77 | #if !defined(_WIN32) || EV_GENWRAP 78 | VARx(pid_t, curpid) 79 | #endif 80 | 81 | VARx(char, postfork) /* true if we need to recreate kernel state after fork */ 82 | 83 | #if EV_USE_SELECT || EV_GENWRAP 84 | VARx(void *, vec_ri) 85 | VARx(void *, vec_ro) 86 | VARx(void *, vec_wi) 87 | VARx(void *, vec_wo) 88 | #if defined(_WIN32) || EV_GENWRAP 89 | VARx(void *, vec_eo) 90 | #endif 91 | VARx(int, vec_max) 92 | #endif 93 | 94 | #if EV_USE_POLL || EV_GENWRAP 95 | VARx(struct pollfd *, polls) 96 | VARx(int, pollmax) 97 | VARx(int, pollcnt) 98 | VARx(int *, pollidxs) /* maps fds into structure indices */ 99 | 
VARx(int, pollidxmax) 100 | #endif 101 | 102 | #if EV_USE_EPOLL || EV_GENWRAP 103 | VARx(struct epoll_event *, epoll_events) 104 | VARx(int, epoll_eventmax) 105 | VARx(int *, epoll_eperms) 106 | VARx(int, epoll_epermcnt) 107 | VARx(int, epoll_epermmax) 108 | #endif 109 | 110 | #if EV_USE_LINUXAIO || EV_GENWRAP 111 | VARx(aio_context_t, linuxaio_ctx) 112 | VARx(int, linuxaio_iteration) 113 | VARx(struct aniocb **, linuxaio_iocbps) 114 | VARx(int, linuxaio_iocbpmax) 115 | VARx(struct iocb **, linuxaio_submits) 116 | VARx(int, linuxaio_submitcnt) 117 | VARx(int, linuxaio_submitmax) 118 | VARx(ev_io, linuxaio_epoll_w) 119 | #endif 120 | 121 | #if EV_USE_IOURING || EV_GENWRAP 122 | VARx(int, iouring_fd) 123 | VARx(unsigned, iouring_to_submit); 124 | VARx(int, iouring_entries) 125 | VARx(int, iouring_max_entries) 126 | VARx(void *, iouring_sq_ring) 127 | VARx(void *, iouring_cq_ring) 128 | VARx(void *, iouring_sqes) 129 | VARx(uint32_t, iouring_sq_ring_size) 130 | VARx(uint32_t, iouring_cq_ring_size) 131 | VARx(uint32_t, iouring_sqes_size) 132 | VARx(uint32_t, iouring_sq_head) 133 | VARx(uint32_t, iouring_sq_tail) 134 | VARx(uint32_t, iouring_sq_ring_mask) 135 | VARx(uint32_t, iouring_sq_ring_entries) 136 | VARx(uint32_t, iouring_sq_flags) 137 | VARx(uint32_t, iouring_sq_dropped) 138 | VARx(uint32_t, iouring_sq_array) 139 | VARx(uint32_t, iouring_cq_head) 140 | VARx(uint32_t, iouring_cq_tail) 141 | VARx(uint32_t, iouring_cq_ring_mask) 142 | VARx(uint32_t, iouring_cq_ring_entries) 143 | VARx(uint32_t, iouring_cq_overflow) 144 | VARx(uint32_t, iouring_cq_cqes) 145 | VARx(ev_tstamp, iouring_tfd_to) 146 | VARx(int, iouring_tfd) 147 | VARx(ev_io, iouring_tfd_w) 148 | VARx(ev_io, iouring_epoll_w) 149 | #endif 150 | 151 | #if EV_USE_KQUEUE || EV_GENWRAP 152 | VARx(pid_t, kqueue_fd_pid) 153 | VARx(struct kevent *, kqueue_changes) 154 | VARx(int, kqueue_changemax) 155 | VARx(int, kqueue_changecnt) 156 | VARx(struct kevent *, kqueue_events) 157 | VARx(int, kqueue_eventmax) 158 | 
#endif 159 | 160 | #if EV_USE_PORT || EV_GENWRAP 161 | VARx(struct port_event *, port_events) 162 | VARx(int, port_eventmax) 163 | #endif 164 | 165 | #if EV_USE_IOCP || EV_GENWRAP 166 | VARx(HANDLE, iocp) 167 | #endif 168 | 169 | VARx(int *, fdchanges) 170 | VARx(int, fdchangemax) 171 | VARx(int, fdchangecnt) 172 | 173 | VARx(ANHE *, timers) 174 | VARx(int, timermax) 175 | VARx(int, timercnt) 176 | 177 | #if EV_PERIODIC_ENABLE || EV_GENWRAP 178 | VARx(ANHE *, periodics) 179 | VARx(int, periodicmax) 180 | VARx(int, periodiccnt) 181 | #endif 182 | 183 | #if EV_IDLE_ENABLE || EV_GENWRAP 184 | VAR (idles, ev_idle **idles [NUMPRI]) 185 | VAR (idlemax, int idlemax [NUMPRI]) 186 | VAR (idlecnt, int idlecnt [NUMPRI]) 187 | #endif 188 | VARx(int, idleall) /* total number */ 189 | 190 | VARx(struct ev_prepare **, prepares) 191 | VARx(int, preparemax) 192 | VARx(int, preparecnt) 193 | 194 | VARx(struct ev_check **, checks) 195 | VARx(int, checkmax) 196 | VARx(int, checkcnt) 197 | 198 | #if EV_FORK_ENABLE || EV_GENWRAP 199 | VARx(struct ev_fork **, forks) 200 | VARx(int, forkmax) 201 | VARx(int, forkcnt) 202 | #endif 203 | 204 | #if EV_CLEANUP_ENABLE || EV_GENWRAP 205 | VARx(struct ev_cleanup **, cleanups) 206 | VARx(int, cleanupmax) 207 | VARx(int, cleanupcnt) 208 | #endif 209 | 210 | #if EV_ASYNC_ENABLE || EV_GENWRAP 211 | VARx(EV_ATOMIC_T, async_pending) 212 | VARx(struct ev_async **, asyncs) 213 | VARx(int, asyncmax) 214 | VARx(int, asynccnt) 215 | #endif 216 | 217 | #if EV_USE_INOTIFY || EV_GENWRAP 218 | VARx(int, fs_fd) 219 | VARx(ev_io, fs_w) 220 | VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */ 221 | VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE]) 222 | #endif 223 | 224 | VARx(EV_ATOMIC_T, sig_pending) 225 | #if EV_USE_SIGNALFD || EV_GENWRAP 226 | VARx(int, sigfd) 227 | VARx(ev_io, sigfd_w) 228 | VARx(sigset_t, sigfd_set) 229 | #endif 230 | 231 | #if EV_USE_TIMERFD || EV_GENWRAP 232 | VARx(int, timerfd) /* timerfd for time jump detection */ 
233 | VARx(ev_io, timerfd_w) 234 | #endif 235 | 236 | VARx(unsigned int, origflags) /* original loop flags */ 237 | 238 | #if EV_FEATURE_API || EV_GENWRAP 239 | VARx(unsigned int, loop_count) /* total number of loop iterations/blocks */ 240 | VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */ 241 | 242 | VARx(void *, userdata) 243 | /* C++ doesn't support the ev_loop_callback typedef here. stinks. */ 244 | VAR (release_cb, void (*release_cb)(EV_P) EV_NOEXCEPT) 245 | VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_NOEXCEPT) 246 | VAR (invoke_cb , ev_loop_callback invoke_cb) 247 | #endif 248 | 249 | #undef VARx 250 | 251 | -------------------------------------------------------------------------------- /libev/ev_win32.c: -------------------------------------------------------------------------------- 1 | /* 2 | * libev win32 compatibility cruft (_not_ a backend) 3 | * 4 | * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without modifica- 8 | * tion, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 | * OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | * Alternatively, the contents of this file may be used under the terms of 29 | * the GNU General Public License ("GPL") version 2 or any later version, 30 | * in which case the provisions of the GPL are applicable instead of 31 | * the above. If you wish to allow the use of your version of this file 32 | * only under the terms of the GPL and not to allow others to use your 33 | * version of this file under the BSD license, indicate your decision 34 | * by deleting the provisions above and replace them with the notice 35 | * and other provisions required by the GPL. If you do not delete the 36 | * provisions above, a recipient may use your version of this file under 37 | * either the BSD or the GPL. 38 | */ 39 | 40 | #ifdef _WIN32 41 | 42 | /* note: the comment below could not be substantiated, but what would I care */ 43 | /* MSDN says this is required to handle SIGFPE */ 44 | /* my wild guess would be that using something floating-pointy is required */ 45 | /* for the crt to do something about it */ 46 | volatile double SIGFPE_REQ = 0.0f; 47 | 48 | static SOCKET 49 | ev_tcp_socket (void) 50 | { 51 | #if EV_USE_WSASOCKET 52 | return WSASocket (AF_INET, SOCK_STREAM, 0, 0, 0, 0); 53 | #else 54 | return socket (AF_INET, SOCK_STREAM, 0); 55 | #endif 56 | } 57 | 58 | /* oh, the humanity! 
*/ 59 | static int 60 | ev_pipe (int filedes [2]) 61 | { 62 | struct sockaddr_in addr = { 0 }; 63 | int addr_size = sizeof (addr); 64 | struct sockaddr_in adr2; 65 | int adr2_size = sizeof (adr2); 66 | SOCKET listener; 67 | SOCKET sock [2] = { -1, -1 }; 68 | 69 | if ((listener = ev_tcp_socket ()) == INVALID_SOCKET) 70 | return -1; 71 | 72 | addr.sin_family = AF_INET; 73 | addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK); 74 | addr.sin_port = 0; 75 | 76 | if (bind (listener, (struct sockaddr *)&addr, addr_size)) 77 | goto fail; 78 | 79 | if (getsockname (listener, (struct sockaddr *)&addr, &addr_size)) 80 | goto fail; 81 | 82 | if (listen (listener, 1)) 83 | goto fail; 84 | 85 | if ((sock [0] = ev_tcp_socket ()) == INVALID_SOCKET) 86 | goto fail; 87 | 88 | if (connect (sock [0], (struct sockaddr *)&addr, addr_size)) 89 | goto fail; 90 | 91 | /* TODO: returns INVALID_SOCKET on winsock accept, not < 0. fix it */ 92 | /* when convenient, probably by just removing error checking altogether? */ 93 | if ((sock [1] = accept (listener, 0, 0)) < 0) 94 | goto fail; 95 | 96 | /* windows vista returns fantasy port numbers for sockets: 97 | * example for two interconnected tcp sockets: 98 | * 99 | * (Socket::unpack_sockaddr_in getsockname $sock0)[0] == 53364 100 | * (Socket::unpack_sockaddr_in getpeername $sock0)[0] == 53363 101 | * (Socket::unpack_sockaddr_in getsockname $sock1)[0] == 53363 102 | * (Socket::unpack_sockaddr_in getpeername $sock1)[0] == 53365 103 | * 104 | * wow! tridirectional sockets! 
105 | * 106 | * this way of checking ports seems to work: 107 | */ 108 | if (getpeername (sock [0], (struct sockaddr *)&addr, &addr_size)) 109 | goto fail; 110 | 111 | if (getsockname (sock [1], (struct sockaddr *)&adr2, &adr2_size)) 112 | goto fail; 113 | 114 | errno = WSAEINVAL; 115 | if (addr_size != adr2_size 116 | || addr.sin_addr.s_addr != adr2.sin_addr.s_addr /* just to be sure, I mean, it's windows */ 117 | || addr.sin_port != adr2.sin_port) 118 | goto fail; 119 | 120 | closesocket (listener); 121 | 122 | #if EV_SELECT_IS_WINSOCKET 123 | filedes [0] = EV_WIN32_HANDLE_TO_FD (sock [0]); 124 | filedes [1] = EV_WIN32_HANDLE_TO_FD (sock [1]); 125 | #else 126 | /* when select isn't winsocket, we also expect socket, connect, accept etc. 127 | * to work on fds */ 128 | filedes [0] = sock [0]; 129 | filedes [1] = sock [1]; 130 | #endif 131 | 132 | return 0; 133 | 134 | fail: 135 | closesocket (listener); 136 | 137 | if (sock [0] != INVALID_SOCKET) closesocket (sock [0]); 138 | if (sock [1] != INVALID_SOCKET) closesocket (sock [1]); 139 | 140 | return -1; 141 | } 142 | 143 | #undef pipe 144 | #define pipe(filedes) ev_pipe (filedes) 145 | 146 | #define EV_HAVE_EV_TIME 1 147 | ev_tstamp 148 | ev_time (void) 149 | { 150 | FILETIME ft; 151 | ULARGE_INTEGER ui; 152 | 153 | GetSystemTimeAsFileTime (&ft); 154 | ui.u.LowPart = ft.dwLowDateTime; 155 | ui.u.HighPart = ft.dwHighDateTime; 156 | 157 | /* also, msvc cannot convert ulonglong to double... 
yes, it is that sucky */ 158 | return EV_TS_FROM_USEC (((LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-1)); 159 | } 160 | 161 | #endif 162 | 163 | -------------------------------------------------------------------------------- /libev/ev_wrap.h: -------------------------------------------------------------------------------- 1 | /* DO NOT EDIT, automatically generated by update_ev_wrap */ 2 | #ifndef EV_WRAP_H 3 | #define EV_WRAP_H 4 | #define acquire_cb ((loop)->acquire_cb) 5 | #define activecnt ((loop)->activecnt) 6 | #define anfdmax ((loop)->anfdmax) 7 | #define anfds ((loop)->anfds) 8 | #define async_pending ((loop)->async_pending) 9 | #define asynccnt ((loop)->asynccnt) 10 | #define asyncmax ((loop)->asyncmax) 11 | #define asyncs ((loop)->asyncs) 12 | #define backend ((loop)->backend) 13 | #define backend_fd ((loop)->backend_fd) 14 | #define backend_mintime ((loop)->backend_mintime) 15 | #define backend_modify ((loop)->backend_modify) 16 | #define backend_poll ((loop)->backend_poll) 17 | #define checkcnt ((loop)->checkcnt) 18 | #define checkmax ((loop)->checkmax) 19 | #define checks ((loop)->checks) 20 | #define cleanupcnt ((loop)->cleanupcnt) 21 | #define cleanupmax ((loop)->cleanupmax) 22 | #define cleanups ((loop)->cleanups) 23 | #define curpid ((loop)->curpid) 24 | #define epoll_epermcnt ((loop)->epoll_epermcnt) 25 | #define epoll_epermmax ((loop)->epoll_epermmax) 26 | #define epoll_eperms ((loop)->epoll_eperms) 27 | #define epoll_eventmax ((loop)->epoll_eventmax) 28 | #define epoll_events ((loop)->epoll_events) 29 | #define evpipe ((loop)->evpipe) 30 | #define fdchangecnt ((loop)->fdchangecnt) 31 | #define fdchangemax ((loop)->fdchangemax) 32 | #define fdchanges ((loop)->fdchanges) 33 | #define forkcnt ((loop)->forkcnt) 34 | #define forkmax ((loop)->forkmax) 35 | #define forks ((loop)->forks) 36 | #define fs_2625 ((loop)->fs_2625) 37 | #define fs_fd ((loop)->fs_fd) 38 | #define fs_hash ((loop)->fs_hash) 39 | #define fs_w ((loop)->fs_w) 40 | #define 
idleall ((loop)->idleall) 41 | #define idlecnt ((loop)->idlecnt) 42 | #define idlemax ((loop)->idlemax) 43 | #define idles ((loop)->idles) 44 | #define invoke_cb ((loop)->invoke_cb) 45 | #define io_blocktime ((loop)->io_blocktime) 46 | #define iocp ((loop)->iocp) 47 | #define iouring_cq_cqes ((loop)->iouring_cq_cqes) 48 | #define iouring_cq_head ((loop)->iouring_cq_head) 49 | #define iouring_cq_overflow ((loop)->iouring_cq_overflow) 50 | #define iouring_cq_ring ((loop)->iouring_cq_ring) 51 | #define iouring_cq_ring_entries ((loop)->iouring_cq_ring_entries) 52 | #define iouring_cq_ring_mask ((loop)->iouring_cq_ring_mask) 53 | #define iouring_cq_ring_size ((loop)->iouring_cq_ring_size) 54 | #define iouring_cq_tail ((loop)->iouring_cq_tail) 55 | #define iouring_entries ((loop)->iouring_entries) 56 | #define iouring_epoll_w ((loop)->iouring_epoll_w) 57 | #define iouring_fd ((loop)->iouring_fd) 58 | #define iouring_max_entries ((loop)->iouring_max_entries) 59 | #define iouring_sq_array ((loop)->iouring_sq_array) 60 | #define iouring_sq_dropped ((loop)->iouring_sq_dropped) 61 | #define iouring_sq_flags ((loop)->iouring_sq_flags) 62 | #define iouring_sq_head ((loop)->iouring_sq_head) 63 | #define iouring_sq_ring ((loop)->iouring_sq_ring) 64 | #define iouring_sq_ring_entries ((loop)->iouring_sq_ring_entries) 65 | #define iouring_sq_ring_mask ((loop)->iouring_sq_ring_mask) 66 | #define iouring_sq_ring_size ((loop)->iouring_sq_ring_size) 67 | #define iouring_sq_tail ((loop)->iouring_sq_tail) 68 | #define iouring_sqes ((loop)->iouring_sqes) 69 | #define iouring_sqes_size ((loop)->iouring_sqes_size) 70 | #define iouring_tfd ((loop)->iouring_tfd) 71 | #define iouring_tfd_to ((loop)->iouring_tfd_to) 72 | #define iouring_tfd_w ((loop)->iouring_tfd_w) 73 | #define iouring_to_submit ((loop)->iouring_to_submit) 74 | #define kqueue_changecnt ((loop)->kqueue_changecnt) 75 | #define kqueue_changemax ((loop)->kqueue_changemax) 76 | #define kqueue_changes ((loop)->kqueue_changes) 77 | 
#define kqueue_eventmax ((loop)->kqueue_eventmax) 78 | #define kqueue_events ((loop)->kqueue_events) 79 | #define kqueue_fd_pid ((loop)->kqueue_fd_pid) 80 | #define linuxaio_ctx ((loop)->linuxaio_ctx) 81 | #define linuxaio_epoll_w ((loop)->linuxaio_epoll_w) 82 | #define linuxaio_iocbpmax ((loop)->linuxaio_iocbpmax) 83 | #define linuxaio_iocbps ((loop)->linuxaio_iocbps) 84 | #define linuxaio_iteration ((loop)->linuxaio_iteration) 85 | #define linuxaio_submitcnt ((loop)->linuxaio_submitcnt) 86 | #define linuxaio_submitmax ((loop)->linuxaio_submitmax) 87 | #define linuxaio_submits ((loop)->linuxaio_submits) 88 | #define loop_count ((loop)->loop_count) 89 | #define loop_depth ((loop)->loop_depth) 90 | #define loop_done ((loop)->loop_done) 91 | #define mn_now ((loop)->mn_now) 92 | #define now_floor ((loop)->now_floor) 93 | #define origflags ((loop)->origflags) 94 | #define pending_w ((loop)->pending_w) 95 | #define pendingcnt ((loop)->pendingcnt) 96 | #define pendingmax ((loop)->pendingmax) 97 | #define pendingpri ((loop)->pendingpri) 98 | #define pendings ((loop)->pendings) 99 | #define periodiccnt ((loop)->periodiccnt) 100 | #define periodicmax ((loop)->periodicmax) 101 | #define periodics ((loop)->periodics) 102 | #define pipe_w ((loop)->pipe_w) 103 | #define pipe_write_skipped ((loop)->pipe_write_skipped) 104 | #define pipe_write_wanted ((loop)->pipe_write_wanted) 105 | #define pollcnt ((loop)->pollcnt) 106 | #define pollidxmax ((loop)->pollidxmax) 107 | #define pollidxs ((loop)->pollidxs) 108 | #define pollmax ((loop)->pollmax) 109 | #define polls ((loop)->polls) 110 | #define port_eventmax ((loop)->port_eventmax) 111 | #define port_events ((loop)->port_events) 112 | #define postfork ((loop)->postfork) 113 | #define preparecnt ((loop)->preparecnt) 114 | #define preparemax ((loop)->preparemax) 115 | #define prepares ((loop)->prepares) 116 | #define release_cb ((loop)->release_cb) 117 | #define rfeedcnt ((loop)->rfeedcnt) 118 | #define rfeedmax ((loop)->rfeedmax) 119 
| #define rfeeds ((loop)->rfeeds) 120 | #define rtmn_diff ((loop)->rtmn_diff) 121 | #define sig_pending ((loop)->sig_pending) 122 | #define sigfd ((loop)->sigfd) 123 | #define sigfd_set ((loop)->sigfd_set) 124 | #define sigfd_w ((loop)->sigfd_w) 125 | #define timeout_blocktime ((loop)->timeout_blocktime) 126 | #define timercnt ((loop)->timercnt) 127 | #define timerfd ((loop)->timerfd) 128 | #define timerfd_w ((loop)->timerfd_w) 129 | #define timermax ((loop)->timermax) 130 | #define timers ((loop)->timers) 131 | #define userdata ((loop)->userdata) 132 | #define vec_eo ((loop)->vec_eo) 133 | #define vec_max ((loop)->vec_max) 134 | #define vec_ri ((loop)->vec_ri) 135 | #define vec_ro ((loop)->vec_ro) 136 | #define vec_wi ((loop)->vec_wi) 137 | #define vec_wo ((loop)->vec_wo) 138 | #else 139 | #undef EV_WRAP_H 140 | #undef acquire_cb 141 | #undef activecnt 142 | #undef anfdmax 143 | #undef anfds 144 | #undef async_pending 145 | #undef asynccnt 146 | #undef asyncmax 147 | #undef asyncs 148 | #undef backend 149 | #undef backend_fd 150 | #undef backend_mintime 151 | #undef backend_modify 152 | #undef backend_poll 153 | #undef checkcnt 154 | #undef checkmax 155 | #undef checks 156 | #undef cleanupcnt 157 | #undef cleanupmax 158 | #undef cleanups 159 | #undef curpid 160 | #undef epoll_epermcnt 161 | #undef epoll_epermmax 162 | #undef epoll_eperms 163 | #undef epoll_eventmax 164 | #undef epoll_events 165 | #undef evpipe 166 | #undef fdchangecnt 167 | #undef fdchangemax 168 | #undef fdchanges 169 | #undef forkcnt 170 | #undef forkmax 171 | #undef forks 172 | #undef fs_2625 173 | #undef fs_fd 174 | #undef fs_hash 175 | #undef fs_w 176 | #undef idleall 177 | #undef idlecnt 178 | #undef idlemax 179 | #undef idles 180 | #undef invoke_cb 181 | #undef io_blocktime 182 | #undef iocp 183 | #undef iouring_cq_cqes 184 | #undef iouring_cq_head 185 | #undef iouring_cq_overflow 186 | #undef iouring_cq_ring 187 | #undef iouring_cq_ring_entries 188 | #undef iouring_cq_ring_mask 189 | 
#undef iouring_cq_ring_size 190 | #undef iouring_cq_tail 191 | #undef iouring_entries 192 | #undef iouring_epoll_w 193 | #undef iouring_fd 194 | #undef iouring_max_entries 195 | #undef iouring_sq_array 196 | #undef iouring_sq_dropped 197 | #undef iouring_sq_flags 198 | #undef iouring_sq_head 199 | #undef iouring_sq_ring 200 | #undef iouring_sq_ring_entries 201 | #undef iouring_sq_ring_mask 202 | #undef iouring_sq_ring_size 203 | #undef iouring_sq_tail 204 | #undef iouring_sqes 205 | #undef iouring_sqes_size 206 | #undef iouring_tfd 207 | #undef iouring_tfd_to 208 | #undef iouring_tfd_w 209 | #undef iouring_to_submit 210 | #undef kqueue_changecnt 211 | #undef kqueue_changemax 212 | #undef kqueue_changes 213 | #undef kqueue_eventmax 214 | #undef kqueue_events 215 | #undef kqueue_fd_pid 216 | #undef linuxaio_ctx 217 | #undef linuxaio_epoll_w 218 | #undef linuxaio_iocbpmax 219 | #undef linuxaio_iocbps 220 | #undef linuxaio_iteration 221 | #undef linuxaio_submitcnt 222 | #undef linuxaio_submitmax 223 | #undef linuxaio_submits 224 | #undef loop_count 225 | #undef loop_depth 226 | #undef loop_done 227 | #undef mn_now 228 | #undef now_floor 229 | #undef origflags 230 | #undef pending_w 231 | #undef pendingcnt 232 | #undef pendingmax 233 | #undef pendingpri 234 | #undef pendings 235 | #undef periodiccnt 236 | #undef periodicmax 237 | #undef periodics 238 | #undef pipe_w 239 | #undef pipe_write_skipped 240 | #undef pipe_write_wanted 241 | #undef pollcnt 242 | #undef pollidxmax 243 | #undef pollidxs 244 | #undef pollmax 245 | #undef polls 246 | #undef port_eventmax 247 | #undef port_events 248 | #undef postfork 249 | #undef preparecnt 250 | #undef preparemax 251 | #undef prepares 252 | #undef release_cb 253 | #undef rfeedcnt 254 | #undef rfeedmax 255 | #undef rfeeds 256 | #undef rtmn_diff 257 | #undef sig_pending 258 | #undef sigfd 259 | #undef sigfd_set 260 | #undef sigfd_w 261 | #undef timeout_blocktime 262 | #undef timercnt 263 | #undef timerfd 264 | #undef timerfd_w 265 | 
#undef timermax 266 | #undef timers 267 | #undef userdata 268 | #undef vec_eo 269 | #undef vec_max 270 | #undef vec_ri 271 | #undef vec_ro 272 | #undef vec_wi 273 | #undef vec_wo 274 | #endif 275 | -------------------------------------------------------------------------------- /mycert-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFazCCA1OgAwIBAgIUahEFhvU0J4G4aoLwjKStYSe5iWcwDQYJKoZIhvcNAQEL 3 | BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM 4 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA2MjUyMDI0MjFaFw0yMDA3 5 | MjUyMDI0MjFaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw 6 | HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggIiMA0GCSqGSIb3DQEB 7 | AQUAA4ICDwAwggIKAoICAQCw01MQ+9RnQZT4SxkOVtTuJL+ROTvo8v0QvyEhdcrX 8 | GYeas/m07H+hYqf6gAvnEQuOdKp7WDmRN1lhURDBkNgLEQu2pHOkat9LbagB7Ivo 9 | y47+Ou7EvFlCaPO31eAvhKX5mJtCAyNydb2fZOooarq694iT5p0y8TINnb8k8z7S 10 | l4DeD8feZ30fCYgHAJlJFwEypHF5F7ieLgsKQXPoMGlKBW0CBxtuirDhVCdRInFg 11 | kRonIldWyA+XK60ftsXzGJ/fVmqdYdYsi3ZjjA1S5cBhkhaShMnRpdw4VqbjNlv8 12 | 3yFXgLj6LKvUpxHc8HpRz164GW4rUi8Ov3yl0Cvz39NqQmnAjYsaZks5qxTyY9OU 13 | VBZdMWvbMXBQlHfzAdMAIWdoPPopQ/zT+vSdT5PZZpGLa6jKAcCsCvouY979zryU 14 | TAclMFJpG54PdmOlYEA1M93lm7/fMnZlDqMzCBodPIaCnOEZS9v//8QyELrZnK/H 15 | XrOR+xUxau6+1YnIL0zQx+O+ETpCnFppMybbmfHc6f8+ycCQp2gBZJ6P06Fx3hLl 16 | UWrpaxt9djP/W1saS4wQTIiBgR7xZLnMLo2pwCGmJ9SebDckerQxsFcpQ/nTAAJ5 17 | 8I5WAyEU/FOMeV7AMztFQqQBFNQeO6ZO3Lna2BZIKDsuAKTZF+/ddTJ8ZOnHKFUF 18 | HQIDAQABo1MwUTAdBgNVHQ4EFgQUCffngIO1OL5Xg2AiQ10W3EnwS68wHwYDVR0j 19 | BBgwFoAUCffngIO1OL5Xg2AiQ10W3EnwS68wDwYDVR0TAQH/BAUwAwEB/zANBgkq 20 | hkiG9w0BAQsFAAOCAgEAGxZIXMxQhCj2zgJT2LeYm8xBW+8MCEzgpjJDq11BewVu 21 | cVer1bObDweHz65+ldm2Lh3KIWQ9Dst9gN2E7KGI9vbmRVhv7fAUkJ+AnHyFTVo/ 22 | U1faQeEPRH7kI2w2VzEsgoSemmrGBeehr7QBAzl1fD3AB5vkDMLXY6TeSjG1GnU9 23 | bWbvGARL/Ol9jeNPTk2xQsVJo8hWIhDpuXcnJ33bAWlEM3l160ixqBsk6PbKuWLK 24 | 
/GDmERdSj3n8lzZHmcSosCUZXn7jROAWaDUbEZCv6NTNLUG3U2SG95dOGcL8U0ub 25 | b4+7xyThOoE/AMViqx64PxZkktRCThdweZ85eluE+Pr33GSWI5iIyFxLzPPucvqC 26 | X89B3C3HBVndkIfpz0vBS8nnniHGf9lvfN5NNM1y8Zas00h7bXkQ3+T9DGLT7j1W 27 | i2ot6qH3cc5k7EVKLYJ8eOpGKpmi1Sze3cN+OgWfTnptq7Zlp3G0pdKjHngC+TBD 28 | nKf6Nq/QhHyOzSwemIJcG5sw9hXRawb3LP6t/oPTknTFizTKppjYAYQZUHh6xCGb 29 | ToC8sKtUFT6sPY6D/aOsB0qRhGB3FZ6O2XOSQX2nV1XWkTVLFzdLRPGHqpW9d8BD 30 | dIO9DQBW7u7GSt4TopJyZy6Og1Wg9MLlzffUNF0nJ74IV23mciS8Csf+C6eSmvQ= 31 | -----END CERTIFICATE----- 32 | -------------------------------------------------------------------------------- /mycert-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQCw01MQ+9RnQZT4 3 | SxkOVtTuJL+ROTvo8v0QvyEhdcrXGYeas/m07H+hYqf6gAvnEQuOdKp7WDmRN1lh 4 | URDBkNgLEQu2pHOkat9LbagB7Ivoy47+Ou7EvFlCaPO31eAvhKX5mJtCAyNydb2f 5 | ZOooarq694iT5p0y8TINnb8k8z7Sl4DeD8feZ30fCYgHAJlJFwEypHF5F7ieLgsK 6 | QXPoMGlKBW0CBxtuirDhVCdRInFgkRonIldWyA+XK60ftsXzGJ/fVmqdYdYsi3Zj 7 | jA1S5cBhkhaShMnRpdw4VqbjNlv83yFXgLj6LKvUpxHc8HpRz164GW4rUi8Ov3yl 8 | 0Cvz39NqQmnAjYsaZks5qxTyY9OUVBZdMWvbMXBQlHfzAdMAIWdoPPopQ/zT+vSd 9 | T5PZZpGLa6jKAcCsCvouY979zryUTAclMFJpG54PdmOlYEA1M93lm7/fMnZlDqMz 10 | CBodPIaCnOEZS9v//8QyELrZnK/HXrOR+xUxau6+1YnIL0zQx+O+ETpCnFppMybb 11 | mfHc6f8+ycCQp2gBZJ6P06Fx3hLlUWrpaxt9djP/W1saS4wQTIiBgR7xZLnMLo2p 12 | wCGmJ9SebDckerQxsFcpQ/nTAAJ58I5WAyEU/FOMeV7AMztFQqQBFNQeO6ZO3Lna 13 | 2BZIKDsuAKTZF+/ddTJ8ZOnHKFUFHQIDAQABAoICAQCD0jb1zVdOVuTQqWOXfgIl 14 | Ov4ZLHrOMf+Y8XOodyDtdFnUaeF5EGohjOO/ap/09Y8Q90Z0x5O6/3FBjGaadvmo 15 | Xl/GHBkd9JJcr/X04Tx4IvCJ9LM1tVgxmv4+CVKk+hWl2i+8aYyOctoXLuslkFN+ 16 | uLR31g9Q/+CZqATsbe5inpFyen88/ReZAMkIt1iO1XiOIWt6o8V9HsQaNCtNaHHF 17 | vf/C6Dt6ECjHAq6A3NV68RF3JTHJP0HoJ6Zboy01CiF9JPY8N8ir0phCpBD2wNLH 18 | qlnrhtTR1v0yft7ROiAfhtRbYFoGHzFGSfqWJCx8OsTZKBQqdY0hSH2LO4LYvv7i 19 | 1j4J4wrZlpmGHb32HpBx1KQb8jyA1tbetGuJn9aRG07PMKdNOTSQn0W9EHRRp8hX 20 | 
kmFikoheDKobWLiA1FfbBssUYltBLtYebLZ/cVgrA9xqbYgTg4PGz+Iqx/9JmESA 21 | 5yIr6d6B9EVS4AAMt9LsxZiPpuLAXRl8iD8p5UmoC1b3WTuHywP5k2o938eisZvS 22 | pUTIWoqAqusYOWNz1NSzH/FB8Z1BdrRv/oND8XWjTYVL3O4vuzptBPclUCGTLPTo 23 | nvdwhpEzJUiyG3m10Q+4xRMuRgx9dKi5xmThrJFpNwv8wlqFKU3CwKYqXTJ3nV+z 24 | ARlXIoSSuwXhpMpTYpmrnQKCAQEA1j50/05k3GOtmMAp2FO3ygyOXogpoglM4kYe 25 | XHi9VdAgeMLStjMnATxRRf8QR58WGYoloOLD2YzRkR3cADc3h307dPXWGm1+SHjC 26 | 9BN9Ulc77CyRqoeb3UfLUQ4xlrkSMDtZS3PmYnkPijkFfOukxyUEHRIq5Tb3LDY5 27 | GecS5yganWJh2Zcsd0b12fCRDj8Lygp+TCuP3hNgOfhqM0Z0z9QWNRt3WBgwhpOp 28 | 99SejIXDdqBWi8DCkAgbRG6C2Gxb1nhvdjaDAoigRrbAL5qVF5mVj7Hn5aE7UlMP 29 | pnjcL2rgwjCC6qiyXenSD3dFDAmioE9Jm84cGllqa8OZG4NrkwKCAQEA00nmaA0O 30 | JXJhw6PGGF42+42h6sPHFrCZXRltwsXgNvWxf2JB/tuvizddfXkA9JllV0VHzr7P 31 | o6iql87DWZjMznu3rze7Aojl4Z3BCm24m/CBQYbhDHddRVNymjjf7bklA+9uexli 32 | PSxpy5QN1fCDUM69fUB8n19lsHxMJ0srRid0MErq0CVxgG4j6igMGXZT01wF1jxJ 33 | AQ5nsIdDfldsxwD3x7DwgPPPG/BtXNy4rMW3xI2+j0pIybkL5AOPzWRihv1J2Sd2 34 | lvut1E24HVq828kqWdbVAywTpw+A/GnZSwfvq25Tbsd77Dg+NjpPTXjXFOvmKX8t 35 | 5ij32Bt0AGUajwKCAQEAoW3cMEb8fodo1XDt1r91+3sshxQn8pz5XoGg/OJJH3C8 36 | FuINbRXr7OVrA8fll2HKgCH0ql5TXgfaHHMYQt1YLPHyq/VUhwM7pRXBJ7HLuxBZ 37 | 6WHHvGuDyN8A1qlQOhZnGwqNBOpmjIpVaDwA45iWxysgB1uaEK4y3hBZRCg1uREh 38 | xLbNretXqORB+R5n3gkpM+pmMbSdKaMF3YhNVSdgevx+YzO34BwR7EU1S7TcZluf 39 | 5TF5JMJwl6BUE2MFmldDd721lqgKXQ1fXwjylKvwbSDfba+KYR70mNpXL4NfvSbW 40 | BdwRuQi1pUe6bMbcsu5LqGk1uH079aqsIlj2c6G5mwKCAQAOHvZDJfw4DHj0NlHa 41 | 9xD9pu2XXxZDMfJOApEPX2IoyiretJZsjgohZ8XbyOaDcm0PdBXkf2QpL/CWzL7J 42 | kii99A7GqxbYiIW0mnJ4W/TmlyBMXNNVnfSh2X3jSMHOhX0v0iS0HXIvXHSFopN6 43 | JOTh1a9cW574l8uYC/aa+a8etSxSve8LkjUPvEyyLZMBcKMRkSjitIy8gVX0ulWM 44 | cBNZCXuF4RFxzNmeqGbmuemtwlkRzddahEBxGujsiEBoHbloBC2XUcffN1rawYb4 45 | j0Pn/H3UG0OJPrzhgWvBcuGO3izqbCkvUi26D9g4j5UVPGpDf7CWPWW36C0ISG1s 46 | SVWJAoIBAQDSMwHjfjSAKBlOUnCGzldO066iyAfQ7Stfy7apZkjwA8U4SxNdwJw6 47 | XQYe+QLndMqcQlZS9YQMbQcKZLq5I9zDRzPrdK5NzRr4BrrQKGH72oShhg/95jae 48 | 
JCP24UCr8PD0L0JUp0OZ+UpGSe048y5aPTyWy6kvzxn7rHepSkGCGuW6rJcjJX4l 49 | mhFWf+kZqeJzgcUiwZ/1StazEKF351QdvzLvUxR5UNuUiE93V9l0YB+ROGOXfuxs 50 | bfk+ku1L6l48OAoZibvP+sbHKDq9Iry+2POCWB01afTs3yVtTfiCdsPz66lo8TaP 51 | 22IXfCpZaSyhPWfLM2rMwiUPFdqM2MZV 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /tools/gen-tags.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Generate tags for lsquic project 4 | # 5 | # If your `ctags' is not Universal Ctags, set UCTAGS environment variable to 6 | # point to it. 7 | 8 | tmpfile=.tags.$$$RANDOM 9 | addl=.addl.$$$RANDOM 10 | 11 | ctags_bin=${UCTAGS:-ctags} 12 | 13 | export LC_ALL=C # So that sort(1) behaves 14 | $ctags_bin -f $tmpfile -R -I SLIST_ENTRY+=void -I LIST_ENTRY+=void \ 15 | -I STAILQ_ENTRY+=void -I TAILQ_ENTRY+=void -I CIRCLEQ_ENTRY+=void \ 16 | -I TAILQ_ENTRY+=void -I SLIST_HEAD+=void -I LIST_HEAD+=void \ 17 | -I STAILQ_HEAD+=void -I TAILQ_HEAD+=void -I CIRCLEQ_HEAD+=void \ 18 | -I TAILQ_HEAD+=void -I EV_CPP+= -I ecb_noinline= \ 19 | *.[ch] libev lsquic/include lsquic/src/liblsquic lsquic/src/lshpack \ 20 | && \ 21 | : some special sauce for conn_iface: && \ 22 | egrep '^(mini|full|ietf_full|id24_full|evanescent)_conn_ci_' $tmpfile | sed -r 's/(mini|full|ietf_full|id24_full|evanescent)_conn_//' > $addl && \ 23 | cat $addl >> $tmpfile && \ 24 | egrep '^(nocopy|hash|error)_di_' $tmpfile | sed -r 's/(nocopy|hash|error)_//' > $addl && \ 25 | egrep '^(gquic)_(be|Q046|Q050)_' $tmpfile | sed -r 's/(gquic)_(be|Q046|Q050)_/pf_/' >> $addl && \ 26 | egrep '^ietf_v[0-9][0-9]*_' $tmpfile | sed -r 's/^ietf_v[0-9][0-9]*_/pf_/' >> $addl && \ 27 | egrep '^(stock)_shi_' $tmpfile | sed -r 's/(stock)_//' >> $addl && \ 28 | egrep '^(iquic)_esf_' $tmpfile | sed -r 's/(iquic)_//' >> $addl && \ 29 | egrep '^(gquic[0-9]?)_esf_' $tmpfile | sed -r 's/(gquic[0-9]?)_//' >> $addl && \ 30 | egrep '^(iquic)_esfi_' 
$tmpfile | sed -r 's/(iquic)_//' >> $addl && \ 31 | egrep '^(lsquic_cubic|lsquic_bbr)_' $tmpfile | sed -r 's/(lsquic_cubic|lsquic_bbr)_/cci_/' >> $addl && \ 32 | cat $tmpfile >> $addl && \ 33 | sort $addl > $tmpfile && \ 34 | rm $addl && \ 35 | $ctags_bin -a -f $tmpfile /usr/include/sys/queue.h \ 36 | && \ 37 | mv $tmpfile tags \ 38 | || \ 39 | rm $tmpfile 40 | -------------------------------------------------------------------------------- /tools/print-glibc-version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Determine glibc version and print it to stdout. I have to resort to 4 | # using a shell script because it is taking too long to figure out how 5 | # to do this properly in cmake. 6 | 7 | CC=$1 8 | if [ "" = "$CC" ]; then 9 | CC=gcc 10 | fi 11 | 12 | $CC -print-file-name=libc.so.6 \ 13 | | perl -plne '$_ = readlink if -H; s/\.so$// && s/.*-//' 14 | -------------------------------------------------------------------------------- /tut.c: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2020 LiteSpeed Technologies */ 2 | /* 3 | * tut.c is the example program to illustrate lsquic API usage. 4 | */ 5 | 6 | #define _GNU_SOURCE /* For struct in6_pktinfo */ 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #define EV_STANDALONE 1 23 | #define EV_API_STATIC 1 24 | #include "ev.c" 25 | 26 | #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) 27 | 28 | #include "lsquic.h" 29 | 30 | 31 | static FILE *s_log_fh; 32 | 33 | 34 | struct tut 35 | { 36 | /* Common elements needed by both client and server: */ 37 | enum { 38 | TUT_SERVER = 1 << 0, 39 | } tut_flags; 40 | int tut_sock_fd; /* socket */ 41 | ev_io tut_sock_w; /* socket watcher */ 42 | ev_timer tut_timer; 43 | struct ev_loop *tut_loop; 44 | lsquic_engine_t *tut_engine; 45 | struct sockaddr_storage tut_local_sas; 46 | union 47 | { 48 | struct client 49 | { 50 | ev_io stdin_w; /* stdin watcher */ 51 | struct lsquic_conn *conn; 52 | size_t sz; /* Size of bytes read is stored here */ 53 | char buf[0x100]; /* Read up to this many bytes */ 54 | } c; 55 | } tut_u; 56 | }; 57 | 58 | static void tut_process_conns (struct tut *); 59 | 60 | static int 61 | tut_log_buf (void *ctx, const char *buf, size_t len) 62 | { 63 | FILE *out = ctx; 64 | fwrite(buf, 1, len, out); 65 | fflush(out); 66 | return 0; 67 | } 68 | static const struct lsquic_logger_if logger_if = { tut_log_buf, }; 69 | 70 | 71 | static int s_verbose; 72 | static void 73 | LOG (const char *fmt, ...) 
74 | { 75 | if (s_verbose) 76 | { 77 | va_list ap; 78 | fprintf(s_log_fh, "LOG: "); 79 | va_start(ap, fmt); 80 | (void) vfprintf(s_log_fh, fmt, ap); 81 | va_end(ap); 82 | fprintf(s_log_fh, "\n"); 83 | } 84 | } 85 | 86 | 87 | static SSL_CTX *s_ssl_ctx; 88 | 89 | static int 90 | tut_load_cert (const char *cert_file, const char *key_file) 91 | { 92 | int rv = -1; 93 | 94 | s_ssl_ctx = SSL_CTX_new(TLS_method()); 95 | if (!s_ssl_ctx) 96 | { 97 | LOG("SSL_CTX_new failed"); 98 | goto end; 99 | } 100 | SSL_CTX_set_min_proto_version(s_ssl_ctx, TLS1_3_VERSION); 101 | SSL_CTX_set_max_proto_version(s_ssl_ctx, TLS1_3_VERSION); 102 | SSL_CTX_set_default_verify_paths(s_ssl_ctx); 103 | if (1 != SSL_CTX_use_certificate_chain_file(s_ssl_ctx, cert_file)) 104 | { 105 | LOG("SSL_CTX_use_certificate_chain_file failed"); 106 | goto end; 107 | } 108 | if (1 != SSL_CTX_use_PrivateKey_file(s_ssl_ctx, key_file, 109 | SSL_FILETYPE_PEM)) 110 | { 111 | LOG("SSL_CTX_use_PrivateKey_file failed"); 112 | goto end; 113 | } 114 | rv = 0; 115 | 116 | end: 117 | if (rv != 0) 118 | { 119 | if (s_ssl_ctx) 120 | SSL_CTX_free(s_ssl_ctx); 121 | s_ssl_ctx = NULL; 122 | } 123 | return rv; 124 | } 125 | 126 | 127 | static SSL_CTX * 128 | tut_get_ssl_ctx (void *peer_ctx) 129 | { 130 | return s_ssl_ctx; 131 | } 132 | 133 | 134 | enum ctl_what 135 | { 136 | CW_SENDADDR = 1 << 0, 137 | CW_ECN = 1 << 1, 138 | }; 139 | 140 | 141 | static void 142 | tut_setup_control_msg (struct msghdr *msg, enum ctl_what cw, 143 | const struct lsquic_out_spec *spec, unsigned char *buf, size_t bufsz) 144 | { 145 | struct cmsghdr *cmsg; 146 | struct sockaddr_in *local_sa; 147 | struct sockaddr_in6 *local_sa6; 148 | struct in_pktinfo info; 149 | struct in6_pktinfo info6; 150 | size_t ctl_len; 151 | 152 | msg->msg_control = buf; 153 | msg->msg_controllen = bufsz; 154 | 155 | /* Need to zero the buffer due to a bug(?) in CMSG_NXTHDR. 
See 156 | * https://stackoverflow.com/questions/27601849/cmsg-nxthdr-returns-null-even-though-there-are-more-cmsghdr-objects 157 | */ 158 | memset(buf, 0, bufsz); 159 | 160 | ctl_len = 0; 161 | for (cmsg = CMSG_FIRSTHDR(msg); cw && cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) 162 | { 163 | if (cw & CW_SENDADDR) 164 | { 165 | if (AF_INET == spec->dest_sa->sa_family) 166 | { 167 | local_sa = (struct sockaddr_in *) spec->local_sa; 168 | memset(&info, 0, sizeof(info)); 169 | info.ipi_spec_dst = local_sa->sin_addr; 170 | cmsg->cmsg_level = IPPROTO_IP; 171 | cmsg->cmsg_type = IP_PKTINFO; 172 | cmsg->cmsg_len = CMSG_LEN(sizeof(info)); 173 | ctl_len += CMSG_SPACE(sizeof(info)); 174 | memcpy(CMSG_DATA(cmsg), &info, sizeof(info)); 175 | } 176 | else 177 | { 178 | local_sa6 = (struct sockaddr_in6 *) spec->local_sa; 179 | memset(&info6, 0, sizeof(info6)); 180 | info6.ipi6_addr = local_sa6->sin6_addr; 181 | cmsg->cmsg_level = IPPROTO_IPV6; 182 | cmsg->cmsg_type = IPV6_PKTINFO; 183 | cmsg->cmsg_len = CMSG_LEN(sizeof(info6)); 184 | memcpy(CMSG_DATA(cmsg), &info6, sizeof(info6)); 185 | ctl_len += CMSG_SPACE(sizeof(info6)); 186 | } 187 | cw &= ~CW_SENDADDR; 188 | } 189 | else if (cw & CW_ECN) 190 | { 191 | if (AF_INET == spec->dest_sa->sa_family) 192 | { 193 | const int tos = spec->ecn; 194 | cmsg->cmsg_level = IPPROTO_IP; 195 | cmsg->cmsg_type = IP_TOS; 196 | cmsg->cmsg_len = CMSG_LEN(sizeof(tos)); 197 | memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos)); 198 | ctl_len += CMSG_SPACE(sizeof(tos)); 199 | } 200 | else 201 | { 202 | const int tos = spec->ecn; 203 | cmsg->cmsg_level = IPPROTO_IPV6; 204 | cmsg->cmsg_type = IPV6_TCLASS; 205 | cmsg->cmsg_len = CMSG_LEN(sizeof(tos)); 206 | memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos)); 207 | ctl_len += CMSG_SPACE(sizeof(tos)); 208 | } 209 | cw &= ~CW_ECN; 210 | } 211 | else 212 | assert(0); 213 | } 214 | 215 | msg->msg_controllen = ctl_len; 216 | } 217 | 218 | 219 | /* A simple version of ea_packets_out -- does not use ancillary messages */ 220 | static int 
221 | tut_packets_out_v0 (void *packets_out_ctx, const struct lsquic_out_spec *specs, 222 | unsigned count) 223 | { 224 | unsigned n; 225 | int fd, s = 0; 226 | struct msghdr msg; 227 | 228 | if (0 == count) 229 | return 0; 230 | 231 | n = 0; 232 | msg.msg_flags = 0; 233 | msg.msg_control = NULL; 234 | msg.msg_controllen = 0; 235 | do 236 | { 237 | fd = (int) (uint64_t) specs[n].peer_ctx; 238 | msg.msg_name = (void *) specs[n].dest_sa; 239 | msg.msg_namelen = (AF_INET == specs[n].dest_sa->sa_family ? 240 | sizeof(struct sockaddr_in) : 241 | sizeof(struct sockaddr_in6)), 242 | msg.msg_iov = specs[n].iov; 243 | msg.msg_iovlen = specs[n].iovlen; 244 | s = sendmsg(fd, &msg, 0); 245 | if (s < 0) 246 | { 247 | LOG("sendmsg failed: %s", strerror(errno)); 248 | break; 249 | } 250 | ++n; 251 | } 252 | while (n < count); 253 | 254 | if (n < count) 255 | LOG("could not send all of them"); /* TODO */ 256 | 257 | if (n > 0) 258 | return n; 259 | else 260 | { 261 | assert(s < 0); 262 | return -1; 263 | } 264 | } 265 | 266 | 267 | /* A more complicated version of ea_packets_out -- this one sets source IP 268 | * address and ECN. 269 | */ 270 | static int 271 | tut_packets_out_v1 (void *packets_out_ctx, const struct lsquic_out_spec *specs, 272 | unsigned count) 273 | { 274 | struct tut *const tut = packets_out_ctx; 275 | unsigned n; 276 | int fd, s = 0; 277 | struct msghdr msg; 278 | enum ctl_what cw; 279 | union { 280 | /* cmsg(3) recommends union for proper alignment */ 281 | unsigned char buf[ 282 | CMSG_SPACE(MAX(sizeof(struct in_pktinfo), 283 | sizeof(struct in6_pktinfo))) + CMSG_SPACE(sizeof(int)) 284 | ]; 285 | struct cmsghdr cmsg; 286 | } ancil; 287 | 288 | if (0 == count) 289 | return 0; 290 | 291 | n = 0; 292 | msg.msg_flags = 0; 293 | do 294 | { 295 | fd = (int) (uint64_t) specs[n].peer_ctx; 296 | msg.msg_name = (void *) specs[n].dest_sa; 297 | msg.msg_namelen = (AF_INET == specs[n].dest_sa->sa_family ? 
298 | sizeof(struct sockaddr_in) : 299 | sizeof(struct sockaddr_in6)), 300 | msg.msg_iov = specs[n].iov; 301 | msg.msg_iovlen = specs[n].iovlen; 302 | 303 | /* Set up ancillary message */ 304 | if (tut->tut_flags & TUT_SERVER) 305 | cw = CW_SENDADDR; 306 | else 307 | cw = 0; 308 | if (specs[n].ecn) 309 | cw |= CW_ECN; 310 | if (cw) 311 | tut_setup_control_msg(&msg, cw, &specs[n], ancil.buf, 312 | sizeof(ancil.buf)); 313 | else 314 | { 315 | msg.msg_control = NULL; 316 | msg.msg_controllen = 0; 317 | } 318 | 319 | s = sendmsg(fd, &msg, 0); 320 | if (s < 0) 321 | { 322 | LOG("sendmsg failed: %s", strerror(errno)); 323 | break; 324 | } 325 | ++n; 326 | } 327 | while (n < count); 328 | 329 | if (n < count) 330 | LOG("could not send all of them"); /* TODO */ 331 | 332 | if (n > 0) 333 | return n; 334 | else 335 | { 336 | assert(s < 0); 337 | return -1; 338 | } 339 | } 340 | 341 | 342 | static int (*const tut_packets_out[]) (void *packets_out_ctx, 343 | const struct lsquic_out_spec *specs, unsigned count) = 344 | { 345 | tut_packets_out_v0, 346 | tut_packets_out_v1, 347 | }; 348 | 349 | 350 | static void 351 | tut_usage (const char *argv0) 352 | { 353 | const char *name; 354 | 355 | name = strchr(argv0, '/'); 356 | if (name) 357 | ++name; 358 | else 359 | name = argv0; 360 | 361 | fprintf(stdout, 362 | "Usage: %s [-c cert -k key] [options] IP port\n" 363 | "\n" 364 | " -c cert.file Certificate.\n" 365 | " -k key.file Key file.\n" 366 | " -f log.file Log message to this log file. If not specified, the\n" 367 | " messages are printed to stderr.\n" 368 | " -L level Set library-wide log level. Defaults to 'warn'.\n" 369 | " -l module=level Set log level of specific module. Several of these\n" 370 | " can be specified via multiple -l flags or by combining\n" 371 | " these with comma, e.g. 
-l event=debug,conn=info.\n" 372 | " -v Verbose: log program messages as well.\n" 373 | " -b VERSION Use callbacks version VERSION.\n" 374 | " -p VERSION Use packets_out version VERSION.\n" 375 | " -w VERSION Use server write callback version VERSION.\n" 376 | " -o opt=val Set lsquic engine setting to some value, overriding the\n" 377 | " defaults. For example,\n" 378 | " -o version=ff00001c -o cc_algo=2\n" 379 | " -G DIR Log TLS secrets to a file in directory DIR.\n" 380 | " -h Print this help screen and exit.\n" 381 | , name); 382 | } 383 | 384 | 385 | static lsquic_conn_ctx_t * 386 | tut_client_on_new_conn (void *stream_if_ctx, struct lsquic_conn *conn) 387 | { 388 | struct tut *const tut = stream_if_ctx; 389 | tut->tut_u.c.conn = conn; 390 | LOG("created connection"); 391 | return (void *) tut; 392 | } 393 | 394 | 395 | static void 396 | tut_client_on_hsk_done (lsquic_conn_t *conn, enum lsquic_hsk_status status) 397 | { 398 | struct tut *const tut = (void *) lsquic_conn_get_ctx(conn); 399 | 400 | switch (status) 401 | { 402 | case LSQ_HSK_OK: 403 | case LSQ_HSK_RESUMED_OK: 404 | LOG("handshake successful, start stdin watcher"); 405 | ev_io_start(tut->tut_loop, &tut->tut_u.c.stdin_w); 406 | break; 407 | default: 408 | LOG("handshake failed"); 409 | break; 410 | } 411 | } 412 | 413 | 414 | static void 415 | tut_client_on_conn_closed (struct lsquic_conn *conn) 416 | { 417 | struct tut *const tut = (void *) lsquic_conn_get_ctx(conn); 418 | 419 | LOG("client connection closed -- stop reading from socket"); 420 | ev_io_stop(tut->tut_loop, &tut->tut_sock_w); 421 | } 422 | 423 | 424 | static lsquic_stream_ctx_t * 425 | tut_client_on_new_stream (void *stream_if_ctx, struct lsquic_stream *stream) 426 | { 427 | struct tut *tut = stream_if_ctx; 428 | LOG("created new stream, we want to write"); 429 | lsquic_stream_wantwrite(stream, 1); 430 | /* return tut: we don't have any stream-specific context */ 431 | return (void *) tut; 432 | } 433 | 434 | 435 | /* Echo whatever 
comes back from server, no verification */ 436 | static void 437 | tut_client_on_read_v0 (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 438 | { 439 | struct tut *tut = (struct tut *) h; 440 | ssize_t nread; 441 | unsigned char buf[3]; 442 | 443 | nread = lsquic_stream_read(stream, buf, sizeof(buf)); 444 | if (nread > 0) 445 | { 446 | fwrite(buf, 1, nread, stdout); 447 | fflush(stdout); 448 | } 449 | else if (nread == 0) 450 | { 451 | LOG("read to end-of-stream: close and read from stdin again"); 452 | lsquic_stream_shutdown(stream, 0); 453 | ev_io_start(tut->tut_loop, &tut->tut_u.c.stdin_w); 454 | } 455 | else 456 | { 457 | LOG("error reading from stream (%s) -- exit loop"); 458 | ev_break(tut->tut_loop, EVBREAK_ONE); 459 | } 460 | } 461 | 462 | 463 | static size_t 464 | tut_client_readf_v1 (void *ctx, const unsigned char *data, size_t len, int fin) 465 | { 466 | if (len) 467 | { 468 | fwrite(data, 1, len, stdout); 469 | fflush(stdout); 470 | } 471 | return len; 472 | } 473 | 474 | 475 | /* Same functionality as tut_client_on_read_v0(), but use a readf callback */ 476 | static void 477 | tut_client_on_read_v1 (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 478 | { 479 | struct tut *tut = (struct tut *) h; 480 | ssize_t nread; 481 | 482 | nread = lsquic_stream_readf(stream, tut_client_readf_v1, NULL); 483 | if (nread == 0) 484 | { 485 | LOG("read to end-of-stream: close and read from stdin again"); 486 | lsquic_stream_shutdown(stream, 0); 487 | ev_io_start(tut->tut_loop, &tut->tut_u.c.stdin_w); 488 | } 489 | else if (nread < 0) 490 | { 491 | LOG("error reading from stream (%s) -- exit loop"); 492 | ev_break(tut->tut_loop, EVBREAK_ONE); 493 | } 494 | } 495 | 496 | 497 | /* Alternatively, pass `stream' to lsquic_stream_readf() and call 498 | * lsquic_stream_get_ctx() to get struct tut * 499 | */ 500 | struct client_read_v2_ctx { 501 | struct tut *tut; 502 | lsquic_stream_t *stream; 503 | }; 504 | 505 | 506 | static size_t 507 | tut_client_readf_v2 (void 
*ctx, const unsigned char *data, size_t len, int fin) 508 | { 509 | struct client_read_v2_ctx *v2ctx = ctx; 510 | if (len) 511 | fwrite(data, 1, len, stdout); 512 | if (fin) 513 | { 514 | fflush(stdout); 515 | LOG("read to end-of-stream: close and read from stdin again"); 516 | lsquic_stream_shutdown(v2ctx->stream, 0); 517 | ev_io_start(v2ctx->tut->tut_loop, &v2ctx->tut->tut_u.c.stdin_w); 518 | } 519 | return len; 520 | } 521 | 522 | 523 | /* A bit different from v1: act on fin. This version saves an extra on_read() 524 | * call at the cost of some complexity. 525 | */ 526 | static void 527 | tut_client_on_read_v2 (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 528 | { 529 | struct tut *tut = (struct tut *) h; 530 | ssize_t nread; 531 | 532 | struct client_read_v2_ctx v2ctx = { tut, stream, }; 533 | nread = lsquic_stream_readf(stream, tut_client_readf_v2, &v2ctx); 534 | if (nread < 0) 535 | { 536 | LOG("error reading from stream (%s) -- exit loop"); 537 | ev_break(tut->tut_loop, EVBREAK_ONE); 538 | } 539 | } 540 | 541 | 542 | /* Write out the whole line to stream, shutdown write end, and switch 543 | * to reading the response. 
544 | */ 545 | static void 546 | tut_client_on_write (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 547 | { 548 | lsquic_conn_t *conn; 549 | struct tut *tut; 550 | ssize_t nw; 551 | 552 | conn = lsquic_stream_conn(stream); 553 | tut = (void *) lsquic_conn_get_ctx(conn); 554 | 555 | nw = lsquic_stream_write(stream, tut->tut_u.c.buf, tut->tut_u.c.sz); 556 | if (nw > 0) 557 | { 558 | tut->tut_u.c.sz -= (size_t) nw; 559 | if (tut->tut_u.c.sz == 0) 560 | { 561 | LOG("wrote all %zd bytes to stream, switch to reading", 562 | (size_t) nw); 563 | lsquic_stream_shutdown(stream, 1); /* This flushes as well */ 564 | lsquic_stream_wantread(stream, 1); 565 | } 566 | else 567 | { 568 | memmove(tut->tut_u.c.buf, tut->tut_u.c.buf + nw, tut->tut_u.c.sz); 569 | LOG("wrote %zd bytes to stream, still have %zd bytes to write", 570 | (size_t) nw, tut->tut_u.c.sz); 571 | } 572 | } 573 | else 574 | { 575 | /* When `on_write()' is called, the library guarantees that at least 576 | * something can be written. If not, that's an error whether 0 or -1 577 | * is returned. 
578 | */ 579 | LOG("stream_write() returned %ld, abort connection", (long) nw); 580 | lsquic_conn_abort(lsquic_stream_conn(stream)); 581 | } 582 | } 583 | 584 | 585 | static void 586 | tut_client_on_close (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 587 | { 588 | LOG("stream closed"); 589 | } 590 | 591 | 592 | static void (*const tut_client_on_read[]) 593 | (lsquic_stream_t *, lsquic_stream_ctx_t *h) = 594 | { 595 | tut_client_on_read_v0, 596 | tut_client_on_read_v1, 597 | tut_client_on_read_v2, 598 | }; 599 | 600 | 601 | static struct lsquic_stream_if tut_client_callbacks = 602 | { 603 | .on_new_conn = tut_client_on_new_conn, 604 | .on_hsk_done = tut_client_on_hsk_done, 605 | .on_conn_closed = tut_client_on_conn_closed, 606 | .on_new_stream = tut_client_on_new_stream, 607 | .on_read = tut_client_on_read_v0, 608 | .on_write = tut_client_on_write, 609 | .on_close = tut_client_on_close, 610 | }; 611 | 612 | 613 | static lsquic_conn_ctx_t * 614 | tut_server_on_new_conn (void *stream_if_ctx, struct lsquic_conn *conn) 615 | { 616 | struct tut *const tut = stream_if_ctx; 617 | 618 | LOG("created new connection"); 619 | return (void *) tut; /* Pointer to tut is the connection context */ 620 | } 621 | 622 | 623 | static void 624 | tut_server_on_conn_closed (lsquic_conn_t *conn) 625 | { 626 | LOG("closed connection"); 627 | } 628 | 629 | 630 | struct tut_server_stream_ctx 631 | { 632 | size_t tssc_sz; /* Number of bytes in tsc_buf */ 633 | off_t tssc_off; /* Number of bytes written to stream */ 634 | unsigned char tssc_buf[0x100]; /* Bytes read in from client */ 635 | }; 636 | 637 | 638 | static lsquic_stream_ctx_t * 639 | tut_server_on_new_stream (void *stream_if_ctx, struct lsquic_stream *stream) 640 | { 641 | struct tut_server_stream_ctx *tssc; 642 | 643 | /* Allocate a new buffer per stream. There is no reason why the echo 644 | * server could not process several echo streams at the same time. 
645 | */ 646 | tssc = malloc(sizeof(*tssc)); 647 | if (!tssc) 648 | { 649 | LOG("cannot allocate server stream context"); 650 | lsquic_conn_abort(lsquic_stream_conn(stream)); 651 | return NULL; 652 | } 653 | 654 | tssc->tssc_sz = 0; 655 | tssc->tssc_off = 0; 656 | lsquic_stream_wantread(stream, 1); 657 | LOG("created new echo stream -- want to read"); 658 | return (void *) tssc; 659 | } 660 | 661 | 662 | static void 663 | reverse_string (unsigned char *p, size_t len) 664 | { 665 | unsigned char *q, tmp; 666 | 667 | q = p + len - 1; 668 | while (p < q) 669 | { 670 | tmp = *p; 671 | *p = *q; 672 | *q = tmp; 673 | ++p; 674 | --q; 675 | } 676 | } 677 | 678 | 679 | /* Read until newline and then echo it back */ 680 | static void 681 | tut_server_on_read (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 682 | { 683 | struct tut_server_stream_ctx *const tssc = (void *) h; 684 | ssize_t nread; 685 | unsigned char buf[1]; 686 | 687 | nread = lsquic_stream_read(stream, buf, sizeof(buf)); 688 | if (nread > 0) 689 | { 690 | tssc->tssc_buf[ tssc->tssc_sz ] = buf[0]; 691 | ++tssc->tssc_sz; 692 | if (buf[0] == (unsigned char) '\n' 693 | || tssc->tssc_sz == sizeof(tssc->tssc_buf)) 694 | { 695 | LOG("read newline or filled buffer, switch to writing"); 696 | reverse_string(tssc->tssc_buf, 697 | tssc->tssc_sz - (buf[0] == (unsigned char) '\n')); 698 | lsquic_stream_wantread(stream, 0); 699 | lsquic_stream_wantwrite(stream, 1); 700 | } 701 | } 702 | else if (nread == 0) 703 | { 704 | LOG("read EOF"); 705 | lsquic_stream_shutdown(stream, 0); 706 | if (tssc->tssc_sz) 707 | lsquic_stream_wantwrite(stream, 1); 708 | } 709 | else 710 | { 711 | /* This should not happen */ 712 | LOG("error reading from stream (errno: %d) -- abort connection", errno); 713 | lsquic_conn_abort(lsquic_stream_conn(stream)); 714 | } 715 | } 716 | 717 | 718 | static void 719 | tut_server_on_write_v0 (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 720 | { 721 | struct tut_server_stream_ctx *const tssc = 
(void *) h; 722 | ssize_t nw; 723 | 724 | assert(tssc->tssc_sz > 0); 725 | nw = lsquic_stream_write(stream, tssc->tssc_buf + tssc->tssc_off, 726 | tssc->tssc_sz - tssc->tssc_off); 727 | if (nw > 0) 728 | { 729 | tssc->tssc_off += nw; 730 | if (tssc->tssc_off == tssc->tssc_sz) 731 | { 732 | LOG("wrote all %zd bytes to stream, close stream", 733 | (size_t) nw); 734 | lsquic_stream_close(stream); 735 | } 736 | else 737 | LOG("wrote %zd bytes to stream, still have %zd bytes to write", 738 | (size_t) nw, tssc->tssc_sz - tssc->tssc_off); 739 | } 740 | else 741 | { 742 | /* When `on_write()' is called, the library guarantees that at least 743 | * something can be written. If not, that's an error whether 0 or -1 744 | * is returned. 745 | */ 746 | LOG("stream_write() returned %ld, abort connection", (long) nw); 747 | lsquic_conn_abort(lsquic_stream_conn(stream)); 748 | } 749 | } 750 | 751 | 752 | static size_t 753 | tssc_read (void *ctx, void *buf, size_t count) 754 | { 755 | struct tut_server_stream_ctx *tssc = ctx; 756 | 757 | if (count > tssc->tssc_sz - tssc->tssc_off) 758 | count = tssc->tssc_sz - tssc->tssc_off; 759 | memcpy(buf, tssc->tssc_buf + tssc->tssc_off, count); 760 | tssc->tssc_off += count; 761 | return count; 762 | } 763 | 764 | 765 | static size_t 766 | tssc_size (void *ctx) 767 | { 768 | struct tut_server_stream_ctx *tssc = ctx; 769 | return tssc->tssc_sz - tssc->tssc_off; 770 | } 771 | 772 | 773 | /* Same functionality as tut_server_on_write_v0(), but use the "reader" 774 | * callbacks. This is most useful when data comes from a different source 775 | * such as file descriptor. 
776 | */ 777 | static void 778 | tut_server_on_write_v1 (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 779 | { 780 | struct tut_server_stream_ctx *const tssc = (void *) h; 781 | struct lsquic_reader reader = { tssc_read, tssc_size, tssc, }; 782 | const size_t left = tssc->tssc_sz; 783 | ssize_t nw; 784 | 785 | nw = lsquic_stream_writef(stream, &reader); 786 | if (nw > 0 && tssc->tssc_off == tssc->tssc_sz) 787 | { 788 | LOG("wrote all %zd bytes to stream, close stream", left); 789 | lsquic_stream_close(stream); 790 | } 791 | else if (nw < 0) 792 | { 793 | LOG("stream_write() returned %ld, abort connection", (long) nw); 794 | lsquic_conn_abort(lsquic_stream_conn(stream)); 795 | } 796 | } 797 | 798 | 799 | static void 800 | tut_server_on_close (struct lsquic_stream *stream, lsquic_stream_ctx_t *h) 801 | { 802 | struct tut_server_stream_ctx *const tssc = (void *) h; 803 | free(tssc); 804 | LOG("stream closed"); 805 | } 806 | 807 | 808 | static void (*const tut_server_on_write[])(lsquic_stream_t *, 809 | lsquic_stream_ctx_t *) = 810 | { 811 | tut_server_on_write_v0, 812 | tut_server_on_write_v1, 813 | }; 814 | 815 | 816 | static struct lsquic_stream_if tut_server_callbacks = 817 | { 818 | .on_new_conn = tut_server_on_new_conn, 819 | .on_conn_closed = tut_server_on_conn_closed, 820 | .on_new_stream = tut_server_on_new_stream, 821 | .on_read = tut_server_on_read, 822 | .on_write = tut_server_on_write_v0, 823 | .on_close = tut_server_on_close, 824 | }; 825 | 826 | 827 | /* Read one byte at a time -- when user hits enter, send line to server */ 828 | static void 829 | tut_read_stdin (EV_P_ ev_io *w, int revents) 830 | { 831 | struct tut *const tut = w->data; 832 | ssize_t nr; 833 | 834 | assert(tut->tut_u.c.sz < sizeof(tut->tut_u.c.buf)); 835 | 836 | nr = read(w->fd, tut->tut_u.c.buf + tut->tut_u.c.sz, 1); 837 | if (nr > 0) 838 | { 839 | tut->tut_u.c.sz += nr; 840 | if (tut->tut_u.c.buf[tut->tut_u.c.sz - 1] == '\n' 841 | || sizeof(tut->tut_u.c.buf) == 
tut->tut_u.c.sz) 842 | { 843 | LOG("read up to newline (or filled buffer): make new stream"); 844 | lsquic_conn_make_stream(tut->tut_u.c.conn); 845 | ev_io_stop(tut->tut_loop, w); 846 | tut_process_conns(tut); 847 | } 848 | } 849 | else if (nr == 0) 850 | { 851 | LOG("read EOF: stop reading from stdin, close connection"); 852 | ev_io_stop(tut->tut_loop, w); 853 | ev_io_stop(tut->tut_loop, &tut->tut_u.c.stdin_w); 854 | lsquic_conn_close(tut->tut_u.c.conn); 855 | tut_process_conns(tut); 856 | } 857 | else 858 | { 859 | LOG("error reading from stdin: %s", strerror(errno)); 860 | ev_break(tut->tut_loop, EVBREAK_ONE); 861 | } 862 | } 863 | 864 | 865 | static int 866 | tut_set_nonblocking (int fd) 867 | { 868 | int flags; 869 | 870 | flags = fcntl(fd, F_GETFL); 871 | if (-1 == flags) 872 | return -1; 873 | flags |= O_NONBLOCK; 874 | if (0 != fcntl(fd, F_SETFL, flags)) 875 | return -1; 876 | 877 | return 0; 878 | } 879 | 880 | 881 | /* ToS is used to get ECN value */ 882 | static int 883 | tut_set_ecn (int fd, const struct sockaddr *sa) 884 | { 885 | int on, s; 886 | 887 | on = 1; 888 | if (AF_INET == sa->sa_family) 889 | s = setsockopt(fd, IPPROTO_IP, IP_RECVTOS, &on, sizeof(on)); 890 | else 891 | s = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVTCLASS, &on, sizeof(on)); 892 | if (s != 0) 893 | perror("setsockopt(ecn)"); 894 | 895 | return s; 896 | } 897 | 898 | 899 | /* Set up the socket to return original destination address in ancillary data */ 900 | static int 901 | tut_set_origdst (int fd, const struct sockaddr *sa) 902 | { 903 | int on, s; 904 | 905 | on = 1; 906 | if (AF_INET == sa->sa_family) 907 | s = setsockopt(fd, IPPROTO_IP, 908 | #if defined(IP_RECVORIGDSTADDR) 909 | IP_RECVORIGDSTADDR, 910 | #else 911 | IP_PKTINFO, 912 | #endif 913 | &on, sizeof(on)); 914 | else 915 | s = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on)); 916 | 917 | if (s != 0) 918 | perror("setsockopt"); 919 | 920 | return s; 921 | } 922 | 923 | 924 | static void 925 | 
tut_timer_expired (EV_P_ ev_timer *timer, int revents) 926 | { 927 | tut_process_conns(timer->data); 928 | } 929 | 930 | 931 | static void 932 | tut_process_conns (struct tut *tut) 933 | { 934 | int diff; 935 | ev_tstamp timeout; 936 | 937 | ev_timer_stop(tut->tut_loop, &tut->tut_timer); 938 | lsquic_engine_process_conns(tut->tut_engine); 939 | 940 | if (lsquic_engine_earliest_adv_tick(tut->tut_engine, &diff)) 941 | { 942 | if (diff >= LSQUIC_DF_CLOCK_GRANULARITY) 943 | /* Expected case: convert to seconds */ 944 | timeout = (ev_tstamp) diff / 1000000; 945 | else if (diff <= 0) 946 | /* It should not happen often that the next tick is in the past 947 | * as we just processed connections. Avoid a busy loop by 948 | * scheduling an event: 949 | */ 950 | timeout = 0.0; 951 | else 952 | /* Round up to granularity */ 953 | timeout = (ev_tstamp) LSQUIC_DF_CLOCK_GRANULARITY / 1000000; 954 | LOG("converted diff %d usec to %.4lf seconds", diff, timeout); 955 | ev_timer_init(&tut->tut_timer, tut_timer_expired, timeout, 0.); 956 | ev_timer_start(tut->tut_loop, &tut->tut_timer); 957 | } 958 | } 959 | 960 | 961 | static void 962 | tut_proc_ancillary (struct msghdr *msg, struct sockaddr_storage *storage, 963 | int *ecn) 964 | { 965 | const struct in6_pktinfo *in6_pkt; 966 | struct cmsghdr *cmsg; 967 | 968 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) 969 | { 970 | if (cmsg->cmsg_level == IPPROTO_IP && 971 | cmsg->cmsg_type == 972 | #if defined(IP_RECVORIGDSTADDR) 973 | IP_ORIGDSTADDR 974 | #else 975 | IP_PKTINFO 976 | #endif 977 | ) 978 | { 979 | #if defined(IP_RECVORIGDSTADDR) 980 | memcpy(storage, CMSG_DATA(cmsg), sizeof(struct sockaddr_in)); 981 | #else 982 | const struct in_pktinfo *in_pkt; 983 | in_pkt = (void *) CMSG_DATA(cmsg); 984 | ((struct sockaddr_in *) storage)->sin_addr = in_pkt->ipi_addr; 985 | #endif 986 | } 987 | else if (cmsg->cmsg_level == IPPROTO_IPV6 && 988 | cmsg->cmsg_type == IPV6_PKTINFO) 989 | { 990 | in6_pkt = (void *) 
CMSG_DATA(cmsg); 991 | ((struct sockaddr_in6 *) storage)->sin6_addr = 992 | in6_pkt->ipi6_addr; 993 | } 994 | else if ((cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_TOS) 995 | || (cmsg->cmsg_level == IPPROTO_IPV6 996 | && cmsg->cmsg_type == IPV6_TCLASS)) 997 | { 998 | memcpy(ecn, CMSG_DATA(cmsg), sizeof(*ecn)); 999 | *ecn &= IPTOS_ECN_MASK; 1000 | } 1001 | } 1002 | } 1003 | 1004 | 1005 | #if defined(IP_RECVORIGDSTADDR) 1006 | # define DST_MSG_SZ sizeof(struct sockaddr_in) 1007 | #else 1008 | # define DST_MSG_SZ sizeof(struct in_pktinfo) 1009 | #endif 1010 | 1011 | #define ECN_SZ CMSG_SPACE(sizeof(int)) 1012 | 1013 | /* Amount of space required for incoming ancillary data */ 1014 | #define CTL_SZ (CMSG_SPACE(MAX(DST_MSG_SZ, \ 1015 | sizeof(struct in6_pktinfo))) + ECN_SZ) 1016 | 1017 | 1018 | static void 1019 | tut_read_socket (EV_P_ ev_io *w, int revents) 1020 | { 1021 | struct tut *const tut = w->data; 1022 | ssize_t nread; 1023 | int ecn; 1024 | struct sockaddr_storage peer_sas, local_sas; 1025 | unsigned char buf[0x1000]; 1026 | struct iovec vec[1] = {{ buf, sizeof(buf) }}; 1027 | unsigned char ctl_buf[CTL_SZ]; 1028 | 1029 | struct msghdr msg = { 1030 | .msg_name = &peer_sas, 1031 | .msg_namelen = sizeof(peer_sas), 1032 | .msg_iov = vec, 1033 | .msg_iovlen = 1, 1034 | .msg_control = ctl_buf, 1035 | .msg_controllen = sizeof(ctl_buf), 1036 | }; 1037 | nread = recvmsg(w->fd, &msg, 0); 1038 | if (-1 == nread) { 1039 | if (!(EAGAIN == errno || EWOULDBLOCK == errno)) 1040 | LOG("recvmsg: %s", strerror(errno)); 1041 | return; 1042 | } 1043 | 1044 | local_sas = tut->tut_local_sas; 1045 | ecn = 0; 1046 | tut_proc_ancillary(&msg, &local_sas, &ecn); 1047 | 1048 | (void) lsquic_engine_packet_in(tut->tut_engine, buf, nread, 1049 | (struct sockaddr *) &local_sas, 1050 | (struct sockaddr *) &peer_sas, 1051 | (void *) (uintptr_t) w->fd, ecn); 1052 | 1053 | tut_process_conns(tut); 1054 | } 1055 | 1056 | 1057 | static void * 1058 | keylog_open (void *ctx, lsquic_conn_t 
*conn) 1059 | { 1060 | const char *const dir = ctx ? ctx : "."; 1061 | const lsquic_cid_t *cid; 1062 | FILE *fh; 1063 | int sz; 1064 | unsigned i; 1065 | char id_str[MAX_CID_LEN * 2 + 1]; 1066 | char path[PATH_MAX]; 1067 | static const char b2c[16] = "0123456789ABCDEF"; 1068 | 1069 | cid = lsquic_conn_id(conn); 1070 | for (i = 0; i < cid->len; ++i) 1071 | { 1072 | id_str[i * 2 + 0] = b2c[ cid->idbuf[i] >> 4 ]; 1073 | id_str[i * 2 + 1] = b2c[ cid->idbuf[i] & 0xF ]; 1074 | } 1075 | id_str[i * 2] = '\0'; 1076 | sz = snprintf(path, sizeof(path), "%s/%s.keys", dir, id_str); 1077 | if ((size_t) sz >= sizeof(path)) 1078 | { 1079 | LOG("WARN: %s: file too long", __func__); 1080 | return NULL; 1081 | } 1082 | fh = fopen(path, "wb"); 1083 | if (!fh) 1084 | LOG("WARN: could not open %s for writing: %s", path, strerror(errno)); 1085 | return fh; 1086 | } 1087 | 1088 | 1089 | static void 1090 | keylog_log_line (void *handle, const char *line) 1091 | { 1092 | fputs(line, handle); 1093 | fputs("\n", handle); 1094 | fflush(handle); 1095 | } 1096 | 1097 | 1098 | static void 1099 | keylog_close (void *handle) 1100 | { 1101 | fclose(handle); 1102 | } 1103 | 1104 | 1105 | static const struct lsquic_keylog_if keylog_if = 1106 | { 1107 | .kli_open = keylog_open, 1108 | .kli_log_line = keylog_log_line, 1109 | .kli_close = keylog_close, 1110 | }; 1111 | 1112 | 1113 | int 1114 | main (int argc, char **argv) 1115 | { 1116 | struct lsquic_engine_api eapi; 1117 | const char *cert_file = NULL, *key_file = NULL, *val; 1118 | int opt, is_server, version_cleared = 0, settings_initialized = 0; 1119 | int packets_out_version = 0; 1120 | socklen_t socklen; 1121 | struct lsquic_engine_settings settings; 1122 | struct tut tut; 1123 | union { 1124 | struct sockaddr sa; 1125 | struct sockaddr_in addr4; 1126 | struct sockaddr_in6 addr6; 1127 | } addr; 1128 | const char *key_log_dir = NULL; 1129 | char errbuf[0x100]; 1130 | 1131 | s_log_fh = stderr; 1132 | 1133 | if (0 != 
lsquic_global_init(LSQUIC_GLOBAL_SERVER|LSQUIC_GLOBAL_CLIENT)) 1134 | { 1135 | fprintf(stderr, "global initialization failed\n"); 1136 | exit(EXIT_FAILURE); 1137 | } 1138 | 1139 | memset(&tut, 0, sizeof(tut)); 1140 | 1141 | while (opt = getopt(argc, argv, "w:b:c:f:k:l:o:p:G:L:hv"), opt != -1) 1142 | { 1143 | switch (opt) 1144 | { 1145 | case 'b': 1146 | tut_client_callbacks.on_read = tut_client_on_read[ atoi(optarg) ]; 1147 | break; 1148 | case 'c': 1149 | if (settings_initialized) 1150 | { 1151 | fprintf(stderr, "-c and -k should precede -o flags\n"); 1152 | exit(EXIT_FAILURE); 1153 | } 1154 | cert_file = optarg; 1155 | break; 1156 | case 'f': 1157 | s_log_fh = fopen(optarg, "ab"); 1158 | if (!s_log_fh) 1159 | { 1160 | perror("cannot open log file for writing"); 1161 | exit(EXIT_FAILURE); 1162 | } 1163 | break; 1164 | case 'k': 1165 | if (settings_initialized) 1166 | { 1167 | fprintf(stderr, "-c and -k should precede -o flags\n"); 1168 | exit(EXIT_FAILURE); 1169 | } 1170 | key_file = optarg; 1171 | break; 1172 | case 'l': 1173 | if (0 != lsquic_logger_lopt(optarg)) 1174 | { 1175 | fprintf(stderr, "error processing -l option\n"); 1176 | exit(EXIT_FAILURE); 1177 | } 1178 | break; 1179 | case 'p': 1180 | packets_out_version = atoi(optarg); 1181 | break; 1182 | case 'G': 1183 | key_log_dir = optarg; 1184 | break; 1185 | case 'L': 1186 | if (0 != lsquic_set_log_level(optarg)) 1187 | { 1188 | fprintf(stderr, "error processing -L option\n"); 1189 | exit(EXIT_FAILURE); 1190 | } 1191 | break; 1192 | case 'v': 1193 | ++s_verbose; 1194 | break; 1195 | case 'w': 1196 | tut_server_callbacks.on_write = tut_server_on_write[ atoi(optarg) ]; 1197 | break; 1198 | case 'o': /* For example: -o version=h3-27 */ 1199 | if (!settings_initialized) 1200 | { 1201 | lsquic_engine_init_settings(&settings, 1202 | cert_file || key_file ? 
LSENG_SERVER : 0); 1203 | settings_initialized = 1; 1204 | } 1205 | val = strchr(optarg, '='); 1206 | if (!val) 1207 | { 1208 | fprintf(stderr, "error processing -o: no equal sign\n"); 1209 | exit(EXIT_FAILURE); 1210 | } 1211 | ++val; 1212 | if (0 == strncmp(optarg, "version=", val - optarg)) 1213 | { 1214 | if (!version_cleared) 1215 | { 1216 | /* Clear all version on first -o version= */ 1217 | version_cleared = 1; 1218 | settings.es_versions = 0; 1219 | } 1220 | enum lsquic_version ver = lsquic_str2ver(val, strlen(val)); 1221 | if ((unsigned) ver < N_LSQVER) 1222 | { 1223 | settings.es_versions |= 1 << ver; 1224 | break; 1225 | } 1226 | ver = lsquic_alpn2ver(val, strlen(val)); 1227 | if ((unsigned) ver < N_LSQVER) 1228 | { 1229 | settings.es_versions |= 1 << ver; 1230 | break; 1231 | } 1232 | fprintf(stderr, "error: unknown version `%s'\n", val); 1233 | exit(EXIT_FAILURE); 1234 | } 1235 | else if (0 == strncmp(optarg, "cc_algo=", val - optarg)) 1236 | settings.es_cc_algo = atoi(val); 1237 | else if (0 == strncmp(optarg, "ecn=", val - optarg)) 1238 | settings.es_ecn = atoi(val); 1239 | /* ...and so on: add more options here as necessary */ 1240 | else 1241 | { 1242 | fprintf(stderr, "error: unknown option `%.*s'\n", 1243 | (int) (val - 1 - optarg), optarg); 1244 | exit(EXIT_FAILURE); 1245 | } 1246 | break; 1247 | case 'h': 1248 | tut_usage(argv[0]); 1249 | exit(EXIT_SUCCESS); 1250 | break; 1251 | default: 1252 | exit(EXIT_FAILURE); 1253 | break; 1254 | } 1255 | } 1256 | 1257 | if (optind + 1 >= argc) 1258 | { 1259 | LOG("please specify IP address and port number"); 1260 | exit(EXIT_FAILURE); 1261 | } 1262 | 1263 | /* Parse IP address and port number */ 1264 | if (inet_pton(AF_INET, argv[optind], &addr.addr4.sin_addr)) 1265 | { 1266 | addr.addr4.sin_family = AF_INET; 1267 | addr.addr4.sin_port = htons(atoi(argv[optind + 1])); 1268 | } 1269 | else if (memset(&addr.addr6, 0, sizeof(addr.addr6)), 1270 | inet_pton(AF_INET6, argv[optind], &addr.addr6.sin6_addr)) 1271 | 
{ 1272 | addr.addr6.sin6_family = AF_INET6; 1273 | addr.addr6.sin6_port = htons(atoi(argv[optind + 1])); 1274 | } 1275 | else 1276 | { 1277 | LOG("`%s' is not a valid IP address", argv[optind]); 1278 | exit(EXIT_FAILURE); 1279 | } 1280 | 1281 | /* Specifying certificate and key files indicates server mode */ 1282 | if (cert_file || key_file) 1283 | { 1284 | if (!(cert_file && key_file)) 1285 | { 1286 | LOG("Specify both cert (-c) and key (-k) files"); 1287 | exit(EXIT_FAILURE); 1288 | } 1289 | if (0 != tut_load_cert(cert_file, key_file)) 1290 | { 1291 | LOG("Cannot load certificate"); 1292 | exit(EXIT_FAILURE); 1293 | } 1294 | tut.tut_flags |= TUT_SERVER; 1295 | } 1296 | 1297 | if (!settings_initialized) 1298 | lsquic_engine_init_settings(&settings, 1299 | tut.tut_flags & TUT_SERVER ? LSENG_SERVER : 0); 1300 | 1301 | /* At the time of this writing, using the loss bits extension causes 1302 | * decryption failures in Wireshark. For the purposes of the demo, we 1303 | * override the default. 1304 | */ 1305 | settings.es_ql_bits = 0; 1306 | 1307 | /* Check settings */ 1308 | if (0 != lsquic_engine_check_settings(&settings, 1309 | tut.tut_flags & TUT_SERVER ? 
LSENG_SERVER : 0, 1310 | errbuf, sizeof(errbuf))) 1311 | { 1312 | LOG("invalid settings: %s", errbuf); 1313 | exit(EXIT_FAILURE); 1314 | } 1315 | 1316 | /* Initialize event loop */ 1317 | tut.tut_loop = EV_DEFAULT; 1318 | tut.tut_sock_fd = socket(addr.sa.sa_family, SOCK_DGRAM, 0); 1319 | 1320 | /* Set up socket */ 1321 | if (tut.tut_sock_fd < 0) 1322 | { 1323 | perror("socket"); 1324 | exit(EXIT_FAILURE); 1325 | } 1326 | if (0 != tut_set_nonblocking(tut.tut_sock_fd)) 1327 | { 1328 | perror("fcntl"); 1329 | exit(EXIT_FAILURE); 1330 | } 1331 | if (0 != tut_set_ecn(tut.tut_sock_fd, &addr.sa)) 1332 | exit(EXIT_FAILURE); 1333 | if (tut.tut_flags & TUT_SERVER) 1334 | if (0 != tut_set_origdst(tut.tut_sock_fd, &addr.sa)) 1335 | exit(EXIT_FAILURE); 1336 | 1337 | if (tut.tut_flags & TUT_SERVER) 1338 | { 1339 | socklen = sizeof(addr); 1340 | if (0 != bind(tut.tut_sock_fd, &addr.sa, socklen)) 1341 | { 1342 | perror("bind"); 1343 | exit(EXIT_FAILURE); 1344 | } 1345 | memcpy(&tut.tut_local_sas, &addr, sizeof(addr)); 1346 | } 1347 | else 1348 | { 1349 | tut.tut_local_sas.ss_family = addr.sa.sa_family; 1350 | socklen = sizeof(tut.tut_local_sas); 1351 | if (0 != bind(tut.tut_sock_fd, 1352 | (struct sockaddr *) &tut.tut_local_sas, socklen)) 1353 | { 1354 | perror("bind"); 1355 | exit(EXIT_FAILURE); 1356 | } 1357 | ev_init(&tut.tut_timer, tut_timer_expired); 1358 | } 1359 | ev_io_init(&tut.tut_sock_w, tut_read_socket, tut.tut_sock_fd, EV_READ); 1360 | ev_io_start(tut.tut_loop, &tut.tut_sock_w); 1361 | 1362 | /* Initialize logging */ 1363 | setvbuf(s_log_fh, NULL, _IOLBF, 0); 1364 | lsquic_logger_init(&logger_if, s_log_fh, LLTS_HHMMSSUS); 1365 | 1366 | /* Initialize callbacks */ 1367 | memset(&eapi, 0, sizeof(eapi)); 1368 | eapi.ea_packets_out = tut_packets_out[packets_out_version]; 1369 | eapi.ea_packets_out_ctx = &tut; 1370 | eapi.ea_stream_if = tut.tut_flags & TUT_SERVER 1371 | ? 
&tut_server_callbacks : &tut_client_callbacks; 1372 | eapi.ea_stream_if_ctx = &tut; 1373 | eapi.ea_get_ssl_ctx = tut_get_ssl_ctx; 1374 | if (key_log_dir) 1375 | { 1376 | eapi.ea_keylog_if = &keylog_if; 1377 | eapi.ea_keylog_ctx = (void *) key_log_dir; 1378 | } 1379 | eapi.ea_settings = &settings; 1380 | 1381 | tut.tut_engine = lsquic_engine_new(tut.tut_flags & TUT_SERVER 1382 | ? LSENG_SERVER : 0, &eapi); 1383 | if (!tut.tut_engine) 1384 | { 1385 | LOG("cannot create engine"); 1386 | exit(EXIT_FAILURE); 1387 | } 1388 | 1389 | tut.tut_timer.data = &tut; 1390 | tut.tut_sock_w.data = &tut; 1391 | if (!(tut.tut_flags & TUT_SERVER)) 1392 | { 1393 | if (0 != tut_set_nonblocking(STDIN_FILENO)) 1394 | { 1395 | perror("fcntl(stdin)"); 1396 | exit(EXIT_FAILURE); 1397 | } 1398 | ev_io_init(&tut.tut_u.c.stdin_w, tut_read_stdin, STDIN_FILENO, 1399 | EV_READ); 1400 | tut.tut_u.c.stdin_w.data = &tut; 1401 | tut.tut_u.c.conn = lsquic_engine_connect( 1402 | tut.tut_engine, N_LSQVER, 1403 | (struct sockaddr *) &tut.tut_local_sas, &addr.sa, 1404 | (void *) (uintptr_t) tut.tut_sock_fd, /* Peer ctx */ 1405 | NULL, NULL, 0, NULL, 0, NULL, 0); 1406 | if (!tut.tut_u.c.conn) 1407 | { 1408 | LOG("cannot create connection"); 1409 | exit(EXIT_FAILURE); 1410 | } 1411 | tut_process_conns(&tut); 1412 | } 1413 | ev_run(tut.tut_loop, 0); 1414 | 1415 | lsquic_engine_destroy(tut.tut_engine); 1416 | lsquic_global_cleanup(); 1417 | exit(EXIT_SUCCESS); 1418 | } 1419 | --------------------------------------------------------------------------------